# -*- coding: utf-8 -*-
"""
Most of the API endpoints here use Django REST Framework to expose the content app APIs,
except for some set methods that do not return anything.
"""
from django.conf.urls import include, url
from kolibri.content import api, models, serializers
from rest_framework import viewsets
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from rest_framework_nested import routers
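# Note on database routing (inferred from the queries below, not stated elsewhere in this
# module): content models are fetched with `.objects.using(<channel_id>)`, i.e. each
# channel's metadata is expected to live in a separate database alias named after its
# channel_id, which is why the nested routes carry the channel id.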
class ChannelMetadataViewSet(viewsets.ViewSet):
lookup_field = 'channel_id'
def list(self, request, channel_pk=None):
channels = serializers.ChannelMetadataSerializer(models.ChannelMetadata.objects.all(), context={'request': request}, many=True).data
return Response(channels)
def retrieve(self, request, pk=None, channel_id=None):
channel = serializers.ChannelMetadataSerializer(models.ChannelMetadata.objects.get(channel_id=channel_id), context={'request': request}).data
return Response(channel)
class ContentMetadataViewset(viewsets.ViewSet):
lookup_field = 'content_id'
def list(self, request, channelmetadata_channel_id=None):
context = {'request': request, 'channel_id': channelmetadata_channel_id}
contents = serializers.ContentMetadataSerializer(
models.ContentMetadata.objects.using(channelmetadata_channel_id).all(), context=context, many=True
).data
return Response(contents)
def retrieve(self, request, content_id=None, channelmetadata_channel_id=None):
context = {'request': request, 'channel_id': channelmetadata_channel_id}
content = serializers.ContentMetadataSerializer(
models.ContentMetadata.objects.using(channelmetadata_channel_id).get(content_id=content_id), context=context
).data
return Response(content)
@detail_route()
def ancestor_topics(self, request, channelmetadata_channel_id, *args, **kwargs):
"""
endpoint for content api method
get_ancestor_topics(channel_id=None, content=None, **kwargs)
"""
context = {'request': request, 'channel_id': channelmetadata_channel_id}
data = serializers.ContentMetadataSerializer(
api.get_ancestor_topics(channel_id=channelmetadata_channel_id, content=self.kwargs['content_id']), context=context, many=True
).data
return Response(data)
@detail_route()
def immediate_children(self, request, channelmetadata_channel_id, *args, **kwargs):
"""
endpoint for content api method
immediate_children(channel_id=None, content=None, **kwargs)
"""
context = {'request': request, 'channel_id': channelmetadata_channel_id}
data = serializers.ContentMetadataSerializer(
api.immediate_children(channel_id=channelmetadata_channel_id, content=self.kwargs['content_id']), context=context, many=True
).data
return Response(data)
@detail_route()
def leaves(self, request, channelmetadata_channel_id, *args, **kwargs):
"""
endpoint for content api method
leaves(channel_id=None, content=None, **kwargs)
"""
context = {'request': request, 'channel_id': channelmetadata_channel_id}
data = serializers.ContentMetadataSerializer(
api.leaves(channel_id=channelmetadata_channel_id, content=self.kwargs['content_id']), context=context, many=True
).data
return Response(data)
@detail_route()
def all_prerequisites(self, request, channelmetadata_channel_id, *args, **kwargs):
"""
endpoint for content api method
get_all_prerequisites(channel_id=None, content=None, **kwargs)
"""
context = {'request': request, 'channel_id': channelmetadata_channel_id}
data = serializers.ContentMetadataSerializer(
api.get_all_prerequisites(channel_id=channelmetadata_channel_id, content=self.kwargs['content_id']), context=context, many=True
).data
return Response(data)
@detail_route()
def all_related(self, request, channelmetadata_channel_id, *args, **kwargs):
"""
endpoint for content api method
get_all_related(channel_id=None, content=None, **kwargs)
"""
context = {'request': request, 'channel_id': channelmetadata_channel_id}
data = serializers.ContentMetadataSerializer(
api.get_all_related(channel_id=channelmetadata_channel_id, content=self.kwargs['content_id']), context=context, many=True
).data
return Response(data)
@detail_route()
def all_formats(self, request, channelmetadata_channel_id, *args, **kwargs):
"""
endpoint for content api method
get_all_formats(channel_id=None, content=None, **kwargs)
"""
context = {'request': request, 'channel_id': channelmetadata_channel_id}
data = serializers.FormatSerializer(
api.get_all_formats(channel_id=channelmetadata_channel_id, content=self.kwargs['content_id']), context=context, many=True
).data
return Response(data)
@detail_route()
def available_formats(self, request, channelmetadata_channel_id, *args, **kwargs):
"""
endpoint for content api method
get_available_formats(channel_id=None, content=None, **kwargs)
"""
context = {'request': request, 'channel_id': channelmetadata_channel_id}
data = serializers.FormatSerializer(
api.get_available_formats(channel_id=channelmetadata_channel_id, content=self.kwargs['content_id']), context=context, many=True
).data
return Response(data)
@detail_route()
def possible_formats(self, request, channelmetadata_channel_id, *args, **kwargs):
"""
endpoint for content api method
get_possible_formats(channel_id=None, content=None, **kwargs)
"""
context = {'request': request, 'channel_id': channelmetadata_channel_id}
data = serializers.FormatSerializer(
api.get_possible_formats(channel_id=channelmetadata_channel_id, content=self.kwargs['content_id']), context=context, many=True
).data
return Response(data)
@detail_route()
def missing_files(self, request, channelmetadata_channel_id, *args, **kwargs):
"""
endpoint for content api method
get_missing_files(channel_id=None, content=None, **kwargs)
"""
context = {'request': request, 'channel_id': channelmetadata_channel_id}
data = serializers.FileSerializer(
api.get_missing_files(channel_id=channelmetadata_channel_id, content=self.kwargs['content_id']), context=context, many=True
).data
return Response(data)
def files_for_quality(self, request, channelmetadata_channel_id, *args, **kwargs):
"""
endpoint for content api method
get_files_for_quality(channel_id=None, content=None, format_quality=None, **kwargs)
"""
context = {'request': request, 'channel_id': channelmetadata_channel_id}
data = serializers.FileSerializer(
api.get_files_for_quality(channel_id=channelmetadata_channel_id, content=self.kwargs['content_id'], format_quality=self.kwargs['quality']),
context=context,
many=True
).data
return Response(data)
def set_prerequisite(self, request, channelmetadata_channel_id, *args, **kwargs):
"""
endpoint for content api method
set_prerequisite(channel_id=None, content1=None, content2=None, **kwargs)
"""
return Response(api.set_prerequisite(channel_id=channelmetadata_channel_id, content1=self.kwargs['content_id'], content2=self.kwargs['prerequisite']))
def set_is_related(self, request, channelmetadata_channel_id, *args, **kwargs):
"""
endpoint for content api method
set_is_related(channel_id=None, content1=None, content2=None, **kwargs)
"""
return Response(api.set_is_related(channel_id=channelmetadata_channel_id, content1=self.kwargs['content_id'], content2=self.kwargs['related']))
def children_of_kind(self, request, channelmetadata_channel_id, *args, **kwargs):
"""
endpoint for content api method
children_of_kind(channel_id=None, content=None, kind=None, **kwargs)
"""
context = {'request': request, 'channel_id': channelmetadata_channel_id}
data = serializers.ContentMetadataSerializer(
api.children_of_kind(channel_id=channelmetadata_channel_id, content=self.kwargs['content_id'], kind=self.kwargs['kind']), context=context, many=True
).data
return Response(data)
class FileViewset(viewsets.ViewSet):
def list(self, request, channelmetadata_channel_id=None):
context = {'request': request, 'channel_id': channelmetadata_channel_id}
files = serializers.FileSerializer(models.File.objects.using(channelmetadata_channel_id).all(), context=context, many=True).data
return Response(files)
def retrieve(self, request, pk=None, channelmetadata_channel_id=None):
context = {'request': request, 'channel_id': channelmetadata_channel_id}
file = serializers.FileSerializer(
models.File.objects.using(channelmetadata_channel_id).get(pk=pk), context=context
).data
return Response(file)
def update_content_copy(self, request, channelmetadata_channel_id, pk, content_copy, *args, **kwargs):
"""
endpoint for content api method
update_content_copy(file_object=None, content_copy=None)
"""
target_file = models.File.objects.using(channelmetadata_channel_id).get(pk=pk)
return Response(api.update_content_copy(file_object=target_file, content_copy=str(content_copy)))
router = routers.SimpleRouter()
router.register(r'channel', ChannelMetadataViewSet, base_name='channelmetadata')
channel_router = routers.NestedSimpleRouter(router, r'channel', lookup='channelmetadata')
channel_router.register(r'content', ContentMetadataViewset, base_name='contentmetadata')
channel_router.register(r'file', FileViewset, base_name='file')
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^', include(channel_router.urls)),
url(r'^channel/(?P<channelmetadata_channel_id>[^/.]+)/content/(?P<content_id>[^/.]+)/files_for_quality/(?P<quality>\w+)',
ContentMetadataViewset.as_view({'get': 'files_for_quality'}), name="contentmetadata_files_for_quality"),
url(r'^channel/(?P<channelmetadata_channel_id>[^/.]+)/content/(?P<content_id>[^/.]+)/children_of_kind/(?P<kind>\w+)',
ContentMetadataViewset.as_view({'get': 'children_of_kind'}), name="contentmetadata_children_of_kind"),
url(r'^channel/(?P<channelmetadata_channel_id>[^/.]+)/content/(?P<content_id>[^/.]+)/set_prerequisite/(?P<prerequisite>[^/.]+)',
ContentMetadataViewset.as_view({'put': 'set_prerequisite'}), name="contentmetadata_set_prerequisite"),
url(r'^channel/(?P<channelmetadata_channel_id>[^/.]+)/content/(?P<content_id>[^/.]+)/set_is_related/(?P<related>[^/.]+)',
ContentMetadataViewset.as_view({'put': 'set_is_related'}), name="contentmetadata_set_is_related"),
url(r'^channel/(?P<channelmetadata_channel_id>[^/.]+)/file/(?P<pk>[^/.]+)/update_content_copy/(?P<content_copy>.*)',
FileViewset.as_view({'put': 'update_content_copy'}), name="file_update_content_copy"),
]
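# Illustrative shapes of the URLs generated by the routers above (the mount prefix of
# this urlconf is an assumption; only the suffixes come from the registrations):
#   channel/                                               -> ChannelMetadataViewSet.list
#   channel/<channel_id>/                                   -> ChannelMetadataViewSet.retrieve
#   channel/<channel_id>/content/                           -> ContentMetadataViewset.list
#   channel/<channel_id>/content/<content_id>/              -> ContentMetadataViewset.retrieve
#   channel/<channel_id>/content/<content_id>/ancestor_topics/  -> detail route (and likewise
#   immediate_children, leaves, all_prerequisites, all_related, all_formats, ...)
# plus the explicit patterns above for files_for_quality, children_of_kind,
# set_prerequisite, set_is_related and update_content_copy.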
from ifaint import *
import os
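# Overview (summary of the code below): this script builds a grid image in which each
# cell holds one shape, crossing the inner line styles (Solid/Dashed) and color sources
# (Color/Gradient/Pattern) with the outer fill-style and line-width variations, draws
# separating lines and captions, then saves the result as PNG and as SVG and re-opens
# the SVG as a round-trip test.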
def style_lw1(obj):
obj.linewidth = 1
def style_lw2(obj):
obj.linewidth = 2
def style_lw20(obj):
obj.linewidth = 20
def style_none(obj):
obj.fillstyle = 'none'
obj.linewidth = 2
def style_border(obj):
obj.fillstyle = 'b'
obj.linewidth = 2
def style_fill(obj):
obj.fillstyle = 'f'
obj.linewidth = 2
def style_border_fill(obj):
obj.fillstyle = 'bf'
obj.linewidth = 2
# Column styles (inner column headings: Solid / Dashed)
def style_solid(obj):
obj.linestyle = 's'
def style_solid_filled(obj):
obj.linestyle = 's'
obj.fillstyle = 'f'
def style_dashed(obj):
obj.linestyle = 'ld'
def style_dashed_filled(obj):
obj.linestyle = 'ld'
obj.fillstyle = 'f'
# Row styles (inner row headings: Color / Gradient / Pattern)
def style_color(obj):
obj.fg = (255,0,255)
obj.bg = (0,255,255)
def style_gradient(obj):
obj.fg = LinearGradient(0.0, (0.0,(255,0,0)), (1.0,(0,255,0)))
obj.bg = LinearGradient(0.0, (0.0,(0,0,255)), (1.0,(255,0,0)))
def style_pattern(obj):
p1 = Bitmap((10,10))
p1.fill((0,0),(255,0,255))
p1.line((0,0,9,9),(0,0,0))
p2 = Bitmap((10,10))
p2.fill((0,0),(0,255,255))
p2.line((0,9,9,0),(0,0,0))
obj.fg = Pattern(p1)
obj.bg = Pattern(p2)
col_funcs = [(style_solid, "Solid"), (style_dashed, "Dashed")]
row_funcs = [(style_color, "Color"),
(style_gradient, "Gradient"),
(style_pattern, "Pattern")]
col_outer = [(style_border, "Border"),
(style_fill, "Fill"),
(style_border_fill, "Border+Fill"),
(style_none, "None")]
row_outer = [(style_lw1, "1"),
(style_lw2, "2"),
(style_lw20, "20")]
CELL_WIDTH = 100
CELL_HEIGHT = 100
START_COL = 120
START_ROW = 40
HEADING_COL_2 = 40
HEADING_ROW_2 = 20
NUM_ROWS = len(row_funcs) * len(row_outer)
NUM_COLS = len(col_funcs) * len(col_outer)
IMAGE_WIDTH = CELL_WIDTH * NUM_COLS + START_COL
IMAGE_HEIGHT = CELL_HEIGHT * NUM_ROWS + START_ROW
img = app.new(IMAGE_WIDTH,IMAGE_HEIGHT)
def apply_funcs(obj, row, col):
cf = col_funcs[col % len(col_funcs)][0]
rf = row_funcs[row % len(row_funcs)][0]
outer_cf = col_outer[col // len(col_funcs)][0]
outer_rf = row_outer[row // len(row_funcs)][0]
cf(obj)
rf(obj)
outer_cf(obj)
outer_rf(obj)
return obj
def create_rect(img, row, col):
r = img.Rect((START_COL + CELL_WIDTH * col + CELL_WIDTH / 4,
START_ROW + CELL_HEIGHT * row + CELL_HEIGHT / 4,
CELL_WIDTH / 2,
CELL_HEIGHT / 2))
r.name = "Rect r%d c%d" % (row, col)
return apply_funcs(r, row, col)
def create_ellipse(img, row, col):
e = img.Ellipse((START_COL + CELL_WIDTH * col + CELL_WIDTH / 4,
START_ROW + CELL_HEIGHT * row + CELL_HEIGHT / 4,
CELL_WIDTH / 2,
CELL_HEIGHT / 2))
e.name = "Ellipse r%d c%d" % (row, col)
return apply_funcs(e, row, col)
def create_line(img, row, col):
l = img.Line((START_COL + CELL_WIDTH * col + CELL_WIDTH / 4,
START_ROW + CELL_HEIGHT * row + CELL_HEIGHT / 4,
CELL_WIDTH / 2,
CELL_HEIGHT / 2))
l.name = "Line r%d c%d" % (row, col)
return apply_funcs(l, row, col)
def create_polygon(img, row, col):
POLYGON_POINTS = [15.25, 0.25, 67.25, 31.5, 49.0, 85.25,
-0.5, 88.0, 71.5, 53.75, 5.25, 40.75, 35.0, 66.5]
x0 = START_COL + CELL_WIDTH * col + 10
y0 = START_ROW + CELL_HEIGHT * row + 5
p = img.Polygon(POLYGON_POINTS)
p.pos = x0, y0
return apply_funcs(p, row, col)
# Lines separating inner and outer captions
img.Line((START_COL, HEADING_ROW_2, IMAGE_WIDTH, HEADING_ROW_2))
img.Line((HEADING_COL_2, START_ROW, HEADING_COL_2, IMAGE_HEIGHT))
# Vertical lines
for i in range(NUM_COLS):
y0 = 0 if i % len(col_funcs) == 0 else HEADING_ROW_2
img.Line((START_COL + CELL_WIDTH * i, y0,
START_COL + CELL_WIDTH * i,IMAGE_HEIGHT))
# Horizontal lines
for i in range(NUM_ROWS):
x0 = 0 if i % len(row_funcs) == 0 else HEADING_COL_2
img.Line((x0,START_ROW + CELL_HEIGHT * i,
IMAGE_WIDTH, START_ROW + CELL_HEIGHT * i))
# Column headings
for x, outer in enumerate(col_outer):
offset = x * len(col_funcs) * CELL_WIDTH + 5
w = CELL_WIDTH * len(col_funcs)
text = img.Text((START_COL + CELL_WIDTH * len(col_funcs) * x, 5,
CELL_WIDTH * len(col_funcs), 20), outer[1])
text.halign = 'center' # Fails on load
text.text_render_style = 'pangolayout'
for i, item in enumerate(col_funcs):
img.Text((START_COL + CELL_WIDTH * i + 5 +
offset, HEADING_ROW_2 + 5), item[1])
# Row headings
for x, outer in enumerate(row_outer):
offset = x * len(row_funcs) * CELL_HEIGHT + 5
text = img.Text((10, START_ROW + CELL_HEIGHT * len(row_funcs) * x, 10,
CELL_HEIGHT * len(row_funcs)),
outer[1])
text.valign = 'middle'
for i, item in enumerate(row_funcs):
text = img.Text((HEADING_COL_2 + 10,
START_ROW + CELL_HEIGHT * i + offset,
100, CELL_HEIGHT),
item[1])
text.valign = 'middle'
def for_each_cell(img, func):
for row in range(NUM_ROWS):
for col in range(NUM_COLS):
func(img, row, col)
#for_each_cell(img, create_polygon)
for_each_cell(img, create_ellipse)
#for_each_cell(img, create_line)
def in_out_dir(file_name):
return os.path.join(os.getcwd(), 'out', file_name)
# Save as a png for comparison
app.save_backup(img, in_out_dir('test-save-svg.png'))
# Save as SVG and load again
file_name = in_out_dir('test-save-svg.svg')
app.save_backup(img, file_name)
img2 = app.open(file_name)
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import, unicode_literals
import sys
import os
import logging
import copy
from optparse import OptionParser
from svtplay_dl.error import UIException
from svtplay_dl.log import log
from svtplay_dl.utils import select_quality, list_quality, is_py2, ensure_unicode
from svtplay_dl.service import service_handler, Generic
from svtplay_dl.fetcher import VideoRetriever
from svtplay_dl.subtitle import subtitle
from svtplay_dl.info import info
from svtplay_dl.output import filename
from svtplay_dl.postprocess import postprocess
from svtplay_dl.service.aftonbladet import Aftonbladet
from svtplay_dl.service.bambuser import Bambuser
from svtplay_dl.service.bigbrother import Bigbrother
from svtplay_dl.service.dbtv import Dbtv
from svtplay_dl.service.disney import Disney
from svtplay_dl.service.dplay import Dplay
from svtplay_dl.service.dr import Dr
from svtplay_dl.service.efn import Efn
from svtplay_dl.service.expressen import Expressen
from svtplay_dl.service.facebook import Facebook
from svtplay_dl.service.filmarkivet import Filmarkivet
from svtplay_dl.service.flowonline import Flowonline
from svtplay_dl.service.hbo import Hbo
from svtplay_dl.service.twitch import Twitch
from svtplay_dl.service.lemonwhale import Lemonwhale
from svtplay_dl.service.mtvnn import Mtvnn
from svtplay_dl.service.mtvservices import Mtvservices
from svtplay_dl.service.nhl import NHL
from svtplay_dl.service.nrk import Nrk
from svtplay_dl.service.oppetarkiv import OppetArkiv
from svtplay_dl.service.picsearch import Picsearch
from svtplay_dl.service.pokemon import Pokemon
from svtplay_dl.service.qbrick import Qbrick
from svtplay_dl.service.radioplay import Radioplay
from svtplay_dl.service.riksdagen import Riksdagen
from svtplay_dl.service.ruv import Ruv
from svtplay_dl.service.raw import Raw
from svtplay_dl.service.solidtango import Solidtango
from svtplay_dl.service.sr import Sr
from svtplay_dl.service.svtplay import Svtplay
from svtplay_dl.service.tv4play import Tv4play
from svtplay_dl.service.urplay import Urplay
from svtplay_dl.service.vg import Vg
from svtplay_dl.service.viaplay import Viaplay
from svtplay_dl.service.viasatsport import Viasatsport
from svtplay_dl.service.vimeo import Vimeo
from svtplay_dl.service.youplay import Youplay
__version__ = "1.9.3"
sites = [
Aftonbladet,
Bambuser,
Bigbrother,
Dbtv,
Disney,
Dplay,
Dr,
Efn,
Expressen,
Facebook,
Filmarkivet,
Flowonline,
Hbo,
Twitch,
Lemonwhale,
Mtvservices,
Mtvnn,
NHL,
Nrk,
Qbrick,
Picsearch,
Pokemon,
Ruv,
Radioplay,
Solidtango,
Sr,
Svtplay,
OppetArkiv,
Tv4play,
Urplay,
Viaplay,
Viasatsport,
Vimeo,
Vg,
Youplay,
Riksdagen,
Raw]
class Options(object):
"""
Simple container class for options used when calling get_media() from another
Python script. The variables correspond to the command line parameters parsed
in main() when the script is called directly.
When called from a script there are a few more things to consider:
* Logging is done to 'log'. main() calls setup_log(), which sets the
logging to either stdout or stderr depending on the silent level.
A user calling get_media() directly can either also use setup_log()
or configure the log manually.
* Progress information is printed to 'progress_stream', which defaults to
sys.stderr but can be changed to any stream.
* Many errors result in calls to sys.exit(), so catch SystemExit exceptions
to prevent the entire application from exiting if that happens.
A minimal usage sketch follows this class definition.
"""
def __init__(self):
self.output = None
self.resume = False
self.live = False
self.silent = False
self.force = False
self.quality = 0
self.flexibleq = 0
self.list_quality = False
self.other = None
self.subtitle = False
self.info = False
self.username = None
self.password = None
self.thumbnail = False
self.all_episodes = False
self.all_last = -1
self.merge_subtitle = False
self.force_subtitle = False
self.require_subtitle = False
self.get_all_subtitles = False
self.get_raw_subtitles = False
self.convert_subtitle_colors = False
self.preferred = None
self.verbose = False
self.output_auto = False
self.service = None
self.cookies = None
self.exclude = None
self.get_url = False
self.ssl_verify = True
self.http_headers = None
self.stream_prio = None
self.remux = False
self.silent_semi = False
self.get_info = False
self.include_clips = False
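# A minimal sketch of driving this module from another script, as described in the
# Options docstring above. Assumptions: svtplay_dl is importable as a package and
# the URL is only a placeholder.
#
#     from svtplay_dl import Options, get_media, setup_log
#
#     opts = Options()
#     opts.output = "downloads/"
#     opts.subtitle = True
#     setup_log(silent=False)
#     try:
#         get_media("http://example.com/some-video", opts)
#     except SystemExit:
#         pass  # many error paths call sys.exit(); swallow it to keep the caller alive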
def get_multiple_media(urls, options):
if options.output and os.path.isfile(options.output):
log.error("Output must be a directory if used with multiple URLs")
sys.exit(2)
elif options.output and not os.path.exists(options.output):
try:
os.makedirs(options.output)
except OSError as e:
log.error("%s: %s", e.strerror, e.filename)
return
for url in urls:
get_media(url, copy.copy(options))
def get_media(url, options):
if "http" not in url[:4]:
url = "http://%s" % url
if options.silent_semi:
options.silent = True
stream = service_handler(sites, options, url)
if not stream:
generic = Generic(options, url)
url, stream = generic.get(sites)
if not stream:
if url.find(".f4m") > 0 or url.find(".m3u8") > 0:
stream = Raw(options, url)
if not stream:
log.error("That site is not supported. Make a ticket or send a message")
sys.exit(2)
if is_py2:
url = ensure_unicode(url)
if options.all_episodes:
get_all_episodes(stream, copy.copy(options), url)
else:
get_one_media(stream, copy.copy(options))
def get_all_episodes(stream, options, url):
if options.output and os.path.isfile(options.output):
log.error("Output must be a directory if used with --all-episodes")
sys.exit(2)
elif options.output and not os.path.exists(options.output):
try:
os.makedirs(options.output)
except OSError as e:
log.error("%s: %s", e.strerror, e.filename)
return
episodes = stream.find_all_episodes(options)
if episodes is None:
return
for idx, o in enumerate(episodes):
if o == url:
substream = stream
else:
substream = service_handler(sites, copy.copy(options), o)
log.info("Episode %d of %d", idx + 1, len(episodes))
log.info("Url: %s",o)
# get_one_media overwrites options.output...
get_one_media(substream, copy.copy(options))
def get_one_media(stream, options):
# Make an automagic filename
if not filename(stream):
return
if options.merge_subtitle:
from svtplay_dl.utils import which
if not which('ffmpeg'):
log.error("--merge-subtitle needs ffmpeg. Please install ffmpeg.")
log.info("https://ffmpeg.org/download.html")
sys.exit(2)
videos = []
subs = []
infos = []
subfixes = []
error = []
streams = stream.get()
try:
for i in streams:
if isinstance(i, VideoRetriever):
if options.preferred:
if options.preferred.lower() == i.name():
videos.append(i)
else:
videos.append(i)
if isinstance(i, subtitle):
subs.append(i)
if isinstance(i, info):
infos.append(i)
if isinstance(i, Exception):
error.append(i)
except Exception as e:
if options.verbose:
log.error("version: %s" % __version__)
raise
else:
log.error("svtplay-dl crashed")
log.error("Run again and add --verbose as an argument, to get more information")
log.error("If the error persists, you can report it at https://github.com/spaam/svtplay-dl/issues")
log.error("Include the URL used, the stack trace and the output of svtplay-dl --version in the issue")
sys.exit(3)
if options.require_subtitle and not subs:
log.info("No subtitles available")
return
if options.subtitle and options.get_url:
if subs:
if options.get_all_subtitles:
for sub in subs:
print(sub.url)
else:
print(subs[0].url)
if options.force_subtitle:
return
def options_subs_dl(subfixes):
if subs:
if options.get_all_subtitles:
for sub in subs:
sub.download()
if options.merge_subtitle:
if sub.subfix:
subfixes += [sub.subfix]
else:
options.get_all_subtitles = False
else:
subs[0].download()
elif options.merge_subtitle:
options.merge_subtitle = False
if options.subtitle and options.output != "-" and not options.get_url:
options_subs_dl(subfixes)
if options.force_subtitle:
return
if options.get_info and options.output != "-" and not options.get_url:
for inf in infos:
inf.save_info()
if options.merge_subtitle and not options.subtitle:
options_subs_dl(subfixes)
if len(videos) == 0:
for exc in error:
log.error(str(exc))
else:
if options.list_quality:
list_quality(videos)
return
try:
stream = select_quality(options, videos)
if options.get_url:
print(stream.url)
return
log.info("Selected to download %s, bitrate: %s",
stream.name(), stream.bitrate)
stream.download()
except UIException as e:
if options.verbose:
raise e
log.error(e)
sys.exit(2)
if options.thumbnail and hasattr(stream, "get_thumbnail"):
if options.output != "-":
log.info("Getting thumbnail")
stream.get_thumbnail(options)
else:
log.warning("Can not get thumbnail when fetching to stdout")
post = postprocess(stream, options, subfixes)
if stream.name() == "dash" and post.detect:
post.merge()
if stream.name() == "dash" and not post.detect and stream.finished:
log.warning("Cant find ffmpeg/avconv. audio and video is in seperate files. if you dont want this use -P hls or hds")
if options.remux:
post.remux()
if options.silent_semi and stream.finished:
log.log(25, "Download of %s was completed" % stream.options.output)
def setup_log(silent, verbose=False):
logging.addLevelName(25, "INFO")
fmt = logging.Formatter('%(levelname)s: %(message)s')
if silent:
stream = sys.stderr
level = 25
elif verbose:
stream = sys.stderr
level = logging.DEBUG
fmt = logging.Formatter('%(levelname)s [%(created)s] %(pathname)s/%(funcName)s: %(message)s')
else:
stream = sys.stdout
level = logging.INFO
hdlr = logging.StreamHandler(stream)
hdlr.setFormatter(fmt)
log.addHandler(hdlr)
log.setLevel(level)
def main():
""" Main program """
usage = "Usage: %prog [options] [urls]"
parser = OptionParser(usage=usage, version=__version__)
parser.add_option("-o", "--output",
metavar="OUTPUT", help="outputs to the given filename or folder")
parser.add_option("-f", "--force",
action="store_true", dest="force", default=False,
help="overwrite if file exists already")
parser.add_option("-r", "--resume",
action="store_true", dest="resume", default=False,
help="resume a download (RTMP based ones)")
parser.add_option("-l", "--live",
action="store_true", dest="live", default=False,
help="enable for live streams (RTMP based ones)")
parser.add_option("-s", "--silent",
action="store_true", dest="silent", default=False,
help="be less verbose")
parser.add_option("--silent-semi", action="store_true",
dest="silent_semi", default=False, help="only show a message when the file is downloaded")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="explain what is going on")
parser.add_option("-q", "--quality", default=0,
metavar="quality", help="choose what format to download based on bitrate / video resolution. "
"it will download the best format by default")
parser.add_option("-Q", "--flexible-quality", default=0,
metavar="amount", dest="flexibleq", help="allow given quality (as above) to differ by an amount")
parser.add_option("--list-quality", dest="list_quality", action="store_true", default=False,
help="list the quality for a video")
parser.add_option("-S", "--subtitle",
action="store_true", dest="subtitle", default=False,
help="download subtitle from the site if available")
parser.add_option("-M", "--merge-subtitle", action="store_true", dest="merge_subtitle",
default=False, help="merge subtitle with video/audio file with corresponding ISO639-3 language code. use with -S for external also.")
parser.add_option("--force-subtitle", dest="force_subtitle", default=False,
action="store_true", help="download only subtitle if its used with -S")
parser.add_option("--require-subtitle", dest="require_subtitle", default=False,
action="store_true", help="download only if a subtitle is available")
parser.add_option("--all-subtitles", dest="get_all_subtitles", default=False, action="store_true",
help="Download all available subtitles for the video")
parser.add_option("--raw-subtitles", dest="get_raw_subtitles", default=False, action="store_true",
help="also download the subtitles in their native format")
parser.add_option("--convert-subtitle-colors", dest="convert_subtitle_colors", default=False, action="store_true",
help="converts the color information in subtitles, to <font color=""> tags")
parser.add_option("-u", "--username", default=None,
help="username")
parser.add_option("-p", "--password", default=None,
help="password")
parser.add_option("-t", "--thumbnail",
action="store_true", dest="thumbnail", default=False,
help="download thumbnail from the site if available")
parser.add_option("-A", "--all-episodes",
action="store_true", dest="all_episodes", default=False,
help="try to download all episodes")
parser.add_option("--all-last", dest="all_last", default=-1, type=int,
metavar="NN", help="get last NN episodes instead of all episodes")
parser.add_option("-P", "--preferred", default=None,
metavar="preferred", help="preferred download method (dash, hls, hds, http or rtmp)")
parser.add_option("--exclude", dest="exclude", default=None,
metavar="WORD1,WORD2,...", help="exclude videos with the WORD(s) in the filename. comma separated.")
parser.add_option("-g", "--get-url",
action="store_true", dest="get_url", default=False,
help="do not download any video, but instead print the URL.")
parser.add_option("--dont-verify-ssl-cert", action="store_false", dest="ssl_verify", default=True,
help="Don't attempt to verify SSL certificates.")
parser.add_option("--http-header", dest="http_headers", default=None, metavar="header1=value;header2=value2",
help="A header to add to each HTTP request.")
parser.add_option("--stream-priority", dest="stream_prio", default=None, metavar="dash,hls,hds,http,rtmp",
help="If two streams have the same quality, choose the one you prefer")
parser.add_option("--remux", dest="remux", default=False, action="store_true",
help="Remux from one container to mp4 using ffmpeg or avconv")
parser.add_option("--include-clips", dest="include_clips", default=False, action="store_true",
help="include clips from websites when using -A")
parser.add_option("--get-info",
action="store_true", dest="get_info", default=False,
help="Download and saves information about the video if available")
(options, args) = parser.parse_args()
if not args:
parser.print_help()
sys.exit(0)
if len(args) < 1:
parser.error("Incorrect number of arguments")
if options.exclude:
options.exclude = options.exclude.split(",")
if options.require_subtitle:
if options.merge_subtitle:
options.merge_subtitle = True
else:
options.subtitle = True
if options.merge_subtitle:
options.remux = True
options = mergeParserOption(Options(), options)
if options.silent_semi:
options.silent = True
setup_log(options.silent, options.verbose)
if options.flexibleq and not options.quality:
log.error("flexible-quality requires a quality")
sys.exit(4)
urls = args
try:
if len(urls) == 1:
get_media(urls[0], options)
else:
get_multiple_media(urls, options)
except KeyboardInterrupt:
print("")
def mergeParserOption(options, parser):
options.output = parser.output
options.resume = parser.resume
options.live = parser.live
options.silent = parser.silent
options.force = parser.force
options.quality = parser.quality
options.flexibleq = parser.flexibleq
options.list_quality = parser.list_quality
options.subtitle = parser.subtitle
options.merge_subtitle = parser.merge_subtitle
options.silent_semi = parser.silent_semi
options.username = parser.username
options.password = parser.password
options.thumbnail = parser.thumbnail
options.all_episodes = parser.all_episodes
options.all_last = parser.all_last
options.force_subtitle = parser.force_subtitle
options.require_subtitle = parser.require_subtitle
options.preferred = parser.preferred
options.verbose = parser.verbose
options.exclude = parser.exclude
options.get_url = parser.get_url
options.ssl_verify = parser.ssl_verify
options.http_headers = parser.http_headers
options.stream_prio = parser.stream_prio
options.remux = parser.remux
options.get_all_subtitles = parser.get_all_subtitles
options.get_raw_subtitles = parser.get_raw_subtitles
options.convert_subtitle_colors = parser.convert_subtitle_colors
options.include_clips = parser.include_clips
options.get_info = parser.get_info
return options
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver common utilities for HP 3PAR Storage array
The 3PAR drivers require 3.1.2 MU2 firmware on the 3PAR array.
You will need to install the python hp3parclient:
sudo pip install hp3parclient
The drivers use both the REST service and the SSH
command line to operate correctly. Since the
SSH credentials and the REST credentials can be different,
we need to have settings for both.
The drivers require the san_ip, san_login and
san_password settings for SSH connections into the 3PAR
array. They also require the
hp3par_api_url, hp3par_username and hp3par_password settings
for credentials to talk to the REST service on the 3PAR
array.
"""
import ast
import base64
import json
import pprint
from random import randint
import re
import time
import uuid
from eventlet import greenthread
import hp3parclient
from hp3parclient import client
from hp3parclient import exceptions as hpexceptions
from oslo.config import cfg
import paramiko
from cinder import context
from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder import utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
MIN_CLIENT_VERSION = '2.0.0'
hp3par_opts = [
cfg.StrOpt('hp3par_api_url',
default='',
help="3PAR WSAPI Server Url like "
"https://<3par ip>:8080/api/v1"),
cfg.StrOpt('hp3par_username',
default='',
help="3PAR Super user username"),
cfg.StrOpt('hp3par_password',
default='',
help="3PAR Super user password",
secret=True),
# TODO(kmartin): Remove hp3par_domain during I release.
cfg.StrOpt('hp3par_domain',
default=None,
help="This option is DEPRECATED and no longer used. "
"The 3par domain name to use."),
cfg.StrOpt('hp3par_cpg',
default="OpenStack",
help="The CPG to use for volume creation"),
cfg.StrOpt('hp3par_cpg_snap',
default="",
help="The CPG to use for Snapshots for volumes. "
"If empty hp3par_cpg will be used"),
cfg.StrOpt('hp3par_snapshot_retention',
default="",
help="The time in hours to retain a snapshot. "
"You can't delete it before this expires."),
cfg.StrOpt('hp3par_snapshot_expiration',
default="",
help="The time in hours when a snapshot expires "
" and is deleted. This must be larger than expiration"),
cfg.BoolOpt('hp3par_debug',
default=False,
help="Enable HTTP debugging to 3PAR"),
cfg.ListOpt('hp3par_iscsi_ips',
default=[],
help="List of target iSCSI addresses to use.")
]
CONF = cfg.CONF
CONF.register_opts(hp3par_opts)
class HP3PARCommon(object):
"""Class that contains common code for the 3PAR drivers.
Version history:
1.2.0 - Updated hp3parclient API use to 2.0.x
1.2.1 - Check that the VVS exists
1.2.2 - log prior to raising exceptions
"""
VERSION = "1.2.2"
stats = {}
# Valid values for volume type extra specs
# The first value in the list is the default value
valid_prov_values = ['thin', 'full']
valid_persona_values = ['1 - Generic',
'2 - Generic-ALUA',
'6 - Generic-legacy',
'7 - HPUX-legacy',
'8 - AIX-legacy',
'9 - EGENERA',
'10 - ONTAP-legacy',
'11 - VMware',
'12 - OpenVMS']
hp_qos_keys = ['maxIOPS', 'maxBWS']
hp3par_valid_keys = ['cpg', 'snap_cpg', 'provisioning', 'persona', 'vvs']
def __init__(self, config):
self.sshpool = None
self.config = config
self.hosts_naming_dict = dict()
self.client = None
if CONF.hp3par_domain is not None:
LOG.deprecated(_("hp3par_domain has been deprecated and "
"is no longer used. The domain is automatically "
"looked up based on the CPG."))
def get_version(self):
return self.VERSION
def check_flags(self, options, required_flags):
for flag in required_flags:
if not getattr(options, flag, None):
msg = _('%s is not set') % flag
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def _create_client(self):
cl = client.HP3ParClient(self.config.hp3par_api_url)
client_version = hp3parclient.version
if (client_version < MIN_CLIENT_VERSION):
ex_msg = (_('Invalid hp3parclient version. Version %s or greater '
'required.') % MIN_CLIENT_VERSION)
LOG.error(ex_msg)
raise exception.InvalidInput(reason=ex_msg)
return cl
def client_login(self):
try:
LOG.debug("Connecting to 3PAR")
self.client.login(self.config.hp3par_username,
self.config.hp3par_password)
except hpexceptions.HTTPUnauthorized as ex:
msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") %
{'url': self.config.hp3par_api_url, 'err': str(ex)})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def client_logout(self):
self.client.logout()
LOG.debug("Disconnect from 3PAR")
def do_setup(self, context):
try:
self.client = self._create_client()
except hpexceptions.UnsupportedVersion as ex:
raise exception.InvalidInput(str(ex))
LOG.info(_("HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s")
% {"common_ver": self.VERSION,
"rest_ver": hp3parclient.get_version_string()})
if self.config.hp3par_debug:
self.client.debug_rest(True)
self.client_login()
try:
# make sure the default CPG exists
self.validate_cpg(self.config.hp3par_cpg)
self._set_connections()
finally:
self.client_logout()
def validate_cpg(self, cpg_name):
try:
cpg = self.client.getCPG(cpg_name)
except hpexceptions.HTTPNotFound as ex:
err = (_("CPG (%s) doesn't exist on array") % cpg_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
def _set_connections(self):
"""Set the number of concurrent connections.
The 3PAR WS API server has a limit of concurrent connections.
This is setting the number to the highest allowed, 15 connections.
"""
self._cli_run(['setwsapi', '-sru', 'high'])
def get_domain(self, cpg_name):
try:
cpg = self.client.getCPG(cpg_name)
except hpexceptions.HTTPNotFound:
err = (_("Failed to get domain because CPG (%s) doesn't "
"exist on array.") % cpg_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
if 'domain' in cpg:
return cpg['domain']
return None
def extend_volume(self, volume, new_size):
volume_name = self._get_3par_vol_name(volume['id'])
old_size = volume.size
growth_size = int(new_size) - old_size
LOG.debug("Extending Volume %s from %s to %s, by %s GB." %
(volume_name, old_size, new_size, growth_size))
try:
self._cli_run(['growvv', '-f', volume_name, '%dg' % growth_size])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("Error extending volume %s") % volume)
def _get_3par_vol_name(self, volume_id):
"""Get converted 3PAR volume name.
Converts the openstack volume id from
ecffc30f-98cb-4cf5-85ee-d7309cc17cd2
to
osv-7P.DD5jLTPWF7tcwnMF80g
We convert the 128 bits of the uuid into a 24character long
base64 encoded string to ensure we don't exceed the maximum
allowed 31 character name limit on 3Par
We strip the padding '=' and replace + with .
and / with -
"""
volume_name = self._encode_name(volume_id)
return "osv-%s" % volume_name
def _get_3par_snap_name(self, snapshot_id):
snapshot_name = self._encode_name(snapshot_id)
return "oss-%s" % snapshot_name
def _get_3par_vvs_name(self, volume_id):
vvs_name = self._encode_name(volume_id)
return "vvs-%s" % vvs_name
def _encode_name(self, name):
uuid_str = name.replace("-", "")
vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)
vol_encoded = base64.b64encode(vol_uuid.bytes)
# 3par doesn't allow +, nor /
vol_encoded = vol_encoded.replace('+', '.')
vol_encoded = vol_encoded.replace('/', '-')
# strip off the == as 3par doesn't like those.
vol_encoded = vol_encoded.replace('=', '')
return vol_encoded
def _capacity_from_size(self, vol_size):
# 3PAR expects volume sizes in MiB, while Cinder passes sizes in GB,
# so convert GB -> MB -> MiB here.
MB = 1000L
MiB = 1.048576
if int(vol_size) == 0:
capacity = MB # default: 1GB
else:
capacity = vol_size * MB
capacity = int(round(capacity / MiB))
return capacity
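# Worked example of the conversion above (illustrative arithmetic, not from the
# original code): a 2 GB Cinder volume gives capacity = 2 * 1000 = 2000,
# then int(round(2000 / 1.048576)) = 1907 MiB is sent to the array.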
def _cli_run(self, cmd):
"""Runs a CLI command over SSH, without doing any result parsing."""
LOG.debug("SSH CMD = %s " % cmd)
(stdout, stderr) = self._run_ssh(cmd, False)
# we have to strip out the input and exit lines
tmp = stdout.split("\r\n")
out = tmp[5:len(tmp) - 2]
return out
def _ssh_execute(self, ssh, cmd, check_exit_code=True):
"""We have to do this in order to get CSV output from the CLI command.
We first have to issue a command to tell the CLI that we want the
output to be formatted in CSV, then we issue the real command.
"""
LOG.debug(_('Running cmd (SSH): %s'), cmd)
channel = ssh.invoke_shell()
stdin_stream = channel.makefile('wb')
stdout_stream = channel.makefile('rb')
stderr_stream = channel.makefile('rb')
stdin_stream.write('''setclienv csvtable 1
%s
exit
''' % cmd)
# stdin.write('process_input would go here')
# stdin.flush()
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
stdout_stream.close()
stderr_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
if check_exit_code and exit_status != 0:
msg = _("command %s failed") % cmd
LOG.error(msg)
raise processutils.ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
channel.close()
return (stdout, stderr)
def _run_ssh(self, cmd_list, check_exit=True, attempts=1):
utils.check_ssh_injection(cmd_list)
command = ' '.join(cmd_list)
if not self.sshpool:
self.sshpool = utils.SSHPool(self.config.san_ip,
self.config.san_ssh_port,
self.config.ssh_conn_timeout,
self.config.san_login,
password=self.config.san_password,
privatekey=
self.config.san_private_key,
min_size=
self.config.ssh_min_pool_conn,
max_size=
self.config.ssh_max_pool_conn)
try:
total_attempts = attempts
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
return self._ssh_execute(ssh, command,
check_exit_code=check_exit)
except Exception as e:
LOG.error(e)
greenthread.sleep(randint(20, 500) / 100.0)
msg = (_("SSH Command failed after '%(total_attempts)r' "
"attempts : '%(command)s'") %
{'total_attempts': total_attempts, 'command': command})
LOG.error(msg)
raise exception.CinderException(message=msg)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("Error running ssh command: %s") % command)
def _delete_3par_host(self, hostname):
self.client.deleteHost(hostname)
def _create_3par_vlun(self, volume, hostname):
try:
self.client.createVLUN(volume, hostname=hostname, auto=True)
except hpexceptions.HTTPBadRequest as e:
if 'must be in the same domain' in e.get_description():
LOG.error(e.get_description())
raise exception.Invalid3PARDomain(err=e.get_description())
def _safe_hostname(self, hostname):
"""We have to use a safe hostname length for 3PAR host names."""
try:
index = hostname.index('.')
except ValueError:
# couldn't find it
index = len(hostname)
# we'll just chop this off for now.
if index > 23:
index = 23
return hostname[:index]
def _get_3par_host(self, hostname):
return self.client.getHost(hostname)
def get_ports(self):
return self.client.getPorts()
def get_active_target_ports(self):
ports = self.get_ports()
target_ports = []
for port in ports['members']:
if (
port['mode'] == self.client.PORT_MODE_TARGET and
port['linkState'] == self.client.PORT_STATE_READY
):
port['nsp'] = self.build_nsp(port['portPos'])
target_ports.append(port)
return target_ports
def get_active_fc_target_ports(self):
ports = self.get_active_target_ports()
fc_ports = []
for port in ports:
if port['protocol'] == self.client.PORT_PROTO_FC:
fc_ports.append(port)
return fc_ports
def get_active_iscsi_target_ports(self):
ports = self.get_active_target_ports()
iscsi_ports = []
for port in ports:
if port['protocol'] == self.client.PORT_PROTO_ISCSI:
iscsi_ports.append(port)
return iscsi_ports
def get_volume_stats(self, refresh):
if refresh:
self._update_volume_stats()
return self.stats
def _update_volume_stats(self):
# const to convert MiB to GB
const = 0.0009765625
# storage_protocol and volume_backend_name are
# set in the child classes
stats = {'driver_version': '1.0',
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
'storage_protocol': None,
'total_capacity_gb': 'unknown',
'QoS_support': True,
'vendor_name': 'Hewlett-Packard',
'volume_backend_name': None}
try:
cpg = self.client.getCPG(self.config.hp3par_cpg)
if 'limitMiB' not in cpg['SDGrowth']:
total_capacity = 'infinite'
free_capacity = 'infinite'
else:
total_capacity = int(cpg['SDGrowth']['limitMiB'] * const)
free_capacity = int((cpg['SDGrowth']['limitMiB'] -
cpg['UsrUsage']['usedMiB']) * const)
stats['total_capacity_gb'] = total_capacity
stats['free_capacity_gb'] = free_capacity
except hpexceptions.HTTPNotFound:
err = (_("CPG (%s) doesn't exist on array")
% self.config.hp3par_cpg)
LOG.error(err)
raise exception.InvalidInput(reason=err)
self.stats = stats
def create_vlun(self, volume, host):
"""Create a VLUN.
In order to export a volume on a 3PAR box, we have to create a VLUN.
"""
volume_name = self._get_3par_vol_name(volume['id'])
self._create_3par_vlun(volume_name, host['name'])
return self.client.getVLUN(volume_name)
def delete_vlun(self, volume, hostname):
volume_name = self._get_3par_vol_name(volume['id'])
vlun = self.client.getVLUN(volume_name)
self.client.deleteVLUN(volume_name, vlun['lun'], hostname)
try:
self._delete_3par_host(hostname)
except hpexceptions.HTTPConflict as ex:
# host will only be removed after all vluns
# have been removed
if 'has exported VLUN' in ex.get_description():
pass
else:
raise
def _get_volume_type(self, type_id):
ctxt = context.get_admin_context()
return volume_types.get_volume_type(ctxt, type_id)
def _get_key_value(self, hp3par_keys, key, default=None):
if hp3par_keys is not None and key in hp3par_keys:
return hp3par_keys[key]
else:
return default
def _get_qos_value(self, qos, key, default=None):
if key in qos:
return qos[key]
else:
return default
def _get_qos_by_volume_type(self, volume_type):
qos = {}
specs = volume_type.get('extra_specs')
for key, value in specs.iteritems():
if 'qos:' in key:
fields = key.split(':')
key = fields[1]
if key in self.hp_qos_keys:
qos[key] = int(value)
return qos
def _get_keys_by_volume_type(self, volume_type):
hp3par_keys = {}
specs = volume_type.get('extra_specs')
for key, value in specs.iteritems():
if ':' in key:
fields = key.split(':')
key = fields[1]
if key in self.hp3par_valid_keys:
hp3par_keys[key] = value
return hp3par_keys
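# Illustrative example (the 'hp3par:'/'qos:' extra-spec prefixes are assumed from the
# usual OpenStack convention; the helpers above only look at the part after ':'):
# a volume type with extra_specs
#     {'hp3par:cpg': 'OpenStackCPG', 'hp3par:provisioning': 'full', 'qos:maxIOPS': '1000'}
# yields hp3par_keys == {'cpg': 'OpenStackCPG', 'provisioning': 'full'} from
# _get_keys_by_volume_type() and qos == {'maxIOPS': 1000} from _get_qos_by_volume_type().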
def _set_qos_rule(self, qos, vvs_name):
max_io = self._get_qos_value(qos, 'maxIOPS')
max_bw = self._get_qos_value(qos, 'maxBWS')
cmd = ['setqos']
if max_io is not None:
cmd.extend(['-io', '%s' % max_io])
if max_bw is not None:
cmd.extend(['-bw', '%sM' % max_bw])
cmd.append('vvset:' + vvs_name)
self._cli_run(cmd)
def _add_volume_to_volume_set(self, volume, volume_name,
cpg, vvs_name, qos):
if vvs_name is not None:
# Admin has set a volume set name to add the volume to
out = self._cli_run(['createvvset', '-add', vvs_name, volume_name])
if out and len(out) == 1:
if 'does not exist' in out[0]:
msg = _('VV Set %s does not exist.') % vvs_name
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
else:
vvs_name = self._get_3par_vvs_name(volume['id'])
domain = self.get_domain(cpg)
if domain is not None:
self._cli_run(['createvvset', '-domain', domain, vvs_name])
else:
self._cli_run(['createvvset', vvs_name])
self._set_qos_rule(qos, vvs_name)
self._cli_run(['createvvset', '-add', vvs_name, volume_name])
def _remove_volume_set(self, vvs_name):
# Must first clear the QoS rules before removing the volume set
self._cli_run(['setqos', '-clear', 'vvset:%s' % (vvs_name)])
self._cli_run(['removevvset', '-f', vvs_name])
def _remove_volume_from_volume_set(self, volume_name, vvs_name):
self._cli_run(['removevvset', '-f', vvs_name, volume_name])
def get_cpg(self, volume, allowSnap=False):
volume_name = self._get_3par_vol_name(volume['id'])
vol = self.client.getVolume(volume_name)
if 'userCPG' in vol:
return vol['userCPG']
elif allowSnap:
return vol['snapCPG']
return None
def _get_3par_vol_comment(self, volume_name):
vol = self.client.getVolume(volume_name)
if 'comment' in vol:
return vol['comment']
return None
def get_persona_type(self, volume, hp3par_keys=None):
default_persona = self.valid_persona_values[0]
type_id = volume.get('volume_type_id', None)
volume_type = None
if type_id is not None:
volume_type = self._get_volume_type(type_id)
if hp3par_keys is None:
hp3par_keys = self._get_keys_by_volume_type(volume_type)
persona_value = self._get_key_value(hp3par_keys, 'persona',
default_persona)
if persona_value not in self.valid_persona_values:
err = _("Must specify a valid persona %(valid)s, "
"value '%(persona)s' is invalid.") % \
({'valid': self.valid_persona_values,
'persona': persona_value})
LOG.error(err)
raise exception.InvalidInput(reason=err)
# persona is set by the id so remove the text and return the id
# i.e for persona '1 - Generic' returns 1
persona_id = persona_value.split(' ')
return persona_id[0]
def get_volume_settings_from_type(self, volume):
cpg = None
snap_cpg = None
volume_type = None
vvs_name = None
hp3par_keys = {}
qos = {}
type_id = volume.get('volume_type_id', None)
if type_id is not None:
volume_type = self._get_volume_type(type_id)
hp3par_keys = self._get_keys_by_volume_type(volume_type)
vvs_name = self._get_key_value(hp3par_keys, 'vvs')
if vvs_name is None:
qos = self._get_qos_by_volume_type(volume_type)
cpg = self._get_key_value(hp3par_keys, 'cpg',
self.config.hp3par_cpg)
if cpg != self.config.hp3par_cpg:
# The cpg was specified in a volume type extra spec, so it
# needs to be validated that it's in the correct domain.
self.validate_cpg(cpg)
# Also, look to see if the snap_cpg was specified in volume
# type extra spec, if not use the extra spec cpg as the
# default.
snap_cpg = self._get_key_value(hp3par_keys, 'snap_cpg', cpg)
else:
# default snap_cpg to hp3par_cpg_snap if it's not specified
# in the volume type extra specs.
snap_cpg = self.config.hp3par_cpg_snap
# if it's still not set or empty then set it to the cpg
# specified in the cinder.conf file.
if not self.config.hp3par_cpg_snap:
snap_cpg = cpg
# if provisioning is not set use thin
default_prov = self.valid_prov_values[0]
prov_value = self._get_key_value(hp3par_keys, 'provisioning',
default_prov)
# check for valid provisioning type
if prov_value not in self.valid_prov_values:
err = _("Must specify a valid provisioning type %(valid)s, "
"value '%(prov)s' is invalid.") % \
({'valid': self.valid_prov_values,
'prov': prov_value})
LOG.error(err)
raise exception.InvalidInput(reason=err)
tpvv = True
if prov_value == "full":
tpvv = False
# check for valid persona even if we don't use it until
# attach time, this will give the end user notice that the
# persona type is invalid at volume creation time
self.get_persona_type(volume, hp3par_keys)
return {'cpg': cpg, 'snap_cpg': snap_cpg,
'vvs_name': vvs_name, 'qos': qos,
'tpvv': tpvv, 'volume_type': volume_type}
def create_volume(self, volume):
LOG.debug("CREATE VOLUME (%s : %s %s)" %
(volume['display_name'], volume['name'],
self._get_3par_vol_name(volume['id'])))
try:
comments = {'volume_id': volume['id'],
'name': volume['name'],
'type': 'OpenStack'}
name = volume.get('display_name', None)
if name:
comments['display_name'] = name
# get the options supported by volume types
type_info = self.get_volume_settings_from_type(volume)
volume_type = type_info['volume_type']
vvs_name = type_info['vvs_name']
qos = type_info['qos']
cpg = type_info['cpg']
snap_cpg = type_info['snap_cpg']
tpvv = type_info['tpvv']
type_id = volume.get('volume_type_id', None)
if type_id is not None:
comments['volume_type_name'] = volume_type.get('name')
comments['volume_type_id'] = type_id
if vvs_name is not None:
comments['vvs'] = vvs_name
else:
comments['qos'] = qos
extras = {'comment': json.dumps(comments),
'snapCPG': snap_cpg,
'tpvv': tpvv}
capacity = self._capacity_from_size(volume['size'])
volume_name = self._get_3par_vol_name(volume['id'])
self.client.createVolume(volume_name, cpg, capacity, extras)
if qos or vvs_name is not None:
try:
self._add_volume_to_volume_set(volume, volume_name,
cpg, vvs_name, qos)
except exception.InvalidInput as ex:
# Delete the volume if unable to add it to the volume set
self.client.deleteVolume(volume_name)
LOG.error(str(ex))
raise exception.CinderException(str(ex))
except hpexceptions.HTTPConflict:
msg = _("Volume (%s) already exists on array") % volume_name
LOG.error(msg)
raise exception.Duplicate(msg)
except hpexceptions.HTTPBadRequest as ex:
LOG.error(str(ex))
raise exception.Invalid(ex.get_description())
except exception.InvalidInput as ex:
LOG.error(str(ex))
raise ex
except exception.CinderException as ex:
LOG.error(str(ex))
raise ex
except Exception as ex:
LOG.error(str(ex))
raise exception.CinderException(ex.get_description())
def _copy_volume(self, src_name, dest_name, cpg=None, snap_cpg=None,
tpvv=True):
# Virtual volume sets are not supported with the -online option
cmd = ['createvvcopy', '-p', src_name, '-online']
if snap_cpg:
cmd.extend(['-snp_cpg', snap_cpg])
if tpvv:
cmd.append('-tpvv')
if cpg:
cmd.append(cpg)
cmd.append(dest_name)
LOG.debug('Creating clone of a volume with %s' % cmd)
self._cli_run(cmd)
def get_next_word(self, s, search_string):
"""Return the next word.
Search 's' for 'search_string'; if found, return the word that follows
'search_string' in 's'.
"""
word = re.search(search_string.strip(' ') + ' ([^ ]*)', s)
return word.groups()[0].strip(' ')
def _get_3par_vol_comment_value(self, vol_comment, key):
comment_dict = dict(ast.literal_eval(vol_comment))
if key in comment_dict:
return comment_dict[key]
return None
def create_cloned_volume(self, volume, src_vref):
try:
orig_name = self._get_3par_vol_name(volume['source_volid'])
vol_name = self._get_3par_vol_name(volume['id'])
type_info = self.get_volume_settings_from_type(volume)
# make the 3PAR copy the contents.
# can't delete the original until the copy is done.
self._copy_volume(orig_name, vol_name, cpg=type_info['cpg'],
snap_cpg=type_info['snap_cpg'],
tpvv=type_info['tpvv'])
return None
except hpexceptions.HTTPForbidden:
raise exception.NotAuthorized()
except hpexceptions.HTTPNotFound:
raise exception.NotFound()
except Exception as ex:
LOG.error(str(ex))
raise exception.CinderException(ex)
def _get_vvset_from_3par(self, volume_name):
"""Get Virtual Volume Set from 3PAR.
The only way to do this currently is to try and delete the volume
to get the error message.
NOTE(walter-boring): don't call this unless you know the volume is
already in a vvset!
"""
cmd = ['removevv', '-f', volume_name]
LOG.debug("Issuing remove command to find vvset name %s" % cmd)
out = self._cli_run(cmd)
vvset_name = None
if out and len(out) > 1:
if out[1].startswith("Attempt to delete "):
words = out[1].split(" ")
vvset_name = words[len(words) - 1]
return vvset_name
def delete_volume(self, volume):
try:
volume_name = self._get_3par_vol_name(volume['id'])
# Try and delete the volume, it might fail here because
# the volume is part of a volume set which will have the
# volume set name in the error.
try:
self.client.deleteVolume(volume_name)
except hpexceptions.HTTPConflict as ex:
if ex.get_code() == 34:
# This is a special case which means the
# volume is part of a volume set.
vvset_name = self._get_vvset_from_3par(volume_name)
LOG.debug("Returned vvset_name = %s" % vvset_name)
if vvset_name is not None and \
vvset_name.startswith('vvs-'):
# We have a single volume per volume set, so
# remove the volume set.
self._remove_volume_set(
self._get_3par_vvs_name(volume['id']))
elif vvset_name is not None:
# We have a pre-defined volume set just remove the
# volume and leave the volume set.
self._remove_volume_from_volume_set(volume_name,
vvset_name)
self.client.deleteVolume(volume_name)
else:
LOG.error(str(ex))
raise ex
except hpexceptions.HTTPNotFound as ex:
# We'll let this act as if it worked
# it helps clean up the cinder entries.
LOG.error(str(ex))
except hpexceptions.HTTPForbidden as ex:
LOG.error(str(ex))
raise exception.NotAuthorized(ex.get_description())
except Exception as ex:
LOG.error(str(ex))
raise exception.CinderException(ex)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
TODO: support using the size from the user.
"""
LOG.debug("Create Volume from Snapshot\n%s\n%s" %
(pprint.pformat(volume['display_name']),
pprint.pformat(snapshot['display_name'])))
if snapshot['volume_size'] != volume['size']:
err = "You cannot change size of the volume. It must "
"be the same as the snapshot."
LOG.error(err)
raise exception.InvalidInput(reason=err)
try:
snap_name = self._get_3par_snap_name(snapshot['id'])
volume_name = self._get_3par_vol_name(volume['id'])
extra = {'volume_id': volume['id'],
'snapshot_id': snapshot['id']}
volume_type = None
type_id = volume.get('volume_type_id', None)
vvs_name = None
qos = {}
hp3par_keys = {}
if type_id is not None:
volume_type = self._get_volume_type(type_id)
hp3par_keys = self._get_keys_by_volume_type(volume_type)
vvs_name = self._get_key_value(hp3par_keys, 'vvs')
if vvs_name is None:
qos = self._get_qos_by_volume_type(volume_type)
name = volume.get('display_name', None)
if name:
extra['display_name'] = name
description = volume.get('display_description', None)
if description:
extra['description'] = description
optional = {'comment': json.dumps(extra),
'readOnly': False}
self.client.createSnapshot(volume_name, snap_name, optional)
if qos or vvs_name is not None:
cpg = self._get_key_value(hp3par_keys, 'cpg',
self.config.hp3par_cpg)
try:
self._add_volume_to_volume_set(volume, volume_name,
cpg, vvs_name, qos)
except Exception as ex:
# Delete the volume if unable to add it to the volume set
self.client.deleteVolume(volume_name)
LOG.error(str(ex))
raise exception.CinderException(ex.get_description())
except hpexceptions.HTTPForbidden as ex:
LOG.error(str(ex))
raise exception.NotAuthorized()
except hpexceptions.HTTPNotFound as ex:
LOG.error(str(ex))
raise exception.NotFound()
except Exception as ex:
LOG.error(str(ex))
raise exception.CinderException(ex.get_description())
def create_snapshot(self, snapshot):
LOG.debug("Create Snapshot\n%s" % pprint.pformat(snapshot))
try:
snap_name = self._get_3par_snap_name(snapshot['id'])
vol_name = self._get_3par_vol_name(snapshot['volume_id'])
extra = {'volume_name': snapshot['volume_name']}
vol_id = snapshot.get('volume_id', None)
if vol_id:
extra['volume_id'] = vol_id
try:
extra['display_name'] = snapshot['display_name']
except AttributeError:
pass
try:
extra['description'] = snapshot['display_description']
except AttributeError:
pass
optional = {'comment': json.dumps(extra),
'readOnly': True}
if self.config.hp3par_snapshot_expiration:
optional['expirationHours'] = (
self.config.hp3par_snapshot_expiration)
if self.config.hp3par_snapshot_retention:
optional['retentionHours'] = (
self.config.hp3par_snapshot_retention)
self.client.createSnapshot(snap_name, vol_name, optional)
except hpexceptions.HTTPForbidden as ex:
LOG.error(str(ex))
raise exception.NotAuthorized()
except hpexceptions.HTTPNotFound as ex:
LOG.error(str(ex))
raise exception.NotFound()
def delete_snapshot(self, snapshot):
LOG.debug("Delete Snapshot\n%s" % pprint.pformat(snapshot))
try:
snap_name = self._get_3par_snap_name(snapshot['id'])
self.client.deleteVolume(snap_name)
except hpexceptions.HTTPForbidden as ex:
LOG.error(str(ex))
raise exception.NotAuthorized()
except hpexceptions.HTTPNotFound as ex:
LOG.error(str(ex))
raise exception.NotFound()
def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns):
if wwns is not None and not isinstance(wwns, list):
wwns = [wwns]
if iqns is not None and not isinstance(iqns, list):
iqns = [iqns]
out = self.client.getHosts()
hosts = out['members']
for host in hosts:
if 'iSCSIPaths' in host and iqns is not None:
iscsi_paths = host['iSCSIPaths']
for iscsi in iscsi_paths:
for iqn in iqns:
if iqn == iscsi['name']:
return host['name']
if 'FCPaths' in host and wwns is not None:
fc_paths = host['FCPaths']
for fc in fc_paths:
for wwn in wwns:
if wwn == fc['WWN']:
return host['name']
def terminate_connection(self, volume, hostname, wwn=None, iqn=None):
"""Driver entry point to unattach a volume from an instance."""
try:
# does 3par know this host by a different name?
if hostname in self.hosts_naming_dict:
hostname = self.hosts_naming_dict.get(hostname)
self.delete_vlun(volume, hostname)
return
except hpexceptions.HTTPNotFound as e:
if 'host does not exist' in e.get_description():
# use the wwn to see if we can find the hostname
hostname = self._get_3par_hostname_from_wwn_iqn(wwn, iqn)
# no 3par host, re-throw
if (hostname is None):
LOG.error(str(e))
raise
else:
# not a 'host does not exist' HTTPNotFound exception, re-throw
LOG.error(str(e))
raise
# try again with name retrieved from 3par
self.delete_vlun(volume, hostname)
def parse_create_host_error(self, hostname, out):
search_str = "already used by host "
if search_str in out[1]:
# host exists, return name used by 3par
hostname_3par = self.get_next_word(out[1], search_str)
self.hosts_naming_dict[hostname] = hostname_3par
return hostname_3par
def build_nsp(self, portPos):
return '%s:%s:%s' % (portPos['node'],
portPos['slot'],
portPos['cardPort'])
|
|
from trac.perm import PermissionCache
from trac.test import Mock, EnvironmentStub
from trac.ticket import api, default_workflow, web_ui
from trac.ticket.batch import BatchModifyModule
from trac.ticket.model import Ticket
from trac.util.datefmt import utc
import unittest
class BatchModifyTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True,
enable=[default_workflow.ConfigurableTicketWorkflow,
web_ui.TicketModule,
api.TicketSystem])
self.req = Mock(href=self.env.href, authname='anonymous', tz=utc)
self.req.session = {}
self.req.perm = PermissionCache(self.env)
def assertCommentAdded(self, ticket_id, comment):
ticket = Ticket(self.env, int(ticket_id))
changes = ticket.get_changelog()
comment_change = [c for c in changes if c[2] == 'comment'][0]
        self.assertEqual(comment_change[4], comment)
def assertFieldChanged(self, ticket_id, field, new_value):
ticket = Ticket(self.env, int(ticket_id))
changes = ticket.get_changelog()
field_change = [c for c in changes if c[2] == field][0]
self.assertEqual(field_change[4], new_value)
def _change_list_test_helper(self, original, new, new2, mode):
batch = BatchModifyModule(self.env)
return batch._change_list(original, new, new2, mode)
def _add_list_test_helper(self, original, to_add):
return self._change_list_test_helper(original, to_add, '', '+')
def _remove_list_test_helper(self, original, to_remove):
return self._change_list_test_helper(original, to_remove, '', '-')
def _add_remove_list_test_helper(self, original, to_add, to_remove):
return self._change_list_test_helper(original, to_add, to_remove,
'+-')
def _assign_list_test_helper(self, original, new):
return self._change_list_test_helper(original, new, '', '=')
def _insert_ticket(self, summary, **kw):
"""Helper for inserting a ticket into the database"""
ticket = Ticket(self.env)
for k, v in kw.items():
ticket[k] = v
return ticket.insert()
def test_ignore_summary_reporter_and_description(self):
"""These cannot be added through the UI, but if somebody tries
to build their own POST data they will be ignored."""
batch = BatchModifyModule(self.env)
self.req.args = {}
self.req.args['batchmod_value_summary'] = 'test ticket'
self.req.args['batchmod_value_reporter'] = 'anonymous'
self.req.args['batchmod_value_description'] = 'synergize the widgets'
values = batch._get_new_ticket_values(self.req)
self.assertEqual(len(values), 0)
def test_add_batchmod_value_data_from_request(self):
batch = BatchModifyModule(self.env)
self.req.args = {}
self.req.args['batchmod_value_milestone'] = 'milestone1'
values = batch._get_new_ticket_values(self.req)
self.assertEqual(values['milestone'], 'milestone1')
def test_selected_tickets(self):
self.req.args = { 'selected_tickets' : '1,2,3' }
batch = BatchModifyModule(self.env)
selected_tickets = batch._get_selected_tickets(self.req)
self.assertEqual(selected_tickets, ['1', '2', '3'])
def test_no_selected_tickets(self):
"""If nothing is selected, the return value is the empty list."""
self.req.args = { 'selected_tickets' : '' }
batch = BatchModifyModule(self.env)
selected_tickets = batch._get_selected_tickets(self.req)
self.assertEqual(selected_tickets, [])
# Assign list items
def test_change_list_replace_empty_with_single(self):
"""Replace emtpy field with single item."""
changed = self._assign_list_test_helper('', 'alice')
self.assertEqual(changed, 'alice')
def test_change_list_replace_empty_with_items(self):
"""Replace emtpy field with items."""
changed = self._assign_list_test_helper('', 'alice, bob')
self.assertEqual(changed, 'alice, bob')
def test_change_list_replace_item(self):
"""Replace item with a different item."""
changed = self._assign_list_test_helper('alice', 'bob')
self.assertEqual(changed, 'bob')
def test_change_list_replace_item_with_items(self):
"""Replace item with different items."""
changed = self._assign_list_test_helper('alice', 'bob, carol')
self.assertEqual(changed, 'bob, carol')
def test_change_list_replace_items_with_item(self):
"""Replace items with a different item."""
changed = self._assign_list_test_helper('alice, bob', 'carol')
self.assertEqual(changed, 'carol')
def test_change_list_replace_items(self):
"""Replace items with different items."""
changed = self._assign_list_test_helper('alice, bob', 'carol, dave')
self.assertEqual(changed, 'carol, dave')
def test_change_list_replace_items_partial(self):
"""Replace items with different (or not) items."""
changed = self._assign_list_test_helper('alice, bob', 'bob, dave')
self.assertEqual(changed, 'bob, dave')
def test_change_list_clear(self):
"""Clear field."""
changed = self._assign_list_test_helper('alice bob', '')
self.assertEqual(changed, '')
# Add / remove list items
def test_change_list_add_item(self):
"""Append additional item."""
changed = self._add_list_test_helper('alice', 'bob')
self.assertEqual(changed, 'alice, bob')
def test_change_list_add_items(self):
"""Append additional items."""
changed = self._add_list_test_helper('alice, bob', 'carol, dave')
self.assertEqual(changed, 'alice, bob, carol, dave')
def test_change_list_remove_item(self):
"""Remove existing item."""
changed = self._remove_list_test_helper('alice, bob', 'bob')
self.assertEqual(changed, 'alice')
def test_change_list_remove_items(self):
"""Remove existing items."""
changed = self._remove_list_test_helper('alice, bob, carol',
'alice, carol')
self.assertEqual(changed, 'bob')
def test_change_list_remove_idempotent(self):
"""Ignore missing item to be removed."""
changed = self._remove_list_test_helper('alice', 'bob')
self.assertEqual(changed, 'alice')
def test_change_list_remove_mixed(self):
"""Ignore only missing item to be removed."""
changed = self._remove_list_test_helper('alice, bob', 'bob, carol')
self.assertEqual(changed, 'alice')
def test_change_list_add_remove(self):
"""Remove existing item and append additional item."""
changed = self._add_remove_list_test_helper('alice, bob', 'carol',
'alice')
self.assertEqual(changed, 'bob, carol')
def test_change_list_add_no_duplicates(self):
"""Existing items are not duplicated."""
changed = self._add_list_test_helper('alice, bob', 'bob, carol')
self.assertEqual(changed, 'alice, bob, carol')
def test_change_list_remove_all_duplicates(self):
"""Remove all duplicates."""
changed = self._remove_list_test_helper('alice, bob, alice', 'alice')
self.assertEqual(changed, 'bob')
# Save
def test_save_comment(self):
"""Comments are saved to all selected tickets."""
first_ticket_id = self._insert_ticket('Test 1', reporter='joe')
second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
selected_tickets = [first_ticket_id, second_ticket_id]
batch = BatchModifyModule(self.env)
batch._save_ticket_changes(self.req, selected_tickets, {}, 'comment',
'leave')
self.assertCommentAdded(first_ticket_id, 'comment')
self.assertCommentAdded(second_ticket_id, 'comment')
def test_save_values(self):
"""Changed values are saved to all tickets."""
first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
component='foo')
second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
selected_tickets = [first_ticket_id, second_ticket_id]
new_values = { 'component' : 'bar' }
batch = BatchModifyModule(self.env)
batch._save_ticket_changes(self.req, selected_tickets, new_values, '',
'leave')
self.assertFieldChanged(first_ticket_id, 'component', 'bar')
self.assertFieldChanged(second_ticket_id, 'component', 'bar')
def test_action_with_state_change(self):
"""Actions can have change status."""
self.env.config.set('ticket-workflow', 'embiggen', '* -> big')
first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
status='small')
second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
selected_tickets = [first_ticket_id, second_ticket_id]
batch = BatchModifyModule(self.env)
batch._save_ticket_changes(self.req, selected_tickets, {}, '',
'embiggen')
ticket = Ticket(self.env, int(first_ticket_id))
changes = ticket.get_changelog()
self.assertFieldChanged(first_ticket_id, 'status', 'big')
self.assertFieldChanged(second_ticket_id, 'status', 'big')
def test_action_with_side_effects(self):
"""Actions can have operations with side effects."""
self.env.config.set('ticket-workflow', 'buckify', '* -> *')
self.env.config.set('ticket-workflow', 'buckify.operations',
'set_owner')
self.req.args = {}
self.req.args['action_buckify_reassign_owner'] = 'buck'
first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
owner='foo')
second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
selected_tickets = [first_ticket_id, second_ticket_id]
batch = BatchModifyModule(self.env)
batch._save_ticket_changes(self.req, selected_tickets, {}, '',
'buckify')
ticket = Ticket(self.env, int(first_ticket_id))
changes = ticket.get_changelog()
self.assertFieldChanged(first_ticket_id, 'owner', 'buck')
self.assertFieldChanged(second_ticket_id, 'owner', 'buck')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(BatchModifyTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class LoyaltyLedgerEntry(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'created': 'datetime',
'program_id': 'int',
'customer_profile_id': 'str',
'customer_session_id': 'str',
'event_id': 'int',
'type': 'str',
'amount': 'float',
'start_date': 'datetime',
'expiry_date': 'datetime',
'name': 'str',
'sub_ledger_id': 'str',
'user_id': 'int'
}
attribute_map = {
'created': 'created',
'program_id': 'programID',
'customer_profile_id': 'customerProfileID',
'customer_session_id': 'customerSessionID',
'event_id': 'eventID',
'type': 'type',
'amount': 'amount',
'start_date': 'startDate',
'expiry_date': 'expiryDate',
'name': 'name',
'sub_ledger_id': 'subLedgerID',
'user_id': 'userID'
}
def __init__(self, created=None, program_id=None, customer_profile_id=None, customer_session_id=None, event_id=None, type=None, amount=None, start_date=None, expiry_date=None, name=None, sub_ledger_id=None, user_id=None, local_vars_configuration=None): # noqa: E501
"""LoyaltyLedgerEntry - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._created = None
self._program_id = None
self._customer_profile_id = None
self._customer_session_id = None
self._event_id = None
self._type = None
self._amount = None
self._start_date = None
self._expiry_date = None
self._name = None
self._sub_ledger_id = None
self._user_id = None
self.discriminator = None
self.created = created
self.program_id = program_id
self.customer_profile_id = customer_profile_id
if customer_session_id is not None:
self.customer_session_id = customer_session_id
if event_id is not None:
self.event_id = event_id
self.type = type
self.amount = amount
if start_date is not None:
self.start_date = start_date
if expiry_date is not None:
self.expiry_date = expiry_date
self.name = name
self.sub_ledger_id = sub_ledger_id
if user_id is not None:
self.user_id = user_id
@property
def created(self):
"""Gets the created of this LoyaltyLedgerEntry. # noqa: E501
:return: The created of this LoyaltyLedgerEntry. # noqa: E501
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this LoyaltyLedgerEntry.
:param created: The created of this LoyaltyLedgerEntry. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and created is None: # noqa: E501
raise ValueError("Invalid value for `created`, must not be `None`") # noqa: E501
self._created = created
@property
def program_id(self):
"""Gets the program_id of this LoyaltyLedgerEntry. # noqa: E501
:return: The program_id of this LoyaltyLedgerEntry. # noqa: E501
:rtype: int
"""
return self._program_id
@program_id.setter
def program_id(self, program_id):
"""Sets the program_id of this LoyaltyLedgerEntry.
:param program_id: The program_id of this LoyaltyLedgerEntry. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and program_id is None: # noqa: E501
raise ValueError("Invalid value for `program_id`, must not be `None`") # noqa: E501
self._program_id = program_id
@property
def customer_profile_id(self):
"""Gets the customer_profile_id of this LoyaltyLedgerEntry. # noqa: E501
:return: The customer_profile_id of this LoyaltyLedgerEntry. # noqa: E501
:rtype: str
"""
return self._customer_profile_id
@customer_profile_id.setter
def customer_profile_id(self, customer_profile_id):
"""Sets the customer_profile_id of this LoyaltyLedgerEntry.
:param customer_profile_id: The customer_profile_id of this LoyaltyLedgerEntry. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and customer_profile_id is None: # noqa: E501
raise ValueError("Invalid value for `customer_profile_id`, must not be `None`") # noqa: E501
self._customer_profile_id = customer_profile_id
@property
def customer_session_id(self):
"""Gets the customer_session_id of this LoyaltyLedgerEntry. # noqa: E501
:return: The customer_session_id of this LoyaltyLedgerEntry. # noqa: E501
:rtype: str
"""
return self._customer_session_id
@customer_session_id.setter
def customer_session_id(self, customer_session_id):
"""Sets the customer_session_id of this LoyaltyLedgerEntry.
:param customer_session_id: The customer_session_id of this LoyaltyLedgerEntry. # noqa: E501
:type: str
"""
self._customer_session_id = customer_session_id
@property
def event_id(self):
"""Gets the event_id of this LoyaltyLedgerEntry. # noqa: E501
:return: The event_id of this LoyaltyLedgerEntry. # noqa: E501
:rtype: int
"""
return self._event_id
@event_id.setter
def event_id(self, event_id):
"""Sets the event_id of this LoyaltyLedgerEntry.
:param event_id: The event_id of this LoyaltyLedgerEntry. # noqa: E501
:type: int
"""
self._event_id = event_id
@property
def type(self):
"""Gets the type of this LoyaltyLedgerEntry. # noqa: E501
The type of the ledger transaction. Possible values are addition, subtraction, expire or expiring (for expiring points ledgers) # noqa: E501
:return: The type of this LoyaltyLedgerEntry. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this LoyaltyLedgerEntry.
The type of the ledger transaction. Possible values are addition, subtraction, expire or expiring (for expiring points ledgers) # noqa: E501
:param type: The type of this LoyaltyLedgerEntry. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def amount(self):
"""Gets the amount of this LoyaltyLedgerEntry. # noqa: E501
:return: The amount of this LoyaltyLedgerEntry. # noqa: E501
:rtype: float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this LoyaltyLedgerEntry.
:param amount: The amount of this LoyaltyLedgerEntry. # noqa: E501
:type: float
"""
if self.local_vars_configuration.client_side_validation and amount is None: # noqa: E501
raise ValueError("Invalid value for `amount`, must not be `None`") # noqa: E501
self._amount = amount
@property
def start_date(self):
"""Gets the start_date of this LoyaltyLedgerEntry. # noqa: E501
:return: The start_date of this LoyaltyLedgerEntry. # noqa: E501
:rtype: datetime
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this LoyaltyLedgerEntry.
:param start_date: The start_date of this LoyaltyLedgerEntry. # noqa: E501
:type: datetime
"""
self._start_date = start_date
@property
def expiry_date(self):
"""Gets the expiry_date of this LoyaltyLedgerEntry. # noqa: E501
:return: The expiry_date of this LoyaltyLedgerEntry. # noqa: E501
:rtype: datetime
"""
return self._expiry_date
@expiry_date.setter
def expiry_date(self, expiry_date):
"""Sets the expiry_date of this LoyaltyLedgerEntry.
:param expiry_date: The expiry_date of this LoyaltyLedgerEntry. # noqa: E501
:type: datetime
"""
self._expiry_date = expiry_date
@property
def name(self):
"""Gets the name of this LoyaltyLedgerEntry. # noqa: E501
A name referencing the condition or effect that added this entry, or the specific name provided in an API call. # noqa: E501
:return: The name of this LoyaltyLedgerEntry. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this LoyaltyLedgerEntry.
A name referencing the condition or effect that added this entry, or the specific name provided in an API call. # noqa: E501
:param name: The name of this LoyaltyLedgerEntry. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def sub_ledger_id(self):
"""Gets the sub_ledger_id of this LoyaltyLedgerEntry. # noqa: E501
This specifies if we are adding loyalty points to the main ledger or a subledger # noqa: E501
:return: The sub_ledger_id of this LoyaltyLedgerEntry. # noqa: E501
:rtype: str
"""
return self._sub_ledger_id
@sub_ledger_id.setter
def sub_ledger_id(self, sub_ledger_id):
"""Sets the sub_ledger_id of this LoyaltyLedgerEntry.
This specifies if we are adding loyalty points to the main ledger or a subledger # noqa: E501
:param sub_ledger_id: The sub_ledger_id of this LoyaltyLedgerEntry. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and sub_ledger_id is None: # noqa: E501
raise ValueError("Invalid value for `sub_ledger_id`, must not be `None`") # noqa: E501
self._sub_ledger_id = sub_ledger_id
@property
def user_id(self):
"""Gets the user_id of this LoyaltyLedgerEntry. # noqa: E501
This is the ID of the user who created this entry, if the addition or subtraction was done manually. # noqa: E501
:return: The user_id of this LoyaltyLedgerEntry. # noqa: E501
:rtype: int
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this LoyaltyLedgerEntry.
This is the ID of the user who created this entry, if the addition or subtraction was done manually. # noqa: E501
:param user_id: The user_id of this LoyaltyLedgerEntry. # noqa: E501
:type: int
"""
self._user_id = user_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LoyaltyLedgerEntry):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, LoyaltyLedgerEntry):
return True
return self.to_dict() != other.to_dict()
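# A minimal usage sketch of the generated model (the field values below are
# illustrative assumptions, not taken from a real loyalty ledger): construct an
# entry with its required fields and serialize it back to a plain dict.
if __name__ == "__main__":
    from datetime import datetime

    example_entry = LoyaltyLedgerEntry(
        created=datetime.utcnow(),
        program_id=1,
        customer_profile_id="cust_123",
        type="addition",
        amount=10.0,
        name="welcome-bonus",
        sub_ledger_id="",
    )
    print(example_entry.to_dict())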
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DCGAN generator and discriminator from https://arxiv.org/abs/1511.06434."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import log
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
def _validate_image_inputs(inputs):
inputs.get_shape().assert_has_rank(4)
inputs.get_shape()[1:3].assert_is_fully_defined()
if inputs.get_shape()[1] != inputs.get_shape()[2]:
raise ValueError('Input tensor does not have equal width and height: ',
inputs.get_shape()[1:3])
width = inputs.get_shape().as_list()[1]
if log(width, 2) != int(log(width, 2)):
raise ValueError('Input tensor `width` is not a power of 2: ', width)
# TODO(joelshor): Use fused batch norm by default. Investigate why some GAN
# setups need the gradient of gradient FusedBatchNormGrad.
def discriminator(inputs,
depth=64,
is_training=True,
reuse=None,
scope='Discriminator',
fused_batch_norm=False):
"""Discriminator network for DCGAN.
Construct discriminator network from inputs to the final endpoint.
Args:
inputs: A tensor of size [batch_size, height, width, channels]. Must be
floating point.
depth: Number of channels in first convolution layer.
is_training: Whether the network is for training or not.
reuse: Whether or not the network variables should be reused. `scope`
must be given to be reused.
scope: Optional variable_scope.
fused_batch_norm: If `True`, use a faster, fused implementation of
batch norm.
Returns:
logits: The pre-softmax activations, a tensor of size [batch_size, 1]
end_points: a dictionary from components of the network to their activation.
Raises:
ValueError: If the input image shape is not 4-dimensional, if the spatial
dimensions aren't defined at graph construction time, if the spatial
dimensions aren't square, or if the spatial dimensions aren't a power of
two.
"""
normalizer_fn = slim.batch_norm
normalizer_fn_args = {
'is_training': is_training,
'zero_debias_moving_mean': True,
'fused': fused_batch_norm,
}
_validate_image_inputs(inputs)
inp_shape = inputs.get_shape().as_list()[1]
end_points = {}
with tf.compat.v1.variable_scope(
scope, values=[inputs], reuse=reuse) as scope:
with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
with slim.arg_scope([slim.conv2d],
stride=2,
kernel_size=4,
activation_fn=tf.nn.leaky_relu):
net = inputs
for i in xrange(int(log(inp_shape, 2))):
scope = 'conv%i' % (i + 1)
current_depth = depth * 2**i
normalizer_fn_ = None if i == 0 else normalizer_fn
net = slim.conv2d(
net, current_depth, normalizer_fn=normalizer_fn_, scope=scope)
end_points[scope] = net
logits = slim.conv2d(net, 1, kernel_size=1, stride=1, padding='VALID',
normalizer_fn=None, activation_fn=None)
logits = tf.reshape(logits, [-1, 1])
end_points['logits'] = logits
return logits, end_points
# TODO(joelshor): Use fused batch norm by default. Investigate why some GAN
# setups need the gradient of gradient FusedBatchNormGrad.
def generator(inputs,
depth=64,
final_size=32,
num_outputs=3,
is_training=True,
reuse=None,
scope='Generator',
fused_batch_norm=False):
"""Generator network for DCGAN.
Construct generator network from inputs to the final endpoint.
Args:
    inputs: A tensor of shape [batch_size, N]; N can be any size.
depth: Number of channels in last deconvolution layer.
final_size: The shape of the final output.
num_outputs: Number of output features. For images, this is the number of
channels.
is_training: whether is training or not.
    reuse: Whether or not the network's variables should be reused. `scope`
      must be given to be reused.
scope: Optional variable_scope.
fused_batch_norm: If `True`, use a faster, fused implementation of
batch norm.
Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, final_size, final_size, num_outputs]
end_points: a dictionary from components of the network to their activation.
Raises:
ValueError: If `inputs` is not 2-dimensional.
ValueError: If `final_size` isn't a power of 2 or is less than 8.
"""
normalizer_fn = slim.batch_norm
normalizer_fn_args = {
'is_training': is_training,
'zero_debias_moving_mean': True,
'fused': fused_batch_norm,
}
inputs.get_shape().assert_has_rank(2)
if log(final_size, 2) != int(log(final_size, 2)):
raise ValueError('`final_size` (%i) must be a power of 2.' % final_size)
if final_size < 8:
    raise ValueError('`final_size` (%i) must be at least 8.' % final_size)
end_points = {}
num_layers = int(log(final_size, 2)) - 1
with tf.compat.v1.variable_scope(
scope, values=[inputs], reuse=reuse) as scope:
with slim.arg_scope([normalizer_fn], **normalizer_fn_args):
with slim.arg_scope([slim.conv2d_transpose],
normalizer_fn=normalizer_fn,
stride=2,
kernel_size=4):
net = tf.expand_dims(tf.expand_dims(inputs, 1), 1)
# First upscaling is different because it takes the input vector.
current_depth = depth * 2 ** (num_layers - 1)
scope = 'deconv1'
net = slim.conv2d_transpose(
net, current_depth, stride=1, padding='VALID', scope=scope)
end_points[scope] = net
for i in xrange(2, num_layers):
scope = 'deconv%i' % (i)
current_depth = depth * 2 ** (num_layers - i)
net = slim.conv2d_transpose(net, current_depth, scope=scope)
end_points[scope] = net
# Last layer has different normalizer and activation.
scope = 'deconv%i' % (num_layers)
net = slim.conv2d_transpose(
net, depth, normalizer_fn=None, activation_fn=None, scope=scope)
end_points[scope] = net
# Convert to proper channels.
scope = 'logits'
logits = slim.conv2d(
net,
num_outputs,
normalizer_fn=None,
activation_fn=None,
kernel_size=1,
stride=1,
padding='VALID',
scope=scope)
end_points[scope] = logits
logits.get_shape().assert_has_rank(4)
logits.get_shape().assert_is_compatible_with(
[None, final_size, final_size, num_outputs])
return logits, end_points
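# A minimal usage sketch (assumptions: a TF1-style graph, since this module
# relies on tf.contrib.slim; the batch size 16 and noise dimension 64 are
# illustrative only). It wires a generator sample into the discriminator and
# prints the expected static shapes.
if __name__ == '__main__':
  noise = tf.random.normal([16, 64])
  fake_images, _ = generator(noise, final_size=32, num_outputs=3)
  fake_logits, _ = discriminator(fake_images, depth=64)
  print(fake_images.shape)  # (16, 32, 32, 3)
  print(fake_logits.shape)  # (16, 1)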
|
|
import urllib
class StringWidget(object):
""" String Widget """
def __init__(self, screen, ref, x, y, text):
self.screen = screen
self.ref = ref
self.x = x
self.y = y
self.text = text
self.screen.server.request("widget_add %s %s %s" % (self.screen.ref, self.ref, "string"))
self.update()
def update(self):
self.screen.server.request('widget_set %s %s %s %s "%s"' % (self.screen.ref, self.ref, self.x, self.y, self.text))
def set_x(self, x):
self.x = x
self.update()
def set_y(self, y):
self.y = y
self.update()
def set_text(self, text):
self.text = text
self.update()
class TitleWidget(object):
""" Title Widget """
def __init__(self, screen, ref, text):
self.screen = screen
self.ref = ref
self.text = text
self.screen.server.request("widget_add %s %s %s" % (self.screen.ref, self.ref, "title"))
self.update()
def update(self):
self.screen.server.request('widget_set %s %s "%s"' % (self.screen.ref, self.ref, self.text))
def set_text(self, text):
self.text = text
self.update()
class HBarWidget(object):
def __init__(self, screen, ref, x, y, length):
self.screen = screen
self.ref = ref
self.x = x
self.y = y
self.length = length
self.screen.server.request("widget_add %s %s %s" % (self.screen.ref, self.ref, "hbar"))
self.update()
def update(self):
self.screen.server.request("widget_set %s %s %s %s %s" % (self.screen.ref, self.ref, self.x, self.y, self.length))
def set_x(self, x):
self.x = x
self.update()
def set_y(self, y):
self.y = y
self.update()
def set_length(self, length):
self.length = length
self.update()
class VBarWidget(object):
def __init__(self, screen, ref, x, y, length):
self.screen = screen
self.ref = ref
self.x = x
self.y = y
self.length = length
self.screen.server.request("widget_add %s %s %s" % (self.screen.ref, self.ref, "vbar"))
self.update()
def update(self):
self.screen.server.request("widget_set %s %s %s %s %s" % (self.screen.ref, self.ref, self.x, self.y, self.length))
def set_x(self, x):
self.x = x
self.update()
def set_y(self, y):
self.y = y
self.update()
def set_length(self, length):
self.length = length
self.update()
class IconWidget(object):
def __init__(self, screen, ref, x, y, name):
self.screen = screen
self.ref = ref
self.x = x
self.y = y
self.name = name
self.screen.server.request("widget_add %s %s %s" % (self.screen.ref, self.ref, "icon"))
self.update()
def update(self):
self.screen.server.request("widget_set %s %s %s %s %s" % (self.screen.ref, self.ref, self.x, self.y, self.name))
def set_x(self, x):
self.x = x
self.update()
def set_y(self, y):
self.y = y
self.update()
def set_name(self, name):
self.name = name
self.update()
class ScrollerWidget(object):
def __init__(self, screen, ref, left, top, right, bottom, direction, speed, text):
self.screen = screen
self.ref = ref
self.left = left
self.top = top
self.right = right
self.bottom = bottom
self.direction = direction
self.speed = speed
self.text = text
self.screen.server.request("widget_add %s %s %s" % (self.screen.ref,
self.ref,
"scroller"))
self.update()
def update(self):
self.screen.server.request('widget_set %s %s %s %s %s %s %s %s "%s"' % (self.screen.ref,
self.ref,
self.left,
self.top,
self.right,
self.bottom,
self.direction,
self.speed,
self.text))
def set_left(self, left):
self.left = left
self.update()
def set_top(self, top):
self.top = top
self.update()
def set_right(self, right):
self.right = right
self.update()
def set_bottom(self, bottom):
self.bottom = bottom
self.update()
def set_direction(self, direction):
self.direction = direction
self.update()
def set_speed(self, speed):
self.speed = speed
self.update()
def set_text(self, text):
self.text = text
self.update()
class FrameWidget(object):
def __init__(self, screen, ref, left, top, right, bottom, width, height, direction, speed):
self.screen = screen
self.ref = ref
self.left = left
self.top = top
self.right = right
self.bottom = bottom
self.width = width
self.height = height
self.direction = direction
self.speed = speed
self.screen.server.request("widget_add %s %s %s" % (self.screen.ref,
self.ref,
"frame"))
self.update()
def update(self):
self.screen.server.request('widget_set %s %s %s %s %s %s %s %s %s %s' % (self.screen.ref,
self.ref,
self.left,
self.top,
self.right,
self.bottom,
self.width,
self.height,
self.direction,
self.speed))
def set_left(self, left):
self.left = left
self.update()
def set_top(self, top):
self.top = top
self.update()
def set_right(self, right):
self.right = right
self.update()
def set_bottom(self, bottom):
self.bottom = bottom
self.update()
def set_width(self, width):
self.width = width
self.update()
def set_height(self, height):
self.height = height
self.update()
def set_direction(self, direction):
self.direction = direction
self.update()
def set_speed(self, speed):
self.speed = speed
self.update()
class NumberWidget(object):
def __init__(self, screen, ref, x, value):
self.screen = screen
self.ref = ref
self.x = x
self.value = value
self.screen.server.request("widget_add %s %s %s" % (self.screen.ref,
self.ref,
"num"))
self.update()
def update(self):
self.screen.server.request('widget_set %s %s %s %s' % (self.screen.ref,
self.ref,
self.x,
self.value))
def set_x(self, x):
self.x = x
self.update()
def set_value(self, value):
self.value = value
self.update()
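# A minimal usage sketch. The screen object below is a hypothetical stand-in
# (any object exposing `ref` and `server.request(cmd)` works); a real setup
# would pass a screen backed by an LCDproc server connection instead.
if __name__ == "__main__":
    class _FakeServer(object):
        def request(self, cmd):
            print("-> %s" % cmd)

    class _FakeScreen(object):
        def __init__(self):
            self.ref = "screen1"
            self.server = _FakeServer()

    demo_screen = _FakeScreen()
    title = TitleWidget(demo_screen, "title1", "Hello LCD")
    bar = HBarWidget(demo_screen, "bar1", 1, 2, 10)
    bar.set_length(15)
    title.set_text("Goodbye")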
|
|
'''This task handles the interface between the kernel and user-level servers.
System services can be accessed by doing a system call. System calls are
transformed into request messages, which are handled by this task. By
convention, a sys_call() is transformed in a SYS_CALL request message that
is handled in a function named do_call().
A private call vector is used to map all system calls to the functions that
handle them. The actual handler functions are contained in separate files
to keep this file clean. The call vector is used in the system task's main
loop to handle all incoming requests.
In addition to the main sys_task() entry point, which starts the main loop,
there are several other minor entry points:
get_priv: assign privilege structure to user or system process
set_sendto_bit: allow a process to send messages to a new target
unset_sendto_bit: disallow a process from sending messages to a target
fill_sendto_mask: fill the target mask of a given process
send_sig: send a signal directly to a system process
cause_sig: take action to cause a signal to occur via a signal mgr
sig_delay_done: tell PM that a process is not sending
get_randomness: accumulate randomness in a buffer
clear_endpoint: remove a process' ability to send and receive messages
sched_proc: schedule a process
Changes:
Nov 22, 2009 get_priv supports static priv ids (Cristiano Giuffrida)
Aug 04, 2005 check if system call is allowed (Jorrit N. Herder)
Jul 20, 2005 send signal to services with message (Jorrit N. Herder)
Jan 15, 2005 new, generalized virtual copy function (Jorrit N. Herder)
Oct 10, 2004 dispatch system calls from call vector (Jorrit N. Herder)
Sep 30, 2004 source code documentation updated (Jorrit N. Herder)'''
# TODO check imports
'''Declaration of the call vector that defines the mapping of system calls
to handler functions. The vector is initialized in sys_init() with map(),
which makes sure the system call numbers are ok. No space is allocated,
because the dummy is declared extern. If an illegal call is given, the
array size will be negative and this won't compile.'''
def map_(call_nr, handler):
call_index = call_nr - KERNEL_CALL
assert(call_index >= 0 and call_index < NR_SYS_CALLS)
# TODO check WTF is call_vec
call_vec[call_index] = handler
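# A sketch (kept in comment form, since the call numbers and handlers are not
# defined in this file) of how the call vector might be populated by an
# initialize()/sys_init() style routine. The SYS_* numbers and do_* handler
# names are assumptions for illustration; the real mapping lives in the
# kernel's system call table.
#
#     call_vec = [None] * NR_SYS_CALLS
#     map_(SYS_FORK, do_fork)
#     map_(SYS_EXEC, do_exec)
#     map_(SYS_CLEAR, do_clear)
#     map_(SYS_KILL, do_kill)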
def kernel_call_finish(caller, msg, result):
if result == VMSUSPEND:
'''Special case: message has to be saved for handling
until VM tells us it's allowed. VM has been notified
and we must wait for its reply to restart the call.'''
assert(RTS_ISSET(caller, RTS_VMREQUEST))
# TODO check caller struct
assert(caller['p_vmrequest']['type'] == VMSTYPE_KERNELCALL)
caller['p_vmrequest']['saved']['reqmsg'] = msg
caller['p_misc_flags'] |= MF_KCALL_RESUME
else:
''' call is finished, we could have been suspended because
of VM, remove the request message'''
caller['p_vmrequest']['saved']['reqmsg']['m_source'] = None
if result != EDONTREPLY:
# Copy the result as a message to the original user buffer
msg['m_source'] = SYSTEM
msg['m_type'] = result
if DEBUG_IPC_HOOK:
hook_ipc_msgkresult(msg, caller)
if copy_msg_to_user(msg, caller['p_delivermsg_vir']):
            print('WARNING wrong user pointer {} from process {} / {}'.format(
                caller['p_delivermsg_vir'], caller['p_name'],
                caller['p_endpoint'])
            )
cause_sig(proc_nr(caller), SIGSEGV)
def kernel_call_dispatch(caller, msg):
result = OK
if DEBUG_IPC_HOOK:
hook_ipc_msgkresult(msg, caller)
call_nr = msg['m_type'] - KERNEL_CALL
# See if the caller made a valid request and try to handle it
if call_nr < 0 or call_nr >= NR_SYS_CALLS:
result = EBADREQUEST
elif not GET_BIT(priv(caller)['s_k_call_mask'], call_nr):
result = ECALLDENIED
else: # handle the system call
if call_vec[call_nr]:
result = call_vec[call_nr](caller, msg) # TODO check WTF
else:
print("Unused kernel call {} from {}".format(
call_nr, caller['p_endpoint'])
)
if result in [EBADREQUEST, ECALLDENIED]:
print('SYSTEM: illegal request {} from {}'.format(
call_nr, msg['m_source'])
)
return result
def kernel_call(m_user, caller):
''' Check the basic syscall parameters and if accepted
dispatches its handling to the right handler'''
result = OK
msg = {}
# TODO check vir_bytes casting
caller['p_delivermsg_vir'] = m_user
    ''' The LDT and CR3 of the caller process are already loaded, because it
    has just trapped into the kernel, or they were set in switch_to_user()
    before we resume execution of an interrupted kernel call.'''
if not copy_msg_from_user(m_user, msg):
msg['m_source'] = caller['p_endpoint']
result = kernel_call_dispatch(caller, msg)
else:
print('WARNING wrong user pointer {} from process {} / {}'.format(
m_user, caller['p_name'], caller['p_endpoint'])
)
kbill_kcall = caller
kernel_call_finish(caller, msg, result)
def initialize():
# TODO implement
pass
def get_priv(rc, priv_id):
''' Allocate a new privilege structure for a system process.
Privilege ids can be assigned either statically or dynamically.'''
# TODO check sp loop
if priv_id == NULL_PRIV_ID: # allocate slot dynamically
for sp in range(BEG_DYN_PRIV_ADDR + 1, END_DYN_PRIV_ADDR):
if sp['s_proc_nr'] == None:
break
        if sp >= END_DYN_PRIV_ADDR:
            return ENOSPC
else: # allocate slot from id
if not is_static_priv_id(priv_id):
return EINVAL # invalid id
        if priv[priv_id]['s_proc_nr'] is not None:
            return EBUSY  # slot in use
        sp = priv[priv_id]
    rc['p_priv'] = sp  # assign new slot
rc['p_priv']['s_proc_nr'] = proc_nr(rc) # set association
return OK
def set_sendto_bit(rp, _id):
''' Allow a process to send messages to the process(es) associated
with the system privilege structure with the given id.'''
''' Disallow the process from sending to a process privilege structure
with no associated process, and disallow the process from sending to
itself.'''
if id_to_nr(_id) == None or priv_id(rp) == _id:
unset_sys_bit(priv(rp)['s_ipc_to'], _id)
return
set_sys_bit(priv(rp)['s_ipc_to'], _id)
''' The process that this process can now send to, must be able to reply
(or vice versa). Therefore, its send mask should be updated as well.
Ignore receivers that don't support traps other than RECEIVE, they can't
reply or send messages anyway.'''
if priv_addr(_id)['s_trap_mask'] & ~(1 << RECEIVE):
set_sys_bit(priv_addr(_id)['s_ipc_to'], priv_id(rp))
def unset_sendto_bit(rp, _id):
''' Prevent a process from sending to another process. Retain the send
mask symmetry by also unsetting the bit for the other direction.'''
unset_sys_bit(priv(rp)['s_ipc_to'], _id)
unset_sys_bit(priv_addr(_id)['s_ipc_to'], priv_id(rp))
def fill_sendto_mask(rp, _map):
    for i in range(NR_SYS_PROCS):
if get_sys_bit(_map, i):
set_sendto_bit(rp, i)
else:
unset_sendto_bit(rp, i)
def send_sig(ep, sig_nr):
''' Notify a system process about a signal. This is straightforward. Simply
set the signal that is to be delivered in the pending signals map and
send a notification with source SYSTEM. '''
if not isokendpt(ep, proc_nr) or isemptyn(proc_nr):
return EINVAL
rp = proc_addr(proc_nr)
    priv_struct = priv(rp)
    if not priv_struct:
        return ENOENT
    sigaddset(priv_struct['s_sig_pending'], sig_nr)
increase_proc_signals(rp)
mini_notify(proc_addr(SYSTEM), rp['p_endpoint'])
return OK
def cause_sig(proc_nr, sig_nr):
'''A system process wants to send a signal to a process. Examples are:
- HARDWARE wanting to cause a SIGSEGV after a CPU exception
- TTY wanting to cause SIGINT upon getting a DEL
- FS wanting to cause SIGPIPE for a broken pipe
Signals are handled by sending a message to the signal manager assigned to
the process. This function handles the signals and makes sure the signal
manager gets them by sending a notification. The process being signaled
is blocked while the signal manager has not finished all signals for it.
Race conditions between calls to this function and the system calls that
process pending kernel signals cannot exist. Signal related functions are
only called when a user process causes a CPU exception and from the kernel
process level, which runs to completion.'''
# Lookup signal manager
rp = proc_addr(proc_nr)
sig_mgr = priv(rp)['s_sig_mgr']
# TODO check self definition
if sig_mgr == SELF:
sig_mgr = rp['p_endpoint']
    # If the target is the signal manager of itself,
    # send the signal directly.
    if rp['p_endpoint'] == sig_mgr:
if SIGS_IS_LETHAL(sig_nr):
# If sig is lethal, see if a backup sig manager exists
sig_mgr = priv(rp)['s_bak_sig_mgr']
if sig_mgr != None and isokendpt(sig_mgr, sig_mgr_proc_nr):
priv(rp)['s_sig_mgr'] = sig_mgr
priv(rp)['s_bak_sig_mgr'] = None
sig_mgr_rp = proc_addr(sig_mgr_proc_nr)
RTS_UNSET(sig_mgr_rp, RTS_NO_PRIV)
cause_sig(proc_nr, sig_nr) # try again with new sig mgr
return
# no luck, time to panic
proc_stacktrace(rp)
panic("cause_sig: sig manager {} gets lethal signal {} for itself".format(
rp['p_endpoint'], sig_nr))
sigaddset(priv(rp)['s_sig_pending'], sig_nr)
if send_sig(rp['p_endpoint'], SIGKSIGSM):
panic('send_sig failed')
return
# Check if the signal is already pending. Process it otherwise
if not sigismember(rp['p_pending'], sig_nr):
sigaddset(rp['p_pending'], sig_nr)
increase_proc_signals(rp)
if not RTS_ISSET(rp, RTS_SIGNALED):
RTS_SET(rp, RTS_SIGNALED | RTS_SIG_PENDING)
if send_sig(sig_mgr, SIGKSIG) != OK:
panic('send_sig failed')
def sig_delay_done(rp):
'''A process is now known not to send any direct messages.
Tell PM that the stop delay has ended, by sending a signal to the
process. Used for actual signal delivery.'''
rp['p_misc_flags'] &= ~MF_SIG_DELAY
cause_sig(proc_nr(rp), SIGSNDELAY)
def _clear_ipc(rc):
# TODO implement
pass
def clear_endpoint(rc):
if isemptyp(rc):
panic('clear_proc: empty process {}'.format(rc['p_endpoint']))
if DEBUG_IPC_HOOK:
hook_ipc_clear(rc)
# Make sure that the exiting proc is no longer scheduled
RTS_SET(rc, RTS_NO_ENDPOINT)
if priv(rc)['s_flags'] & SYS_PROC:
priv(rc)['s_asynsize'] = 0
# If the process happens to be queued trying to send a
# message, then it must be removed from the message queues.
_clear_ipc(rc)
    # Likewise, if another process was sending or receiving a message to or
    # from the exiting process, it must be alerted that that process is no
    # longer alive. Check all processes.
clear_ipc_refs(rc, EDEADSRCDST)
def clear_ipc_refs(rc, caller_ret):
# Clear IPC references for a given process slot
# Tell processes that sent asynchronous messages to 'rc'
# they are not going to be delivered
src_id = has_pending_asend(rc, ANY)
while src_id != NULL_PRIV_ID:
        cancel_async(proc_addr(id_to_nr(src_id)), rc)
src_id = has_pending_asend(rc, ANY)
# TODO check this
    for rp in range(BEG_PROC_ADDR, END_PROC_ADDR):
if (isemptyp(rp)):
continue
# Unset pending notification bits
unset_sys_bit(priv(rp)['s_notify_pending'], priv(rc)['s_id'])
# Unset pending asynchronous messages
unset_sys_bit(priv(rp)['s_asyn_pending'], priv(rc)['s_id'])
# Check if process depends on given process.
if P_BLOCKEDON(rp) == rc['p_endpoint']:
rp['p_reg']['retreg'] = caller_ret
            _clear_ipc(rp)
def kernel_call_resume(caller):
assert(not RTS_ISSET(caller, RTS_SLOT_FREE))
assert(not RTS_ISSET(caller, RTS_VMREQUEST))
    assert(caller['p_vmrequest']['saved']['reqmsg']
           ['m_source'] == caller['p_endpoint'])
# re-execute the kernel call, with MF_KCALL_RESUME still set so
# the call knows this is a retry.
result = kernel_call_dispatch(caller, caller['p_vmrequest']['saved']['reqmsg'])
# we are resuming the kernel call so we have to remove this flag so it
# can be set again
caller['p_misc_flags'] &= ~MF_KCALL_RESUME
kernel_call_finish(caller, caller['p_vmrequest']['saved']['reqmsg'], result)
def sched(p, priority, quantum, cpu):
    # Make sure the values given are within the allowed range.
if priority > NR_SCHED_QUEUES or (priority < TASK_Q and priority != -1):
return EINVAL
if quantum < 1 and quantum != -1:
return EINVAL
# TODO implement smp
'''if CONFIG_SMP:
if (cpu < 0 and cpu != -1) or (cpu > 0 and cpu >= ncpus)
return EINVAL
if cpu != -1 and not cpu_is_ready(cpu):
return EBADCPU
'''
'''In some cases, we might be rescheduling a runnable process. In such
a case (i.e. if we are updating the priority) we set the NO_QUANTUM
flag before the generic unset to dequeue/enqueue the process'''
# FIXME this preempts the process, do we really want to do that
# FIXME this is a problem for SMP if the processes currently runs on a
# different CPU
if proc_is_runnable(p):
pass
# TODO implement SMP
'''if CONFIG_SMP:
if p->p_cpu != cpuid and cpu != -1 and cpu != p->p_cpu:
smp_schedule_migrate_proc(p, cpu)'''
RTS_SET(p, RTS_NO_QUANTUM)
    # TODO check, proc is runnable again?
if proc_is_runnable(p):
RTS_SET(p, RTS_NO_QUANTUM)
if priority != -1:
p['p_priority'] = priority
if quantum != -1:
p['p_quantum_size_ms'] = quantum
p['p_cpu_time_left'] = ms_2_cpu_time(quantum)
# TODO implement SMP
'''if CONFIG_SMP:
if cpu != -1:
p['p_cpu'] = cpu
'''
# Clear the scheduling bit and enqueue the process
RTS_UNSET(p, RTS_NO_QUANTUM)
return OK
|
|
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import openstackdocstheme
import os
import pbr.version
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../playbooks/inventory/'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# TODO(ajaeger): enable PDF building, for example add 'rst2pdf.pdfbuilder'
extensions = ['sphinx.ext.autodoc','sphinxmark']
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
author = 'OpenStack-Ansible Contributors'
category = 'Miscellaneous'
copyright = '2014-2016, OpenStack-Ansible Contributors'
description = 'OpenStack-Ansible deploys OpenStack environments using Ansible.'
project = 'OpenStack-Ansible'
target_name = 'openstack-ansible'
title = 'OpenStack-Ansible Deployment Guide'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version_info = pbr.version.VersionInfo(target_name)
# The full version, including alpha/beta/rc tags.
release = version_info.version_string_with_vcs()
# The short X.Y version.
version = version_info.canonical_version_string()
# A few variables have to be set for the log-a-bug feature.
# giturl: The location of conf.py on Git. Must be set manually.
# gitsha: The SHA checksum of the bug description. Automatically extracted from git log.
# bug_tag: Tag for categorizing the bug. Must be set manually.
# These variables are passed to the logabug code via html_context.
giturl = ("http://git.openstack.org/cgit/openstack/{0}"
"/tree/deploy-guide/source").format(target_name)
git_cmd = "/usr/bin/git log | head -n1 | cut -f2 -d' '"
gitsha = os.popen(git_cmd).read().strip('\n')
bug_title = "Documentation bug"
html_context = {"gitsha": gitsha, "giturl": giturl,
"bug_tag": "docs", "bug_title": bug_title,
"bug_project": target_name}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [openstackdocstheme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = target_name + '-docs'
# If true, publish source files
html_copy_source = False
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, target_name + '.tex',
title, author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, target_name,
title, [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, target_name,
title, author, project,
description, category),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for PDF output --------------------------------------------------
pdf_documents = [
(master_doc, target_name,
title, author)
]
latest_tag = os.popen('git describe --abbrev=0 --tags').read().strip('\n')
previous_release_branch_name = 'newton'
current_release_branch_name = 'ocata'
previous_release_capital_name = previous_release_branch_name.upper()
previous_release_formal_name = previous_release_branch_name.capitalize()
current_release_capital_name = current_release_branch_name.upper()
current_release_formal_name = current_release_branch_name.capitalize()
upgrade_backup_dir = "``/etc/openstack_deploy." + previous_release_capital_name + "``"
rst_epilog = """
.. |previous_release_branch_name| replace:: %s
.. |current_release_branch_name| replace:: %s
.. |previous_release_capital_name| replace:: %s
.. |previous_release_formal_name| replace:: %s
.. |current_release_capital_name| replace:: %s
.. |current_release_formal_name| replace:: %s
.. |upgrade_backup_dir| replace:: %s
.. |latest_tag| replace:: %s
""" % (previous_release_branch_name,
current_release_branch_name,
previous_release_capital_name,
previous_release_formal_name,
current_release_capital_name,
current_release_formal_name,
upgrade_backup_dir,
latest_tag)
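# Illustrative use of the substitutions defined above in an .rst source
# (the sentence itself is hypothetical):
#
#   Before upgrading from |previous_release_formal_name| to
#   |current_release_formal_name|, back up your configuration to
#   |upgrade_backup_dir|.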
watermark = os.popen("git branch --contains $(git rev-parse HEAD) | awk -F/ '/stable/ {print $2}'").read().strip(' \n\t').capitalize()
if watermark == "":
watermark = "Pre-release"
# -- Options for sphinxmark -----------------------------------------------
sphinxmark_enable = True
sphinxmark_div = 'docs-body'
sphinxmark_image = 'text'
sphinxmark_text = watermark
sphinxmark_text_color = (128, 128, 128)
sphinxmark_text_size = 70
|
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
from pathlib import PurePath
from textwrap import dedent
from typing import List, Mapping, Optional
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.dependency_inference import rules as dependency_inference_rules
from pants.backend.python.goals import package_pex_binary, pytest_runner
from pants.backend.python.goals.coverage_py import create_coverage_config
from pants.backend.python.goals.pytest_runner import PythonTestFieldSet
from pants.backend.python.target_types import (
PexBinary,
PythonLibrary,
PythonRequirementLibrary,
PythonTests,
)
from pants.backend.python.util_rules import pex_from_targets
from pants.core.goals.test import TestDebugRequest, TestResult, get_filtered_environment
from pants.core.util_rules import distdir
from pants.engine.addresses import Address
from pants.engine.fs import DigestContents, FileContent
from pants.engine.process import InteractiveRunner
from pants.testutil.python_interpreter_selection import skip_unless_python27_and_python3_present
from pants.testutil.rule_runner import QueryRule, RuleRunner, mock_console
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
create_coverage_config,
*pytest_runner.rules(),
*pex_from_targets.rules(),
*dependency_inference_rules.rules(), # For conftest detection.
*distdir.rules(),
*package_pex_binary.rules(),
get_filtered_environment,
*target_types_rules.rules(),
QueryRule(TestResult, (PythonTestFieldSet,)),
QueryRule(TestDebugRequest, (PythonTestFieldSet,)),
],
target_types=[PexBinary, PythonLibrary, PythonTests, PythonRequirementLibrary],
)
SOURCE_ROOT = "tests/python"
PACKAGE = os.path.join(SOURCE_ROOT, "pants_test")
GOOD_SOURCE = FileContent(f"{PACKAGE}/test_good.py", b"def test():\n pass\n")
GOOD_WITH_PRINT = FileContent(f"{PACKAGE}/test_good.py", b"def test():\n print('All good!')")
BAD_SOURCE = FileContent(f"{PACKAGE}/test_bad.py", b"def test():\n assert False\n")
PY3_ONLY_SOURCE = FileContent(f"{PACKAGE}/test_py3.py", b"def test() -> None:\n pass\n")
LIBRARY_SOURCE = FileContent(f"{PACKAGE}/library.py", b"def add_two(x):\n return x + 2\n")
BINARY_SOURCE = FileContent(f"{PACKAGE}/say_hello.py", b"print('Hello, test!')")
def create_python_library(
rule_runner: RuleRunner,
source_files: List[FileContent],
*,
name: str = "library",
dependencies: Optional[List[str]] = None,
) -> None:
for source_file in source_files:
rule_runner.create_file(source_file.path, source_file.content.decode())
source_globs = [PurePath(source_file.path).name for source_file in source_files]
rule_runner.add_to_build_file(
PACKAGE,
dedent(
f"""\
python_library(
name={repr(name)},
sources={source_globs},
dependencies={[*(dependencies or ())]},
)
"""
),
)
rule_runner.create_file(os.path.join(PACKAGE, "__init__.py"))
def create_test_target(
rule_runner: RuleRunner,
source_files: List[FileContent],
*,
name: str = "tests",
dependencies: Optional[List[str]] = None,
interpreter_constraints: Optional[str] = None,
) -> PythonTests:
for source_file in source_files:
rule_runner.create_file(source_file.path, source_file.content.decode())
rule_runner.add_to_build_file(
relpath=PACKAGE,
target=dedent(
f"""\
python_tests(
name={repr(name)},
dependencies={dependencies or []},
interpreter_constraints={[interpreter_constraints] if interpreter_constraints else []},
)
"""
),
)
tgt = rule_runner.get_target(Address(PACKAGE, target_name=name))
assert isinstance(tgt, PythonTests)
return tgt
def create_pex_binary_target(rule_runner: RuleRunner, source_file: FileContent) -> None:
rule_runner.create_file(source_file.path, source_file.content.decode())
file_name = PurePath(source_file.path).name
rule_runner.add_to_build_file(
relpath=PACKAGE,
target=dedent(
f"""\
python_library(name='bin_lib', sources=['{file_name}'])
pex_binary(name='bin', entry_point='{file_name}', output_path="bin.pex")
"""
),
)
def setup_thirdparty_dep(rule_runner: RuleRunner) -> None:
rule_runner.add_to_build_file(
relpath="3rdparty/python",
target=(
"python_requirement_library(name='ordered-set', requirements=['ordered-set==3.1.1'])"
),
)
def run_pytest(
rule_runner: RuleRunner,
test_target: PythonTests,
*,
passthrough_args: Optional[str] = None,
junit_xml_dir: Optional[str] = None,
use_coverage: bool = False,
execution_slot_var: Optional[str] = None,
extra_env_vars: Optional[str] = None,
env: Optional[Mapping[str, str]] = None,
config: Optional[str] = None,
force: bool = False,
) -> TestResult:
args = [
"--backend-packages=pants.backend.python",
f"--source-root-patterns={SOURCE_ROOT}",
# pin to lower versions so that we can run Python 2 tests
"--pytest-version=pytest>=4.6.6,<4.7",
"--pytest-pytest-plugins=['zipp==1.0.0', 'pytest-cov>=2.8.1,<2.9']",
]
if passthrough_args:
args.append(f"--pytest-args='{passthrough_args}'")
if extra_env_vars:
args.append(f"--test-extra-env-vars={extra_env_vars}")
if junit_xml_dir:
args.append(f"--pytest-junit-xml-dir={junit_xml_dir}")
if use_coverage:
args.append("--test-use-coverage")
if execution_slot_var:
args.append(f"--pytest-execution-slot-var={execution_slot_var}")
if config:
rule_runner.create_file(relpath="pytest.ini", contents=config)
args.append("--pytest-config=pytest.ini")
if force:
args.append("--test-force")
rule_runner.set_options(args, env=env, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
inputs = [PythonTestFieldSet.create(test_target)]
test_result = rule_runner.request(TestResult, inputs)
debug_request = rule_runner.request(TestDebugRequest, inputs)
if debug_request.process is not None:
with mock_console(rule_runner.options_bootstrapper):
debug_result = InteractiveRunner(rule_runner.scheduler).run(debug_request.process)
assert test_result.exit_code == debug_result.exit_code
return test_result
def test_single_passing_test(rule_runner: RuleRunner) -> None:
tgt = create_test_target(rule_runner, [GOOD_SOURCE])
result = run_pytest(rule_runner, tgt)
assert result.exit_code == 0
assert f"{PACKAGE}/test_good.py ." in result.stdout
def test_force(rule_runner: RuleRunner) -> None:
tgt = create_test_target(rule_runner, [GOOD_SOURCE])
# Should not receive a memoized result if force=True.
result_one = run_pytest(rule_runner, tgt, force=True)
result_two = run_pytest(rule_runner, tgt, force=True)
assert result_one.exit_code == 0
assert result_two.exit_code == 0
assert result_one is not result_two
# But should if force=False.
result_one = run_pytest(rule_runner, tgt, force=False)
result_two = run_pytest(rule_runner, tgt, force=False)
assert result_one.exit_code == 0
assert result_one is result_two
def test_single_failing_test(rule_runner: RuleRunner) -> None:
tgt = create_test_target(rule_runner, [BAD_SOURCE])
result = run_pytest(rule_runner, tgt)
assert result.exit_code == 1
assert f"{PACKAGE}/test_bad.py F" in result.stdout
def test_mixed_sources(rule_runner: RuleRunner) -> None:
tgt = create_test_target(rule_runner, [GOOD_SOURCE, BAD_SOURCE])
result = run_pytest(rule_runner, tgt)
assert result.exit_code == 1
assert f"{PACKAGE}/test_good.py ." in result.stdout
assert f"{PACKAGE}/test_bad.py F" in result.stdout
def test_absolute_import(rule_runner: RuleRunner) -> None:
create_python_library(rule_runner, [LIBRARY_SOURCE])
source = FileContent(
path=f"{PACKAGE}/test_absolute_import.py",
content=dedent(
"""\
from pants_test.library import add_two
def test():
assert add_two(2) == 4
"""
).encode(),
)
tgt = create_test_target(rule_runner, [source], dependencies=[":library"])
result = run_pytest(rule_runner, tgt)
assert result.exit_code == 0
assert f"{PACKAGE}/test_absolute_import.py ." in result.stdout
def test_relative_import(rule_runner: RuleRunner) -> None:
create_python_library(rule_runner, [LIBRARY_SOURCE])
source = FileContent(
path=f"{PACKAGE}/test_relative_import.py",
content=dedent(
"""\
from .library import add_two
def test():
assert add_two(2) == 4
"""
).encode(),
)
tgt = create_test_target(rule_runner, [source], dependencies=[":library"])
result = run_pytest(rule_runner, tgt)
assert result.exit_code == 0
assert f"{PACKAGE}/test_relative_import.py ." in result.stdout
def test_respects_config(rule_runner: RuleRunner) -> None:
target = create_test_target(rule_runner, [GOOD_WITH_PRINT])
result = run_pytest(rule_runner, target, config="[pytest]\naddopts = -s\n")
assert result.exit_code == 0
assert "All good!" in result.stdout and "Captured" not in result.stdout
def test_transitive_dep(rule_runner: RuleRunner) -> None:
create_python_library(rule_runner, [LIBRARY_SOURCE])
transitive_dep_fc = FileContent(
path=f"{PACKAGE}/transitive_dep.py",
content=dedent(
"""\
from pants_test.library import add_two
def add_four(x):
return add_two(x) + 2
"""
).encode(),
)
create_python_library(
rule_runner, [transitive_dep_fc], name="transitive_dep", dependencies=[":library"]
)
source = FileContent(
path=f"{PACKAGE}/test_transitive_dep.py",
content=dedent(
"""\
from pants_test.transitive_dep import add_four
def test():
assert add_four(2) == 6
"""
).encode(),
)
tgt = create_test_target(rule_runner, [source], dependencies=[":transitive_dep"])
result = run_pytest(rule_runner, tgt)
assert result.exit_code == 0
assert f"{PACKAGE}/test_transitive_dep.py ." in result.stdout
def test_thirdparty_dep(rule_runner: RuleRunner) -> None:
setup_thirdparty_dep(rule_runner)
source = FileContent(
path=f"{PACKAGE}/test_3rdparty_dep.py",
content=dedent(
"""\
from ordered_set import OrderedSet
def test():
assert OrderedSet((1, 2)) == OrderedSet([1, 2])
"""
).encode(),
)
tgt = create_test_target(rule_runner, [source], dependencies=["3rdparty/python:ordered-set"])
result = run_pytest(rule_runner, tgt)
assert result.exit_code == 0
assert f"{PACKAGE}/test_3rdparty_dep.py ." in result.stdout
def test_thirdparty_transitive_dep(rule_runner: RuleRunner) -> None:
setup_thirdparty_dep(rule_runner)
library_fc = FileContent(
path=f"{PACKAGE}/library.py",
content=dedent(
"""\
import string
from ordered_set import OrderedSet
alphabet = OrderedSet(string.ascii_lowercase)
"""
).encode(),
)
create_python_library(
rule_runner,
[library_fc],
dependencies=["3rdparty/python:ordered-set"],
)
source = FileContent(
path=f"{PACKAGE}/test_3rdparty_transitive_dep.py",
content=dedent(
"""\
from pants_test.library import alphabet
def test():
assert 'a' in alphabet and 'z' in alphabet
"""
).encode(),
)
tgt = create_test_target(rule_runner, [source], dependencies=[":library"])
result = run_pytest(rule_runner, tgt)
assert result.exit_code == 0
assert f"{PACKAGE}/test_3rdparty_transitive_dep.py ." in result.stdout
@skip_unless_python27_and_python3_present
def test_uses_correct_python_version(rule_runner: RuleRunner) -> None:
tgt = create_test_target(
rule_runner, [PY3_ONLY_SOURCE], name="py2", interpreter_constraints="CPython==2.7.*"
)
py2_result = run_pytest(rule_runner, tgt)
assert py2_result.exit_code == 2
assert "SyntaxError: invalid syntax" in py2_result.stdout
tgt = create_test_target(
rule_runner, [PY3_ONLY_SOURCE], name="py3", interpreter_constraints="CPython>=3.6"
)
py3_result = run_pytest(rule_runner, tgt)
assert py3_result.exit_code == 0
assert f"{PACKAGE}/test_py3.py ." in py3_result.stdout
def test_respects_passthrough_args(rule_runner: RuleRunner) -> None:
source = FileContent(
path=f"{PACKAGE}/test_config.py",
content=dedent(
"""\
def test_run_me():
pass
def test_ignore_me():
pass
"""
).encode(),
)
tgt = create_test_target(rule_runner, [source])
result = run_pytest(rule_runner, tgt, passthrough_args="-k test_run_me")
assert result.exit_code == 0
assert f"{PACKAGE}/test_config.py ." in result.stdout
assert "collected 2 items / 1 deselected / 1 selected" in result.stdout
def test_junit(rule_runner: RuleRunner) -> None:
tgt = create_test_target(rule_runner, [GOOD_SOURCE])
result = run_pytest(rule_runner, tgt, junit_xml_dir="dist/test-results")
assert result.exit_code == 0
assert f"{PACKAGE}/test_good.py ." in result.stdout
assert result.xml_results is not None
digest_contents = rule_runner.request(DigestContents, [result.xml_results.digest])
file = digest_contents[0]
assert file.path.startswith("dist/test-results")
assert b"pants_test.test_good" in file.content
def test_coverage(rule_runner: RuleRunner) -> None:
tgt = create_test_target(rule_runner, [GOOD_SOURCE])
result = run_pytest(rule_runner, tgt, use_coverage=True)
assert result.exit_code == 0
assert f"{PACKAGE}/test_good.py ." in result.stdout
assert result.coverage_data is not None
def test_conftest_handling(rule_runner: RuleRunner) -> None:
"""Tests that we a) inject a dependency on conftest.py and b) skip running directly on
conftest.py."""
tgt = create_test_target(rule_runner, [GOOD_SOURCE])
rule_runner.create_file(
f"{SOURCE_ROOT}/conftest.py", "def pytest_runtest_setup(item):\n print('In conftest!')\n"
)
rule_runner.add_to_build_file(SOURCE_ROOT, "python_tests()")
conftest_tgt = rule_runner.get_target(Address(SOURCE_ROOT, relative_file_path="conftest.py"))
assert isinstance(conftest_tgt, PythonTests)
result = run_pytest(rule_runner, tgt, passthrough_args="-s")
assert result.exit_code == 0
assert f"{PACKAGE}/test_good.py In conftest!\n." in result.stdout
result = run_pytest(rule_runner, conftest_tgt)
assert result.exit_code is None
def test_execution_slot_variable(rule_runner: RuleRunner) -> None:
source = FileContent(
path=f"{PACKAGE}/test_concurrency_slot.py",
content=dedent(
"""\
import os
def test_fail_printing_slot_env_var():
slot = os.getenv("SLOT")
print(f"Value of slot is {slot}")
# Deliberately fail the test so the SLOT output gets printed to stdout
assert 1 == 2
"""
).encode(),
)
tgt = create_test_target(rule_runner, [source])
result = run_pytest(rule_runner, tgt, execution_slot_var="SLOT")
assert result.exit_code == 1
assert re.search(r"Value of slot is \d+", result.stdout)
def test_extra_env_vars(rule_runner: RuleRunner) -> None:
source = FileContent(
path=f"{PACKAGE}/test_extra_env_vars.py",
content=dedent(
"""\
import os
def test_args():
assert os.getenv("SOME_VAR") == "some_value"
assert os.getenv("OTHER_VAR") == "other_value"
"""
).encode(),
)
tgt = create_test_target(rule_runner, [source])
result = run_pytest(
rule_runner,
tgt,
extra_env_vars='["SOME_VAR=some_value", "OTHER_VAR"]',
env={"OTHER_VAR": "other_value"},
)
assert result.exit_code == 0
def test_runtime_package_dependency(rule_runner: RuleRunner) -> None:
create_pex_binary_target(rule_runner, BINARY_SOURCE)
rule_runner.create_file(
f"{PACKAGE}/test_binary_call.py",
dedent(
f"""\
import os.path
import subprocess
def test_embedded_binary():
assert b"Hello, test!" in subprocess.check_output(args=['./bin.pex'])
# Ensure that we didn't accidentally pull in the binary's sources. This is a
# special type of dependency that should not be included with the rest of the
# normal dependencies.
assert os.path.exists("{BINARY_SOURCE.path}") is False
"""
),
)
rule_runner.add_to_build_file(PACKAGE, "python_tests(runtime_package_dependencies=[':bin'])")
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="test_binary_call.py"))
assert isinstance(tgt, PythonTests)
result = run_pytest(rule_runner, tgt, passthrough_args="-s")
assert result.exit_code == 0
def test_skip_type_stubs(rule_runner: RuleRunner) -> None:
rule_runner.create_file(f"{PACKAGE}/test_foo.pyi", "def test_foo() -> None:\n ...\n")
rule_runner.add_to_build_file(PACKAGE, "python_tests()")
tgt = rule_runner.get_target(Address(PACKAGE, relative_file_path="test_foo.pyi"))
assert isinstance(tgt, PythonTests)
result = run_pytest(rule_runner, tgt)
assert result.exit_code is None
|
|
"""
Created on Sep 24, 2013
@author: casale
"""
import scipy as SP
import scipy.linalg as LA
import pdb
import sys
sys.path.append('./../../..')
import mtSet.pycore.external.limix.plink_reader as plink_reader
def genBinormal(dim1,dim2,percSign=0.5,std=1e-1):
rv = (2*(SP.rand(dim1,dim2)>percSign)-1)+std*SP.randn(dim1,dim2)
return rv
def selectRnd(n_sel,n_all):
rv = SP.zeros(n_all)
rv[:n_sel] = 1
SP.random.shuffle(rv)
rv = rv==1
return rv
class CSimulator:
"""
this class takes care of phenotype generation in a flexible way
"""
def __init__(self,bfile,XX=None,P=1):
"""
X: genotype matrix
traitNum: number of traits to be considered
"""
self.bfile = bfile
self.N = XX.shape[0]
self.P = P
self.XX = XX
pass
def getRegion(self,size=3e4,min_nSNPs=1,chrom_i=None,pos_min=None,pos_max=None):
"""
Sample a region from the piece of genotype X, chrom, pos
minSNPnum: minimum number of SNPs contained in the region
Ichrom: restrict X to chromosome Ichrom before taking the region
cis: bool vector that marks the sorted region
region: vector that contains chrom and init and final position of the region
"""
bim = plink_reader.readBIM(self.bfile,usecols=(0,1,2,3))
chrom = SP.array(bim[:,0],dtype=int)
pos = SP.array(bim[:,3],dtype=int)
if chrom_i is None:
n_chroms = chrom.max()
chrom_i = int(SP.ceil(SP.rand()*n_chroms))
pos = pos[chrom==chrom_i]
chrom = chrom[chrom==chrom_i]
ipos = SP.ones(len(pos),dtype=bool)
if pos_min is not None:
ipos = SP.logical_and(ipos,pos_min<pos)
if pos_max is not None:
ipos = SP.logical_and(ipos,pos<pos_max)
pos = pos[ipos]
chrom = chrom[ipos]
if size==1:
# select single SNP
            idx = int(SP.floor(pos.shape[0]*SP.rand()))  # floor keeps the index within range
cis = SP.arange(pos.shape[0])==idx
region = SP.array([chrom_i,pos[idx],pos[idx]])
else:
while 1:
idx = int(SP.floor(pos.shape[0]*SP.rand()))
posT1 = pos[idx]
posT2 = pos[idx]+size
if posT2<=pos.max():
cis = chrom==chrom_i
cis*= (pos>posT1)*(pos<posT2)
if cis.sum()>min_nSNPs: break
region = SP.array([chrom_i,posT1,posT2])
start = SP.nonzero(cis)[0].min()
nSNPs = cis.sum()
rv = plink_reader.readBED(self.bfile,useMAFencoding=True,start = start, nSNPs = nSNPs,bim=bim)
Xr = rv['snps']
return Xr, region
def genRegionTerm(self,X,vTot=0.1,pCausal=0.10,nCausal=None,pCommon=1.,nCommon=None,plot=False,distribution='biNormal'):
"""
Generate population structure term
Population structure is simulated by background SNPs
beta_pdf: pdf used to generate the regression weights
for now either Normal or fixed
variance: variance of the term
percCausal: percentage of causal SNPs
Xcausal: set of SNPs being causal
"""
S = X.shape[1]
# number of causal, common, specific
        if nCausal is None:
            nCausal = int(SP.floor(pCausal*S))
        if nCommon is None:
            nCommon = round(pCommon*nCausal)
nSpecific = self.P*(nCausal-nCommon)
# common SNPs
if nCommon>0:
if distribution=='biNormal':
Bc = SP.kron(genBinormal(nCommon,1),genBinormal(1,self.P))
elif distribution=='normal':
Bc = SP.kron(SP.randn(nCommon,1),SP.randn(1,self.P))
Ic = selectRnd(nCommon,S)
Yc = SP.dot(X[:,Ic],Bc)
Yc *= SP.sqrt(nCommon/Yc.var(0).mean())
else:
Yc = SP.zeros((self.N,self.P))
        # independent signal
if nSpecific>0:
Is = selectRnd(nSpecific,S*self.P).reshape(S,self.P)
if distribution=='biNormal':
Bi = Is*genBinormal(S,self.P)
elif distribution=='normal':
Bi = Is*SP.randn(S,self.P)
Yi = SP.dot(X,Bi)
Yi *= SP.sqrt(nSpecific/(Yi.var(0).mean()*self.P))
else:
Yi = SP.zeros((self.N,self.P))
Y = Yc+Yi
Yc *= SP.sqrt(vTot/Y.var(0).mean())
Yi *= SP.sqrt(vTot/Y.var(0).mean())
if plot:
import pylab as PL
PL.ion()
for p in range(self.P):
PL.subplot(self.P,1,p+1)
PL.plot(SP.arange(S)[Ic],Bc[:,p],'o',color='y')
_Is = Is[:,p]
if _Is.sum()>0:
PL.plot(SP.arange(S)[_Is],Bi[_Is,p],'o',color='r')
#PL.ylim(-2,2)
PL.plot([0,S],[0,0],'k')
return Yc, Yi
def _genBgTerm_fromSNPs(self,vTot=0.5,vCommon=0.1,pCausal=0.5,plot=False):
""" generate """
print 'Reading in all SNPs. This is slow.'
rv = plink_reader.readBED(self.bfile,useMAFencoding=True)
X = rv['snps']
S = X.shape[1]
vSpecific = vTot-vCommon
# select causal SNPs
nCausal = int(SP.floor(pCausal*S))
Ic = selectRnd(nCausal,S)
X = X[:,Ic]
# common effect
Bc = SP.kron(SP.randn(nCausal,1),SP.randn(1,self.P))
Yc = SP.dot(X,Bc)
Yc *= SP.sqrt(vCommon/Yc.var(0).mean())
        # independent effect
Bi = SP.randn(nCausal,self.P)
Yi = SP.dot(X,Bi)
Yi *= SP.sqrt(vSpecific/Yi.var(0).mean())
if plot:
import pylab as PL
PL.ion()
for p in range(self.P):
PL.subplot(self.P,1,p+1)
PL.plot(SP.arange(self.X.shape[1])[Ic],Bc[:,p],'o',color='y',alpha=0.05)
PL.plot(SP.arange(self.X.shape[1])[Ic],Bi[:,p],'o',color='r',alpha=0.05)
#PL.ylim(-2,2)
PL.plot([0,Ic.shape[0]],[0,0],'k')
return Yc, Yi
def _genBgTerm_fromXX(self,vTot,vCommon,XX,a=None,c=None):
"""
generate background term from SNPs
Args:
vTot: variance of Yc+Yi
vCommon: variance of Yc
XX: kinship matrix
a: common scales, it can be set for debugging purposes
c: indipendent scales, it can be set for debugging purposes
"""
vSpecific = vTot-vCommon
        if a is None: a = SP.randn(self.P)
        if c is None: c = SP.randn(self.P)
XX += 1e-3 * SP.eye(XX.shape[0])
L = LA.cholesky(XX,lower=True)
# common effect
R = SP.randn(self.N,self.P)
A = SP.zeros((self.P,self.P))
A[:,0] = a
Yc = SP.dot(L,SP.dot(R,A.T))
Yc*= SP.sqrt(vCommon)/SP.sqrt(Yc.var(0).mean())
# specific effect
R = SP.randn(self.N,self.P)
Yi = SP.dot(L,SP.dot(R,SP.diag(c)))
Yi*= SP.sqrt(vSpecific)/SP.sqrt(Yi.var(0).mean())
return Yc, Yi
def genBgTerm(self,vTot=0.5,vCommon=0.1,pCausal=0.5,XX=None,use_XX=False,a=None,c=None,plot=False):
""" generate """
if use_XX:
if XX is None: XX = self.XX
assert XX is not None, 'Simulator: set XX!'
Yc,Yi = self._genBgTerm_fromXX(vTot,vCommon,XX,a=a,c=c)
else:
Yc,Yi = self._genBgTerm_fromSNPs(vTot=vTot,vCommon=vCommon,pCausal=pCausal,plot=plot)
return Yc, Yi
def genHidden(self,nHidden=10,vTot=0.5,vCommon=0.1):
""" generate """
vSpecific = vTot-vCommon
# generate hidden
X = SP.randn(self.N,nHidden)
# common effect
Bc = SP.kron(SP.randn(nHidden,1),SP.randn(1,self.P))
Yc = SP.dot(X,Bc)
Yc *= SP.sqrt(vCommon/Yc.var(0).mean())
        # independent effect
Bi = SP.randn(nHidden,self.P)
Yi = SP.dot(X,Bi)
Yi *= SP.sqrt(vSpecific/Yi.var(0).mean())
return Yc,Yi
def genNoise(self,vTot=0.4,vCommon=0.2):
        vSpecific = vTot-vCommon
        # common
        Yc = SP.kron(SP.randn(self.N,1),SP.randn(1,self.P))
        Yc *= SP.sqrt(vCommon/Yc.var(0).mean())
        # independent
        Yi = SP.randn(self.N,self.P)
        Yi *= SP.sqrt(vSpecific/Yi.var(0).mean())
return Yc,Yi
def genPheno(self,Xr,
vTotR=0.1,nCommonR=5,nCausalR=10,distribution='biNormal',
vCommonBg=0.1,vTotBg=0.4,pCausalBg=0.5,XX=None,use_XX=False,
vCommonH=0.1,vTotH=0.2,nHidden=10,
vCommonN=0.,vTotN=0.3,standardize=True):
        YRc,YRi = self.genRegionTerm(Xr,vTot=vTotR,nCommon=nCommonR,nCausal=nCausalR,distribution=distribution)
YGc,YGi = self.genBgTerm(vCommon=vCommonBg,vTot=vTotBg,pCausal=pCausalBg,XX=XX,use_XX=use_XX)
YHc,YHi = self.genHidden(vCommon=vCommonH,vTot=vTotH,nHidden=nHidden)
YNc,YNi = self.genNoise(vCommon=vCommonN,vTot=vTotN)
Y = YRc+YRi+YGc+YGi+YHc+YHi+YNc+YNi
if standardize:
Y -= Y.mean(0)
Y /= Y.std(0)
info = {'YRc':YRc,'YRi':YRi,
'YGc':YGc,'YGi':YGi,
'YHc':YHc,'YHi':YHi,
'YNc':YNc,'YNi':YNi}
return Y, info
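# Illustrative usage sketch (kept as a comment; `bfile` and `XX` are assumed to be
# a plink file basename and a precomputed kinship matrix supplied by the caller):
#
#   sim = CSimulator(bfile, XX=XX, P=2)
#   Xr, region = sim.getRegion(size=3e4, min_nSNPs=10)
#   Y, info = sim.genPheno(Xr, vTotR=0.1, vCommonBg=0.1, vTotBg=0.4, use_XX=True,
#                          vCommonH=0.1, vTotH=0.2, vCommonN=0., vTotN=0.3)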
|
|
#!/usr/bin/python
import os
import subprocess
import sys, getopt
import time
import re
import datetime
import json
import copy
import hashlib
import shutil
import random
from pprint import pprint
from subprocess import *
from distutils.spawn import find_executable
VERSION = 3.2
REGEXP_IMPORT = re.compile('/// *<reference path="(.*)/([^/]+)/[^"]+" */>')
#REGEXP_IMPORT_D_TS = re.compile('/// *<reference path=".*/([^/]+)/[^.]+\.class\.d\.ts" */>')
REGEXP_IMPORT_D_TS = re.compile('/// *<reference path="([^"]+)" */>')
REGEXP_IMPORT_D_TS_IMPORT = re.compile('/// *<reference path="(.*/[^/]+/[^.]+\.class\.d\.ts)" */>')
REGEXP_IMPORT_CUSTOM = re.compile('/// *< *([a-z]+)="([^"]+)" *([^/]+)? */>')
REGEXP_CUSTOM = re.compile('/// *< *([a-z]+) */>')
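# Illustrative reference lines that the patterns above are meant to match
# (module and library names are hypothetical):
#   ///<reference path="../../framework/core/core.class.ts"/>   -> REGEXP_IMPORT
#   ///<reference path="../other/other.class.d.ts"/>            -> REGEXP_IMPORT_D_TS_IMPORT
#   ///<module="framework/core"/>, ///<file="helpers.ts"/>,
#   ///<lib="jquery"/>                                           -> REGEXP_IMPORT_CUSTOM
#   ///<exclude/>                                                -> REGEXP_CUSTOM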
ESVersion = 3
USE_SOUND = True
USE_NOTIFICATION = True
TYPESCRIPT_PATH = None
ONCE = False
class Console:
def __init__(self):
self.__colors = {'RED':'\033[91m', 'BLUE':'\033[94m', 'GREEN':'\033[92m', 'ORANGE':'\033[93m', 'NORMAL':'\033[0m'}
def getColor(self, name):
return self.__colors[name]
def red(self, value):
value = self.__treat(value)
self.__print(self.__colors['RED'] + str(value) + self.__colors['NORMAL'])
def green(self, value):
value = self.__treat(value)
self.__print(self.__colors['GREEN'] + str(value) + self.__colors['NORMAL'])
def blue(self, value):
value = self.__treat(value)
self.__print(self.__colors['BLUE'] + str(value) + self.__colors['NORMAL'])
def orange(self, value):
value = self.__treat(value)
self.__print(self.__colors['ORANGE'] + str(value) + self.__colors['NORMAL'])
def normal(self, value):
value = self.__treat(value)
self.__print(self.__colors['NORMAL'] + str(value))
def error(self, value):
value = self.__treat(value)
self.red("[ERROR]:"+str(value))
def info(self, value):
value = self.__treat(value)
self.blue("[INFO]:"+str(value))
def out(self, value):
value = self.__treat(value)
now = datetime.datetime.now()
self.normal("[OUT "+now.strftime("%d-%m-%Y %H:%M:%S")+"]:"+str(value))
def __treat(self, value):
if((value is None)):
value = "None"
if(not value):
value = "empty"
try:
value = str(value)
except:
print(value)
value = ""
return value
def __print(self, value):
print(value)
class Voice:
def __init__(self):
self.count = 0
self.good = ["thats better", "yep", "ok", "good", "very good", "excellent", "great", "you are on FIRE", "Hoorray!!"];
#self.score = [-10, 0, 10, 20, 30, 40, 50, 60, 70];
self.neutral = 2
def getBadSound(self):
if(self.count > 0):
self.count = 0
self.count -= 1
return self.__getSound()
def getGoodSound(self):
self.count += 1
return self.__getSound()
def __getSound(self):
r = self.getRandom()
l = len(self.good)
for i in range(l):
if((i-self.neutral+1)*10>r):
#print(str(self.score[i])+":"+str(((i-self.neutral+1)*10)))
return self.good[i]
self.count = 0
return "imbattable"
def getRandom(self):
left = self.count
right = self.count*10
if(left>right):
left, right = right, left
return random.randint(left, right)
voice = Voice()
class MegaWatcher:
def __init__(self, folders):
self.__folders = folders
self.__fileWatchers = {}
self.__compilations = 0
self.__errors = 0
start_time = time.time()
#LOG.green(folders)
for folder in self.__folders:
name = folder.split("/")[-1]
self.__fileWatchers[name] = TSFilesWatcher(folder, self, name)
try:
file_config=open('.cache_metacompile.json','r')
data = json.load(file_config)
file_config.close()
for key in data.keys():
parentName = key.split("/")[0]
module = key.split("/")[1]
if(parentName in self.__fileWatchers):
if(self.__fileWatchers[parentName].hasModule(module)):
self.__fileWatchers[parentName].getModule(module).setDependencyMD5(data[key]["dependencies"])
self.__fileWatchers[parentName].getModule(module).setError(data[key]["errors"])
self.__fileWatchers[parentName].getModule(module).setLastDate(data[key]["last_date"])
#force recompilation of errors
if(data[key]["errors"]):
self.__fileWatchers[parentName].getModule(module).setLastCompilationDate(0)
else:
self.__fileWatchers[parentName].getModule(module).setLastCompilationDate(data[key]["last_date_compilation"])
self.__fileWatchers[parentName].getModule(module).init()
except Exception as error:
LOG.orange("No previous compilation found")
print(error)
pass
keys = self.__fileWatchers.keys()
#for name in keys:
#LOG.red("compiling module "+name)
#self.__fileWatchers[name].compileAll()
for name in keys:
#LOG.red("dependencies module "+name)
self.__fileWatchers[name].checkDependenciesAll()
self.compileAll()
LOG.green("MetaTypescript Compiler v"+str(VERSION)+" is ready in "+str(round(time.time()-start_time,2))+"s")
self.watch()
def hasModule(self, moduleName, name = None):
if(name is None):
name = moduleName.split("/")[0]
moduleName = moduleName.split("/")[1]
if(name in self.__fileWatchers.keys()):
return self.__fileWatchers[name].hasModule(moduleName)
else:
return False
def getModule(self, moduleName, name = None):
if(name is None):
name = moduleName.split("/")[0]
moduleName = moduleName.split("/")[1]
if(name in self.__fileWatchers.keys()):
return self.__fileWatchers[name].getModule(moduleName)
else:
return None
def getModuleList(self):
module_list = []
keys = self.__fileWatchers.keys()
for name in keys:
module_list.append(name)
return module_list
def watch(self):
try:
i = 0
keys = self.__fileWatchers.keys()
while(True):
i = i + 1
self.compileAll()
time.sleep(2)
if(i%5 == 0):
for name in keys:
self.__fileWatchers[name].seekFiles()
except KeyboardInterrupt:
print("End of program")
def compileAll(self):
start_time = time.time()
dep = {}
done = []
toCompile = []
for name in self.__fileWatchers.keys():
toCompile+=self.__fileWatchers[name].getFiles()
for tsfile in toCompile:
dep[tsfile.getLongModuleName()] = tsfile.getDependencies()
tsfile.checkLastDateChanged()
metaHasCompiled = False
hasCompiled = True
self.__errors = 0
errors = 0
compilations = 0
error_line = "error"
module_list = []
module_list_error = []
list_error = []
times = []
while(hasCompiled):
hasCompiled = False
isFailed = False
for tsfile in toCompile:
compile = False
if(not tsfile.isUpToDate()):
compile = True
module_list.append(tsfile.getLongModuleName())
LOG.orange(tsfile.getLongModuleName()+" has changed...")
else:
# LOG.info(file.getModule()+":"+str(file.getLastDate()))
compile = False
for depend in dep[tsfile.getLongModuleName()]:
md5 = tsfile.getDependencyMD5(depend)
if(md5!=self.getModule(depend).getMD5() and not self.getModule(depend).isFailed()):
compile = True
LOG.orange(tsfile.getLongModuleName()+" depends of "+depend+" and has to be recompiled")
#LOG.red(md5)
#LOG.green(self.getModule(depend).getMD5())
break
if(compile):
#LOG.out("Compile "+file.getLongModuleName())
success, t = tsfile.compile()
times.append(t)
if(not success):
module_list_error.append(tsfile.getLongModuleName())
list_error.append(tsfile.getLastError())
LOG.error("Error during compilation of "+tsfile.getLongModuleName())
self.__errors = self.__errors + 1
errors = errors + 1
self.__compilations = self.__compilations + 1
compilations = compilations + 1
hasCompiled = True
metaHasCompiled = True
if(success):
for module in toCompile:
if(module.isFailed()):
module.resetFailed(tsfile.getLongModuleName())
isFailed = False
for tsfile in toCompile:
if(tsfile.isFailed()):
isFailed = True
break
if(metaHasCompiled):
#LOG.green("Step 1 in "+str(round(time.time()-start_time,2))+"s")
save = {}
for tsfile in toCompile:
save[tsfile.getLongModuleName()] = {"dependencies":tsfile.getDependencyMD5(), "errors":tsfile.isFailed(),"last_date":tsfile.getLastDate(),"last_date_compilation":tsfile.getLastCompilationDate()}
save = json.dumps(save)
f = open(".cache_metacompile.json","w")
f.write(save)
f.close()
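            # Illustrative shape of .cache_metacompile.json (module names are hypothetical):
            #   {"app/core": {"dependencies": {"app/utils": "<md5 of utils.class.d.ts>"},
            #                 "errors": false,
            #                 "last_date": 1500000000.0,
            #                 "last_date_compilation": 1500000000.0}}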
if("compile_modules" in data and data["compile_modules"] == True):
##Javascript concatenation for each module
dep_order = self.getDependenciesInOrder(dep, dep.keys())
files = {}
names = {}
maps = {}
for name in dep_order:
tsfile = self.getModule(name)
if(tsfile.getRoot() not in files):
files[tsfile.getRoot()] = []
maps[tsfile.getRoot()] = []
names[tsfile.getRoot()] = [""]
names[tsfile.getRoot()] += [tsfile.getLongModuleName()]
files[tsfile.getRoot()] += tsfile.getJSContent(True)
maps[tsfile.getRoot()].append(tsfile.getMapFile())
for name in files:
f = open(name+"/index.js",'w')
f.write("".join(files[name]))
f.write("\n///".join(names[name]))
f.write("\n//# sourceMappingURL=index.js.map")
f.close()
args = ["node",".pythoncompile/node_modules/source-map-concat/bin/source-map-concat.js","--source-file", name+"/index.js", "--out-file",name+"/index.js.map"] + maps[name]
#print(" ".join(args))
call(args)
#LOG.green("Step 2 in "+str(round(time.time()-start_time,2))+"s")
if("out" in data):
source = False
if("concat_sourcemaps" in data and data["concat_sourcemaps"] == True):
source = True
for out in data["out"]:
module = self.getModule(out)
maps = []
if(module is not None):
dependencies = self.getDependenciesInOrder(dep, module.getAllDependencies()) + [module.getLongModuleName()]
writer = open(data["out"][out],'w')
for name in dependencies:
tsfile = self.getModule(name)
maps.append(tsfile.getMapFile())
try:
writer.write(""+("".join(tsfile.getJSContent(True))))
except:
LOG.red("/!\ Unable to read "+name+"'s js file - "+data["out"][out]+" can't be correctly created /!\\")
if(source):
writer.write("\n//# sourceMappingURL="+data["out"][out]+".map")
writer.close()
if(source):
args = ["node",".pythoncompile/node_modules/source-map-concat/bin/source-map-concat.js","--source-file", data["out"][out],"--out-file",data["out"][out]+".map"] + maps
call(args)
else:
LOG.error("No module named "+str(out)+" "+str(data["out"][out])+" can't be created - modules list:")
LOG.error(self.getModuleList())
#LOG.green("Step 3 in "+str(round(time.time()-start_time,2))+"s")
#print(self.getModule(out))
avg = 0
if(len(times) > 0):
for t in times:
avg += t
avg /= len(times)
avg = round(avg, 2)
LOG.green("Compiled in "+str(round(time.time()-start_time,2))+"s - average time : "+str(avg)+"s")
if(isFailed or errors > 0):
LOG.error("End of step with errors : "+str(compilations)+" compilation(s) and "+str(errors)+" error(s)")
if(USE_SOUND):
voice.getBadSound()
Tools.speak(str(errors)+" error")
if(USE_NOTIFICATION):
error_msg = None
for index,tsfile in enumerate(module_list_error) :
if(error_msg is None):
error_msg = ""
else:
error_msg = error_msg +"\n"
line = list_error[index].split("\n")[0]
m = re.search('([^/]+\.ts)\(([0-9]+),', line)
error_msg = error_msg + tsfile + " "+m.group(1)+":"+m.group(2)
Tools.notify(str(error_msg), str(errors)+" Error(s)", str("\n".join(module_list_error)), "error", "Basso")
#os.system("osascript -e 'display notification \""+str(errors)+" Error(s)\" with title \"Error\"'")
else:
LOG.green("End of step : "+str(compilations)+" compilation(s) and "+str(errors)+" error(s)")
if(USE_SOUND):
Tools.speak(voice.getGoodSound())
if(USE_NOTIFICATION):
Tools.notify(str("\n".join(module_list)), "Success!", str("\n".join(module_list_error)), "ok", "Purr")
if(ONCE == True):
LOG.info("done")
sys.exit(0)
else:
if(ONCE == True and not isFailed and errors == 0):
LOG.info("done")
sys.exit(0)
#os.system("osascript -e 'display notification \"Success\" with title \"Success\"'")
def getDependenciesInOrder(self, dep, dep_list):
dep_order = []
dep_save = copy.deepcopy(dep)
i = 0
j = 0
while(len(dep_list)>0 and j<1000):
dependency = dep_list[i]
for done in dep_order:
if(done in dep_save[dependency]):
dep_save[dependency].remove(done)
if(len(dep_save[dependency]) == 0):
dep_order.append(dependency)
dep_list.remove(dependency)
else:
i = i + 1
if(i>=len(dep_list)):
i = 0
j = j + 1
#print(self.getModule(dependency).getJSContent())
#print(dependency, len(dep[dependency]),dep[dependency])
if j>=1000:
LOG.red(dep_list)
for module in dep_list:
LOG.blue(module)
LOG.green(dep_save[module])
LOG.red(dep_order)
raise Exception("Dependencies cycle")
else:
return dep_order
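# Illustrative behaviour of getDependenciesInOrder (module names are hypothetical):
#   dep = {"app/a": [], "app/b": ["app/a"], "app/c": ["app/a", "app/b"]}
#   getDependenciesInOrder(dep, ["app/c", "app/b", "app/a"]) -> ["app/a", "app/b", "app/c"]
# A dependency cycle never empties its entry, so after 1000 passes the method raises
# Exception("Dependencies cycle").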
class TSFilesWatcher:
def __init__(self, root, mega, parentModuleName):
try:
self.__errors = 0
self.__compilations = 0
self.__errors = 0
self.__root = root
self.__files = []
self.__mega = mega
self.__parentModuleName = parentModuleName
self.prepareModules()
self.seekFiles()
#self.compileAll()
#self.checkDependenciesAll()
#self.watch()
except KeyboardInterrupt:
print("\n")
LOG.info("End of program : "+str(self.__compilations)+" compilation(s) and "+str(self.__errors)+" error(s)")
def getFiles(self):
return self.__files
def prepareModules(self):
for filename in os.listdir(self.__root):
exists = False
            # skip hidden files/folders
if(filename[0:1]!="."):
file = os.path.join(self.__root, filename)
                # only directories
if(os.path.isdir(file)):
self.prepareModule(self.__root, filename)
def prepareModule(self, root, filename):
file = os.path.join(root, filename)
moduleFile = os.path.join(file,filename+".class.ts")
#return
files_dir = []
files_import = []
ts_found = os.path.exists(moduleFile)
if(not ts_found):
for root, subFolders, files in os.walk(file,followlinks=True):
if(not ts_found):
for f in files:
if(f[0:1]!="."):
extension = f.split(".")
if(extension[-2]!="d" and extension[-1]=="ts" and extension[-2]!="class"):
ts_found = True
break
if(ts_found):
if(not os.path.exists(moduleFile)):
f = open(moduleFile, "w")
f.write("\n")
f.close()
moduleFile = os.path.join(file,filename+".class.d.ts")
if(not os.path.exists(moduleFile)):
f = open(moduleFile, "w")
f.write("\n")
f.close()
def checkDependenciesAll(self):
for module in self.__files:
module.checkDependencies()
def compileAllDeprecated(self):
dep = {}
done = []
toCompile = []
toCompile[:] = self.__files
for file in self.__files:
dep[file.getModule()] = file.getDependencies()
file.checkLastDateChanged()
metaHasCompiled = False
hasCompiled = True
errors = 0
compilations = 0
error_line = "error"
module_list = []
module_list_error = []
list_error = []
while(hasCompiled):
hasCompiled = False
isFailed = False
for file in toCompile:
compile = False
if(not file.isUpToDate()):
compile = True
module_list.append(file.getModule())
LOG.orange(file.getLongModuleName()+" has changed...")
else:
# LOG.info(file.getModule()+":"+str(file.getLastDate()))
compile = False
for depend in dep[file.getModule()]:
md5 = file.getDependencyMD5(depend)
if(md5!=self.getModule(depend).getMD5() and not self.getModule(depend).isFailed()):
compile = True
LOG.orange(file.getModule()+" depends of "+depend+" and has to be recompiled")
break
if(compile):
LOG.out("Compile "+file.getModule())
success = file.compile()
if(not success):
module_list_error.append(file.getModule())
list_error.append(file.getLastError())
LOG.error("Error during compilation of "+file.getModule())
self.__errors = self.__errors + 1
errors = errors + 1
self.__compilations = self.__compilations + 1
compilations = compilations + 1
hasCompiled = True
metaHasCompiled = True
if(success):
for module in toCompile:
if(module.isFailed()):
module.resetFailed(file.getModule())
isFailed = False
for file in toCompile:
if(file.isFailed()):
isFailed = True
break
if(metaHasCompiled):
if(isFailed or errors > 0):
LOG.error("End of step with errors : "+str(compilations)+" compilation(s) and "+str(errors)+" error(s)")
if(USE_SOUND):
voice.getBadSound()
Tools.speak(str(errors)+" error")
if(USE_NOTIFICATION):
error_msg = None
for index,file in enumerate(module_list_error) :
if(error_msg is None):
error_msg = ""
else:
error_msg = error_msg +"\n"
line = list_error[index].split("\n")[0]
m = re.search('([^/]+\.ts)\(([0-9]+),', line)
error_msg = error_msg + file + " "+m.group(1)+":"+m.group(2)
Tools.notify(str(error_msg), str(errors)+" Error(s)", str("\n".join(module_list_error)), "error", "Basso")
#os.system("osascript -e 'display notification \""+str(errors)+" Error(s)\" with title \"Error\"'")
else:
LOG.green("End of step : "+str(compilations)+" compilation(s) and "+str(errors)+" error(s)")
if(USE_SOUND):
Tools.speak(voice.getGoodSound())
if(USE_NOTIFICATION):
Tools.notify(str("\n".join(module_list)), "Success!", str("\n".join(module_list_error)), "ok", "Purr")
#os.system("osascript -e 'display notification \"Success\" with title \"Success\"'")
def watch(self):
i = 0
while(True):
i = i + 1
self.compileAll()
time.sleep(2)
if(i%5 == 0):
self.seekFiles()
def seekFiles(self):
for filename in os.listdir(self.__root):
            # skip hidden files/folders
if(filename[0:1]!="."):
file = os.path.join(self.__root, filename)
                # only directories
if(os.path.isdir(file)):
for subfilename in os.listdir(file):
extension = subfilename.split(".")
                        # only .ts files
if(extension[-1] == "ts" and extension[-2]!="d" and extension[-2]=="class"):
#if(extension[-1] == "ts" and extension[-2]!="d"):
self.__addFile(file, subfilename)
#remove removed files
self.__files[:] = [file for file in self.__files if not file.isRemoved()]
def __addFile(self, modulePath, filePath):
for file in self.__files:
if(file.same(self.__root, modulePath, filePath)):
return
file = TSFile(self, self.__root, modulePath, filePath)
self.__files.append(file)
#file.init()
def hasModule(self, moduleName, name = None):
if(name is None or name == self.getParentName()):
for file in self.__files:
if(file.isModule(moduleName)):
return True
else:
return self.__mega.hasModule(moduleName, name)
def getModule(self, moduleName, name = None):
if(name is None or name == self.getParentName()):
for file in self.__files:
if(file.isModule(moduleName)):
return file
else:
return self.__mega.getModule(moduleName, name)
def getParentName(self):
return self.__parentModuleName
def getRoot(self):
return self.__root.split("/")[0]
class MD5File:
@staticmethod
def getMD5(path):
return hashlib.md5(open(path, 'rb').read()).hexdigest().strip()
class Tools:
@staticmethod
def cmdExist(name):
try:
devnull = open(os.devnull)
if(isinstance(name, basestring)):
args = [name]
else:
args = name
subprocess.Popen(args, stdout=devnull, stderr=devnull).communicate()
except OSError as e:
if e.errno == os.errno.ENOENT:
return False
return True
@staticmethod
def speak(text):
if(Tools.cmdExist(["say", "''"])):
os.system("say '"+text+"'")
@staticmethod
def notify(text, title, subtitle, icon, sound):
notifySubtitle = ""
if(sys.platform == 'darwin'):
if(not Tools.cmdExist("terminal-notifier")):
os.system("brew install terminal-notifier")
if(subtitle is not None and len(subtitle.strip()) > 0):
notifySubtitle = "-substitle '"+subtitle+"'"
os.system("terminal-notifier -message '"+text+"' "+notifySubtitle+" -title '"+title+"' -activate com.googlecode.iterm2 -sound "+sound+" -group compile")
else:
try:
import pynotify
if not pynotify.init("Typescript compilation"):
print "Failed to send notification"
else:
notifyTitle = title
if(subtitle is not None and len(subtitle.strip()) > 0):
notifyTitle += "\n"+subtitle
n = pynotify.Notification(notifyTitle, text, "dialog-"+icon)
n.set_urgency(pynotify.URGENCY_NORMAL)
n.set_timeout(2500)
try:
if not n.show():
print "Failed to send notification"
except:
print "Failed to send notification (restart metatypescript is needed)"
pass
except:
LOG.red("Failed to send notification")
class TSFile:
def __init__(self, watcher, root, module, file):
self.__watcher = watcher
self.__removed = False
        self.__root = root
self.__module = os.path.relpath(module, root)
self.__filepath = file
self.__realPath = os.path.join(self.__root, self.__module, self.__filepath)
self.__dep = []
self.__depMD5 = {}
#self.__dependencies = []
#self.__dependenciesMD5 = {}
self.__md5 = ""
self.__lastDateChanged = None
self.__lastCompilationDate = None
self.__failed = False
self.__last_error = ""
def init(self):
if(not self.isUpToDate()):
self.prepare()
self.checkDependencies()
if(not self.__removed):
self.refreshMD5()
self.checkLastDateChanged()
def __unicode__(self):
return self.__str__()
def isFailed(self):
return self.__failed
def resetFailed(self, moduleName):
self.__failed = False
if(moduleName in self.__depMD5):
self.__depMD5[moduleName] = None
def prepare(self):
#LOG.green(self.getLongModuleName()+" prepare")
index = self.__realPath.rfind(os.path.sep)
file = self.__realPath[:index]
index = file.rfind(os.path.sep)
filename = file[index+1:]
root = file[:index]
index = root.rfind(os.path.sep)
root = root[:index]
moduleFile = os.path.join(file,filename+".class.ts")
if(os.path.exists(moduleFile)):
pass
#return
files_dir = []
files_import = []
files_dependencies = {}
lib_import = []
module_not_in_d_ts = []
module_to_copy = []
for root, subFolders, files in os.walk(file,followlinks=False):
for f in files:
if(f[0:1]!="."):
extension = f.split(".")
if(extension[-2]!="d" and extension[-1]=="ts"):
if(os.path.relpath(os.path.join(root,f)) == moduleFile):
continue
fileread = open(os.path.join(root,f), "r")
relative_path = os.path.join(root, f)[len(file)+1:]
excluded = False
files_dependencies[relative_path] = []
for line in fileread:
result = REGEXP_CUSTOM.match(line)
if(result != None):
type = result.group(1)
if(type == "exclude"):
LOG.orange(os.path.join(root,f)+" is excluded")
excluded = True
break
else:
LOG.red(type+" of reference is not understood")
else:
reg = REGEXP_IMPORT_CUSTOM.match(line)
if(reg!=None):
type = reg.group(1)
result = reg.group(2)
if(type == "module"):
include_in_d_ts = True
copy_in_js = False
if(len(reg.groups())>=3):
if(reg.group(3)=="first"):
include_in_d_ts = False
elif(reg.group(3)=="copy"):
copy_in_js = True
                                        # import of a module or parent-module
module_file = result
parts = module_file.split("/")
if(len(parts)>2):
grand_parent = parts[0]
parts.pop(0)
else:
grand_parent = self.__watcher.getRoot()
if(len(parts)>1):
parent = parts[0]
module = parts[1]
else:
parent = self.__watcher.getParentName()
module = parts[0]
if(copy_in_js and module_file not in module_to_copy):
module_to_copy.append(parent+"/"+module)
if(include_in_d_ts):
module_file = module+".class.d.ts"
else:
module_file = module+".class_free.d.ts"
if(parent == self.__watcher.getParentName()):
module_file = ".."+os.path.sep+module+os.path.sep+module_file
else:
if(grand_parent == self.__watcher.getRoot()):
module_file = ".."+os.path.sep+".."+os.path.sep+parent+os.path.sep+module+os.path.sep+module_file
else:#two folders top
module_file = ".."+os.path.sep+".."+os.path.sep+".."+os.path.sep+grand_parent+os.path.sep+parent+os.path.sep+module+os.path.sep+module_file
if(module_file not in files_import):
files_import.append(module_file)
if(not include_in_d_ts and module_file not in module_not_in_d_ts):
module_not_in_d_ts.append(module_file)
elif(type == "file"):
#import file (for import order)
module_file = result
if(module_file[-3:]!=".ts"):
module_file += ".ts"
if(module_file not in files_dependencies[relative_path]):
files_dependencies[relative_path].append(module_file)
#LOG.green(module_file+" imported by "+relative_path)
#if(module_file in files_dir):
#LOG.red("remove "+module_file)
#files_dir.remove(module_file)
#LOG.green("add "+module_file+" from "+str(f))
#files_dir.insert(0, module_file)
elif(type == "lib"):
if(result == "es6-promise" and ESVersion == 6):
LOG.green("Ignore ES6-promise lib")
continue
                                        # import of a library
module_file = ".."+os.path.sep+".."+os.path.sep+".."+os.path.sep+"lib"+os.path.sep+result+os.path.sep+result+".d.ts"
if(module_file not in lib_import):
lib_import.append(module_file)
else:
LOG.red(type+" of reference is not understood")
else:
result = REGEXP_IMPORT_D_TS_IMPORT.match(line)
if(result!=None):
fileimported = result.group(1)
for i in range(0, os.path.relpath(os.path.join(root,f),file).count(os.path.sep)):
fileimported = fileimported[3:]
if(fileimported not in files_import):
files_import.append(fileimported)
continue
fileread.close()
if(not excluded):
file_test = os.path.relpath(os.path.join(root,f),file)
if(file_test not in files_dir):
files_dir.append(file_test)
if(len(lib_import)>0):
content = "/* Extern Librairies */\n"
for line in lib_import:
content += "///<reference path=\""+line+"\"/>\n"
else:
content = ""
if(len(files_import)>0):
if(len(content)>0):
content+="\n"
content += "/* Extern Modules */\n"
for line in files_import:
content+= "///<reference path=\""+line+"\"/>\n"
if(len(files_dir)>0):
if(len(content)>0):
content+="\n"
Found = True
i = 0
content += "\n/* Internal Files from Deps*/\n"
while(Found and i < 50):
i = i + 1
Found=False
#LOG.red(files_dependencies)
zero = []
for k in files_dependencies:
v = files_dependencies[k]
if(len(v) == 0):
file = k
if(file in files_dir):
files_dir.remove(file)
zero.append(k)
Found = True
content+= "///<reference path=\""+k+"\"/>\n"
for key in files_dependencies:
value = files_dependencies[key]
if(file in value):
files_dependencies[key].remove(file)
for file in zero:
files_dependencies.pop(file, None)
if(len(files_dependencies.keys())>0):
LOG.red("You have an error on your ///<file> imports")
LOG.red(str(files_dependencies))
content += "///Dependencies not resolved : "+str(files_dependencies)+"\n"
content += "\n/* Internal files */\n"
for line in files_dir:
content+= "///<reference path=\""+line+"\"/>\n"
f = open(moduleFile, "w")
f.write(content)
f.close()
return (module_not_in_d_ts, files_import, module_to_copy)
def compile(self):
start_time = time.time()
module_not_in_d_ts, files_import, module_to_copy = self.prepare()
start_time2 = time.time()
try:
global TYPESCRIPT_PATH
#args = ["tsc", "-t","ES"+str(ESVersion), "--declaration", "--sourcemap", self.__realPath, "--out", self.__realPath[:-2]+"js"]
#args = ["tsc", "-t","ES"+str(ESVersion), "--declaration", "--sourcemap", self.__realPath, "--out", self.__realPath[:-2]+"js"]
#
#
#LOG.red(path)
args = TYPESCRIPT_PATH + ["-t","ES"+str(ESVersion), "--declaration", "--sourcemap", self.__realPath, "--out", self.__realPath[:-2]+"js"]
#print(" ".join(args))
# print(args)
pipe = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except:
LOG.error("Typescript is not installed - please execute:")
LOG.error("npm -g install typescript")
LOG.red(sys.exc_info()[0])
sys.exit(1)
return
output, errors = pipe.communicate()
#pipe.wait()
start_time2 = round(time.time()-start_time2,2)
if(errors is None or len(errors)==0):
if(output.find("error")!=-1):
errors = output
#print(start_time2)
if(errors!=None and len(errors)>0):
self.__last_error = errors
LOG.error("["+os.path.join( self.getLongModuleName(), self.__filepath)+"]:\n"+str("\n".join(errors.split("\n")[0:5])).replace(os.path.abspath(self.__root),""))
self.__failed = True
self.refreshMD5()
self.checkLastDateChanged()
self.checkDependencies()
self.__lastCompilationDate = self.__lastDateChanged
self.__refreshDepMD5()
else:
self.__failed = False
file = open(self.__realPath[:-2]+"d.ts","r")
file_content = ""
file_content_free = ""
for line in file:
#print(line)
result = REGEXP_IMPORT_D_TS.match(line)
                # TODO: check that the module really is one of our typescript modules
if(result is None or result.group(1) not in module_not_in_d_ts):
file_content = file_content + "\n" + line
if(result is None or result.group(1) not in files_import):
file_content_free = file_content_free + "\n" + line
file.close()
file = open(self.__realPath[:-2]+"d.ts","w")
file.write(file_content)
file.close()
file = open(self.__realPath[:-3]+"_free.d.ts","w")
file.write(file_content_free)
file.close()
#print(round(time.time()-start_time,2))
if(len(module_to_copy)>0):
content = ""
file = open(self.__realPath[:-2]+"js","r")
for line in file:
content+= line+"\n"
file.close()
content_imported = "var fs = require(\"fs\");\n"
dep = []
for name in module_to_copy:
if(name not in dep):
#dep.append(name)
moduleDep = self.__getAllDependencies(name)
#LOG.red(name)
#LOG.green(moduleDep)
for dependency in moduleDep:
if(dependency not in dep):
dep.append(dependency)
#dep.reverse()
for name in dep:
module = self.__watcher.getModule(name.split("/")[1],name.split("/")[0])
content_imported+='eval(fs.readFileSync("../../'+name+'/'+module.getModule()+'.class.js").toString());\n'
content = content_imported + content
file = open(self.__realPath[:-2]+"js","w")
file.write(content)
file.close()
self.checkLastDateChanged()
self.refreshMD5()
self.checkDependencies()
self.__refreshDepMD5()
self.__lastCompilationDate = self.__lastDateChanged
start_time = round(time.time()-start_time,2)
LOG.blue("Compiled in "+str(start_time)+"s ("+str(start_time2)+"s)")
return (not self.__failed,start_time)
def getJSContent(self, clean = False):
file = open(self.__realPath[:-2]+"js","r")
lines = []
for line in file:
if(clean):
if(line.find("//# sourceMappingURL")!=-1):#/* or line.find("///<reference path")!=-1
continue
lines.append(line)
file.close()
return lines
def getMapFile(self):
return self.__realPath[:-2]+"js.map"
def __getAllDependencies(self, longName = None, original = []):
if( longName is None):
module = self
else:
module = self.__watcher.getModule(longName.split("/")[1],longName.split("/")[0])
if(module is not None and longName not in original):
dep = module.getDependencies()
if(len(dep)>0):
if(longName not in original and module is not self):
original.append(longName)
for dependency in dep:
if(dependency not in original):
self.__getAllDependencies(dependency, original)
else:
if(module is not self):
original.append(longName)
return original
def getAllDependencies(self):
return self.__getAllDependencies()
def getLastCompilationDate(self):
return self.__lastCompilationDate
def setLastDate(self, date):
self.__lastDateChanged = date
def setLastCompilationDate(self, date):
self.__lastCompilationDate = date
def isUpToDate(self):
return self.__lastDateChanged!=None and self.__lastCompilationDate >= self.__lastDateChanged
def isModule(self, moduleName):
return moduleName == self.__module
def getLastError(self):
return self.__last_error
def getRoot(self):
return self.__root
def getModule(self):
return self.__module
def getLongModuleName(self):
return self.__watcher.getParentName()+"/"+self.getModule()
def getLastDate(self):
return self.__lastDateChanged
def getDependencies(self):
return self.__dep
def getDependencyMD5(self, module = None):
if(module is not None):
return self.__depMD5[module]
return self.__depMD5
def setDependencyMD5(self, dependencies):
self.__depMD5 = dependencies
def setError(self, error):
self.__failed = error
def same(self, root, module, file):
module = os.path.relpath(module, root)
return root==self.__root and module == self.__module and file == self.__filepath
def checkLastDateChanged(self):
dir = os.path.join(self.__root, self.__module)
date = os.path.getmtime(dir)
#print(dir+":"+str(date))
for dirpath, dirnames, filenames in os.walk(dir, None, True):
for subdirname in dirnames:
sdate = os.path.getmtime(os.path.join(dirpath, subdirname))
if(sdate>date):
date = sdate
for subdirname in filenames:
sdate = os.path.getmtime(os.path.join(dirpath, subdirname))
if(sdate>date):
date = sdate
self.__lastDateChanged = date
def checkDependencies(self):
file = None
try:
file = open(self.__realPath, 'r')
#dep = []
        except IOError:
            LOG.error("Can't read "+self.__filepath)
        # reset the dependency list
        self.__dep = []
        if(file is not None):
            for line in file:
                result = REGEXP_IMPORT.match(line)
                if(result is not None):
                    depModule = result.group(2)
                    #dep.append(depModule)
                    # the "is not None" check skips folders that are not modules
#LOG.green(depModule)
if(result.group(1) == ".."):
self.__addDependency(depModule, self.__watcher.getParentName())
#if (depModule not in self.__dependencies and self.__watcher.getModule(depModule) is not None):
# self.__dependencies.append(module.getParentName()+"/"+depModule)
# self.__dependenciesMD5[module.getParentName()+"/"+depModule] = None
else:
self.__addDependency(depModule, result.group(1).split(os.path.sep)[-1])
                        #mega module: not used
#pass
##if (depModule not in self.__dependenciesMEGA and self.__watcher.getModule(depModule, result.group(1)) is not None):
#pass
#self.__dependenciesMEGA[result.group(1)].append(depModule)
#self.__dependenciesMEGAMD5[result.group(1)][depModule] = None
file.close()
#self.__dependencies[:] = [modep for modep in self.__dependencies if modep in dep]
if(len(self.__dep)>0):
LOG.blue(self.getLongModuleName()+" Dependencies : "+LOG.getColor('ORANGE')+str(self.__dep))
else:
self.__removed = True
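    # Registers parentName+"/"+module as a dependency when the watcher knows that
    # module, and reserves an MD5 slot so dependency changes can be detected later.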
def __addDependency(self, module, parentName):
long_name = parentName+"/"+module
if(long_name not in self.__dep and self.__watcher.getModule(module, parentName) is not None):
self.__dep.append(long_name)
if(long_name not in self.__depMD5.keys()):
self.__depMD5[long_name] = None
def getMD5(self):
return self.__md5
def refreshMD5(self):
self.__md5 = MD5File.getMD5(self.__realPath[:-2]+"d.ts")
def __refreshDepMD5(self):
for dep in self.__dep:
module = dep.split("/")[1]
parentName = dep.split("/")[0]
if(self.__watcher.hasModule(module, parentName )):
self.__depMD5[dep] = self.__watcher.getModule(module, parentName ).getMD5()
def isRemoved(self):
return self.__removed
def __str__(self):
valueStr = "[TSFile module=\""+self.__module+"\" file=\""+self.__filepath+"\" root=\""+self.__root+"\" md5=\""+str(self.__md5)+"\""
return valueStr+" dep=\""+str(len(self.__dep))+"\" upToDate=\""+str(self.isUpToDate())+"\"]"
def __repr__(self):
return self.__str__()
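# Bootstraps a new project: clones the DefinitelyTyped definitions into ./lib
# (if git is available) and copies the bundled example skeleton into the
# current working directory.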
def initialize():
LOG.green("initiliazing project")
if(Tools.cmdExist("git")):
os.system("git clone https://github.com/borisyankov/DefinitelyTyped.git lib")
#distutils.shutil.copytree()
source_example = os.path.abspath(os.path.dirname(os.path.realpath(__file__))+os.sep+".."+os.sep+"example")
copytree(source_example, ".")
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
try:
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
        except (IOError, OSError, shutil.Error):
            LOG.red(d+" already exists or can't be written")
if __name__ == '__main__':
LOG = Console()
LOG.info("Typescript Start")
#LOG.green(sys.argv[0])
#LOG.red(os.getcwd())
#
if(TYPESCRIPT_PATH is None):
path_used = None
if(Tools.cmdExist("tsc")):
TYPESCRIPT_PATH = ["tsc"]
path_used = find_executable("tsc")
else:
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(path, "../")
path_used = path+"node_modules/typescript/bin/tsc.js"
TYPESCRIPT_PATH = ["node", path_used ]
LOG.green("Typescript version:")
LOG.blue("Typescript path : "+str(path_used))
subprocess.call(TYPESCRIPT_PATH+["-v"])
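    # The project is described by a metatypescript.json file in the current
    # directory; if it is missing, a new project skeleton is generated first.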
if(not os.path.isfile('metatypescript.json')):
LOG.orange("No metatypescript.json file found - initializing a new project")
initialize()
try:
file_config=open('metatypescript.json','r')
except:
LOG.red(os.getcwd()+"/metatypescript.json not found")
sys.exit(1)
data = json.load(file_config)
if("target" in data):
ESVersion = data["target"]
file_config.close()
    do_initialize = False  # set by --initialize
directories = data["folders"]
try:
        # getopt only supports single-character short options, so the multi-character
        # flags are exposed as long options (--es5, --nosound, --nonotification, --initialize).
        opts, args = getopt.getopt(sys.argv[1:], "d:oR", ["directory=", "es5", "nosound", "nonotification", "initialize", "reset", "once"])
        for o, a in opts:
            if o in ("-d", "--directory"):
                directories = [a]
            elif o == "--es5":
                ESVersion = 5
            elif o == "--nosound":
                USE_SOUND = False
            elif o == "--nonotification":
                USE_NOTIFICATION = False
            elif o == "--initialize":
                do_initialize = True
            elif o in ("-o", "--once"):
                ONCE = True
            elif o in ("-R", "--reset"):
                if(os.path.isfile('.cache_metacompile.json')):
                    os.remove(".cache_metacompile.json")
except getopt.GetoptError as err:
LOG.error(err)
    if(do_initialize):
        initialize()
LOG.green("ES:"+str(ESVersion))
MegaWatcher(directories)
exit(1)
for folder in directories:
if(not os.path.exists(folder)):
LOG.error(folder+" not found")
exit(1)
LOG.info("Reading "+folder+" folder")
if(not os.path.exists(os.path.join(folder,".tmp"))):
os.mkdir(os.path.join(folder,".tmp"), 0777)
watcher = TSFilesWatcher(folder)
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class ToughPinchZoomCasesPage(page_module.Page):
def __init__(self, url, page_set, name=''):
super(ToughPinchZoomCasesPage, self).__init__(
url=url, page_set=page_set, name=name,
shared_page_state_class=shared_page_state.SharedDesktopPageState,
credentials_path = 'data/credentials.json')
self.archive_data_file = 'data/tough_pinch_zoom_cases.json'
self.target_scale_factor = page_set.target_scale_factor
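  # Issues a single pinch gesture; scale_factor > 1 zooms in and scale_factor < 1
  # zooms out, anchored at the given page-relative coordinates.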
def RunPinchGesture(self, action_runner, left_anchor_ratio=0.5,
top_anchor_ratio=0.5, scale_factor=None,
speed_in_pixels_per_second=800):
with action_runner.CreateGestureInteraction('PinchAction',
repeatable=True):
action_runner.PinchPage(
left_anchor_ratio=left_anchor_ratio,
top_anchor_ratio=top_anchor_ratio,
scale_factor=scale_factor,
speed_in_pixels_per_second=speed_in_pixels_per_second)
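  # Pinches in to the target scale factor, then pinches back out in successive
  # half steps until the page is at 1x again; the cycle repeats three times.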
def RunPageInteractions(self, action_runner):
action_runner.tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
for _ in xrange(0, 3):
current_scale_factor = self.target_scale_factor
self.RunPinchGesture(action_runner, scale_factor=current_scale_factor)
while current_scale_factor > 1.0:
current_scale_factor *= 1/2.0
self.RunPinchGesture(action_runner, scale_factor=1/2.0)
class GoogleSearchPage(ToughPinchZoomCasesPage):
""" Why: top google property; a google tab is often open. """
def __init__(self, page_set):
super(GoogleSearchPage, self).__init__(
url='https://www.google.com/#hl=en&q=barack+obama',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
super(GoogleSearchPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(text='Next')
class GmailPage(ToughPinchZoomCasesPage):
""" Why: productivity, top google properties """
def __init__(self, page_set):
super(GmailPage, self).__init__(
url='https://mail.google.com/mail/',
page_set=page_set)
self.credentials = 'google'
def RunNavigateSteps(self, action_runner):
super(GmailPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition(
'window.gmonkey !== undefined &&'
'document.getElementById("gb") !== null')
class GoogleCalendarPage(ToughPinchZoomCasesPage):
""" Why: productivity, top google properties """
def __init__(self, page_set):
super(GoogleCalendarPage, self).__init__(
url='https://www.google.com/calendar/',
page_set=page_set)
self.credentials = 'google'
def RunNavigateSteps(self, action_runner):
super(GoogleCalendarPage, self).RunNavigateSteps(action_runner)
action_runner.Wait(2)
class GoogleImageSearchPage(ToughPinchZoomCasesPage):
""" Why: tough image case; top google properties """
def __init__(self, page_set):
super(GoogleImageSearchPage, self).__init__(
url='https://www.google.com/search?q=cats&tbm=isch',
page_set=page_set)
self.credentials = 'google'
class YoutubePage(ToughPinchZoomCasesPage):
""" Why: #3 (Alexa global) """
def __init__(self, page_set):
super(YoutubePage, self).__init__(
url='http://www.youtube.com',
page_set=page_set)
self.credentials = 'google'
def RunNavigateSteps(self, action_runner):
super(YoutubePage, self).RunNavigateSteps(action_runner)
action_runner.Wait(2)
class BlogSpotPage(ToughPinchZoomCasesPage):
"""
  Why: #11 (Alexa global), google property; some blogger layouts have infinite
  scroll, but this one is more interesting.
"""
def __init__(self, page_set):
super(BlogSpotPage, self).__init__(
url='http://googlewebmastercentral.blogspot.com/',
page_set=page_set, name='Blogger')
def RunNavigateSteps(self, action_runner):
super(BlogSpotPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(text='accessibility')
class FacebookPage(ToughPinchZoomCasesPage):
""" Why: top social,Public profile """
def __init__(self, page_set):
super(FacebookPage, self).__init__(
url='http://www.facebook.com/barackobama',
page_set=page_set, name='Facebook')
self.credentials = 'facebook'
def RunNavigateSteps(self, action_runner):
super(FacebookPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(text='About')
class LinkedinPage(ToughPinchZoomCasesPage):
""" Why: #12 (Alexa global),Public profile """
def __init__(self, page_set):
super(LinkedinPage, self).__init__(
url='http://www.linkedin.com/in/linustorvalds',
page_set=page_set, name='LinkedIn')
class WikipediaPage(ToughPinchZoomCasesPage):
""" Why: #6 (Alexa) most visited worldwide,Picked an interesting page """
def __init__(self, page_set):
super(WikipediaPage, self).__init__(
url='http://en.wikipedia.org/wiki/Wikipedia',
page_set=page_set, name='Wikipedia (1 tab)')
class TwitterPage(ToughPinchZoomCasesPage):
""" Why: #8 (Alexa global),Picked an interesting page """
def __init__(self, page_set):
super(TwitterPage, self).__init__(
url='https://twitter.com/katyperry',
page_set=page_set, name='Twitter')
def RunNavigateSteps(self, action_runner):
super(TwitterPage, self).RunNavigateSteps(action_runner)
action_runner.Wait(2)
class ESPNPage(ToughPinchZoomCasesPage):
""" Why: #1 sports """
def __init__(self, page_set):
super(ESPNPage, self).__init__(
url='http://espn.go.com/nba',
page_set=page_set, name='ESPN')
class WeatherDotComPage(ToughPinchZoomCasesPage):
""" Why: #7 (Alexa news); #27 total time spent,Picked interesting page """
def __init__(self, page_set):
super(WeatherDotComPage, self).__init__(
# pylint: disable=line-too-long
url='http://www.weather.com/weather/right-now/Mountain+View+CA+94043',
page_set=page_set, name='Weather.com')
class YahooGamePage(ToughPinchZoomCasesPage):
""" Why: #1 games according to Alexa (with actual games in it) """
def __init__(self, page_set):
super(YahooGamePage, self).__init__(
url='http://games.yahoo.com',
page_set=page_set)
def RunNavigateSteps(self, action_runner):
super(YahooGamePage, self).RunNavigateSteps(action_runner)
action_runner.Wait(2)
class ToughPinchZoomCasesPageSet(story.StorySet):
""" Set of pages that are tricky to pinch-zoom """
def __init__(self, target_scale_factor):
super(ToughPinchZoomCasesPageSet, self).__init__(
archive_data_file='data/tough_pinch_zoom_cases.json',
cloud_storage_bucket=story.PARTNER_BUCKET)
self.target_scale_factor = target_scale_factor
self.AddStory(GoogleSearchPage(self))
self.AddStory(GmailPage(self))
self.AddStory(GoogleCalendarPage(self))
self.AddStory(GoogleImageSearchPage(self))
self.AddStory(YoutubePage(self))
self.AddStory(BlogSpotPage(self))
self.AddStory(FacebookPage(self))
self.AddStory(LinkedinPage(self))
self.AddStory(WikipediaPage(self))
self.AddStory(TwitterPage(self))
self.AddStory(ESPNPage(self))
# Why: #1 news worldwide (Alexa global)
self.AddStory(ToughPinchZoomCasesPage('http://news.yahoo.com', self))
# Why: #2 news worldwide
self.AddStory(ToughPinchZoomCasesPage('http://www.cnn.com', self))
self.AddStory(WeatherDotComPage(self))
# Why: #1 world commerce website by visits; #3 commerce in the US by time
# spent
self.AddStory(ToughPinchZoomCasesPage('http://www.amazon.com', self))
# Why: #1 commerce website by time spent by users in US
self.AddStory(ToughPinchZoomCasesPage('http://www.ebay.com', self))
self.AddStory(YahooGamePage(self))
# Why: #1 Alexa recreation
self.AddStory(ToughPinchZoomCasesPage('http://booking.com', self))
# Why: #1 Alexa sports
self.AddStory(ToughPinchZoomCasesPage('http://sports.yahoo.com/', self))
class AndroidToughPinchZoomCasesPageSet(ToughPinchZoomCasesPageSet):
"""
ToughPinchZoomCasesPageSet using the maximum Android zoom level. This is
chosen as 7x, which may seem to exceed the 5x value specified in
WebPreferences::default_maximum_page_scale_factor. However, as desktop sites
on Android start at less than 1x scale (up to 0.25x), a value of 7x does not
exceed the 5x limit.
"""
def __init__(self):
super(AndroidToughPinchZoomCasesPageSet, self).__init__(7.0)
class DesktopToughPinchZoomCasesPageSet(ToughPinchZoomCasesPageSet):
""" ToughPinchZoomCasesPageSet using the maximum desktop zoom level """
def __init__(self):
super(DesktopToughPinchZoomCasesPageSet, self).__init__(4.0)
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'badge.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
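    # Generated by pyuic4 from badge.ui: setupUi() builds the widget tree and
    # applies the dark console palettes; retranslateUi() sets all visible strings.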
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(790, 588)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(699, 553))
MainWindow.setTabShape(QtGui.QTabWidget.Triangular)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.tabWidget = QtGui.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(10, 20, 771, 511))
self.tabWidget.setTabShape(QtGui.QTabWidget.Rounded)
self.tabWidget.setMovable(True)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab_Uart = QtGui.QWidget()
self.tab_Uart.setObjectName(_fromUtf8("tab_Uart"))
self.comboBox_UartPort = QtGui.QComboBox(self.tab_Uart)
self.comboBox_UartPort.setGeometry(QtCore.QRect(10, 10, 171, 27))
self.comboBox_UartPort.setObjectName(_fromUtf8("comboBox_UartPort"))
self.comboBox_UartBaud = QtGui.QComboBox(self.tab_Uart)
self.comboBox_UartBaud.setGeometry(QtCore.QRect(190, 10, 161, 27))
self.comboBox_UartBaud.setObjectName(_fromUtf8("comboBox_UartBaud"))
self.comboBox_UartBaud.addItem(_fromUtf8(""))
self.comboBox_UartBaud.addItem(_fromUtf8(""))
self.comboBox_UartBaud.addItem(_fromUtf8(""))
self.comboBox_UartBaud.addItem(_fromUtf8(""))
self.comboBox_UartBaud.addItem(_fromUtf8(""))
self.comboBox_UartBaud.addItem(_fromUtf8(""))
self.comboBox_UartBaud.addItem(_fromUtf8(""))
self.comboBox_UartBaud.addItem(_fromUtf8(""))
self.comboBox_UartBaud.addItem(_fromUtf8(""))
self.comboBox_UartBaud.addItem(_fromUtf8(""))
self.comboBox_UartBaud.addItem(_fromUtf8(""))
self.comboBox_UartBaud.addItem(_fromUtf8(""))
self.comboBox_UartBaud.addItem(_fromUtf8(""))
self.comboBox_UartBaud.addItem(_fromUtf8(""))
self.pushButton_UartConnect = QtGui.QPushButton(self.tab_Uart)
self.pushButton_UartConnect.setGeometry(QtCore.QRect(640, 10, 111, 27))
self.pushButton_UartConnect.setCheckable(True)
self.pushButton_UartConnect.setObjectName(_fromUtf8("pushButton_UartConnect"))
self.textEdit_UartConsole = QtGui.QTextEdit(self.tab_Uart)
self.textEdit_UartConsole.setGeometry(QtCore.QRect(10, 50, 741, 381))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.textEdit_UartConsole.setPalette(palette)
self.textEdit_UartConsole.setReadOnly(True)
self.textEdit_UartConsole.setObjectName(_fromUtf8("textEdit_UartConsole"))
self.lineEdit_UartInput = QtGui.QLineEdit(self.tab_Uart)
self.lineEdit_UartInput.setGeometry(QtCore.QRect(10, 440, 611, 27))
self.lineEdit_UartInput.setObjectName(_fromUtf8("lineEdit_UartInput"))
self.comboBox_UartTerminator = QtGui.QComboBox(self.tab_Uart)
self.comboBox_UartTerminator.setGeometry(QtCore.QRect(630, 440, 121, 27))
self.comboBox_UartTerminator.setObjectName(_fromUtf8("comboBox_UartTerminator"))
self.comboBox_UartTerminator.addItem(_fromUtf8(""))
self.comboBox_UartTerminator.addItem(_fromUtf8(""))
self.comboBox_UartTerminator.addItem(_fromUtf8(""))
self.comboBox_UartTerminator.addItem(_fromUtf8(""))
self.pushButton_UartRefresh = QtGui.QPushButton(self.tab_Uart)
self.pushButton_UartRefresh.setGeometry(QtCore.QRect(360, 10, 61, 27))
self.pushButton_UartRefresh.setObjectName(_fromUtf8("pushButton_UartRefresh"))
self.tabWidget.addTab(self.tab_Uart, _fromUtf8(""))
self.tab_Spi = QtGui.QWidget()
self.tab_Spi.setObjectName(_fromUtf8("tab_Spi"))
self.textEdit_SpiConsole = QtGui.QTextEdit(self.tab_Spi)
self.textEdit_SpiConsole.setGeometry(QtCore.QRect(10, 90, 741, 381))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.textEdit_SpiConsole.setPalette(palette)
self.textEdit_SpiConsole.setReadOnly(True)
self.textEdit_SpiConsole.setObjectName(_fromUtf8("textEdit_SpiConsole"))
self.lineEdit_SpiFilePath = QtGui.QLineEdit(self.tab_Spi)
self.lineEdit_SpiFilePath.setGeometry(QtCore.QRect(10, 20, 461, 27))
self.lineEdit_SpiFilePath.setObjectName(_fromUtf8("lineEdit_SpiFilePath"))
self.comboBox_SpiOperation = QtGui.QComboBox(self.tab_Spi)
self.comboBox_SpiOperation.setGeometry(QtCore.QRect(480, 20, 161, 27))
self.comboBox_SpiOperation.setObjectName(_fromUtf8("comboBox_SpiOperation"))
self.comboBox_SpiOperation.addItem(_fromUtf8(""))
self.comboBox_SpiOperation.addItem(_fromUtf8(""))
self.comboBox_SpiOperation.addItem(_fromUtf8(""))
self.comboBox_SpiOperation.addItem(_fromUtf8(""))
self.pushButton_SpiRun = QtGui.QPushButton(self.tab_Spi)
self.pushButton_SpiRun.setGeometry(QtCore.QRect(650, 20, 99, 27))
self.pushButton_SpiRun.setCheckable(False)
self.pushButton_SpiRun.setObjectName(_fromUtf8("pushButton_SpiRun"))
self.line = QtGui.QFrame(self.tab_Spi)
self.line.setGeometry(QtCore.QRect(10, 55, 741, 21))
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.tabWidget.addTab(self.tab_Spi, _fromUtf8(""))
self.tab_Jtag = QtGui.QWidget()
self.tab_Jtag.setObjectName(_fromUtf8("tab_Jtag"))
self.textEdit_JtagConsole = QtGui.QTextEdit(self.tab_Jtag)
self.textEdit_JtagConsole.setGeometry(QtCore.QRect(10, 90, 741, 381))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.textEdit_JtagConsole.setPalette(palette)
self.textEdit_JtagConsole.setReadOnly(True)
self.textEdit_JtagConsole.setObjectName(_fromUtf8("textEdit_JtagConsole"))
self.comboBox_JtagSelectDevice = QtGui.QComboBox(self.tab_Jtag)
self.comboBox_JtagSelectDevice.setGeometry(QtCore.QRect(10, 10, 201, 27))
self.comboBox_JtagSelectDevice.setObjectName(_fromUtf8("comboBox_JtagSelectDevice"))
self.pushButton_JtagStartServer = QtGui.QPushButton(self.tab_Jtag)
self.pushButton_JtagStartServer.setGeometry(QtCore.QRect(10, 40, 201, 27))
self.pushButton_JtagStartServer.setCheckable(True)
self.pushButton_JtagStartServer.setObjectName(_fromUtf8("pushButton_JtagStartServer"))
self.pushButton_JtagConnect = QtGui.QPushButton(self.tab_Jtag)
self.pushButton_JtagConnect.setGeometry(QtCore.QRect(260, 10, 241, 27))
self.pushButton_JtagConnect.setCheckable(True)
self.pushButton_JtagConnect.setObjectName(_fromUtf8("pushButton_JtagConnect"))
self.pushButton_JtagRunGdb = QtGui.QPushButton(self.tab_Jtag)
self.pushButton_JtagRunGdb.setGeometry(QtCore.QRect(550, 40, 201, 27))
self.pushButton_JtagRunGdb.setCheckable(True)
self.pushButton_JtagRunGdb.setObjectName(_fromUtf8("pushButton_JtagRunGdb"))
self.line_2 = QtGui.QFrame(self.tab_Jtag)
self.line_2.setGeometry(QtCore.QRect(210, 10, 41, 61))
self.line_2.setFrameShape(QtGui.QFrame.VLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.line_3 = QtGui.QFrame(self.tab_Jtag)
self.line_3.setGeometry(QtCore.QRect(500, 10, 41, 61))
self.line_3.setFrameShape(QtGui.QFrame.VLine)
self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
self.line_3.setObjectName(_fromUtf8("line_3"))
self.lineEdit_JtagElfPath = QtGui.QLineEdit(self.tab_Jtag)
self.lineEdit_JtagElfPath.setGeometry(QtCore.QRect(550, 10, 201, 27))
self.lineEdit_JtagElfPath.setObjectName(_fromUtf8("lineEdit_JtagElfPath"))
self.tabWidget.addTab(self.tab_Jtag, _fromUtf8(""))
self.tab_I2c = QtGui.QWidget()
self.tab_I2c.setObjectName(_fromUtf8("tab_I2c"))
self.textEdit_I2cConsole = QtGui.QTextEdit(self.tab_I2c)
self.textEdit_I2cConsole.setGeometry(QtCore.QRect(10, 90, 741, 381))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.textEdit_I2cConsole.setPalette(palette)
self.textEdit_I2cConsole.setReadOnly(True)
self.textEdit_I2cConsole.setObjectName(_fromUtf8("textEdit_I2cConsole"))
self.line_4 = QtGui.QFrame(self.tab_I2c)
self.line_4.setGeometry(QtCore.QRect(10, 55, 741, 21))
self.line_4.setFrameShape(QtGui.QFrame.HLine)
self.line_4.setFrameShadow(QtGui.QFrame.Sunken)
self.line_4.setObjectName(_fromUtf8("line_4"))
self.lineEdit_I2cFilePath = QtGui.QLineEdit(self.tab_I2c)
self.lineEdit_I2cFilePath.setGeometry(QtCore.QRect(10, 20, 461, 27))
self.lineEdit_I2cFilePath.setObjectName(_fromUtf8("lineEdit_I2cFilePath"))
self.comboBox_I2cOperation = QtGui.QComboBox(self.tab_I2c)
self.comboBox_I2cOperation.setGeometry(QtCore.QRect(480, 20, 161, 27))
self.comboBox_I2cOperation.setObjectName(_fromUtf8("comboBox_I2cOperation"))
self.comboBox_I2cOperation.addItem(_fromUtf8(""))
self.comboBox_I2cOperation.addItem(_fromUtf8(""))
self.comboBox_I2cOperation.addItem(_fromUtf8(""))
self.comboBox_I2cOperation.addItem(_fromUtf8(""))
self.pushButton_I2cRun = QtGui.QPushButton(self.tab_I2c)
self.pushButton_I2cRun.setGeometry(QtCore.QRect(650, 20, 99, 27))
self.pushButton_I2cRun.setCheckable(False)
self.pushButton_I2cRun.setObjectName(_fromUtf8("pushButton_I2cRun"))
self.tabWidget.addTab(self.tab_I2c, _fromUtf8(""))
self.tab_Gpio = QtGui.QWidget()
self.tab_Gpio.setObjectName(_fromUtf8("tab_Gpio"))
self.checkBox_d0 = QtGui.QCheckBox(self.tab_Gpio)
self.checkBox_d0.setGeometry(QtCore.QRect(120, 100, 81, 31))
self.checkBox_d0.setObjectName(_fromUtf8("checkBox_d0"))
self.comboBox_d0 = QtGui.QComboBox(self.tab_Gpio)
self.comboBox_d0.setGeometry(QtCore.QRect(210, 100, 85, 27))
self.comboBox_d0.setObjectName(_fromUtf8("comboBox_d0"))
self.comboBox_d0.addItem(_fromUtf8(""))
self.comboBox_d0.addItem(_fromUtf8(""))
self.checkBox_d1 = QtGui.QCheckBox(self.tab_Gpio)
self.checkBox_d1.setGeometry(QtCore.QRect(120, 180, 81, 31))
self.checkBox_d1.setObjectName(_fromUtf8("checkBox_d1"))
self.comboBox_d1 = QtGui.QComboBox(self.tab_Gpio)
self.comboBox_d1.setGeometry(QtCore.QRect(210, 180, 85, 27))
self.comboBox_d1.setObjectName(_fromUtf8("comboBox_d1"))
self.comboBox_d1.addItem(_fromUtf8(""))
self.comboBox_d1.addItem(_fromUtf8(""))
self.comboBox_d2 = QtGui.QComboBox(self.tab_Gpio)
self.comboBox_d2.setGeometry(QtCore.QRect(210, 260, 85, 27))
self.comboBox_d2.setObjectName(_fromUtf8("comboBox_d2"))
self.comboBox_d2.addItem(_fromUtf8(""))
self.comboBox_d2.addItem(_fromUtf8(""))
self.checkBox_d2 = QtGui.QCheckBox(self.tab_Gpio)
self.checkBox_d2.setGeometry(QtCore.QRect(120, 260, 81, 31))
self.checkBox_d2.setObjectName(_fromUtf8("checkBox_d2"))
self.comboBox_d3 = QtGui.QComboBox(self.tab_Gpio)
self.comboBox_d3.setGeometry(QtCore.QRect(210, 340, 85, 27))
self.comboBox_d3.setObjectName(_fromUtf8("comboBox_d3"))
self.comboBox_d3.addItem(_fromUtf8(""))
self.comboBox_d3.addItem(_fromUtf8(""))
self.checkBox_d3 = QtGui.QCheckBox(self.tab_Gpio)
self.checkBox_d3.setGeometry(QtCore.QRect(120, 340, 81, 31))
self.checkBox_d3.setObjectName(_fromUtf8("checkBox_d3"))
self.checkBox_d7 = QtGui.QCheckBox(self.tab_Gpio)
self.checkBox_d7.setGeometry(QtCore.QRect(460, 340, 81, 31))
self.checkBox_d7.setObjectName(_fromUtf8("checkBox_d7"))
self.checkBox_d5 = QtGui.QCheckBox(self.tab_Gpio)
self.checkBox_d5.setGeometry(QtCore.QRect(460, 180, 81, 31))
self.checkBox_d5.setObjectName(_fromUtf8("checkBox_d5"))
self.comboBox_d4 = QtGui.QComboBox(self.tab_Gpio)
self.comboBox_d4.setGeometry(QtCore.QRect(550, 100, 85, 27))
self.comboBox_d4.setObjectName(_fromUtf8("comboBox_d4"))
self.comboBox_d4.addItem(_fromUtf8(""))
self.comboBox_d4.addItem(_fromUtf8(""))
self.checkBox_d6 = QtGui.QCheckBox(self.tab_Gpio)
self.checkBox_d6.setGeometry(QtCore.QRect(460, 260, 81, 31))
self.checkBox_d6.setObjectName(_fromUtf8("checkBox_d6"))
self.comboBox_d5 = QtGui.QComboBox(self.tab_Gpio)
self.comboBox_d5.setGeometry(QtCore.QRect(550, 180, 85, 27))
self.comboBox_d5.setObjectName(_fromUtf8("comboBox_d5"))
self.comboBox_d5.addItem(_fromUtf8(""))
self.comboBox_d5.addItem(_fromUtf8(""))
self.comboBox_d6 = QtGui.QComboBox(self.tab_Gpio)
self.comboBox_d6.setGeometry(QtCore.QRect(550, 260, 85, 27))
self.comboBox_d6.setObjectName(_fromUtf8("comboBox_d6"))
self.comboBox_d6.addItem(_fromUtf8(""))
self.comboBox_d6.addItem(_fromUtf8(""))
self.checkBox_d4 = QtGui.QCheckBox(self.tab_Gpio)
self.checkBox_d4.setGeometry(QtCore.QRect(460, 100, 81, 31))
self.checkBox_d4.setObjectName(_fromUtf8("checkBox_d4"))
self.comboBox_d7 = QtGui.QComboBox(self.tab_Gpio)
self.comboBox_d7.setGeometry(QtCore.QRect(550, 340, 85, 27))
self.comboBox_d7.setObjectName(_fromUtf8("comboBox_d7"))
self.comboBox_d7.addItem(_fromUtf8(""))
self.comboBox_d7.addItem(_fromUtf8(""))
self.line_5 = QtGui.QFrame(self.tab_Gpio)
self.line_5.setGeometry(QtCore.QRect(373, 80, 16, 301))
self.line_5.setFrameShape(QtGui.QFrame.VLine)
self.line_5.setFrameShadow(QtGui.QFrame.Sunken)
self.line_5.setObjectName(_fromUtf8("line_5"))
self.pushButton_GpioStartInputMonitor = QtGui.QPushButton(self.tab_Gpio)
self.pushButton_GpioStartInputMonitor.setGeometry(QtCore.QRect(270, 420, 221, 27))
self.pushButton_GpioStartInputMonitor.setObjectName(_fromUtf8("pushButton_GpioStartInputMonitor"))
self.tabWidget.addTab(self.tab_Gpio, _fromUtf8(""))
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 790, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.actionExit = QtGui.QAction(MainWindow)
self.actionExit.setObjectName(_fromUtf8("actionExit"))
self.menuFile.addAction(self.actionAbout)
self.menuFile.addAction(self.actionExit)
self.menubar.addAction(self.menuFile.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Attify Badge Tool", None))
self.comboBox_UartBaud.setItemText(0, _translate("MainWindow", "110", None))
self.comboBox_UartBaud.setItemText(1, _translate("MainWindow", "300", None))
self.comboBox_UartBaud.setItemText(2, _translate("MainWindow", "600", None))
self.comboBox_UartBaud.setItemText(3, _translate("MainWindow", "1200", None))
self.comboBox_UartBaud.setItemText(4, _translate("MainWindow", "2400", None))
self.comboBox_UartBaud.setItemText(5, _translate("MainWindow", "4800", None))
self.comboBox_UartBaud.setItemText(6, _translate("MainWindow", "9600", None))
self.comboBox_UartBaud.setItemText(7, _translate("MainWindow", "14400", None))
self.comboBox_UartBaud.setItemText(8, _translate("MainWindow", "19200", None))
self.comboBox_UartBaud.setItemText(9, _translate("MainWindow", "38400", None))
self.comboBox_UartBaud.setItemText(10, _translate("MainWindow", "57600", None))
self.comboBox_UartBaud.setItemText(11, _translate("MainWindow", "115200", None))
self.comboBox_UartBaud.setItemText(12, _translate("MainWindow", "128000", None))
self.comboBox_UartBaud.setItemText(13, _translate("MainWindow", "256000", None))
self.pushButton_UartConnect.setText(_translate("MainWindow", "Connect", None))
self.comboBox_UartTerminator.setItemText(0, _translate("MainWindow", "New Line", None))
self.comboBox_UartTerminator.setItemText(1, _translate("MainWindow", "Carriage Return", None))
self.comboBox_UartTerminator.setItemText(2, _translate("MainWindow", "CR + LF", None))
self.comboBox_UartTerminator.setItemText(3, _translate("MainWindow", "No line ending", None))
self.pushButton_UartRefresh.setText(_translate("MainWindow", "Refresh", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_Uart), _translate("MainWindow", " UART ", None))
self.lineEdit_SpiFilePath.setPlaceholderText(_translate("MainWindow", "Enter File Name and Path", None))
self.comboBox_SpiOperation.setItemText(0, _translate("MainWindow", "Find Chip", None))
self.comboBox_SpiOperation.setItemText(1, _translate("MainWindow", "Read", None))
self.comboBox_SpiOperation.setItemText(2, _translate("MainWindow", "Write", None))
self.comboBox_SpiOperation.setItemText(3, _translate("MainWindow", "Erase", None))
self.pushButton_SpiRun.setText(_translate("MainWindow", "Run", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_Spi), _translate("MainWindow", " SPI ", None))
self.pushButton_JtagStartServer.setText(_translate("MainWindow", "Start OpenOCD Server", None))
self.pushButton_JtagConnect.setText(_translate("MainWindow", "Connect to OpenOCD Server", None))
self.pushButton_JtagRunGdb.setText(_translate("MainWindow", "Run GDB", None))
self.lineEdit_JtagElfPath.setPlaceholderText(_translate("MainWindow", " ELF file location", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_Jtag), _translate("MainWindow", " JTAG ", None))
self.lineEdit_I2cFilePath.setPlaceholderText(_translate("MainWindow", "Enter File Name and Path", None))
self.comboBox_I2cOperation.setItemText(0, _translate("MainWindow", "Find Chip", None))
self.comboBox_I2cOperation.setItemText(1, _translate("MainWindow", "Read", None))
self.comboBox_I2cOperation.setItemText(2, _translate("MainWindow", "Write", None))
self.comboBox_I2cOperation.setItemText(3, _translate("MainWindow", "Erase", None))
self.pushButton_I2cRun.setText(_translate("MainWindow", "Run", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_I2c), _translate("MainWindow", " I2C ", None))
self.checkBox_d0.setText(_translate("MainWindow", "Pin D0", None))
self.comboBox_d0.setItemText(0, _translate("MainWindow", "Output", None))
self.comboBox_d0.setItemText(1, _translate("MainWindow", "Input", None))
self.checkBox_d1.setText(_translate("MainWindow", "Pin D1", None))
self.comboBox_d1.setItemText(0, _translate("MainWindow", "Output", None))
self.comboBox_d1.setItemText(1, _translate("MainWindow", "Input", None))
self.comboBox_d2.setItemText(0, _translate("MainWindow", "Output", None))
self.comboBox_d2.setItemText(1, _translate("MainWindow", "Input", None))
self.checkBox_d2.setText(_translate("MainWindow", "Pin D2", None))
self.comboBox_d3.setItemText(0, _translate("MainWindow", "Output", None))
self.comboBox_d3.setItemText(1, _translate("MainWindow", "Input", None))
self.checkBox_d3.setText(_translate("MainWindow", "Pin D3", None))
self.checkBox_d7.setText(_translate("MainWindow", "Pin D7", None))
self.checkBox_d5.setText(_translate("MainWindow", "Pin D5", None))
self.comboBox_d4.setItemText(0, _translate("MainWindow", "Output", None))
self.comboBox_d4.setItemText(1, _translate("MainWindow", "Input", None))
self.checkBox_d6.setText(_translate("MainWindow", "Pin D6", None))
self.comboBox_d5.setItemText(0, _translate("MainWindow", "Output", None))
self.comboBox_d5.setItemText(1, _translate("MainWindow", "Input", None))
self.comboBox_d6.setItemText(0, _translate("MainWindow", "Output", None))
self.comboBox_d6.setItemText(1, _translate("MainWindow", "Input", None))
self.checkBox_d4.setText(_translate("MainWindow", "Pin D4", None))
self.comboBox_d7.setItemText(0, _translate("MainWindow", "Output", None))
self.comboBox_d7.setItemText(1, _translate("MainWindow", "Input", None))
self.pushButton_GpioStartInputMonitor.setText(_translate("MainWindow", "Start Input Monitor ", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_Gpio), _translate("MainWindow", " GPIO ", None))
self.menuFile.setTitle(_translate("MainWindow", "File", None))
self.actionAbout.setText(_translate("MainWindow", "About", None))
self.actionExit.setText(_translate("MainWindow", "Exit", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
|
from .error import *
from .tokens import *
from .events import *
from .nodes import *
from .loader import *
from .dumper import *
__version__ = '3.09'
try:
from .cyaml import *
__with_libyaml__ = True
except ImportError:
__with_libyaml__ = False
import io
def scan(stream, Loader=Loader):
"""
Scan a YAML stream and produce scanning tokens.
"""
loader = Loader(stream)
while loader.check_token():
yield loader.get_token()
def parse(stream, Loader=Loader):
"""
Parse a YAML stream and produce parsing events.
"""
loader = Loader(stream)
while loader.check_event():
yield loader.get_event()
def compose(stream, Loader=Loader):
"""
Parse the first YAML document in a stream
and produce the corresponding representation tree.
"""
loader = Loader(stream)
return loader.get_single_node()
def compose_all(stream, Loader=Loader):
"""
Parse all YAML documents in a stream
and produce corresponding representation trees.
"""
loader = Loader(stream)
while loader.check_node():
yield loader.get_node()
def load(stream, Loader=Loader):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
"""
loader = Loader(stream)
return loader.get_single_data()
def load_all(stream, Loader=Loader):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
"""
loader = Loader(stream)
while loader.check_data():
yield loader.get_data()
def safe_load(stream):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
Resolve only basic YAML tags.
"""
return load(stream, SafeLoader)
def safe_load_all(stream):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
Resolve only basic YAML tags.
"""
return load_all(stream, SafeLoader)
def emit(events, stream=None, Dumper=Dumper,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None):
"""
Emit YAML parsing events into a stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if stream is None:
stream = io.StringIO()
getvalue = stream.getvalue
dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
for event in events:
dumper.emit(event)
if getvalue:
return getvalue()
def serialize_all(nodes, stream=None, Dumper=Dumper,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
"""
Serialize a sequence of representation trees into a YAML stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if stream is None:
if encoding is None:
stream = io.StringIO()
else:
stream = io.BytesIO()
getvalue = stream.getvalue
dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break,
encoding=encoding, version=version, tags=tags,
explicit_start=explicit_start, explicit_end=explicit_end)
dumper.open()
for node in nodes:
dumper.serialize(node)
dumper.close()
if getvalue:
return getvalue()
def serialize(node, stream=None, Dumper=Dumper, **kwds):
"""
Serialize a representation tree into a YAML stream.
If stream is None, return the produced string instead.
"""
return serialize_all([node], stream, Dumper=Dumper, **kwds)
def dump_all(documents, stream=None, Dumper=Dumper,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
"""
Serialize a sequence of Python objects into a YAML stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if stream is None:
if encoding is None:
stream = io.StringIO()
else:
stream = io.BytesIO()
getvalue = stream.getvalue
dumper = Dumper(stream, default_style=default_style,
default_flow_style=default_flow_style,
canonical=canonical, indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break,
encoding=encoding, version=version, tags=tags,
explicit_start=explicit_start, explicit_end=explicit_end)
dumper.open()
for data in documents:
dumper.represent(data)
dumper.close()
if getvalue:
return getvalue()
def dump(data, stream=None, Dumper=Dumper, **kwds):
"""
Serialize a Python object into a YAML stream.
If stream is None, return the produced string instead.
"""
return dump_all([data], stream, Dumper=Dumper, **kwds)
def safe_dump_all(documents, stream=None, **kwds):
"""
Serialize a sequence of Python objects into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
def safe_dump(data, stream=None, **kwds):
"""
Serialize a Python object into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
return dump_all([data], stream, Dumper=SafeDumper, **kwds)
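def _example_safe_round_trip():
    """
    Minimal usage sketch (not part of the original module): safe_load()
    parses a single document into plain Python containers, and safe_dump()
    serializes it back using only basic YAML tags. The sample document is
    illustrative only.
    """
    document = safe_load("name: example\nvalues: [1, 2, 3]\n")
    assert document == {'name': 'example', 'values': [1, 2, 3]}
    return safe_dump(document, default_flow_style=False)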
def add_implicit_resolver(tag, regexp, first=None,
Loader=Loader, Dumper=Dumper):
"""
Add an implicit scalar detector.
If an implicit scalar value matches the given regexp,
the corresponding tag is assigned to the scalar.
first is a sequence of possible initial characters or None.
"""
Loader.add_implicit_resolver(tag, regexp, first)
Dumper.add_implicit_resolver(tag, regexp, first)
def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
"""
Add a path based resolver for the given tag.
A path is a list of keys that forms a path
to a node in the representation tree.
Keys can be string values, integers, or None.
"""
Loader.add_path_resolver(tag, path, kind)
Dumper.add_path_resolver(tag, path, kind)
def add_constructor(tag, constructor, Loader=Loader):
"""
Add a constructor for the given tag.
Constructor is a function that accepts a Loader instance
and a node object and produces the corresponding Python object.
"""
Loader.add_constructor(tag, constructor)
def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
"""
Add a multi-constructor for the given tag prefix.
Multi-constructor is called for a node if its tag starts with tag_prefix.
Multi-constructor accepts a Loader instance, a tag suffix,
and a node object and produces the corresponding Python object.
"""
Loader.add_multi_constructor(tag_prefix, multi_constructor)
def add_representer(data_type, representer, Dumper=Dumper):
"""
Add a representer for the given type.
Representer is a function accepting a Dumper instance
and an instance of the given data type
and producing the corresponding representation node.
"""
Dumper.add_representer(data_type, representer)
def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
"""
Add a representer for the given type.
Multi-representer is a function accepting a Dumper instance
and an instance of the given data type or subtype
and producing the corresponding representation node.
"""
Dumper.add_multi_representer(data_type, multi_representer)
class YAMLObjectMetaclass(type):
"""
The metaclass for YAMLObject.
"""
def __init__(cls, name, bases, kwds):
super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
cls.yaml_dumper.add_representer(cls, cls.to_yaml)
class YAMLObject(metaclass=YAMLObjectMetaclass):
"""
An object that can dump itself to a YAML stream
and load itself from a YAML stream.
"""
__slots__ = () # no direct instantiation, so allow immutable subclasses
yaml_loader = Loader
yaml_dumper = Dumper
yaml_tag = None
yaml_flow_style = None
@classmethod
def from_yaml(cls, loader, node):
"""
Convert a representation node to a Python object.
"""
return loader.construct_yaml_object(node, cls)
@classmethod
def to_yaml(cls, dumper, data):
"""
Convert a Python object to a representation node.
"""
return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
flow_style=cls.yaml_flow_style)
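def _example_yaml_object():
    """
    Minimal usage sketch (not part of the original module): subclassing
    YAMLObject with a yaml_tag registers the class with the default Loader
    and Dumper via the metaclass, so tagged documents round-trip to
    instances. The tag and field below are illustrative only.
    """
    class Example(YAMLObject):
        yaml_tag = '!Example'

        def __init__(self, name=None):
            self.name = name

    obj = load('!Example {name: demo}\n')
    return dump(obj)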
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for Google Connection and Authentication classes.
Information about setting up your Google OAUTH2 credentials:
For libcloud, there are two basic methods for authenticating to Google using
OAUTH2: Service Accounts and Client IDs for Installed Applications.
Both are initially set up from the Cloud Console:
https://cloud.google.com/console
Setting up Service Account authentication (note that you need the PyCrypto
package installed to use this):
- Go to the Console
- Go to your project and then to "APIs & auth" on the left
- Click on "Credentials"
- Click on "Create New Client ID..."
- Select "Service account" and click on "Create Client ID"
- Download the Private Key (should happen automatically).
- The key that you download is a PKCS12 key. It needs to be converted to
the PEM format.
- Convert the key using OpenSSL (the default password is 'notasecret'):
``openssl pkcs12 -in YOURPRIVKEY.p12 -nodes -nocerts
-passin pass:notasecret | openssl rsa -out PRIV.pem``
- Move the .pem file to a safe location.
- To Authenticate, you will need to pass the Service Account's "Email
address" in as the user_id and the path to the .pem file as the key.
Setting up Installed Application authentication:
- Go to the Console
- Go to your project and then to "APIs & auth" on the left
- Click on "Credentials"
- Select "Installed application" and "Other" then click on
"Create Client ID"
- To Authenticate, pass in the "Client ID" as the user_id and the "Client
secret" as the key
- The first time that you do this, libcloud will give you a URL to
visit. Copy and paste the URL into a browser.
- When you go to the URL it will ask you to log in (if you aren't already)
and ask you if you want to allow the project access to your account.
- Click on Accept and you will be given a code.
- Paste that code at the prompt given to you by the Google libcloud
connection.
- At that point, a token & refresh token will be stored in your home
directory and will be used for authentication.
Please remember to secure your keys and access tokens.
"""
from __future__ import with_statement
try:
import simplejson as json
except ImportError:
import json
import base64
import errno
import time
import datetime
import os
import socket
import sys
from libcloud.utils.connection import get_response_object
from libcloud.utils.py3 import httplib, urlencode, urlparse, PY3
from libcloud.common.base import (ConnectionUserAndKey, JsonResponse,
PollingConnection)
from libcloud.common.types import (ProviderError,
LibcloudError)
try:
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
import Crypto.Random
Crypto.Random.atfork()
except ImportError:
# The pycrypto library is unavailable
SHA256 = None
RSA = None
PKCS1_v1_5 = None
TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def _is_gce():
http_code, http_reason, body = _get_gce_metadata()
if http_code == httplib.OK and body:
return True
return False
def _get_gce_metadata(path=''):
try:
url = "http://metadata/computeMetadata/v1/" + path.lstrip('/')
headers = {'Metadata-Flavor': 'Google'}
response = get_response_object(url, headers=headers)
return response.status, "", response.body
except Exception as e:
return -1, str(e), None
class GoogleAuthError(LibcloudError):
"""Generic Error class for various authentication errors."""
def __init__(self, value):
self.value = value
def __repr__(self):
return repr(self.value)
class GoogleBaseError(ProviderError):
def __init__(self, value, http_code, code, driver=None):
self.code = code
super(GoogleBaseError, self).__init__(value, http_code, driver)
class InvalidRequestError(GoogleBaseError):
pass
class JsonParseError(GoogleBaseError):
pass
class ResourceNotFoundError(GoogleBaseError):
def __init__(self, value, http_code, code, driver=None):
self.code = code
if isinstance(value, dict) and 'message' in value and \
value['message'].count('/') == 1 and \
value['message'].count('projects/') == 1:
value['message'] = value['message'] + ". A missing project " \
"error may be an authentication issue. " \
"Please ensure your auth credentials match " \
"your project. "
super(GoogleBaseError, self).__init__(value, http_code, driver)
class QuotaExceededError(GoogleBaseError):
pass
class ResourceExistsError(GoogleBaseError):
pass
class ResourceInUseError(GoogleBaseError):
pass
class GoogleResponse(JsonResponse):
"""
Google Base Response class.
"""
def success(self):
"""
Determine if the request was successful.
For the Google response class, tag all responses as successful and
raise appropriate Exceptions from parse_body.
:return: C{True}
"""
return True
def _get_error(self, body):
"""
Get the error code and message from a JSON response.
Return just the first error if there are multiple errors.
:param body: The body of the JSON response dictionary
:type body: ``dict``
:return: Tuple containing error code and message
:rtype: ``tuple`` of ``str`` or ``int``
"""
if 'errors' in body['error']:
err = body['error']['errors'][0]
else:
err = body['error']
if 'code' in err:
code = err.get('code')
message = err.get('message')
else:
code = err.get('reason', None)
message = body.get('error_description', err)
return (code, message)
def parse_body(self):
"""
Parse the JSON response body, or raise exceptions as appropriate.
:return: JSON dictionary
:rtype: ``dict``
"""
if len(self.body) == 0 and not self.parse_zero_length_body:
return self.body
json_error = False
try:
body = json.loads(self.body)
except:
# If there is both a JSON parsing error and an unsuccessful http
# response (like a 404), we want to raise the http error and not
# the JSON one, so don't raise JsonParseError here.
body = self.body
json_error = True
valid_http_codes = [
httplib.OK,
httplib.CREATED,
httplib.ACCEPTED,
httplib.CONFLICT,
]
if self.status in valid_http_codes:
if json_error:
raise JsonParseError(body, self.status, None)
elif 'error' in body:
(code, message) = self._get_error(body)
if code == 'QUOTA_EXCEEDED':
raise QuotaExceededError(message, self.status, code)
elif code == 'RESOURCE_ALREADY_EXISTS':
raise ResourceExistsError(message, self.status, code)
elif code == 'alreadyExists':
raise ResourceExistsError(message, self.status, code)
elif code.startswith('RESOURCE_IN_USE'):
raise ResourceInUseError(message, self.status, code)
else:
raise GoogleBaseError(message, self.status, code)
else:
return body
elif self.status == httplib.NOT_FOUND:
if (not json_error) and ('error' in body):
(code, message) = self._get_error(body)
else:
message = body
code = None
raise ResourceNotFoundError(message, self.status, code)
elif self.status == httplib.BAD_REQUEST:
if (not json_error) and ('error' in body):
(code, message) = self._get_error(body)
else:
message = body
code = None
raise InvalidRequestError(message, self.status, code)
else:
if (not json_error) and ('error' in body):
(code, message) = self._get_error(body)
else:
message = body
code = None
raise GoogleBaseError(message, self.status, code)
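# A minimal sketch (not part of libcloud) of the two error payload shapes that
# GoogleResponse._get_error() above handles; the sample bodies are
# illustrative assumptions.
def _example_error_payloads():
    # Nested form used by most Google APIs:
    # _get_error() returns (403, 'Quota exceeded') for this body.
    nested = {'error': {'errors': [{'code': 403, 'message': 'Quota exceeded'}]}}
    # Flat OAuth2 form:
    # _get_error() returns ('invalid_grant', 'Bad refresh token') for this body.
    flat = {'error': {'reason': 'invalid_grant'},
            'error_description': 'Bad refresh token'}
    return nested, flat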
class GoogleBaseDriver(object):
name = "Google API"
class GoogleBaseAuthConnection(ConnectionUserAndKey):
"""
Base class for Google Authentication. Should be subclassed for specific
types of authentication.
"""
driver = GoogleBaseDriver
responseCls = GoogleResponse
name = 'Google Auth'
host = 'accounts.google.com'
auth_path = '/o/oauth2/auth'
def __init__(self, user_id, key=None, scopes=None,
redirect_uri='urn:ietf:wg:oauth:2.0:oob',
login_hint=None, **kwargs):
"""
:param user_id: The email address (for service accounts) or Client ID
(for installed apps) to be used for authentication.
:type user_id: ``str``
:param key: The RSA Key (for service accounts) or file path containing
key or Client Secret (for installed apps) to be used for
authentication.
:type key: ``str``
:param scopes: A list of urls defining the scope of authentication
to grant.
:type scopes: ``list``
:keyword redirect_uri: The Redirect URI for the authentication
request. See Google OAUTH2 documentation for
more info.
:type redirect_uri: ``str``
:keyword login_hint: Login hint for authentication request. Useful
for Installed Application authentication.
:type login_hint: ``str``
"""
scopes = scopes or []
self.scopes = " ".join(scopes)
self.redirect_uri = redirect_uri
self.login_hint = login_hint
super(GoogleBaseAuthConnection, self).__init__(user_id, key, **kwargs)
def _now(self):
return datetime.datetime.utcnow()
def add_default_headers(self, headers):
headers['Content-Type'] = "application/x-www-form-urlencoded"
headers['Host'] = self.host
return headers
def _token_request(self, request_body):
"""
Return an updated token from a token request body.
:param request_body: A dictionary of values to send in the body of the
token request.
:type request_body: ``dict``
:return: A dictionary with updated token information
:rtype: ``dict``
"""
data = urlencode(request_body)
now = self._now()
response = self.request('/o/oauth2/token', method='POST', data=data)
token_info = response.object
if 'expires_in' in token_info:
expire_time = now + datetime.timedelta(
seconds=token_info['expires_in'])
token_info['expire_time'] = expire_time.strftime(TIMESTAMP_FORMAT)
return token_info
def refresh_token(self, token_info):
"""
Refresh the current token.
Fetch an updated refresh token from internal metadata service.
:param token_info: Dictionary containing token information.
(Not used, but here for compatibility)
:type token_info: ``dict``
:return: A dictionary containing updated token information.
:rtype: ``dict``
"""
return self.get_new_token()
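# Illustrative sketch (not part of libcloud) of the token_info dictionary that
# _token_request() returns and that GoogleBaseConnection later caches on disk.
# Only the keys are implied by the code above; the values are made up:
#
#   {'access_token': 'ya29.example', 'token_type': 'Bearer',
#    'expires_in': 3600, 'refresh_token': '1/example',
#    'expire_time': '2014-01-01T00:00:00Z'}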
class GoogleInstalledAppAuthConnection(GoogleBaseAuthConnection):
"""Authentication connection for "Installed Application" authentication."""
def get_code(self):
"""
Give the user a URL that they can visit to authenticate and obtain a
code. This method will ask for that code that the user can paste in.
:return: Code supplied by the user after authenticating
:rtype: ``str``
"""
auth_params = {'response_type': 'code',
'client_id': self.user_id,
'redirect_uri': self.redirect_uri,
'scope': self.scopes,
'state': 'Libcloud Request'}
if self.login_hint:
auth_params['login_hint'] = self.login_hint
data = urlencode(auth_params)
url = 'https://%s%s?%s' % (self.host, self.auth_path, data)
print('Please Go to the following URL and sign in:')
print(url)
if PY3:
code = input('Enter Code:')
else:
code = raw_input('Enter Code:')
return code
def get_new_token(self):
"""
Get a new token. Generally used when no previous token exists or there
is no refresh token
:return: Dictionary containing token information
:rtype: ``dict``
"""
# Ask the user for a code
code = self.get_code()
token_request = {'code': code,
'client_id': self.user_id,
'client_secret': self.key,
'redirect_uri': self.redirect_uri,
'grant_type': 'authorization_code'}
return self._token_request(token_request)
def refresh_token(self, token_info):
"""
Use the refresh token supplied in the token info to get a new token.
:param token_info: Dictionary containing current token information
:type token_info: ``dict``
:return: A dictionary containing updated token information.
:rtype: ``dict``
"""
if 'refresh_token' not in token_info:
return self.get_new_token()
refresh_request = {'refresh_token': token_info['refresh_token'],
'client_id': self.user_id,
'client_secret': self.key,
'grant_type': 'refresh_token'}
new_token = self._token_request(refresh_request)
if 'refresh_token' not in new_token:
new_token['refresh_token'] = token_info['refresh_token']
return new_token
class GoogleServiceAcctAuthConnection(GoogleBaseAuthConnection):
"""Authentication class for "Service Account" authentication."""
def __init__(self, user_id, key, *args, **kwargs):
"""
Check to see if PyCrypto is available, and convert key file path into a
key string if the key is in a file.
:param user_id: Email address to be used for Service Account
authentication.
:type user_id: ``str``
:param key: The RSA Key or path to file containing the key.
:type key: ``str``
"""
if SHA256 is None:
raise GoogleAuthError('PyCrypto library required for '
'Service Account Authentication.')
# Check to see if 'key' is a file and read the file if it is.
keypath = os.path.expanduser(key)
is_file_path = os.path.exists(keypath) and os.path.isfile(keypath)
if is_file_path:
with open(keypath, 'r') as f:
key = f.read()
super(GoogleServiceAcctAuthConnection, self).__init__(
user_id, key, *args, **kwargs)
def get_new_token(self):
"""
Get a new token using the email address and RSA Key.
:return: Dictionary containing token information
:rtype: ``dict``
"""
# The header is always the same
header = {'alg': 'RS256', 'typ': 'JWT'}
header_enc = base64.urlsafe_b64encode(json.dumps(header))
# Construct a claim set
claim_set = {'iss': self.user_id,
'scope': self.scopes,
'aud': 'https://accounts.google.com/o/oauth2/token',
'exp': int(time.time()) + 3600,
'iat': int(time.time())}
claim_set_enc = base64.urlsafe_b64encode(json.dumps(claim_set))
# The message contains both the header and claim set
message = '%s.%s' % (header_enc, claim_set_enc)
# Then the message is signed using the key supplied
key = RSA.importKey(self.key)
hash_func = SHA256.new(message)
signer = PKCS1_v1_5.new(key)
signature = base64.urlsafe_b64encode(signer.sign(hash_func))
# Finally the message and signature are sent to get a token
jwt = '%s.%s' % (message, signature)
request = {'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'assertion': jwt}
return self._token_request(request)
class GoogleGCEServiceAcctAuthConnection(GoogleBaseAuthConnection):
"""Authentication class for self-authentication when used with a GCE
    instance that supports serviceAccounts.
"""
def get_new_token(self):
"""
Get a new token from the internal metadata service.
:return: Dictionary containing token information
:rtype: ``dict``
"""
path = '/instance/service-accounts/default/token'
http_code, http_reason, token_info = _get_gce_metadata(path)
if http_code == httplib.NOT_FOUND:
raise ValueError("Service Accounts are not enabled for this "
"GCE instance.")
if http_code != httplib.OK:
raise ValueError("Internal GCE Authorization failed: "
"'%s'" % str(http_reason))
token_info = json.loads(token_info)
if 'expires_in' in token_info:
expire_time = self._now() + datetime.timedelta(
seconds=token_info['expires_in'])
token_info['expire_time'] = expire_time.strftime(TIMESTAMP_FORMAT)
return token_info
class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection):
"""Base connection class for interacting with Google APIs."""
driver = GoogleBaseDriver
responseCls = GoogleResponse
host = 'www.googleapis.com'
poll_interval = 2.0
timeout = 180
def __init__(self, user_id, key=None, auth_type=None,
credential_file=None, scopes=None, **kwargs):
"""
Determine authentication type, set up appropriate authentication
connection and get initial authentication information.
:param user_id: The email address (for service accounts) or Client ID
(for installed apps) to be used for authentication.
:type user_id: ``str``
:param key: The RSA Key (for service accounts) or file path containing
key or Client Secret (for installed apps) to be used for
authentication.
:type key: ``str``
:keyword auth_type: Accepted values are "SA" or "IA" or "GCE"
("Service Account" or "Installed Application" or
"GCE" if libcloud is being used on a GCE instance
with service account enabled).
                            If not supplied, auth_type will be guessed based
                            on the value of user_id, or on whether the code
                            is running on a GCE instance.
:type auth_type: ``str``
:keyword credential_file: Path to file for caching authentication
information.
:type credential_file: ``str``
:keyword scopes: List of OAuth2 scope URLs. The empty default sets
read/write access to Compute, Storage, and DNS.
:type scopes: ``list``
"""
self.credential_file = credential_file or '~/.gce_libcloud_auth'
if auth_type is None:
# Try to guess.
if '@' in user_id:
auth_type = 'SA'
elif _is_gce():
auth_type = 'GCE'
else:
auth_type = 'IA'
# Default scopes to read/write for compute, storage, and dns. Can
# override this when calling get_driver() or setting in secrets.py
self.scopes = scopes
if not self.scopes:
self.scopes = [
'https://www.googleapis.com/auth/compute',
'https://www.googleapis.com/auth/devstorage.full_control',
'https://www.googleapis.com/auth/ndev.clouddns.readwrite',
]
self.token_info = self._get_token_info_from_file()
if auth_type == 'GCE':
self.auth_conn = GoogleGCEServiceAcctAuthConnection(
user_id, self.scopes, **kwargs)
elif auth_type == 'SA':
if '@' not in user_id:
raise GoogleAuthError('Service Account auth requires a '
'valid email address')
self.auth_conn = GoogleServiceAcctAuthConnection(
user_id, key, self.scopes, **kwargs)
elif auth_type == 'IA':
self.auth_conn = GoogleInstalledAppAuthConnection(
user_id, key, self.scopes, **kwargs)
else:
raise GoogleAuthError('Invalid auth_type: %s' % str(auth_type))
if self.token_info is None:
self.token_info = self.auth_conn.get_new_token()
self._write_token_info_to_file()
self.token_expire_time = datetime.datetime.strptime(
self.token_info['expire_time'], TIMESTAMP_FORMAT)
super(GoogleBaseConnection, self).__init__(user_id, key, **kwargs)
python_ver = '%s.%s.%s' % (sys.version_info[0], sys.version_info[1],
sys.version_info[2])
ver_platform = 'Python %s/%s' % (python_ver, sys.platform)
self.user_agent_append(ver_platform)
def _now(self):
return datetime.datetime.utcnow()
def add_default_headers(self, headers):
"""
@inherits: :class:`Connection.add_default_headers`
"""
headers['Content-Type'] = "application/json"
headers['Host'] = self.host
return headers
def pre_connect_hook(self, params, headers):
"""
Check to make sure that token hasn't expired. If it has, get an
updated token. Also, add the token to the headers.
@inherits: :class:`Connection.pre_connect_hook`
"""
now = self._now()
if self.token_expire_time < now:
self.token_info = self.auth_conn.refresh_token(self.token_info)
self.token_expire_time = datetime.datetime.strptime(
self.token_info['expire_time'], TIMESTAMP_FORMAT)
self._write_token_info_to_file()
headers['Authorization'] = 'Bearer %s' % (
self.token_info['access_token'])
return params, headers
def encode_data(self, data):
"""Encode data to JSON"""
return json.dumps(data)
def request(self, *args, **kwargs):
"""
@inherits: :class:`Connection.request`
"""
# Adds some retry logic for the occasional
# "Connection Reset by peer" error.
retries = 4
tries = 0
while tries < (retries - 1):
try:
return super(GoogleBaseConnection, self).request(
*args, **kwargs)
except socket.error:
e = sys.exc_info()[1]
if e.errno == errno.ECONNRESET:
tries = tries + 1
else:
raise e
# One more time, then give up.
return super(GoogleBaseConnection, self).request(*args, **kwargs)
def _get_token_info_from_file(self):
"""
Read credential file and return token information.
:return: Token information dictionary, or None
:rtype: ``dict`` or ``None``
"""
token_info = None
filename = os.path.realpath(os.path.expanduser(self.credential_file))
try:
with open(filename, 'r') as f:
data = f.read()
token_info = json.loads(data)
except IOError:
pass
return token_info
def _write_token_info_to_file(self):
"""
Write token_info to credential file.
"""
filename = os.path.realpath(os.path.expanduser(self.credential_file))
data = json.dumps(self.token_info)
with open(filename, 'w') as f:
f.write(data)
def has_completed(self, response):
"""
Determine if operation has completed based on response.
:param response: JSON response
:type response: I{responseCls}
:return: True if complete, False otherwise
:rtype: ``bool``
"""
if response.object['status'] == 'DONE':
return True
else:
return False
def get_poll_request_kwargs(self, response, context, request_kwargs):
"""
@inherits: :class:`PollingConnection.get_poll_request_kwargs`
"""
return {'action': response.object['selfLink']}
def morph_action_hook(self, action):
"""
Update action to correct request path.
In many places, the Google API returns a full URL to a resource.
This will strip the scheme and host off of the path and just return
the request. Otherwise, it will append the base request_path to
the action.
:param action: The action to be called in the http request
:type action: ``str``
:return: The modified request based on the action
:rtype: ``str``
"""
if action.startswith('https://'):
u = urlparse.urlsplit(action)
request = urlparse.urlunsplit(('', '', u[2], u[3], u[4]))
else:
request = self.request_path + action
return request
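# Illustrative sketch (not part of libcloud) of what morph_action_hook() above
# does with the two kinds of input it receives; the URLs and request_path are
# assumptions:
#
#   'https://www.googleapis.com/compute/v1/projects/p/zones'
#       -> '/compute/v1/projects/p/zones'
#   'zones' with request_path '/compute/v1/projects/p/'
#       -> '/compute/v1/projects/p/zones'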
|
|
"""Useful rrd utility functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import socket
import time
import six
if six.PY2 and os.name == 'posix':
import subprocess32 as subprocess # pylint: disable=import-error
else:
import subprocess # pylint: disable=wrong-import-order
from treadmill import fs
from treadmill import subproc
_LOGGER = logging.getLogger(__name__)
# This is the rrd fields spec
_METRICS_FMT = ':'.join(['{%s}' % svc for svc in [
'memusage',
'softmem',
'hardmem',
'cputotal',
'cpuusage',
'cpuusage_ratio',
'blk_read_iops',
'blk_write_iops',
'blk_read_bps',
'blk_write_bps',
'fs_used_bytes'
]])
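# Illustrative sketch (not part of the original module): filled in by update()
# below with a metrics dict, _METRICS_FMT produces the value part of an
# rrdcached UPDATE command, e.g. (values made up):
#
#   _METRICS_FMT.format(**data)
#   -> '1048576:0:2097152:1234:0.5:0.1:10:5:4096:2048:65536'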
RRDTOOL = 'rrdtool'
SOCKET = '/tmp/treadmill.rrd'
# Tracks which RRA to query for the first data point, according to the
# timeframe.
TIMEFRAME_TO_RRA_IDX = {"short": "0", "long": "1"}
class RRDError(Exception):
"""RRD protocol error."""
class RRDClient(object):
"""RRD socket client."""
def __init__(self, path):
_LOGGER.info('Initializing rrdclient: %s', path)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(path)
self.rrd = sock.makefile('rw')
def command(self, line, oneway=False):
"""Sends rrd command and checks the output."""
line = line.strip()
if not line.startswith('UPDATE'):
_LOGGER.info('rrd command: %s', line)
self.rrd.write(line + '\n')
self.rrd.flush()
if oneway:
self.rrd.close()
return
reply = self.rrd.readline()
status, _msg = reply.split(' ', 1)
status = int(status)
if status < 0:
raise RRDError(reply)
for _ in six.moves.range(0, status):
reply = self.rrd.readline()
_LOGGER.info('rrd reply: %s', reply)
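    # Illustrative sketch (not part of the original module) of the rrdcached
    # reply that command() parses: a status line "<N> <message>", where a
    # negative N signals an error and a non-negative N is the number of detail
    # lines that follow, e.g. (reply text is an assumption):
    #
    #   0 errors, enqueued 1 value(s)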
def create(self, rrd_file, step, interval):
"""Creates rrd file for application metrics."""
_LOGGER.info('creating %r', rrd_file)
fs.rm_safe(rrd_file)
self.command(' '.join([
'CREATE',
rrd_file,
'-s', str(step),
'-b', str(int(time.time())),
'DS:memory_usage:GAUGE:%s:0:U' % interval,
'DS:memory_softlimit:GAUGE:%s:0:U' % interval,
'DS:memory_hardlimit:GAUGE:%s:0:U' % interval,
'DS:cpu_total:COUNTER:%s:0:U' % interval,
'DS:cpu_usage:GAUGE:%s:0:U' % interval,
'DS:cpu_ratio:GAUGE:%s:0:U' % interval,
'DS:blk_read_iops:COUNTER:%s:0:U' % interval,
'DS:blk_write_iops:COUNTER:%s:0:U' % interval,
'DS:blk_read_bps:COUNTER:%s:0:U' % interval,
'DS:blk_write_bps:COUNTER:%s:0:U' % interval,
'DS:fs_used_bytes:GAUGE:%s:0:U' % interval,
'RRA:MIN:0.5:{}s:20m'.format(step),
'RRA:MIN:0.5:10m:3d',
'RRA:MAX:0.5:{}s:20m'.format(step),
'RRA:MAX:0.5:10m:3d',
'RRA:AVERAGE:0.5:{}s:20m'.format(step),
'RRA:AVERAGE:0.5:10m:3d',
]))
def update(self, rrdfile, data, metrics_time=None, update_str=None):
"""Updates rrd file with data, create if does not exist."""
if metrics_time is None:
metrics_time = int(time.time())
rrd_update_str = update_str or ':'.join(
[str(metrics_time), _METRICS_FMT.format(**data)]
)
try:
self.command('UPDATE %s %s' % (rrdfile, rrd_update_str))
except RRDError:
# TODO: rather than deleting the file, better to
# create new one with --source <old> option, so that
# data is imported. (see rrdtool create docs).
_LOGGER.exception('Error updating: %s', rrdfile)
fs.rm_safe(rrdfile)
def flush(self, rrdfile, oneway=False):
"""Send flush request to the rrd cache daemon."""
self.command('FLUSH ' + rrdfile, oneway)
def forget(self, rrdfile, oneway=False):
"""Send forget request to the rrd cache daemon."""
try:
self.command('FORGET ' + rrdfile, oneway)
except RRDError:
# File does not exist, ignore.
if not os.path.exists(os.path.realpath(rrdfile)):
pass
def flush_noexc(rrdfile, rrd_socket=SOCKET):
"""Send flush request to the rrd cache daemon."""
try:
rrdclient = RRDClient(rrd_socket)
except Exception: # pylint: disable=W0703
return
try:
rrdclient.flush(rrdfile, oneway=True)
except Exception: # pylint: disable=W0703
# Make it not fatal error.
_LOGGER.exception('error sending command to rrdcache on %s',
rrd_socket)
finally:
rrdclient.rrd.close()
def forget_noexc(rrdfile, rrd_socket=SOCKET):
"""Send flush request to the rrd cache daemon."""
try:
rrdclient = RRDClient(rrd_socket)
except Exception: # pylint: disable=W0703
return
try:
rrdclient.forget(rrdfile, oneway=True)
except Exception: # pylint: disable=W0703
# Make it not fatal error.
_LOGGER.exception('error sending command to rrdcache on %s',
rrd_socket)
finally:
rrdclient.rrd.close()
def first(rrdfile, timeframe, rrdtool=RRDTOOL, rrd_socket=SOCKET,
exec_on_node=True):
"""
Returns the UNIX timestamp of the first data sample entered into the RRD.
"""
try:
rra_idx = TIMEFRAME_TO_RRA_IDX[timeframe]
except KeyError:
rra_idx = TIMEFRAME_TO_RRA_IDX['short']
if exec_on_node:
epoch = subproc.check_output([rrdtool, 'first', rrdfile,
'--daemon', 'unix:%s' % rrd_socket,
'--rraindex', rra_idx])
else:
epoch = subprocess.check_output([rrdtool, 'first', rrdfile,
'--rraindex', rra_idx])
return epoch.strip()
def last(rrdfile, rrdtool=RRDTOOL, rrd_socket=SOCKET, exec_on_node=True):
"""
Returns the UNIX timestamp of the most recent update of the RRD.
"""
if exec_on_node:
epoch = subproc.check_output([rrdtool, 'last', '--daemon',
'unix:%s' % rrd_socket, rrdfile])
else:
epoch = subprocess.check_output([rrdtool, 'last', rrdfile])
return epoch.strip()
def lastupdate(rrdfile, rrdtool=RRDTOOL, rrd_socket=SOCKET):
"""Get lastupdate metric"""
    last_update = subproc.check_output([rrdtool, 'lastupdate', '--daemon',
                                        'unix:%s' % rrd_socket, rrdfile])
    [titles, _empty, data_str] = last_update.strip().split('\n')
(timestamp, value_str) = data_str.split(':')
values = value_str.strip().split(' ')
result = {'timestamp': int(timestamp)}
for idx, title in enumerate(titles.strip().split(' ')):
try:
result[title] = int(values[idx])
except ValueError:
            # cannot be converted to int
try:
result[title] = float(values[idx])
except ValueError:
                # the value may be 'U' (unknown)
result[title] = 0
return result
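# Illustrative sketch (not part of the original module) of the
# `rrdtool lastupdate` output that the parsing above expects: a header line of
# DS names, a blank line, then "<timestamp>: <values>" (values made up):
#
#   memory_usage memory_softlimit ... fs_used_bytes
#
#   1500000000: 1048576 0 ... 65536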
def get_json_metrics(rrdfile, timeframe, rrdtool=RRDTOOL, rrd_socket=SOCKET):
"""Return the metrics in the rrd file as a json string."""
_LOGGER.info('Get the metrics in JSON for %s', rrdfile)
# the command sends a FLUSH to the rrdcached implicitly...
cmd = [rrdtool, 'graph', '-', '--daemon', 'unix:%s' % rrd_socket,
'--imgformat', 'JSONTIME',
'--start=%s' % first(rrdfile, timeframe),
'--end=%s' % last(rrdfile),
# mem usage
'DEF:memory_usage=%s:memory_usage:MAX' % rrdfile,
'LINE:memory_usage:memory usage',
# mem hardlimit
'DEF:memory_hardlimit=%s:memory_hardlimit:MAX' % rrdfile,
'LINE:memory_hardlimit:memory limit',
# cpu usage
'DEF:cpu_usage=%s:cpu_usage:AVERAGE' % rrdfile,
'LINE:cpu_usage:cpu usage',
# cpu ratio
'DEF:cpu_ratio=%s:cpu_ratio:AVERAGE' % rrdfile,
'LINE:cpu_ratio:cpu ratio',
# blk read
'DEF:blk_read_iops=%s:blk_read_iops:MAX' % rrdfile,
'LINE:blk_read_iops:read iops',
# blk write
'DEF:blk_write_iops=%s:blk_write_iops:MAX' % rrdfile,
'LINE:blk_write_iops:write iops',
# blk read
'DEF:blk_read_bps=%s:blk_read_bps:MAX' % rrdfile,
'LINE:blk_read_bps:read bps',
# blk write
'DEF:blk_write_bps=%s:blk_write_bps:MAX' % rrdfile,
'LINE:blk_write_bps:write bps',
# fs_used_bytes
'DEF:fs_used_bytes=%s:fs_used_bytes:MAX' % rrdfile,
'LINE:fs_used_bytes:used bytes']
return subproc.check_output(cmd)
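# Illustrative usage sketch (not part of the original module); the file path
# is an assumption:
#
#   metrics_json = get_json_metrics('/var/tmp/treadmill/metrics/app.rrd',
#                                   'short')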
|
|
#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import shutil
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
server_requirements = [['--innodb-file-per-table --innodb_file_format=Barracuda']]
servers = []
server_manager = None
test_executor = None
# we explicitly use the --no-timestamp option
# here. We will be using a generic / vanilla backup dir
backup_path = None
def skip_checks(system_manager):
if not system_manager.code_manager.test_tree.xtradb_version:
return True, "Test requires XtraDB."
return False, ''
class basicTest(mysqlBaseTestCase):
def setUp(self):
master_server = servers[0] # assumption that this is 'master'
backup_path = os.path.join(master_server.vardir, 'backup')
# remove backup paths
for del_path in [backup_path]:
if os.path.exists(del_path):
shutil.rmtree(del_path)
def create_test_table(self, table_name, server):
queries = ["DROP TABLE IF EXISTS %s" %(table_name)
,("CREATE TABLE %s "
"(`a` int(11) DEFAULT NULL, "
"`number` int(11) DEFAULT NULL) "
" ENGINE=InnoDB DEFAULT CHARSET=latin1"
%(table_name)
)
]
retcode, result = self.execute_queries(queries, server)
self.assertEqual(retcode, 0, msg = result)
def load_table(self, table_name, row_count, server):
queries = []
for i in range(row_count):
queries.append("INSERT INTO %s VALUES (%d, %d)" %(table_name,i, row_count))
retcode, result = self.execute_queries(queries, server)
self.assertEqual(retcode, 0, msg=result)
def test_xb_export(self):
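        """
        Summary added for readability (not part of the original test): take a
        --backup, prepare it with --export, DISCARD/IMPORT the tablespace on a
        reinitialized server, then back up and restore once more, comparing
        table checksums along the way.
        """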
self.servers = servers
master_server = servers[0]
logging = test_executor.logging
xtradb_version = master_server.get_xtradb_version()
if not xtradb_version:
logging.warning("Test requires XtraDB, skipping test...")
return
else:
innobackupex = test_executor.system_manager.innobackupex_path
xtrabackup = test_executor.system_manager.xtrabackup_path
master_server = servers[0] # assumption that this is 'master'
backup_path = os.path.join(master_server.vardir, 'backup')
output_path = os.path.join(master_server.vardir, 'innobackupex.out')
exec_path = os.path.dirname(innobackupex)
table_name = "`test`"
schema_name = "test"
# This is a bit hacky. We have a version-dependent server
# option and no clean mechanism for doing this at test-collection
# time
import_option = "--innodb_expand_import=1"
if master_server.version.startswith("5.5"):
import_option = "--innodb_import_table_from_xtrabackup=1"
master_server.server_options.append(import_option)
master_server.stop()
master_server.start()
# populate our server with a test bed
self.create_test_table(table_name, master_server)
row_count = 100
self.load_table(table_name, row_count, master_server)
# take a backup
cmd = [ xtrabackup
, "--defaults-file=%s" %master_server.cnf_file
, "--datadir=%s" %master_server.datadir
, "--backup"
, "--target-dir=%s" %backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertTrue(retcode==0,output)
# load more data
row_count = 100
self.load_table(table_name, row_count, master_server)
# Get a checksum for our table
query = "CHECKSUM TABLE %s" %table_name
retcode, checksum1 = self.execute_query(query, master_server)
self.assertEqual(retcode, 0, msg=checksum1)
logging.test_debug("Checksum1: %s" %checksum1)
# Reset the server to treat it as the 'importing' server
# We clean the datadir + create a table w/ similar
# structure
# TODO: see how things fail if we skip / mess up this step
master_server.stop()
master_server.restore_snapshot()
master_server.start()
# recreate the table:
self.create_test_table(table_name, master_server)
logging.test_debug("Server reinitialized")
# discard the tablespace
query = "ALTER TABLE %s DISCARD TABLESPACE" %(table_name)
retcode, result = self.execute_query(query, master_server)
self.assertEqual(retcode,0,msg=result)
# prepare our main backup
        # Test that with innodb_file_per_table=0, --export bails out with an error
# Bug #758888
cmd = [ xtrabackup
, "--prepare"
, "--datadir=%s" %master_server.datadir
, "--use-memory=500M"
, "--target-dir=%s" %backup_path
, "--export"
, "--innodb-file-per-table=0"
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertEqual(retcode,1,output)
# prepare our main backup
cmd = [ xtrabackup
, "--prepare"
, "--datadir=%s" %master_server.datadir
, "--use-memory=500M"
, "--target-dir=%s" %backup_path
, "--export"
, "--innodb-file-per-table=1"
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertEqual(retcode,0,output)
# copy our data files back
db_path = os.path.join(backup_path,schema_name)
for bkp_file in os.listdir(db_path):
if bkp_file.startswith('test'):
shutil.copy(os.path.join(db_path,bkp_file)
,os.path.join(master_server.datadir,schema_name)
)
# import the tablespace
query = "ALTER TABLE %s IMPORT TABLESPACE" %(table_name)
retcode, result = self.execute_query(query, master_server)
self.assertEqual(retcode,0,msg=result)
logging.test_debug("Tablespace imported...")
        # Tablespace import is asynchronous, so shut down the server to get
        # consistent backup results. Otherwise we risk ending up with no
        # test.ibd in the backup if the import has not finished before the
        # backup is taken.
master_server.stop()
master_server.start()
self.assertTrue(master_server.status==1, 'Server failed restart from restored datadir...')
query="SELECT COUNT(*) FROM %s" %(table_name)
retcode, result = self.execute_query(query, master_server)
self.assertEqual(retcode,0,result)
expected_result = ((100L,),)
self.assertEqual(expected_result, result, msg = "%s || %s" %(expected_result, result))
query = "SHOW CREATE TABLE %s" %(table_name)
retcode, result = self.execute_query(query, master_server)
self.assertEqual(retcode,0,result)
expected_result = (('test', 'CREATE TABLE `test` (\n `a` int(11) DEFAULT NULL,\n `number` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=latin1'),)
self.assertEqual(expected_result, result, msg = "%s || %s" %(expected_result, result))
# Get a checksum for our table
query = "CHECKSUM TABLE %s" %table_name
retcode, checksum2 = self.execute_query(query, master_server)
self.assertEqual(retcode, 0, msg=checksum2)
logging.test_debug("Checksum2: %s" %checksum2)
if checksum1 != checksum2:
logging.warning("Initial and exported/restored checksums do not match!")
logging.warning("Checksum1: %s" %checksum1)
logging.warning("Checksum2: %s" %checksum2)
#self.assertEqual(checksum1,checksum2,msg="%s || %s" %(checksum1,checksum2))
# create a dir to hold the new backup
backup_path = os.path.join(backup_path,'new_backup')
# take a backup of the imported table
cmd = [ xtrabackup
, "--defaults-file=%s" %master_server.cnf_file
, "--datadir=%s" %master_server.datadir
, "--backup"
, "--target-dir=%s" %backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertTrue(retcode==0,output)
# Clear our table so we know the backup restored
for del_table in [table_name]:
query = "DELETE FROM %s" %del_table
retcode, result = self.execute_query(query,master_server)
self.assertEqual(retcode, 0, result)
# shutdown our server
master_server.stop()
# prepare our main backup
cmd = [ xtrabackup
, "--prepare"
, "--apply-log-only"
, "--datadir=%s" %master_server.datadir
, "--use-memory=500M"
, "--target-dir=%s" %backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertTrue(retcode==0,output)
# do final prepare on main backup
cmd = [ xtrabackup
, "--prepare"
, "--datadir=%s" %master_server.datadir
, "--use-memory=500M"
, "--target-dir=%s" %backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertTrue(retcode==0,output)
# copy our data files back
for root, dirs, files in os.walk(backup_path):
if files:
file_info = root.split(backup_path)[1]
for file_name in files:
                    # We do a quick check to make sure no names start with
                    # '/', as os.path throws a hissy fit when it sees such
                    # things
if file_info.startswith('/'):
file_info = file_info[1:]
if file_name.startswith('/'):
file_name = file_name[1:]
to_path = os.path.join(master_server.datadir
, file_info
, file_name)
new_dir = os.path.dirname(to_path)
try:
if not os.path.exists(new_dir):
os.makedirs(new_dir)
except OSError, e:
logging.error("Could not create directory: %s | %s" %(new_dir, e))
try:
shutil.copy(os.path.join(root,file_name),to_path)
except IOError, e:
logging.error( "ERROR: Could not copy file: %s | %s" %(file_name, e))
# restart server (and ensure it doesn't crash)
master_server.start()
self.assertTrue(master_server.status==1, 'Server failed restart from restored datadir...')
# Get a checksum for our table
query = "CHECKSUM TABLE %s" %table_name
retcode, checksum3 = self.execute_query(query, master_server)
self.assertEqual(retcode, 0, msg=checksum3)
logging.test_debug("Checksum3: %s" %checksum3)
self.assertEqual(checksum2,checksum3,msg="%s || %s" %(checksum2,checksum3))
|
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import division
import os
import unittest
from telemetry import decorators
from telemetry import story
from telemetry.testing import legacy_page_test_case
from telemetry.timeline import async_slice
from telemetry.timeline import model as model_module
from benchmarks import blink_perf
_BLINK_PERF_TEST_DATA_DIR = os.path.join(os.path.dirname(__file__),
'..', '..', '..', 'third_party', 'blink', 'perf_tests', 'test_data')
_BLINK_PERF_RESOURCES_DIR = os.path.join(os.path.dirname(__file__),
'..', '..', '..', 'third_party', 'blink', 'perf_tests', 'resources')
def _Mean(values):
return sum(values) / len(values)
class BlinkPerfTest(legacy_page_test_case.LegacyPageTestCase):
def setUp(self):
super(BlinkPerfTest, self).setUp()
# pylint: disable=protected-access
self.blink_page_test = blink_perf._BlinkPerfMeasurement()
# pylint: enable=protected-access
def HasChromeTraces(self):
return any(name.startswith('trace/traceEvents/')
for name in self.test_result['outputArtifacts'])
@staticmethod
def CreateStorySetForTest(url):
story_set = story.StorySet(
base_dir=_BLINK_PERF_TEST_DATA_DIR,
serving_dirs=[_BLINK_PERF_TEST_DATA_DIR, _BLINK_PERF_RESOURCES_DIR])
assert url.startswith('file://'), 'Expected local URI, got %s' % url
blink_page = blink_perf._BlinkPerfPage( # pylint: disable=protected-access
url, story_set, base_dir=story_set.base_dir, name=url[len('file://'):])
story_set.AddStory(blink_page)
return story_set
@decorators.Disabled('chromeos') # Flaky: https://crbug.com/1271916
def testBlinkPerfTracingMetricsForMeasureTime(self):
measurements = self.RunPageTest(
self.blink_page_test, 'file://append-child-measure-time.html')
self.assertTrue(self.HasChromeTraces())
frame_view_layouts = measurements['LocalFrameView::layout']['samples']
# append-child-measure-time.html specifies 5 iterationCount.
self.assertEquals(len(frame_view_layouts), 5)
self.assertGreater(_Mean(frame_view_layouts), 0.001)
update_layout_trees = measurements['UpdateLayoutTree']['samples']
# append-child-measure-time.html specifies 5 iterationCount.
self.assertEquals(len(update_layout_trees), 5)
self.assertGreater(_Mean(update_layout_trees), 0.001)
@decorators.Disabled('chromeos') # Flaky: https://crbug.com/1284873
def testBlinkPerfTracingMetricsForMeasureFrameTime(self):
measurements = self.RunPageTest(
self.blink_page_test, 'file://color-changes-measure-frame-time.html')
self.assertTrue(self.HasChromeTraces())
frame_view_prepaints = measurements[
'LocalFrameView::RunPrePaintLifecyclePhase']['samples']
# color-changes-measure-frame-time.html specifies 10 iterationCount.
self.assertEquals(len(frame_view_prepaints), 10)
self.assertGreater(_Mean(frame_view_prepaints), 0.001)
frame_view_painttrees = measurements[
'LocalFrameView::RunPaintLifecyclePhase']['samples']
# color-changes-measure-frame-time.html specifies 10 iterationCount.
self.assertEquals(len(frame_view_painttrees), 10)
self.assertGreater(_Mean(frame_view_painttrees), 0.001)
@decorators.Disabled('linux',
'chromeos') # Disable due to flaky: crbug.com/1240931
def testBlinkPerfTracingMetricsForMeasurePageLoadTime(self):
measurements = self.RunPageTest(
self.blink_page_test, 'file://simple-html-measure-page-load-time.html')
self.assertTrue(self.HasChromeTraces())
create_child_frame = measurements[
'WebLocalFrameImpl::createChildframe']['samples']
    # simple-html-measure-page-load-time.html specifies 7 iterationCount.
self.assertEquals(len(create_child_frame), 7)
self.assertGreater(_Mean(create_child_frame), 0.001)
post_layout_task = measurements[
'LocalFrameView::performPostLayoutTasks']['samples']
    # simple-html-measure-page-load-time.html specifies 7 iterationCount.
self.assertEquals(len(post_layout_task), 7)
self.assertGreater(_Mean(post_layout_task), 0.001)
@decorators.Disabled('mac', # Flaky on mac: crbug.com/960554
'chromeos') # Flaky on CrOS: crbug.com/1275110
def testBlinkPerfTracingMetricsForMeasureAsync(self):
measurements = self.RunPageTest(
self.blink_page_test, 'file://simple-blob-measure-async.html')
self.assertTrue(self.HasChromeTraces())
blob_requests = measurements['BlobRequest']['samples']
blob_readers = measurements['BlobReader']['samples']
# simple-blob-measure-async.html specifies 6 iterationCount.
self.assertEquals(len(blob_requests), 6)
self.assertEquals(len(blob_readers), 6)
# TODO(mek): Delete non-mojo code paths when blobs are always using mojo.
using_mojo = _Mean(blob_readers) > 0.001
if using_mojo:
self.assertEquals(_Mean(blob_requests), 0)
self.assertGreater(_Mean(blob_readers), 0.001)
else:
self.assertGreater(_Mean(blob_requests), 0.001)
self.assertEquals(_Mean(blob_readers), 0)
if using_mojo:
read_data = measurements['BlobReader::ReadMore']['samples']
else:
read_data = measurements['BlobRequest::ReadRawData']['samples']
# simple-blob-measure-async.html specifies 6 iterationCount.
self.assertEquals(len(read_data), 6)
self.assertGreater(_Mean(read_data), 0.001)
def testBlinkPerfLifecycleMethods(self):
self.RunPageTest(self.blink_page_test, 'file://lifecycle-methods.html')
self.assertFalse(self.HasChromeTraces())
@decorators.Disabled('linux',
'chromeos') # Disable due to flaky: crbug.com/1163628
def testExtraChromeCategories(self):
self.options.extra_chrome_categories = 'cc,blink'
self.RunPageTest(self.blink_page_test, 'file://lifecycle-methods.html')
self.assertTrue(self.HasChromeTraces())
# pylint: disable=protected-access
# This is needed for testing _ComputeTraceEventsThreadTimeForBlinkPerf method.
class ComputeTraceEventsMetricsForBlinkPerfTest(unittest.TestCase):
def _AddAsyncSlice(self, renderer_thread, category, name, start, end):
s = async_slice.AsyncSlice(
category, name,
timestamp=start, duration=end - start, start_thread=renderer_thread,
end_thread=renderer_thread)
renderer_thread.AddAsyncSlice(s)
def _AddBlinkTestSlice(self, renderer_thread, start, end):
self._AddAsyncSlice(
renderer_thread, 'blink', 'blink_perf.runTest', start, end)
def testTraceEventMetricsSingleBlinkTest(self):
model = model_module.TimelineModel()
renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
renderer_main.name = 'CrRendererMain'
# Set up a main thread model that looks like:
# [ blink_perf.run_test ]
# | [ foo ] [ bar ] [ baz ]
# | | | | | | | |
# 100 120 140 400 420 500 550 600
# | | |
# CPU dur: 15 18 70
#
self._AddBlinkTestSlice(renderer_main, 100, 550)
renderer_main.BeginSlice('blink', 'foo', 120, 122)
renderer_main.EndSlice(140, 137)
renderer_main.BeginSlice('blink', 'bar', 400, 402)
renderer_main.EndSlice(420, 420)
# Since this "baz" slice has CPU duration = 70ms, wall-time duration = 100ms
    # & its overlapped wall-time with "blink_perf.run_test" is 50 ms, its
# overlapped CPU time with "blink_perf.run_test" is
# 50 * 70 / 100 = 35ms.
renderer_main.BeginSlice('blink', 'baz', 500, 520)
renderer_main.EndSlice(600, 590)
self.assertEquals(
blink_perf._ComputeTraceEventsThreadTimeForBlinkPerf(
model, renderer_main, ['foo', 'bar', 'baz']),
{'foo': [15], 'bar': [18], 'baz': [35]})
def testTraceEventMetricsMultiBlinkTest(self):
model = model_module.TimelineModel()
renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
renderer_main.name = 'CrRendererMain'
# Set up a main thread model that looks like:
# [ blink_perf.run_test ] [ blink_perf.run_test ]
# | [ foo ] [ bar ] | [ | foo ] |
# | | | | | | | | | | |
# 100 120 140 400 420 440 500 520 600 640
# | | |
# CPU dur: 15 18 40
#
self._AddBlinkTestSlice(renderer_main, 100, 440)
self._AddBlinkTestSlice(renderer_main, 520, 640)
renderer_main.BeginSlice('blink', 'foo', 120, 122)
renderer_main.EndSlice(140, 137)
renderer_main.BeginSlice('blink', 'bar', 400, 402)
renderer_main.EndSlice(420, 420)
# Since this "foo" slice has CPU duration = 40ms, wall-time duration = 100ms
    # & its overlapped wall-time with "blink_perf.run_test" is 80 ms, its
# overlapped CPU time with "blink_perf.run_test" is
# 80 * 40 / 100 = 32ms.
renderer_main.BeginSlice('blink', 'foo', 500, 520)
renderer_main.EndSlice(600, 560)
self.assertEquals(
blink_perf._ComputeTraceEventsThreadTimeForBlinkPerf(
model, renderer_main, ['foo', 'bar', 'baz']),
{'foo': [15, 32], 'bar': [18, 0], 'baz': [0, 0]})
def testTraceEventMetricsNoThreadTimeAvailable(self):
model = model_module.TimelineModel()
renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
renderer_main.name = 'CrRendererMain'
# Set up a main thread model that looks like:
# [ blink_perf.run_test ]
# | [ foo ] [ bar ] |
# | | | | | |
# 100 120 140 400 420 550
# | |
# CPU dur: None None
#
self._AddBlinkTestSlice(renderer_main, 100, 550)
renderer_main.BeginSlice('blink', 'foo', 120)
renderer_main.EndSlice(140)
renderer_main.BeginSlice('blink', 'bar', 400)
renderer_main.EndSlice(420)
self.assertEquals(
blink_perf._ComputeTraceEventsThreadTimeForBlinkPerf(
model, renderer_main, ['foo', 'bar']),
{'foo': [20], 'bar': [20]})
def testTraceEventMetricsMultiBlinkTestCrossProcesses(self):
model = model_module.TimelineModel()
renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
renderer_main.name = 'CrRendererMain'
foo_thread = model.GetOrCreateProcess(2).GetOrCreateThread(4)
bar_thread = model.GetOrCreateProcess(2).GetOrCreateThread(5)
# Set up a main model that looks like (P1 & P2 are different processes):
# P1 [ blink_perf.run_test ] [ blink_perf.run_test ]
# | | | |
# P2 | [ foo ] | [ | foo ] |
# | | | | [ bar ] | | | | |
# | | | | | | | | | | | | |
# 100 120 | 140 400 | 420 440 500 520 | 600 640
# | | |
# CPU dur: 15 N/A 40
#
self._AddBlinkTestSlice(renderer_main, 100, 440)
self._AddBlinkTestSlice(renderer_main, 520, 640)
foo_thread.BeginSlice('blink', 'foo', 120, 122)
foo_thread.EndSlice(140, 137)
bar_thread.BeginSlice('blink', 'bar', 400)
bar_thread.EndSlice(420)
# Since this "foo" slice has CPU duration = 40ms, wall-time duration = 100ms
    # & its overlapped wall-time with "blink_perf.run_test" is 80 ms, its
# overlapped CPU time with "blink_perf.run_test" is
# 80 * 40 / 100 = 32ms.
foo_thread.BeginSlice('blink', 'foo', 500, 520)
foo_thread.EndSlice(600, 560)
self.assertEquals(
blink_perf._ComputeTraceEventsThreadTimeForBlinkPerf(
model, renderer_main, ['foo', 'bar', 'baz']),
{'foo': [15, 32], 'bar': [20, 0], 'baz': [0, 0]})
def testTraceEventMetricsNoDoubleCountingBasic(self):
model = model_module.TimelineModel()
renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
renderer_main.name = 'CrRendererMain'
# Set up a main thread model that looks like:
# [ blink_perf.run_test ]
# | [ foo ] [ foo ] |
# | [ foo ] | | |
# | | [ foo ] | | | |
# | | | | | | | |
# 100 120 140 400 420 440 510 550
# | |
# CPU dur of | |
# of top most event: 280 50
#
self._AddBlinkTestSlice(renderer_main, 100, 550)
renderer_main.BeginSlice('blink', 'foo', 120, 130)
renderer_main.BeginSlice('blink', 'foo', 120, 130)
renderer_main.BeginSlice('blink', 'foo', 140, 150)
renderer_main.EndSlice(400, 390)
renderer_main.EndSlice(420, 410)
renderer_main.EndSlice(420, 410)
renderer_main.BeginSlice('blink', 'foo', 440, 455)
renderer_main.EndSlice(510, 505)
self.assertEquals(
blink_perf._ComputeTraceEventsThreadTimeForBlinkPerf(
model, renderer_main, ['foo']), {'foo': [330]})
  def testTraceEventMetricsNoDoubleCountingWithOtherSlicesMixedIn(self):
model = model_module.TimelineModel()
renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
renderer_main.name = 'CrRendererMain'
# Set up a main thread model that looks like:
# [ blink_perf.run_test ]
# | [ foo ] [ bar ] |
# | | [ bar ] | | [ foo ] | |
# | | | [ foo ] | | | | | | |
# | | | | | | | | | | | |
# 100 120 130 140 | 400 405 420 440 480 | 510 520 550
# | |
# CPU dur of | |
# of top most event: 280 (foo) & 270 (bar) 50 (bar) & 20 (foo)
#
self._AddBlinkTestSlice(renderer_main, 100, 550)
renderer_main.BeginSlice('blink', 'foo', 120, 130)
renderer_main.BeginSlice('blink', 'bar', 130, 135)
renderer_main.BeginSlice('blink', 'foo', 140, 150)
renderer_main.EndSlice(400, 390)
renderer_main.EndSlice(405, 405)
renderer_main.EndSlice(420, 410)
renderer_main.BeginSlice('blink', 'bar', 440, 455)
renderer_main.BeginSlice('blink', 'foo', 480, 490)
renderer_main.EndSlice(510, 510)
renderer_main.EndSlice(510, 505)
self.assertEquals(
blink_perf._ComputeTraceEventsThreadTimeForBlinkPerf(
model, renderer_main, ['foo', 'bar']),
{'foo': [300], 'bar': [320]})
def testAsyncTraceEventMetricsOverlapping(self):
model = model_module.TimelineModel()
renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
renderer_main.name = 'CrRendererMain'
# Set up a main thread model that looks like:
# [ blink_perf.run_test ]
# | [ foo ] [ bar ] |
# | [ foo ] | | | |
# | | | | | | | |
# 100 110 120 130 140 400 420 550
# CPU dur: None for all.
#
self._AddBlinkTestSlice(renderer_main, 100, 550)
self._AddAsyncSlice(renderer_main, 'blink', 'foo', 110, 130)
self._AddAsyncSlice(renderer_main, 'blink', 'foo', 120, 140)
self._AddAsyncSlice(renderer_main, 'blink', 'bar', 400, 420)
self.assertEquals(
blink_perf._ComputeTraceEventsThreadTimeForBlinkPerf(
model, renderer_main, ['foo', 'bar']),
{'foo': [30], 'bar': [20]})
def testAsyncTraceEventMetricsMultipleTests(self):
model = model_module.TimelineModel()
renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
renderer_main.name = 'CrRendererMain'
# Set up a main model that looks like:
# [ blink_perf.run_test ] [ blink_perf.run_test ]
# | | | |
# [ foo ]
# | [ foo ]
# | | | | | | | |
# 80 90 100 200 300 400 500 510
# CPU dur: None for all
#
self._AddBlinkTestSlice(renderer_main, 100, 200)
self._AddBlinkTestSlice(renderer_main, 300, 400)
# Both events totally intersect both tests.
self._AddAsyncSlice(renderer_main, 'blink', 'foo', 80, 500)
self._AddAsyncSlice(renderer_main, 'blink', 'bar', 90, 510)
self.assertEquals(
blink_perf._ComputeTraceEventsThreadTimeForBlinkPerf(
model, renderer_main, ['foo', 'bar']),
{'foo': [100, 100], 'bar': [100, 100]})
|
|
# Copyright (c) 2019-2020 by Rocky Bernstein
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from uncompyle6.show import maybe_show_tree
from copy import copy
from spark_parser import GenericASTTraversal, GenericASTTraversalPruningException
from uncompyle6.semantics.helper import find_code_node
from uncompyle6.parsers.treenode import SyntaxTree
from uncompyle6.scanners.tok import NoneToken, Token
from uncompyle6.semantics.consts import RETURN_NONE, ASSIGN_DOC_STRING
def is_docstring(node, version, co_consts):
if node == "sstmt":
node = node[0]
# TODO: the test below on 2.7 succeeds for
# class OldClass:
# __doc__ = DocDescr()
# which produces:
#
# assign (2)
# 0. expr
# call (2)
# 0. expr
# L. 16 6 LOAD_DEREF 0 'DocDescr'
# 1. 9 CALL_FUNCTION_0 0 None
# 1. store
#
# See Python 2.7 test_descr.py
# If ASSIGN_DOC_STRING doesn't work we need something like the below
# but more elaborate to address the above.
# try:
# return node.kind == "assign" and node[1][0].pattr == "__doc__"
# except:
# return False
if version <= 2.7:
doc_load = "LOAD_CONST"
else:
doc_load = "LOAD_STR"
return node == ASSIGN_DOC_STRING(co_consts[0], doc_load)
def is_not_docstring(call_stmt_node):
try:
return (
call_stmt_node == "call_stmt"
and call_stmt_node[0][0] == "LOAD_STR"
and call_stmt_node[1] == "POP_TOP"
)
except:
return False
class TreeTransform(GenericASTTraversal, object):
def __init__(self, version, show_ast=None, is_pypy=False):
self.version = version
self.showast = show_ast
self.is_pypy = is_pypy
return
def maybe_show_tree(self, ast):
if isinstance(self.showast, dict) and self.showast:
maybe_show_tree(self, ast)
def preorder(self, node=None):
"""Walk the tree in roughly 'preorder' (a bit of a lie explained below).
For each node with typestring name *name* if the
node has a method called n_*name*, call that before walking
children.
In typical use a node with children can call "preorder" in any
        order it wants, which may skip children or order them in ways
        other than first to last. In fact, this happens, so in that
        sense this function is not strictly preorder.
"""
if node is None:
node = self.ast
try:
name = "n_" + self.typestring(node)
if hasattr(self, name):
func = getattr(self, name)
node = func(node)
except GenericASTTraversalPruningException:
return
for i, kid in enumerate(node):
node[i] = self.preorder(kid)
return node
def n_mkfunc(self, node):
"""If the function has a docstring (this is found in the code
constants), pull that out and make it part of the syntax
        tree. When generating the source string, that AST node rather
than the code field is seen and used.
"""
if self.version >= 3.7:
code_index = -3
else:
code_index = -2
code = find_code_node(node, code_index).attr
mkfunc_pattr = node[-1].pattr
if isinstance(mkfunc_pattr, tuple):
            assert len(mkfunc_pattr) == 4 and isinstance(mkfunc_pattr[3], int)
is_closure = node[-1].pattr[3] != 0
else:
            # FIXME: This is what we had before. It is hokey and probably wrong.
is_closure = mkfunc_pattr == "closure"
if (
(not is_closure)
and len(code.co_consts) > 0
and isinstance(code.co_consts[0], str)
):
docstring_node = SyntaxTree(
"docstring", [Token("LOAD_STR", has_arg=True, pattr=code.co_consts[0])]
)
docstring_node.transformed_by = "n_mkfunc"
node = SyntaxTree("mkfunc", node[:-1] + [docstring_node, node[-1]])
node.transformed_by = "n_mkfunc"
return node
def n_ifstmt(self, node):
"""Here we check if we can turn an `ifstmt` or 'iflaststmtl` into
some kind of `assert` statement"""
testexpr = node[0]
if testexpr not in ("testexpr", "testexprl"):
return node
if node.kind in ("ifstmt", "ifstmtl"):
ifstmts_jump = node[1]
if ifstmts_jump == "_ifstmts_jumpl" and ifstmts_jump[0] == "_ifstmts_jump":
ifstmts_jump = ifstmts_jump[0]
elif ifstmts_jump not in ("_ifstmts_jump", "_ifstmts_jumpl", "ifstmts_jumpl"):
return node
stmts = ifstmts_jump[0]
else:
# iflaststmtl works this way
stmts = node[1]
if stmts in ("c_stmts", "stmts", "stmts_opt") and len(stmts) == 1:
raise_stmt = stmts[0]
if raise_stmt != "raise_stmt1" and len(raise_stmt) > 0:
raise_stmt = raise_stmt[0]
testtrue_or_false = testexpr[0]
if (
raise_stmt.kind == "raise_stmt1"
and 1 <= len(testtrue_or_false) <= 2
and raise_stmt.first_child().pattr == "AssertionError"
):
if testtrue_or_false in ("testtrue", "testtruel"):
                    # Skip over the testtrue because it would
# produce a "not" and we don't want that here.
assert_expr = testtrue_or_false[0]
jump_cond = NoneToken
else:
assert testtrue_or_false in ("testfalse", "testfalsel")
assert_expr = testtrue_or_false[0]
if assert_expr in ("testfalse_not_and", "and_not"):
# FIXME: come back to stuff like this
return node
jump_cond = testtrue_or_false[1]
assert_expr.kind = "assert_expr"
pass
expr = raise_stmt[0]
RAISE_VARARGS_1 = raise_stmt[1]
call = expr[0]
if call == "call":
# ifstmt
# 0. testexpr
# testtrue (2)
# 0. expr
# 1. _ifstmts_jump (2)
# 0. c_stmts
# stmt
# raise_stmt1 (2)
# 0. expr
# call (3)
# 1. RAISE_VARARGS_1
# becomes:
# assert2 ::= assert_expr jmp_true LOAD_ASSERT expr RAISE_VARARGS_1 COME_FROM
if jump_cond in ("jmp_true", NoneToken):
kind = "assert2"
else:
if jump_cond == "jmp_false":
# FIXME: We don't handle this kind of thing yet.
return node
kind = "assert2not"
LOAD_ASSERT = call[0].first_child()
if LOAD_ASSERT not in ( "LOAD_ASSERT", "LOAD_GLOBAL"):
return node
if isinstance(call[1], SyntaxTree):
expr = call[1][0]
node = SyntaxTree(
kind,
[
assert_expr,
jump_cond,
LOAD_ASSERT,
expr,
RAISE_VARARGS_1,
],
)
node.transformed_by = "n_ifstmt"
pass
pass
else:
# ifstmt
# 0. testexpr (2)
# testtrue
# 0. expr
# 1. _ifstmts_jump (2)
# 0. c_stmts
# stmts
# raise_stmt1 (2)
# 0. expr
# LOAD_ASSERT
# 1. RAISE_VARARGS_1
# becomes:
# assert ::= assert_expr jmp_true LOAD_ASSERT RAISE_VARARGS_1 COME_FROM
if jump_cond in ("jmp_true", NoneToken):
if self.is_pypy:
kind = "assert0_pypy"
else:
kind = "assert"
else:
assert jump_cond == "jmp_false"
kind = "assertnot"
LOAD_ASSERT = expr[0]
node = SyntaxTree(
kind, [assert_expr, jump_cond, LOAD_ASSERT, RAISE_VARARGS_1]
)
node.transformed_by = ("n_ifstmt",)
pass
pass
return node
n_ifstmtl = n_iflaststmtl = n_ifstmt
# preprocess is used for handling chains of
# if elif elif
def n_ifelsestmt(self, node, preprocess=False):
"""
        Transformation involving if..else statements.
For example
if ...
else
if ..
into:
if ..
elif ...
[else ...]
where appropriate.
"""
else_suite = node[3]
n = else_suite[0]
old_stmts = None
else_suite_index = 1
len_n = len(n)
if len_n == 1 == len(n[0]) and n[0] == "stmt":
n = n[0][0]
elif len_n == 0:
return node
elif n[0].kind in ("lastc_stmt", "lastl_stmt"):
n = n[0]
if n[0].kind in (
"ifstmt",
"iflaststmt",
"iflaststmtl",
"ifelsestmtl",
"ifelsestmtc",
"ifpoplaststmtl",
):
n = n[0]
if n.kind == "ifpoplaststmtl":
old_stmts = n[2]
else_suite_index = 2
pass
pass
else:
if (
len_n > 1
and isinstance(n[0], SyntaxTree)
and 1 == len(n[0])
and n[0] == "stmt"
and n[1].kind == "stmt"
):
else_suite_stmts = n[0]
elif len_n == 1:
else_suite_stmts = n
else:
return node
if else_suite_stmts[0].kind in (
"ifstmt",
"iflaststmt",
"ifelsestmt",
"ifelsestmtl",
):
old_stmts = n
n = else_suite_stmts[0]
else:
return node
if n.kind in ("ifstmt", "iflaststmt", "iflaststmtl", "ifpoplaststmtl"):
node.kind = "ifelifstmt"
n.kind = "elifstmt"
elif n.kind in ("ifelsestmtr",):
node.kind = "ifelifstmt"
n.kind = "elifelsestmtr"
elif n.kind in ("ifelsestmt", "ifelsestmtc", "ifelsestmtl"):
node.kind = "ifelifstmt"
self.n_ifelsestmt(n, preprocess=True)
if n == "ifelifstmt":
n.kind = "elifelifstmt"
elif n.kind in ("ifelsestmt", "ifelsestmtc", "ifelsestmtl"):
n.kind = "elifelsestmt"
if not preprocess:
if old_stmts:
if n.kind == "elifstmt":
trailing_else = SyntaxTree("stmts", old_stmts[1:])
if len(trailing_else):
# We use elifelsestmtr because it has 3 nodes
elifelse_stmt = SyntaxTree(
"elifelsestmtr", [n[0], n[else_suite_index], trailing_else]
)
node[3] = elifelse_stmt
else:
elif_stmt = SyntaxTree("elifstmt", [n[0], n[else_suite_index]])
node[3] = elif_stmt
node.transformed_by = "n_ifelsestmt"
pass
else:
# Other cases for n.kind may happen here
pass
pass
return node
n_ifelsestmtc = n_ifelsestmtl = n_ifelsestmt
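    # Illustrative example (assumed source, not taken from the module): a tree
    # produced from
    #     if a:
    #         x()
    #     else:
    #         if b:
    #             y()
    # is relabeled by n_ifelsestmt above so that it decompiles back as
    #     if a:
    #         x()
    #     elif b:
    #         y()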
def n_import_from37(self, node):
importlist37 = node[3]
assert importlist37 == "importlist37"
if len(importlist37) == 1:
alias37 = importlist37[0]
store = alias37[1]
assert store == "store"
alias_name = store[0].attr
import_name_attr = node[2]
assert import_name_attr == "IMPORT_NAME_ATTR"
dotted_names = import_name_attr.attr.split(".")
if len(dotted_names) > 1 and dotted_names[-1] == alias_name:
# Simulate:
# Instead of
# import_from37 ::= LOAD_CONST LOAD_CONST IMPORT_NAME_ATTR importlist37 POP_TOP
# import_as37 ::= LOAD_CONST LOAD_CONST importlist37 store POP_TOP
# 'import_as37': ( '%|import %c as %c\n', 2, -2),
node = SyntaxTree(
"import_as37", [node[0], node[1], import_name_attr, store, node[-1]]
)
node.transformed_by = "n_import_from37"
pass
pass
return node
def n_list_for(self, list_for_node):
expr = list_for_node[0]
if expr == "expr" and expr[0] == "get_iter":
# Remove extraneous get_iter() inside the "for" of a comprehension
assert expr[0][0] == "expr"
list_for_node[0] = expr[0][0]
list_for_node.transformed_by = ("n_list_for",)
return list_for_node
def n_stmts(self, node):
if node.first_child() == "SETUP_ANNOTATIONS":
prev = node[0][0]
new_stmts = [node[0]]
for i, sstmt in enumerate(node[1:]):
ann_assign = sstmt[0][0]
if (
sstmt[0] == "stmt"
and ann_assign == "ann_assign"
and prev == "assign"
):
annotate_var = ann_assign[-2]
if annotate_var.attr == prev[-1][0].attr:
del new_stmts[-1]
sstmt[0][0] = SyntaxTree(
"ann_assign_init", [ann_assign[0], prev[0], annotate_var]
)
sstmt[0][0].transformed_by = "n_stmts"
pass
pass
new_stmts.append(sstmt)
prev = ann_assign
pass
node.data = new_stmts
return node
def traverse(self, node, is_lambda=False):
node = self.preorder(node)
return node
def transform(self, ast, code):
self.maybe_show_tree(ast)
self.ast = copy(ast)
self.ast = self.traverse(self.ast, is_lambda=False)
try:
# Disambiguate a string (expression) which appears as a "call_stmt" at
# the beginning of a function versus a docstring. Seems pretty academic,
# but this is Python.
call_stmt = ast[0][0]
if is_not_docstring(call_stmt):
call_stmt.kind = "string_at_beginning"
call_stmt.transformed_by = "transform"
pass
except:
pass
try:
for i in range(len(self.ast)):
sstmt = ast[i]
if len(sstmt) == 1 and sstmt == "sstmt":
self.ast[i] = self.ast[i][0]
if is_docstring(self.ast[i], self.version, code.co_consts):
load_const = self.ast[i].first_child()
docstring_ast = SyntaxTree(
"docstring",
[
Token(
"LOAD_STR",
has_arg=True,
offset=0,
attr=load_const.attr,
pattr=load_const.pattr,
)
],
)
docstring_ast.transformed_by = "transform"
del self.ast[i]
self.ast.insert(0, docstring_ast)
break
if self.ast[-1] == RETURN_NONE:
self.ast.pop() # remove last node
# todo: if empty, add 'pass'
except:
pass
return self.ast
# Write template_engine
# def template_engine
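# Hedged usage sketch (not part of the original module): roughly how the
# transformer above could be driven once uncompyle6's scanner/parser has
# produced a parse tree plus its matching code object elsewhere; `parse_tree`
# and `code_obj` are placeholder names.
def _example_transform(parse_tree, code_obj, version=3.7):
    transformer = TreeTransform(version=version, show_ast=None)
    return transformer.transform(parse_tree, code_obj)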
|
|
import logging
import random
from datetime import datetime
import bson
import modularodm.exceptions
import pytz
from django.conf import settings
from django.contrib.contenttypes.fields import (GenericForeignKey,
GenericRelation)
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField
from django.core.exceptions import MultipleObjectsReturned
from django.core.exceptions import ValidationError as DjangoValidationError
from django.db import models
from django.db.models import F, Q
from django.db.models import ForeignKey
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from osf.utils.caching import cached_property
from osf.exceptions import ValidationError
from osf.modm_compat import to_django_query
from osf.utils.datetime_aware_jsonfield import (DateTimeAwareJSONField,
coerce_nonnaive_datetimes)
from osf.utils.fields import LowercaseCharField, NonNaiveDateTimeField
ALPHABET = '23456789abcdefghjkmnpqrstuvwxyz'
logger = logging.getLogger(__name__)
def generate_guid(length=5):
while True:
guid_id = ''.join(random.sample(ALPHABET, length))
try:
# is the guid in the blacklist
BlackListGuid.objects.get(guid=guid_id)
except BlackListGuid.DoesNotExist:
# it's not, check and see if it's already in the database
try:
Guid.objects.get(_id=guid_id)
except Guid.DoesNotExist:
# valid and unique guid
return guid_id
def generate_object_id():
return str(bson.ObjectId())
class MODMCompatibilityQuerySet(models.QuerySet):
def __getitem__(self, k):
item = super(MODMCompatibilityQuerySet, self).__getitem__(k)
if hasattr(item, 'wrapped'):
return item.wrapped()
else:
return item
def __iter__(self):
items = super(MODMCompatibilityQuerySet, self).__iter__()
for item in items:
if hasattr(item, 'wrapped'):
yield item.wrapped()
else:
yield item
def eager(self, *fields):
qs = self._clone()
field_set = set(fields)
fk_fields = set(qs.model.get_fk_field_names()) & field_set
m2m_fields = set(qs.model.get_m2m_field_names()) & field_set
if 'contributors' in field_set:
m2m_fields.add('_contributors')
qs = qs.select_related(*fk_fields).prefetch_related(*m2m_fields)
return qs
def sort(self, *fields):
# Fields are passed in as e.g. [('title', 1), ('date_created', -1)]
if isinstance(fields[0], list):
fields = fields[0]
def sort_key(item):
if isinstance(item, basestring):
return item
elif isinstance(item, tuple):
field_name, direction = item
prefix = '-' if direction == -1 else ''
return ''.join([prefix, field_name])
sort_keys = [sort_key(each) for each in fields]
return self.order_by(*sort_keys)
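    # Illustrative only: a modm-style sort spec maps onto Django's order_by,
    # e.g. qs.sort(('title', 1), ('date_created', -1))
    #      -> qs.order_by('title', '-date_created')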
def limit(self, n):
return self[:n]
class BaseModel(models.Model):
"""Base model that acts makes subclasses mostly compatible with the
modular-odm ``StoredObject`` interface.
"""
migration_page_size = 50000
objects = MODMCompatibilityQuerySet.as_manager()
class Meta:
abstract = True
@classmethod
def get_fk_field_names(cls):
return [field.name for field in cls._meta.get_fields() if
field.is_relation and not field.auto_created and (field.many_to_one or field.one_to_one) and not isinstance(field, GenericForeignKey)]
@classmethod
def get_m2m_field_names(cls):
return [field.attname or field.name for field in
cls._meta.get_fields() if
field.is_relation and field.many_to_many and not hasattr(field, 'field')]
@classmethod
def load(cls, data):
try:
if issubclass(cls, GuidMixin):
return cls.objects.get(guids___id=data)
elif issubclass(cls, ObjectIDMixin):
return cls.objects.get(_id=data)
elif isinstance(data, basestring):
# Some models (CitationStyle) have an _id that is not a bson
# Looking up things by pk will never work with a basestring
return cls.objects.get(_id=data)
return cls.objects.get(pk=data)
except cls.DoesNotExist:
return None
@classmethod
def find_one(cls, query):
try:
return cls.objects.get(to_django_query(query, model_cls=cls))
except cls.DoesNotExist:
raise modularodm.exceptions.NoResultsFound()
except cls.MultipleObjectsReturned as e:
raise modularodm.exceptions.MultipleResultsFound(*e.args)
@classmethod
def find(cls, query=None):
if not query:
return cls.objects.all()
else:
return cls.objects.filter(to_django_query(query, model_cls=cls))
@classmethod
def remove(cls, query=None):
return cls.find(query).delete()
@classmethod
def remove_one(cls, obj):
if obj.pk:
return obj.delete()
@classmethod
def migrate_from_modm(cls, modm_obj):
"""
Given a modm object, make a django object with the same local fields.
This is a base method that may work for simple objects.
It should be customized in the child class if it doesn't work.
:param modm_obj:
:return:
"""
django_obj = cls()
local_django_fields = set([x.name for x in django_obj._meta.get_fields() if not x.is_relation])
intersecting_fields = set(modm_obj.to_storage().keys()).intersection(
set(local_django_fields))
for field in intersecting_fields:
modm_value = getattr(modm_obj, field)
if modm_value is None:
continue
if isinstance(modm_value, datetime):
modm_value = pytz.utc.localize(modm_value)
# TODO Remove this after migration
if isinstance(django_obj._meta.get_field(field), DateTimeAwareJSONField):
modm_value = coerce_nonnaive_datetimes(modm_value)
setattr(django_obj, field, modm_value)
return django_obj
@property
def _primary_name(self):
return '_id'
@property
def _is_loaded(self):
return bool(self.pk)
def reload(self):
return self.refresh_from_db()
def _natural_key(self):
return self.pk
def clone(self):
"""Create a new, unsaved copy of this object."""
copy = self.__class__.objects.get(pk=self.pk)
copy.id = None
# empty all the fks
fk_field_names = [f.name for f in self._meta.model._meta.get_fields() if isinstance(f, (ForeignKey, GenericForeignKey))]
for field_name in fk_field_names:
setattr(copy, field_name, None)
try:
copy._id = bson.ObjectId()
except AttributeError:
pass
return copy
def save(self, *args, **kwargs):
# Make Django validate on save (like modm)
if not kwargs.get('force_insert') and not kwargs.get('force_update'):
try:
self.full_clean()
except DjangoValidationError as err:
raise ValidationError(*err.args)
return super(BaseModel, self).save(*args, **kwargs)
# TODO: Rename to Identifier?
class Guid(BaseModel):
"""Stores either a short guid or long object_id for any model that inherits from BaseIDMixin.
Each ID field (e.g. 'guid', 'object_id') MUST have an accompanying method, named with
'initialize_<ID type>' (e.g. 'initialize_guid') that generates and sets the field.
"""
primary_identifier_name = '_id'
# TODO DELETE ME POST MIGRATION
modm_query = None
migration_page_size = 500000
# /TODO DELETE ME POST MIGRATION
id = models.AutoField(primary_key=True)
_id = LowercaseCharField(max_length=255, null=False, blank=False, default=generate_guid, db_index=True,
unique=True)
referent = GenericForeignKey()
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
created = NonNaiveDateTimeField(db_index=True, default=timezone.now) # auto_now_add=True)
# Override load in order to load by GUID
@classmethod
def load(cls, data):
try:
return cls.objects.get(_id=data)
except cls.DoesNotExist:
return None
def reload(self):
del self._referent_cache
return super(Guid, self).reload()
@classmethod
def migrate_from_modm(cls, modm_obj, object_id=None, content_type=None):
"""
Given a modm Guid make a django Guid
:param object_id:
:param content_type:
:param modm_obj:
:return:
"""
django_obj = cls()
if modm_obj._id != modm_obj.referent._id:
# if the object has a BSON id, get the created date from that
django_obj.created = bson.ObjectId(modm_obj.referent._id).generation_time
else:
# just make it now
django_obj.created = timezone.now()
django_obj._id = modm_obj._id
if object_id and content_type:
# if the referent was passed set the GFK to point to it
django_obj.content_type = content_type
django_obj.object_id = object_id
return django_obj
class Meta:
ordering = ['-created']
get_latest_by = 'created'
index_together = (
('content_type', 'object_id', 'created'),
)
class BlackListGuid(BaseModel):
# TODO DELETE ME POST MIGRATION
modm_model_path = 'framework.guid.model.BlacklistGuid'
primary_identifier_name = 'guid'
modm_query = None
migration_page_size = 500000
# /TODO DELETE ME POST MIGRATION
id = models.AutoField(primary_key=True)
guid = LowercaseCharField(max_length=255, unique=True, db_index=True)
@property
def _id(self):
return self.guid
@classmethod
def migrate_from_modm(cls, modm_obj):
"""
Given a modm BlacklistGuid make a django BlackListGuid
:param modm_obj:
:return:
"""
django_obj = cls()
django_obj.guid = modm_obj._id
return django_obj
def generate_guid_instance():
return Guid.objects.create().id
class PKIDStr(str):
def __new__(self, _id, pk):
return str.__new__(self, _id)
def __init__(self, _id, pk):
self.__pk = pk
def __int__(self):
return self.__pk
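# Hedged sketch (not part of the original module): PKIDStr compares like the
# string id while int() still recovers the integer primary key it wraps; the
# values below are made up.
def _example_pkidstr():
    compat_id = PKIDStr('abcde', 42)
    assert compat_id == 'abcde'   # behaves as the string _id
    assert int(compat_id) == 42   # int() yields the database pk
    return compat_id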
class BaseIDMixin(models.Model):
@classmethod
def migrate_from_modm(cls, modm_obj):
"""
Given a modm object, make a django object with the same local fields.
This is a base method that may work for simple objects.
It should be customized in the child class if it doesn't work.
:param modm_obj:
:return:
"""
django_obj = cls()
local_django_fields = set([x.name for x in django_obj._meta.get_fields() if not x.is_relation])
intersecting_fields = set(modm_obj.to_storage().keys()).intersection(
set(local_django_fields))
for field in intersecting_fields:
modm_value = getattr(modm_obj, field)
if modm_value is None:
continue
if isinstance(modm_value, datetime):
modm_value = pytz.utc.localize(modm_value)
# TODO Remove this after migration
if isinstance(django_obj._meta.get_field(field), DateTimeAwareJSONField):
modm_value = coerce_nonnaive_datetimes(modm_value)
setattr(django_obj, field, modm_value)
return django_obj
class Meta:
abstract = True
class ObjectIDMixin(BaseIDMixin):
primary_identifier_name = '_id'
_id = models.CharField(max_length=24, default=generate_object_id, unique=True, db_index=True)
@classmethod
def load(cls, q):
try:
return cls.objects.get(_id=q)
except cls.DoesNotExist:
# modm doesn't throw exceptions when loading things that don't exist
return None
@classmethod
def migrate_from_modm(cls, modm_obj):
django_obj = super(ObjectIDMixin, cls).migrate_from_modm(modm_obj)
django_obj._id = str(modm_obj._id)
return django_obj
class Meta:
abstract = True
def _natural_key(self):
return self._id
class InvalidGuid(Exception):
pass
class OptionalGuidMixin(BaseIDMixin):
"""
This makes it so that things can **optionally** have guids. Think files.
Things that inherit from this must also inherit from ObjectIDMixin ... probably
"""
__guid_min_length__ = 5
guids = GenericRelation(Guid, related_name='referent', related_query_name='referents')
guid_string = ArrayField(models.CharField(max_length=255, null=True, blank=True), null=True, blank=True)
content_type_pk = models.PositiveIntegerField(null=True, blank=True)
def get_guid(self, create=False):
if not self.pk:
logger.warn('Implicitly saving object before creating guid')
self.save()
if create:
try:
guid, created = Guid.objects.get_or_create(
object_id=self.pk,
content_type_id=ContentType.objects.get_for_model(self).pk
)
except MultipleObjectsReturned:
# lol, hacks
pass
else:
return guid
return self.guids.order_by('-created').first()
@classmethod
def migrate_from_modm(cls, modm_obj):
instance = super(OptionalGuidMixin, cls).migrate_from_modm(modm_obj)
from website.models import Guid as MODMGuid
from modularodm import Q as MODMQ
if modm_obj.get_guid():
guids = MODMGuid.find(MODMQ('referent', 'eq', modm_obj._id))
setattr(instance, 'guid_string', [x.lower() for x in guids.get_keys()])
setattr(instance, 'content_type_pk', ContentType.objects.get_for_model(cls).pk)
return instance
class Meta:
abstract = True
# NOTE: Define only a stub mixin when using Redis caching. It is faster to lookup the individual cached objects
# than build the entire SQL query w/ Django.
if settings.CACHEOPS_ENABLED:
class GuidMixinQuerySet(MODMCompatibilityQuerySet):
def annotate_query_with_guids(self):
return self
def remove_guid_annotations(self):
return self
else:
class GuidMixinQuerySet(MODMCompatibilityQuerySet):
tables = ['osf_guid', 'django_content_type']
GUID_FIELDS = [
'guids__id',
'guids___id',
'guids__content_type_id',
'guids__object_id',
'guids__created'
]
def annotate_query_with_guids(self):
self._prefetch_related_lookups = ['guids']
for field in self.GUID_FIELDS:
self.query.add_annotation(
F(field), '_{}'.format(field), is_summary=False
)
for table in self.tables:
if table not in self.query.tables:
self.safe_table_alias(table)
def remove_guid_annotations(self):
            # Iterate over a copy of the keys so the dict can be mutated safely.
            for k in list(self.query.annotations.keys()):
if k[1:] in self.GUID_FIELDS:
del self.query.annotations[k]
for table_name in ['osf_guid', 'django_content_type']:
if table_name in self.query.alias_map:
del self.query.alias_map[table_name]
if table_name in self.query.alias_refcount:
del self.query.alias_refcount[table_name]
if table_name in self.query.tables:
del self.query.tables[self.query.tables.index(table_name)]
def safe_table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.query.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
if alias in self.query.alias_refcount:
self.query.alias_refcount[alias] += 1
else:
self.query.alias_refcount[alias] = 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.query.alias_prefix, len(self.query.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = table_name
self.query.table_map[alias] = [alias]
self.query.alias_refcount[alias] = 1
self.tables.append(alias)
return alias, True
def _clone(self, annotate=False, **kwargs):
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
if annotate:
self.annotate_query_with_guids()
clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
# this method was copied from the default django queryset except for the below two lines
if annotate:
clone.annotate_query_with_guids()
clone._for_write = self._for_write
clone._prefetch_related_lookups = self._prefetch_related_lookups[:]
clone._known_related_objects = self._known_related_objects
clone._iterable_class = self._iterable_class
clone._fields = self._fields
clone.__dict__.update(kwargs)
return clone
def annotate(self, *args, **kwargs):
self.annotate_query_with_guids()
return super(GuidMixinQuerySet, self).annotate(*args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
'Cannot filter a query once a slice has been taken.'
clone = self._clone(annotate=True)
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def all(self):
return self._clone(annotate=True)
# does implicit filter
def get(self, *args, **kwargs):
# add this to make sure we don't get dupes
self.query.add_distinct_fields('id')
return super(GuidMixinQuerySet, self).get(*args, **kwargs)
# TODO: Below lines are commented out to ensure that
# the annotations are used after running .count()
# e.g.
# queryset.count()
# queryset[0]
# This is more efficient when doing chained operations
# on a queryset, but less efficient when only getting a count.
# Figure out a way to get the best of both worlds
# def count(self):
# self.remove_guid_annotations()
# return super(GuidMixinQuerySet, self).count()
def update(self, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).update(**kwargs)
def update_or_create(self, defaults=None, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).update_or_create(defaults=defaults, **kwargs)
def values(self, *fields):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).values(*fields)
def create(self, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).create(**kwargs)
def bulk_create(self, objs, batch_size=None):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).bulk_create(objs, batch_size)
def get_or_create(self, defaults=None, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).get_or_create(defaults, **kwargs)
def values_list(self, *fields, **kwargs):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).values_list(*fields, **kwargs)
def exists(self):
self.remove_guid_annotations()
return super(GuidMixinQuerySet, self).exists()
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
if 'guids' in self._prefetch_related_lookups and self._result_cache and hasattr(self._result_cache[0], '_guids__id'):
# if guids is requested for prefetch and there are things in the result cache and the first one has
# the annotated guid fields then remove guids from prefetch_related_lookups
del self._prefetch_related_lookups[self._prefetch_related_lookups.index('guids')]
results = []
for result in self._result_cache:
# loop through the result cache
guid_dict = {}
for field in self.GUID_FIELDS:
# pull the fields off of the result object and put them in a dictionary without prefixed names
guid_dict[field] = getattr(result, '_{}'.format(field), None)
if None in guid_dict.values():
# if we get an invalid result field value, stop
logger.warning(
                                'Annotated guids came back with None values for {}, resorting to extra query'.format(result))
return
if not hasattr(result, '_prefetched_objects_cache'):
# initialize _prefetched_objects_cache
result._prefetched_objects_cache = {}
if 'guids' not in result._prefetched_objects_cache:
                            # initialize guids in _prefetched_objects_cache
result._prefetched_objects_cache['guids'] = Guid.objects.none()
                        # build a result dict keyed by the bare field names (the 'guids__' prefix stripped)
result_dict = {key.replace('guids__', ''): value for key, value in guid_dict.iteritems()}
# make an unsaved guid instance
guid = Guid(**result_dict)
result._prefetched_objects_cache['guids']._result_cache = [guid, ]
results.append(result)
# replace the result cache with the new set of results
self._result_cache = results
self._prefetch_related_objects()
class GuidMixin(BaseIDMixin):
__guid_min_length__ = 5
primary_identifier_name = 'guid_string'
guids = GenericRelation(Guid, related_name='referent', related_query_name='referents')
guid_string = ArrayField(models.CharField(max_length=255, null=True, blank=True), null=True, blank=True)
content_type_pk = models.PositiveIntegerField(null=True, blank=True)
objects = GuidMixinQuerySet.as_manager()
# TODO: use pre-delete signal to disable delete cascade
def _natural_key(self):
return self.guid_string
@cached_property
def _id(self):
try:
guid = self.guids.all()[0]
except IndexError:
return None
if guid:
return guid._id
return None
@_id.setter
def _id(self, value):
# TODO do we really want to allow this?
guid, created = Guid.objects.get_or_create(_id=value)
if created:
guid.object_id = self.pk
guid.content_type = ContentType.objects.get_for_model(self)
guid.save()
elif guid.content_type == ContentType.objects.get_for_model(self) and guid.object_id == self.pk:
# TODO should this up the created for the guid until now so that it appears as the first guid
# for this object?
return
else:
raise InvalidGuid('Cannot indirectly repoint an existing guid, please use the Guid model')
_primary_key = _id
@classmethod
def load(cls, q):
try:
return cls.objects.filter(guids___id=q)[0]
except IndexError:
# modm doesn't throw exceptions when loading things that don't exist
return None
@property
def deep_url(self):
return None
@classmethod
def migrate_from_modm(cls, modm_obj):
"""
Given a modm object, make a django object with the same local fields.
This is a base method that may work for simple objects.
It should be customized in the child class if it doesn't work.
:param modm_obj:
:return:
"""
django_obj = cls()
local_django_fields = set(
[x.name for x in django_obj._meta.get_fields() if not x.is_relation and x.name != '_id'])
intersecting_fields = set(modm_obj.to_storage().keys()).intersection(
set(local_django_fields))
for field in intersecting_fields:
modm_value = getattr(modm_obj, field)
if modm_value is None:
continue
if isinstance(modm_value, datetime):
modm_value = pytz.utc.localize(modm_value)
# TODO Remove this after migration
if isinstance(django_obj._meta.get_field(field), DateTimeAwareJSONField):
modm_value = coerce_nonnaive_datetimes(modm_value)
setattr(django_obj, field, modm_value)
from website.models import Guid as MODMGuid
from modularodm import Q as MODMQ
guids = MODMGuid.find(MODMQ('referent', 'eq', modm_obj._id))
setattr(django_obj, 'guid_string', list(set([x.lower() for x in guids.get_keys()])))
setattr(django_obj, 'content_type_pk', ContentType.objects.get_for_model(cls).pk)
return django_obj
class Meta:
abstract = True
@receiver(post_save)
def ensure_guid(sender, instance, created, **kwargs):
if not issubclass(sender, GuidMixin):
return False
existing_guids = Guid.objects.filter(object_id=instance.pk, content_type=ContentType.objects.get_for_model(instance))
has_cached_guids = hasattr(instance, '_prefetched_objects_cache') and 'guids' in instance._prefetched_objects_cache
if not existing_guids.exists():
# Clear query cache of instance.guids
if has_cached_guids:
del instance._prefetched_objects_cache['guids']
Guid.objects.create(object_id=instance.pk, content_type=ContentType.objects.get_for_model(instance),
_id=generate_guid(instance.__guid_min_length__))
elif not existing_guids.exists() and instance.guid_string is not None:
# Clear query cache of instance.guids
if has_cached_guids:
del instance._prefetched_objects_cache['guids']
Guid.objects.create(object_id=instance.pk, content_type_id=instance.content_type_pk,
_id=instance.guid_string)
|
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import math
from .._errors import pretty_message
from .._ffi import new, null, is_null, buffer_from_bytes, bytes_from_buffer, deref
from ._libcrypto import libcrypto, LibcryptoConst, handle_openssl_error
from ..util import rand_bytes
from .._types import type_name, byte_cls
__all__ = [
'aes_cbc_no_padding_decrypt',
'aes_cbc_no_padding_encrypt',
'aes_cbc_pkcs7_decrypt',
'aes_cbc_pkcs7_encrypt',
'des_cbc_pkcs5_decrypt',
'des_cbc_pkcs5_encrypt',
'rc2_cbc_pkcs5_decrypt',
'rc2_cbc_pkcs5_encrypt',
'rc4_decrypt',
'rc4_encrypt',
'tripledes_cbc_pkcs5_decrypt',
'tripledes_cbc_pkcs5_encrypt',
]
def aes_cbc_no_padding_encrypt(key, data, iv):
"""
Encrypts plaintext using AES in CBC mode with a 128, 192 or 256 bit key and
    no padding. This means the plaintext must be an exact multiple of 16 bytes
long.
:param key:
The encryption key - a byte string either 16, 24 or 32 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - either a byte string 16-bytes long or None
to generate an IV
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A tuple of two byte strings (iv, ciphertext)
"""
cipher = _calculate_aes_cipher(key)
if not iv:
iv = rand_bytes(16)
elif len(iv) != 16:
raise ValueError(pretty_message(
'''
iv must be 16 bytes long - is %s
''',
len(iv)
))
if len(data) % 16 != 0:
raise ValueError(pretty_message(
'''
data must be a multiple of 16 bytes long - is %s
''',
len(data)
))
return (iv, _encrypt(cipher, key, data, iv, False))
def aes_cbc_no_padding_decrypt(key, data, iv):
"""
Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key and no
padding.
:param key:
The encryption key - a byte string either 16, 24 or 32 bytes long
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string 16-bytes long
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the plaintext
"""
cipher = _calculate_aes_cipher(key)
if len(iv) != 16:
raise ValueError(pretty_message(
'''
iv must be 16 bytes long - is %s
''',
len(iv)
))
return _decrypt(cipher, key, data, iv, False)
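# Hedged roundtrip sketch (not part of the original module): the no-padding
# variants above require the plaintext to already be a multiple of 16 bytes;
# the key and message are made-up values.
def _example_aes_cbc_no_padding_roundtrip():
    key = b'0' * 32                      # 256-bit key
    plaintext = b'sixteen byte msg'      # exactly 16 bytes
    iv, ciphertext = aes_cbc_no_padding_encrypt(key, plaintext, None)
    return aes_cbc_no_padding_decrypt(key, ciphertext, iv) == plaintext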
def aes_cbc_pkcs7_encrypt(key, data, iv):
"""
Encrypts plaintext using AES in CBC mode with a 128, 192 or 256 bit key and
PKCS#7 padding.
:param key:
The encryption key - a byte string either 16, 24 or 32 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - either a byte string 16-bytes long or None
to generate an IV
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A tuple of two byte strings (iv, ciphertext)
"""
cipher = _calculate_aes_cipher(key)
if not iv:
iv = rand_bytes(16)
elif len(iv) != 16:
raise ValueError(pretty_message(
'''
iv must be 16 bytes long - is %s
''',
len(iv)
))
return (iv, _encrypt(cipher, key, data, iv, True))
def aes_cbc_pkcs7_decrypt(key, data, iv):
"""
Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key
:param key:
The encryption key - a byte string either 16, 24 or 32 bytes long
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string 16-bytes long
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the plaintext
"""
cipher = _calculate_aes_cipher(key)
if len(iv) != 16:
raise ValueError(pretty_message(
'''
iv must be 16 bytes long - is %s
''',
len(iv)
))
return _decrypt(cipher, key, data, iv, True)
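# Hedged usage sketch (not part of the original module): with PKCS#7 padding
# the plaintext length is unrestricted, and passing None for the IV lets the
# encrypt call generate one, which is why it is returned alongside the data.
def _example_aes_cbc_pkcs7_roundtrip():
    key = b'\x01' * 16                   # 128-bit key, made-up value
    plaintext = b'any length plaintext'
    iv, ciphertext = aes_cbc_pkcs7_encrypt(key, plaintext, None)
    return aes_cbc_pkcs7_decrypt(key, ciphertext, iv) == plaintext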
def _calculate_aes_cipher(key):
"""
Determines if the key is a valid AES 128, 192 or 256 key
:param key:
A byte string of the key to use
:raises:
ValueError - when an invalid key is provided
:return:
A unicode string of the AES variation - "aes128", "aes192" or "aes256"
"""
if len(key) not in [16, 24, 32]:
raise ValueError(pretty_message(
'''
key must be either 16, 24 or 32 bytes (128, 192 or 256 bits)
long - is %s
''',
len(key)
))
if len(key) == 16:
cipher = 'aes128'
elif len(key) == 24:
cipher = 'aes192'
elif len(key) == 32:
cipher = 'aes256'
return cipher
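# Tiny illustrative check (not part of the original module): the key length
# alone selects the AES variant string handed to _encrypt/_decrypt below.
def _example_calculate_aes_cipher():
    # -> ['aes128', 'aes192', 'aes256']
    return [_calculate_aes_cipher(b'\x00' * n) for n in (16, 24, 32)]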
def rc4_encrypt(key, data):
"""
Encrypts plaintext using RC4 with a 40-128 bit key
:param key:
The encryption key - a byte string 5-16 bytes long
:param data:
The plaintext - a byte string
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the ciphertext
"""
if len(key) < 5 or len(key) > 16:
raise ValueError(pretty_message(
'''
key must be 5 to 16 bytes (40 to 128 bits) long - is %s
''',
len(key)
))
return _encrypt('rc4', key, data, None, None)
def rc4_decrypt(key, data):
"""
Decrypts RC4 ciphertext using a 40-128 bit key
:param key:
The encryption key - a byte string 5-16 bytes long
:param data:
The ciphertext - a byte string
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the plaintext
"""
if len(key) < 5 or len(key) > 16:
raise ValueError(pretty_message(
'''
key must be 5 to 16 bytes (40 to 128 bits) long - is %s
''',
len(key)
))
return _decrypt('rc4', key, data, None, None)
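# Hedged sketch (not part of the original module): RC4 is a stream cipher, so
# the helpers above take no IV or padding argument; key and message made up.
def _example_rc4_roundtrip():
    key = b'secretkey'                   # 9 bytes, within the 5-16 byte range
    ciphertext = rc4_encrypt(key, b'plaintext')
    return rc4_decrypt(key, ciphertext) == b'plaintext'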
def rc2_cbc_pkcs5_encrypt(key, data, iv):
"""
Encrypts plaintext using RC2 in CBC mode with a 40-128 bit key and PKCS#5
padding.
:param key:
The encryption key - a byte string 8 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string 8-bytes long or None
to generate an IV
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A tuple of two byte strings (iv, ciphertext)
"""
if len(key) < 5 or len(key) > 16:
raise ValueError(pretty_message(
'''
key must be 5 to 16 bytes (40 to 128 bits) long - is %s
''',
len(key)
))
if not iv:
iv = rand_bytes(8)
elif len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
return (iv, _encrypt('rc2', key, data, iv, True))
def rc2_cbc_pkcs5_decrypt(key, data, iv):
"""
    Decrypts RC2 ciphertext in CBC mode using a 40-128 bit key and PKCS#5
padding.
:param key:
The encryption key - a byte string 8 bytes long
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string 8 bytes long
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the plaintext
"""
if len(key) < 5 or len(key) > 16:
raise ValueError(pretty_message(
'''
key must be 5 to 16 bytes (40 to 128 bits) long - is %s
''',
len(key)
))
if len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
return _decrypt('rc2', key, data, iv, True)
def tripledes_cbc_pkcs5_encrypt(key, data, iv):
"""
    Encrypts plaintext using 3DES in CBC mode with either the 2 or 3 key
variant (16 or 24 byte long key) and PKCS#5 padding.
:param key:
The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string 8-bytes long or None
to generate an IV
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A tuple of two byte strings (iv, ciphertext)
"""
if len(key) != 16 and len(key) != 24:
raise ValueError(pretty_message(
'''
key must be 16 bytes (2 key) or 24 bytes (3 key) long - %s
''',
len(key)
))
if not iv:
iv = rand_bytes(8)
elif len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - %s
''',
len(iv)
))
cipher = 'tripledes_3key'
    # Expand the 2-key form to the actual 24-byte key string used by the cipher
if len(key) == 16:
key = key + key[0:8]
cipher = 'tripledes_2key'
return (iv, _encrypt(cipher, key, data, iv, True))
def tripledes_cbc_pkcs5_decrypt(key, data, iv):
"""
Decrypts 3DES ciphertext in CBC mode using either the 2 or 3 key variant
(16 or 24 byte long key) and PKCS#5 padding.
:param key:
The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string 8-bytes long
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the plaintext
"""
if len(key) != 16 and len(key) != 24:
raise ValueError(pretty_message(
'''
key must be 16 bytes (2 key) or 24 bytes (3 key) long - is %s
''',
len(key)
))
if len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
cipher = 'tripledes_3key'
    # Expand the 2-key form to the actual 24-byte key string used by the cipher
if len(key) == 16:
key = key + key[0:8]
cipher = 'tripledes_2key'
return _decrypt(cipher, key, data, iv, True)
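# Hedged sketch (not part of the original module): a 16-byte key selects the
# 2-key variant, which the functions above expand to 24 bytes (K1 + K2 + K1)
# before handing it to OpenSSL; the key and message are made-up values.
def _example_tripledes_2key_roundtrip():
    key = b'0123456789abcdef'            # 16 bytes -> 2-key mode
    iv, ciphertext = tripledes_cbc_pkcs5_encrypt(key, b'secret data', None)
    return tripledes_cbc_pkcs5_decrypt(key, ciphertext, iv) == b'secret data'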
def des_cbc_pkcs5_encrypt(key, data, iv):
"""
Encrypts plaintext using DES in CBC mode with a 56 bit key and PKCS#5
padding.
:param key:
        The encryption key - a byte string 8 bytes long (includes parity bits)
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string 8-bytes long or None
to generate an IV
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A tuple of two byte strings (iv, ciphertext)
"""
if len(key) != 8:
raise ValueError(pretty_message(
'''
key must be 8 bytes (56 bits + 8 parity bits) long - is %s
''',
len(key)
))
if not iv:
iv = rand_bytes(8)
elif len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
return (iv, _encrypt('des', key, data, iv, True))
def des_cbc_pkcs5_decrypt(key, data, iv):
"""
Decrypts DES ciphertext in CBC mode using a 56 bit key and PKCS#5 padding.
:param key:
        The encryption key - a byte string 8 bytes long (includes parity bits)
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string 8-bytes long
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the plaintext
"""
if len(key) != 8:
raise ValueError(pretty_message(
'''
key must be 8 bytes (56 bits + 8 parity bits) long - is %s
''',
len(key)
))
if len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
return _decrypt('des', key, data, iv, True)
def _encrypt(cipher, key, data, iv, padding):
"""
Encrypts plaintext
:param cipher:
A unicode string of "aes128", "aes192", "aes256", "des",
"tripledes_2key", "tripledes_3key", "rc2", "rc4"
:param key:
The encryption key - a byte string 5-32 bytes long
:param data:
The plaintext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
Boolean, if padding should be used - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the ciphertext
"""
if not isinstance(key, byte_cls):
raise TypeError(pretty_message(
'''
key must be a byte string, not %s
''',
type_name(key)
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
if cipher != 'rc4' and not isinstance(iv, byte_cls):
raise TypeError(pretty_message(
'''
iv must be a byte string, not %s
''',
type_name(iv)
))
if cipher != 'rc4' and not padding:
# AES in CBC mode can be allowed with no padding if
# the data is an exact multiple of the key size
aes128_no_padding = (
cipher == 'aes128' and
padding is False and
len(data) % 16 == 0
)
aes192_no_padding = (
cipher == 'aes192' and
padding is False and
len(data) % 24 == 0
)
aes256_no_padding = (
cipher == 'aes256' and
padding is False and
len(data) % 32 == 0
)
if aes128_no_padding is False and aes192_no_padding is False and aes256_no_padding is False:
raise ValueError('padding must be specified')
evp_cipher_ctx = None
try:
evp_cipher_ctx = libcrypto.EVP_CIPHER_CTX_new()
if is_null(evp_cipher_ctx):
handle_openssl_error(0)
evp_cipher, buffer_size = _setup_evp_encrypt_decrypt(cipher, data)
if iv is None:
iv = null()
if cipher in set(['rc2', 'rc4']):
res = libcrypto.EVP_EncryptInit_ex(evp_cipher_ctx, evp_cipher, null(), null(), null())
handle_openssl_error(res)
res = libcrypto.EVP_CIPHER_CTX_set_key_length(evp_cipher_ctx, len(key))
handle_openssl_error(res)
if cipher == 'rc2':
res = libcrypto.EVP_CIPHER_CTX_ctrl(
evp_cipher_ctx,
LibcryptoConst.EVP_CTRL_SET_RC2_KEY_BITS,
len(key) * 8,
null()
)
handle_openssl_error(res)
evp_cipher = null()
res = libcrypto.EVP_EncryptInit_ex(evp_cipher_ctx, evp_cipher, null(), key, iv)
handle_openssl_error(res)
if padding is not None:
res = libcrypto.EVP_CIPHER_CTX_set_padding(evp_cipher_ctx, int(padding))
handle_openssl_error(res)
buffer = buffer_from_bytes(buffer_size)
output_length = new(libcrypto, 'int *')
res = libcrypto.EVP_EncryptUpdate(evp_cipher_ctx, buffer, output_length, data, len(data))
handle_openssl_error(res)
output = bytes_from_buffer(buffer, deref(output_length))
res = libcrypto.EVP_EncryptFinal_ex(evp_cipher_ctx, buffer, output_length)
handle_openssl_error(res)
output += bytes_from_buffer(buffer, deref(output_length))
return output
finally:
if evp_cipher_ctx:
libcrypto.EVP_CIPHER_CTX_free(evp_cipher_ctx)
def _decrypt(cipher, key, data, iv, padding):
"""
Decrypts AES/RC4/RC2/3DES/DES ciphertext
:param cipher:
A unicode string of "aes128", "aes192", "aes256", "des",
"tripledes_2key", "tripledes_3key", "rc2", "rc4"
:param key:
The encryption key - a byte string 5-32 bytes long
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string - unused for RC4
:param padding:
Boolean, if padding should be used - unused for RC4
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the plaintext
"""
if not isinstance(key, byte_cls):
raise TypeError(pretty_message(
'''
key must be a byte string, not %s
''',
type_name(key)
))
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
if cipher != 'rc4' and not isinstance(iv, byte_cls):
raise TypeError(pretty_message(
'''
iv must be a byte string, not %s
''',
type_name(iv)
))
if cipher != 'rc4' and padding is None:
raise ValueError('padding must be specified')
evp_cipher_ctx = None
try:
evp_cipher_ctx = libcrypto.EVP_CIPHER_CTX_new()
if is_null(evp_cipher_ctx):
handle_openssl_error(0)
evp_cipher, buffer_size = _setup_evp_encrypt_decrypt(cipher, data)
if iv is None:
iv = null()
if cipher in set(['rc2', 'rc4']):
res = libcrypto.EVP_DecryptInit_ex(evp_cipher_ctx, evp_cipher, null(), null(), null())
handle_openssl_error(res)
res = libcrypto.EVP_CIPHER_CTX_set_key_length(evp_cipher_ctx, len(key))
handle_openssl_error(res)
if cipher == 'rc2':
res = libcrypto.EVP_CIPHER_CTX_ctrl(
evp_cipher_ctx,
LibcryptoConst.EVP_CTRL_SET_RC2_KEY_BITS,
len(key) * 8,
null()
)
handle_openssl_error(res)
evp_cipher = null()
res = libcrypto.EVP_DecryptInit_ex(evp_cipher_ctx, evp_cipher, null(), key, iv)
handle_openssl_error(res)
if padding is not None:
res = libcrypto.EVP_CIPHER_CTX_set_padding(evp_cipher_ctx, int(padding))
handle_openssl_error(res)
buffer = buffer_from_bytes(buffer_size)
output_length = new(libcrypto, 'int *')
res = libcrypto.EVP_DecryptUpdate(evp_cipher_ctx, buffer, output_length, data, len(data))
handle_openssl_error(res)
output = bytes_from_buffer(buffer, deref(output_length))
res = libcrypto.EVP_DecryptFinal_ex(evp_cipher_ctx, buffer, output_length)
handle_openssl_error(res)
output += bytes_from_buffer(buffer, deref(output_length))
return output
finally:
if evp_cipher_ctx:
libcrypto.EVP_CIPHER_CTX_free(evp_cipher_ctx)
def _setup_evp_encrypt_decrypt(cipher, data):
"""
Creates an EVP_CIPHER pointer object and determines the buffer size
necessary for the data provided.
:param cipher:
A unicode string of "aes128", "aes192", "aes256", "des",
"tripledes_2key", "tripledes_3key", "rc2", "rc4"
:param data:
The plaintext or ciphertext as a byte string
:return:
A 2-element tuple with the first element being an EVP_CIPHER pointer
and the second being an integer that is the required buffer size
"""
evp_cipher = {
'aes128': libcrypto.EVP_aes_128_cbc,
'aes192': libcrypto.EVP_aes_192_cbc,
'aes256': libcrypto.EVP_aes_256_cbc,
'rc2': libcrypto.EVP_rc2_cbc,
'rc4': libcrypto.EVP_rc4,
'des': libcrypto.EVP_des_cbc,
'tripledes_2key': libcrypto.EVP_des_ede_cbc,
'tripledes_3key': libcrypto.EVP_des_ede3_cbc,
}[cipher]()
if cipher == 'rc4':
buffer_size = len(data)
else:
block_size = {
'aes128': 16,
'aes192': 16,
'aes256': 16,
'rc2': 8,
'des': 8,
'tripledes_2key': 8,
'tripledes_3key': 8,
}[cipher]
buffer_size = block_size * int(math.ceil(len(data) / block_size))
return (evp_cipher, buffer_size)
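# Illustrative sketch (not part of the library above): the buffer sizing rule
# from _setup_evp_encrypt_decrypt, restated as a standalone runnable helper.
# For the rc4 stream cipher the output buffer simply matches the input length;
# for the block ciphers the input length is rounded up to a whole number of
# blocks. The names _BLOCK_SIZES and _expected_buffer_size are hypothetical
# and exist only for this sketch.
import math

_BLOCK_SIZES = {
    'aes128': 16, 'aes192': 16, 'aes256': 16,
    'rc2': 8, 'des': 8, 'tripledes_2key': 8, 'tripledes_3key': 8,
}

def _expected_buffer_size(cipher, data_length):
    """Mirror the buffer_size computation for a cipher and input length."""
    if cipher == 'rc4':
        return data_length
    block_size = _BLOCK_SIZES[cipher]
    # float division so the rounding behaves the same on Python 2 and 3
    return block_size * int(math.ceil(data_length / float(block_size)))

# e.g. 10 bytes of AES input still needs a full 16-byte block of output space
assert _expected_buffer_size('rc4', 10) == 10
assert _expected_buffer_size('aes128', 10) == 16
assert _expected_buffer_size('tripledes_3key', 17) == 24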
|
|
#!/usr/bin/python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import unittest2
from unittest2 import SkipTest
from uuid import uuid4
import time
from six.moves import range
from test.functional import check_response, retry, requires_acls, \
requires_policies
import test.functional as tf
def setUpModule():
tf.setup_package()
def tearDownModule():
tf.teardown_package()
class TestObject(unittest2.TestCase):
def setUp(self):
if tf.skip:
raise SkipTest
self.container = uuid4().hex
self.containers = []
self._create_container(self.container)
self._create_container(self.container, use_account=2)
self.obj = uuid4().hex
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, self.obj), 'test',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def _create_container(self, name=None, headers=None, use_account=1):
if not name:
name = uuid4().hex
self.containers.append(name)
headers = headers or {}
def put(url, token, parsed, conn, name):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('PUT', parsed.path + '/' + name, '',
new_headers)
return check_response(conn)
resp = retry(put, name, use_account=use_account)
resp.read()
self.assertEqual(resp.status, 201)
# With keystoneauth we need the accounts to have had the project
# domain id persisted as sysmeta prior to testing ACLs. This may
# not be the case if, for example, the account was created using
# a request with reseller_admin role, when project domain id may
# not have been known. So we ensure that the project domain id is
# in sysmeta by making a POST to the accounts using an admin role.
def post(url, token, parsed, conn):
conn.request('POST', parsed.path, '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(post, use_account=use_account)
resp.read()
self.assertEqual(resp.status, 204)
return name
def tearDown(self):
if tf.skip:
raise SkipTest
# get list of objects in container
def get(url, token, parsed, conn, container):
conn.request(
'GET', parsed.path + '/' + container + '?format=json', '',
{'X-Auth-Token': token})
return check_response(conn)
# delete an object
def delete(url, token, parsed, conn, container, obj):
conn.request(
'DELETE', '/'.join([parsed.path, container, obj['name']]), '',
{'X-Auth-Token': token})
return check_response(conn)
for container in self.containers:
while True:
resp = retry(get, container)
body = resp.read()
if resp.status == 404:
break
self.assertTrue(resp.status // 100 == 2, resp.status)
objs = json.loads(body)
if not objs:
break
for obj in objs:
resp = retry(delete, container, obj)
resp.read()
self.assertIn(resp.status, (204, 404))
# delete the container
def delete(url, token, parsed, conn, name):
conn.request('DELETE', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_response(conn)
for container in self.containers:
resp = retry(delete, container)
resp.read()
self.assertIn(resp.status, (204, 404))
def test_if_none_match(self):
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, 'if_none_match_test'), '',
{'X-Auth-Token': token,
'Content-Length': '0',
'If-None-Match': '*'})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 412)
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, 'if_none_match_test'), '',
{'X-Auth-Token': token,
'Content-Length': '0',
'If-None-Match': 'somethingelse'})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 400)
def test_too_small_x_timestamp(self):
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
'too_small_x_timestamp'),
'', {'X-Auth-Token': token,
'Content-Length': '0',
'X-Timestamp': '-1'})
return check_response(conn)
def head(url, token, parsed, conn):
conn.request('HEAD', '%s/%s/%s' % (parsed.path, self.container,
'too_small_x_timestamp'),
'', {'X-Auth-Token': token,
'Content-Length': '0'})
return check_response(conn)
ts_before = time.time()
resp = retry(put)
body = resp.read()
ts_after = time.time()
if resp.status == 400:
# shunt_inbound_x_timestamp must be false
self.assertIn(
'X-Timestamp should be a UNIX timestamp float value', body)
else:
self.assertEqual(resp.status, 201)
self.assertEqual(body, '')
resp = retry(head)
resp.read()
self.assertGreater(float(resp.headers['x-timestamp']), ts_before)
self.assertLess(float(resp.headers['x-timestamp']), ts_after)
def test_too_big_x_timestamp(self):
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
'too_big_x_timestamp'),
'', {'X-Auth-Token': token,
'Content-Length': '0',
'X-Timestamp': '99999999999.9999999999'})
return check_response(conn)
def head(url, token, parsed, conn):
conn.request('HEAD', '%s/%s/%s' % (parsed.path, self.container,
'too_big_x_timestamp'),
'', {'X-Auth-Token': token,
'Content-Length': '0'})
return check_response(conn)
ts_before = time.time()
resp = retry(put)
body = resp.read()
ts_after = time.time()
if resp.status == 400:
# shunt_inbound_x_timestamp must be false
self.assertIn(
'X-Timestamp should be a UNIX timestamp float value', body)
else:
self.assertEqual(resp.status, 201)
self.assertEqual(body, '')
resp = retry(head)
resp.read()
self.assertGreater(float(resp.headers['x-timestamp']), ts_before)
self.assertLess(float(resp.headers['x-timestamp']), ts_after)
def test_x_delete_after(self):
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
'x_delete_after'),
'', {'X-Auth-Token': token,
'Content-Length': '0',
'X-Delete-After': '1'})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def get(url, token, parsed, conn):
conn.request(
'GET',
'%s/%s/%s' % (parsed.path, self.container, 'x_delete_after'),
'',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get)
resp.read()
count = 0
while resp.status == 200 and count < 10:
resp = retry(get)
resp.read()
count += 1
time.sleep(1)
self.assertEqual(resp.status, 404)
# To avoid an error during the object deletion in tearDown(),
# the object is added again.
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def test_x_delete_at(self):
def put(url, token, parsed, conn):
dt = datetime.datetime.now()
epoch = time.mktime(dt.timetuple())
delete_time = str(int(epoch) + 3)
conn.request(
'PUT',
'%s/%s/%s' % (parsed.path, self.container, 'x_delete_at'),
'',
{'X-Auth-Token': token,
'Content-Length': '0',
'X-Delete-At': delete_time})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def get(url, token, parsed, conn):
conn.request(
'GET',
'%s/%s/%s' % (parsed.path, self.container, 'x_delete_at'),
'',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get)
resp.read()
count = 0
while resp.status == 200 and count < 10:
resp = retry(get)
resp.read()
count += 1
time.sleep(1)
self.assertEqual(resp.status, 404)
# To avoid an error during the object deletion in tearDown(),
# the object is added again.
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def test_non_integer_x_delete_after(self):
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
'non_integer_x_delete_after'),
'', {'X-Auth-Token': token,
'Content-Length': '0',
'X-Delete-After': '*'})
return check_response(conn)
resp = retry(put)
body = resp.read()
self.assertEqual(resp.status, 400)
self.assertEqual(body, 'Non-integer X-Delete-After')
def test_non_integer_x_delete_at(self):
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
'non_integer_x_delete_at'),
'', {'X-Auth-Token': token,
'Content-Length': '0',
'X-Delete-At': '*'})
return check_response(conn)
resp = retry(put)
body = resp.read()
self.assertEqual(resp.status, 400)
self.assertEqual(body, 'Non-integer X-Delete-At')
def test_x_delete_at_in_the_past(self):
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (parsed.path, self.container,
'x_delete_at_in_the_past'),
'', {'X-Auth-Token': token,
'Content-Length': '0',
'X-Delete-At': '0'})
return check_response(conn)
resp = retry(put)
body = resp.read()
self.assertEqual(resp.status, 400)
self.assertEqual(body, 'X-Delete-At in past')
def test_copy_object(self):
if tf.skip:
raise SkipTest
source = '%s/%s' % (self.container, self.obj)
dest = '%s/%s' % (self.container, 'test_copy')
# get contents of source
def get_source(url, token, parsed, conn):
conn.request('GET',
'%s/%s' % (parsed.path, source),
'', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get_source)
source_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(source_contents, 'test')
# copy source to dest with X-Copy-From
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s' % (parsed.path, dest), '',
{'X-Auth-Token': token,
'Content-Length': '0',
'X-Copy-From': source})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# contents of dest should be the same as source
def get_dest(url, token, parsed, conn):
conn.request('GET',
'%s/%s' % (parsed.path, dest),
'', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get_dest)
dest_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(dest_contents, source_contents)
# delete the copy
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s' % (parsed.path, dest), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertIn(resp.status, (204, 404))
# verify dest does not exist
resp = retry(get_dest)
resp.read()
self.assertEqual(resp.status, 404)
# copy source to dest with COPY
def copy(url, token, parsed, conn):
conn.request('COPY', '%s/%s' % (parsed.path, source), '',
{'X-Auth-Token': token,
'Destination': dest})
return check_response(conn)
resp = retry(copy)
resp.read()
self.assertEqual(resp.status, 201)
# contents of dest should be the same as source
resp = retry(get_dest)
dest_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(dest_contents, source_contents)
# copy source to dest with COPY and range
def copy(url, token, parsed, conn):
conn.request('COPY', '%s/%s' % (parsed.path, source), '',
{'X-Auth-Token': token,
'Destination': dest,
'Range': 'bytes=1-2'})
return check_response(conn)
resp = retry(copy)
resp.read()
self.assertEqual(resp.status, 201)
# contents of dest should be the same as source
resp = retry(get_dest)
dest_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(dest_contents, source_contents[1:3])
# delete the copy
resp = retry(delete)
resp.read()
self.assertIn(resp.status, (204, 404))
def test_copy_between_accounts(self):
if tf.skip:
raise SkipTest
source = '%s/%s' % (self.container, self.obj)
dest = '%s/%s' % (self.container, 'test_copy')
# get contents of source
def get_source(url, token, parsed, conn):
conn.request('GET',
'%s/%s' % (parsed.path, source),
'', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get_source)
source_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(source_contents, 'test')
acct = tf.parsed[0].path.split('/', 2)[2]
# copy source to dest with X-Copy-From-Account
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s' % (parsed.path, dest), '',
{'X-Auth-Token': token,
'Content-Length': '0',
'X-Copy-From-Account': acct,
'X-Copy-From': source})
return check_response(conn)
# try to put, will not succeed
# user does not have permissions to read from source
resp = retry(put, use_account=2)
self.assertEqual(resp.status, 403)
# add acl to allow reading from source
def post(url, token, parsed, conn):
conn.request('POST', '%s/%s' % (parsed.path, self.container), '',
{'X-Auth-Token': token,
'X-Container-Read': tf.swift_test_perm[1]})
return check_response(conn)
resp = retry(post)
self.assertEqual(resp.status, 204)
# retry previous put, now should succeed
resp = retry(put, use_account=2)
self.assertEqual(resp.status, 201)
# contents of dest should be the same as source
def get_dest(url, token, parsed, conn):
conn.request('GET',
'%s/%s' % (parsed.path, dest),
'', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get_dest, use_account=2)
dest_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(dest_contents, source_contents)
# delete the copy
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s' % (parsed.path, dest), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete, use_account=2)
resp.read()
self.assertIn(resp.status, (204, 404))
# verify dest does not exist
resp = retry(get_dest, use_account=2)
resp.read()
self.assertEqual(resp.status, 404)
acct_dest = tf.parsed[1].path.split('/', 2)[2]
# copy source to dest with COPY
def copy(url, token, parsed, conn):
conn.request('COPY', '%s/%s' % (parsed.path, source), '',
{'X-Auth-Token': token,
'Destination-Account': acct_dest,
'Destination': dest})
return check_response(conn)
# try to copy, will not succeed
# user does not have permissions to write to destination
resp = retry(copy)
resp.read()
self.assertEqual(resp.status, 403)
# add acl to allow write to destination
def post(url, token, parsed, conn):
conn.request('POST', '%s/%s' % (parsed.path, self.container), '',
{'X-Auth-Token': token,
'X-Container-Write': tf.swift_test_perm[0]})
return check_response(conn)
resp = retry(post, use_account=2)
self.assertEqual(resp.status, 204)
# now copy will succeed
resp = retry(copy)
resp.read()
self.assertEqual(resp.status, 201)
# contents of dest should be the same as source
resp = retry(get_dest, use_account=2)
dest_contents = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(dest_contents, source_contents)
# delete the copy
resp = retry(delete, use_account=2)
resp.read()
self.assertIn(resp.status, (204, 404))
def test_public_object(self):
if tf.skip:
raise SkipTest
def get(url, token, parsed, conn):
conn.request('GET',
'%s/%s/%s' % (parsed.path, self.container, self.obj))
return check_response(conn)
try:
resp = retry(get)
raise Exception('Should not have been able to GET')
except Exception as err:
self.assertTrue(str(err).startswith('No result after '))
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.container, '',
{'X-Auth-Token': token,
'X-Container-Read': '.r:*'})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
resp = retry(get)
resp.read()
self.assertEqual(resp.status, 200)
def post(url, token, parsed, conn):
conn.request('POST', parsed.path + '/' + self.container, '',
{'X-Auth-Token': token, 'X-Container-Read': ''})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
try:
resp = retry(get)
raise Exception('Should not have been able to GET')
except Exception as err:
self.assertTrue(str(err).startswith('No result after '))
def test_private_object(self):
if tf.skip or tf.skip3:
raise SkipTest
# Ensure we can't access the object with the third account
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/%s' % (
parsed.path, self.container, self.obj), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# create a shared container writable by account3
shared_container = uuid4().hex
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s' % (
parsed.path, shared_container), '',
{'X-Auth-Token': token,
'X-Container-Read': tf.swift_test_perm[2],
'X-Container-Write': tf.swift_test_perm[2]})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# verify third account can not copy from private container
def copy(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, shared_container, 'private_object'), '',
{'X-Auth-Token': token,
'Content-Length': '0',
'X-Copy-From': '%s/%s' % (self.container, self.obj)})
return check_response(conn)
resp = retry(copy, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# verify third account can write "obj1" to shared container
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), 'test',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(put, use_account=3)
resp.read()
self.assertEqual(resp.status, 201)
# verify third account can copy "obj1" to shared container
def copy2(url, token, parsed, conn):
conn.request('COPY', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), '',
{'X-Auth-Token': token,
'Destination': '%s/%s' % (shared_container, 'obj1')})
return check_response(conn)
resp = retry(copy2, use_account=3)
resp.read()
self.assertEqual(resp.status, 201)
# verify third account STILL can not copy from private container
def copy3(url, token, parsed, conn):
conn.request('COPY', '%s/%s/%s' % (
parsed.path, self.container, self.obj), '',
{'X-Auth-Token': token,
'Destination': '%s/%s' % (shared_container,
'private_object')})
return check_response(conn)
resp = retry(copy3, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# clean up "obj1"
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertIn(resp.status, (204, 404))
# clean up shared_container
def delete(url, token, parsed, conn):
conn.request('DELETE',
parsed.path + '/' + shared_container, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertIn(resp.status, (204, 404))
def test_container_write_only(self):
if tf.skip or tf.skip3:
raise SkipTest
# Ensure we can't access the object with the third account
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/%s' % (
parsed.path, self.container, self.obj), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(get, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# create a shared container writable (but not readable) by account3
shared_container = uuid4().hex
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s' % (
parsed.path, shared_container), '',
{'X-Auth-Token': token,
'X-Container-Write': tf.swift_test_perm[2]})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# verify third account can write "obj1" to shared container
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), 'test',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(put, use_account=3)
resp.read()
self.assertEqual(resp.status, 201)
# verify third account cannot copy "obj1" to shared container
def copy(url, token, parsed, conn):
conn.request('COPY', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), '',
{'X-Auth-Token': token,
'Destination': '%s/%s' % (shared_container, 'obj2')})
return check_response(conn)
resp = retry(copy, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# verify third account can POST to "obj1" in shared container
def post(url, token, parsed, conn):
conn.request('POST', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), '',
{'X-Auth-Token': token,
'X-Object-Meta-Color': 'blue'})
return check_response(conn)
resp = retry(post, use_account=3)
resp.read()
self.assertEqual(resp.status, 202)
# verify third account can DELETE from shared container
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s/%s' % (
parsed.path, shared_container, 'obj1'), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete, use_account=3)
resp.read()
self.assertIn(resp.status, (204, 404))
# clean up shared_container
def delete(url, token, parsed, conn):
conn.request('DELETE',
parsed.path + '/' + shared_container, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertIn(resp.status, (204, 404))
@requires_acls
def test_read_only(self):
if tf.skip3:
raise SkipTest
def get_listing(url, token, parsed, conn):
conn.request('GET', '%s/%s' % (parsed.path, self.container), '',
{'X-Auth-Token': token})
return check_response(conn)
def post_account(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def get(url, token, parsed, conn, name):
conn.request('GET', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
def put(url, token, parsed, conn, name):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, name), 'test',
{'X-Auth-Token': token})
return check_response(conn)
def delete(url, token, parsed, conn, name):
conn.request('DELETE', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
# cannot list objects
resp = retry(get_listing, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# cannot get object
resp = retry(get, self.obj, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# grant read-only access
acl_user = tf.swift_test_user[2]
acl = {'read-only': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# can list objects
resp = retry(get_listing, use_account=3)
listing = resp.read()
self.assertEqual(resp.status, 200)
self.assertIn(self.obj, listing)
# can get object
resp = retry(get, self.obj, use_account=3)
body = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(body, 'test')
# can not put an object
obj_name = str(uuid4())
resp = retry(put, obj_name, use_account=3)
body = resp.read()
self.assertEqual(resp.status, 403)
# can not delete an object
resp = retry(delete, self.obj, use_account=3)
body = resp.read()
self.assertEqual(resp.status, 403)
# sanity check the listing again with account3
resp = retry(get_listing, use_account=3)
listing = resp.read()
self.assertEqual(resp.status, 200)
self.assertNotIn(obj_name, listing)
self.assertIn(self.obj, listing)
@requires_acls
def test_read_write(self):
if tf.skip3:
raise SkipTest
def get_listing(url, token, parsed, conn):
conn.request('GET', '%s/%s' % (parsed.path, self.container), '',
{'X-Auth-Token': token})
return check_response(conn)
def post_account(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def get(url, token, parsed, conn, name):
conn.request('GET', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
def put(url, token, parsed, conn, name):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, name), 'test',
{'X-Auth-Token': token})
return check_response(conn)
def delete(url, token, parsed, conn, name):
conn.request('DELETE', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
# cannot list objects
resp = retry(get_listing, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# cannot get object
resp = retry(get, self.obj, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# grant read-write access
acl_user = tf.swift_test_user[2]
acl = {'read-write': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# can list objects
resp = retry(get_listing, use_account=3)
listing = resp.read()
self.assertEqual(resp.status, 200)
self.assertIn(self.obj, listing)
# can get object
resp = retry(get, self.obj, use_account=3)
body = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(body, 'test')
# can put an object
obj_name = str(uuid4())
resp = retry(put, obj_name, use_account=3)
body = resp.read()
self.assertEqual(resp.status, 201)
# can delete an object
resp = retry(delete, self.obj, use_account=3)
body = resp.read()
self.assertIn(resp.status, (204, 404))
# sanity check the listing again with account3
resp = retry(get_listing, use_account=3)
listing = resp.read()
self.assertEqual(resp.status, 200)
self.assertIn(obj_name, listing)
self.assertNotIn(self.obj, listing)
@requires_acls
def test_admin(self):
if tf.skip3:
raise SkipTest
def get_listing(url, token, parsed, conn):
conn.request('GET', '%s/%s' % (parsed.path, self.container), '',
{'X-Auth-Token': token})
return check_response(conn)
def post_account(url, token, parsed, conn, headers):
new_headers = dict({'X-Auth-Token': token}, **headers)
conn.request('POST', parsed.path, '', new_headers)
return check_response(conn)
def get(url, token, parsed, conn, name):
conn.request('GET', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
def put(url, token, parsed, conn, name):
conn.request('PUT', '%s/%s/%s' % (
parsed.path, self.container, name), 'test',
{'X-Auth-Token': token})
return check_response(conn)
def delete(url, token, parsed, conn, name):
conn.request('DELETE', '%s/%s/%s' % (
parsed.path, self.container, name), '',
{'X-Auth-Token': token})
return check_response(conn)
# cannot list objects
resp = retry(get_listing, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# cannot get object
resp = retry(get, self.obj, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# grant admin access
acl_user = tf.swift_test_user[2]
acl = {'admin': [acl_user]}
headers = {'x-account-access-control': json.dumps(acl)}
resp = retry(post_account, headers=headers, use_account=1)
resp.read()
self.assertEqual(resp.status, 204)
# can list objects
resp = retry(get_listing, use_account=3)
listing = resp.read()
self.assertEqual(resp.status, 200)
self.assertIn(self.obj, listing)
# can get object
resp = retry(get, self.obj, use_account=3)
body = resp.read()
self.assertEqual(resp.status, 200)
self.assertEqual(body, 'test')
# can put an object
obj_name = str(uuid4())
resp = retry(put, obj_name, use_account=3)
body = resp.read()
self.assertEqual(resp.status, 201)
# can delete an object
resp = retry(delete, self.obj, use_account=3)
body = resp.read()
self.assertIn(resp.status, (204, 404))
# sanity check the listing again with account3
resp = retry(get_listing, use_account=3)
listing = resp.read()
self.assertEqual(resp.status, 200)
self.assertIn(obj_name, listing)
self.assertNotIn(self.obj, listing)
def test_manifest(self):
if tf.skip:
raise SkipTest
# Data for the object segments
segments1 = ['one', 'two', 'three', 'four', 'five']
segments2 = ['six', 'seven', 'eight']
segments3 = ['nine', 'ten', 'eleven']
# Upload the first set of segments
def put(url, token, parsed, conn, objnum):
conn.request('PUT', '%s/%s/segments1/%s' % (
parsed.path, self.container, str(objnum)), segments1[objnum],
{'X-Auth-Token': token})
return check_response(conn)
for objnum in range(len(segments1)):
resp = retry(put, objnum)
resp.read()
self.assertEqual(resp.status, 201)
# Upload the manifest
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/manifest' % (
parsed.path, self.container), '', {
'X-Auth-Token': token,
'X-Object-Manifest': '%s/segments1/' % self.container,
'Content-Type': 'text/jibberish', 'Content-Length': '0'})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# Get the manifest (should get all the segments as the body)
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get)
self.assertEqual(resp.read(), ''.join(segments1))
self.assertEqual(resp.status, 200)
self.assertEqual(resp.getheader('content-type'), 'text/jibberish')
# Get with a range at the start of the second segment
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {
'X-Auth-Token': token, 'Range': 'bytes=3-'})
return check_response(conn)
resp = retry(get)
self.assertEqual(resp.read(), ''.join(segments1[1:]))
self.assertEqual(resp.status, 206)
# Get with a range in the middle of the second segment
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {
'X-Auth-Token': token, 'Range': 'bytes=5-'})
return check_response(conn)
resp = retry(get)
self.assertEqual(resp.read(), ''.join(segments1)[5:])
self.assertEqual(resp.status, 206)
# Get with a full start and stop range
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {
'X-Auth-Token': token, 'Range': 'bytes=5-10'})
return check_response(conn)
resp = retry(get)
self.assertEqual(resp.read(), ''.join(segments1)[5:11])
self.assertEqual(resp.status, 206)
# Upload the second set of segments
def put(url, token, parsed, conn, objnum):
conn.request('PUT', '%s/%s/segments2/%s' % (
parsed.path, self.container, str(objnum)), segments2[objnum],
{'X-Auth-Token': token})
return check_response(conn)
for objnum in range(len(segments2)):
resp = retry(put, objnum)
resp.read()
self.assertEqual(resp.status, 201)
# Get the manifest (should still be the first segments of course)
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get)
self.assertEqual(resp.read(), ''.join(segments1))
self.assertEqual(resp.status, 200)
# Update the manifest
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/manifest' % (
parsed.path, self.container), '', {
'X-Auth-Token': token,
'X-Object-Manifest': '%s/segments2/' % self.container,
'Content-Length': '0'})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# Get the manifest (should be the second set of segments now)
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get)
self.assertEqual(resp.read(), ''.join(segments2))
self.assertEqual(resp.status, 200)
if not tf.skip3:
# Ensure we can't access the manifest with the third account
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# Grant access to the third account
def post(url, token, parsed, conn):
conn.request('POST', '%s/%s' % (parsed.path, self.container),
'', {'X-Auth-Token': token,
'X-Container-Read': tf.swift_test_perm[2]})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
# The third account should be able to get the manifest now
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get, use_account=3)
self.assertEqual(resp.read(), ''.join(segments2))
self.assertEqual(resp.status, 200)
# Create another container for the third set of segments
acontainer = uuid4().hex
def put(url, token, parsed, conn):
conn.request('PUT', parsed.path + '/' + acontainer, '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# Upload the third set of segments in the other container
def put(url, token, parsed, conn, objnum):
conn.request('PUT', '%s/%s/segments3/%s' % (
parsed.path, acontainer, str(objnum)), segments3[objnum],
{'X-Auth-Token': token})
return check_response(conn)
for objnum in range(len(segments3)):
resp = retry(put, objnum)
resp.read()
self.assertEqual(resp.status, 201)
# Update the manifest
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/manifest' % (
parsed.path, self.container), '',
{'X-Auth-Token': token,
'X-Object-Manifest': '%s/segments3/' % acontainer,
'Content-Length': '0'})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
# Get the manifest to ensure it's the third set of segments
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get)
self.assertEqual(resp.read(), ''.join(segments3))
self.assertEqual(resp.status, 200)
if not tf.skip3:
# Ensure we can't access the manifest with the third account
# (because the segments are in a protected container even if the
# manifest itself is not).
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get, use_account=3)
resp.read()
self.assertEqual(resp.status, 403)
# Grant access to the third account
def post(url, token, parsed, conn):
conn.request('POST', '%s/%s' % (parsed.path, acontainer),
'', {'X-Auth-Token': token,
'X-Container-Read': tf.swift_test_perm[2]})
return check_response(conn)
resp = retry(post)
resp.read()
self.assertEqual(resp.status, 204)
# The third account should be able to get the manifest now
def get(url, token, parsed, conn):
conn.request('GET', '%s/%s/manifest' % (
parsed.path, self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(get, use_account=3)
self.assertEqual(resp.read(), ''.join(segments3))
self.assertEqual(resp.status, 200)
# Delete the manifest
def delete(url, token, parsed, conn, objnum):
conn.request('DELETE', '%s/%s/manifest' % (
parsed.path,
self.container), '', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete, objnum)
resp.read()
self.assertIn(resp.status, (204, 404))
# Delete the third set of segments
def delete(url, token, parsed, conn, objnum):
conn.request('DELETE', '%s/%s/segments3/%s' % (
parsed.path, acontainer, str(objnum)), '',
{'X-Auth-Token': token})
return check_response(conn)
for objnum in range(len(segments3)):
resp = retry(delete, objnum)
resp.read()
self.assertIn(resp.status, (204, 404))
# Delete the second set of segments
def delete(url, token, parsed, conn, objnum):
conn.request('DELETE', '%s/%s/segments2/%s' % (
parsed.path, self.container, str(objnum)), '',
{'X-Auth-Token': token})
return check_response(conn)
for objnum in range(len(segments2)):
resp = retry(delete, objnum)
resp.read()
self.assertIn(resp.status, (204, 404))
# Delete the first set of segments
def delete(url, token, parsed, conn, objnum):
conn.request('DELETE', '%s/%s/segments1/%s' % (
parsed.path, self.container, str(objnum)), '',
{'X-Auth-Token': token})
return check_response(conn)
for objnum in range(len(segments1)):
resp = retry(delete, objnum)
resp.read()
self.assertIn(resp.status, (204, 404))
# Delete the extra container
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s' % (parsed.path, acontainer), '',
{'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertIn(resp.status, (204, 404))
def test_delete_content_type(self):
if tf.skip:
raise SkipTest
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/hi' % (parsed.path, self.container),
'there', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s/hi' % (parsed.path, self.container),
'', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertIn(resp.status, (204, 404))
self.assertEqual(resp.getheader('Content-Type'),
'text/html; charset=UTF-8')
def test_delete_if_delete_at_bad(self):
if tf.skip:
raise SkipTest
def put(url, token, parsed, conn):
conn.request('PUT',
'%s/%s/hi-delete-bad' % (parsed.path, self.container),
'there', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
resp.read()
self.assertEqual(resp.status, 201)
def delete(url, token, parsed, conn):
conn.request('DELETE', '%s/%s/hi-delete-bad' % (parsed.path, self.container),
'', {'X-Auth-Token': token,
'X-If-Delete-At': 'bad'})
return check_response(conn)
resp = retry(delete)
resp.read()
self.assertEqual(resp.status, 400)
def test_null_name(self):
if tf.skip:
raise SkipTest
def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/abc%%00def' % (
parsed.path,
self.container), 'test', {'X-Auth-Token': token})
return check_response(conn)
resp = retry(put)
if (tf.web_front_end == 'apache2'):
self.assertEqual(resp.status, 404)
else:
self.assertEqual(resp.read(), 'Invalid UTF8 or contains NULL')
self.assertEqual(resp.status, 412)
def test_cors(self):
if tf.skip:
raise SkipTest
try:
strict_cors = tf.cluster_info['swift']['strict_cors_mode']
except KeyError:
raise SkipTest("cors mode is unknown")
def put_cors_cont(url, token, parsed, conn, orig):
conn.request(
'PUT', '%s/%s' % (parsed.path, self.container),
'', {'X-Auth-Token': token,
'X-Container-Meta-Access-Control-Allow-Origin': orig})
return check_response(conn)
def put_obj(url, token, parsed, conn, obj):
conn.request(
'PUT', '%s/%s/%s' % (parsed.path, self.container, obj),
'test', {'X-Auth-Token': token})
return check_response(conn)
def check_cors(url, token, parsed, conn,
method, obj, headers):
if method != 'OPTIONS':
headers['X-Auth-Token'] = token
conn.request(
method, '%s/%s/%s' % (parsed.path, self.container, obj),
'', headers)
return conn.getresponse()
resp = retry(put_cors_cont, '*')
resp.read()
self.assertEqual(resp.status // 100, 2)
resp = retry(put_obj, 'cat')
resp.read()
self.assertEqual(resp.status // 100, 2)
resp = retry(check_cors,
'OPTIONS', 'cat', {'Origin': 'http://m.com'})
self.assertEqual(resp.status, 401)
resp = retry(check_cors,
'OPTIONS', 'cat',
{'Origin': 'http://m.com',
'Access-Control-Request-Method': 'GET'})
self.assertEqual(resp.status, 200)
resp.read()
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEqual(headers.get('access-control-allow-origin'),
'*')
resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com'})
self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEqual(headers.get('access-control-allow-origin'),
'*')
resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com',
'X-Web-Mode': 'True'})
self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEqual(headers.get('access-control-allow-origin'),
'*')
####################
resp = retry(put_cors_cont, 'http://secret.com')
resp.read()
self.assertEqual(resp.status // 100, 2)
resp = retry(check_cors,
'OPTIONS', 'cat',
{'Origin': 'http://m.com',
'Access-Control-Request-Method': 'GET'})
resp.read()
self.assertEqual(resp.status, 401)
if strict_cors:
resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com'})
resp.read()
self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertNotIn('access-control-allow-origin', headers)
resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://secret.com'})
resp.read()
self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEqual(headers.get('access-control-allow-origin'),
'http://secret.com')
else:
resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com'})
resp.read()
self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEqual(headers.get('access-control-allow-origin'),
'http://m.com')
@requires_policies
def test_cross_policy_copy(self):
# create container in first policy
policy = self.policies.select()
container = self._create_container(
headers={'X-Storage-Policy': policy['name']})
obj = uuid4().hex
# create a container in second policy
other_policy = self.policies.exclude(name=policy['name']).select()
other_container = self._create_container(
headers={'X-Storage-Policy': other_policy['name']})
other_obj = uuid4().hex
def put_obj(url, token, parsed, conn, container, obj):
# to keep track of things, use the original path as the body
content = '%s/%s' % (container, obj)
path = '%s/%s' % (parsed.path, content)
conn.request('PUT', path, content, {'X-Auth-Token': token})
return check_response(conn)
# create objects
for c, o in zip((container, other_container), (obj, other_obj)):
resp = retry(put_obj, c, o)
resp.read()
self.assertEqual(resp.status, 201)
def put_copy_from(url, token, parsed, conn, container, obj, source):
dest_path = '%s/%s/%s' % (parsed.path, container, obj)
conn.request('PUT', dest_path, '',
{'X-Auth-Token': token,
'Content-Length': '0',
'X-Copy-From': source})
return check_response(conn)
copy_requests = (
(container, other_obj, '%s/%s' % (other_container, other_obj)),
(other_container, obj, '%s/%s' % (container, obj)),
)
# copy objects
for c, o, source in copy_requests:
resp = retry(put_copy_from, c, o, source)
resp.read()
self.assertEqual(resp.status, 201)
def get_obj(url, token, parsed, conn, container, obj):
path = '%s/%s/%s' % (parsed.path, container, obj)
conn.request('GET', path, '', {'X-Auth-Token': token})
return check_response(conn)
# validate contents, contents should be source
validate_requests = copy_requests
for c, o, body in validate_requests:
resp = retry(get_obj, c, o)
self.assertEqual(resp.status, 200)
self.assertEqual(body, resp.read())
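# Illustrative sketch (not part of the test suite above): the account ACL
# header that test_read_only / test_read_write / test_admin POST to the
# account is simply JSON mapping an access level to a list of users. The
# helper name account_acl_headers and the user name 'tester3' are
# hypothetical and used only for illustration.
import json  # already imported at the top of this module; repeated so the sketch stands alone

def account_acl_headers(level, users):
    """Build the X-Account-Access-Control header for the given access level."""
    assert level in ('read-only', 'read-write', 'admin')
    return {'x-account-access-control': json.dumps({level: list(users)})}

# e.g. grant a hypothetical user read-only access, as in test_read_only above
assert json.loads(
    account_acl_headers('read-only', ['tester3'])['x-account-access-control']
) == {'read-only': ['tester3']}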
if __name__ == '__main__':
unittest2.main()
|
|
"""State unit tests."""
import pytest
from vivid.classes.valueset import ValueSet
from vivid.classes.attribute import Attribute
from vivid.classes.attribute_structure import AttributeStructure
from vivid.classes.attribute_system import AttributeSystem
from vivid.classes.state import State
def test___init__():
"""Test State constructor."""
def test_TypeError(attribute_system, ascriptions={}):
"""Test constructor for TypeErrors with given params."""
with pytest.raises(TypeError) as excinfo:
State(attribute_system, ascriptions)
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
# test no attribute system
test_TypeError(a)
test_TypeError(object)
test_TypeError(None)
# test bad ascriptions
test_TypeError(asys, [])
test_TypeError(asys, None)
test_TypeError(asys, object)
s = State(asys)
assert s._attribute_system == asys
assert s._attribute_system is not asys
s = State(asys, {
('color', 's1'): ['R'],
('color', 's2'): ['B', 'G'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']})
assert s[('color', 's1')] == ValueSet(['R'])
assert s[('color', 's2')] == ValueSet(['G', 'B'])
assert s[('size', 's1')] == ValueSet(['M'])
assert s[('size', 's2')] == ValueSet(['L', 'S'])
def test___eq__():
"""Test == operator."""
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
s = State(asys)
s1 = State(asys, {
('color', 's1'): ['R'],
('color', 's2'): ['B', 'G'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']})
s2 = State(asys, {
('size', 's1'): ['M'],
('color', 's2'): ['B', 'G'],
('color', 's1'): ['R'],
('size', 's2'): ['L', 'S']})
assert s1 == s2
assert not s == s1
s.set_ascription(('color', 's1'), ['R'])
assert not s == s1
s.set_ascription(('color', 's2'), ['B', 'G'])
assert not s == s1
s.set_ascription(('size', 's1'), ['M'])
assert not s == s1
s.set_ascription(('size', 's2'), ['L', 'S'])
assert s == s1
assert s == s1 == s2
def test_total_ordering():
"""Test < operator overloaded for proper extension."""
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
s = State(asys)
ascr = {
('color', 's1'): ['R'],
('color', 's2'): ['B', 'G'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']}
ascr2 = {
('color', 's1'): ['R'],
('color', 's2'): ['G'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']}
s1 = State(asys, ascr)
s2 = State(asys, ascr2)
assert not s < s
assert not s > s
assert s1 < s
assert s1 <= s
assert s > s1
assert s >= s1
assert s2 < s1 < s
assert s2 <= s2 <= s1 <= s1 <= s <= s
assert s > s1 > s2
assert s >= s >= s1 >= s1 >= s2 >= s2
def test___le__():
"""Test <= operator overloaded for extension."""
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
s = State(asys)
ascr = {
('color', 's1'): ['R'],
('color', 's2'): ['B', 'G'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']}
s1 = State(asys, ascr)
assert s <= s
assert s1 <= s
assert not s <= s1
def test___ne__():
"""Test != operator."""
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
s = State(asys)
s1 = State(asys, {
('color', 's1'): ['R'],
('color', 's2'): ['B', 'G'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']})
s2 = State(asys, {
('size', 's1'): ['M'],
('color', 's2'): ['B', 'G'],
('color', 's1'): ['R'],
('size', 's2'): ['L', 'S']})
assert not s1 != s2
assert s != s1
s.set_ascription(('color', 's1'), ['R'])
assert s != s1
s.set_ascription(('color', 's2'), ['B', 'G'])
assert s != s1
s.set_ascription(('size', 's1'), ['M'])
assert s != s1
s.set_ascription(('size', 's2'), ['L', 'S'])
assert not s != s1
assert not s != s1 != s2
def test___deepcopy__():
"""Test deepcopy"""
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
ascr = {
('color', 's1'): ['R'],
('color', 's2'): ['B', 'G'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']}
s = State(asys, ascr)
from copy import deepcopy
s_copy = deepcopy(s)
assert s == s_copy
assert s is not s_copy
assert s._attribute_system == s_copy._attribute_system
assert s._attribute_system is not s_copy._attribute_system
assert s._ascriptions == s_copy._ascriptions
assert s._ascriptions is not s_copy._ascriptions
def test_set_ascription():
"""Test set_ascription function."""
def test_TypeError(state, ascription, valueset):
"""Test set_ascription for TypeErrors with given params."""
with pytest.raises(TypeError) as excinfo:
state.set_ascription(ascription, valueset)
def test_ValueError(state, ascription, valueset):
"""Test set_ascription for ValueErrors with given params."""
with pytest.raises(ValueError) as excinfo:
state.set_ascription(ascription, valueset)
def test_KeyError(state, ascription, valueset):
"""Test set_ascription for KeyErrors with given params."""
with pytest.raises(KeyError) as excinfo:
state.set_ascription(ascription, valueset)
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
s = State(asys)
# test bad ao_pair types/values
test_TypeError(s, [], ['R'])
test_ValueError(s, (), ['R'])
test_ValueError(s, (1, 2, 3), ['R'])
test_ValueError(s, (1, 2), ['R'])
test_ValueError(s, (1, ''), ['R'])
test_ValueError(s, ('', 1), ['R'])
# test bad types for ValueSet
test_TypeError(s, ('color', 's1'), None)
test_TypeError(s, ('color', 's1'), ())
test_TypeError(s, ('color', 's1'), 'a')
test_TypeError(s, ('color', 's1'), object)
# test empty ValueSet catching
test_ValueError(s, ('color', 's1'), [])
test_ValueError(s, ('color', 's1'), set([]))
test_ValueError(s, ('color', 's1'), ValueSet([]))
# test bad ao-pair keys
test_KeyError(s, ('color', 'bad object'), ['R'])
test_KeyError(s, ('bad label', 's2'), ['R'])
# test nonsubset valuesets
test_ValueError(s, ('color', 's2'), ['a'])
test_ValueError(s, ('color', 's2'), [1])
s.set_ascription(('color', 's2'), ['R'])
assert s[('color', 's2')] == ValueSet(['R'])
# check reversion to superset is possible
s.set_ascription(('color', 's2'), ['R', 'G'])
assert s[('color', 's2')] == ValueSet(['R', 'G'])
s.set_ascription(('size', 's1'), ['M', 'S'])
assert s[('size', 's1')] == ValueSet(['S', 'M'])
def test___getitem__():
"""Test indexing for State"""
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
ascr = {
('color', 's1'): ['R'],
('color', 's2'): ['B', 'G'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']}
s = State(asys, ascr)
assert s[('color', 's1')] == ValueSet(['R'])
assert s[('color', 's2')] == ValueSet(['G', 'B'])
assert s[('size', 's1')] == ValueSet(['M'])
assert s[('size', 's2')] == ValueSet(['L', 'S'])
assert s['color'] == [ValueSet(['R']), ValueSet(['B', 'G'])]
assert s['size'] == [ValueSet(['M']), ValueSet(['L', 'S'])]
def test_add_object():
"""Test add object function to state."""
def test_TypeError(state, obj, ascriptions=None):
"""Test constructor for TypeErrors with given params."""
with pytest.raises(TypeError) as excinfo:
state.add_object(obj, ascriptions)
def test_ValueError(state, obj, ascriptions=None):
"""Test constructor for ValueErrors with given params."""
with pytest.raises(ValueError) as excinfo:
state.add_object(obj, ascriptions)
color = Attribute("color", ['R', 'G', 'B'])
a = AttributeStructure(color)
o = ['s1']
asys = AttributeSystem(a, o)
s = State(asys)
test_TypeError(s, None)
test_TypeError(s, 1)
test_TypeError(s, object)
test_TypeError(s, "")
test_TypeError(s, "a", 1)
test_TypeError(s, "a", object)
test_ValueError(s, "s1")
test_ValueError(s, "a", {"s1": 1})
test_ValueError(s, "a", {("s1"): 1})
test_ValueError(s, "a", {("s1", 's1', 's1'): 1})
test_ValueError(s, "a", {("color", "s1"): 1})
test_ValueError(s, "a", {("s", "a"): 1})
s.add_object("a")
ascr = {("color", "s1"): ValueSet(['R', 'G', 'B']),
("color", "a"): ValueSet(['R', 'G', 'B'])}
assert s._ascriptions == ascr
s = State(asys)
s.add_object("a", {("color", "a"): ['R']})
ascr = {("color", "s1"): ValueSet(['R', 'G', 'B']),
("color", "a"): ValueSet(['R'])}
assert s._ascriptions == ascr
def test_is_valuation():
"""Test is_valuation function."""
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
ascr = {
('color', 's1'): ['R'],
('color', 's2'): ['B', 'G'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']}
s = State(asys, ascr)
assert not s.is_valuation('color')
assert not s.is_valuation('size')
s.set_ascription(('color', 's2'), ['B'])
assert s.is_valuation('color')
s.set_ascription(('size', 's2'), ['L'])
assert s.is_valuation('size')
def test_is_world():
"""Test is_world function."""
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
ascr = {
('color', 's1'): ['R'],
('color', 's2'): ['B', 'G'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']}
s = State(asys, ascr)
assert not s.is_world()
s.set_ascription(('color', 's2'), ['B'])
s.set_ascription(('size', 's2'), ['L'])
assert s.is_world()
def test_get_worlds():
"""Test get_worlds function."""
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
ascr = {
('color', 's1'): ['R'],
('color', 's2'): ['B', 'G'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']}
ascr1 = {
('color', 's1'): ['R'],
('color', 's2'): ['G'],
('size', 's1'): ['M'],
('size', 's2'): ['L']}
ascr2 = {
('color', 's1'): ['R'],
('color', 's2'): ['G'],
('size', 's1'): ['M'],
('size', 's2'): ['S']}
ascr3 = {
('color', 's1'): ['R'],
('color', 's2'): ['B'],
('size', 's1'): ['M'],
('size', 's2'): ['L']}
ascr4 = {
('color', 's1'): ['R'],
('color', 's2'): ['B'],
('size', 's1'): ['M'],
('size', 's2'): ['S']}
s = State(asys, ascr)
w1 = State(asys, ascr1)
w2 = State(asys, ascr2)
w3 = State(asys, ascr3)
w4 = State(asys, ascr4)
worlds = [w1, w2, w3, w4]
for w in s.get_worlds():
assert w in worlds
assert len(s.get_worlds()) == len(worlds)
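# Illustrative aside (not part of the vivid test suite): every world is a total
# refinement of the State's ascriptions, so the number of worlds is the product
# of the ascription ValueSet sizes. For the ascriptions used above that is
# 1 * 2 * 1 * 2 == 4, matching the four worlds w1..w4. The helper name
# _expected_world_count is hypothetical and exists only for this sketch.
def _expected_world_count(ascriptions):
    """Product of the value-set sizes of a plain ascription dict."""
    count = 1
    for values in ascriptions.values():
        count *= len(values)
    return count

assert _expected_world_count({
    ('color', 's1'): ['R'],
    ('color', 's2'): ['B', 'G'],
    ('size', 's1'): ['M'],
    ('size', 's2'): ['L', 'S']}) == 4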
def test_is_disjoint():
"""Test is_disjoint function."""
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
ascr = {
('color', 's1'): ['R'],
('color', 's2'): ['B', 'G'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']}
s1 = State(asys, ascr)
s3 = State(asys, ascr)
length = Attribute("length", [1, 3, 5])
shape = Attribute("shape", ['circle', 'triangle', 'rectangle'])
a = AttributeStructure(length, shape)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
ascr = {
('length', 's1'): [5],
('length', 's2'): [1, 3],
('shape', 's1'): ['circle'],
('shape', 's2'): ['triangle', 'rectangle']}
s2 = State(asys, ascr)
assert s1.is_disjoint(s2)
assert not s1.is_disjoint(s1)
assert not s1.is_disjoint(s3)
def test_is_alternate_extension():
"""Test is_alternate_extension function."""
from copy import deepcopy
color, size = Attribute(
"color", ['R', 'G', 'B']), Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
s = State(asys)
s.set_ascription(('color', 's1'), ['R', 'B'])
s.set_ascription(('size', 's2'), ['M', 'L'])
s1 = deepcopy(s)
s1.set_ascription(('color', 's1'), ['B'])
s1.set_ascription(('size', 's1'), ['S', 'M'])
s1.set_ascription(('color', 's2'), ['B', 'G'])
s2 = deepcopy(s)
s2.set_ascription(('size', 's1'), ['L'])
s2.set_ascription(('size', 's2'), ['L'])
s3 = deepcopy(s)
s3.set_ascription(('color', 's1'), ['R'])
aes = s.get_alternate_extensions(s1, s2, s3)
ae_s5, ae_s6, ae_s4 = aes
for ae in aes:
assert s.is_alternate_extension(ae, s1, s2, s3)
color, size = Attribute(
"color", ['R', 'G', 'B']), Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s']
asys = AttributeSystem(a, o)
s = State(asys)
s1 = deepcopy(s)
s1.set_ascription(('color', 's'), ['B', 'G'])
s1.set_ascription(('size', 's'), ['S'])
aes = s.get_alternate_extensions(s1)
ae_s2, ae_s3 = aes
for ae in aes:
assert s.is_alternate_extension(ae, s1)
def test_get_alternate_extensions():
"""Test get_alternate_extensions function."""
from copy import deepcopy
color, size = Attribute(
"color", ['R', 'G', 'B']), Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
s = State(asys)
s.set_ascription(('color', 's1'), ['R', 'B'])
s.set_ascription(('size', 's2'), ['M', 'L'])
s1 = deepcopy(s)
s1.set_ascription(('color', 's1'), ['B'])
s1.set_ascription(('size', 's1'), ['S', 'M'])
s1.set_ascription(('color', 's2'), ['B', 'G'])
s2 = deepcopy(s)
s2.set_ascription(('size', 's1'), ['L'])
s2.set_ascription(('size', 's2'), ['L'])
s3 = deepcopy(s)
s3.set_ascription(('color', 's1'), ['R'])
aes = s.get_alternate_extensions(s1, s2, s3)
ae_s5, ae_s6, ae_s4 = aes
s4 = State(asys)
s4.set_ascription(('color', 's1'), ['B'])
s4.set_ascription(('color', 's2'), ['B', 'G', 'R'])
s4.set_ascription(('size', 's1'), ['L'])
s4.set_ascription(('size', 's2'), ['M'])
s5 = State(asys)
s5.set_ascription(('color', 's1'), ['B'])
s5.set_ascription(('color', 's2'), ['R'])
s5.set_ascription(('size', 's1'), ['M', 'S'])
s5.set_ascription(('size', 's2'), ['L', 'M'])
s6 = State(asys)
s6.set_ascription(('color', 's1'), ['B'])
s6.set_ascription(('color', 's2'), ['R'])
s6.set_ascription(('size', 's1'), ['L', 'M', 'S'])
s6.set_ascription(('size', 's2'), ['M'])
assert ae_s4 == s4
assert ae_s5 == s5
assert ae_s6 == s6
color, size = Attribute(
"color", ['R', 'G', 'B']), Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s']
asys = AttributeSystem(a, o)
s = State(asys)
s1 = deepcopy(s)
s1.set_ascription(('color', 's'), ['B', 'G'])
s1.set_ascription(('size', 's'), ['S'])
aes = s.get_alternate_extensions(s1)
ae_s2, ae_s3 = aes
s2 = deepcopy(s)
s2.set_ascription(('color', 's'), ['R'])
s2.set_ascription(('size', 's'), ['S', 'M', 'L'])
s3 = deepcopy(s)
s3.set_ascription(('color', 's'), ['R', 'B', 'G'])
s3.set_ascription(('size', 's'), ['L', 'M'])
assert ae_s2 == s2
assert ae_s3 == s3
def test_join():
"""Test join function for States."""
def test_ValueError(s1, s2):
"""Test constructor for ValueErrors with given params."""
with pytest.raises(ValueError) as excinfo:
State.join(s1, s2)
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
ascr1 = {('color', 's1'): ['R'],
('color', 's2'): ['B'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']}
ascr2 = {('color', 's1'): ['G'],
('color', 's2'): ['G'],
('size', 's1'): ['L'],
('size', 's2'): ['M', 'S']}
ascr3 = {('color', 's1'): ['R', 'G'],
('color', 's2'): ['B', 'G'],
('size', 's1'): ['L', 'M'],
('size', 's2'): ['M', 'S', 'L']}
s1 = State(asys, ascr1)
s2 = State(asys, ascr2)
s3 = State(asys, ascr3)
assert s3 == State.join(s1, s2)
length = Attribute("length", [1, 3, 5])
shape = Attribute("shape", ['circle', 'triangle', 'rectangle'])
a = AttributeStructure(length, shape)
o = ['s1', 's2']
bad_asys = AttributeSystem(a, o)
bad_state = State(bad_asys)
test_ValueError(s1, bad_state)
def test___str__():
"""Test str(State)"""
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
s = State(asys)
s1 = State(asys, {
('color', 's1'): ['R'],
('color', 's2'): ['B', 'G'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']})
s_empty = State(AttributeSystem(AttributeStructure(), []))
assert s_empty.__str__() == ""
assert s.__str__() == "color(s1): {V(B, G, R)}\ncolor(s2): {V(B, G, R)}\nsize(s1): {V(L, M, S)}\nsize(s2): {V(L, M, S)}"
assert s1.__str__() == "color(s1): {V(R)}\ncolor(s2): {V(B, G)}\nsize(s1): {V(M)}\nsize(s2): {V(L, S)}"
def test___repr__():
"""Test repr(State)."""
color = Attribute("color", ['R', 'G', 'B'])
size = Attribute("size", ['S', 'M', 'L'])
a = AttributeStructure(color, size)
o = ['s1', 's2']
asys = AttributeSystem(a, o)
s = State(asys)
s1 = State(asys, {
('color', 's1'): ['R'],
('color', 's2'): ['B', 'G'],
('size', 's1'): ['M'],
('size', 's2'): ['L', 'S']})
s_empty = State(AttributeSystem(AttributeStructure(), []))
assert s_empty.__repr__() == ""
assert s.__repr__() == "color(s1): {V(B, G, R)}\ncolor(s2): {V(B, G, R)}\nsize(s1): {V(L, M, S)}\nsize(s2): {V(L, M, S)}"
assert s1.__repr__() == "color(s1): {V(R)}\ncolor(s2): {V(B, G)}\nsize(s1): {V(M)}\nsize(s2): {V(L, S)}"
|
|
# -*- coding: utf-8 -*-
from cStringIO import StringIO
from struct import pack
from struct import unpack
from datetime import datetime
import time
def _calcCRC(crc, byte):
table = [0x0000, 0xCC01, 0xD801, 0x1400, 0xF001, 0x3C00, 0x2800, 0xE401,
0xA001, 0x6C00, 0x7800, 0xB401, 0x5000, 0x9C01, 0x8801, 0x4400]
# compute checksum of lower four bits of byte
tmp = table[crc & 0xF]
crc = (crc >> 4) & 0x0FFF
crc = crc ^ tmp ^ table[byte & 0xF]
# now compute checksum of upper four bits of byte
tmp = table[crc & 0xF]
crc = (crc >> 4) & 0x0FFF
crc = crc ^ tmp ^ table[(byte >> 4) & 0xF]
return crc
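# Example (sketch): the CRC is accumulated one byte at a time over the whole
# buffer, the same way the encoder's crc() method does below.
#
#   crc = 0
#   for byte in bytearray(b'\x0e\x10\x98\x00'):
#       crc = _calcCRC(crc, byte)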
class FitBaseType(object):
"""BaseType Definition
see FIT Protocol Document(Page.20)"""
enum = {'#': 0, 'endian': 0, 'field': 0x00, 'name': 'enum', 'invalid': 0xFF, 'size': 1}
sint8 = {'#': 1, 'endian': 0, 'field': 0x01, 'name': 'sint8', 'invalid': 0x7F, 'size': 1}
uint8 = {'#': 2, 'endian': 0, 'field': 0x02, 'name': 'uint8', 'invalid': 0xFF, 'size': 1}
sint16 = {'#': 3, 'endian': 1, 'field': 0x83, 'name': 'sint16', 'invalid': 0x7FFF, 'size': 2}
uint16 = {'#': 4, 'endian': 1, 'field': 0x84, 'name': 'uint16', 'invalid': 0xFFFF, 'size': 2}
sint32 = {'#': 5, 'endian': 1, 'field': 0x85, 'name': 'sint32', 'invalid': 0x7FFFFFFF, 'size': 4}
uint32 = {'#': 6, 'endian': 1, 'field': 0x86, 'name': 'uint32', 'invalid': 0xFFFFFFFF, 'size': 4}
string = {'#': 7, 'endian': 0, 'field': 0x07, 'name': 'string', 'invalid': 0x00, 'size': 1}
    float32 = {'#': 8, 'endian': 1, 'field': 0x88, 'name': 'float32', 'invalid': 0xFFFFFFFF, 'size': 4}
    float64 = {'#': 9, 'endian': 1, 'field': 0x89, 'name': 'float64', 'invalid': 0xFFFFFFFFFFFFFFFF, 'size': 8}
uint8z = {'#': 10, 'endian': 0, 'field': 0x0A, 'name': 'uint8z', 'invalid': 0x00, 'size': 1}
uint16z = {'#': 11, 'endian': 1, 'field': 0x8B, 'name': 'uint16z', 'invalid': 0x0000, 'size': 2}
uint32z = {'#': 12, 'endian': 1, 'field': 0x8C, 'name': 'uint32z', 'invalid': 0x00000000, 'size': 4}
byte = {'#': 13, 'endian': 0, 'field': 0x0D, 'name': 'byte', 'invalid': 0xFF, 'size': 1} # array of byte, field is invalid if all bytes are invalid
@staticmethod
def get_format(basetype):
formats = {
0: 'B', 1: 'b', 2: 'B', 3: 'h', 4: 'H', 5: 'i', 6: 'I', 7: 's', 8: 'f',
9: 'd', 10: 'B', 11: 'H', 12: 'I', 13: 'c',
}
return formats[basetype['#']]
@staticmethod
def pack(basetype, value):
"""function to avoid DeprecationWarning"""
if basetype['#'] in (1,2,3,4,5,6,10,11,12):
value = int(value)
fmt = FitBaseType.get_format(basetype)
return pack(fmt, value)
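# Example (sketch): struct uses native byte order here, so on a little-endian
# machine FitBaseType.pack(FitBaseType.uint16, 100) yields '\x64\x00'.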
class Fit(object):
HEADER_SIZE = 12
GMSG_NUMS = {
'file_id': 0,
'device_info': 23,
'weight_scale': 30,
'file_creator': 49,
}
class FitEncoder(Fit):
def timestamp(self, t):
"""the timestamp in fit protocol is seconds since
UTC 00:00 Dec 31 1989 (631065600)"""
if isinstance(t, datetime):
t = time.mktime(t.timetuple())
return t - 631065600
class FitEncoder_Weight(FitEncoder):
FILE_TYPE = 9
LMSG_TYPE_FILE_INFO = 0
LMSG_TYPE_FILE_CREATOR = 1
LMSG_TYPE_DEVICE_INFO = 2
LMSG_TYPE_WEIGHT_SCALE = 3
def __init__(self):
self.buf = StringIO()
self.write_header() # create header first
self.device_info_defined = False
self.weight_scale_defined = False
def __str__(self):
orig_pos = self.buf.tell()
self.buf.seek(0)
lines = []
while True:
b = self.buf.read(16)
if not b:
break
lines.append(' '.join(['%02x' % ord(c) for c in b]))
self.buf.seek(orig_pos)
return '\n'.join(lines)
def write_header(self, header_size=Fit.HEADER_SIZE,
protocol_version=16,
profile_version=108,
data_size=0,
data_type='.FIT'):
self.buf.seek(0)
s = pack('BBHI4s', header_size, protocol_version, profile_version, data_size, data_type)
self.buf.write(s)
def _build_content_block(self, content):
field_defs = []
values = []
for num, basetype, value, scale in content:
s = pack('BBB', num, basetype['size'], basetype['field'])
field_defs.append(s)
if value is None:
# invalid value
value = basetype['invalid']
elif scale is not None:
value *= scale
values.append(FitBaseType.pack(basetype, value))
return (''.join(field_defs), ''.join(values))
def write_file_info(self, serial_number=None, time_created=None, manufacturer=None, product=None, number=None):
if time_created is None:
time_created = datetime.now()
content = [
(3, FitBaseType.uint32z, serial_number, None),
(4, FitBaseType.uint32, self.timestamp(time_created), None),
(1, FitBaseType.uint16, manufacturer, None),
(2, FitBaseType.uint16, product, None),
(5, FitBaseType.uint16, number, None),
(0, FitBaseType.enum, self.FILE_TYPE, None), # type
]
fields, values = self._build_content_block(content)
# create fixed content
msg_number = self.GMSG_NUMS['file_id']
fixed_content = pack('BBHB', 0, 0, msg_number, len(content)) # reserved, architecture(0: little endian)
self.buf.write(''.join([
# definition
self.record_header(definition=True, lmsg_type=self.LMSG_TYPE_FILE_INFO),
fixed_content,
fields,
#record
self.record_header(lmsg_type=self.LMSG_TYPE_FILE_INFO),
values,
]))
def write_file_creator(self, software_version=None, hardware_version=None):
content = [
(0, FitBaseType.uint16, software_version, None),
(1, FitBaseType.uint8, hardware_version, None),
]
fields, values = self._build_content_block(content)
msg_number = self.GMSG_NUMS['file_creator']
fixed_content = pack('BBHB', 0, 0, msg_number, len(content)) # reserved, architecture(0: little endian)
self.buf.write(''.join([
# definition
self.record_header(definition=True, lmsg_type=self.LMSG_TYPE_FILE_CREATOR),
fixed_content,
fields,
#record
self.record_header(lmsg_type=self.LMSG_TYPE_FILE_CREATOR),
values,
]))
def write_device_info(self, timestamp, serial_number=None, cum_operationg_time=None, manufacturer=None,
product=None, software_version=None, battery_voltage=None, device_index=None,
device_type=None, hardware_version=None, battery_status=None):
content = [
(253, FitBaseType.uint32, self.timestamp(timestamp), 1),
(3, FitBaseType.uint32z, serial_number, 1),
(7, FitBaseType.uint32, cum_operationg_time, 1),
(8, FitBaseType.uint32, None, None), # unknown field(undocumented)
(2, FitBaseType.uint16, manufacturer, 1),
(4, FitBaseType.uint16, product, 1),
(5, FitBaseType.uint16, software_version, 100),
(10, FitBaseType.uint16, battery_voltage, 256),
(0, FitBaseType.uint8, device_index, 1),
(1, FitBaseType.uint8, device_type, 1),
(6, FitBaseType.uint8, hardware_version, 1),
(11, FitBaseType.uint8, battery_status, None),
]
fields, values = self._build_content_block(content)
if not self.device_info_defined:
header = self.record_header(definition=True, lmsg_type=self.LMSG_TYPE_DEVICE_INFO)
msg_number = self.GMSG_NUMS['device_info']
fixed_content = pack('BBHB', 0, 0, msg_number, len(content)) # reserved, architecture(0: little endian)
self.buf.write(header + fixed_content + fields)
self.device_info_defined = True
header = self.record_header(lmsg_type=self.LMSG_TYPE_DEVICE_INFO)
self.buf.write(header + values)
def write_weight_scale(self, timestamp, weight, percent_fat=None, percent_hydration=None,
visceral_fat_mass=None, bone_mass=None, muscle_mass=None, basal_met=None,
active_met=None, physique_rating=None, metabolic_age=None, visceral_fat_rating=None):
content = [
(253, FitBaseType.uint32, self.timestamp(timestamp), 1),
(0, FitBaseType.uint16, weight, 100),
(1, FitBaseType.uint16, percent_fat, 100),
(2, FitBaseType.uint16, percent_hydration, 100),
(3, FitBaseType.uint16, visceral_fat_mass, 100),
(4, FitBaseType.uint16, bone_mass, 100),
(5, FitBaseType.uint16, muscle_mass, 100),
(7, FitBaseType.uint16, basal_met, 4),
(9, FitBaseType.uint16, active_met, 4),
(8, FitBaseType.uint8, physique_rating, 1),
(10, FitBaseType.uint8, metabolic_age, 1),
(11, FitBaseType.uint8, visceral_fat_rating, 1),
]
fields, values = self._build_content_block(content)
if not self.weight_scale_defined:
header = self.record_header(definition=True, lmsg_type=self.LMSG_TYPE_WEIGHT_SCALE)
msg_number = self.GMSG_NUMS['weight_scale']
fixed_content = pack('BBHB', 0, 0, msg_number, len(content)) # reserved, architecture(0: little endian)
self.buf.write(header + fixed_content + fields)
self.weight_scale_defined = True
header = self.record_header(lmsg_type=self.LMSG_TYPE_WEIGHT_SCALE)
self.buf.write(header + values)
def record_header(self, definition=False, lmsg_type=0):
msg = 0
if definition:
msg = 1 << 6 # 6th bit is a definition message
return pack('B', msg + lmsg_type)
def crc(self):
orig_pos = self.buf.tell()
self.buf.seek(0)
crc = 0
while(True):
b = self.buf.read(1)
if not b:
break
crc = _calcCRC(crc, unpack('b', b)[0])
self.buf.seek(orig_pos)
return pack('H', crc)
def finish(self):
"""re-weite file-header, then append crc to end of file"""
data_size = self.get_size() - self.HEADER_SIZE
self.write_header(data_size=data_size)
crc = self.crc()
self.buf.seek(0, 2)
self.buf.write(crc)
def get_size(self):
orig_pos = self.buf.tell()
self.buf.seek(0, 2)
size = self.buf.tell()
self.buf.seek(orig_pos)
return size
def getvalue(self):
return self.buf.getvalue()
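# Example usage (illustrative sketch; the field values below are made up):
#
#   encoder = FitEncoder_Weight()
#   encoder.write_file_info(serial_number=1234, time_created=datetime.now(), manufacturer=1, product=1)
#   encoder.write_file_creator(software_version=1, hardware_version=1)
#   encoder.write_device_info(timestamp=datetime.now(), serial_number=1234, manufacturer=1)
#   encoder.write_weight_scale(timestamp=datetime.now(), weight=72.5, percent_fat=21.0)
#   encoder.finish()  # re-writes the header with the final data size and appends the CRC
#   open('weight.fit', 'wb').write(encoder.getvalue())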
|
|
"""
Copyright (c) 2015, 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals, absolute_import
import os
import json
import types
from textwrap import dedent
import yaml
from atomic_reactor.plugins.input_osv3 import (
OSv3InputPlugin,
OSv3SourceContainerInputPlugin,
)
from osbs.api import OSBS
from osbs.constants import USER_PARAMS_KIND_SOURCE_CONTAINER_BUILDS
from osbs.exceptions import OsbsValidationException
from tests.constants import REACTOR_CONFIG_MAP
from atomic_reactor.constants import (PLUGIN_BUMP_RELEASE_KEY,
PLUGIN_DISTGIT_FETCH_KEY,
PLUGIN_FETCH_MAVEN_KEY,
PLUGIN_INJECT_PARENT_IMAGE_KEY,
PLUGIN_KOJI_IMPORT_PLUGIN_KEY,
PLUGIN_KOJI_PARENT_KEY,
PLUGIN_KOJI_PROMOTE_PLUGIN_KEY,
PLUGIN_KOJI_TAG_BUILD_KEY,
PLUGIN_KOJI_UPLOAD_PLUGIN_KEY,
PLUGIN_KOJI_DELEGATE_KEY,
PLUGIN_RESOLVE_COMPOSES_KEY,
PLUGIN_SENDMAIL_KEY)
import pytest
from flexmock import flexmock
def enable_plugins_configuration(plugins_json):
# flexmock won't mock a non-existent method, so add it if necessary
try:
getattr(OSBS, 'render_plugins_configuration')
except AttributeError:
setattr(OSBS, 'render_plugins_configuration',
types.MethodType(lambda x: x, 'render_plugins_configuration'))
(flexmock(OSBS)
.should_receive('render_plugins_configuration')
.and_return(json.dumps(plugins_json)))
class TestOSv3InputPlugin(object):
"""Tests for OSv3InputPlugin"""
def test_does_fail_if_no_plugins(self):
mock_env = {
'BUILD': '{}',
'SOURCE_URI': 'https://github.com/foo/bar.git',
'SOURCE_REF': 'master',
'OUTPUT_IMAGE': 'asdf:fdsa',
'OUTPUT_REGISTRY': 'localhost:5000',
}
flexmock(os, environ=mock_env)
plugin = OSv3InputPlugin()
with pytest.raises(RuntimeError) as exc:
plugin.run()
assert 'No plugin configuration found!' in str(exc.value)
@pytest.mark.parametrize('build, expected', [
('{"metadata": {"selfLink": "/foo/bar"}}', '/foo/bar'),
('{"metadata": {}}', None),
('{}', None),
])
@pytest.mark.parametrize(('plugins_variable', 'valid'), [
('USER_PARAMS', True),
('DOCK_PLUGINS', False),
])
def test_plugins_variable_and_selflink(self, build, expected, plugins_variable, valid):
plugins_json = {
'postbuild_plugins': [],
}
mock_env = {
'BUILD': build,
'SOURCE_URI': 'https://github.com/foo/bar.git',
'SOURCE_REF': 'master',
'OUTPUT_IMAGE': 'asdf:fdsa',
'OUTPUT_REGISTRY': 'localhost:5000',
plugins_variable: json.dumps(plugins_json),
}
if plugins_variable == 'USER_PARAMS':
mock_env['REACTOR_CONFIG'] = REACTOR_CONFIG_MAP
enable_plugins_configuration(plugins_json)
mock_env.update({
plugins_variable: json.dumps({
'build_json_dir': 'inputs',
'build_type': 'orchestrator',
'git_ref': 'test',
'git_uri': 'test',
'user': 'user'
}),
})
flexmock(os, environ=mock_env)
plugin = OSv3InputPlugin()
if valid:
assert plugin.run()['postbuild_plugins'] is not None
assert plugin.run()['openshift_build_selflink'] == expected
else:
with pytest.raises(RuntimeError):
plugin.run()
def test_remove_everything(self):
plugins_json = {
'build_json_dir': 'inputs',
'build_type': 'orchestrator',
'git_ref': 'test',
'git_uri': 'test',
'user': 'user',
'prebuild_plugins': [
{'name': 'before', },
{'name': PLUGIN_BUMP_RELEASE_KEY, },
{'name': PLUGIN_KOJI_DELEGATE_KEY, },
{'name': PLUGIN_FETCH_MAVEN_KEY, },
{'name': PLUGIN_DISTGIT_FETCH_KEY, },
{'name': PLUGIN_INJECT_PARENT_IMAGE_KEY, },
{'name': PLUGIN_KOJI_PARENT_KEY, },
{'name': PLUGIN_RESOLVE_COMPOSES_KEY, },
{'name': 'after', },
],
'postbuild_plugins': [
{'name': 'before', },
{'name': PLUGIN_KOJI_UPLOAD_PLUGIN_KEY, },
{'name': 'after', },
],
'exit_plugins': [
{'name': 'before', },
{'name': PLUGIN_KOJI_IMPORT_PLUGIN_KEY, },
{'name': PLUGIN_KOJI_PROMOTE_PLUGIN_KEY, },
{'name': PLUGIN_KOJI_TAG_BUILD_KEY, },
{'name': PLUGIN_SENDMAIL_KEY, },
{'name': 'after', },
]
}
minimal_config = dedent("""\
version: 1
koji:
hub_url: ''
root_url: ''
auth: {}
""")
mock_env = {
'BUILD': '{}',
'SOURCE_URI': 'https://github.com/foo/bar.git',
'SOURCE_REF': 'master',
'OUTPUT_IMAGE': 'asdf:fdsa',
'OUTPUT_REGISTRY': 'localhost:5000',
'USER_PARAMS': json.dumps(plugins_json),
'REACTOR_CONFIG': minimal_config
}
flexmock(os, environ=mock_env)
enable_plugins_configuration(plugins_json)
plugin = OSv3InputPlugin()
plugins = plugin.run()
for phase in ('prebuild_plugins', 'postbuild_plugins', 'exit_plugins'):
assert plugins[phase] == [
{'name': 'before', },
{'name': 'after', },
]
@pytest.mark.parametrize(('override', 'valid'), [
('invalid_override', False),
({
'version': 1,
'koji': {
'hub_url': '',
'root_url': '',
'auth': {}
}
}, True),
(None, True),
])
@pytest.mark.parametrize('buildtype', [
'worker', 'orchestrator'
])
def test_validate_reactor_config_override(self, override, valid, buildtype):
plugins_json = {
'postbuild_plugins': [],
}
user_params = {
'build_json_dir': 'inputs',
'build_type': buildtype,
'git_ref': 'test',
'git_uri': 'test',
'user': 'user',
'reactor_config_map': REACTOR_CONFIG_MAP,
}
if override:
user_params['reactor_config_override'] = override
mock_env = {
'BUILD': '{}',
'SOURCE_URI': 'https://github.com/foo/bar.git',
'SOURCE_REF': 'master',
'OUTPUT_IMAGE': 'asdf:fdsa',
'OUTPUT_REGISTRY': 'localhost:5000',
'REACTOR_CONFIG': REACTOR_CONFIG_MAP,
'USER_PARAMS': json.dumps(user_params)
}
enable_plugins_configuration(plugins_json)
flexmock(os, environ=mock_env)
plugin = OSv3InputPlugin()
if valid:
plugin.run()
else:
with pytest.raises(OsbsValidationException):
plugin.run()
@pytest.mark.parametrize(('arrangement_version', 'valid'), [
(1, False),
(2, False),
(3, False),
(4, False),
(5, False),
(6, True),
])
@pytest.mark.parametrize('buildtype', [
'worker', 'orchestrator'
])
def test_arrangement_version(self, arrangement_version, valid, buildtype):
plugins_json = {
'postbuild_plugins': [],
}
user_params = {
'arrangement_version': arrangement_version,
'build_json_dir': 'inputs',
'build_type': buildtype,
'git_ref': 'test',
'git_uri': 'test',
'user': 'user',
'reactor_config_map': REACTOR_CONFIG_MAP,
}
mock_env = {
'BUILD': '{}',
'SOURCE_URI': 'https://github.com/foo/bar.git',
'SOURCE_REF': 'master',
'OUTPUT_IMAGE': 'asdf:fdsa',
'OUTPUT_REGISTRY': 'localhost:5000',
'REACTOR_CONFIG': REACTOR_CONFIG_MAP,
'USER_PARAMS': json.dumps(user_params)
}
enable_plugins_configuration(plugins_json)
flexmock(os, environ=mock_env)
plugin = OSv3InputPlugin()
if valid:
plugin.run()
else:
with pytest.raises(ValueError):
plugin.run()
class TestOSv3SourceContainerInputPlugin(object):
"""Tests for OSv3SourceContainerInputPlugin class"""
@property
def user_params(self):
return {
'build_json_dir': '/usr/share/osbs/',
'kind': USER_PARAMS_KIND_SOURCE_CONTAINER_BUILDS,
'reactor_config_map': REACTOR_CONFIG_MAP,
'sources_for_koji_build_nvr': 'test-1-123',
'user': 'user',
}
@pytest.mark.parametrize('build, expected', [
('{"metadata": {"selfLink": "/foo/bar"}}', '/foo/bar'),
('{"metadata": {}}', None),
('{}', None),
])
def test_sets_selflink(self, build, expected):
mock_env = {
'BUILD': build,
'OUTPUT_IMAGE': 'asdf:fdsa',
'OUTPUT_REGISTRY': 'localhost:5000',
'REACTOR_CONFIG': REACTOR_CONFIG_MAP,
'USER_PARAMS': json.dumps(self.user_params),
}
flexmock(os, environ=mock_env)
plugin = OSv3SourceContainerInputPlugin()
assert plugin.run()['openshift_build_selflink'] == expected
@pytest.mark.parametrize(('override', 'valid'), [
('invalid_override', False),
({
'version': 1,
'koji': {
'hub_url': '',
'root_url': '',
'auth': {}
}
}, True),
(None, True),
])
def test_validate_reactor_config_override(self, override, valid):
plugins_json = {
'postbuild_plugins': [],
}
user_params = self.user_params
if override:
user_params['reactor_config_override'] = override
mock_env = {
'BUILD': '{}',
'OUTPUT_IMAGE': 'asdf:fdsa',
'OUTPUT_REGISTRY': 'localhost:5000',
'REACTOR_CONFIG': REACTOR_CONFIG_MAP,
'USER_PARAMS': json.dumps(user_params),
}
enable_plugins_configuration(plugins_json)
flexmock(os, environ=mock_env)
plugin = OSv3SourceContainerInputPlugin()
if valid:
plugin.run()
else:
with pytest.raises(OsbsValidationException):
plugin.run()
@pytest.mark.parametrize(('arrangement_version', 'valid'), [
(1, False),
(2, False),
(3, False),
(4, False),
(5, False),
(6, True),
])
def test_arrangement_version(self, arrangement_version, valid):
plugins_json = {
'postbuild_plugins': [],
}
user_params = self.user_params
user_params['arrangement_version'] = arrangement_version
mock_env = {
'BUILD': '{}',
'OUTPUT_IMAGE': 'asdf:fdsa',
'OUTPUT_REGISTRY': 'localhost:5000',
'REACTOR_CONFIG': REACTOR_CONFIG_MAP,
'USER_PARAMS': json.dumps(user_params),
}
enable_plugins_configuration(plugins_json)
flexmock(os, environ=mock_env)
plugin = OSv3SourceContainerInputPlugin()
if valid:
plugin.run()
else:
with pytest.raises(ValueError):
plugin.run()
@pytest.mark.parametrize('mock_env,expected', [
({}, False),
({'USER_PARAMS': '{}'}, False),
({'USER_PARAMS': '{"kind":"random_something"}'}, False),
({'USER_PARAMS': '{"kind":"source_containers_user_params"}'}, True)
])
def test_is_autousable(self, mock_env, expected):
flexmock(os, environ=mock_env)
assert OSv3SourceContainerInputPlugin.is_autousable() == expected
def test_fail_without_koji(self):
plugins_json = {
'postbuild_plugins': [],
}
# remove koji from config
reactor_config_map = yaml.safe_load(REACTOR_CONFIG_MAP)
del reactor_config_map['koji']
no_koji_config_map = yaml.dump(reactor_config_map)
user_params = self.user_params
mock_env = {
'BUILD': '{}',
'OUTPUT_IMAGE': 'asdf:fdsa',
'OUTPUT_REGISTRY': 'localhost:5000',
'REACTOR_CONFIG': no_koji_config_map,
'USER_PARAMS': json.dumps(user_params),
}
enable_plugins_configuration(plugins_json)
flexmock(os, environ=mock_env)
plugin = OSv3SourceContainerInputPlugin()
with pytest.raises(OsbsValidationException) as exc_info:
plugin.run()
assert ("validating 'required' has failed "
"(%r is a required property)" % u'koji') in str(exc_info.value)
|
|
#!/usr/bin/env python
import getpass
import optparse
import os
import struct
import sys
import unittest
from hashlib import sha256
from random import randrange
PY3 = sys.version_info[0] == 3
if PY3:
import builtins
print_ = getattr(builtins, 'print')
raw_input = getattr(builtins, 'input')
unicode_type = str
# PyCrypto uses time.clock internally, which was removed in 3.8. We'll just
# patch it in for now.
if sys.version_info >= (3, 8, 0):
import time
time.clock = time.process_time
else:
unicode_type = unicode
def print_(s):
sys.stdout.write(s)
sys.stdout.write('\n')
from io import BytesIO
from Crypto.Cipher import AES
from Crypto.Cipher import Blowfish
from Crypto import Random
CIPHER_BLOWFISH = 1
CIPHER_AES = 2
def _gen_padding(file_size, block_size):
pad_bytes = block_size - (file_size % block_size)
padding = Random.get_random_bytes(pad_bytes - 1)
bflag = randrange(block_size - 2, 256 - block_size)
bflag -= bflag % block_size - pad_bytes
return padding + chr(bflag).encode('raw_unicode_escape')
def _read_padding(buffer, block_size):
    # bytes indexing yields an int on Python 3 and a 1-character str on Python 2
    last_byte = buffer[-1] if PY3 else ord(buffer[-1])
    return (last_byte % block_size) or block_size
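# Padding scheme: the plaintext is padded up to a multiple of the cipher block
# size with random bytes, and the final byte is chosen so that
# (last_byte % block_size) recovers the padding length (0 meaning a full block),
# which _read_padding uses to truncate the decrypted output.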
def generate_iv(block_size):
return Random.get_random_bytes(block_size)
def get_blowfish_cipher(key, iv):
return Blowfish.new(key, Blowfish.MODE_CBC, iv)
def get_aes_cipher(key, iv):
if isinstance(key, unicode_type):
key = key.encode('utf-8')
iv_length = AES.block_size # 16.
key_length = 32
key_iv_length = iv_length + key_length
d = d_i = b''
while len(d) < key_iv_length:
d_i = sha256(d_i + key).digest()
d += d_i[:16]
new_key = d[:key_length]
new_iv = d[key_length:key_iv_length]
return AES.new(new_key, AES.MODE_CBC, new_iv)
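# Note: get_aes_cipher derives a fresh 32-byte key and 16-byte IV from the
# passphrase by iterated SHA-256 (an EVP_BytesToKey-style construction); the iv
# argument is only used by the Blowfish cipher and is ignored here.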
CIPHER_MAP = {
CIPHER_BLOWFISH: (get_blowfish_cipher, Blowfish.block_size),
CIPHER_AES: (get_aes_cipher, AES.block_size),
}
def encrypt(in_buf, out_buf, key, chunk_size=4096,
cipher_type=CIPHER_BLOWFISH):
get_cipher, block_size = CIPHER_MAP[cipher_type]
iv = generate_iv(block_size)
cipher = get_cipher(key, iv)
bytes_read = 0
wrote_padding = False
out_buf.write(iv)
while 1:
buffer = in_buf.read(chunk_size)
buffer_len = len(buffer)
bytes_read += buffer_len
if buffer:
if buffer_len < chunk_size:
buffer += _gen_padding(bytes_read, block_size)
wrote_padding = True
out_buf.write(cipher.encrypt(buffer))
else:
if not wrote_padding:
padding = _gen_padding(bytes_read, block_size)
out_buf.write(cipher.encrypt(padding))
break
def decrypt(in_buf, out_buf, key, chunk_size=4096,
cipher_type=CIPHER_BLOWFISH):
get_cipher, block_size = CIPHER_MAP[cipher_type]
iv = in_buf.read(block_size)
cipher = get_cipher(key, iv)
    decrypted = b''
while 1:
buffer = in_buf.read(chunk_size)
if buffer:
decrypted = cipher.decrypt(buffer)
out_buf.write(decrypted)
else:
break
if decrypted:
padding = _read_padding(decrypted, block_size)
out_buf.seek(-padding, 2)
out_buf.truncate()
def encrypt_file(in_file, out_file, key, chunk_size=4096,
cipher_type=CIPHER_BLOWFISH):
with open(in_file, 'rb') as in_fh:
with open(out_file, 'wb') as out_fh:
encrypt(in_fh, out_fh, key, chunk_size, cipher_type)
def decrypt_file(in_file, out_file, key, chunk_size=4096,
cipher_type=CIPHER_BLOWFISH):
with open(in_file, 'rb') as in_fh:
with open(out_file, 'wb') as out_fh:
decrypt(in_fh, out_fh, key, chunk_size, cipher_type)
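# Example round trip (sketch; the file names and key are illustrative):
#
#   encrypt_file('notes.txt', 'notes.txt.e', b'hunter2')
#   decrypt_file('notes.txt.e', 'notes_copy.txt', b'hunter2')
#
# or entirely in memory:
#
#   src, enc, dec = BytesIO(b'secret data'), BytesIO(), BytesIO()
#   encrypt(src, enc, b'hunter2')
#   enc.seek(0)
#   decrypt(enc, dec, b'hunter2')
#   assert dec.getvalue() == b'secret data'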
class TestEncryptDecrypt(unittest.TestCase):
cipher_type = CIPHER_BLOWFISH
def setUp(self):
self.in_filename = '/tmp/crypt.tmp.in'
self.out_filename = '/tmp/crypt.tmp.out'
self.dec_filename = '/tmp/crypt.tmp.dec'
self.key = 'testkey'
def tearDown(self):
self.remove_files(
self.in_filename,
self.out_filename,
self.dec_filename,
)
def remove_files(self, *filenames):
for fn in filenames:
if os.path.exists(fn):
os.unlink(fn)
def write_bytes(self, num, ch=b'a'):
buf = ch * num
with open(self.in_filename, 'wb') as fh:
fh.write(buf)
return buf
def crypt_data(self, num_bytes, ch, in_key=None, out_key=None, chunk_size=4096):
in_key = in_key or self.key
out_key = out_key or self.key
buf = self.write_bytes(num_bytes, ch)
encrypt_file(self.in_filename, self.out_filename, in_key, chunk_size,
self.cipher_type)
decrypt_file(self.out_filename, self.dec_filename, out_key, chunk_size,
self.cipher_type)
with open(self.dec_filename, 'rb') as fh:
decrypted = fh.read()
return buf, decrypted
def test_encrypt_decrypt(self):
def encrypt_flow(ch):
for i in range(33):
buf, decrypted = self.crypt_data(i, ch)
self.assertEqual(buf, decrypted)
encrypt_flow(b'a')
encrypt_flow(b'\x00')
encrypt_flow(b'\x01')
encrypt_flow(b'\xff')
def test_key(self):
buf, decrypted = self.crypt_data(128, b'a', self.key, self.key+'x')
self.assertNotEqual(buf, decrypted)
def test_chunk_sizes(self):
for i in [128, 1024, 2048, 4096]:
nb = [i - 1, i, i + 1, i * 2, i * 2 + 1]
for num_bytes in nb:
buf, decrypted = self.crypt_data(num_bytes, b'a', chunk_size=i)
self.assertEqual(buf, decrypted)
def test_stringio(self):
for i in [128, 1024, 2048, 4096]:
nb = [i - 1, i, i + 1, i * 2, i * 2 + 1]
for num_bytes in nb:
in_buf = BytesIO()
out_buf = BytesIO()
dec_buf = BytesIO()
in_buf.write(num_bytes * b'a')
in_buf.seek(0)
encrypt(in_buf, out_buf, self.key, i, self.cipher_type)
out_buf.seek(0)
decrypt(out_buf, dec_buf, self.key, i, self.cipher_type)
self.assertEqual(in_buf.getvalue(), dec_buf.getvalue())
def test_cipher_stability(self):
get_cipher, block_size = CIPHER_MAP[self.cipher_type]
make_cipher = lambda: get_cipher(b'passphrase', b'\x00' * block_size)
# Test that the same passphrase and IV yield same ciphertext.
        data = b'a' * block_size * 4
crypt_data1 = make_cipher().encrypt(data)
crypt_data2 = make_cipher().encrypt(data)
self.assertEqual(crypt_data1, crypt_data2)
class TestEncryptDecryptAES(TestEncryptDecrypt):
cipher_type = CIPHER_AES
if __name__ == '__main__':
parser = optparse.OptionParser(usage='%prog [-e|-d] INFILE OUTFILE')
parser.add_option('-e', '--encrypt', dest='encrypt', action='store_true')
parser.add_option('-d', '--decrypt', dest='decrypt', action='store_true')
parser.add_option('-k', '--key', dest='key', action='store', type='str')
parser.add_option('-a', '--aes', dest='aes', action='store_true',
help='Use AES256 cipher (default is blowfish).')
parser.add_option('-t', '--test', dest='run_tests', action='store_true')
parser.add_option('-q', '--quiet', dest='quiet', action='store_true',
help='test output verbosity (when running with -t)')
(options, args) = parser.parse_args()
if options.run_tests:
unittest.main(argv=sys.argv[:1], verbosity=not options.quiet and 2 or 0)
if len(args) == 1:
if options.aes and args[0].endswith('.e'):
print('AES selected, but appears to use blowfish extension.')
if raw_input('Use blowfish instead? (Yn) ') != 'n':
options.aes = False
elif not options.aes and args[0].endswith('.ae'):
print('AES not selected, but appears to use AES extension.')
if raw_input('Use AES instead? (Yn) ') != 'n':
options.aes = True
ext = '.ae' if options.aes else '.e'
if options.encrypt:
default = '%s%s' % (args[0], ext)
else:
            default = args[0][:-len(ext)] if args[0].endswith(ext) else args[0]
args.append(raw_input('Destination? (%s) ' % default) or default)
if len(args) < 2 or not (options.encrypt or options.decrypt):
parser.print_help()
sys.exit(1)
if not options.key:
while 1:
key = getpass.getpass('Key: ')
verify = getpass.getpass('Verify: ')
if key == verify:
break
else:
print_('Keys did not match')
else:
key = options.key
infile, outfile = args[0], args[1]
if os.path.exists(outfile):
print_('%s will be overwritten' % outfile)
if raw_input('Continue? yN ') != 'y':
sys.exit(2)
cipher_type = CIPHER_AES if options.aes else CIPHER_BLOWFISH
if options.encrypt:
encrypt_file(infile, outfile, key, cipher_type=cipher_type)
else:
decrypt_file(infile, outfile, key, cipher_type=cipher_type)
|
|
from numba import jit, float64
try:
    from numba import jitclass  # numba < 0.49
except ImportError:  # jitclass moved in newer numba releases
    from numba.experimental import jitclass
import numpy as np
@jit(nopython=True)
def _B1_LT_085(x):
# B1.LT_085: lt(0.85)
if 0.0 == 0.0:
return 1.0 if x < 0.85 else 0.0
x1 = 0.85 - 0.0
x2 = 0.85 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B1_GT_1(x):
# B1.GT_1: gt(1.0)
if 0.0 == 0.0:
return 1.0 if x > 1.0 else 0.0
x1 = 1.0 - 0.0
x2 = 1.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B2_GT_0(x):
# B2.GT_0: gt(0.0)
if 0.0 == 0.0:
return 1.0 if x > 0.0 else 0.0
x1 = 0.0 - 0.0
x2 = 0.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B3_LT_005(x):
# B3.LT_005: lt(0.05)
if 0.0 == 0.0:
return 1.0 if x < 0.05 else 0.0
x1 = 0.05 - 0.0
x2 = 0.05 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B3_LT_01(x):
# B3.LT_01: lt(0.1)
if 0.0 == 0.0:
return 1.0 if x < 0.1 else 0.0
x1 = 0.1 - 0.0
x2 = 0.1 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B3_LT_015(x):
# B3.LT_015: lt(0.15)
if 0.0 == 0.0:
return 1.0 if x < 0.15 else 0.0
x1 = 0.15 - 0.0
x2 = 0.15 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B3_LT_02(x):
# B3.LT_02: lt(0.2)
if 0.0 == 0.0:
return 1.0 if x < 0.2 else 0.0
x1 = 0.2 - 0.0
x2 = 0.2 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B4_NODATA(x):
# B4.NODATA: eq(0.0)
if 0.0 == 0.0:
return 1.0 if x == 0.0 else 0.0
x1 = 0.0 - 0.0
x2 = 0.0
x3 = 0.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
if x <= x3:
return 1.0 - (x - x2) / (x3 - x2)
return 0.0
@jit(nopython=True)
def _B5_LT_01(x):
# B5.LT_01: lt(0.1)
if 0.0 == 0.0:
return 1.0 if x < 0.1 else 0.0
x1 = 0.1 - 0.0
x2 = 0.1 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B7_LT_05(x):
# B7.LT_05: lt(0.5)
if 0.0 == 0.0:
return 1.0 if x < 0.5 else 0.0
x1 = 0.5 - 0.0
x2 = 0.5 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B8_GT_0(x):
# B8.GT_0: gt(0.0)
if 0.0 == 0.0:
return 1.0 if x > 0.0 else 0.0
x1 = 0.0 - 0.0
x2 = 0.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_LT_009(x):
# B8.LT_009: lt(0.09)
if 0.0 == 0.0:
return 1.0 if x < 0.09 else 0.0
x1 = 0.09 - 0.0
x2 = 0.09 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B8_GT_033(x):
# B8.GT_033: gt(0.33)
if 0.0 == 0.0:
return 1.0 if x > 0.33 else 0.0
x1 = 0.33 - 0.0
x2 = 0.33 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_GT_035(x):
# B8.GT_035: gt(0.35)
if 0.0 == 0.0:
return 1.0 if x > 0.35 else 0.0
x1 = 0.35 - 0.0
x2 = 0.35 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_GT_04(x):
# B8.GT_04: gt(0.4)
if 0.0 == 0.0:
return 1.0 if x > 0.4 else 0.0
x1 = 0.4 - 0.0
x2 = 0.4 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_GT_045(x):
# B8.GT_045: gt(0.45)
if 0.0 == 0.0:
return 1.0 if x > 0.45 else 0.0
x1 = 0.45 - 0.0
x2 = 0.45 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B8_LT_085(x):
# B8.LT_085: lt(0.85)
if 0.0 == 0.0:
return 1.0 if x < 0.85 else 0.0
x1 = 0.85 - 0.0
x2 = 0.85 + 0.0
if x <= x1:
return 1.0
if x <= x2:
return 1.0 - (x - x1) / (x2 - x1)
return 0.0
@jit(nopython=True)
def _B16_GT_0(x):
# B16.GT_0: gt(0.0)
if 0.0 == 0.0:
return 1.0 if x > 0.0 else 0.0
x1 = 0.0 - 0.0
x2 = 0.0 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _B19_GT_015(x):
# B19.GT_015: gt(0.15)
if 0.0 == 0.0:
return 1.0 if x > 0.15 else 0.0
x1 = 0.15 - 0.0
x2 = 0.15 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _BSum_GT_011(x):
# BSum.GT_011: gt(0.11)
if 0.0 == 0.0:
return 1.0 if x > 0.11 else 0.0
x1 = 0.11 - 0.0
x2 = 0.11 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _BSum_GT_013(x):
# BSum.GT_013: gt(0.13)
if 0.0 == 0.0:
return 1.0 if x > 0.13 else 0.0
x1 = 0.13 - 0.0
x2 = 0.13 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _BSum_GT_016(x):
# BSum.GT_016: gt(0.16)
if 0.0 == 0.0:
return 1.0 if x > 0.16 else 0.0
x1 = 0.16 - 0.0
x2 = 0.16 + 0.0
if x <= x1:
return 0.0
if x <= x2:
return (x - x1) / (x2 - x1)
return 1.0
@jit(nopython=True)
def _Class_FALSE(x):
# Class.FALSE: false()
return 0.0
@jit(nopython=True)
def _Class_TRUE(x):
# Class.TRUE: true()
return 1.0
_InputsSpec = [
("b1", float64[:]),
("b2", float64[:]),
("b3", float64[:]),
("b4", float64[:]),
("b5", float64[:]),
("b6", float64[:]),
("b7", float64[:]),
("b8", float64[:]),
("b12", float64[:]),
("b13", float64[:]),
("b14", float64[:]),
("b15", float64[:]),
("b16", float64[:]),
("b19", float64[:]),
("b100", float64[:]),
("bsum", float64[:]),
]
@jitclass(_InputsSpec)
class Inputs:
def __init__(self, size: int):
self.b1 = np.zeros(size, dtype=np.float64)
self.b2 = np.zeros(size, dtype=np.float64)
self.b3 = np.zeros(size, dtype=np.float64)
self.b4 = np.zeros(size, dtype=np.float64)
self.b5 = np.zeros(size, dtype=np.float64)
self.b6 = np.zeros(size, dtype=np.float64)
self.b7 = np.zeros(size, dtype=np.float64)
self.b8 = np.zeros(size, dtype=np.float64)
self.b12 = np.zeros(size, dtype=np.float64)
self.b13 = np.zeros(size, dtype=np.float64)
self.b14 = np.zeros(size, dtype=np.float64)
self.b15 = np.zeros(size, dtype=np.float64)
self.b16 = np.zeros(size, dtype=np.float64)
self.b19 = np.zeros(size, dtype=np.float64)
self.b100 = np.zeros(size, dtype=np.float64)
self.bsum = np.zeros(size, dtype=np.float64)
_OutputsSpec = [
("nodata", float64[:]),
("Wasser", float64[:]),
("Schill", float64[:]),
("Muschel", float64[:]),
("dense2", float64[:]),
("dense1", float64[:]),
("Strand", float64[:]),
("Sand", float64[:]),
("Misch", float64[:]),
("Misch2", float64[:]),
("Schlick", float64[:]),
("schlick_t", float64[:]),
("Wasser2", float64[:]),
]
@jitclass(_OutputsSpec)
class Outputs:
def __init__(self, size: int):
self.nodata = np.zeros(size, dtype=np.float64)
self.Wasser = np.zeros(size, dtype=np.float64)
self.Schill = np.zeros(size, dtype=np.float64)
self.Muschel = np.zeros(size, dtype=np.float64)
self.dense2 = np.zeros(size, dtype=np.float64)
self.dense1 = np.zeros(size, dtype=np.float64)
self.Strand = np.zeros(size, dtype=np.float64)
self.Sand = np.zeros(size, dtype=np.float64)
self.Misch = np.zeros(size, dtype=np.float64)
self.Misch2 = np.zeros(size, dtype=np.float64)
self.Schlick = np.zeros(size, dtype=np.float64)
self.schlick_t = np.zeros(size, dtype=np.float64)
self.Wasser2 = np.zeros(size, dtype=np.float64)
@jit(nopython=True)
def apply_rules(inputs: Inputs, outputs: Outputs):
for i in range(len(outputs.nodata)):
t0 = 1.0
# if b4 is NODATA:
t1 = min(t0, _B4_NODATA(inputs.b4[i]))
# nodata = TRUE
outputs.nodata[i] = t1
# elif (b8 is GT_033 and b1 is LT_085) or b8 is LT_009:
t1 = min(t0, 1.0 - t1)
t2 = min(t1, max(min(_B8_GT_033(inputs.b8[i]), _B1_LT_085(inputs.b1[i])), _B8_LT_009(inputs.b8[i])))
# if b5 is LT_01:
t3 = min(t2, _B5_LT_01(inputs.b5[i]))
# Wasser = TRUE
outputs.Wasser[i] = t3
# elif (b19 is GT_015 and (b8 is GT_04 and b8 is LT_085) and b7 is LT_05) or (b8 is GT_04 and bsum is GT_011) or (b8 is GT_035 and bsum is GT_016):
t3 = min(t2, 1.0 - t3)
t4 = min(t3, max(max(min(min(_B19_GT_015(inputs.b19[i]), min(_B8_GT_04(inputs.b8[i]), _B8_LT_085(inputs.b8[i]))), _B7_LT_05(inputs.b7[i])), min(_B8_GT_04(inputs.b8[i]), _BSum_GT_011(inputs.bsum[i]))), min(_B8_GT_035(inputs.b8[i]), _BSum_GT_016(inputs.bsum[i]))))
# if bsum is GT_013:
t5 = min(t4, _BSum_GT_013(inputs.bsum[i]))
# Schill = TRUE
outputs.Schill[i] = t5
# else:
t5 = min(t4, 1.0 - t5)
# Muschel = TRUE
outputs.Muschel[i] = t5
# elif b8 is GT_045:
t4 = min(t3, 1.0 - t4)
t5 = min(t4, _B8_GT_045(inputs.b8[i]))
# dense2 = TRUE
outputs.dense2[i] = t5
# else:
t5 = min(t4, 1.0 - t5)
# dense1 = TRUE
outputs.dense1[i] = t5
# elif b1 is GT_1:
t2 = min(t1, 1.0 - t2)
t3 = min(t2, _B1_GT_1(inputs.b1[i]))
# Strand = TRUE
outputs.Strand[i] = t3
# elif b3 is LT_005:
t3 = min(t2, 1.0 - t3)
t4 = min(t3, _B3_LT_005(inputs.b3[i]))
# Sand = TRUE
outputs.Sand[i] = t4
# elif b3 is LT_01 and b8 is GT_0:
t4 = min(t3, 1.0 - t4)
t5 = min(t4, min(_B3_LT_01(inputs.b3[i]), _B8_GT_0(inputs.b8[i])))
# Misch = TRUE
outputs.Misch[i] = t5
# elif b3 is LT_015 and b8 is GT_0:
t5 = min(t4, 1.0 - t5)
t6 = min(t5, min(_B3_LT_015(inputs.b3[i]), _B8_GT_0(inputs.b8[i])))
# Misch2 = TRUE
outputs.Misch2[i] = t6
# elif b3 is LT_02 and b2 is GT_0 and b8 is GT_0:
t6 = min(t5, 1.0 - t6)
t7 = min(t6, min(min(_B3_LT_02(inputs.b3[i]), _B2_GT_0(inputs.b2[i])), _B8_GT_0(inputs.b8[i])))
# Schlick = TRUE
outputs.Schlick[i] = t7
# elif b16 is GT_0 and b8 is GT_0:
t7 = min(t6, 1.0 - t7)
t8 = min(t7, min(_B16_GT_0(inputs.b16[i]), _B8_GT_0(inputs.b8[i])))
# schlick_t = TRUE
outputs.schlick_t[i] = t8
# else:
t8 = min(t7, 1.0 - t8)
# Wasser2 = TRUE
outputs.Wasser2[i] = t8
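# Example (sketch): classify a small batch of pixels. The band values below are
# made up; real inputs would come from the corresponding raster bands.
#
#   n = 4
#   inputs = Inputs(n)
#   inputs.b1[:] = 0.9
#   inputs.b4[:] = 0.2   # non-zero b4 means "data present"
#   inputs.b8[:] = 0.5
#   inputs.bsum[:] = 0.2
#   outputs = Outputs(n)
#   apply_rules(inputs, outputs)
#   # each Outputs array now holds the fuzzy membership of every pixel in that class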
|
|
import copy
import math
import random
from scipy.spatial.distance import hamming as ham
class CHC:
def __init__(self, archivo):
f = open(archivo)
self.tam_crom = int(f.readline())
print ("Numero instancias: " + str(self.tam_crom))
self.tip = f.readline()
self.tip = self.tip.strip().split('\t')
print("Tipo de atributos ")
print(self.tip)
f.readline()
self.lines = f.readlines()
f.close()
self.crom_ini = []
for i in range(self.tam_crom):
self.crom_ini.append(1)
print ("Cromosoma inicial ")
print (self.crom_ini)
def init_P(self, t):
print("Poblacion Inicial ")
self.pob = {}
crom = []
self.tam = t
for i in range(self.tam):
for e in range(self.tam_crom):
crom.append(random.randrange(0,2))
self.pob[i] = crom
crom = []
print (self.pob)
return self.pob, self.tam_crom
    # Evaluate the population
def eval_P(self, p):
self.eval = {}
form = ""
for e in range(len(self.tip[:-1])):
if self.tip[e] == '0':
form += 'num' + '\t'
else:
form += 'attr' + '\t'
form += 'class'
#print("Formato: " + form)
c_i = 0
for ip in range(len(p)):
f_aux = open("aux.txt","w")
c_e = 0
v = p[c_i]
for line in self.lines:
if v[c_e] == 1:
f_aux.write(line)
c_e += 1
f_aux.close()
fit = Clasificador_Bayes("aux.txt", form)
r = fit.evaluar("T_V")
self.eval[c_i] = r
c_i += 1
        if len(p) > 0:
f_aux.close()
print("Evaluaciones")
print (self.eval)
return self.eval
    # Evaluate the initial chromosome
def eval_Crom(self):
self.eval = {}
form = ""
for e in range(len(self.tip[:-1])):
if self.tip[e] == '0':
form += 'num' + '\t'
else:
form += 'attr' + '\t'
form += 'class'
print("Formato: " + form)
f_aux = open("aux.txt","w")
for line in self.lines:
f_aux.write(line)
f_aux.close()
fit = Clasificador_Bayes("aux.txt", form)
fit.evaluar("T_V")
#HUX
def hux(self, p, u = 0):
if u == 0:
u = self.tam_crom/4
print ("Umbrar de apareamiento: " + str(u))
self.p_d = {}
aux = 0
for n in range(int(self.tam/2)):
p1 = p[random.randrange(0, len(p))]
p2 = p[random.randrange(0, len(p))]
if ( (ham(p1, p2) * self.tam_crom) > u):
m = (ham(p1, p2) * self.tam_crom) / 2
m = int(m)
                h1 = list(p1)  # offspring start as copies so the parent chromosomes are not mutated
                h2 = list(p2)
while (m > 0):
bit_p = random.randrange(0, len(p1))
if (p1[bit_p] != p2[bit_p]) and (p1[bit_p] != h2[bit_p]):
                        # Create offspring from p1, p2
aux1 = p2[bit_p]
aux2 = p1[bit_p]
h1[bit_p] = aux1
h2[bit_p] = aux2
m -= 1
self.p_d[aux] = h1
self.p_d[aux + 1] = h2
aux += 1
print("Descendencia")
print (self.p_d)
return self.p_d
    # Elitist selection
def sel_eti(self, p, h):
self.p_n = {}
print("Seleccion elitista")
        punt_p = self.eval_P(p)
        punt_h = self.eval_P(h)
pob_total = {}
l_aux = []
cont = 0
for key in p:
l_aux.append(p[key])
l_aux.append(punt_p[key])
pob_total[cont] = l_aux
l_aux = []
cont += 1
for key in h:
l_aux.append(h[key])
l_aux.append(punt_h[key])
pob_total[cont] = l_aux
l_aux = []
cont += 1
print("Poblacion total")
print(pob_total)
max = 0
key_d = None
for e in range(self.tam):
for key in pob_total:
if (pob_total[key][1] >= max):
max = pob_total[key][1]
key_d = key
self.p_n[e] = pob_total[key_d]
max = 0
del (pob_total[key_d])
print("Nueva poblacion")
p_nueva = {}
for key in self.p_n:
p_nueva[key] = self.p_n[key][0]
print (self.p_n)
#-----------
return p_nueva, self.p_n
    # Reinitialization (restart)
def reinicializacion(self, p, umb):
p_r = {}
max = 0
crom_max = []
print("Reinicializacion")
for key in p:
if p[key][1] > max:
max = p[key][1]
crom_max = p[key][0]
print ("Mejor Individuo(cromosoma)")
print (str(crom_max) + " , " + str(max))
porc = int(100 * float (umb) /float (self.tam_crom))
print ("::" + str(porc))
crom = copy.deepcopy(crom_max)
for i in range(self.tam):
crom = copy.deepcopy(crom_max)
for j in range(porc):
crom[random.randrange(0, self.tam_crom)] = random.randrange(0,2)
p_r[i] = crom
print ("Nueva poblacion")
print(p_r)
return p_r
class Clasificador_Bayes:
def __init__(self, archivo, formato):
total = 0
classes = {}
counts = {}
        totals = {} # For numeric attributes
numericValues = {}
self.formato = formato.strip().split('\t')
self.prior = {}
self.conditional = {}
f = open(archivo)
lines = f.readlines()
f.close()
for line in lines:
fields = line.strip().split('\t')
# print(fields)
vector = []
nums = []
for i in range(len(fields)):
if self.formato[i] == 'num':
nums.append(float(fields[i]))
elif self.formato[i] == 'attr':
vector.append(fields[i])
elif self.formato[i] == 'class':
category = fields[i]
total += 1
classes.setdefault(category, 0)
counts.setdefault(category, {})
totals.setdefault(category, {})
numericValues.setdefault(category, {})
classes[category] += 1
            # Non-numeric attributes
col = 0
for columnValue in vector:
col += 1
counts[category].setdefault(col, {})
counts[category][col].setdefault(columnValue, 0)
counts[category][col][columnValue] += 1
            # Numeric attributes
col = 0
for columnValue in nums:
col += 1
totals[category].setdefault(col, 0)
#totals[category][col].setdefault(columnValue, 0)
totals[category][col] += columnValue
numericValues[category].setdefault(col, [])
numericValues[category][col].append(columnValue)
# p(c) #
for (category, count) in classes.items():
self.prior[category] = (float(count) / float(total))
# p(c|D) #
for (category, columns) in counts.items():
self.conditional.setdefault(category, {})
for (col, valueCounts) in columns.items():
self.conditional[category].setdefault(col, {})
for (attrValue, count) in valueCounts.items():
self.conditional[category][col][attrValue] = (float(count) / float(classes[category]))
self.tmp = counts
        # Mean and standard deviation
self.means = {}
self.totals = totals
for (category, columns) in totals.items():
self.means.setdefault(category, {})
for (col, cTotal) in columns.items():
self.means[category][col] = (float(cTotal) / float(classes[category]))
        # Standard deviation
self.ssd = {}
for (category, columns) in numericValues.items():
self.ssd.setdefault(category, {})
for (col, values) in columns.items():
sumOfSquareDifferences = 0
theMean = self.means[category][col]
for value in values:
sumOfSquareDifferences += (value - theMean) ** 2
columns[col] = 0
self.ssd[category][col] = math.sqrt(sumOfSquareDifferences / (classes[category] - 1))
def clasificar(self, itemVector, numVector):
results = []
sqrt2pi = math.sqrt(2 * math.pi)
for (category, prior) in self.prior.items():
prob = prior
col = 1
for attrValue in itemVector:
if not attrValue in self.conditional[category][col]:
prob = 0
else:
prob = prob * self.conditional[category][col][attrValue]
col += 1
col = 1
for x in numVector:
mean = self.means[category][col]
ssd = self.ssd[category][col]
ePart = math.pow(math.e, -(x - mean) ** 2 / (2 * ssd ** 2))
prob *= (1.0 / (sqrt2pi * ssd)) * ePart
col += 1
results.append((prob, category))
return max(results)[1]
def evaluar(self, archivo):
clases = []
aciertos = []
f = open(archivo)
self.tip = f.readline()
self.tip = self.tip.strip().split('\t')
lines = f.readlines()
f.close()
for line in lines:
l = line.strip().split('\t')
clases.append(l[-1])
total = len(clases)
#print("Numero de ejemplo: " + str(total))
#print (clases)
vector_num = []
vector_nom = []
self.tip = self.tip[:-1]
for line in lines:
l = line.strip().split('\t')
l = l[:-1]
c = 0
for e in l:
if (self.tip[c] == '0'):
vector_num.append(float(e))
else:
vector_nom.append(e)
c += 1
res = self.clasificar(vector_nom, vector_num)
aciertos.append(res)
vector_num = []
vector_nom = []
#print(aciertos)
        # Compute the accuracy percentage
c = 0
for e in range(len(clases)):
if (clases[e] == aciertos[e]):
c += 1
        # Rule of three (simple proportion)
por = (c * 100) / len(clases)
#print"Porcentaje de aciertos: ", str(por)
return por
#c = Clasificador_Bayes("datos", "attr\tattr\tattr\tclass")
#print(c.clasificar(['health', 'moderate', 'moderate'], []))
#print(c.clasificar(['both', 'sedentary', 'moderate'], []))
#c2 = Clasificador_Bayes("pima_e", "num\tnum\tnum\tnum\tnum\tnum\tnum\tnum\tclass")
#print("Clase: " + str(c2.clasificar([], [3, 78, 50, 32, 88, 31.0, 0.248, 26])))
#print("Clase: " + str(c2.clasificar([], [2, 197, 70, 45, 543, 30.5, 0.158, 53])))
#print("Clase: " + str(c2.clasificar([], [3, 78, 50, 32, 88, 31.0, 0.248, 26])))
#print("Clase: " + str(c2.clasificar([], [1, 91, 54, 25, 100, 25.2, 0.234, 23])))
#c2.evaluar("pima_v")
#c2.evaluar("pima_v2")
#c3 = Clasificador_Bayes("prueba", "num\tnum\tnum\tnum\tnum\tnum\tnum\tnum\tclass")
#c3.evaluar("pima_v3")
#c4 = Clasificador_Bayes("pima_e2", "num\tnum\tnum\tnum\tnum\tnum\tnum\tnum\tattr\tclass")
#print("Clase: " + str(c4.clasificar(['no'], [1, 91, 54, 25, 100, 25.2, 0.234, 23])))
#print("Clase: " + str(c4.clasificar(['yes'], [2, 197, 70, 45, 543, 30.5, 0.158, 53])))
#print("CHC")
#print("Parametros")
#num_g = int(raw_input("Numero de generaciones (iteraciones): "))
#tam_p = int(raw_input("Tamano de la poblacion: "))
#umb_c = int(raw_input("Umbral (Si es 0, entonces se toma por defecto L/4, donde L = longitud del cromosoma) :"))
#umb_r = int(raw_input("Porcentaje reinicializacion (Si es 0, entonces se toma por defecto 35%:"))
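# Driver sketch of the CHC loop below: build a random initial population, apply
# HUX crossover, keep the best individuals via elitist selection, shrink the
# mating threshold d whenever no offspring are produced, and restart the
# population around the best chromosome once d reaches 0.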
umb_r = 3.5
g = CHC("T")
p, tam_c = g.init_P(10)
d = tam_c / 4
for i in range(200):
p_aux = copy.deepcopy(p)
desc = g.hux(p, d)
p, p_eva = g.sel_eti(p_aux,desc)
if (len(desc)) == 0:
d = d - 1
if (d == 0) :
print("*** Reinicializacion ***")
print("Iteracion : " + str(i))
p = g.reinicializacion(p_eva, umb_r )
d = tam_c / 4
print("***************")
print("Terminacion")
print("***************")
|
|
import numpy as np
import collections
import pdb
from nn.math import softmax, make_onehot
# This is a 2-layer deep recursive neural network with two ReLU layers and a softmax layer.
# You must update the forward and backward propagation functions of this file.
# You can run this file via 'python rnn2deep.py' to perform a gradient check.
# Tip: insert pdb.set_trace() in places where you are unsure what's going on.
class RNN2:
def __init__(self,wvecDim, middleDim, outputDim,numWords,mbSize=30,rho=1e-4):
self.wvecDim = wvecDim
self.outputDim = outputDim
self.middleDim = middleDim
self.numWords = numWords
self.mbSize = mbSize
self.defaultVec = lambda : np.zeros((wvecDim,))
self.rho = rho
def initParams(self):
np.random.seed(12341)
# Word vectors
self.L = 0.01*np.random.randn(self.wvecDim,self.numWords)
# Hidden activation weights for layer 1
self.W1 = 0.01*np.random.randn(self.wvecDim,2*self.wvecDim)
self.b1 = np.zeros((self.wvecDim))
# Hidden activation weights for layer 2
self.W2 = 0.01*np.random.randn(self.middleDim,self.wvecDim)
self.b2 = np.zeros((self.middleDim))
# Softmax weights
self.Ws = 0.01*np.random.randn(self.outputDim,self.middleDim) # note this is " U " in the notes and the handout.. there is a reason for the change in notation
self.bs = np.zeros((self.outputDim))
self.stack = [self.L, self.W1, self.b1, self.W2, self.b2, self.Ws, self.bs]
# Gradients
self.dW1 = np.empty(self.W1.shape)
self.db1 = np.empty((self.wvecDim))
self.dW2 = np.empty(self.W2.shape)
self.db2 = np.empty((self.middleDim))
self.dWs = np.empty(self.Ws.shape)
self.dbs = np.empty((self.outputDim))
def costAndGrad(self,mbdata,test=False):
"""
Each datum in the minibatch is a tree.
Forward prop each tree.
Backprop each tree.
Returns
cost
Gradient w.r.t. W1, W2, Ws, b1, b2, bs
Gradient w.r.t. L in sparse form.
or if in test mode
Returns
cost, correctArray, guessArray, total
"""
cost = 0.0
correct = []
guess = []
total = 0.0
self.L, self.W1, self.b1, self.W2, self.b2, self.Ws, self.bs = self.stack
# Zero gradients
self.dW1[:] = 0
self.db1[:] = 0
self.dW2[:] = 0
self.db2[:] = 0
self.dWs[:] = 0
self.dbs[:] = 0
self.dL = collections.defaultdict(self.defaultVec)
# Forward prop each tree in minibatch
for tree in mbdata:
c,tot = self.forwardProp(tree.root,correct,guess)
cost += c
total += tot
if test:
return (1./len(mbdata))*cost,correct, guess, total
# Back prop each tree in minibatch
for tree in mbdata:
self.backProp(tree.root)
# scale cost and grad by mb size
scale = (1./self.mbSize)
for v in self.dL.itervalues():
v *=scale
# Add L2 Regularization
cost += (self.rho/2)*np.sum(self.W1**2)
cost += (self.rho/2)*np.sum(self.W2**2)
cost += (self.rho/2)*np.sum(self.Ws**2)
return scale*cost,[self.dL,scale*(self.dW1 + self.rho*self.W1),scale*self.db1,
scale*(self.dW2 + self.rho*self.W2),scale*self.db2,
scale*(self.dWs+self.rho*self.Ws),scale*self.dbs]
def ReLU(self,x):
return x*(x > 0)
#return 1.0/(1+np.exp(-x))
def df(self,x):
#f = self.ReLU(x)
#return f*(1-f)
return 1.0*(x > 0)
def forwardProp(self,node, correct=[], guess=[]):
cost = total = 0.0
# this is exactly the same setup as forwardProp in rnn.py
if node.isLeaf == True:
node.fprop = True
node.hActs1 = self.L[:,node.word]
node.hActs2 = self.ReLU(self.W2.dot(node.hActs1)+self.b2)
node.probs = softmax(self.Ws.dot(node.hActs2)+self.bs)
p = node.probs*make_onehot(node.label,len(self.bs))
cost = -np.log(np.sum(p))
correct.append(node.label)
guess.append(np.argmax(node.probs))
return cost, 1
c1,t1 = self.forwardProp(node.left,correct,guess)
c2,t2 = self.forwardProp(node.right,correct,guess)
if node.left.fprop and node.right.fprop:
node.fprop = True
h = np.hstack([node.left.hActs1, node.right.hActs1])
node.hActs1 = self.ReLU(self.W1.dot(h) + self.b1)
node.hActs2 = self.ReLU(self.W2.dot(node.hActs1) + self.b2)
node.probs = softmax(self.Ws.dot(node.hActs2)+self.bs)
p = node.probs*make_onehot(node.label,len(self.bs))
cost = -np.log(np.sum(p))
correct.append(node.label)
guess.append(np.argmax(node.probs))
cost += c1
cost += c2
total += t1
total += t2
return cost, total + 1
def backProp(self,node,error=None):
# Clear nodes
node.fprop = False
# this is exactly the same setup as backProp in rnn.py
errorCur = node.probs - make_onehot(node.label,len(self.bs))
self.dWs += np.outer(errorCur,node.hActs2)
self.dbs += errorCur
errorCur = errorCur.dot(self.Ws)*self.df(node.hActs2)
self.dW2 += np.outer(errorCur,node.hActs1)
self.db2 += errorCur
errorCur = errorCur.dot(self.W2)
if error is not None:
errorCur += error
if node.isLeaf == True:
self.dL[node.word] += errorCur
return
errorCur = errorCur*self.df(node.hActs1)
tmp1 = np.ones(self.W1.shape).dot(np.diag(np.hstack([node.left.hActs1, node.right.hActs1])))
self.dW1 += np.diag(errorCur).dot(tmp1)
self.db1 += errorCur
errorCur = errorCur.dot(self.W1)
self.backProp(node.left,errorCur[:self.wvecDim])
self.backProp(node.right,errorCur[self.wvecDim:])
def updateParams(self,scale,update,log=False):
"""
Updates parameters as
p := p - scale * update.
If log is true, prints root mean square of parameter
and update.
"""
if log:
for P,dP in zip(self.stack[1:],update[1:]):
pRMS = np.sqrt(np.mean(P**2))
dpRMS = np.sqrt(np.mean((scale*dP)**2))
print "weight rms=%f -- update rms=%f"%(pRMS,dpRMS)
self.stack[1:] = [P+scale*dP for P,dP in zip(self.stack[1:],update[1:])]
# handle dictionary update sparsely
dL = update[0]
for j in dL.iterkeys():
self.L[:,j] += scale*dL[j]
def toFile(self,fid):
import cPickle as pickle
pickle.dump(self.stack,fid)
def fromFile(self,fid):
import cPickle as pickle
self.stack = pickle.load(fid)
def check_grad(self,data,epsilon=1e-6):
cost, grad = self.costAndGrad(data)
err1 = 0.0
count = 0.0
cc = 0
print "Checking dWs, dW1 and dW2..."
for W,dW in zip(self.stack[1:],grad[1:]):
W = W[...,None] # add dimension since bias is flat
dW = dW[...,None]
cc += 1
for i in xrange(W.shape[0]):
for j in xrange(W.shape[1]):
W[i,j] += epsilon
costP,_ = self.costAndGrad(data)
W[i,j] -= epsilon
numGrad = (costP - cost)/epsilon
err = np.abs(dW[i,j] - numGrad)
if err > 1e-5:
print cc, " -> ",W.shape,"W[%d, %d] = %.9f" %(i,j, err)
err1+=err
count+=1
if 0.001 > err1/count:
print "Grad Check Passed for dW Sum of Error = %.12f" % (err1/count)
else:
print "Grad Check Failed for dW: Sum of Error = %.12f" % (err1/count)
# check dL separately since dict
dL = grad[0]
L = self.stack[0]
err2 = 0.0
count = 0.0
print "Checking dL..."
for j in dL.iterkeys():
for i in xrange(L.shape[0]):
L[i,j] += epsilon
costP,_ = self.costAndGrad(data)
L[i,j] -= epsilon
numGrad = (costP - cost)/epsilon
err = np.abs(dL[j][i] - numGrad)
err2+=err
count+=1
if 0.001 > err2/count:
print "Grad Check Passed for dL Sum of Error = %.12f" % (err2/count)
else:
print "Grad Check Failed for dL: Sum of Error = %.12f" % (err2/count)
if __name__ == '__main__':
import tree as treeM
train = treeM.loadTrees()
numW = len(treeM.loadWordMap())
print "numW = ", numW
wvecDim = 10
middleDim = 8
outputDim = 5
rnn = RNN2(wvecDim,middleDim,outputDim,numW,mbSize=4)
rnn.initParams()
mbData = train[:4]
print "Numerical gradient check..."
rnn.check_grad(mbData)
|
|
from direct.distributed import DistributedObject
from toontown.toonbase import ToontownGlobals
import MailboxGlobals
from toontown.catalog import CatalogItem
from toontown.catalog import CatalogItemList
from toontown.toontowngui import TTDialog
from toontown.toonbase import TTLocalizer
from toontown.catalog import MailboxScreen
from direct.directnotify.DirectNotifyGlobal import *
from direct.distributed.ClockDelta import *
from pandac.PandaModules import *
import random
from direct.interval.IntervalGlobal import SoundInterval
FlagPitchEmpty = -70
FlagPitchFull = 0
class DistributedMailbox(DistributedObject.DistributedObject):
notify = directNotify.newCategory('DistributedMailbox')
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.model = None
self.flag = None
self.flagIval = None
self.nameText = None
self.fullIndicator = 0
self.mailboxGui = None
self.mailboxDialog = None
self.mailboxSphereEvent = None
self.mailboxSphereEnterEvent = None
self.mailboxGuiDoneEvent = 'mailboxGuiDone'
return
def announceGenerate(self):
DistributedMailbox.notify.debug('announceGenerate')
DistributedObject.DistributedObject.announceGenerate(self)
self.mailboxSphereEvent = self.taskName('mailboxSphere')
self.mailboxSphereEnterEvent = 'enter' + self.mailboxSphereEvent
if self.houseId == base.localAvatar.houseId:
self.accept(self.mailboxSphereEnterEvent, self.__handleEnterSphere)
self.load()
def load(self):
DistributedMailbox.notify.debug('load')
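        # Seed a private RNG with the houseId so every client derives the same
        # (but per-house distinct) color for the owner's name text below.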
randomGenerator = random.Random()
randomGenerator.seed(self.houseId)
r = randomGenerator.random()
g = randomGenerator.random()
b = randomGenerator.random()
self.nameColor = (r,
g,
b,
1)
houseNode = self.cr.playGame.hood.loader.houseNode[self.housePosInd]
estateNode = houseNode.getParent()
zOffset = 0
if self.housePosInd == 3:
zOffset = -1
elif self.housePosInd == 2:
zOffset = 0.5
self.model = loader.loadModel('phase_5.5/models/estate/mailboxHouse')
self.model.reparentTo(estateNode)
self.model.setPos(houseNode, 19, -4, 0 + zOffset)
self.model.setH(houseNode, 90)
self.flag = self.model.find('**/mailbox_flag')
if self.fullIndicator:
self.flag.setP(FlagPitchFull)
else:
self.flag.setP(FlagPitchEmpty)
self.__setupName()
collision = self.model.find('**/mailbox_collision')
collision.setName(self.mailboxSphereEvent)
def disable(self):
DistributedMailbox.notify.debug('disable')
self.notify.debug('disable')
self.ignoreAll()
if self.flagIval:
self.flagIval.finish()
self.flagIval = None
if self.model:
self.model.removeNode()
self.model = None
if self.nameText:
self.nameText.removeNode()
self.nameText = None
if self.mailboxGui:
self.mailboxGui.hide()
self.mailboxGui.unload()
self.mailboxGui = None
if self.mailboxDialog:
self.mailboxDialog.cleanup()
self.mailboxDialog = None
self.mailboxSphereEvent = None
self.mailboxSphereEnterEvent = None
DistributedObject.DistributedObject.disable(self)
return
def setHouseId(self, houseId):
DistributedMailbox.notify.debug('setHouseId( houseId=%d )' % houseId)
self.houseId = houseId
def setHousePos(self, housePosInd):
DistributedMailbox.notify.debug('setHousePos')
self.housePosInd = housePosInd
def setName(self, name):
DistributedMailbox.notify.debug('setName( name=%s )' % name)
self.name = name
def setFullIndicator(self, full):
DistributedMailbox.notify.debug('setFullIndicator( full=%s )' % full)
if self.fullIndicator != full:
self.fullIndicator = full
if self.flag:
if self.flagIval:
self.flagIval.pause()
self.flagIval = None
p = FlagPitchEmpty
if self.fullIndicator:
p = FlagPitchFull
self.flagIval = self.flag.hprInterval(0.5, VBase3(0, p, 0), blendType='easeInOut')
self.flagIval.start()
return
def __handleEnterSphere(self, collEntry):
DistributedMailbox.notify.debug('Entering Mailbox Sphere....')
self.ignore(self.mailboxSphereEnterEvent)
self.cr.playGame.getPlace().detectedMailboxCollision()
self.accept('mailboxAsleep', self.__handleMailboxSleep)
self.sendUpdate('avatarEnter', [])
def __handleMailboxSleep(self):
DistributedMailbox.notify.debug('Mailbox Sleep')
if self.mailboxGui:
self.mailboxGui.hide()
self.mailboxGui.unload()
self.mailboxGui = None
if self.mailboxDialog:
self.mailboxDialog.cleanup()
self.mailboxDialog = None
self.__handleMailboxDone()
return
def __handleMailboxDone(self):
DistributedMailbox.notify.debug('Mailbox Done')
self.sendUpdate('avatarExit', [])
self.ignore(self.mailboxGuiDoneEvent)
self.mailboxGui = None
return
def freeAvatar(self):
DistributedMailbox.notify.debug('freeAvatar')
self.notify.debug('freeAvatar')
curState = base.cr.playGame.getPlace().getState()
self.notify.debug('Estate.getState() == %s' % curState)
if not curState == 'stopped':
base.cr.playGame.getPlace().setState('walk')
self.ignore('mailboxAsleep')
self.accept(self.mailboxSphereEnterEvent, self.__handleEnterSphere)
def setMovie(self, mode, avId):
isLocalToon = avId == base.localAvatar.doId
if isLocalToon:
DistributedMailbox.notify.debug('setMovie( mode=%d, avId=%d ) called on a local toon' % (mode, avId))
else:
DistributedMailbox.notify.debug('setMovie( mode=%d, avId=%d ) called on a non-local toon' % (mode, avId))
if mode == MailboxGlobals.MAILBOX_MOVIE_CLEAR:
DistributedMailbox.notify.debug('setMovie: clear')
return
elif mode == MailboxGlobals.MAILBOX_MOVIE_EXIT:
if random.random() < 0.5:
sfx = base.loadSfx('phase_5.5/audio/sfx/mailbox_close_1.mp3')
else:
sfx = base.loadSfx('phase_5.5/audio/sfx/mailbox_close_2.mp3')
sfxTrack = SoundInterval(sfx, node=self.model)
sfxTrack.start()
DistributedMailbox.notify.debug('setMovie: exit')
return
elif mode == MailboxGlobals.MAILBOX_MOVIE_EMPTY:
DistributedMailbox.notify.debug('setMovie: empty')
if isLocalToon:
self.mailboxDialog = TTDialog.TTDialog(dialogName='MailboxEmpty', style=TTDialog.Acknowledge, text=TTLocalizer.DistributedMailboxEmpty, text_wordwrap=15, fadeScreen=1, command=self.__clearDialog)
return
elif mode == MailboxGlobals.MAILBOX_MOVIE_WAITING:
DistributedMailbox.notify.debug('setMovie: waiting')
if isLocalToon:
self.mailboxDialog = TTDialog.TTDialog(dialogName='MailboxWaiting', style=TTDialog.Acknowledge, text=TTLocalizer.DistributedMailboxWaiting, text_wordwrap=15, fadeScreen=1, command=self.__clearDialog)
return
elif mode == MailboxGlobals.MAILBOX_MOVIE_READY:
DistributedMailbox.notify.debug('setMovie: ready')
if random.random() < 0.5:
sfx = base.loadSfx('phase_5.5/audio/sfx/mailbox_open_1.mp3')
else:
sfx = base.loadSfx('phase_5.5/audio/sfx/mailbox_open_2.mp3')
sfxTrack = SoundInterval(sfx, node=self.model)
sfxTrack.start()
if isLocalToon:
self.mailboxGui = MailboxScreen.MailboxScreen(self, base.localAvatar, self.mailboxGuiDoneEvent)
self.mailboxGui.show()
self.accept(self.mailboxGuiDoneEvent, self.__handleMailboxDone)
return
elif mode == MailboxGlobals.MAILBOX_MOVIE_NOT_OWNER:
DistributedMailbox.notify.debug('setMovie: not owner')
if isLocalToon:
self.mailboxDialog = TTDialog.TTDialog(dialogName='MailboxNotOwner', style=TTDialog.Acknowledge, text=TTLocalizer.DistributedMailboxNotOwner, text_wordwrap=15, fadeScreen=1, command=self.__clearDialog)
return
else:
DistributedMailbox.notify.warning('unknown mode in setMovie: %s' % mode)
def acceptItem(self, item, index, callback, optional = -1):
DistributedMailbox.notify.debug('acceptItem')
blob = item.getBlob(store=CatalogItem.Customization)
context = self.getCallbackContext(callback, [item, index])
self.sendUpdate('acceptItemMessage', [context,
blob,
index,
optional])
def acceptInvite(self, item, acceptingIndex, callback, optional = -1):
DistributedMailbox.notify.debug('acceptInvite')
context = self.getCallbackContext(callback, [item, acceptingIndex])
self.sendUpdate('acceptInviteMessage', [context, item.inviteKey])
def acceptItemResponse(self, context, retcode):
DistributedMailbox.notify.debug('acceptItemResponse')
if retcode == ToontownGlobals.P_UserCancelled:
print 'DistributedMailbox User Canceled'
self.doCallbackContext(context, [retcode])
def discardItem(self, item, index, callback, optional = -1):
DistributedMailbox.notify.debug('discardItem')
blob = item.getBlob(store=CatalogItem.Customization)
context = self.getCallbackContext(callback, [item, index])
self.sendUpdate('discardItemMessage', [context,
blob,
index,
optional])
def rejectInvite(self, item, acceptingIndex, callback, optional = -1):
DistributedMailbox.notify.debug('rejectInvite')
context = self.getCallbackContext(callback, [item, acceptingIndex])
self.sendUpdate('rejectInviteMessage', [context, item.inviteKey])
def discardItemResponse(self, context, retcode):
DistributedMailbox.notify.debug('discardItemResponse')
self.doCallbackContext(context, [retcode])
def __setupName(self):
DistributedMailbox.notify.debug('__setupName')
if self.nameText:
self.nameText.removeNode()
self.nameText = None
nameOrigin = self.model.find('**/nameLocator')
if not nameOrigin.isEmpty():
text = TextNode('nameText')
text.setTextColor(*self.nameColor)
text.setAlign(TextNode.ACenter)
text.setFont(ToontownGlobals.getToonFont())
text.setWordwrap(7.5)
text.setText(self.name)
self.nameText = nameOrigin.attachNewNode(text)
self.nameText.setH(90)
self.nameText.setScale(0.2)
return
def __clearDialog(self, event):
DistributedMailbox.notify.debug('__clearDialog')
self.mailboxDialog.cleanup()
self.mailboxDialog = None
self.freeAvatar()
return
def sendInviteReadButNotReplied(self, inviteKey):
self.sendUpdate('markInviteReadButNotReplied', [inviteKey])
|
|
import logging
from collections import defaultdict
import networkx
import pyvex
from . import Analysis
from .code_location import CodeLocation
from ..annocfg import AnnotatedCFG
from ..errors import AngrBackwardSlicingError
from ..utils.constants import DEFAULT_STATEMENT
l = logging.getLogger(name=__name__)
class BackwardSlice(Analysis):
"""
Represents a backward slice of the program.
"""
# FIXME: BackwardSlice does not work with the engines refactoring. It will be brought back to life after the
# FIXME: DDG refactoring, which will happen shortly.
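    # A minimal usage sketch (assuming an analysed project; the analysis options and the
    # 0x400abc address are illustrative placeholders, not prescriptive values):
    #
    #     proj = angr.Project('/path/to/binary', auto_load_libs=False)
    #     cfg = proj.analyses.CFGEmulated(keep_state=True)
    #     cdg = proj.analyses.CDG(cfg)
    #     ddg = proj.analyses.DDG(cfg)
    #     node = cfg.model.get_any_node(0x400abc)
    #     bs = proj.analyses.BackwardSlice(cfg, cdg, ddg, targets=[(node, -1)])
    #     print(bs.dbg_repr())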
def __init__(self, cfg, cdg, ddg,
targets=None,
cfg_node=None,
stmt_id=None,
control_flow_slice=False,
same_function=False,
no_construct=False):
"""
Create a backward slice from a specific statement based on provided control flow graph (CFG), control
dependence graph (CDG), and data dependence graph (DDG).
The data dependence graph can be either CFG-based, or Value-set analysis based. A CFG-based DDG is much faster
to generate, but it only reflects those states while generating the CFG, and it is neither sound nor accurate.
The VSA based DDG (called VSA_DDG) is based on static analysis, which gives you a much better result.
:param cfg: The control flow graph.
:param cdg: The control dependence graph.
:param ddg: The data dependence graph.
:param targets: A list of "target" that specify targets of the backward slices. Each target can be a
tuple in form of (cfg_node, stmt_idx), or a CodeLocation instance.
:param cfg_node: Deprecated. The target CFGNode to reach. It should exist in the CFG.
:param stmt_id: Deprecated. The target statement to reach.
:param control_flow_slice: True/False, indicates whether we should slice only based on CFG. Sometimes when
acquiring DDG is difficult or impossible, you can just create a slice on your CFG.
Well, if you don't even have a CFG, then...
:param no_construct: Only used for testing and debugging to easily create a BackwardSlice object.
"""
self._cfg = cfg
self._cdg = cdg
self._ddg = ddg
self._same_function = same_function
# All targets
self._targets = [ ]
if cfg_node is not None or stmt_id is not None:
l.warning('"cfg_node" and "stmt_id" are deprecated. Please use "targets" to pass in one or more targets.')
self._targets = [ (cfg_node, stmt_id) ]
if targets is not None:
for t in targets:
if isinstance(t, CodeLocation):
node = self._cfg.model.get_any_node(t.block_addr)
self._targets.append((node, t.stmt_idx))
elif type(t) is tuple:
self._targets.append(t)
else:
raise AngrBackwardSlicingError('Unsupported type of target %s' % t)
# Save a list of taints to begin with at the beginning of each SimRun
self.initial_taints_per_run = None
self.runs_in_slice = None
self.cfg_nodes_in_slice = None
# IDs of all chosen statement for each SimRun
self.chosen_statements = defaultdict(set)
# IDs for all chosen exit statements as well as their corresponding targets
self.chosen_exits = defaultdict(list)
if not no_construct:
self._construct(self._targets, control_flow_slice=control_flow_slice)
#
# Public methods
#
def __repr__(self):
s = "BackwardSlice (to %s)" % self._targets
return s
def dbg_repr(self, max_display=10):
"""
Debugging output of this slice.
:param max_display: The maximum number of SimRun slices to show.
:return: A string representation.
"""
s = repr(self) + "\n"
if len(self.chosen_statements) > max_display:
s += "%d SimRuns in program slice, displaying %d.\n" % (len(self.chosen_statements), max_display)
else:
s += "%d SimRuns in program slice.\n" % len(self.chosen_statements)
# Pretty-print the first `max_display` basic blocks
if max_display is None:
# Output all
run_addrs = sorted(self.chosen_statements.keys())
else:
# Only output the first "max_display" ones
run_addrs = sorted(self.chosen_statements.keys())[ : max_display]
for run_addr in run_addrs:
s += self.dbg_repr_run(run_addr) + "\n"
return s
def dbg_repr_run(self, run_addr):
"""
Debugging output of a single SimRun slice.
:param run_addr: Address of the SimRun.
:return: A string representation.
"""
if self.project.is_hooked(run_addr):
ss = "%#x Hooked\n" % run_addr
else:
ss = "%#x\n" % run_addr
# statements
chosen_statements = self.chosen_statements[run_addr]
vex_block = self.project.factory.block(run_addr).vex
statements = vex_block.statements
for i in range(0, len(statements)):
if i in chosen_statements:
line = "+"
else:
line = "-"
line += "[% 3d] " % i
line += str(statements[i])
ss += line + "\n"
# exits
targets = self.chosen_exits[run_addr]
addr_strs = [ ]
for exit_stmt_id, target_addr in targets:
if target_addr is None:
addr_strs.append("default")
else:
addr_strs.append("%#x" % target_addr)
ss += "Chosen exits: " + ", ".join(addr_strs)
return ss
def annotated_cfg(self, start_point=None):
"""
Returns an AnnotatedCFG based on slicing result.
"""
# TODO: Support context-sensitivity
targets = [ ]
for simrun, stmt_idx in self._targets:
targets.append((simrun.addr, stmt_idx))
l.debug("Initializing AnnoCFG...")
anno_cfg = AnnotatedCFG(self.project, self._cfg)
for simrun, stmt_idx in self._targets:
if stmt_idx != -1:
anno_cfg.set_last_statement(simrun.addr, stmt_idx)
for n in self._cfg.graph.nodes():
run = n
if run.addr in self.chosen_statements:
if self.chosen_statements[run.addr] is True:
anno_cfg.add_block_to_whitelist(run.addr)
else:
anno_cfg.add_statements_to_whitelist(run.addr, self.chosen_statements[run.addr])
for src, dst in self._cfg.graph.edges():
run = src
if dst.addr in self.chosen_statements and src.addr in self.chosen_statements:
anno_cfg.add_exit_to_whitelist(run.addr, dst.addr)
return anno_cfg
def is_taint_related_to_ip(self, simrun_addr, stmt_idx, taint_type, simrun_whitelist=None):
"""
Query in taint graph to check if a specific taint will taint the IP in the future or not.
The taint is specified with the tuple (simrun_addr, stmt_idx, taint_type).
:param simrun_addr: Address of the SimRun.
:param stmt_idx: Statement ID.
:param taint_type: Type of the taint, might be one of the following: 'reg', 'tmp', 'mem'.
:param simrun_whitelist: A list of SimRun addresses that are whitelisted, i.e. the tainted exit will be
ignored if it is in those SimRuns.
:returns: True/False
"""
if simrun_whitelist is None:
simrun_whitelist = set()
if type(simrun_whitelist) is not set:
simrun_whitelist = set(simrun_whitelist)
# Find the specific taint in our graph
taint = None
for n in self.taint_graph.nodes():
if n.type == taint_type and n.addr == simrun_addr and n.stmt_id == stmt_idx:
taint = n
break
if taint is None:
raise AngrBackwardSlicingError('The specific taint is not found')
bfs_tree = networkx.bfs_tree(self.taint_graph, taint)
# A node is tainting the IP if one of the following criteria holds:
# - a descendant tmp variable is used as a default exit or a conditional exit of its corresponding SimRun
# - a descendant register is the IP itself
for descendant in bfs_tree.nodes():
if descendant.type == 'exit':
if descendant.addr not in simrun_whitelist:
return True
elif descendant.type == 'reg' and descendant.reg == self.project.arch.ip_offset:
return True
return False
def is_taint_impacting_stack_pointers(self, simrun_addr, stmt_idx, taint_type, simrun_whitelist=None):
"""
Query in taint graph to check if a specific taint will taint the stack pointer in the future or not.
The taint is specified with the tuple (simrun_addr, stmt_idx, taint_type).
:param simrun_addr: Address of the SimRun.
:param stmt_idx: Statement ID.
:param taint_type: Type of the taint, might be one of the following: 'reg', 'tmp', 'mem'.
:param simrun_whitelist: A list of SimRun addresses that are whitelisted.
:returns: True/False.
"""
if simrun_whitelist is None:
simrun_whitelist = set()
if type(simrun_whitelist) is not set:
simrun_whitelist = set(simrun_whitelist)
# Find the specific taint in our graph
taint = None
for n in self.taint_graph.nodes():
if n.type == taint_type and n.addr == simrun_addr and n.stmt_id == stmt_idx:
taint = n
break
if taint is None:
raise AngrBackwardSlicingError('The specific taint is not found')
bfs_tree = networkx.bfs_tree(self.taint_graph, taint)
# A node is tainting the stack pointer if one of the following criteria holds:
# - a descendant register is the sp/bp itself
for descendant in bfs_tree.nodes():
if descendant.type == 'reg' and (
descendant.reg in (self.project.arch.sp_offset, self.project.arch.bp_offset)
):
return True
return False
#
# Private methods
#
def _construct(self, targets, control_flow_slice=False):
"""
Construct a dependency graph based on given parameters.
:param targets: A list of tuples like (CFGNode, statement ID)
        :param control_flow_slice: Whether the slice should be computed from the CFG alone, ignoring data dependencies.
"""
if control_flow_slice:
simruns = [ r for r, _ in targets ]
self._construct_control_flow_slice(simruns)
else:
self._construct_default(targets)
def _construct_control_flow_slice(self, simruns):
"""
Build a slice of the program without considering the effect of data dependencies.
This is an incorrect hack, but it should work fine with small programs.
:param simruns: A list of SimRun targets. You probably wanna get it from the CFG somehow. It must exist in the
CFG.
"""
# TODO: Support context-sensitivity!
if self._cfg is None:
l.error('Please build CFG first.')
cfg = self._cfg.graph
for simrun in simruns:
if simrun not in cfg:
l.error('SimRun instance %s is not in the CFG.', simrun)
stack = [ ]
for simrun in simruns:
stack.append(simrun)
self.runs_in_slice = networkx.DiGraph()
self.cfg_nodes_in_slice = networkx.DiGraph()
self.chosen_statements = { }
while stack:
# Pop one out
block = stack.pop()
if block.addr not in self.chosen_statements:
self.chosen_statements[block.addr] = True
# Get all predecessors of that block
predecessors = cfg.predecessors(block)
for pred in predecessors:
stack.append(pred)
self.cfg_nodes_in_slice.add_edge(pred, block)
self.runs_in_slice.add_edge(pred.addr, block.addr)
def _construct_default(self, targets):
"""
        Create a backward slice from a specific statement in a specific block. This is done by traversing the CFG
        backwards and marking all tainted statements based on the dependence graphs (CDG and DDG) provided initially. The
        traversal terminates when we reach the entry point, or when there are no unresolved dependencies.
:param targets: A list of tuples like (cfg_node, stmt_idx), where cfg_node is a CFGNode instance where the
backward slice starts, and it must be included in CFG and CDG. stmt_idx is the ID of the target
statement where the backward slice starts.
"""
# TODO: Support context-sensitivity
l.debug("Constructing a default backward program slice")
self.taint_graph = networkx.DiGraph()
taints = set()
accessed_taints = set()
# Fill in the taint set
for cfg_node, stmt_idx in targets:
if cfg_node not in self._cfg.graph:
raise AngrBackwardSlicingError('Target CFGNode %s is not in the CFG.' % cfg_node)
if stmt_idx == -1:
new_taints = self._handle_control_dependence(cfg_node)
taints |= new_taints
else:
cl = CodeLocation(cfg_node.addr, stmt_idx)
taints.add(cl)
while taints:
# Pop a tainted code location
tainted_cl = taints.pop()
l.debug("Checking taint %s...", tainted_cl)
# Mark it as picked
if tainted_cl.block_addr is not None and tainted_cl.stmt_idx is not None:
# Skip SimProcedures
self._pick_statement(tainted_cl.block_addr, tainted_cl.stmt_idx)
# Mark it as accessed
accessed_taints.add(tainted_cl)
# Pick all its data dependencies from data dependency graph
if self._ddg is not None and tainted_cl in self._ddg:
if isinstance(self._ddg, networkx.DiGraph):
predecessors = list(self._ddg.predecessors(tainted_cl))
else:
# angr.analyses.DDG
predecessors = list(self._ddg.get_predecessors(tainted_cl))
l.debug("Returned %d predecessors for %s from data dependence graph", len(predecessors), tainted_cl)
for p in predecessors:
if p not in accessed_taints:
taints.add(p)
self.taint_graph.add_edge(p, tainted_cl)
# Handle the control dependence
for n in self._cfg.model.get_all_nodes(tainted_cl.block_addr):
new_taints = self._handle_control_dependence(n)
l.debug("Returned %d taints for %s from control dependence graph", len(new_taints), n)
for taint in new_taints:
if taint not in accessed_taints:
taints.add(taint)
self.taint_graph.add_edge(taint, tainted_cl)
# In the end, map the taint graph onto CFG
self._map_to_cfg()
def _find_exits(self, src_block, target_block):
"""
Source block has more than one exit, and through some of those exits, the control flow can eventually go to
the target block. This method returns exits that lead to the target block.
:param src_block: The block that has multiple exits.
:param target_block: The target block to reach.
:returns: a dict of statement ID -> a list of target IPs (or None if the exit should not be taken), each
corresponds to an exit to take in order to reach the target.
For example, it returns the following dict:
{
'default': None, # It has a default exit, but shouldn't be taken
15: [ 0x400080 ], # Statement 15 is an exit statement, and should be taken when the target is
# 0x400080
28: None # Statement 28 is an exit statement, but shouldn't be taken
}
"""
# Enumerate all statements and find exit statements
# Since we don't have a state, we have to rely on the pyvex block instead of SimIRSB
# Just create the block from pyvex again - not a big deal
if self.project.is_hooked(src_block.addr):
# Just return all exits for now
return { -1: [ target_block.addr ] }
block = self.project.factory.block(src_block.addr)
vex_block = block.vex
exit_stmt_ids = { }
for stmt_idx, stmt in enumerate(vex_block.statements):
if isinstance(stmt, pyvex.IRStmt.Exit):
exit_stmt_ids[stmt_idx] = None
# And of course, it has a default exit
# Don't forget about it.
exit_stmt_ids[DEFAULT_STATEMENT] = None
# Find all paths from src_block to target_block
# FIXME: This is some crappy code written in a hurry. Replace the all_simple_paths() later.
all_simple_paths = list(networkx.all_simple_paths(self._cfg.graph, src_block, target_block, cutoff=3))
for simple_path in all_simple_paths:
if len(simple_path) <= 1:
# Oops, it looks that src_block and target_block are the same guy?
continue
if self._same_function:
# Examine this path and make sure it does not have call or return edge
for i in range(len(simple_path) - 1):
jumpkind = self._cfg.graph[simple_path[i]][simple_path[i + 1]]['jumpkind']
if jumpkind in ('Ijk_Call', 'Ijk_Ret'):
return { }
# Get the first two nodes
a, b = simple_path[0], simple_path[1]
# Get the exit statement ID from CFG
exit_stmt_id = self._cfg.model.get_exit_stmt_idx(a, b)
if exit_stmt_id is None:
continue
# Mark it!
if exit_stmt_ids[exit_stmt_id] is None:
exit_stmt_ids[exit_stmt_id] = [ b.addr ]
else:
exit_stmt_ids[exit_stmt_id].append(b.addr)
return exit_stmt_ids
def _handle_control_dependence(self, target_node):
"""
Based on control dependence graph, pick all exits (statements) that lead to the target.
:param target_node: A CFGNode instance.
:returns: A set of new tainted code locations.
"""
new_taints = set()
# Query the CDG and figure out all control flow transitions to reach this target
cdg_guardians = self._cdg.get_guardians(target_node)
if not cdg_guardians:
# this block is directly reachable from the entry point
pass
else:
# For each predecessor on CDG, find the correct exit to take, and continue slicing from those exits
for predecessor in cdg_guardians:
exits = self._find_exits(predecessor, target_node)
for stmt_idx, target_addresses in exits.items():
if stmt_idx is not None:
# If it's an exit statement, mark it as picked
self._pick_statement(predecessor.addr,
self._normalize_stmt_idx(predecessor.addr, stmt_idx)
)
# If it's the default statement, we should also pick other conditional exit statements
if stmt_idx == DEFAULT_STATEMENT:
conditional_exits = self._conditional_exits(predecessor.addr)
for conditional_exit_stmt_id in conditional_exits:
cl = CodeLocation(predecessor.addr,
self._normalize_stmt_idx(predecessor.addr, conditional_exit_stmt_id)
)
new_taints.add(cl)
self._pick_statement(predecessor.addr,
self._normalize_stmt_idx(predecessor.addr, conditional_exit_stmt_id)
)
if target_addresses is not None:
if stmt_idx is not None:
# If it's an exit statement, we create a new tainted code location
cl = CodeLocation(predecessor.addr,
self._normalize_stmt_idx(predecessor.addr, stmt_idx)
)
new_taints.add(cl)
# Mark those exits as picked
for target_address in target_addresses:
self._pick_exit(predecessor.addr, stmt_idx, target_address)
# On CFG, pick default exits of all nodes between predecessor and our target node
# Usually this is not required if basic blocks strictly end at control flow transitions. But this is
# not always the case for some architectures
all_simple_paths = list(networkx.all_simple_paths(self._cfg.graph, predecessor, target_node, cutoff=3))
                previous_node = None
                for path in all_simple_paths:
                    for node in path:
                        self._pick_statement(node.addr,
                                             self._normalize_stmt_idx(node.addr, DEFAULT_STATEMENT))
                        if previous_node is not None:
                            self._pick_exit(previous_node.addr, DEFAULT_STATEMENT, node.addr)
                        # Presumed fix: advance previous_node along the path; as originally written it was
                        # never updated, which left the exit-picking branch above unreachable.
                        previous_node = node
return new_taints
def _map_to_cfg(self):
"""
Map our current slice to CFG.
        Based on self.chosen_statements and self.chosen_exits, this method will traverse the CFG and
        check if there is any missing block on the path. If there is, the default exit of that missing block will be
        included in the slice. This is because Slicecutor cannot skip individual basic blocks along a path.
"""
exit_statements_per_run = self.chosen_exits
new_exit_statements_per_run = defaultdict(list)
while len(exit_statements_per_run):
for block_address, exits in exit_statements_per_run.items():
for stmt_idx, exit_target in exits:
if exit_target not in self.chosen_exits:
# Oh we found one!
# The default exit should be taken no matter where it leads to
# Add it to the new set
tpl = (DEFAULT_STATEMENT, None)
if tpl not in new_exit_statements_per_run[exit_target]:
new_exit_statements_per_run[exit_target].append(tpl)
# Add the new ones to our global dict
for block_address, exits in new_exit_statements_per_run.items():
for ex in exits:
if ex not in self.chosen_exits[block_address]:
self.chosen_exits[block_address].append(ex)
# Switch them so we can process the new set
exit_statements_per_run = new_exit_statements_per_run
new_exit_statements_per_run = defaultdict(list)
def _pick_statement(self, block_address, stmt_idx):
"""
Include a statement in the final slice.
:param int block_address: Address of the basic block.
:param int stmt_idx: Statement ID.
"""
# TODO: Support context-sensitivity
# Sanity check
if not isinstance(block_address, int):
raise AngrBackwardSlicingError("Invalid block address %s." % block_address)
if not isinstance(stmt_idx, int):
raise AngrBackwardSlicingError("Invalid statement ID %s." % stmt_idx)
self.chosen_statements[block_address].add(stmt_idx)
def _pick_exit(self, block_address, stmt_idx, target_ips):
"""
Include an exit in the final slice.
:param block_address: Address of the basic block.
:param stmt_idx: ID of the exit statement.
:param target_ips: The target address of this exit statement.
"""
# TODO: Support context-sensitivity
tpl = (stmt_idx, target_ips)
if tpl not in self.chosen_exits[block_address]:
self.chosen_exits[block_address].append(tpl)
#
# Helper functions
#
def _conditional_exits(self, block_addr):
"""
Return a list of conditional statement exits with respect to a basic block.
:param block_addr: The address of the basic block.
:return: A list of statement IDs.
"""
vex_block = self.project.factory.block(block_addr).vex
lst = [ ]
for i, stmt in enumerate(vex_block.statements):
if isinstance(stmt, pyvex.IRStmt.Exit):
lst.append(i)
return lst
def _normalize_stmt_idx(self, block_addr, stmt_idx):
"""
For each statement ID, convert 'default' to (last_stmt_idx+1)
:param block_addr: The block address.
:param stmt_idx: Statement ID.
:returns: New statement ID.
"""
if type(stmt_idx) is int:
return stmt_idx
if stmt_idx == DEFAULT_STATEMENT:
vex_block = self.project.factory.block(block_addr).vex
return len(vex_block.statements)
raise AngrBackwardSlicingError('Unsupported statement ID "%s"' % stmt_idx)
@staticmethod
def _last_branching_statement(statements):
"""
Search for the last branching exit, just like
# if (t12) { PUT(184) = 0xBADF00D:I64; exit-Boring }
and then taint the temp variable inside if predicate
"""
cmp_stmt_id = None
cmp_tmp_id = None
total_stmts = len(statements)
statements = reversed(statements)
for stmt_rev_idx, stmt in enumerate(statements):
if isinstance(stmt, pyvex.IRStmt.Exit):
stmt_idx = total_stmts - stmt_rev_idx - 1
cmp_stmt_id = stmt_idx
cmp_tmp_id = stmt.guard.tmp
return cmp_stmt_id, cmp_tmp_id
from angr.analyses import AnalysesHub
AnalysesHub.register_default('BackwardSlice', BackwardSlice)
|
|
from django.db import connection, transaction, DatabaseError
from xformmanager.tests.util import *  # assumed to also provide `settings` and `FormDefModel` used below
from xformmanager.models import Metadata, MetaDataValidationError
from xformmanager.manager import XFormManager, FormDefError
from receiver.models import Submission, Attachment, SubmissionHandlingOccurrence
import logging
import unittest
from datetime import datetime, timedelta
class MetaTestCase(unittest.TestCase):
def setUp(self):
# clean up, in case some other tests left some straggling
# form data, we want to start with a clean test environment
# each time.
clear_data()
def testMetaData_1(self):
create_xsd_and_populate("data/brac_chw.xsd", "data/brac_chw_1.xml")
populate("data/brac_chw_1.xml")
cursor = connection.cursor()
cursor.execute("SELECT * FROM xformmanager_metadata where formname='BRAC CHW visiting CHP'" )
row = cursor.fetchone()
self.assertEquals(row[1],"BRAC CHW visiting CHP")
self.assertEquals(row[2],"0.0.1")
self.assertEquals(row[3],"P6PH9SR0TKCO6RVDL4YML1D2Y")
if settings.DATABASE_ENGINE=='mysql' :
self.assertEquals(row[4],datetime(2008,1,8,11,55,49))
self.assertEquals(row[5],datetime(2008,1,8,12,8,39))
else:
self.assertEquals(row[4],datetime(2008,1,8,11,55,49,977))
self.assertEquals(row[5],datetime(2008,1,8,12,8,39,258))
self.assertEquals(row[6],"cary")
self.assertEquals(row[7],"99")
self.assertEquals(row[8],"Z6WRHCRXYQO1C1V6B2SB3RBG8")
cursor.execute("SELECT * FROM schema_brac_chw_chwvisit_v0_0_1")
row = cursor.fetchone()
self.assertEquals(row[0],1)
# checks to make sure that non-standard meta fields remain in the generated data table
self.assertEquals(row[3],"0.0.5") # this is commcareversion number
self.assertEquals(row[10],"worker")
self.assertEquals(row[11],3)
""" use these when we finally remove meta info from generate data tables
self.assertEquals(row[1],"0.0.5") # this is commcareversion number
self.assertEquals(row[2],"worker")
self.assertEquals(row[3],3)
"""
def testMetaData_2(self):
create_xsd_and_populate("data/brac_chp.xsd", "data/brac_chp_1.xml")
cursor = connection.cursor()
cursor.execute("SELECT * FROM xformmanager_metadata where formname='BRACCHPHomeVisit'" )
row = cursor.fetchone()
self.assertEquals(row[1],"BRACCHPHomeVisit")
self.assertEquals(row[2],"0.0.1")
self.assertEquals(row[3],"WK13O6ST8SWZVXLAI68B9YZWK")
if settings.DATABASE_ENGINE=='mysql' :
self.assertEquals(row[4],datetime(2009,4,30,11,17,25))
self.assertEquals(row[5],datetime(2009,4,30,11,21,29))
else:
self.assertEquals(row[4],datetime(2009,4,30,11,17,25,89))
self.assertEquals(row[5],datetime(2009,4,30,11,21,29,512))
self.assertEquals(row[6],"lucy")
self.assertEquals(row[7],"6")
self.assertEquals(row[8],"RW07SHOPTWGAOJKUQJJJN215D")
def testMetaData_3(self):
create_xsd_and_populate("data/pf_followup.xsd", "data/pf_followup_1.xml")
populate("data/pf_followup_2.xml")
create_xsd_and_populate("data/pf_new_reg.xsd", "data/pf_new_reg_1.xml")
populate("data/pf_new_reg_2.xml")
create_xsd_and_populate("data/pf_ref_completed.xsd", "data/pf_ref_completed_1.xml")
populate("data/pf_ref_completed_2.xml")
create_xsd_and_populate("data/mvp_mother_reg.xsd", "data/mvp_mother_reg_1.xml")
populate("data/mvp_mother_reg_2.xml")
populate("data/mvp_mother_reg_3.xml")
cursor = connection.cursor()
cursor.execute("SELECT * FROM xformmanager_metadata order by id")
row = cursor.fetchall()
latest_attachment_id = ( Attachment.objects.latest('id') ).id
latest_formdefmodel_id = ( FormDefModel.objects.latest('id') ).id
self.assertEquals(row[0][1],"PathfinderFollowUpVisit")
self.assertEquals(row[0][9],latest_attachment_id-8)
self.assertEquals(row[0][10],1)
self.assertEquals(row[0][11],latest_formdefmodel_id-3)
self.assertEquals(row[1][1],"PathfinderFollowUpVisit")
self.assertEquals(row[2][1],"PathfinderRegistratonVisit")
self.assertEquals(row[3][1],"PathfinderRegistratonVisit")
self.assertEquals(row[3][9],latest_attachment_id-5)
self.assertEquals(row[3][10],2)
self.assertEquals(row[3][11],latest_formdefmodel_id-2)
self.assertEquals(row[4][1],"PathfinderReferralVisit")
self.assertEquals(row[5][1],"PathfinderReferralVisit")
self.assertEquals(row[6][1],"XOLIJZVDJKLORBQUABFLVGLEA")
self.assertEquals(row[7][1],"XOLIJZVDJKLORBQUABFLVGLEA")
self.assertEquals(row[8][1],"XOLIJZVDJKLORBQUABFLVGLEA")
self.assertEquals(row[8][9],latest_attachment_id)
self.assertEquals(row[8][10],3)
self.assertEquals(row[8][11],latest_formdefmodel_id)
def testSubmissionCount(self):
create_xsd_and_populate("data/pf_followup.xsd")
today = datetime.now().date()
tomorrow = today + timedelta(days=1)
day_after_tomorrow = today + timedelta(days=2)
yesterday = today - timedelta(days=1)
for i in range(1, 6):
submission = populate("data/pf_followup_%s.xml" % i)
meta = Metadata.objects.get(attachment=submission.xform)
self.assertEqual(i, meta.get_submission_count(today, tomorrow, False))
self.assertEqual(1, meta.get_submission_count(today, tomorrow, True))
self.assertEqual(0, meta.get_submission_count(yesterday, today, False))
self.assertEqual(0, meta.get_submission_count(tomorrow, day_after_tomorrow, False))
self.assertEqual(i, meta.get_submission_count(yesterday, day_after_tomorrow, False))
self.assertEqual(1, meta.get_submission_count(yesterday, day_after_tomorrow, True))
def testDuplicates(self):
create_xsd_and_populate("data/pf_followup.xsd")
running_count = 0
self.assertEqual(running_count, len(Metadata.objects.all()))
for i in range(1, 6):
populate("data/pf_followup_%s.xml" % i)
# the first one should update the count. The rest should not
running_count = running_count + 1
self.assertEqual(running_count, len(Metadata.objects.all()))
for j in range(0, 3):
logging.warn("EXPECTING A 'duplicate submission' ERROR NOW:")
populate("data/pf_followup_%s.xml" % i)
self.assertEqual(running_count, len(Metadata.objects.all()))
def testReSubmit(self):
# original submission
submission = populate("data/pf_followup_1.xml")
self.assertEquals(submission.is_orphaned(),True)
# register schema
create_xsd_and_populate("data/pf_followup.xsd")
# xformmanagger resubmission
xformmanager = XFormManager()
status = xformmanager.save_form_data(submission.xform.filepath, submission.xform)
self.assertEquals(status,True)
def testSubmitHandling(self):
create_xsd_and_populate("data/pf_followup.xsd")
self.assertEqual(0, len(Metadata.objects.all()))
self.assertEqual(0, len(Submission.objects.all()))
self.assertEqual(0, len(SubmissionHandlingOccurrence.objects.all()))
# this should create a linked submission
populate("data/pf_followup_1.xml")
self.assertEqual(1, len(Metadata.objects.all()))
self.assertEqual(1, len(Submission.objects.all()))
submission = Submission.objects.all()[0]
self.assertEqual(1, len(SubmissionHandlingOccurrence.objects.all()))
way_handled = SubmissionHandlingOccurrence.objects.all()[0]
self.assertEqual(submission, way_handled.submission)
# add check for a count from this user, equal to one
self.assertEqual("1", way_handled.message)
self.assertEqual("xformmanager", way_handled.handled.app)
self.assertEqual("instance_data", way_handled.handled.method)
self.assertFalse(submission.is_orphaned())
# these should NOT create a linked submission. No schema
logging.warn("\nEXPECTING AN ERROR NOW:")
populate("data/pf_new_reg_1.xml")
logging.warn("EXPECTING AN ERROR NOW:")
populate("data/pf_new_reg_2.xml")
logging.warn("EXPECTING AN ERROR NOW:")
populate("data/pf_ref_completed_1.xml")
self.assertEqual(1, len(Metadata.objects.all()))
self.assertEqual(4, len(Submission.objects.all()))
for new_submission in Submission.objects.all():
if new_submission == submission:
self.assertFalse(new_submission.is_orphaned())
else:
self.assertTrue(new_submission.is_orphaned())
self.assertEqual(1, len(SubmissionHandlingOccurrence.objects.all()))
self.assertEqual(way_handled, SubmissionHandlingOccurrence.objects.all()[0])
def testSubmissionHandling(self):
count = len(SubmissionHandlingOccurrence.objects.all())
self.assertEquals(0,count)
formdefmodel_6 = create_xsd_and_populate("data/pf_followup.xsd", "data/pf_followup_1.xml")
count = len(SubmissionHandlingOccurrence.objects.all())
self.assertEquals(1,count)
def testNoMetadata(self):
logging.warn("EXPECTING A 'No metadata found' ERROR NOW:")
create_xsd_and_populate("data/brac_chp.xsd", "data/brac_chp_nometa.xml")
# raises a Metadata.DoesNotExist error on fail
metadata = Metadata.objects.get()
cursor = connection.cursor()
cursor.execute("SELECT * FROM schema_test_no_meta")
row = cursor.fetchone()
self.assertEquals(row[0],1)
self.assertEquals(int(row[10]),132) # this is commcareversion number
self.assertEquals(row[11],"EDINA KEJO")
def testEmptySubmission(self):
logging.warn("EXPECTING A 'No metadata found' ERROR NOW:")
create_xsd_and_populate("data/brac_chp.xsd", "data/brac_chp_nothing.xml")
# raises a Metadata.DoesNotExist error on fail
metadata = Metadata.objects.get()
# empty submissions do not create rows in the data tables
"""
Since we're not failing on bad meta, these unit tests are deprecated.
But we'll keep them around in case they come in handy later.
def testSchemaNoNamespace(self):
try:
create_xsd_and_populate("data/no_xmlns.xml")
self.fail("Missing namespace did not raise an exception")
except FormDefError, e:
# we expect this error to say something about no namespace
self.assertTrue( "no namespace" in unicode(e).lower() )
def testSchemaNoMeta(self):
try:
create_xsd_and_populate("data/no_meta.xml")
self.fail("Missing meta did not raise an exception")
except FormDefError, e:
# we expect this error to say something about no meta
self.assertTrue( "no meta" in unicode(e).lower() )
def testSchemaDuplicateMeta(self):
try:
create_xsd_and_populate("data/duplicate_meta.xml")
self.fail("Duplicate XMLNS did not raise an exception")
except MetaDataValidationError, e:
# we expect this error to say something about a duplicate meta
self.assertTrue( "duplicate" in unicode(e).lower() )
def testSchemaMissingMeta(self):
try:
create_xsd_and_populate("data/missing_meta.xml")
self.fail("Missing XMLNS did not raise an exception")
except MetaDataValidationError, e:
# we expect this error to say something about a duplicate meta
self.assertTrue( "missing" in unicode(e).lower() )
def testSchemaExtraMeta(self):
logging.warn("EXPECTING A 'No metadata found' warning now:")
create_xsd_and_populate("data/extra_meta.xml")
# this should not raise an error
"""
def tearDown(self):
# duplicates setUp, but at least we know we're being clean
clear_data()
|
|
# python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the Orchestrate API Service."""
import uuid
from googleapiclient import discovery
from googleapiclient import errors
from oauth2client.client import GoogleCredentials
from orchestrateapi import orchestrate_pb2
from google.cloud import error_reporting
error_client = error_reporting.Client()
# Connect to Google Cloud Compute Engine API using the environment's service
# account.
credentials = GoogleCredentials.get_application_default()
compute = discovery.build('compute', 'v1', credentials=credentials,
cache_discovery=False)
class OrchestrateTemplateCreationError(Exception):
"""Provides detailed message on error occurred during template creation.
"""
pass
def run(request, context):
"""Creates a template.
Args:
request (orchestrate_pb2.CreateTemplateRequest): Request payload.
context: Context.
Returns:
A orchestrate_pb2.CreateTemplate with the status of the request.
"""
template = request.template
print('Orchestrate.CreateTemplate name={name} project={project}'.format(
name=template.name,
project=template.project,
))
request_id = uuid.uuid4().hex
try:
# Make sure data is valid before creating individual sizes - don't want to
# clean-up half-way or leave incomplete template families.
for size in template.sizes:
validate_metadata(template, size)
# Data checks out. let's create all template sizes.
for size in template.sizes:
create_template_size(template, size)
return orchestrate_pb2.CreateTemplateResponse(
status='CREATED',
request_id=str(request_id),
)
except errors.HttpError as exception:
if exception.resp.status == 409:
message = 'A template with name {name} already exists.'.format(
name=template.name)
raise OrchestrateTemplateCreationError(message)
else:
raise
def create_template_size(template, size):
"""Creates instance template for the given size.
Args:
template: Creation parameters.
size: Size parameters to use.
Returns:
Operation performing template creation.
"""
print('Creating template {name} size {size_name}'.format(
name=template.name, size_name=size.name))
payload = build_template_payload(template, size)
operation = compute.instanceTemplates().insert(
project=template.project,
body=payload,
).execute()
print('Started operation {name}'.format(name=operation['name']))
return operation
def build_template_payload(template, size):
"""Returns a dict with all creation parameters.
Payload format required by the POST instances.insert endpoint.
https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates/insert
Args:
template: Creation parameters.
size: Size parameters.
"""
name = '{name}-{size_name}'.format(name=template.name, size_name=size.name)
# Find latest image
response = compute.images().getFromFamily(
project=template.image_project,
family=template.image_family,
).execute()
source_image = response['selfLink']
# Normalize size parameters
memory = size.memory*1024 # gb to mb
disk_size = size.disk_size # gb
# InstanceTemplates.insert expects machineType to be a name,
# it does NOT support URL-based custom machines, e.g.
# projects/orchestrate-test-1/zones/us-central1-a/machineTypes/custom-6-32768
# Therefore, store this metadata as orchestrate_machine_type
# TODO(b/137211294) orchestrate instances create would have to pay attention to
# this value and override the machineType from this template.
machine_type = 'custom-{cpus}-{memory}'.format(
cpus=size.cpus,
memory=memory,
)
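  # e.g. cpus=6, memory=32 (GB) -> machine_type 'custom-6-32768'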
is_default_size = size.name == template.default_size_name
subnetwork_name = template.subnetwork or template.network
# Prepare metadata
metadata = []
# Metadata intended for the instance itself
for item in template.metadata:
metadata.append(dict(key=item.key, value=item.value))
# Orchestrate-specific metadata that extends the properties stored in the
# instanceTemplate itself. Insert after the instance metadata to ensure
# that clients do not accidentally override orchestrate-specific entries.
metadata += [
dict(key='orchestrate_template', value=True),
dict(key='orchestrate_default_size', value=is_default_size),
dict(key='orchestrate_machine_type', value=machine_type),
dict(key='orchestrate_gpu_type', value=size.gpu_type),
dict(key='orchestrate_gpu_count', value=size.gpu_count),
dict(key='orchestrate_network', value=template.network),
dict(key='orchestrate_subnetwork', value=subnetwork_name),
]
if template.instance_name_pattern:
metadata.append(dict(key='orchestrate_instance_name_pattern',
value=template.instance_name_pattern))
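  # Region is the zone minus its final suffix, e.g. 'us-central1-a' -> 'us-central1'.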
region = '-'.join(template.zone.split('-')[:2])
region_url = 'projects/{project}/regions/{region}'.format(
project=template.project,
region=region,
)
network = 'projects/{project}/global/networks/{network}'.format(
project=template.project,
network=template.network,
)
subnetwork = '{region_url}/subnetworks/{subnetwork}'.format(
region_url=region_url,
subnetwork=subnetwork_name,
)
guest_accelerators = []
if size.gpu_type:
guest_accelerators.append(dict(
acceleratorType='{gpu_type}'.format(gpu_type=size.gpu_type),
acceleratorCount=size.gpu_count,
))
# POST https://www.googleapis.com/compute/v1/
# projects/{project}/zones/us-central1-a/instanceTemplates
payload = dict(
name=name,
description='Orchestrate template {name} size {size_name}'.format(
name=name, size_name=size.name),
properties=dict(
metadata=dict(items=metadata),
tags=dict(
items=[
'https-server',
],
),
canIpForward=True,
networkInterfaces=[
dict(
network=network,
subnetwork=subnetwork,
accessConfigs=[
dict(
name='External NAT',
type='ONE_TO_ONE_NAT',
networkTier='PREMIUM',
)
],
aliasIpRanges=[],
),
],
labels=dict(),
scheduling=dict(
preemptible=False,
onHostMaintenance='TERMINATE',
automaticRestart=True,
nodeAffinities=[],
),
deletionProtection=False,
serviceAccounts=[
# TODO(b/138243681) Ideally this should be configured to run
# with the "orchestrate" service account from the main Orchestrate
# project. Use the one from orchestrated project for now.
dict(
email='orchestrate@{project}.iam.gserviceaccount.com'.format(
project=template.project),
scopes=[
'https://www.googleapis.com/auth/devstorage.read_only',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/monitoring.write',
'https://www.googleapis.com/auth/servicecontrol',
'https://www.googleapis.com/auth/service.management.readonly',
'https://www.googleapis.com/auth/trace.append',
'https://www.googleapis.com/auth/compute',
'https://www.googleapis.com/auth/cloud-platform',
],
),
],
# Size-related parameters
machineType='n1-standard-8',
guestAccelerators=guest_accelerators,
disks=[
dict(
type='PERSISTENT',
boot=True,
mode='READ_WRITE',
autoDelete=True,
initializeParams=dict(
sourceImage=source_image,
diskType=size.disk_type,
diskSizeGb=disk_size,
),
),
],
),
)
return payload
def validate_metadata(template, size):
"""Validates metadata.
Catch any errors or invalid input that would cause a template with incorrect
information being created and propagated down to instances.
Args:
template: Creation parameters.
size: Size parameters to use.
Returns:
Nothing. The function returns normally if everything is correct. Raises an
exception otherwise.
Raises:
OrchestrateTemplateCreationError: If any of the metadata is invalid.
"""
print('Validating metadata for template {name} size {size_name}'.format(
name=template.name, size_name=size.name))
# (b/148229648) Does gpu_type exist?
if size.gpu_type:
response = compute.acceleratorTypes().list(
project=template.project,
zone=template.zone,
).execute()
gpu_types = [gpu['name'] for gpu in response['items']]
if size.gpu_type not in gpu_types:
message = (
'{gpu_type} is not a valid GPU type or is not available in project'
' {project} zone {zone}. Available options are: {gpu_types}'
).format(
project=template.image_project,
zone=template.zone,
gpu_type=size.gpu_type,
gpu_types=', '.join(gpu_types),
)
raise OrchestrateTemplateCreationError(message)
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import codecs, csv, datetime, itertools, json, logging, operator, pprint, shutil, urllib
import requests
from django.utils.http import urlquote_plus
from rapid_app import settings_app
from rapid_app.models import PrintTitleDev, ProcessorTracker
log = logging.getLogger(__name__)
class RapidFileProcessor( object ):
""" Handles processing of file from Rapid.
Main worker function: parse_file_from_rapid() """
def __init__(self, from_rapid_filepath, from_rapid_utf8_filepath ):
log.debug( 'initialized source-path, ```{source}```; destination-utf8-path, ```{destination}```'.format(source=from_rapid_filepath, destination=from_rapid_utf8_filepath) )
self.from_rapid_utf8_filepath = from_rapid_utf8_filepath # converted utf8-filepath
self.updated_holdings_defs_dct = {
'key': 0, 'issn': 1, 'title': 2, 'url': 3, 'location': 4, 'building': 5, 'callnumber': 6, 'year_start': 7, 'year_end': 8 }
self.utf8_maker = Utf8Maker( from_rapid_filepath, from_rapid_utf8_filepath )
def parse_file_from_rapid( self ):
""" Extracts print holdings from the file-from-rapid.
That file contains both print and online holdings.
Steps...
- a file from-rapid is created that is known to be utf8-good
- iterates through that file looking for `Print` entries; for those entries... # HoldingsDctBuilder.build_holdings_dct()
- valid and massaged row-elements are obtained (sometimes a title contains unescaped commas)... # HoldingsDctBuilder.process_file_row()
- if the entry doesn't exist, it's added to a holdings-dct (unique key on modified-issn & location & modified-callnumber)
- a list is created from the dct of all print holdings, primarily making year-ranges # build_holdings_lst()
- the preview-db is updated # update_dev_db()
        - the list is returned to the view in case the user requests a json response; otherwise, the response is the preview admin screen.
Called by viewhelper_processfile.ProcessFileFromRapidHelper.initiate_work() """
log.debug( 'starting parse' )
if self.utf8_maker.check_utf8() is False:
self.utf8_maker.make_utf8()
else:
self.utf8_maker.copy_utf8()
holdings_dct_builder = HoldingsDctBuilder( self.from_rapid_utf8_filepath )
holdings_dct = holdings_dct_builder.build_holdings_dct()
holdings_lst = self.build_holdings_lst( holdings_dct )
self.update_dev_db( holdings_lst )
return holdings_lst
def build_holdings_lst( self, holdings_dct ):
""" Converts the holdings_dct into a list of entries ready for db update.
Main work is taking the multiple year entries and making ranges.
Called by parse_file_from_rapid() """
holdings_lst = []
for ( key, dct_val ) in holdings_dct.items():
year_lst = dct_val['years']
log.debug( 'year_lst, `%s`' % year_lst )
holdings_dct[key]['years_contig'] = self._contigify_list( year_lst )
holdings_dct[key]['years_held'] = self._build_years_held( holdings_dct[key]['years_contig'] )
holdings_lst = self._update_holdings_lst( holdings_lst, dct_val )
sorted_lst = sorted( holdings_lst )
log.info( 'holdings_lst, ```%s```' % pprint.pformat(sorted_lst) )
return sorted_lst
def _contigify_list( self, lst ):
""" Converts sorted list entries into sub-lists that are contiguous.
Eg: [ 1, 2, 4, 5 ] -> [ [1, 2], [4, 5] ]
Credit: <http://stackoverflow.com/questions/3149440/python-splitting-list-based-on-missing-numbers-in-a-sequence>
Called by build_holdings_list() """
contig_lst = []
if lst == ['']:
return contig_lst
int_lst = [ int(x) for x in lst ]
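        # Group by (index - value): consecutive integers share the same difference, so each
        # groupby run below corresponds to one contiguous sub-list.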
for k, g in itertools.groupby( enumerate(int_lst), lambda (i,x):i-x ):
contig_lst.append( map(operator.itemgetter(1), g) )
log.debug( 'contig_lst, `%s`' % contig_lst )
return contig_lst
def _build_years_held( self, contig_lst ):
""" Converts contig_list to list of [ {'start': year-a, 'end': 'year-b'}, {'start': year-c, 'end': 'year-d'} ] entries.
Called by build_holdings_list() """
years_held_lst = []
        for lst in contig_lst:
            start = lst[0]
            end = lst[-1]
            # end = lst[-1] if lst[-1] is not start else ( lst[-1] + 1 )
            start_end_dct = {'start': start, 'end':end}
            if start_end_dct not in years_held_lst:
                years_held_lst.append( start_end_dct )
log.debug( 'years_held_lst, `%s`' % years_held_lst )
return years_held_lst
def _update_holdings_lst( self, holdings_lst, issn_dct ):
""" Builds final data lst entry.
Called by build_holdings_lst() """
( issn, location, building ) = ( issn_dct['issn'], issn_dct['location'], issn_dct['building'] )
( callnumber, title, url ) = ( issn_dct['call_number'], issn_dct['title'], issn_dct['url'] )
for period_dct in issn_dct['years_held']:
new_key = '%s%s' % ( issn.replace('-', ''), period_dct['start'] )
update_lst = [ new_key, issn, title, url, location, building, callnumber, period_dct['start'], period_dct['end'] ]
log.debug( 'update_lst, `%s`' % update_lst )
if update_lst not in holdings_lst:
log.debug( 'gonna update' )
holdings_lst.append( update_lst )
return holdings_lst
def update_dev_db( self, holdings_lst ):
""" Adds and removes dev-db title entries.
Called by parse_file_from_rapid() """
self._run_dev_db_adds( holdings_lst )
self._run_dev_db_deletes( holdings_lst )
return
def _run_dev_db_adds( self, holdings_lst ):
""" Populates a db table that can be viewed before replacing the production db table.
Assumes an index on unique `key` field.
Called by update_dev_db() """
for row in holdings_lst:
log.debug( 'row, ```{}```'.format(pprint.pformat(row)) )
title = PrintTitleDev()
title.key = row[self.updated_holdings_defs_dct['key']]
title.issn = row[self.updated_holdings_defs_dct['issn']]
title.title = row[self.updated_holdings_defs_dct['title']]
title.url = row[self.updated_holdings_defs_dct['url']]
title.start = row[self.updated_holdings_defs_dct['year_start']]
title.end = row[self.updated_holdings_defs_dct['year_end']]
title.location = row[self.updated_holdings_defs_dct['location']]
title.building = row[self.updated_holdings_defs_dct['building']]
title.call_number = row[self.updated_holdings_defs_dct['callnumber']]
title.updated = unicode( datetime.date.today() )
title.save()
return
def _run_dev_db_deletes( self, holdings_lst ):
""" Removes outdated dev-db title entries.
Called by update_dev_db() """
key_list = []
for row in holdings_lst:
holdings_key = row[self.updated_holdings_defs_dct['key']]
if holdings_key not in key_list:
key_list.append( holdings_key )
titles = PrintTitleDev.objects.all()
for title in titles:
if title.key not in key_list:
title.delete()
return
# end class RapidFileProcessor
class Utf8Maker( object ):
""" Ensures file contains utf-8 data.
Non-django class. """
def __init__(self, from_rapid_filepath, from_rapid_utf8_filepath ):
self.from_rapid_filepath = from_rapid_filepath # actual initial file from rapid
self.from_rapid_utf8_filepath = from_rapid_utf8_filepath # converted utf8-filepath
def check_utf8( self, filepath=None ):
""" Ensures file is utf-8 readable.
Will error and return False if not.
Called by parse_file_from_rapid() """
path = filepath if filepath else self.from_rapid_filepath
log.debug( 'checked path, `%s`' % path )
utf8 = False
with codecs.open( path, 'rb', 'utf-8' ) as myfile:
try:
for line in myfile: # reads line-by-line; doesn't tax memory on big files
pass
utf8 = True
except Exception as e:
log.error( 'EXPECTED exception, `%s`' % unicode(repr(e)) )
log.debug( 'utf8 check, `{}`'.format(utf8) )
return utf8
def make_utf8( self ):
""" Iterates through each line; ensures it can be converted to utf-8.
Called by parse_file_from_rapid() """
try:
log.debug( 'src-path, `%s`; dest-path, `%s`' % (self.from_rapid_filepath, self.from_rapid_utf8_filepath) )
with codecs.open( self.from_rapid_filepath, 'rb', 'utf-16' ) as input_file:
with open( self.from_rapid_utf8_filepath, 'wb' ) as output_file:
self._run_utf8_write( input_file, output_file )
log.debug( 'utf8 file now at, `%s`' % self.from_rapid_utf8_filepath )
except Exception as e:
log.error( 'exception on source or destination file, `%s`' % unicode(repr(e)) )
raise Exception( unicode(repr(e)) )
return
def _run_utf8_write( self, input_file, output_file ):
""" Runs the line-by-line utf8 transform.
Called by make_utf8() """
for line in input_file:
try:
# assert( type(line) == unicode )
output_file.write( line.encode('utf-8') )
except Exception as e:
log.error( 'exception, `%s`' % unicode(repr(e)) )
raise Exception( unicode(repr(e)) )
return
def copy_utf8( self ):
""" Copies good utf8 source file to utf8-filepath.
Called by parse_file_from_rapid() """
shutil.copy2( self.from_rapid_filepath, self.from_rapid_utf8_filepath )
return
# end class Utf8Maker
class HoldingsDctBuilder( object ):
""" Builds dct of holdings from file.
Non-django class. """
def __init__(self, from_rapid_utf8_filepath ):
self.from_rapid_utf8_filepath = from_rapid_utf8_filepath # converted utf8-filepath
self.defs_dct = { # proper row field-definitions
'library': 0,
'branch': 1,
'location': 2,
'callnumber': 3,
'title': 4,
'format': 5,
'issn_num': 6,
'issn_type': 7,
'vol_start': 8,
'vol_end': 9,
'year': 10
}
self.row_fixer = RowFixer( self.defs_dct )
self.locations_dct = self.update_locations_dct()
self.tracker_updater = TrackerUpdater()
self.title_maker = TitleMaker()
def update_locations_dct( self ):
""" Populates class attribute with locations dct, used to populate `building` field.
Called by __init__() """
r = requests.get( settings_app.LOCATIONS_URL )
dct = r.json()
return dct
def build_holdings_dct( self ):
""" Iterates through file, grabbing normalized print holdings.
Sample print entries:
`RBN,Main Library,sci,TR1 .P58,Photographic abstracts,Print,0031-8701,ISSN,,,1962`
`RBN,Main Library,qs,QP1 .E7,Ergebnisse der Physiologie, biologischen Chemie und experimentellen Pharmakologie...,Print,0080-2042,ISSN,1,69,1938`
Note: there are unescaped commas in some of the titles. Grrr.
Builds and returns a dict like {
u'00029629sciR11A6': {
u'call_number': 'R11 .A6',
u'issn': '0002-9629',
u'location': 'sci',
u'years': ['1926', '1928'] }, # years are sorted
u'abc123': {
... },
}
Called by RapidFileProcessor.parse_file_from_rapid() """
log.debug( 'starting build_holdings_dct()' )
( holdings_dct, csv_ref, entries_count ) = self.prep_holdings_dct_processing()
for (idx, row) in enumerate(csv_ref): # each row is a `list`
self.track_row( idx, entries_count )
if 'Print' not in row:
continue
( key, issn, title, location, building, callnumber, year ) = self.process_file_row( row )
holdings_dct = self.update_holdings_dct( holdings_dct, key, issn, title, location, building, callnumber, year )
log.info( 'non-matched unicode titles, ```{}```'.format(pprint.pformat(self.title_maker.non_matches)) )
log.debug( 'len(holdings_dct), `{len}`; holdings_dct, ```{dct}```'.format(len=len(holdings_dct), dct=pprint.pformat(holdings_dct)) )
return holdings_dct
def prep_holdings_dct_processing( self ):
""" Sets initial vars.
Called by build_holdings_dct() """
log.debug( 'using utf8-filepath, ```{}```'.format(self.from_rapid_utf8_filepath) )
holdings_dct = {}
tmp_csv_ref = csv.reader( open(self.from_rapid_utf8_filepath), dialect=csv.excel, delimiter=','.encode('utf-8') )
entries_count = sum( 1 for row in tmp_csv_ref ) # runs through file, so have to open again
csv_ref = csv.reader( open(self.from_rapid_utf8_filepath), dialect=csv.excel, delimiter=','.encode('utf-8') )
log.debug( 'entries_count, `%s`' % entries_count )
return ( holdings_dct, csv_ref, entries_count )
def track_row( self, row_idx, entries_count ):
""" Logs progress and updates status-db.
Called by build_holdings_dct() """
tn_prcnt = max( 1, int(entries_count * .1) ) # ten percent (floor of 1 avoids modulo-by-zero on tiny files)
if row_idx % tn_prcnt == 0: # uses modulo
prcnt_done = int( (row_idx * 100.0) / entries_count ) # percent of rows processed; avoids the integer-division pitfalls of `row_idx / (tn_prcnt/10)`
log.info( '%s percent done (on row %s of %s)' % (prcnt_done, row_idx+1, entries_count) ) # +1 for 0 index
self.tracker_updater.update_db_tracker( prcnt_done, entries_count )
elif row_idx == 0:
self.tracker_updater.update_db_tracker( 0, entries_count )
elif row_idx + 1 == entries_count:
self.tracker_updater.update_db_tracker( 100, entries_count )
return
def process_file_row( self, row ):
""" Fixes row if necessary and builds elements.
Called by build_holdings_dct() """
row = [ field.decode('utf-8') for field in row ]
if len( row ) > 11: # titles with commas
row = self.row_fixer.fix_row( row )
( key, issn, title, location, building, callnumber, year ) = self._build_holdings_elements( row )
return ( key, issn, title, location, building, callnumber, year )
def _build_holdings_elements( self, row ):
""" Extracts data from row-list.
Called by process_file_row() """
callnumber = row[self.defs_dct['callnumber']]
issn = row[self.defs_dct['issn_num']]
title = self.title_maker.build_title( issn, row[self.defs_dct['title']] )
location = row[self.defs_dct['location']]
building = self._make_building( location )
year = row[self.defs_dct['year']]
normalized_issn = issn.replace( '-', '' )
normalized_callnumber = callnumber.replace( '-', '' ).replace( ' ', '' ).replace( '.', '' )
key = '%s%s%s' % ( normalized_issn, building, normalized_callnumber )
return ( key, issn, title, location, building, callnumber, year )
def _make_building( self, location ):
""" Adds building-location.
Called by _build_holdings_elements() """
building = None
try:
building = self.locations_dct['result']['items'][location]['building']
except KeyError:
if location.startswith('r'):
building = 'Rock'
elif location.startswith('h'):
building = 'Hay'
elif location.startswith('q'):
building = 'Annex'
else:
log.warning( 'location code {} not recognized'.format(location) )
building = location
return building
def update_holdings_dct( self, holdings, key, issn, title, location, building, callnumber, year ):
""" Updates holdings dct.
Called by: build_holdings_dct() """
if key not in holdings.keys():
holdings[key] = {
'issn': issn, 'title': title, 'url': self._build_url(title), 'location': location, 'building': building, 'call_number': callnumber, 'years': [year] }
else:
if year and year not in holdings[key]['years']:
holdings[key]['years'].append( year )
holdings[key]['years'].sort()
# log.debug( 'holdings, ```%s```' % pprint.pformat(holdings) )
return holdings
def _build_url( self, title ):
""" Builds search-url.
Called by update_holdings_dct() """
params = {
'f[format][]': 'Periodical Title',
'q': title.encode( 'utf-8' )
}
url = 'https://search.library.brown.edu/catalog/?{}'.format( urllib.urlencode(params) )
return url
# def _build_url( self, title ):
# """ Builds search-url.
# Eventually should be able to use good url as-is -- this works around current encoding issue.
# Testing shows that an incorrectly encoded search will not return results, but eliminating the problematic word will. """
# new_word_list = []
# for word in title.split():
# try:
# word.encode( 'ascii' )
# new_word_list.append( word )
# except:
# pass
# search_title = ' '.join( new_word_list )
# params = { 'f[format][]': 'Periodical Title', 'q': search_title }
# url = 'https://search.library.brown.edu/catalog/?{}'.format( urllib.urlencode(params) )
# return url
# end class HoldingsDctBuilder
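# Key-normalization sketch (illustrative values only): _build_holdings_elements() strips
# dashes from the issn, and dashes/spaces/periods from the callnumber, then concatenates
# issn + building + callnumber. For example, issn '0080-2042', building 'Rock', and
# callnumber 'QP1 .E7' would yield the holdings key '00802042RockQP1E7'.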
class TitleMaker( object ):
""" Tries to reliably get a unicode-friendly title from issn.
Main controller: build_title() """
def __init__( self ):
self.good_titles_dct = {} # updated by build_title()
self.initialize_good_titles_dct()
self.non_matches = {}
def initialize_good_titles_dct( self ):
""" Loads json.
Called by __init__() """
with open( settings_app.ISSN_JSON_PATH, 'r' ) as f:
self.good_titles_dct = json.loads( f.read() )
log.debug( 'len(self.good_titles_dct.keys()), `{}`'.format(len(self.good_titles_dct.keys())) )
return
def build_title( self, issn, title ):
""" Checks issn against built-dct or hits blacklight-solr.
Called by HoldingsDctBuilder._build_holdings_elements() """
if self.is_ascii( title ):
return title
( found_title, dct_check ) = self.check_dct( issn )
if dct_check:
return found_title
( found_title, solr_check ) = self.check_solr( issn )
if solr_check:
return found_title
self.non_matches[ issn ] = title # for later logging
return title
def is_ascii( self, title ):
""" Yup; checks ascii.
Called by build_title() """
try:
title.encode( 'ascii' )
log.debug( 'skipping plain title' )
return title
except Exception as e:
return None
def check_dct( self, issn ):
""" Sees if a match has already been found.
Called by build_title() """
( title, dct_check ) = ( None, False )
if issn in self.good_titles_dct.keys():
title = self.good_titles_dct[ issn ]
dct_check = True
log.debug( 'found in dct' )
return ( title, dct_check )
def check_solr( self, issn ):
""" Looks up issn in discovery-solr.
Called by build_title() """
params = { 'wt': 'json', 'indent': 'on', 'fq': 'issn_t:"{}"'.format( issn ) }
r = requests.get( settings_app.DISCOVERY_SOLR_URL, params=params )
log.debug( 'url, ```{}```'.format(r.url) )
dct = r.json()
( title, solr_check ) = self._parse_solr( dct, issn )
return ( title, solr_check )
def _parse_solr( self, dct, issn ):
""" Parses issn-query response.
Called by check_solr() """
( title, solr_check ) = ( None, False )
if dct['response']['numFound'] > 1:
log.debug( 'multiples found, ```{}```'.format(pprint.pformat(dct)) )
try:
title = dct['response']['docs'][0]['title_display']
solr_check = True
self.good_titles_dct[issn] = title
log.info( 'adding to dct, and returning, title, ```{}```'.format(title) )
except Exception as e:
log.debug( 'e, ```{}```'.format(unicode(repr(e))) )
log.debug( 'no `title_display` found' )
return ( title, solr_check )
# end class TitleMaker
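# Solr-lookup sketch (assumed response shape, for illustration): check_solr() issues a
# query like `?wt=json&indent=on&fq=issn_t:"0080-2042"` against DISCOVERY_SOLR_URL, and
# _parse_solr() expects a payload shaped roughly like:
#   { "response": { "numFound": 1, "docs": [ { "title_display": "Ergebnisse der Physiologie ..." } ] } }
# Only `numFound` and the first doc's `title_display` are read.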
class TrackerUpdater( object ):
""" Manages updating of ProcessorTracker table.
Main controller: update_db_tracker() """
def update_db_tracker( self, prcnt_done, entries_count ):
""" Updates db processing tracker.
Called by track_row() """
tracker = ProcessorTracker.objects.all()[0]
recent_processing_dct = json.loads( tracker.recent_processing )
log.debug( 'recent_processing_dct initially, ```{}```'.format(pprint.pformat(recent_processing_dct)) )
( start_timestamp, end_timestamp, recent_times_per_record, average_time_per_record ) = (
tracker.processing_started, tracker.processing_ended, recent_processing_dct['recent_times_per_record'] , recent_processing_dct['average_time_per_record'] ) # existing info
( status, records_left, start_timestamp, end_timestamp, recent_times_per_record, average_time_per_record ) = self._check_percent_done(
prcnt_done, entries_count, start_timestamp, end_timestamp, recent_times_per_record, average_time_per_record ) # updated info
time_left = ( records_left * average_time_per_record ) / 60 # seconds-left / 60
recent_processing_jsn = self._update_recent_processing(
recent_processing_dct, prcnt_done, recent_times_per_record, time_left, average_time_per_record )
self._update_tracker_object( tracker, recent_processing_jsn, status, start_timestamp, end_timestamp )
return
def _check_percent_done( self, prcnt_done, entries_count, start_timestamp, end_timestamp, recent_times_per_record, average_time_per_record ):
""" Updates vars based on percent done.
Called by update_db_tracker() """
if prcnt_done == 0:
( status, start_timestamp, records_left ) = ( 'started', datetime.datetime.now(), entries_count )
elif prcnt_done == 100:
( status, end_timestamp, records_left ) = ( 'complete', datetime.datetime.now(), 0 )
recent_times_per_record = self._update_recent_times_per_record( recent_times_per_record, start_timestamp, end_timestamp, entries_count )
average_time_per_record = sum(recent_times_per_record) / float( len(recent_times_per_record) )
else:
( status, records_done ) = ( 'in_process', (entries_count * (prcnt_done/100.0)) )
records_left = entries_count - records_done
return ( status, records_left, start_timestamp, end_timestamp, recent_times_per_record, average_time_per_record )
def _update_recent_times_per_record( self, recent_times_per_record, start_timestamp, end_timestamp, entries_count ):
""" Updates list of recent-times-per-record (seconds).
Called by: _check_percent_done() """
time_taken = end_timestamp - start_timestamp
time_taken_string = '{sec}.{microsec}'.format( sec=time_taken.seconds, microsec=time_taken.microseconds )
f = float( time_taken_string )
time_per_record = f / entries_count
recent_times_per_record.append( time_per_record )
recent_times_per_record = recent_times_per_record[-4:]  # keep only the most recent runs
log.debug( 'recent_times_per_record, ```{}```'.format(recent_times_per_record) )
return recent_times_per_record
def _update_recent_processing( self, recent_processing_dct, prcnt_done, recent_times_per_record, time_left, average_time_per_record ):
""" Updates recent_processing_dct and returns json.
Called by update_db_tracker() """
recent_processing_dct['percent_done'] = prcnt_done
recent_processing_dct['recent_times_per_record'] = recent_times_per_record
recent_processing_dct['time_left'] = time_left
recent_processing_dct['average_time_per_record'] = average_time_per_record
log.debug( 'recent_processing_dct after update, ```{}```'.format(pprint.pformat(recent_processing_dct)) )
jsn = json.dumps( recent_processing_dct )
return jsn
def _update_tracker_object( self, tracker, recent_processing_jsn, status, start_timestamp, end_timestamp ):
""" Updates and saves tracker record.
Called by update_db_tracker() """
tracker.recent_processing = recent_processing_jsn
tracker.current_status = status
tracker.processing_started = start_timestamp
tracker.processing_ended = end_timestamp
tracker.save()
log.debug( 'tracker updated' )
return
# end class TrackerUpdater()
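# Shape of the tracker's `recent_processing` json blob, inferred from the reads and
# writes above (illustrative values):
#   {
#     "percent_done": 40,
#     "recent_times_per_record": [0.012, 0.011, 0.013],
#     "average_time_per_record": 0.012,
#     "time_left": 2.5
#   }
# `time_left` is expressed in minutes (records_left * average_time_per_record / 60).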
class RowFixer( object ):
""" Fixes non-escaped csv strings.
Non-django class. """
def __init__(self, defs_dct ):
self.defs_dct = defs_dct # { 'label 1': 'index position 1', ... }
def fix_row( self, row ):
""" Handles row containing non-escaped commas in title.
Called by HoldingsDctBuilder.process_file_row() """
fixed_row = self.initialize_fixed_row( row )
for field in row:
current_element_num = row.index(field)
fixed_row = self.update_title( fixed_row, row, current_element_num, field )
if row[current_element_num + 1] == 'Print':
problem_defs_dct = self.make_problem_defs_dct( current_element_num )
fixed_row = self.finish_fixed_row( fixed_row, row, problem_defs_dct )
break
log.debug( 'fixed_row finally, ```%s```' % fixed_row )
return fixed_row
def initialize_fixed_row( self, row ):
""" Initializes fixed row with known correct row data.
Called by fix_row() """
fixed_row = []
fixed_row.append( row[self.defs_dct['library']] )
fixed_row.append( row[self.defs_dct['branch']] )
fixed_row.append( row[self.defs_dct['location']] )
fixed_row.append( row[self.defs_dct['callnumber']] )
fixed_row.append( row[self.defs_dct['title']] )
# log.debug( 'fixed_row initially, ```%s```' % fixed_row )
return fixed_row
def update_title( self, fixed_row, row, current_element_num, field ):
""" Processes additional title fields.
Called by fix_row() """
main_title_element_num = row.index( row[self.defs_dct['title']] )
if current_element_num > main_title_element_num:
if field[0:1] == ' ': # additional title fields start with space
fixed_row[self.defs_dct['title']] = fixed_row[self.defs_dct['title']] + field + ','
# log.debug( 'fixed_row title updated, ```%s```' % fixed_row )
return fixed_row
def make_problem_defs_dct( self, current_element_num ):
""" Creates remaining definition-dct elements, given known current_element_num.
Called by fix_row() """
problem_defs_dct = {
'format': current_element_num + 1,
'issn_num': current_element_num + 2,
'issn_type': current_element_num + 3,
'vol_start': current_element_num + 4,
'vol_end': current_element_num + 5,
'year': current_element_num + 6
}
log.debug( 'problem_defs_dct, ```%s```' % problem_defs_dct )
return problem_defs_dct
def finish_fixed_row( self, fixed_row, row, problem_defs_dct ):
""" Updates remaining fixed-row elements.
Called by fix_row() """
fixed_row[self.defs_dct['title']] = fixed_row[self.defs_dct['title']][0:-1] # slice off that last comma
fixed_row.append( row[problem_defs_dct['format']] )
fixed_row.append( row[problem_defs_dct['issn_num']] )
fixed_row.append( row[problem_defs_dct['issn_type']] )
fixed_row.append( row[problem_defs_dct['vol_start']] )
fixed_row.append( row[problem_defs_dct['vol_end']] )
fixed_row.append( row[problem_defs_dct['year']] )
# log.debug( 'fixed_row finished, ```%s```' % fixed_row )
return fixed_row
# end class RowFixer
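# RowFixer sketch (hypothetical row, for illustration): a title containing an unescaped
# comma arrives split across two fields, e.g.
#   ['RBN', 'Main Library', 'qs', 'QP1 .E7',
#    'Ergebnisse der Physiologie', ' biologischen Chemie...', 'Print',
#    '0080-2042', 'ISSN', '1', '69', '1938']
# fix_row() re-joins the trailing title fragment(s) into the title field (the literal
# comma itself is not restored) and rebuilds the remaining format/issn/vol/year fields,
# returning an 11-element row again.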
|
|
from __future__ import absolute_import
from bokeh.io import save
from bokeh.plotting import figure
from bokeh.models import CustomJS, Range1d, DataRange1d
from selenium.webdriver.common.action_chains import ActionChains
import pytest
pytestmark = pytest.mark.integration
def make_pan_plot_with_callback(xr=None, yr=None):
if xr is None:
x_range = Range1d(0, 3, bounds=None)
else:
x_range = xr
x_callback = CustomJS(args=dict(x_range=x_range), code="""
window.get_x_range_start = function() {
return x_range.get('start');
}
window.get_x_range_end = function() {
return x_range.get('end');
}
""")
x_range.callback = x_callback
if yr is None:
y_range = Range1d(0, 3, bounds=None)
else:
y_range = yr
y_callback = CustomJS(args=dict(y_range=y_range), code="""
window.get_y_range_start = function() {
return y_range.get('start');
}
window.get_y_range_end = function() {
return y_range.get('end');
}
""")
y_range.callback = y_callback
plot = figure(
height=400, width=400, tools='pan,box_zoom,reset', x_range=x_range, y_range=y_range
)
plot.min_border = 0
plot.rect(x=[1, 2], y=[1, 1], width=0.9, height=0.9)
return plot
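# How the tests read ranges back (sketch): the CustomJS callbacks above expose
# window.get_x_range_start()/get_x_range_end() (and the y equivalents) in the page, and
# each test surfaces a value through an alert, e.g.
#   float(selenium.execute_script("""alert(window.get_x_range_start())"""))
# then dismisses the alert with selenium.switch_to_alert().dismiss().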
def pan_plot(selenium, pan_x=None, pan_y=None):
# Enable the pan tool
pan_buttons = selenium.find_elements_by_css_selector('.bk-button-bar-list[type="pan"] button')
pan_button = pan_buttons[0]
if 'active' not in pan_button.get_attribute('class'):
pan_button.click()
canvas = selenium.find_element_by_tag_name('canvas')
actions = ActionChains(selenium)
actions.move_to_element_with_offset(canvas, 200, 200)
actions.click_and_hold()
actions.move_by_offset(pan_x, pan_y)
actions.release()
actions.perform()
def zoom_plot(selenium):
# Enable the box zoom tool
pan_buttons = selenium.find_elements_by_css_selector('.bk-button-bar-list[type="pan"] button')
zoom_button = pan_buttons[1]
if 'active' not in zoom_button.get_attribute('class'):
zoom_button.click()
canvas = selenium.find_element_by_tag_name('canvas')
actions = ActionChains(selenium)
actions.move_to_element_with_offset(canvas, 10, 10)
actions.click_and_hold()
actions.move_by_offset(200, 200)
actions.release()
actions.perform()
def test_range_with_callback_triggers_alert(output_file_url, selenium):
# Simple test to ensure range callbacks are working
# Rest of tests in this file depend on range callback.
plot = make_pan_plot_with_callback()
initial_start = plot.x_range.start
save(plot)
selenium.get(output_file_url)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=100, pan_y=100)
new_range_start = float(selenium.execute_script("""alert(window.get_x_range_start())"""))
selenium.switch_to_alert().dismiss()
assert new_range_start < initial_start
def test_x_range_does_not_pan_left_of_x_min(output_file_url, selenium):
x_range_min = -1
plot = make_pan_plot_with_callback(xr=Range1d(0, 3, bounds=(x_range_min, None)))
save(plot)
selenium.get(output_file_url)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=200, pan_y=0)
new_range_start = float(selenium.execute_script("""alert(window.get_x_range_start())"""))
selenium.switch_to_alert().dismiss()
assert round(new_range_start) == x_range_min
def test_x_range_does_not_pan_right_of_x_max(output_file_url, selenium):
x_range_max = 4
plot = make_pan_plot_with_callback(xr=Range1d(0, 3, bounds=(None, x_range_max)))
save(plot)
selenium.get(output_file_url)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=-200, pan_y=0)
new_range_end = float(selenium.execute_script("""alert(window.get_x_range_end())"""))
selenium.switch_to_alert().dismiss() # This is not necessary but assists debugging
assert round(new_range_end) == x_range_max
def test_y_range_does_not_pan_below_y_min(output_file_url, selenium):
y_range_min = -1
plot = make_pan_plot_with_callback(yr=Range1d(0, 3, bounds=(y_range_min, None)))
save(plot)
selenium.get(output_file_url)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=50, pan_y=-150)
new_range_start = float(selenium.execute_script("""alert(window.get_y_range_start())"""))
selenium.switch_to_alert().dismiss() # This is not necessary but assists debugging
assert round(new_range_start) == y_range_min
def test_y_range_does_not_pan_above_y_max(output_file_url, selenium):
y_range_max = 4
plot = make_pan_plot_with_callback(yr=Range1d(0, 3, bounds=(None, y_range_max)))
save(plot)
selenium.get(output_file_url)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=50, pan_y=150)
new_range_end = float(selenium.execute_script("""alert(window.get_y_range_end())"""))
selenium.switch_to_alert().dismiss() # This is not necessary but assists debugging
assert round(new_range_end) == y_range_max
############################
# Test reversed ranges
############################
def test_reversed_x_range_does_not_pan_right_of_x_min(output_file_url, selenium):
x_range_min = -1
plot = make_pan_plot_with_callback(xr=Range1d(3, 0, bounds=(x_range_min, None)))
save(plot)
selenium.get(output_file_url)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=-200, pan_y=0)
new_range_start = float(selenium.execute_script("""alert(window.get_x_range_end())"""))
selenium.switch_to_alert().dismiss()
assert round(new_range_start) == x_range_min
def test_reversed_x_range_does_not_pan_left_of_x_max(output_file_url, selenium):
x_range_max = 4
plot = make_pan_plot_with_callback(xr=Range1d(3, 0, bounds=(None, x_range_max)))
save(plot)
selenium.get(output_file_url)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=200, pan_y=0)
new_range_end = float(selenium.execute_script("""alert(window.get_x_range_start())"""))
selenium.switch_to_alert().dismiss() # This is not necessary but assists debugging
assert round(new_range_end) == x_range_max
def test_reversed_y_range_does_not_pan_above_y_min(output_file_url, selenium):
y_range_min = -1
plot = make_pan_plot_with_callback(yr=Range1d(3, 0, bounds=(y_range_min, None)))
save(plot)
selenium.get(output_file_url)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=50, pan_y=150)
new_range_start = float(selenium.execute_script("""alert(window.get_y_range_end())"""))
selenium.switch_to_alert().dismiss()
assert round(new_range_start) == y_range_min
def test_reversed_y_range_does_not_pan_below_y_max(output_file_url, selenium):
y_range_max = 4
plot = make_pan_plot_with_callback(yr=Range1d(3, 0, bounds=(None, y_range_max)))
save(plot)
selenium.get(output_file_url)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=50, pan_y=-150)
new_range_end = float(selenium.execute_script("""alert(window.get_y_range_start())"""))
selenium.switch_to_alert().dismiss()
assert round(new_range_end) == y_range_max
############################
# Test auto bounds
############################
def _assert_autorange_prevents_panning_but_can_zoom(output_file_url, selenium):
selenium.get(output_file_url)
# Zoom into plot so we can pan around a little
zoom_plot(selenium)
# Now that the plot is zoomed in, try panning a little to the right
pan_plot(selenium, pan_x=-50, pan_y=0)
x_range_start = float(selenium.execute_script("""alert(window.get_x_range_start())"""))
selenium.switch_to_alert().dismiss()
assert x_range_start > 0.5
# Now try panning far to left to check bounds
pan_plot(selenium, pan_x=200, pan_y=0)
x_range_start = float(selenium.execute_script("""alert(window.get_x_range_start())"""))
selenium.switch_to_alert().dismiss()
assert x_range_start > 0.4
assert x_range_start < 0.5
def test_autorange_prevents_panning_but_can_zoom_in_with_datarange1d(output_file_url, selenium):
plot = make_pan_plot_with_callback(xr=DataRange1d(bounds='auto'), yr=DataRange1d(bounds='auto'))
save(plot)
_assert_autorange_prevents_panning_but_can_zoom(output_file_url, selenium)
def test_autorange_prevents_panning_but_can_zoom_in_with_range1d(output_file_url, selenium):
plot = make_pan_plot_with_callback(xr=Range1d(0.45, 3, bounds='auto'), yr=DataRange1d(0, 3, bounds='auto'))
save(plot)
_assert_autorange_prevents_panning_but_can_zoom(output_file_url, selenium)
############################
# Test no bounds
############################
#def _assert_no_bounds_allows_unlimited_panning(output_file_url, selenium):
# selenium.get(output_file_url)
#
# pan_plot(selenium, pan_x=-1000, pan_y=2000)
#
# x_range_start = float(selenium.execute_script("""alert(window.get_x_range_start())"""))
# selenium.switch_to_alert().dismiss()
# assert x_range_start > 5
#
# y_range_start = float(selenium.execute_script("""alert(window.get_y_range_start())"""))
# selenium.switch_to_alert().dismiss()
# assert y_range_start > 5
#
#
#def test_no_bounds_allows_unlimited_panning_with_datarange1d(output_file_url, selenium):
# plot = make_pan_plot_with_callback(xr=DataRange1d(bounds=None), yr=DataRange1d(bounds=None))
# save(plot)
# _assert_no_bounds_allows_unlimited_panning(output_file_url, selenium)
#
#
#def test_no_bounds_allows_unlimited_panning_with_range1d(output_file_url, selenium):
# plot = make_pan_plot_with_callback(xr=Range1d(0.45, 3, bounds=None), yr=DataRange1d(0, 3, bounds=None))
# save(plot)
# _assert_no_bounds_allows_unlimited_panning(output_file_url, selenium)
|
|
"""
Regression tests for the Test Client, especially the customized assertions.
"""
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
import os
class AssertContainsTests(TestCase):
def test_contains(self):
"Responses can be inspected for content, including counting repeated substrings"
response = self.client.get('/test_client_regress/no_template_view/')
self.assertContains(response, 'never', 0)
self.assertContains(response, 'once')
self.assertContains(response, 'once', 1)
self.assertContains(response, 'twice')
self.assertContains(response, 'twice', 2)
try:
self.assertContains(response, 'never', 1)
except AssertionError, e:
self.assertEquals(str(e), "Found 0 instances of 'never' in response (expected 1)")
try:
self.assertContains(response, 'once', 0)
except AssertionError, e:
self.assertEquals(str(e), "Found 1 instances of 'once' in response (expected 0)")
try:
self.assertContains(response, 'once', 2)
except AssertionError, e:
self.assertEquals(str(e), "Found 1 instances of 'once' in response (expected 2)")
try:
self.assertContains(response, 'twice', 1)
except AssertionError, e:
self.assertEquals(str(e), "Found 2 instances of 'twice' in response (expected 1)")
try:
self.assertContains(response, 'thrice')
except AssertionError, e:
self.assertEquals(str(e), "Couldn't find 'thrice' in response")
try:
self.assertContains(response, 'thrice', 3)
except AssertionError, e:
self.assertEquals(str(e), "Found 0 instances of 'thrice' in response (expected 3)")
class AssertTemplateUsedTests(TestCase):
fixtures = ['testdata.json']
def test_no_context(self):
"Template usage assertions work then templates aren't in use"
response = self.client.get('/test_client_regress/no_template_view/')
# Check that the no template case doesn't mess with the template assertions
self.assertTemplateNotUsed(response, 'GET Template')
try:
self.assertTemplateUsed(response, 'GET Template')
except AssertionError, e:
self.assertEquals(str(e), "No templates used to render the response")
def test_single_context(self):
"Template assertions work when there is a single context"
response = self.client.get('/test_client/post_view/', {})
#
try:
self.assertTemplateNotUsed(response, 'Empty GET Template')
except AssertionError, e:
self.assertEquals(str(e), "Template 'Empty GET Template' was used unexpectedly in rendering the response")
try:
self.assertTemplateUsed(response, 'Empty POST Template')
except AssertionError, e:
self.assertEquals(str(e), "Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template")
def test_multiple_context(self):
"Template assertions work when there are multiple contexts"
post_data = {
'text': 'Hello World',
'email': '[email protected]',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
try:
self.assertTemplateNotUsed(response, "form_view.html")
except AssertionError, e:
self.assertEquals(str(e), "Template 'form_view.html' was used unexpectedly in rendering the response")
try:
self.assertTemplateNotUsed(response, 'base.html')
except AssertionError, e:
self.assertEquals(str(e), "Template 'base.html' was used unexpectedly in rendering the response")
try:
self.assertTemplateUsed(response, "Valid POST Template")
except AssertionError, e:
self.assertEquals(str(e), "Template 'Valid POST Template' was not a template used to render the response. Actual template(s) used: form_view.html, base.html")
class AssertRedirectsTests(TestCase):
def test_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/permanent_redirect_view/')
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 301 (expected 302)")
def test_lost_query(self):
"An assertion is raised if the redirect location doesn't preserve GET parameters"
response = self.client.get('/test_client/redirect_view/', {'var': 'value'})
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'")
def test_incorrect_target(self):
"An assertion is raised if the response redirects to another target"
response = self.client.get('/test_client/permanent_redirect_view/')
try:
# Should redirect to get_view
self.assertRedirects(response, '/test_client/some_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 301 (expected 302)")
def test_target_page(self):
"An assertion is raised if the response redirect target cannot be retrieved as expected"
response = self.client.get('/test_client/double_redirect_view/')
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/')
except AssertionError, e:
self.assertEquals(str(e), "Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)")
class AssertFormErrorTests(TestCase):
def test_unknown_form(self):
"An assertion is raised if the form name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The form 'wrong_form' was not used to render the response")
def test_unknown_field(self):
"An assertion is raised if the field name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The form 'form' in context 0 does not contain the field 'some_field'")
def test_noerror_field(self):
"An assertion is raised if the field doesn't have any errors"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'value', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The field 'value' on form 'form' in context 0 contains no errors")
def test_unknown_error(self):
"An assertion is raised if the field doesn't contain the provided error"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'email', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [u'Enter a valid e-mail address.'])")
def test_unknown_nonfield_error(self):
"""
Checks that an assertion is raised if the form's non-field errors
don't contain the provided error.
"""
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', None, 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )")
class FileUploadTests(TestCase):
def test_simple_upload(self):
fd = open(os.path.join(os.path.dirname(__file__), "views.py"))
post_data = {
'name': 'Ringo',
'file_field': fd,
}
response = self.client.post('/test_client_regress/file_upload/', post_data)
self.assertEqual(response.status_code, 200)
class LoginTests(TestCase):
fixtures = ['testdata']
def test_login_different_client(self):
"Check that using a different test client doesn't violate authentication"
# Create a second client, and log in.
c = Client()
login = c.login(username='testclient', password='password')
self.failUnless(login, 'Could not log in')
# Get a redirection page with the second client.
response = c.get("/test_client_regress/login_protected_redirect_view/")
# At this point, self.client isn't logged in.
# Check that assertRedirects uses the original client, not the
# default client.
self.assertRedirects(response, "http://testserver/test_client_regress/get_view/")
class URLEscapingTests(TestCase):
def test_simple_argument_get(self):
"Get a view that has a simple string argument"
response = self.client.get(reverse('arg_view', args=['Slartibartfast']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Howdy, Slartibartfast')
def test_argument_with_space_get(self):
"Get a view that has a string argument that requires escaping"
response = self.client.get(reverse('arg_view', args=['Arthur Dent']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Hi, Arthur')
def test_simple_argument_post(self):
"Post for a view that has a simple string argument"
response = self.client.post(reverse('arg_view', args=['Slartibartfast']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Howdy, Slartibartfast')
def test_argument_with_space_post(self):
"Post for a view that has a string argument that requires escaping"
response = self.client.post(reverse('arg_view', args=['Arthur Dent']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Hi, Arthur')
|
|
# -*- coding: utf-8 -*-
""" Update an Attendify speakers XLSX file with the current list of
speakers.
Usage: manage.py attendify_speakers_xlsx ep2016 speakers.xlsx
Note that for Attendify you have to download the speakers before
running this script, since they add meta data to the downloaded
file which has to be kept around when uploading it again.
The script updates speakers.xlsx in place. Unfortunately, Attendify
currently has a bug in that it doesn't accept the file format
generated by openpyxl. Opening the file in LibreOffice and saving
it (without changes) fixes this as a work-around.
Attendify Worksheet "Speakers" format
-------------------------------------
Row A4: First Name, Last Name, Company (Optional), Position
(Optional), Group (Optional). Profile (Optional), Email
(Optional), Phone (Optional), Twitter (Optional), Facebook
(Optional), LinkedIn (Optional), Google+ (Optional), UID (do not
delete)
Row A6: Start of data
"""
from django.core.management.base import BaseCommand, CommandError
from django.core import urlresolvers
from django.conf import settings
from django.utils.html import strip_tags
from conference import models as cmodels
from conference import utils
from p3 import models
import datetime
from collections import defaultdict
from optparse import make_option
import operator
import markdown2
import openpyxl
### Globals
# Debug output ?
_debug = 1
# These must match the talk .type or .admin_type
from accepted_talks import TYPE_NAMES
### Helpers
def profile_url(user):
return urlresolvers.reverse('conference-profile',
args=[user.attendeeprofile.slug])
def format_text(text, remove_tags=False, output_html=True):
# Remove whitespace
text = text.strip()
if not text:
return text
# Remove links, tags, etc.
if remove_tags:
text = strip_tags(text)
# Remove quotes
if text[0] == '"' and text[-1] == '"':
text = text[1:-1]
# Convert markdown markup to HTML
if output_html:
text = markdown2.markdown(text)
return text
def add_speaker(data, speaker):
# Get speaker profile
user = speaker.user
profile = cmodels.AttendeeProfile.objects.get(user=user)
p3profile = models.P3Profile.objects.get(profile=profile)
# Skip speakers without a public profile. Speaker profiles must be
# public, but you never know. See conference/models.py
if profile.visibility != 'p':
return
# Collect data
first_name = speaker.user.first_name.title()
last_name = speaker.user.last_name.title()
company = profile.company
position = profile.job_title
profile_text = (u'<a href="%s%s">Profile on EuroPython Website</a>' %
(settings.DEFAULT_URL_PREFIX, profile_url(user)))
twitter = p3profile.twitter
if twitter.startswith(('https://twitter.com/', 'http://twitter.com/')):
twitter = twitter.split('/')[-1]
# Skip special entries
full_name = first_name + last_name
if first_name == 'To Be' and last_name == 'Announced':
return
# UID
uid = u''
data.append((
first_name,
last_name,
company,
position,
u'', # group
profile_text,
u'', # email: not published
u'', # phone: not published
twitter,
u'', # facebook
u'', # linkedin
u'', # google+
uid))
# Start row of data in spreadsheet (Python 0-based index)
SPEAKERS_WS_START_DATA = 5
# Column number of UID columns (Python 0-based index)
SPEAKERS_UID_COLUMN = 12
# Number of columns to make row unique (first, last, company)
SPEAKERS_UNIQUE_COLS = 3
def update_speakers(speakers_xlsx, new_data, updated_xlsx=None):
# Load workbook
wb = openpyxl.load_workbook(speakers_xlsx)
assert wb.sheetnames == [u'Instructions', u'Speakers', u'System']
ws = wb['Speakers']
# Extract data values
ws_data = list(ws.values)[SPEAKERS_WS_START_DATA:]
print ('read %i data lines' % len(ws_data))
print ('first line: %r' % ws_data[:1])
print ('last line: %r' % ws_data[-1:])
# Reconcile UIDs / talks
uids = {}
for line in ws_data:
uid = line[SPEAKERS_UID_COLUMN]
if not uid:
continue
uids[tuple(line[:SPEAKERS_UNIQUE_COLS])] = uid
# Add UID to new data
new_speakers = []
for line in new_data:
key = tuple(line[:SPEAKERS_UNIQUE_COLS])
if key not in uids:
print ('New speaker %s found' % (key,))
uid = u''
else:
uid = uids[key]
line = tuple(line[:SPEAKERS_UID_COLUMN]) + (uid,)
new_speakers.append(line)
new_data = new_speakers
# Replace old data with new data
old_data_rows = len(ws_data)
new_data_rows = len(new_data)
print ('new data: %i data lines' % new_data_rows)
offset = SPEAKERS_WS_START_DATA + 1
print ('new_data = %i rows' % len(new_data))
for j, row in enumerate(ws[offset: offset + new_data_rows - 1]):
new_row = new_data[j]
if _debug:
print ('updating row %i with %r' % (j, new_row))
if len(row) > len(new_row):
row = row[:len(new_row)]
for i, cell in enumerate(row):
cell.value = new_row[i]
# Overwrite unused cells with None
if new_data_rows < old_data_rows:
for j, row in enumerate(ws[offset + new_data_rows + 1:
offset + old_data_rows + 1]):
if _debug:
print ('clearing row %i' % (j,))
for i, cell in enumerate(row):
cell.value = None
# Write updated data
if updated_xlsx is None:
updated_xlsx = speakers_xlsx
wb.save(updated_xlsx)
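# UID-reconciliation sketch (illustrative values): an existing sheet row such as
#   ('Jane', 'Doe', 'ACME', ..., u'abc-123')
# is keyed by its first SPEAKERS_UNIQUE_COLS columns ('Jane', 'Doe', 'ACME'); a freshly
# generated row for the same speaker inherits the UID u'abc-123', while a speaker with
# no matching key keeps an empty UID (new speaker).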
###
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
# make_option('--option',
# action='store',
# dest='option_attr',
# default=0,
# type='int',
# help='Help text',
# ),
)
args = '<conference> <xlsx-file>'
def handle(self, *args, **options):
try:
conference = args[0]
except IndexError:
raise CommandError('conference not specified')
try:
speakers_xlsx = args[1]
except IndexError:
raise CommandError('XLSX file not specified')
# Get speaker records
speakers = set()
talks = cmodels.Talk.objects.accepted(conference)
for t in talks:
speakers |= set(t.get_all_speakers())
# Collect profiles
data = []
for speaker in speakers:
add_speaker(data, speaker)
data.sort()
# Update spreadsheet with new data
update_speakers(speakers_xlsx, data)
|
|
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for MarkovChain."""
from absl.testing import parameterized
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.distributions import log_prob_ratio
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import test_util
@test_util.test_graph_and_eager_modes
class MarkovChainTest(test_util.TestCase):
def test_error_when_transition_modifies_batch_shape(self):
loses_batch_shape = tfd.MarkovChain(
initial_state_prior=tfd.Normal(loc=0., scale=[1., 1.]),
transition_fn=lambda _, x: tfd.Independent( # pylint: disable=g-long-lambda
tfd.Normal(loc=0., scale=tf.ones_like(x)),
reinterpreted_batch_ndims=1),
num_steps=5)
x = self.evaluate(loses_batch_shape.sample([2], seed=test_util.test_seed()))
with self.assertRaisesRegexp(ValueError, 'batch shape is incorrect'):
loses_batch_shape.log_prob(x)
gains_batch_shape = tfd.MarkovChain(
initial_state_prior=tfd.Independent(
tfd.Normal(loc=0., scale=[1., 1.]),
reinterpreted_batch_ndims=1),
transition_fn=lambda _, x: tfd.Normal(loc=0., scale=tf.ones_like(x)),
num_steps=5)
x = self.evaluate(gains_batch_shape.sample([2], seed=test_util.test_seed()))
with self.assertRaisesRegexp(ValueError, 'batch shape is incorrect'):
gains_batch_shape.log_prob(x)
def test_log_prob_matches_linear_gaussian_ssm(self):
dim = 2
batch_shape = [3, 1]
seed, *model_seeds = samplers.split_seed(test_util.test_seed(), n=6)
# Sample a random linear Gaussian process.
prior_loc = self.evaluate(
tfd.Normal(0., 1.).sample(batch_shape + [dim], seed=model_seeds[0]))
prior_scale = self.evaluate(
tfd.InverseGamma(1., 1.).sample(batch_shape + [dim],
seed=model_seeds[1]))
transition_matrix = self.evaluate(
tfd.Normal(0., 1.).sample([dim, dim], seed=model_seeds[2]))
transition_bias = self.evaluate(
tfd.Normal(0., 1.).sample(batch_shape + [dim], seed=model_seeds[3]))
transition_scale_tril = self.evaluate(
tf.linalg.cholesky(
tfd.WishartTriL(df=dim, scale_tril=tf.eye(dim)).sample(
seed=model_seeds[4])))
initial_state_prior = tfd.MultivariateNormalDiag(
loc=prior_loc, scale_diag=prior_scale, name='initial_state_prior')
lgssm = tfd.LinearGaussianStateSpaceModel(
num_timesteps=7,
transition_matrix=transition_matrix,
transition_noise=tfd.MultivariateNormalTriL(
loc=transition_bias, scale_tril=transition_scale_tril),
# Trivial observation model to pass through the latent state.
observation_matrix=tf.eye(dim),
observation_noise=tfd.MultivariateNormalDiag(loc=tf.zeros(dim),
scale_diag=tf.zeros(dim)),
initial_state_prior=initial_state_prior)
markov_chain = tfd.MarkovChain(
initial_state_prior=initial_state_prior,
transition_fn=lambda _, x: tfd.MultivariateNormalTriL( # pylint: disable=g-long-lambda
loc=tf.linalg.matvec(transition_matrix, x) + transition_bias,
scale_tril=transition_scale_tril),
num_steps=7)
x = markov_chain.sample(5, seed=seed)
self.assertAllClose(lgssm.log_prob(x), markov_chain.log_prob(x), rtol=1e-5)
@test_util.numpy_disable_test_missing_functionality(
'JointDistributionNamedAutoBatched')
def test_docstring_example_autoregressive_process(self):
def transition_fn(_, previous_state):
return tfd.JointDistributionNamedAutoBatched(
# The previous state may include batch dimensions. Since the log scale
# is a scalar quantity, its shape is the batch shape.
batch_ndims=ps.rank(previous_state['log_scale']),
model={
# The autoregressive coefficients and the `log_scale` each follow
# an independent slow-moving random walk.
'coefs': tfd.Normal(loc=previous_state['coefs'], scale=0.01),
'log_scale': tfd.Normal(loc=previous_state['log_scale'],
scale=0.01),
# The level is a linear combination of the previous *two* levels,
# with additional noise of scale `exp(log_scale)`.
'level': lambda coefs, log_scale: tfd.Normal( # pylint: disable=g-long-lambda
loc=(coefs[..., 0] * previous_state['level'] +
coefs[..., 1] * previous_state['previous_level']),
scale=tf.exp(log_scale)),
# Store the previous level to access at the next step.
'previous_level': tfd.Deterministic(previous_state['level'])})
process = tfd.MarkovChain(
# For simplicity, define the prior as a 'transition' from fixed values.
initial_state_prior=transition_fn(
0, previous_state={
'coefs': [0.7, -0.2],
'log_scale': -1.,
'level': 0.,
'previous_level': 0.}),
transition_fn=transition_fn,
num_steps=100)
self.assertAllEqualNested(process.event_shape,
{'coefs': [100, 2], 'log_scale': [100],
'level': [100], 'previous_level': [100]})
self.assertAllEqual(process.batch_shape, [])
x = process.sample(5, seed=test_util.test_seed())
self.assertAllEqual(x['coefs'].shape, [5, 100, 2])
self.assertAllEqual(x['log_scale'].shape, [5, 100])
self.assertAllEqual(x['level'].shape, [5, 100])
self.assertAllEqual(x['previous_level'].shape, [5, 100])
lp = process.log_prob(x)
self.assertAllEqual(lp.shape, [5])
x2, lp2 = process.experimental_sample_and_log_prob(
2, seed=test_util.test_seed())
self.assertAllClose(lp2, process.log_prob(x2))
@parameterized.named_parameters(
('float32_dynamic', tf.float32, True),
('float64_static', tf.float64, False))
def test_docstring_example_batch_gaussian_walk(self,
float_dtype,
use_dynamic_shapes):
if tf.executing_eagerly() and use_dynamic_shapes:
self.skipTest('No dynamic shapes in eager mode.')
def _as_tensor(x, dtype=None):
x = ps.cast(x, dtype=dtype if dtype else float_dtype)
if use_dynamic_shapes:
x = tf1.placeholder_with_default(x, shape=None)
return x
scales = _as_tensor([0.5, 0.3, 0.2, 0.2, 0.3, 0.2, 0.7])
batch_gaussian_walk = tfd.MarkovChain(
# The prior distribution determines the batch shape for the chain.
# Transitions must respect this batch shape.
initial_state_prior=tfd.Normal(loc=_as_tensor([-10., 0., 10.]),
scale=_as_tensor([1., 1., 1.])),
transition_fn=lambda t, x: tfd.Normal( # pylint: disable=g-long-lambda
loc=x,
# The `num_steps` dimension will always be leftmost in `x`, so we
# pad the scale to the same rank as `x` so that the shapes line up.
scale=tf.reshape(
tf.gather(scales, t),
ps.concat([[-1],
ps.ones(ps.rank(x) - 1, dtype=tf.int32)], axis=0))),
# Limit to eight steps since we only specified scales for seven
# transitions.
num_steps=8)
self.assertAllEqual(batch_gaussian_walk.event_shape_tensor(), [8])
self.assertAllEqual(batch_gaussian_walk.batch_shape_tensor(), [3])
x = batch_gaussian_walk.sample(5, seed=test_util.test_seed())
self.assertAllEqual(ps.shape(x), [5, 3, 8])
lp = batch_gaussian_walk.log_prob(x)
self.assertAllEqual(ps.shape(lp), [5, 3])
x2, lp2 = batch_gaussian_walk.experimental_sample_and_log_prob(
[2], seed=test_util.test_seed())
self.assertAllClose(lp2, batch_gaussian_walk.log_prob(x2))
def test_docstring_example_gaussian_walk(self):
gaussian_walk = tfd.MarkovChain(
initial_state_prior=tfd.Normal(loc=0., scale=1.),
transition_fn=lambda _, x: tfd.Normal(loc=x, scale=1.),
num_steps=100)
self.assertAllEqual(gaussian_walk.event_shape, [100])
self.assertAllEqual(gaussian_walk.batch_shape, [])
x = gaussian_walk.sample(5, seed=test_util.test_seed())
self.assertAllEqual(x.shape, [5, 100])
lp = gaussian_walk.log_prob(x)
self.assertAllEqual(lp.shape, [5])
n = tfd.Normal(0., 1.)
expected_lp = (n.log_prob(x[:, 0]) +
tf.reduce_sum(n.log_prob(x[:, 1:] - x[:, :-1]), axis=-1))
self.assertAllClose(lp, expected_lp)
x2, lp2 = gaussian_walk.experimental_sample_and_log_prob(
[2], seed=test_util.test_seed())
self.assertAllClose(lp2, gaussian_walk.log_prob(x2))
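# Factorization behind the check above (general MarkovChain sketch): for a chain with
# states x[0..T-1],
#   log_prob(x) = initial_state_prior.log_prob(x[0])
#                 + sum_t transition_fn(t, x[t-1]).log_prob(x[t])
# which, for the unit-scale Gaussian walk, reduces to the `expected_lp` computed above.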
def test_non_autobatched_joint_distribution(self):
def transition_fn(_, previous_state):
return tfd.JointDistributionNamed(
{
# The autoregressive coefficients and the `log_scale` each follow
# an independent slow-moving random walk.
'coefs': tfd.Independent(
tfd.Normal(loc=previous_state['coefs'], scale=0.01),
reinterpreted_batch_ndims=1),
'log_scale': tfd.Normal(loc=previous_state['log_scale'],
scale=0.01),
# The level is a linear combination of the previous *two* levels,
# with additional noise of scale `exp(log_scale)`.
'level': lambda coefs, log_scale: tfd.Normal( # pylint: disable=g-long-lambda
loc=(coefs[..., 0] * previous_state['level'] +
coefs[..., 1] * previous_state['previous_level']),
scale=tf.exp(log_scale)),
# Store the previous level to access at the next step.
'previous_level': tfd.Deterministic(previous_state['level'])})
process = tfd.MarkovChain(
# For simplicity, define the prior as a 'transition' from fixed values.
initial_state_prior=transition_fn(
0, previous_state={
'coefs': [0.7, -0.2],
'log_scale': -1.,
'level': 0.,
'previous_level': 0.}),
transition_fn=transition_fn,
num_steps=100)
self.assertAllEqualNested(process.event_shape,
{'coefs': [100, 2], 'log_scale': [100],
'level': [100], 'previous_level': [100]})
self.assertAllEqual(process.batch_shape,
{'coefs': [], 'log_scale': [],
'level': [], 'previous_level': []})
x = process.sample(5, seed=test_util.test_seed())
self.assertAllEqual(x['coefs'].shape, [5, 100, 2])
self.assertAllEqual(x['log_scale'].shape, [5, 100])
self.assertAllEqual(x['level'].shape, [5, 100])
self.assertAllEqual(x['previous_level'].shape, [5, 100])
lp = process.log_prob(x)
self.assertAllEqual(lp.shape, [5])
x2, lp2 = process.experimental_sample_and_log_prob(
2, seed=test_util.test_seed())
self.assertAllClose(lp2, process.log_prob(x2))
def test_log_prob_ratio(self):
p = tfd.MarkovChain(
initial_state_prior=tfd.Normal(0., 1.),
transition_fn=lambda _, x: tfd.Normal(x, tf.nn.softplus(x)),
num_steps=10)
q = tfd.MarkovChain(
initial_state_prior=tfd.Normal(-10, 3.),
transition_fn=lambda _, x: tfd.Normal(x, tf.abs(x)),
num_steps=10)
x = self.evaluate(p.sample(4, seed=test_util.test_seed()))
y = self.evaluate(q.sample(4, seed=test_util.test_seed()))
self.assertAllClose(
p.log_prob(x) - q.log_prob(y),
log_prob_ratio.log_prob_ratio(p, x, q, y), atol=1e-5)
def test_unexpected_num_steps_raises(self):
p = tfd.MarkovChain(
initial_state_prior=tfd.Normal(0., 1.),
transition_fn=lambda _, x: tfd.Normal(x, tf.nn.softplus(x)),
num_steps=10,
validate_args=True)
with self.assertRaisesRegex(
(ValueError, tf.errors.InvalidArgumentError),
'does not match the expected num_steps'):
p.log_prob(tf.zeros([11]))
@test_util.test_graph_and_eager_modes
class MarkovChainBijectorTest(test_util.TestCase):
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
dict(testcase_name='deterministic_prior',
prior_fn=lambda: tfd.Deterministic([-100., 0., 100.]),
transition_fn=lambda _, x: tfd.Normal(loc=x, scale=1.)),
dict(testcase_name='deterministic_transition',
prior_fn=lambda: tfd.Normal(loc=[-100., 0., 100.], scale=1.),
transition_fn=lambda _, x: tfd.Deterministic(x)),
dict(testcase_name='fully_deterministic',
prior_fn=lambda: tfd.Deterministic([-100., 0., 100.]),
transition_fn=lambda _, x: tfd.Deterministic(x)),
dict(testcase_name='mvn_diag',
prior_fn=(
lambda: tfd.MultivariateNormalDiag(loc=[[2.], [2.]],
scale_diag=[1.])),
transition_fn=lambda _, x: tfd.VectorDeterministic(x)),
dict(testcase_name='docstring_dirichlet',
prior_fn=lambda: tfd.JointDistributionNamedAutoBatched(
{'probs': tfd.Dirichlet([1., 1.])}),
transition_fn=lambda _, x: tfd.JointDistributionNamedAutoBatched(
{'probs': tfd.MultivariateNormalDiag(loc=x['probs'],
scale_diag=[0.1, 0.1])},
batch_ndims=ps.rank(x['probs']))),
dict(testcase_name='uniform_step',
prior_fn=lambda: tfd.Exponential(tf.ones([4, 1])),
transition_fn=lambda _, x: tfd.Uniform(low=x, high=x + 1.)),
dict(testcase_name='joint_distribution',
prior_fn=lambda: tfd.JointDistributionNamedAutoBatched(
batch_ndims=2,
model={
'a': tfd.Gamma(tf.zeros([5]), 1.),
'b': lambda a: (
tfb.Reshape(
event_shape_in=[4, 3],
event_shape_out=[2, 3, 2])(
tfd.Independent(
tfd.Normal(
loc=tf.zeros([5, 4, 3]),
scale=a[..., tf.newaxis, tf.newaxis]),
reinterpreted_batch_ndims=2)))}),
transition_fn=lambda _, x: tfd.JointDistributionNamedAutoBatched(
batch_ndims=ps.rank_from_shape(x['a'].shape),
model={'a': tfd.Normal(loc=x['a'], scale=1.),
'b': lambda a: tfd.Deterministic(
x['b'] + a[..., tf.newaxis, tf.newaxis, tf.newaxis])})
),
dict(testcase_name='nested_chain',
prior_fn=lambda: tfd.MarkovChain(
initial_state_prior=tfb.Split(2)(
tfd.MultivariateNormalDiag(0., [1., 2.])),
transition_fn=lambda _, x: tfb.Split(2)(
tfd.MultivariateNormalDiag(x[0], [1., 2.])),
num_steps=6),
transition_fn=(
lambda _, x: tfd.JointDistributionSequentialAutoBatched(
[
tfd.MultivariateNormalDiag(x[0], [1.]),
tfd.MultivariateNormalDiag(x[1], [1.])],
batch_ndims=ps.rank(x[0])))))
# pylint: enable=g-long-lambda
def test_default_bijector(self, prior_fn, transition_fn):
chain = tfd.MarkovChain(initial_state_prior=prior_fn(),
transition_fn=transition_fn,
num_steps=7)
y = self.evaluate(chain.sample(seed=test_util.test_seed()))
bijector = chain.experimental_default_event_space_bijector()
self.assertAllEqual(chain.batch_shape_tensor(),
bijector.experimental_batch_shape_tensor())
x = bijector.inverse(y)
yy = bijector.forward(
tf.nest.map_structure(tf.identity, x)) # Bypass bijector cache.
self.assertAllCloseNested(y, yy)
chain_event_ndims = tf.nest.map_structure(
ps.rank_from_shape, chain.event_shape_tensor())
self.assertAllEqualNested(bijector.inverse_min_event_ndims,
chain_event_ndims)
ildj = bijector.inverse_log_det_jacobian(
tf.nest.map_structure(tf.identity, y), # Bypass bijector cache.
event_ndims=chain_event_ndims)
if not bijector.is_constant_jacobian:
self.assertAllEqual(ildj.shape, chain.batch_shape)
fldj = bijector.forward_log_det_jacobian(
tf.nest.map_structure(tf.identity, x), # Bypass bijector cache.
event_ndims=bijector.inverse_event_ndims(chain_event_ndims))
self.assertAllClose(ildj, -fldj)
# Verify that event shapes are passed through and flattened/unflattened
# correctly.
inverse_event_shapes = bijector.inverse_event_shape(chain.event_shape)
x_event_shapes = tf.nest.map_structure(
lambda t, nd: t.shape[ps.rank(t) - nd:],
x, bijector.forward_min_event_ndims)
self.assertAllEqualNested(inverse_event_shapes, x_event_shapes)
forward_event_shapes = bijector.forward_event_shape(inverse_event_shapes)
self.assertAllEqualNested(forward_event_shapes, chain.event_shape)
# Verify that the outputs of other methods have the correct structure.
inverse_event_shape_tensors = bijector.inverse_event_shape_tensor(
chain.event_shape_tensor())
self.assertAllEqualNested(inverse_event_shape_tensors, x_event_shapes)
forward_event_shape_tensors = bijector.forward_event_shape_tensor(
inverse_event_shape_tensors)
self.assertAllEqualNested(forward_event_shape_tensors,
chain.event_shape_tensor())
if __name__ == '__main__':
test_util.main()
|
|
from __future__ import absolute_import
import os
import logging
import tarfile
import hashlib
from collections import namedtuple
from urlparse import urlparse
import toml
import json
import zinc.helpers as helpers
import zinc.utils as utils
from .catalog import ZincCatalog
from .defaults import defaults
from .coordinators import coordinator_for_url
from .formats import Formats
from .models import ZincModel, ZincIndex, ZincCatalogConfig
from .storages import storage_for_url
from .tasks.bundle_update import ZincBundleUpdateTask
from .utils import enum, memoized
log = logging.getLogger(__name__)
SymbolicBundleVersions = utils.enum(
ALL=':all',
UNREFERENCED=':unreferenced',
LATEST=':latest')
# TODO: why doesn't this work?
#SymbolicSingleBundleVersions = utils.enum(
# LATEST=SymbolicBundleVersions.LATEST)
SymbolicSingleBundleVersions = utils.enum(
LATEST=':latest')
BundleVersionDistroPrefix = '@'
class ZincClientConfig(ZincModel):
VARS = 'vars'
ENV = 'env'
def __init__(self, d=None, **kwargs):
super(ZincClientConfig, self).__init__(**kwargs)
self._d = d
@classmethod
def from_bytes(cls, b, mutable=True):
d = toml.loads(b)
return cls.from_dict(d, mutable=mutable)
@classmethod
def from_dict(cls, d, mutable=True):
replaced = cls._replace_vars(d, d.get(cls.VARS) or dict())
zincConfig = cls(replaced, mutable=mutable)
return zincConfig
@classmethod
def _replace_vars(cls, indict, vars):
# TODO: this could probably be a filter or something
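        # Values of the form 'vars:NAME' are looked up in the config's own
        # vars mapping; values of the form 'env:NAME' are read from os.environ.
        # Nested dicts are processed recursively; everything else is copied
        # through unchanged.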
outdict = dict()
for key, value in indict.iteritems():
if isinstance(value, dict):
outdict[key] = cls._replace_vars(value, vars)
elif isinstance(value, basestring) \
and (value.startswith(cls.VARS + ':')
or value.startswith(cls.ENV + ':')):
if value.startswith(cls.VARS + ':'):
varname = value[len(cls.VARS) + 1:]
var = vars[varname]
elif value.startswith(cls.ENV + ':'):
varname = value[len(cls.ENV) + 1:]
var = os.environ[varname]
outdict[key] = var
else:
outdict[key] = value
return outdict
@property
def vars(self):
return self._d.get('vars')
@property
def bookmarks(self):
return self._d.get('bookmark')
@property
def coordinators(self):
return self._d.get('coordinator')
@property
def storages(self):
return self._d.get('storage')
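# A minimal, illustrative client config (the exact section and key names are
# assumptions based on the properties above, not taken from documentation):
#
#   [vars]
#   catalog_url = "s3://example-bucket/zinc"
#
#   [storage.default]
#   url = "vars:catalog_url"     # whole values starting with 'vars:' or 'env:'
#   region = "env:AWS_REGION"    # are expanded by ZincClientConfig._replace_vars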
################################################################################
OutputType = enum(PRETTY='pretty', JSON='json')
class _Result(object):
def __init__(self, pretty=None):
self._pretty = pretty or str
def to_dict(self):
raise NotImplementedError()
def format(self, fmt):
if fmt == OutputType.JSON:
return json.dumps(self.to_dict())
elif fmt == OutputType.PRETTY:
return self._pretty(self)
else:
raise NotImplementedError()
class DictResult(_Result):
def __init__(self, d, **kwargs):
super(DictResult, self).__init__(**kwargs)
self._dict = d
def to_dict(self):
return self._dict
def __str__(self):
return str(self._dict)
def __getitem__(self, k):
return self._dict[k]
MessageTypes = enum(
INFO='info',
WARNING='warning',
ERROR='error')
class Message(_Result):
def __init__(self, type, text, **kwargs):
super(Message, self).__init__(**kwargs)
self._type = type
self._text = text
@classmethod
def info(cls, s):
return cls(MessageTypes.INFO, s)
@classmethod
def warn(cls, s):
return cls(MessageTypes.WARNING, s)
@classmethod
def error(cls, s):
return cls(MessageTypes.ERROR, s)
@property
def text(self):
return self._text
@property
def type(self):
return self._type
@type.setter
def type(self, val):
assert val in MessageTypes
self._type = val
def to_dict(self):
return {
'message': {
'type': self.type,
'text': self.text,
}
}
def __str__(self):
return '[%s] %s' % (self._type, self._text)
class ResultSet(object):
def __init__(self, items, pretty=None):
self._items = items
self.pretty = pretty or str
@property
@memoized
def items(self):
return self._items()
def __iter__(self):
return iter(self.items)
def __str__(self):
return str(self.items)
def errors(self):
return [i for i in self.items if isinstance(i, Message) and i.type == MessageTypes.ERROR]
# TODO: reimplement
#def dump(self, fmt):
# if fmt == OutputType.JSON:
# return json.dumps(list(self.items))
# elif fmt == OutputType.PRETTY:
# return string.join([self.pretty(x) for x in self], '\n')
# else:
# raise NotImplementedError()
################################################################################
def catalog_list(catalog, distro=None, print_versions=True, **kwargs):
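    # Builds a lazy ResultSet: one DictResult per bundle in the catalog index
    # (optionally restricted to bundles referenced by `distro`), each carrying
    # the bundle's versions and distributions.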
index = catalog.get_index()
def pretty_without_versions(result):
return "%s" % (result['bundle_name'])
def pretty_with_versions(result):
distros = index.distributions_for_bundle_by_version(result['bundle_name'])
versions = index.versions_for_bundle(result['bundle_name'])
version_strings = list()
for version in versions:
version_string = str(version)
if distros.get(version) is not None:
distro_string = "(%s)" % (", ".join(sorted(distros.get(version))))
version_string += '=' + distro_string
version_strings.append(version_string)
final_version_string = "[%s]" % (", ".join(version_strings))
return "%s %s" % (result['bundle_name'], final_version_string)
pretty = pretty_with_versions if print_versions else pretty_without_versions
def results():
bundle_names = sorted(index.bundle_names())
for bundle_name in bundle_names:
d = dict()
if distro and distro not in index.distributions_for_bundle(bundle_name):
continue
d['bundle_name'] = bundle_name
d['versions'] = index.versions_for_bundle(bundle_name)
d['distros'] = index.distributions_for_bundle(bundle_name)
yield DictResult(d, pretty=pretty)
return ResultSet(results)
def bundle_list(catalog, bundle_name, version_ish, print_sha=False, flavor_name=None):
version = _resolve_single_bundle_version(catalog, bundle_name, version_ish)
manifest = catalog.manifest_for_bundle(bundle_name, version=version)
def pretty(r):
if print_sha:
return "%s sha=%s" % (r['file'], r['sha'])
else:
return "%s" % (r['file'])
def results():
all_files = sorted(manifest.get_all_files(flavor=flavor_name))
for f in all_files:
d = {
'file': f,
'sha': manifest.sha_for_file(f)
}
yield DictResult(d, pretty=pretty)
return ResultSet(results)
def bundle_verify(catalog, bundle_name, version_ish, check_shas=True,
should_lock=False, **kwargs):
version = _resolve_single_bundle_version(catalog, bundle_name, version_ish)
manifest = catalog.get_manifest(bundle_name, version)
def results():
for result in _verify_bundle_with_manifest(catalog, manifest,
check_shas=check_shas,
should_lock=should_lock,
**kwargs):
yield result
return ResultSet(results)
def verify_catalog(catalog, should_lock=False, **kwargs):
index = catalog.get_index()
manifests = list()
ph = catalog.path_helper
def results():
# TODO: fix private ref to _bundle_info_by_name
for (bundle_name, bundle_info) in index._bundle_info_by_name.iteritems():
for version in bundle_info['versions']:
manifest_name = ph.manifest_name(bundle_name, version)
yield Message.info("Loading %s" % (manifest_name))
manifest = catalog.get_manifest(bundle_name, version)
if manifest is None:
yield Message.error("manifest not found: %s" % (manifest_name))
continue
manifests.append(manifest)
verified_files = set()
for manifest in manifests:
yield Message.info("Verifying %s-%d" % (manifest.bundle_name,
manifest.version))
for result in _verify_bundle_with_manifest(catalog, manifest,
verified_files=verified_files):
yield result
return ResultSet(results)
def create_bundle_version(catalog, bundle_name, src_dir, flavor_spec=None,
force=False, skip_master_archive=False):
task = ZincBundleUpdateTask()
task.catalog = catalog
task.bundle_name = bundle_name
task.src_dir = src_dir
task.flavor_spec = flavor_spec
task.skip_master_archive = skip_master_archive
task.force = force
return task.run()
def delete_bundle_versions(catalog, bundle_name, version_ish):
version_list = _resolve_multiple_bundle_versions(catalog, bundle_name, version_ish)
with catalog.lock():
for version in version_list:
catalog.delete_bundle_version(bundle_name, version)
def update_distribution(catalog, distro_name, bundle_name, version,
save_previous=True):
catalog.update_distribution(distro_name, bundle_name, version,
save_previous=save_previous)
def delete_distribution(catalog, distribution_name, bundle_name,
delete_previous=True):
catalog.delete_distribution(distribution_name, bundle_name,
delete_previous=delete_previous)
def clone_bundle(catalog, bundle_name, version, root_path=None, bundle_dir_name=None, flavor=None):
assert catalog
assert bundle_name
assert version
if root_path is None:
root_path = '.'
if bundle_dir_name is None:
bundle_id = helpers.make_bundle_id(catalog.id, bundle_name)
bundle_dir_name = helpers.make_bundle_descriptor(bundle_id, version,
flavor=flavor)
manifest = catalog.manifest_for_bundle(bundle_name, version)
if manifest is None:
raise Exception("manifest not found: %s-%d" % (bundle_name, version))
if flavor is not None and flavor not in manifest.flavors:
raise Exception("manifest does not contain flavor '%s'" % (flavor))
all_files = manifest.get_all_files(flavor=flavor)
root_dir = os.path.join(root_path, bundle_dir_name)
utils.makedirs(root_dir)
for file in all_files:
dst_path = os.path.join(root_dir, file)
formats = manifest.formats_for_file(file)
sha = manifest.sha_for_file(file)
utils.makedirs(os.path.dirname(dst_path))
if formats.get(Formats.RAW) is not None:
format = Formats.RAW
elif formats.get(Formats.GZ) is not None:
format = Formats.GZ
else:
format = None
ext = helpers.file_extension_for_format(format)
with catalog._read_file(sha, ext=ext) as infile:
b = infile.read()
if format == Formats.GZ:
b = utils.gunzip_bytes(b)
with open(dst_path, 'w+b') as outfile:
outfile.write(b)
log.info("Exported %s --> %s" % (sha, dst_path))
log.info("Exported %d files to '%s'" % (len(all_files), root_dir))
################################################################################
def _catalog_connection_get_api_version(url):
import requests
ZINC_VERSION_HEADER = 'x-zinc-api-version'
resp = requests.head(url, allow_redirects=False)
# TODO is preventing redirects what we want?
api_version = resp.headers.get(ZINC_VERSION_HEADER)
if api_version is None:
raise Exception("Unknown Zinc API - '%s' header not found" %
(ZINC_VERSION_HEADER))
return api_version
def _catalog_connection_get_http(url):
    ZINC_SUPPORTED_API_VERSIONS = ('1.0',)
api_version = _catalog_connection_get_api_version(url)
if api_version not in ZINC_SUPPORTED_API_VERSIONS:
raise Exception("Unsupported Zinc API version '%s'" % (api_version))
else:
log.debug("Found Zinc API %s" % (api_version))
def catalog_ref_split(catalog_ref):
CatalogRefSplitResult = namedtuple('CatalogRefSplitResult',
'service catalog')
CatalogInfo = namedtuple('CatalogInfo', 'id loc')
urlcomps = urlparse(catalog_ref)
if urlcomps.scheme in ('http', 'https'):
catalog_id = os.path.split(urlcomps.path)[-1]
service = catalog_ref[:-len(catalog_id)]
return CatalogRefSplitResult(service, CatalogInfo(catalog_id, None))
elif urlcomps.scheme in ('file', ''):
return CatalogRefSplitResult(catalog_ref, CatalogInfo(None, '.'))
## TODO: fix cloning between this and zinc.services.simple
def create_catalog(catalog_id=None, storage_info=None):
assert catalog_id
assert storage_info
storage_class = storage_for_url(storage_info['url'])
storage = storage_class(**storage_info)
catalog_storage = storage.bind_to_catalog(id=catalog_id)
catalog_storage.puts(defaults['catalog_config_name'],
ZincCatalogConfig().to_bytes())
catalog_storage.puts(defaults['catalog_index_name'],
ZincIndex(catalog_id).to_bytes())
catalog = ZincCatalog(storage=catalog_storage)
    catalog.save()
    return catalog
def get_service(service_url=None, coordinator_info=None, storage_info=None, **kwargs):
if service_url is not None:
urlcomps = urlparse(service_url)
if urlcomps.scheme in ('http', 'https'):
_catalog_connection_get_http(service_url)
from zinc.services.web import WebServiceConsumer
return WebServiceConsumer(service_url)
elif urlcomps.scheme in ('file', ''):
if urlcomps.scheme == '':
# assume it's a path and convert a file URL
file_url = 'file://%s' % (utils.canonical_path(service_url))
else:
file_url = service_url
from zinc.services.simple import SimpleServiceConsumer
return SimpleServiceConsumer(file_url)
elif coordinator_info is not None and storage_info is not None:
coord_class = coordinator_for_url(coordinator_info['url'])
coord = coord_class(**coordinator_info)
storage_class = storage_for_url(storage_info['url'])
storage = storage_class(**storage_info)
from zinc.services import CustomServiceConsumer
return CustomServiceConsumer(coordinator=coord, storage=storage)
raise NotImplementedError()
def connect(service_url=None, coordinator_info=None, storage_info=None, **kwargs):
return get_service(service_url=service_url,
coordinator_info=coordinator_info,
storage_info=storage_info, **kwargs)
################################################################################
## Verification Helpers
def _verify_bundle_with_manifest(catalog, manifest, check_shas=True,
should_lock=False, **kwargs):
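    # Generator of Message results. Callers that verify many manifests can pass
    # a shared set via kwargs['verified_files'] so files already checked once
    # (by SHA) are skipped on subsequent manifests.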
if not check_shas:
yield Message.warn('Skipping SHA digest verification for bundle files.')
## Check individual files
for path, info in manifest.files.iteritems():
sha = manifest.sha_for_file(path)
        ## Note: it's important to use the reference in kwargs directly
## or else changes won't be propagated to the calling code
if kwargs.get('verified_files') is not None:
if sha in kwargs['verified_files']:
log.debug('Skipping %s' % (sha))
continue
else:
kwargs['verified_files'].add(sha)
meta = catalog._get_file_info(sha)
if meta is None:
#run.append(VerificationError('File %s not exist' % (sha)))
            yield Message.error('File %s does not exist' % (sha))
continue
format = meta['format']
meta_size = meta['size']
manifest_size = info['formats'][format]['size']
log.debug("file=%s format=%s meta_size=%s manifest_size=%s" % (sha, format, meta_size, manifest_size))
if meta_size != manifest_size:
yield Message.error('File %s wrong size' % (sha))
continue
if check_shas:
ext = helpers.file_extension_for_format(format)
with catalog._read_file(sha, ext=ext) as f:
b = f.read()
if format == Formats.GZ:
b = utils.gunzip_bytes(b)
digest = hashlib.sha1(b).hexdigest()
if digest != sha:
yield Message.error('File %s wrong hash' % (sha))
continue
yield Message.info('File %s OK' % (sha))
## Check archives
flavors = list(manifest.flavors)
flavors.append(None) # no flavor
for flavor in flavors:
archive_name = catalog.path_helper.archive_name(manifest.bundle_name, manifest.version, flavor=flavor)
# TODO: private reference to _get_archive_info
meta = catalog._get_archive_info(manifest.bundle_name, manifest.version, flavor=flavor)
if meta is None:
if len(manifest.get_all_files(flavor=flavor)) == 1:
# If there is only 1 file in the bundle there should not be
# an archive
continue
elif flavor is None and len(flavors) > 1:
# If there is more than 1 flavor, we usually don't need the
# master archive. This is probably OK, but warn anyway.
#log.warn('Archive %s not found.' % (archive_name))
                yield Message.warn('Archive %s not found.' % (archive_name))
continue
else:
yield Message.error('Archive %s not found.' % (archive_name))
for result in _verify_archive(catalog, manifest, flavor=flavor, check_shas=check_shas):
yield result
def _verify_archive(catalog, manifest, flavor=None, check_shas=True):
if not check_shas:
yield Message.warn('Skipping SHA digest verification for archive members.')
archive_name = catalog.path_helper.archive_name(manifest.bundle_name, manifest.version, flavor=flavor)
all_files = manifest.get_all_files(flavor=flavor)
with catalog._read_archive(manifest.bundle_name, manifest.version, flavor=flavor) as fileobj:
tar = tarfile.open(fileobj=fileobj)
# Note: getmembers and getnames return objects in the same order
members = tar.getmembers()
member_names = tar.getnames()
found_error = False
for file in all_files:
sha = manifest.sha_for_file(file)
format, info = manifest.get_format_info_for_file(file, preferred_formats=defaults['catalog_preferred_formats'])
target_member_name = helpers.append_file_extension_for_format(sha, format)
if target_member_name not in member_names:
found_error = True
yield Message.error('File \'%s\' not found in %s.' % (target_member_name, helpers.make_bundle_descriptor(manifest.bundle_name, manifest.version, flavor=flavor)))
else:
member = members[member_names.index(target_member_name)]
if check_shas:
f = tar.extractfile(member)
b = f.read()
f.close()
if format == Formats.GZ:
b = utils.gunzip_bytes(b)
digest = hashlib.sha1(b).hexdigest()
if digest != sha:
found_error = True
yield Message.error('File \'%s\' digest does not match: %s.' % (target_member_name, digest))
else:
# check length only
if info['size'] != member.size:
found_error = True
                        yield Message.error('File \'%s\' has size %d, expected %d.' % (target_member_name, member.size, info['size']))
tar.close()
if not found_error:
yield Message.info('Archive %s OK' % (archive_name))
def _resolve_single_bundle_version(catalog, bundle_name, version_ish):
    version = None
    if isinstance(version_ish, int):
        version = version_ish
    elif version_ish == SymbolicBundleVersions.LATEST:
        index = catalog.get_index()
        version = index.versions_for_bundle(bundle_name)[-1]
    elif version_ish.startswith(BundleVersionDistroPrefix):
        source_distro = version_ish[1:]
        version = catalog.index.version_for_bundle(bundle_name, source_distro)
    return version
################################################################################
## Version Resolution Helpers
def _resolve_multiple_bundle_versions(catalog, bundle_name, version_ish):
    versions = []
    if version_ish == SymbolicBundleVersions.ALL:
        index = catalog.get_index()
        versions = index.versions_for_bundle(bundle_name)
    elif version_ish == SymbolicBundleVersions.UNREFERENCED:
        index = catalog.get_index()
        all_versions = index.versions_for_bundle(bundle_name)
        referenced_versions = catalog.index.distributions_for_bundle_by_version(bundle_name).keys()
        versions = [v for v in all_versions if v not in referenced_versions]
    else:
        single_version = _resolve_single_bundle_version(catalog, bundle_name, version_ish)
        if single_version is not None:
            versions = [single_version]
    return versions
|
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import platform
import socket
import sys
from oslo.config import cfg
from nova.compute import flavors
import nova.context
import nova.db
from nova import exception
from nova.image import glance
from nova.network import minidns
from nova.network import model as network_model
from nova.objects import instance as instance_obj
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
def get_test_admin_context():
return nova.context.get_admin_context()
def get_test_image_info(context, instance_ref):
if not context:
context = get_test_admin_context()
image_ref = instance_ref['image_ref']
image_service, image_id = glance.get_remote_image_service(context,
image_ref)
return image_service.show(context, image_id)
def get_test_flavor(context=None, options=None):
options = options or {}
if not context:
context = get_test_admin_context()
test_flavor = {'name': 'kinda.big',
'flavorid': 'someid',
'memory_mb': 2048,
'vcpus': 4,
'root_gb': 40,
'ephemeral_gb': 80,
'swap': 1024}
test_flavor.update(options)
try:
flavor_ref = nova.db.flavor_create(context, test_flavor)
except (exception.FlavorExists, exception.FlavorIdExists):
flavor_ref = nova.db.flavor_get_by_name(context, 'kinda.big')
return flavor_ref
def get_test_instance(context=None, flavor=None, obj=False):
if not context:
context = get_test_admin_context()
if not flavor:
flavor = get_test_flavor(context)
metadata = {}
flavors.save_flavor_info(metadata, flavor, '')
test_instance = {'memory_kb': '2048000',
'basepath': '/some/path',
'bridge_name': 'br100',
'vcpus': 4,
'root_gb': 40,
'bridge': 'br101',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
'instance_type_id': '5',
'system_metadata': metadata,
'extra_specs': {},
'user_id': context.user_id,
'project_id': context.project_id,
}
if obj:
instance = instance_obj.Instance(context, **test_instance)
instance.create()
else:
instance = nova.db.instance_create(context, test_instance)
return instance
def get_test_network_info(count=1):
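    # Builds `count` fake VIFs, each attached to a network with one IPv4 subnet
    # (plus an IPv6 subnet when CONF.use_ipv6 is set), using placeholder
    # addresses throughout.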
ipv6 = CONF.use_ipv6
fake = 'fake'
fake_ip = '0.0.0.0'
fake_netmask = '255.255.255.255'
fake_vlan = 100
fake_bridge_interface = 'eth0'
def current():
subnet_4 = network_model.Subnet(cidr=fake_ip,
dns=[network_model.IP(fake_ip),
network_model.IP(fake_ip)],
gateway=network_model.IP(fake_ip),
ips=[network_model.IP(fake_ip),
network_model.IP(fake_ip)],
routes=None,
dhcp_server=fake_ip)
subnet_6 = network_model.Subnet(cidr=fake_ip,
gateway=network_model.IP(fake_ip),
ips=[network_model.IP(fake_ip),
network_model.IP(fake_ip),
network_model.IP(fake_ip)],
routes=None,
version=6)
subnets = [subnet_4]
if ipv6:
subnets.append(subnet_6)
network = network_model.Network(id=None,
bridge=fake,
label=None,
subnets=subnets,
vlan=fake_vlan,
bridge_interface=fake_bridge_interface,
injected=False)
vif = network_model.VIF(id='vif-xxx-yyy-zzz',
address=fake,
network=network,
type=network_model.VIF_TYPE_BRIDGE,
devname=None,
ovs_interfaceid=None)
return vif
return network_model.NetworkInfo([current() for x in xrange(0, count)])
def is_osx():
return platform.mac_ver()[0] != ''
test_dns_managers = []
def dns_manager():
global test_dns_managers
manager = minidns.MiniDNS()
test_dns_managers.append(manager)
return manager
def cleanup_dns_managers():
global test_dns_managers
for manager in test_dns_managers:
manager.delete_dns_file()
test_dns_managers = []
def killer_xml_body():
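    # "Billion laughs"-style payload: nested entity definitions that expand to
    # a very large document, intended for tests that exercise XML
    # entity-expansion limits.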
return (("""<!DOCTYPE x [
<!ENTITY a "%(a)s">
<!ENTITY b "%(b)s">
<!ENTITY c "%(c)s">]>
<foo>
<bar>
<v1>%(d)s</v1>
</bar>
</foo>""") % {
'a': 'A' * 10,
'b': '&a;' * 10,
'c': '&b;' * 10,
'd': '&c;' * 9999,
}).strip()
def is_ipv6_supported():
has_ipv6_support = socket.has_ipv6
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
s.close()
except socket.error as e:
if e.errno == errno.EAFNOSUPPORT:
has_ipv6_support = False
else:
raise
# check if there is at least one interface with ipv6
if has_ipv6_support and sys.platform.startswith('linux'):
try:
with open('/proc/net/if_inet6') as f:
if not f.read():
has_ipv6_support = False
except IOError:
has_ipv6_support = False
return has_ipv6_support
|
|
#!/usr/bin/env python
from __future__ import absolute_import, division, with_statement
from tornado.httputil import url_concat, parse_multipart_form_data, HTTPHeaders
from tornado.escape import utf8
from tornado.testing import LogTrapTestCase
from tornado.util import b
import logging
import unittest
class TestUrlConcat(unittest.TestCase):
def test_url_concat_no_query_params(self):
url = url_concat(
"https://localhost/path",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?y=y&z=z")
def test_url_concat_encode_args(self):
url = url_concat(
"https://localhost/path",
[('y', '/y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?y=%2Fy&z=z")
def test_url_concat_trailing_q(self):
url = url_concat(
"https://localhost/path?",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?y=y&z=z")
def test_url_concat_q_with_no_trailing_amp(self):
url = url_concat(
"https://localhost/path?x",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?x&y=y&z=z")
def test_url_concat_trailing_amp(self):
url = url_concat(
"https://localhost/path?x&",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?x&y=y&z=z")
def test_url_concat_mult_params(self):
url = url_concat(
"https://localhost/path?a=1&b=2",
[('y', 'y'), ('z', 'z')],
)
self.assertEqual(url, "https://localhost/path?a=1&b=2&y=y&z=z")
def test_url_concat_no_params(self):
url = url_concat(
"https://localhost/path?r=1&t=2",
[],
)
self.assertEqual(url, "https://localhost/path?r=1&t=2")
class MultipartFormDataTest(LogTrapTestCase):
def test_file_upload(self):
data = b("""\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--""").replace(b("\n"), b("\r\n"))
args = {}
files = {}
parse_multipart_form_data(b("1234"), data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b("Foo"))
def test_unquoted_names(self):
# quotes are optional unless special characters are present
data = b("""\
--1234
Content-Disposition: form-data; name=files; filename=ab.txt
Foo
--1234--""").replace(b("\n"), b("\r\n"))
args = {}
files = {}
parse_multipart_form_data(b("1234"), data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b("Foo"))
def test_special_filenames(self):
filenames = ['a;b.txt',
'a"b.txt',
'a";b.txt',
'a;"b.txt',
'a";";.txt',
'a\\"b.txt',
'a\\b.txt',
]
for filename in filenames:
logging.info("trying filename %r", filename)
data = """\
--1234
Content-Disposition: form-data; name="files"; filename="%s"
Foo
--1234--""" % filename.replace('\\', '\\\\').replace('"', '\\"')
data = utf8(data.replace("\n", "\r\n"))
args = {}
files = {}
parse_multipart_form_data(b("1234"), data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], filename)
self.assertEqual(file["body"], b("Foo"))
def test_boundary_starts_and_ends_with_quotes(self):
data = b('''\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--''').replace(b("\n"), b("\r\n"))
args = {}
files = {}
parse_multipart_form_data(b('"1234"'), data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b("Foo"))
def test_missing_headers(self):
data = b('''\
--1234
Foo
--1234--''').replace(b("\n"), b("\r\n"))
args = {}
files = {}
parse_multipart_form_data(b("1234"), data, args, files)
self.assertEqual(files, {})
def test_invalid_content_disposition(self):
data = b('''\
--1234
Content-Disposition: invalid; name="files"; filename="ab.txt"
Foo
--1234--''').replace(b("\n"), b("\r\n"))
args = {}
files = {}
parse_multipart_form_data(b("1234"), data, args, files)
self.assertEqual(files, {})
def test_line_does_not_end_with_correct_line_break(self):
data = b('''\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo--1234--''').replace(b("\n"), b("\r\n"))
args = {}
files = {}
parse_multipart_form_data(b("1234"), data, args, files)
self.assertEqual(files, {})
def test_content_disposition_header_without_name_parameter(self):
data = b("""\
--1234
Content-Disposition: form-data; filename="ab.txt"
Foo
--1234--""").replace(b("\n"), b("\r\n"))
args = {}
files = {}
parse_multipart_form_data(b("1234"), data, args, files)
self.assertEqual(files, {})
def test_data_after_final_boundary(self):
# The spec requires that data after the final boundary be ignored.
# http://www.w3.org/Protocols/rfc1341/7_2_Multipart.html
# In practice, some libraries include an extra CRLF after the boundary.
data = b("""\
--1234
Content-Disposition: form-data; name="files"; filename="ab.txt"
Foo
--1234--
""").replace(b("\n"), b("\r\n"))
args = {}
files = {}
parse_multipart_form_data(b("1234"), data, args, files)
file = files["files"][0]
self.assertEqual(file["filename"], "ab.txt")
self.assertEqual(file["body"], b("Foo"))
class HTTPHeadersTest(unittest.TestCase):
def test_multi_line(self):
# Lines beginning with whitespace are appended to the previous line
# with any leading whitespace replaced by a single space.
# Note that while multi-line headers are a part of the HTTP spec,
# their use is strongly discouraged.
data = """\
Foo: bar
baz
Asdf: qwer
\tzxcv
Foo: even
more
lines
""".replace("\n", "\r\n")
headers = HTTPHeaders.parse(data)
self.assertEqual(headers["asdf"], "qwer zxcv")
self.assertEqual(headers.get_list("asdf"), ["qwer zxcv"])
self.assertEqual(headers["Foo"], "bar baz,even more lines")
self.assertEqual(headers.get_list("foo"), ["bar baz", "even more lines"])
self.assertEqual(sorted(list(headers.get_all())),
[("Asdf", "qwer zxcv"),
("Foo", "bar baz"),
("Foo", "even more lines")])
|
|
import os
from flask import render_template, current_app, flash, redirect, session, url_for, request, g
from flask_login import login_user, logout_user, current_user, login_required
from app import db, lm, admin
from .forms import *
from . import main_bp
from app.models import User, ROLE_APPLICANT, ROLE_ADVISER, ROLE_ADMIN, Post, Comment, Preference, Favourite
from datetime import datetime
from app.emails import send_email
from werkzeug.utils import secure_filename
from flask_admin.contrib.sqla import ModelView
from flask_admin.contrib.fileadmin import FileAdmin
from pygeocoder import Geocoder
import os.path as op
from config import ADMINS
# file upload setting
UPLOAD_AGENT_FOLDER = 'app/static/agent_photo'
UPLOAD_HOUSE_FOLDER = 'app/static/house_photo'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
MAX_CONTENT_LENGTH = 16 * 1024 * 1024
# admin management setup
admin.add_view(ModelView(User, db.session))
admin.add_view(ModelView(Post, db.session))
path = op.join(os.path.abspath(__file__ + "/../../"), 'static') # need to get parent path of this code
admin.add_view(FileAdmin(path, '/static/', name='Static Files'))
@main_bp.before_app_request
def before_request():
g.user = current_user
if g.user.is_authenticated:
g.user.last_seen = datetime.utcnow()
db.session.add(g.user)
db.session.commit()
@main_bp.route('/list_post', methods=['GET', 'POST'])
@main_bp.route('/list_post/<int:page>', methods=['GET', 'POST'])
@login_required
def list_post(page=1):
form = PeferForm()
page = request.args.get('page', 1, type=int)
    pagination = Post.query.order_by(Post.timestamp.desc()) \
.paginate(page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'], error_out=False)
posts = pagination.items
if form.validate_on_submit() and current_user.role == ROLE_APPLICANT:
pref = Preference(style=form.style.data, bedroom_no=form.bedroom_no.data,
bathroom_no=form.bathroom_no.data, garage_no=form.garage_no.data,
location=form.location.data, price=form.price.data)
results = Post.query.filter(Post.style == pref.style).filter(Post.location == pref.location) \
.filter(Post.price >= 0.8 * float(pref.price)).filter(Post.price <= 1.2 * float(pref.price)) \
.filter(Post.bedroom_no >= pref.bedroom_no - 1).filter(Post.bedroom_no <= pref.bedroom_no + 1) \
.order_by(Post.timestamp.desc())
posts = results.paginate(page, current_app.config['FLASKY_POSTS_PER_PAGE'], False).items
        flash('Found ' + str(results.count()) + ' matching results')
return render_template('list_post.html',
title='All the Houses',
posts=posts,
form=form,
pagination=pagination)
@main_bp.route('/list_agent', methods=['GET', 'POST'])
@main_bp.route('/list_agent/<int:page>', methods=['GET', 'POST'])
@login_required
def list_agent(page=1):
users = User.query.filter(User.role == ROLE_ADVISER).paginate(page, current_app.config['FLASKY_POSTS_PER_PAGE'], False)
return render_template('list_agent.html',
title='All the Agents',
users=users)
@main_bp.route('/', methods=['GET'])
@main_bp.route('/index', methods=['GET'])
def index():
return render_template('index.html')
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@main_bp.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
form = EditForm(current_user.nickname)
if form.validate_on_submit():
current_user.nickname = form.nickname.data
current_user.phone = form.phone.data
current_user.address = form.address.data
current_user.about_me = form.about_me.data
file = form.fileName.data
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file_path = op.join(UPLOAD_AGENT_FOLDER, filename)
file.save(file_path)
# only when file is not none, change it, otherwise keep the previous one
current_user.portrait = op.join('/static/agent_photo/', filename)
if current_user.portrait is None:
current_user.portrait = op.join('/static/agent_photo/', 'agent_default.gif')
db.session.add(g.user)
db.session.commit()
flash('Your changes have been saved.')
return redirect(url_for('.user', nickname=g.user.nickname))
form.nickname.data = current_user.nickname
form.phone.data = current_user.phone
form.address.data = current_user.address
form.about_me.data = current_user.about_me
return render_template('edit_profile.html', form=form)
@main_bp.route('/preference', methods=['GET', 'POST'])
@login_required
def preference():
form = PeferForm()
user = g.user
if form.validate_on_submit() and user.role == ROLE_APPLICANT:
pref = Preference.query.filter_by(user_id=user.id).first()
if pref is None:
pref = Preference(style=form.style.data, bedroom_no=form.bedroom_no.data,
bathroom_no=form.bathroom_no.data, garage_no=form.garage_no.data,
location=form.location.data, price=form.price.data, user_id=user.id,
notify=form.notify.data)
else:
pref.style = form.style.data
pref.bedroom_no = form.bedroom_no.data
pref.bathroom_no = form.bathroom_no.data
pref.garage_no = form.garage_no.data
pref.location = form.location.data
pref.price = form.price.data
pref.notify = form.notify.data
db.session.add(pref)
db.session.commit()
flash('Your preference is set! ')
return redirect(url_for('.user', nickname=user.nickname))
elif request.method != "POST" and user.pref is not None:
form = PeferForm(obj=user.pref)
return render_template('edit_preference.html', form=form)
def map_address(address):
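    # Geocodes a street address with pygeocoder and returns the coordinates as
    # a "lat, lng" string (the parentheses of the tuple repr are stripped).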
results = Geocoder.geocode(address)
return str(results[0].coordinates).strip('()')
@main_bp.route('/edit_post/', methods=['GET', 'POST'])
@main_bp.route('/edit_post/<int:pid>', methods=['GET', 'POST'])
@login_required
def edit_post(pid=0):
form = PostForm()
post = Post.query.filter_by(id=pid).first()
if form.validate_on_submit() and current_user.role == ROLE_ADVISER:
if post is None:
post = Post(title=form.title.data, body=form.body.data,
timestamp=datetime.utcnow(), user_id=current_user.id)
else:
post.title = form.title.data
post.body = form.body.data
post.timestamp = datetime.utcnow()
            post.user_id = current_user.id
file = form.fileName.data
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file_path = op.join(UPLOAD_HOUSE_FOLDER, filename)
file.save(file_path)
post.img = op.join('/static/house_photo/', filename)
if post.img is None:
post.img = op.join('/static/house_photo/', 'house_default.jpeg')
post.location = form.location.data
post.price = form.price.data
post.style = form.style.data
post.bedroom_no = form.bedroom_no.data
post.bathroom_no = form.bathroom_no.data
post.garage_no = form.garage_no.data
post.address = form.address.data
post.coordinate = map_address(post.address + " " + post.location)
db.session.add(post)
db.session.commit()
flash("Your post is alive now. ")
return redirect(url_for('.user', nickname=current_user.nickname))
elif request.method != "POST":
form = PostForm(obj=post)
return render_template('edit_post.html', form=form)
@main_bp.route('/bookmark/<int:pid>', methods=['GET', 'POST'])
@login_required
def bookmark(pid):
if Favourite.query.filter_by(id=str(current_user.id) + ':' + str(pid)).first():
flash('The post was already in your collection.')
else:
fav = Favourite(current_user.id, pid)
db.session.add(fav)
db.session.commit()
        flash('The post was added to your collection.')
return redirect(url_for('.list_post'))
@main_bp.route('/contact', methods=['GET', 'POST'])
def contact():
form = ContactForm()
if request.method == 'POST':
if form.validate() is False:
flash('All fields are required.')
return render_template('contact.html', form=form)
else:
#text_body = """
#From: %s < %s >
#%s """ % (form.name.data, form.email.data, form.message.data)
#send_email(ADMINS[0], form.subject.data, text_body)
send_email(ADMINS[0], form.subject.data, 'auth/contact', form=form)
return render_template('contact.html', success=True)
elif request.method == 'GET':
return render_template('contact.html', form=form)
@main_bp.route('/home/<int:pid>', methods=['GET', 'POST'])
@login_required
def home(pid):
post = Post.query.get_or_404(pid)
form = CommentForm()
if form.validate_on_submit():
comment = Comment(body=form.body.data,
post=post,
author=current_user._get_current_object()
)
db.session.add(comment)
flash('Your comment has been published.')
return redirect(url_for('.home', pid=post.id))
page = request.args.get('page', 1, type=int)
if page == -1:
page = (post.comments.count() - 1) // \
current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1
pagination = post.comments.order_by(Comment.timestamp.asc())\
.paginate(page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'], error_out=False)
comments = pagination.items
return render_template("home.html", post=post, form=form,
comments=comments, pagination=pagination)
@main_bp.route('/user/<nickname>', methods=['GET', 'POST'])
@main_bp.route('/user/<nickname>/<int:page>')
@login_required
def user(nickname, page=1):
user = User.query.filter_by(nickname=nickname).first()
if user is None:
flash('User ' + nickname + ' not found.')
return redirect(url_for('.index'))
if user.role == ROLE_ADVISER:
pagination = user.posts.paginate(page, current_app.config['FLASKY_POSTS_PER_PAGE'], False)
elif user.role == ROLE_APPLICANT:
favs = user.fav.all()
idlist = []
for fav in favs:
idlist.append(fav.post_id)
pagination = Post.query.filter(Post.id.in_(idlist)).paginate(page, current_app.config['FLASKY_POSTS_PER_PAGE'], False)
posts = pagination.items
return render_template('user.html', user=user, posts=posts, pagination=pagination)
@main_bp.route('/signout')
def signout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('.index'))
@main_bp.route('/delete/<int:id>')
@login_required
def delete(id):
post = Post.query.get(id)
if post is None:
flash('Post not found.')
return redirect(url_for('.index'))
if post.author.id != g.user.id:
flash('You cannot delete this post.')
return redirect(url_for('.list_post'))
db.session.delete(post)
db.session.commit()
flash('Your post has been deleted.')
return redirect(url_for('.list_post'))
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match  # returning ends the iteration; no explicit StopIteration needed
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
        elif self.value in args:  # match: fall through subsequent cases until a break
self.fall = True
return True
else:
return False
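# Illustrative usage of the switch helper above (hypothetical, not used
# elsewhere in this module):
#
#   for case in switch(user.role):
#       if case(ROLE_ADVISER):
#           ...  # adviser-specific handling
#           break
#       if case():  # default
#           ...  # fallback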
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import timeutils as tu
import six
import webob
from senlin.api.common import serializers
from senlin.api.common import wsgi
from senlin.common import exception
from senlin.tests.unit.common import base
class JSONRequestDeserializerTest(base.SenlinTestCase):
def test_has_body_no_content_length(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = encodeutils.safe_encode('asdf')
request.headers.pop('Content-Length')
request.headers['Content-Type'] = 'application/json'
obj = serializers.JSONRequestDeserializer()
self.assertFalse(obj.has_body(request))
def test_has_body_zero_content_length(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = encodeutils.safe_encode('asdf')
request.headers['Content-Length'] = 0
request.headers['Content-Type'] = 'application/json'
obj = serializers.JSONRequestDeserializer()
self.assertFalse(obj.has_body(request))
def test_has_body_has_content_length_no_content_type(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = encodeutils.safe_encode('{"key": "value"}')
self.assertIn('Content-Length', request.headers)
obj = serializers.JSONRequestDeserializer()
self.assertTrue(obj.has_body(request))
def test_has_body_has_content_length_plain_content_type(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = encodeutils.safe_encode('{"key": "value"}')
self.assertIn('Content-Length', request.headers)
request.headers['Content-Type'] = 'text/plain'
obj = serializers.JSONRequestDeserializer()
self.assertTrue(obj.has_body(request))
def test_has_body_has_content_type_malformed(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = encodeutils.safe_encode('asdf')
self.assertIn('Content-Length', request.headers)
request.headers['Content-Type'] = 'application/json'
obj = serializers.JSONRequestDeserializer()
self.assertFalse(obj.has_body(request))
def test_has_body_has_content_type(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = encodeutils.safe_encode('{"key": "value"}')
self.assertIn('Content-Length', request.headers)
request.headers['Content-Type'] = 'application/json'
obj = serializers.JSONRequestDeserializer()
self.assertTrue(obj.has_body(request))
def test_has_body_has_wrong_content_type(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = encodeutils.safe_encode('{"key": "value"}')
self.assertIn('Content-Length', request.headers)
request.headers['Content-Type'] = 'application/xml'
obj = serializers.JSONRequestDeserializer()
self.assertFalse(obj.has_body(request))
def test_has_body_has_aws_content_type_only(self):
request = wsgi.Request.blank('/?ContentType=JSON')
request.method = 'GET'
request.body = encodeutils.safe_encode('{"key": "value"}')
self.assertIn('Content-Length', request.headers)
obj = serializers.JSONRequestDeserializer()
self.assertTrue(obj.has_body(request))
def test_has_body_content_type_with_get(self):
request = wsgi.Request.blank('/')
request.method = 'GET'
request.body = encodeutils.safe_encode('{"key": "value"}')
self.assertIn('Content-Length', request.headers)
obj = serializers.JSONRequestDeserializer()
self.assertTrue(obj.has_body(request))
def test_no_body_no_content_length(self):
request = wsgi.Request.blank('/')
obj = serializers.JSONRequestDeserializer()
self.assertFalse(obj.has_body(request))
def test_from_json(self):
fixture = '{"key": "value"}'
expected = {"key": "value"}
actual = serializers.JSONRequestDeserializer().from_json(fixture)
self.assertEqual(expected, actual)
def test_from_json_malformed(self):
fixture = 'kjasdklfjsklajf'
self.assertRaises(webob.exc.HTTPBadRequest,
serializers.JSONRequestDeserializer().from_json,
fixture)
def test_default_no_body(self):
request = wsgi.Request.blank('/')
actual = serializers.JSONRequestDeserializer().default(request)
expected = {}
self.assertEqual(expected, actual)
def test_default_with_body(self):
request = wsgi.Request.blank('/')
request.method = 'POST'
request.body = encodeutils.safe_encode('{"key": "value"}')
actual = serializers.JSONRequestDeserializer().default(request)
expected = {"body": {"key": "value"}}
self.assertEqual(expected, actual)
def test_default_with_get_with_body(self):
request = wsgi.Request.blank('/')
request.method = 'GET'
request.body = encodeutils.safe_encode('{"key": "value"}')
actual = serializers.JSONRequestDeserializer().default(request)
expected = {"body": {"key": "value"}}
self.assertEqual(expected, actual)
def test_default_with_get_with_body_with_aws(self):
request = wsgi.Request.blank('/?ContentType=JSON')
request.method = 'GET'
request.body = encodeutils.safe_encode('{"key": "value"}')
actual = serializers.JSONRequestDeserializer().default(request)
expected = {"body": {"key": "value"}}
self.assertEqual(expected, actual)
def test_from_json_exceeds_max_json_mb(self):
cfg.CONF.set_override('max_json_body_size', 10, group='senlin_api')
body = jsonutils.dumps(['a'] * cfg.CONF.senlin_api.max_json_body_size)
self.assertGreater(len(body), cfg.CONF.senlin_api.max_json_body_size)
obj = serializers.JSONRequestDeserializer()
error = self.assertRaises(exception.RequestLimitExceeded,
obj.from_json,
body)
msg = ('Request limit exceeded: JSON body size '
'(%s bytes) exceeds maximum allowed size (%s bytes).'
) % (len(body), cfg.CONF.senlin_api.max_json_body_size)
self.assertEqual(msg, six.text_type(error))
class JSONResponseSerializerTest(base.SenlinTestCase):
def test_to_json(self):
fixture = {"key": "value"}
expected = '{"key": "value"}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_to_json_with_date_format_value(self):
test_date = tu.parse_strtime("0001-03-08T02:00:00",
'%Y-%m-%dT%H:%M:%S')
fixture = {"date": test_date}
expected = '{"date": "0001-03-08T02:00:00"}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_to_json_with_more_deep_format(self):
val = complex(1, 2)
fixture = {"is_public": True, "v": val}
expected = '{"is_public": true, "v": "(1+2j)"}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_default(self):
fixture = {"key": "value"}
response = webob.Response()
serializers.JSONResponseSerializer().default(response, fixture)
self.assertEqual(200, response.status_int)
content_types = [h for h in response.headerlist
if h[0] == 'Content-Type']
        # NOTE: filter() returns an iterator in Python 3.
types = [t for t in content_types]
self.assertEqual(1, len(types))
self.assertEqual('application/json', response.content_type)
self.assertEqual('{"key": "value"}',
encodeutils.safe_decode(response.body))
|
|
import gameDefs
Game = gameDefs.Game
Drive = gameDefs.Drive
Play = gameDefs.Play
Event = gameDefs.Event
def getPlay(game, drive, play, item, itemWNames):
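    # `item` appears to be the lower-cased play-by-play text used for keyword
    # searches, while `itemWNames` is the same text with original capitalization
    # so player names can be sliced out of it.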
play.body = itemWNames
gain = 0
kickoffLoc = item.find(' kickoff')
onsideLoc = item.find('on-side kick')
fieldGoalLoc = item.find(' field goal')
extraPointLoc = item.find(' extra point')
if extraPointLoc == -1:
extraPointLoc = item.find('blocked pat')
##### parser2 changes #####
if extraPointLoc == -1:
extraPointLoc = item.find(' kick attempt')
##### end parser2 changes #####
puntLoc = item.find(' punt ')
rushLoc = item.find(' rush ')
sackLoc = item.find(' sacked')
scrambleLoc = item.find(' scramble')
passLoc = item.find(' pass ')
recoverLoc = item.find(' recover')
advanceLoc = -1
returnLoc = item.find(' return')
fumbleLoc = item.find(' fumble')
lateralLoc = item.find(' lateral')
penaltyLoc = item.find(' penalty')
nextPlayLoc = len(item)
if returnLoc != -1 and returnLoc < nextPlayLoc:
nextPlayLoc = returnLoc
if fumbleLoc != -1 and fumbleLoc < nextPlayLoc:
nextPlayLoc = fumbleLoc
if lateralLoc != -1 and lateralLoc < nextPlayLoc:
nextPlayLoc = lateralLoc
if penaltyLoc != -1 and penaltyLoc < nextPlayLoc:
nextPlayLoc = penaltyLoc
event = Event()
event.startingYard = play.startingYard
play.events.append(event)
# extra point
if extraPointLoc != -1:
event.eventType = gameDefs.EVENT_FIELD_GOAL_ATTEMPT
play.down = -1
play.distance = 3
play.startingYard = 97
event.startingYard = 97
kickerName = itemWNames[0:extraPointLoc]
event.offense1 = kickerName.strip()
if item.find('missed', extraPointLoc, nextPlayLoc) != -1:
play.result = gameDefs.RESULT_MISSED_FIELD_GOAL
event.endingYard = event.startingYard
elif item.find('good', 0, nextPlayLoc) != -1:
play.result = gameDefs.RESULT_FIELD_GOAL
event.endingYard = 100
elif item.find('blocked', extraPointLoc, nextPlayLoc) != -1:
#print "EP BLOCKED - " + item
play.result = gameDefs.RESULT_TURNOVER_ON_DOWNS
getDefenderNames(event, item, itemWNames, extraPointLoc, len(item), 'blocked by ')
# recovery
nextEvent = getRecovery(drive, play, event, item, itemWNames, recoverLoc, nextPlayLoc)
if nextEvent:
event = nextEvent
else:
event.endingYard = event.startingYard
# two-point conversion
elif item.find('conversion', 0, nextPlayLoc) != -1 or play.down == -1:
#print "2PC - " + item
play.down = -1
play.distance = 3
play.startingYard = 97
event.startingYard = 97
attemptLoc = item.find(' attempt,', 0, nextPlayLoc)
passLoc = item.find(' pass ', 0, nextPlayLoc)
rushLoc = item.find(' rush ', 0, nextPlayLoc)
if item.find(' failed', 0, nextPlayLoc) != -1:
play.result = gameDefs.RESULT_ADVANCE_DOWN
if passLoc != -1:
event.eventType = gameDefs.EVENT_PASS
qbName = itemWNames[attemptLoc+9:passLoc]
event.offense1 = qbName.strip()
elif rushLoc != -1:
event.eventType = gameDefs.EVENT_RUSH
rusherName = itemWNames[attemptLoc+9:rushLoc]
event.offense1 = rusherName.strip()
play.endingYard = play.startingYard
elif item.find(' good', 0, nextPlayLoc) != -1:
play.result = gameDefs.RESULT_TOUCHDOWN
if passLoc != -1:
event.eventType = gameDefs.EVENT_PASS
qbName = itemWNames[attemptLoc+9:passLoc]
event.offense1 = qbName.strip()
toLoc = item.find(' to ', attemptLoc, nextPlayLoc)
goodLoc = item.find(' good', attemptLoc, nextPlayLoc)
if toLoc != -1:
receiverName = itemWNames[toLoc+4:goodLoc]
event.offense2 = receiverName.strip()
elif rushLoc != -1:
event.eventType = gameDefs.EVENT_RUSH
rusherName = itemWNames[attemptLoc+9:rushLoc]
event.offense1 = rusherName.strip()
event.endingYard = 100
# field goal
elif fieldGoalLoc != -1:
event.eventType = gameDefs.EVENT_FIELD_GOAL_ATTEMPT
i = 0
while i <= fieldGoalLoc and not item[i].isdigit():
i += 1
kickerName = itemWNames[0:i-1]
event.offense1 = kickerName.strip()
if item.find('missed', fieldGoalLoc, nextPlayLoc) != -1:
play.result = gameDefs.RESULT_MISSED_FIELD_GOAL
if event.startingYard > 80:
event.endingYard = 80
else:
event.endingYard = event.startingYard
elif item.find('good', fieldGoalLoc, nextPlayLoc) != -1:
play.result = gameDefs.RESULT_FIELD_GOAL
event.endingYard = 100
elif item.find('blocked', fieldGoalLoc, nextPlayLoc) != -1:
play.result = gameDefs.RESULT_MISSED_FIELD_GOAL
getDefenderNames(event, item, itemWNames, fieldGoalLoc, len(item), 'blocked by ')
# recovery
nextEvent = getRecovery(drive, play, event, item, itemWNames, recoverLoc, nextPlayLoc)
if nextEvent:
event = nextEvent
else:
event.endingYard = event.startingYard
# kickoff
elif kickoffLoc != -1:
event.eventType = gameDefs.EVENT_KICKOFF
play.down = 0
kickerName = itemWNames[0:kickoffLoc]
event.offense1 = kickerName.strip()
kickLength = getGain(drive, event, item[kickoffLoc+8:nextPlayLoc])
if item.find('out-of-bounds', kickoffLoc, nextPlayLoc) != -1:
if game.year == 2007:
# except during 2007, when this number was 35
event.endingYard = event.startingYard + 35 # usually 65
else:
event.endingYard = event.startingYard + 30 # usually 60 now, though was 65 before 2007
else:
event.endingYard = event.startingYard + kickLength
play.result = gameDefs.RESULT_KICK_RECEIVED
# on-side kick
elif onsideLoc != -1:
event.eventType = gameDefs.EVENT_ONSIDE_KICK
play.down = 0
kickerName = itemWNames[0:onsideLoc]
event.offense1 = kickerName.strip()
play.result = gameDefs.RESULT_ADVANCE_DOWN
# recovery
nextEvent = getRecovery(drive, play, event, item, itemWNames, recoverLoc, nextPlayLoc)
if nextEvent:
event = nextEvent
else:
kickLength = getGain(drive, event, item[onsideLoc+13:nextPlayLoc])
event.endingYard = event.startingYard + kickLength
# punt
elif puntLoc != -1:
event.eventType = gameDefs.EVENT_PUNT
kickerName = itemWNames[0:puntLoc]
event.offense1 = kickerName.strip()
if item.find('blocked', puntLoc, nextPlayLoc) != -1:
event.eventType = gameDefs.EVENT_BLOCKED_PUNT
getDefenderNames(event, item, itemWNames, puntLoc, len(item), 'blocked by ')
play.result = gameDefs.RESULT_ADVANCE_DOWN
# recovery
nextEvent = getRecovery(drive, play, event, item, itemWNames, recoverLoc, nextPlayLoc)
if nextEvent:
event = nextEvent
else:
event.endingYard = event.startingYard
else:
getDefenderNames(event, item, itemWNames, puntLoc, nextPlayLoc, ' fair catch by ')
puntLength = getGain(drive, event, item[puntLoc+5:nextPlayLoc])
event.endingYard = event.startingYard + puntLength
play.result = gameDefs.RESULT_KICK_RECEIVED
# rush
elif rushLoc != -1:
event.eventType = gameDefs.EVENT_RUSH
rusherName = itemWNames[0:rushLoc]
event.offense1 = rusherName.strip()
getDefenderNames(event, item, itemWNames, rushLoc, nextPlayLoc, ' tackled by ')
gain = getGain(drive, event, item[rushLoc+5:nextPlayLoc])
event.endingYard = event.startingYard + gain
play.result = gameDefs.RESULT_ADVANCE_DOWN
# sack
elif sackLoc != -1:
event.eventType = gameDefs.EVENT_SACK
qbName = itemWNames[0:sackLoc]
event.offense1 = qbName.strip()
getDefenderNames(event, item, itemWNames, sackLoc, nextPlayLoc, ' by ')
gain = getGain(drive, event, item[sackLoc+7:nextPlayLoc])
if gain > 0:
gain = -gain
event.endingYard = event.startingYard + gain
play.result = gameDefs.RESULT_ADVANCE_DOWN
# scramble
elif scrambleLoc != -1:
event.eventType = gameDefs.EVENT_RUSH
qbName = itemWNames[0:scrambleLoc]
event.offense1 = qbName.strip()
getDefenderNames(event, item, itemWNames, scrambleLoc, nextPlayLoc, ' tackled by ')
        gain = getGain(drive, event, item[scrambleLoc+9:nextPlayLoc])
event.endingYard = event.startingYard + gain
play.result = gameDefs.RESULT_ADVANCE_DOWN
# pass
elif passLoc != -1 and (penaltyLoc == -1 or passLoc < penaltyLoc):
event.eventType = gameDefs.EVENT_PASS
qbName = itemWNames[0:passLoc]
event.offense1 = qbName.strip()
getOffensiveNames(event, item, itemWNames, passLoc, nextPlayLoc, ' to ')
if item.find('incomplete', passLoc, nextPlayLoc) != -1:
event.eventType = gameDefs.EVENT_INCOMPLETE_PASS
gain = 0
# for other plays, it's obvious from the type of play how to credit the defenders
# listed. however, for an incomplete pass, there's no distinction between a
            # defender hurrying the quarterback and a defender breaking up the pass.
# if this becomes an issue, we'll have to modify our database.
getDefenderNames(event, item, itemWNames, passLoc, nextPlayLoc, ' hurry by ')
getDefenderNames(event, item, itemWNames, passLoc, nextPlayLoc, ' hurried by ')
getDefenderNames(event, item, itemWNames, passLoc, nextPlayLoc, ' broken up by ')
else:
gain = getGain(drive, event, item[passLoc+5:nextPlayLoc])
event.endingYard = event.startingYard + gain
interceptionLoc = item.find('intercepted', passLoc, nextPlayLoc)
if interceptionLoc != -1:
event.eventType = gameDefs.EVENT_INTERCEPTION
play.result = gameDefs.RESULT_TURNOVER
event.offense2 = ''
getDefenderNames(event, item, itemWNames, interceptionLoc, nextPlayLoc, ' by ')
else:
getDefenderNames(event, item, itemWNames, passLoc, nextPlayLoc, ' tackled by ')
play.result = gameDefs.RESULT_ADVANCE_DOWN
# timeout
elif item.find('timeout') != -1:
event.eventType = gameDefs.EVENT_NULL
event.endingYard = event.startingYard
# end of quarter
elif item.find('end of ') != -1 and (item.find(' quarter') != -1 or item.find(' ot') != -1):
event.eventType = gameDefs.EVENT_NULL
event.endingYard = event.startingYard
# nothing listed
elif len(item) == 0:
event.eventType = gameDefs.EVENT_NULL
event.endingYard = event.startingYard
# has a next play
if nextPlayLoc != len(item):
# don't worry
pass
# unknown play
#else:
# if item != ' ':
# print '[unknown] ' + item
if nextPlayLoc != len(item):
getNextEvent(drive, play, event, item, itemWNames, advanceLoc, returnLoc, fumbleLoc, lateralLoc, penaltyLoc, nextPlayLoc)
if event.eventType == gameDefs.EVENT_NULL:
tackledLoc = item.find(' tackled ')
if tackledLoc != -1:
event.eventType = gameDefs.EVENT_RUSH
name = itemWNames[0:tackledLoc]
event.offense1 = name
getDefenderNames(event, item, itemWNames, tackledLoc, nextPlayLoc, ' by ')
gain = getGain(drive, event, item[tackledLoc+9:nextPlayLoc])
event.endingYard = event.startingYard + gain
play.result = gameDefs.RESULT_ADVANCE_DOWN
resolveResult(play, event, item, 0, nextPlayLoc)
if play.down == 4 and play.result == gameDefs.RESULT_ADVANCE_DOWN:
play.result = gameDefs.RESULT_TURNOVER_ON_DOWNS
return 0
# get stats for a recovery, if there is one
# return the recovery event, or None
def getRecovery(drive, play, event, item, itemWNames, recoverLoc, nextPlayLoc):
if recoverLoc != -1:
recovery = item[recoverLoc+7:nextPlayLoc]
gain = getGain(drive, event, recovery)
event.endingYard = event.startingYard + gain
prevEvent = event
event = Event()
event.startingYard = prevEvent.endingYard
play.events.append(event)
event.eventType = gameDefs.EVENT_RECOVERY
event.endingYard = event.startingYard
duringReturn = play.duringReturn(event)
byLoc = item.find(' by ', recoverLoc+7, nextPlayLoc)
if byLoc != -1:
            # use local names that do not shadow the builtin `str`
            segment = item[byLoc+4:nextPlayLoc]
            segmentWNames = itemWNames[byLoc+4:nextPlayLoc]
            recovererName = findName(segment, segmentWNames, drive)
# look for which team recovered
spaceLoc = recovererName.find(' ')
if spaceLoc == -1:
spaceLoc = len(recovererName)
teamID = recovererName[0:spaceLoc].lower()
if teamID == drive.medPoss.lower() or teamID == drive.teamPoss.lower():
recovererName = recovererName[spaceLoc+1:]
if duringReturn:
# offense (now defense) got it back
event.eventType = gameDefs.EVENT_RECOVERY_DEFENSE
event.defense1 = recovererName
play.result = gameDefs.RESULT_DEFENSIVE_TURNOVER
else:
# offense retained it
event.eventType = gameDefs.EVENT_RECOVERY_OFFENSE
event.offense1 = recovererName
elif teamID == drive.medDef.lower() or teamID == drive.teamDef.lower():
recovererName = recovererName[spaceLoc+1:]
if duringReturn:
# defense (now offense) retained it
event.eventType = gameDefs.EVENT_RECOVERY_OFFENSE
event.offense1 = recovererName
else:
# defense recovered it
event.eventType = gameDefs.EVENT_RECOVERY_DEFENSE
event.defense1 = recovererName
play.result = gameDefs.RESULT_TURNOVER
elif recovererName == prevEvent.offense1 or recovererName == prevEvent.offense2:
# recovered by kicker/fumbler
if duringReturn:
# offense (now defense) got it back
event.eventType = gameDefs.EVENT_RECOVERY_DEFENSE
event.defense1 = recovererName
play.result = gameDefs.RESULT_DEFENSIVE_TURNOVER
else:
# offense retained it
event.eventType = gameDefs.EVENT_RECOVERY_OFFENSE
event.offense1 = recovererName
            elif recovererName == prevEvent.defense1:
# recovered by player who blocked the kick/forced the fumble
if duringReturn:
# defense (now offense) retained it
event.eventType = gameDefs.EVENT_RECOVERY_OFFENSE
event.offense1 = recovererName
else:
# defense recovered it
event.eventType = gameDefs.EVENT_RECOVERY_DEFENSE
event.defense1 = recovererName
play.result = gameDefs.RESULT_TURNOVER
else:
# unknown recovery
event.offense1 = recovererName
getDefenderNames(event, item, itemWNames, byLoc, nextPlayLoc, ' tackled by ')
return event
else:
return None
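# parse the next segment of the play text after prevEvent:
# create a follow-on event (carrying the ball carrier over from the previous
# event type) and dispatch to the advance/return/fumble/lateral/penalty parser
# whose keyword starts at nextPlayLoc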
def getNextEvent(drive, play, prevEvent, item, itemWNames, advanceLoc, returnLoc, fumbleLoc, lateralLoc, penaltyLoc, nextPlayLoc):
if prevEvent.eventType != gameDefs.EVENT_NULL:
event = Event()
event.startingYard = prevEvent.endingYard
if prevEvent.eventType == gameDefs.EVENT_RUSH or prevEvent.eventType == gameDefs.EVENT_SACK or prevEvent.eventType == gameDefs.EVENT_RETURN:
event.offense1 = prevEvent.offense1
elif prevEvent.eventType == gameDefs.EVENT_RECOVERY_OFFENSE or prevEvent.eventType == gameDefs.EVENT_RECOVERY:
event.offense1 = prevEvent.offense1
elif prevEvent.eventType == gameDefs.EVENT_RECOVERY_DEFENSE:
event.offense1 = prevEvent.defense1
elif prevEvent.eventType == gameDefs.EVENT_PASS or prevEvent.eventType == gameDefs.EVENT_LATERAL:
event.offense1 = prevEvent.offense2
elif prevEvent.eventType == gameDefs.EVENT_INTERCEPTION:
event.offense1 = prevEvent.defense1
play.events.append(event)
else:
event = prevEvent
if advanceLoc == nextPlayLoc:
event.eventType = gameDefs.EVENT_ADVANCE
getAdvance(drive, play, event, item, itemWNames, advanceLoc)
elif returnLoc == nextPlayLoc:
event.eventType = gameDefs.EVENT_RETURN
event.startingYard = 100 - prevEvent.endingYard
getReturn(drive, play, event, item, itemWNames, returnLoc)
elif fumbleLoc == nextPlayLoc:
event.eventType = gameDefs.EVENT_FUMBLE
getFumble(drive, play, event, item, itemWNames, fumbleLoc)
elif lateralLoc == nextPlayLoc:
event.eventType = gameDefs.EVENT_LATERAL
getLateral(drive, play, event, item, itemWNames, lateralLoc)
else:
event.offense1 = ''
if play.result == gameDefs.RESULT_NONE:
play.result = gameDefs.RESULT_REPEAT_DOWN
changePoss = False
if prevEvent.eventType == gameDefs.EVENT_KICKOFF or prevEvent.eventType == gameDefs.EVENT_PUNT:
changePoss = True
elif prevEvent.eventType == gameDefs.EVENT_INTERCEPTION or prevEvent.eventType == gameDefs.EVENT_RECOVERY_DEFENSE:
changePoss = True
event.eventType = gameDefs.EVENT_PENALTY
if changePoss:
event.startingYard = 100 - prevEvent.endingYard
getPenalty(drive, play, event, item, itemWNames, penaltyLoc)
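# parse an 'advance' segment: bound it at the next return/fumble/lateral/penalty
# keyword, compute the yardage gained, record any tacklers, then continue with
# the following segment (if any) and resolve the play result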
def getAdvance(drive, play, event, item, itemWNames, advanceLoc):
advanceLoc2 = -1
returnLoc = item.find(' return', advanceLoc)
fumbleLoc = item.find(' fumble', advanceLoc)
lateralLoc = item.find(' lateral', advanceLoc)
penaltyLoc = item.find(' penalty', advanceLoc)
nextPlayLoc = len(item)
if returnLoc != -1 and returnLoc < nextPlayLoc:
nextPlayLoc = returnLoc
if fumbleLoc != -1 and fumbleLoc < nextPlayLoc:
nextPlayLoc = fumbleLoc
if lateralLoc != -1 and lateralLoc < nextPlayLoc:
nextPlayLoc = lateralLoc
if penaltyLoc != -1 and penaltyLoc < nextPlayLoc:
nextPlayLoc = penaltyLoc
gain = getGain(drive, event, item[advanceLoc+8:nextPlayLoc])
event.endingYard = event.startingYard + gain
getDefenderNames(event, item, itemWNames, advanceLoc, nextPlayLoc, ' tackled by ')
if nextPlayLoc != len(item):
getNextEvent(drive, play, event, item, itemWNames, advanceLoc2, returnLoc, fumbleLoc, lateralLoc, penaltyLoc, nextPlayLoc)
resolveResult(play, event, item, advanceLoc, nextPlayLoc)
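# parse a 'return' segment: identify the returner if not already known,
# compute the return length, record any tacklers, then continue with the
# following segment (if any) and resolve the play result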
def getReturn(drive, play, event, item, itemWNames, returnLoc):
advanceLoc = -1
returnLoc2 = item.find(' return', returnLoc+1)
fumbleLoc = item.find(' fumble', returnLoc)
lateralLoc = item.find(' lateral', returnLoc)
penaltyLoc = item.find(' penalty', returnLoc)
nextPlayLoc = len(item)
if returnLoc2 != -1 and returnLoc2 < nextPlayLoc:
nextPlayLoc = returnLoc2
if fumbleLoc != -1 and fumbleLoc < nextPlayLoc:
nextPlayLoc = fumbleLoc
if lateralLoc != -1 and lateralLoc < nextPlayLoc:
nextPlayLoc = lateralLoc
if penaltyLoc != -1 and penaltyLoc < nextPlayLoc:
nextPlayLoc = penaltyLoc
if event.offense1 == '':
getOffensiveNames(event, item, itemWNames, returnLoc, nextPlayLoc, ' by ')
#spaceLoc = item.rfind(' ', 0, returnLoc)
#commaLoc = item.rfind(',', 0, returnLoc)
#periodLoc = item.rfind('.', 0, returnLoc)
#if commaLoc != -1 and commaLoc+1 == spaceLoc:
# commaLoc = item.rfind(',', 0, commaLoc-1)
# periodLoc = item.rfind('.', 0, commaLoc)
#elif periodLoc != -1 and periodLoc+1 == spaceLoc:
# periodLoc = item.rfind(',', 0, periodLoc-1)
#if commaLoc > periodLoc:
# returnerName = itemWNames[commaLoc+1:returnLoc]
#elif periodLoc > commaLoc:
# returnerName = itemWNames[periodLoc+1:returnLoc]
#else:
# periodLoc can't equal commaLoc unless they both equal -1
# returnerName = ''
#event.offense1 = returnerName.strip()
spaceLoc = item.find(' ', returnLoc+1, nextPlayLoc)
returnLength = getGain(drive, event, item[spaceLoc:nextPlayLoc])
event.endingYard = event.startingYard + returnLength
#print 'return: ' + item[returnLoc:nextPlayLoc]
#print event.startingYard
#print event.endingYard
#print returnLength
getDefenderNames(event, item, itemWNames, returnLoc, nextPlayLoc, ' tackled by ')
if nextPlayLoc != len(item):
getNextEvent(drive, play, event, item, itemWNames, advanceLoc, returnLoc2, fumbleLoc, lateralLoc, penaltyLoc, nextPlayLoc)
resolveResult(play, event, item, returnLoc, nextPlayLoc)
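# parse a 'fumble' segment: record who forced the fumble, handle an optional
# recovery, move the ball to the recovery spot, then continue with the
# following segment (if any) and resolve the play result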
def getFumble(drive, play, event, item, itemWNames, fumbleLoc):
advanceLoc = -1
returnLoc = item.find(' return', fumbleLoc)
fumbleLoc2 = item.find(' fumble', fumbleLoc+1)
lateralLoc = item.find(' lateral', fumbleLoc)
penaltyLoc = item.find(' penalty', fumbleLoc)
nextPlayLoc = len(item)
if returnLoc != -1 and returnLoc < nextPlayLoc:
nextPlayLoc = returnLoc
if fumbleLoc2 != -1 and fumbleLoc2 < nextPlayLoc:
nextPlayLoc = fumbleLoc2
if lateralLoc != -1 and lateralLoc < nextPlayLoc:
nextPlayLoc = lateralLoc
if penaltyLoc != -1 and penaltyLoc < nextPlayLoc:
nextPlayLoc = penaltyLoc
getDefenderNames(event, item, itemWNames, fumbleLoc, nextPlayLoc, ' forced by ')
recoverLoc = item.find(' recover', fumbleLoc, nextPlayLoc)
# recovery
nextEvent = getRecovery(drive, play, event, item, itemWNames, recoverLoc, nextPlayLoc)
if nextEvent:
if nextEvent.eventType == gameDefs.EVENT_RECOVERY_OFFENSE:
recovererName = nextEvent.offense1
elif nextEvent.eventType == gameDefs.EVENT_RECOVERY_DEFENSE:
recovererName = nextEvent.defense1
commaLoc = item.find(',', recoverLoc, nextPlayLoc)
if commaLoc != -1:
name = recovererName.lower() + ' for '
nameLoc = item.find(name, commaLoc, nextPlayLoc)
if nameLoc != -1 and nameLoc < nextPlayLoc:
if nextEvent.eventType == gameDefs.EVENT_RECOVERY_DEFENSE:
returnLoc = nameLoc
else:
advanceLoc = nameLoc
nextPlayLoc = nameLoc
gain = getGain(drive, event, item[recoverLoc+7:nextPlayLoc])
event.endingYard = event.startingYard + gain
nextEvent.startingYard = event.endingYard
nextEvent.endingYard = nextEvent.startingYard
event = nextEvent
if nextPlayLoc != len(item):
getNextEvent(drive, play, event, item, itemWNames, advanceLoc, returnLoc, fumbleLoc2, lateralLoc, penaltyLoc, nextPlayLoc)
resolveResult(play, event, item, fumbleLoc, nextPlayLoc)
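# parse a 'lateral' segment: record the player receiving the lateral, compute
# the yardage gained, record any tacklers, then continue with the following
# segment (if any) and resolve the play result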
def getLateral(drive, play, event, item, itemWNames, lateralLoc):
advanceLoc = -1
returnLoc = item.find(' return', lateralLoc)
fumbleLoc = item.find(' fumble', lateralLoc)
lateralLoc2 = item.find(' lateral', lateralLoc+1)
penaltyLoc = item.find(' penalty', lateralLoc)
nextPlayLoc = len(item)
if returnLoc != -1 and returnLoc < nextPlayLoc:
nextPlayLoc = returnLoc
if fumbleLoc != -1 and fumbleLoc < nextPlayLoc:
nextPlayLoc = fumbleLoc
if lateralLoc2 != -1 and lateralLoc2 < nextPlayLoc:
nextPlayLoc = lateralLoc2
if penaltyLoc != -1 and penaltyLoc < nextPlayLoc:
nextPlayLoc = penaltyLoc
getOffensiveNames(event, item, itemWNames, lateralLoc, nextPlayLoc, ' to ')
gain = getGain(drive, event, item[lateralLoc+8:nextPlayLoc])
event.endingYard = event.startingYard + gain
getDefenderNames(event, item, itemWNames, lateralLoc, nextPlayLoc, ' tackled by ')
if nextPlayLoc != len(item):
getNextEvent(drive, play, event, item, itemWNames, advanceLoc, returnLoc, fumbleLoc, lateralLoc2, penaltyLoc, nextPlayLoc)
resolveResult(play, event, item, lateralLoc, nextPlayLoc)
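# parse a 'penalty' segment: work out the yardage and which team committed the
# foul, handle declined/off-setting/no-play penalties, loss of down and
# automatic first downs, adjust the play result, and recurse if another
# penalty follows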
def getPenalty(drive, play, event, item, itemWNames, penaltyLoc):
penaltyLoc2 = item.find(' penalty', penaltyLoc+1)
if penaltyLoc2 == -1:
penaltyLoc2 = len(item)
acceptedLoc = item.find(' accepted', penaltyLoc, penaltyLoc2)
declinedLoc = item.find(' declined', penaltyLoc, penaltyLoc2)
offsettingLoc = item.find('off-setting', penaltyLoc, penaltyLoc2)
if item.find(' no play', penaltyLoc, penaltyLoc2) != -1:
play.events = []
event.startingYard = play.startingYard
play.events.append(event)
play.result = gameDefs.RESULT_REPEAT_DOWN
i = penaltyLoc+9
while i < len(item) and not item[i].isdigit():
i += 1
gain = getGain(drive, event, item[i:penaltyLoc2], True)
# look for which team committed the penalty
spaceLoc = item.find(' ', penaltyLoc+9)
if spaceLoc == -1:
spaceLoc = len(item)
teamID = itemWNames[penaltyLoc+9:spaceLoc]
if teamID == drive.medPoss:
# offense did it
offensivePenalty = True
elif teamID == drive.medDef:
# defense did it
offensivePenalty = False
else:
searchLoc = i - 25
if searchLoc < 0:
searchLoc = 0
penaltyString = item[searchLoc:penaltyLoc]
commaLoc = penaltyString.find(',')
if commaLoc != -1:
penaltyString = penaltyString[commaLoc+1:]
periodLoc = penaltyString.find('.')
if periodLoc != -1:
penaltyString = penaltyString[periodLoc+1:]
offensivePenalty = whichTeam(drive.teamPoss.lower(), drive.teamDef.lower(), penaltyString)
duringReturn = play.duringReturn(event)
if declinedLoc != -1:
gain = 0
elif offsettingLoc != -1:
gain = 0
#if play.result == gameDefs.RESULT_ADVANCE_DOWN:
# play.result = gameDefs.RESULT_REPEAT_DOWN
else:
#print 'PENALTY - ' + item[i:]
# check if there's a loss of down
lossOfDown = False
if item.find('intentional grounding', penaltyLoc, penaltyLoc2) != -1:
lossOfDown = True
elif item.find('illegal forward pass', penaltyLoc, penaltyLoc2) != -1:
lossOfDown = True
# check if there's an automatic first down
automaticFirstDown = False
if not (offensivePenalty or duringReturn):
if item.find('personal foul', penaltyLoc, penaltyLoc2) != -1:
automaticFirstDown = True
elif item.find('pass interference', penaltyLoc, penaltyLoc2) != -1:
automaticFirstDown = True
elif item.find('roughing the kicker', penaltyLoc, penaltyLoc2) != -1:
automaticFirstDown = True
if offensivePenalty:
#print "Offensive Penalty"
gain = -gain
if duringReturn:
#print "During Return"
gain = -gain
if item.find('1st down', penaltyLoc, penaltyLoc2) != -1:
if play.result < gameDefs.RESULT_FIRST_DOWN:
play.result = gameDefs.RESULT_FIRST_DOWN
elif play.result == gameDefs.RESULT_ADVANCE_DOWN:
if not lossOfDown:
play.result = gameDefs.RESULT_REPEAT_DOWN
if automaticFirstDown and play.down != 0:
play.result = gameDefs.RESULT_FIRST_DOWN
elif play.result == gameDefs.RESULT_REPEAT_DOWN:
if lossOfDown:
play.result = gameDefs.RESULT_ADVANCE_DOWN
if automaticFirstDown and play.down != 0:
play.result = gameDefs.RESULT_FIRST_DOWN
loc = penaltyLoc2
if acceptedLoc != -1 and acceptedLoc < loc:
loc = acceptedLoc
if declinedLoc != -1 and declinedLoc < loc:
loc = declinedLoc
if offsettingLoc != -1 and offsettingLoc < loc:
loc = offsettingLoc
getDefenderNames(event, item, itemWNames, penaltyLoc, loc, ' on ')
if offensivePenalty is not duringReturn:
event.offense1 = event.defense1
event.defense1 = ''
event.endingYard = event.startingYard + gain
# check to see if the penalty will result in a first down
if play.startingYard + int(play.distance) <= event.endingYard:
if play.result < gameDefs.RESULT_FIRST_DOWN and play.down != 0:
play.result = gameDefs.RESULT_FIRST_DOWN
if penaltyLoc2 < len(item):
nextEvent = Event()
nextEvent.startingYard = event.endingYard
play.events.append(nextEvent)
nextEvent.eventType = gameDefs.EVENT_PENALTY
getPenalty(drive, play, nextEvent, item, itemWNames, penaltyLoc2)
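# scan the segment for '1st down', 'touchdown', successful conversions,
# 'safety' and 'touchback' wording and set the play result (and ending yard)
# accordingly, distinguishing offensive and defensive outcomes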
def resolveResult(play, event, item, playLoc, nextPlayLoc):
if item.find('1st down', playLoc, nextPlayLoc) != -1:
if play.result < gameDefs.RESULT_FIRST_DOWN:
play.result = gameDefs.RESULT_FIRST_DOWN
if item.find('touchdown', playLoc, nextPlayLoc) != -1:
event.endingYard = 100
if play.didChangePoss(event):
play.result = gameDefs.RESULT_DEFENSIVE_TOUCHDOWN
else:
play.result = gameDefs.RESULT_TOUCHDOWN
if (item.find('conversion', playLoc, nextPlayLoc) != -1) and (item.find('failed', playLoc, nextPlayLoc) == -1):
event.endingYard = 100
if play.didChangePoss(event):
play.result = gameDefs.RESULT_DEFENSIVE_TOUCHDOWN
else:
play.result = gameDefs.RESULT_TOUCHDOWN
if item.find('safety', playLoc, nextPlayLoc) != -1:
event.endingYard = 0
if play.didChangePoss(event):
play.result = gameDefs.RESULT_DEFENSIVE_SAFETY
else:
play.result = gameDefs.RESULT_SAFETY
if item.find('touchback', playLoc, nextPlayLoc) != -1:
if play.duringReturn(event):
if event.endingYard == 0:
play.result = gameDefs.RESULT_TOUCHBACK
else:
play.result = gameDefs.RESULT_DEFENSIVE_TOUCHBACK
event.endingYard = 100
else:
play.result = gameDefs.RESULT_TOUCHBACK
event.endingYard = 100
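# work out the yardage gained over a segment, either from an explicit
# 'for N yards' / 'loss of N' phrase or, failing that, from the spot the ball
# ended up at ('to the', 'at the', 'ball on', or the end zone); the yard-line
# branches also update event.endingYard directly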
def getGain(drive, event, item, forPenalty = False):
#print 'GAIN - ' + item
gain = 0
penaltyLoc = item.find(' penalty')
if penaltyLoc == -1:
penaltyLoc = len(item)
forLoc = item.find(' for ')
if forPenalty:
yardLoc = item.find(' yard', 0, penaltyLoc)
if yardLoc == -1:
yardLoc = item.find(' yds ', 0, penaltyLoc)
forLoc = -1
#print 'penalty yardLoc = ' + str(yardLoc)
elif forLoc == -1:
yardLoc = item.find(' yard', 0, penaltyLoc)
if yardLoc == -1:
yardLoc = item.find(' yds ', 0, penaltyLoc)
elif forLoc == item.find(' for a touch') or forLoc == item.find(' for a safety') or forLoc == item.find(' for a 1st down'):
forLoc = -1
yardLoc = item.find(' yard', 0, penaltyLoc)
if yardLoc == -1:
yardLoc = item.find(' yds ', 0, penaltyLoc)
else:
yardLoc = item.find(' yard', forLoc, penaltyLoc)
if yardLoc == -1:
yardLoc = item.find(' yds ', forLoc, penaltyLoc)
fiftyYardLoc = item.find(' the 50 yard ')
if fiftyYardLoc != -1 and (fiftyYardLoc+7) == yardLoc:
yardLoc = -1
lossLoc = item.find('loss of', forLoc, yardLoc)
#print str(forLoc) + ' ' + str(yardLoc) + ' ' + str(lossLoc)
noGainLoc = item.find('no gain')
toTheLoc = item.find(' to the ')
atTheLoc = item.find(' at the ')
atLoc = item.find(' at ')
ballOnLoc = item.find(' ball on ')
endZoneLoc = item.find(' in the endzone')
if endZoneLoc == -1:
endZoneLoc = item.find(' in the end zone')
if toTheLoc != -1 and atTheLoc != -1:
if toTheLoc < atTheLoc:
atTheLoc = -1
else:
toTheLoc = -1
if yardLoc != -1 and (noGainLoc == -1 or yardLoc < noGainLoc):
if lossLoc != -1:
gainStr = item[lossLoc+8:yardLoc]
if gainStr.strip('- ').isdigit():
gain = -int(gainStr)
else:
print '[loss err] ' + item + ' - ' + gainStr
else:
if forLoc != -1:
gainStr = item[forLoc+5:yardLoc]
else:
gainStr = item[0:yardLoc]
parenLoc = gainStr.find(')')
if parenLoc != -1:
gainStr = gainStr[parenLoc+1:]
if gainStr.strip('- ').isdigit():
gain = int(gainStr)
else:
print '[gain err] ' + item + ' - ' + gainStr
#elif noGainLoc != -1:
# gain = 0
elif toTheLoc != -1:
if fiftyYardLoc == toTheLoc+3:
yardLine = 50
else:
yardLine = getYardLine(drive, event, item[toTheLoc+8:])
event.endingYard = yardLine
gain = event.endingYard - event.startingYard
elif atTheLoc != -1:
if fiftyYardLoc == atTheLoc+3:
yardLine = 50
else:
yardLine = getYardLine(drive, event, item[atTheLoc+8:])
event.endingYard = yardLine
gain = event.endingYard - event.startingYard
elif atLoc != -1:
yardLine = getYardLine(drive, event, item[atLoc+4:])
event.endingYard = yardLine
gain = event.endingYard - event.startingYard
elif ballOnLoc != -1:
# print 'ball on'
# print item[ballOnLoc+9:]
yardLine = getYardLine(drive, event, item[ballOnLoc+9:])
# print 'yardline = ' + str(yardline)
event.endingYard = yardLine
gain = event.endingYard - event.startingYard
elif endZoneLoc != -1:
# now, we know the ball ended up in the end zone
# but we don't know which end zone
# so, we guess. an educated guess, though:
# since the ball is unlikely to travel the distance
# of the field on a single play, we pick the closer endzone
if event.startingYard > 50:
event.endingYard = 100
else:
event.endingYard = 0
gain = event.endingYard - event.startingYard
if noGainLoc != -1 and not forPenalty:
gain = 0
#else:
#print '[lossorgain err] ' + item
#print 'gain = ' + str(gain) + ', item = ' + str(item)
return gain
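# convert a yard-line phrase such as '<team> 35' into an absolute 0-100 value,
# using the team abbreviation (or its letters) to decide which side of the 50
# the spot is on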
def getYardLine(drive, event, item):
# decide team
ownSide = True
if drive.eventDuringReturn(event):
offense = drive.medDef.lower()
defense = drive.medPoss.lower()
else:
offense = drive.medPoss.lower()
defense = drive.medDef.lower()
spaceLoc = item.find(' ')
sideName = item[0:spaceLoc]
if sideName == offense:
ownSide = True
elif sideName == defense:
ownSide = False
if item[0] == offense[0] and item[0] != defense[0]:
ownSide = True
elif item[0] != offense[0] and item[0] == defense[0]:
ownSide = False
else:
i = 1
while i < 5:
letter = item[i]
offLoc = offense.find(letter)
defLoc = defense.find(letter)
if offLoc != -1 and defLoc == -1:
ownSide = True
break
elif offLoc == -1 and defLoc != -1:
ownSide = False
break
elif offLoc < defLoc:
ownSide = True
break
elif offLoc > defLoc:
ownSide = False
break
else:
i += 1
# get yardline
i = 1
while i < len(item) and not item[i].isdigit():
i += 1
numStart = i
while i < len(item) and item[i].isdigit():
i += 1
numEnd = i
numerals = item[numStart:numEnd]
if numerals.isdigit():
yardLine = int(numerals)
else:
print '[yardline err:' + str(numStart) + ':' + str(numEnd) + '] ' + item
yardLine = 50
if not ownSide:
yardLine = 100 - yardLine
return yardLine
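# find the offensive player named after the given phrase within the segment
# and store it as offense1, or offense2 if offense1 is already filled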
def getOffensiveNames(event, item, itemWNames, playLoc, nextPlayLoc, phrase):
phraseLoc = item.find(phrase, playLoc, nextPlayLoc)
if phraseLoc != -1:
phraseLoc += len(phrase)
str = item[phraseLoc:nextPlayLoc]
strWNames = itemWNames[phraseLoc:nextPlayLoc]
offensiveName = findName(str, strWNames)
if event.offense1 != '':
event.offense2 = offensiveName
else:
event.offense1 = offensiveName
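# find the defender(s) named after the given phrase (or in parentheses) within
# the segment, splitting on ' and ' or ';', and store them as defense1/defense2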
def getDefenderNames(event, item, itemWNames, playLoc, nextPlayLoc, phrase):
#print itemWNames
phraseLoc = item.find(phrase, playLoc, nextPlayLoc)
# in the case of finding names for penalties, make an extra check
ballOnLoc = item.find(' ball on ', playLoc, nextPlayLoc)
while phraseLoc == ballOnLoc+5:
phraseLoc = item.find(phrase, phraseLoc+1, nextPlayLoc)
ballOnLoc = item.find(' ball on ', ballOnLoc+1, nextPlayLoc)
openParenLoc = item.find('(', playLoc, nextPlayLoc)
if phraseLoc != -1:
phraseLoc += len(phrase)
str = item[phraseLoc:nextPlayLoc]
strWNames = itemWNames[phraseLoc:nextPlayLoc]
defenderName = findName(str, strWNames)
elif openParenLoc != -1:
closeParenLoc = item.find(')', openParenLoc, nextPlayLoc)
defenderName = itemWNames[openParenLoc+1:closeParenLoc]
# we've already gotten these names earlier; don't worry about it
if defenderName.find('blocked by ') == 0:
return
else:
return
#print defenderName
andLoc = defenderName.find(' and ')
semicolonLoc = defenderName.find(';')
if andLoc != -1:
defenderName1 = defenderName[0:andLoc]
defenderName2 = defenderName[andLoc+5:]
event.defense1 = defenderName1.strip()
event.defense2 = defenderName2.strip()
elif semicolonLoc != -1:
defenderName1 = defenderName[0:semicolonLoc]
defenderName2 = defenderName[semicolonLoc+1:]
event.defense1 = defenderName1.strip()
event.defense2 = defenderName2.strip()
else:
event.defense1 = defenderName.strip()
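# extract a player name from the start of the string, trimming at
# at/in/for/out-of-bounds/parentheses, dropping a leading team abbreviation
# when a drive is supplied, and cleaning trailing commas and periods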
def findName(str, strWNames, drive = None):
atLoc = str.find(' at ')
inLoc = str.find(' in ')
forLoc = str.find(' for ')
#andLoc = str.find(' and ')
outOfBoundsLoc = str.find(' out-of-bounds')
openParenLoc = str.find('(')
closeParenLoc = str.find(')')
loc = len(str)
if atLoc != -1 and atLoc < loc:
loc = atLoc
if inLoc != -1 and inLoc < loc:
loc = inLoc
if forLoc != -1 and forLoc < loc:
loc = forLoc
#if andLoc != -1 and andLoc < loc:
# loc = andLoc
if outOfBoundsLoc != -1 and outOfBoundsLoc < loc:
loc = outOfBoundsLoc
if openParenLoc != -1 and openParenLoc < loc:
loc = openParenLoc
if closeParenLoc != -1 and closeParenLoc < loc:
loc = closeParenLoc
name = strWNames[0:loc].strip()
if name[0:3].lower() == 'n/a':
# don't return n/a, just assume name is unknown, return nothing
return ''
if name[0:4].lower() == 'team':
# don't go looking for anything else, just return 'team'
return name[0:4]
spaceLoc = name.find(' ')
if spaceLoc != -1:
if drive:
teamID = name[0:spaceLoc]
if teamID == drive.medPoss or teamID == drive.teamPoss:
spaceLoc = name.find(' ', spaceLoc+1)
elif teamID == drive.medDef or teamID == drive.teamDef:
spaceLoc = name.find(' ', spaceLoc+1)
if spaceLoc != -1:
commaLoc = name.find(',', spaceLoc)
if commaLoc > spaceLoc:
name = name[0:commaLoc].strip()
periodLoc = name.rfind('.')
length = len(name)
if periodLoc == length-1 and spaceLoc != length-3:
name = name[0:periodLoc].strip()
#print name
return name
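# return True if the string refers to teamName, False if it refers to
# otherTeamName; falls back to comparing the trailing letters of the string
# against both names when neither appears verbatim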
def whichTeam(teamName, otherTeamName, string):
if string.find(teamName) != -1:
return True
elif string.find(otherTeamName) != -1:
return False
else:
str = string.strip()
i = len(str) - 1
while i >= 0:
letter = str[i]
teamLoc = teamName.rfind(letter)
otherLoc = otherTeamName.rfind(letter)
if teamLoc != -1 and otherLoc == -1:
return True
elif teamLoc == -1 and otherLoc != -1:
return False
elif teamLoc > otherLoc:
return True
elif teamLoc < otherLoc:
return False
else:
i -= 1
return True
if __name__ == '__main__':
import sys
print geturlstring(sys.argv[1])
|
|
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import logging
import datetime
from io import BytesIO
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .exceptions import HTTPError, RequestException, MissingSchema, InvalidURL
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len)
from .compat import (
cookielib, urlparse, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
log = logging.getLogger(__name__)
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files) or isinstance(data, str):
return None
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
else:
fn, fp, ft = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, str):
fp = StringIO(fp)
if isinstance(fp, bytes):
fp = BytesIO(fp)
if ft:
new_v = (fn, fp.read(), ft)
else:
new_v = (fn, fp.read())
new_fields.append((k, new_v))
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach the request. If a dictionary is provided, form-encoding will take place.
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=dict(),
params=dict(),
auth=None,
cookies=None,
hooks=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.params = params
self.auth = auth
self.cookies = cookies
self.hooks = hooks
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare_method(self.method)
p.prepare_url(self.url, self.params)
p.prepare_headers(self.headers)
p.prepare_cookies(self.cookies)
p.prepare_body(self.data, self.files)
p.prepare_auth(self.auth, self.url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
p.prepare_hooks(self.hooks)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
try:
url = unicode(url)
except NameError:
# We're on Python 3.
url = str(url)
except UnicodeDecodeError:
pass
# Support for unicode domain names and paths.
scheme, auth, host, port, path, query, fragment = parse_url(url)
if not scheme:
raise MissingSchema("Invalid URL %r: No schema supplied" % url)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
headers = dict((name.encode('ascii'), value) for name, value in headers.items())
self.headers = CaseInsensitiveDict(headers)
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
is_stream = False
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, basestring),
not isinstance(data, list),
not isinstance(data, dict)
])
try:
length = super_len(data)
except (TypeError, AttributeError):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
# Check if file, fo, generator, iterator.
# If not, run through normal process.
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if (content_type) and (not 'content-type' in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = str(l)
elif self.method not in ('GET', 'HEAD'):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
cookies = cookies
else:
cookies = cookiejar_from_dict(cookies)
if 'cookie' not in self.headers:
cookie_header = get_cookie_header(cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
        #: Requires that ``stream=True`` on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except RequestException:
return False
return True
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the lovely Charade library
(Thanks, Ian!)."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
"""
if self._content_consumed:
# simulate reading small chunks of the content
return iter_slices(self._content, chunk_size)
def generate():
try:
# Special case for urllib3.
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except AttributeError:
# Standard file-like object.
while 1:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
gen = generate()
if decode_unicode:
gen = stream_decode_response_unicode(gen, self)
return gen
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size,
decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
        If Response.encoding is None and the chardet module is available, the
        encoding will be guessed.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
return json.loads(self.content.decode(encoding), **kwargs)
return json.loads(self.text or self.content, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
return self.raw.release_conn()
|
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from sqlalchemy.orm import attributes as sql_attr
from oslo.config import cfg
from neutron.common import constants
from neutron.db import models_v2
from neutron.notifiers import nova
from neutron.tests import base
class TestNovaNotify(base.BaseTestCase):
def setUp(self, plugin=None):
super(TestNovaNotify, self).setUp()
class FakePlugin(object):
def get_port(self, context, port_id):
return {'device_id': 'instance_uuid',
'device_owner': 'compute:None'}
self.nova_notifier = nova.Notifier()
self.nova_notifier._plugin_ref = FakePlugin()
def test_notify_port_status_all_values(self):
states = [constants.PORT_STATUS_ACTIVE, constants.PORT_STATUS_DOWN,
constants.PORT_STATUS_ERROR, constants.PORT_STATUS_BUILD,
sql_attr.NO_VALUE]
# test all combinations
for previous_port_status in states:
for current_port_status in states:
port = models_v2.Port(id='port-uuid', device_id='device-uuid',
device_owner="compute:",
status=current_port_status)
self._record_port_status_changed_helper(current_port_status,
previous_port_status,
port)
def test_port_without_device_owner_no_notify(self):
port = models_v2.Port(id='port-uuid', device_id='device-uuid',
status=constants.PORT_STATUS_ACTIVE)
self._record_port_status_changed_helper(constants.PORT_STATUS_ACTIVE,
sql_attr.NO_VALUE,
port)
def test_port_without_device_id_no_notify(self):
port = models_v2.Port(id='port-uuid', device_owner="network:dhcp",
status=constants.PORT_STATUS_ACTIVE)
self._record_port_status_changed_helper(constants.PORT_STATUS_ACTIVE,
sql_attr.NO_VALUE,
port)
def test_port_without_id_no_notify(self):
port = models_v2.Port(device_id='device-uuid',
device_owner="compute:",
status=constants.PORT_STATUS_ACTIVE)
self._record_port_status_changed_helper(constants.PORT_STATUS_ACTIVE,
sql_attr.NO_VALUE,
port)
def test_non_compute_instances_no_notify(self):
port = models_v2.Port(id='port-uuid', device_id='device-uuid',
device_owner="network:dhcp",
status=constants.PORT_STATUS_ACTIVE)
self._record_port_status_changed_helper(constants.PORT_STATUS_ACTIVE,
sql_attr.NO_VALUE,
port)
def _record_port_status_changed_helper(self, current_port_status,
previous_port_status, port):
if not (port.device_id and port.id and port.device_owner and
port.device_owner.startswith('compute:')):
return
if (previous_port_status == constants.PORT_STATUS_ACTIVE and
current_port_status == constants.PORT_STATUS_DOWN):
event_name = nova.VIF_UNPLUGGED
elif (previous_port_status in [sql_attr.NO_VALUE,
constants.PORT_STATUS_DOWN,
constants.PORT_STATUS_BUILD]
and current_port_status in [constants.PORT_STATUS_ACTIVE,
constants.PORT_STATUS_ERROR]):
event_name = nova.VIF_PLUGGED
else:
return
status = nova.NEUTRON_NOVA_EVENT_STATUS_MAP.get(current_port_status)
self.nova_notifier.record_port_status_changed(port,
current_port_status,
previous_port_status,
None)
event = {'server_uuid': 'device-uuid', 'status': status,
'name': event_name, 'tag': 'port-uuid'}
self.assertEqual(event, port._notify_event)
def test_update_fixed_ip_changed(self):
returned_obj = {'port':
{'device_owner': u'compute:dfd',
'id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222',
'device_id': u'instance_uuid'}}
expected_event = {'server_uuid': 'instance_uuid',
'name': 'network-changed'}
event = self.nova_notifier.create_port_changed_event('update_port',
{}, returned_obj)
self.assertEqual(event, expected_event)
def test_create_floatingip_notify(self):
returned_obj = {'floatingip':
{'port_id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222'}}
expected_event = {'server_uuid': 'instance_uuid',
'name': 'network-changed'}
event = self.nova_notifier.create_port_changed_event(
'create_floatingip', {}, returned_obj)
self.assertEqual(event, expected_event)
def test_create_floatingip_no_port_id_no_notify(self):
returned_obj = {'floatingip':
{'port_id': None}}
event = self.nova_notifier.create_port_changed_event(
'create_floatingip', {}, returned_obj)
        self.assertEqual(event, None)
def test_delete_floatingip_notify(self):
returned_obj = {'floatingip':
{'port_id': u'bee50827-bcee-4cc8-91c1-a27b0ce54222'}}
expected_event = {'server_uuid': 'instance_uuid',
'name': 'network-changed'}
event = self.nova_notifier.create_port_changed_event(
'delete_floatingip', {}, returned_obj)
self.assertEqual(expected_event, event)
def test_delete_floatingip_no_port_id_no_notify(self):
returned_obj = {'floatingip':
{'port_id': None}}
event = self.nova_notifier.create_port_changed_event(
'delete_floatingip', {}, returned_obj)
self.assertEqual(event, None)
def test_associate_floatingip_notify(self):
returned_obj = {'floatingip':
{'port_id': u'5a39def4-3d3f-473d-9ff4-8e90064b9cc1'}}
original_obj = {'port_id': None}
expected_event = {'server_uuid': 'instance_uuid',
'name': 'network-changed'}
event = self.nova_notifier.create_port_changed_event(
'update_floatingip', original_obj, returned_obj)
self.assertEqual(expected_event, event)
def test_disassociate_floatingip_notify(self):
returned_obj = {'floatingip': {'port_id': None}}
original_obj = {'port_id': '5a39def4-3d3f-473d-9ff4-8e90064b9cc1'}
expected_event = {'server_uuid': 'instance_uuid',
'name': 'network-changed'}
event = self.nova_notifier.create_port_changed_event(
'update_floatingip', original_obj, returned_obj)
self.assertEqual(expected_event, event)
def test_no_notification_notify_nova_on_port_data_changes_false(self):
cfg.CONF.set_override('notify_nova_on_port_data_changes', False)
with mock.patch.object(self.nova_notifier,
'send_events') as send_events:
self.nova_notifier.send_network_change('update_floatingip',
{}, {})
            self.assertFalse(send_events.called)
def test_nova_send_events_returns_bad_list(self):
with mock.patch.object(
self.nova_notifier.nclient.server_external_events,
'create') as nclient_create:
nclient_create.return_value = 'i am a string!'
self.nova_notifier.send_events()
def test_nova_send_events_raises(self):
with mock.patch.object(
self.nova_notifier.nclient.server_external_events,
'create') as nclient_create:
nclient_create.side_effect = Exception
self.nova_notifier.send_events()
def test_nova_send_events_returns_non_200(self):
with mock.patch.object(
self.nova_notifier.nclient.server_external_events,
'create') as nclient_create:
nclient_create.return_value = [{'code': 404,
'name': 'network-changed',
'server_uuid': 'uuid'}]
self.nova_notifier.pending_events.append(
{'name': 'network-changed', 'server_uuid': 'uuid'})
self.nova_notifier.send_events()
def test_nova_send_events_return_200(self):
with mock.patch.object(
self.nova_notifier.nclient.server_external_events,
'create') as nclient_create:
nclient_create.return_value = [{'code': 200,
'name': 'network-changed',
'server_uuid': 'uuid'}]
self.nova_notifier.pending_events.append(
{'name': 'network-changed', 'server_uuid': 'uuid'})
self.nova_notifier.send_events()
def test_nova_send_events_multiple(self):
with mock.patch.object(
self.nova_notifier.nclient.server_external_events,
'create') as nclient_create:
nclient_create.return_value = [{'code': 200,
'name': 'network-changed',
'server_uuid': 'uuid'},
{'code': 200,
'name': 'network-changed',
'server_uuid': 'uuid'}]
self.nova_notifier.pending_events.append(
{'name': 'network-changed', 'server_uuid': 'uuid'})
self.nova_notifier.pending_events.append(
{'name': 'network-changed', 'server_uuid': 'uuid'})
self.nova_notifier.send_events()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# DCASE 2016::Acoustic Scene Classification / Baseline System
import argparse
import textwrap
import timeit
import skflow
from sklearn import mixture
from sklearn import preprocessing as pp
from sklearn.externals import joblib
from sklearn.metrics import confusion_matrix
from src.dataset import *
from src.evaluation import *
from src.features import *
__version_info__ = ('1', '0', '0')
__version__ = '.'.join(__version_info__)
final_result = {}
train_start = 0.0
train_end = 0.0
test_start = 0.0
test_end = 0.0
def main(argv):
numpy.random.seed(123456) # let's make randomization predictable
    tot_start = timeit.default_timer()
    # make sure the timing variables used in the summary printout below exist
    # even when the corresponding processing stage is skipped by the flow config
    train_start = train_end = test_start = test_end = 0.0
parser = argparse.ArgumentParser(
prefix_chars='-+',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
DCASE 2016
Task 1: Acoustic Scene Classification
Baseline system
---------------------------------------------
Tampere University of Technology / Audio Research Group
Author: Toni Heittola ( [email protected] )
System description
                This is a baseline implementation for the D-CASE 2016 challenge acoustic scene classification task.
Features: MFCC (static+delta+acceleration)
Classifier: GMM
'''))
# Setup argument handling
parser.add_argument("-development", help="Use the system in the development mode", action='store_true',
default=False, dest='development')
parser.add_argument("-challenge", help="Use the system in the challenge mode", action='store_true',
default=False, dest='challenge')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
args = parser.parse_args()
# Load parameters from config file
parameter_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.splitext(os.path.basename(__file__))[0] + '.yaml')
params = load_parameters(parameter_file)
params = process_parameters(params)
make_folders(params)
title("DCASE 2016::Acoustic Scene Classification / Baseline System")
# Check if mode is defined
if not (args.development or args.challenge):
args.development = True
args.challenge = False
dataset_evaluation_mode = 'folds'
if args.development and not args.challenge:
print "Running system in development mode"
dataset_evaluation_mode = 'folds'
elif not args.development and args.challenge:
print "Running system in challenge mode"
dataset_evaluation_mode = 'full'
# Get dataset container class
dataset = eval(params['general']['development_dataset'])(data_path=params['path']['data'])
# Fetch data over internet and setup the data
# ==================================================
if params['flow']['initialize']:
dataset.fetch()
# Extract features for all audio files in the dataset
# ==================================================
if params['flow']['extract_features']:
section_header('Feature extraction')
# Collect files in train sets
files = []
for fold in dataset.folds(mode=dataset_evaluation_mode):
for item_id, item in enumerate(dataset.train(fold)):
if item['file'] not in files:
files.append(item['file'])
for item_id, item in enumerate(dataset.test(fold)):
if item['file'] not in files:
files.append(item['file'])
files = sorted(files)
# Go through files and make sure all features are extracted
do_feature_extraction(files=files,
dataset=dataset,
feature_path=params['path']['features'],
params=params['features'],
overwrite=params['general']['overwrite'])
foot()
# Prepare feature normalizers
# ==================================================
if params['flow']['feature_normalizer']:
section_header('Feature normalizer')
do_feature_normalization(dataset=dataset,
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite'])
foot()
# System training
# ==================================================
if params['flow']['train_system']:
section_header('System training')
train_start = timeit.default_timer()
do_system_training(dataset=dataset,
model_path=params['path']['models'],
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
classifier_params=params['classifier']['parameters'],
classifier_method=params['classifier']['method'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite']
)
train_end = timeit.default_timer()
foot()
# System evaluation in development mode
if args.development and not args.challenge:
# System testing
# ==================================================
if params['flow']['test_system']:
section_header('System testing')
test_start = timeit.default_timer()
do_system_testing(dataset=dataset,
feature_path=params['path']['features'],
result_path=params['path']['results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=params['general']['overwrite']
)
test_end = timeit.default_timer()
foot()
# System evaluation
# ==================================================
if params['flow']['evaluate_system']:
section_header('System evaluation')
do_system_evaluation(dataset=dataset,
dataset_evaluation_mode=dataset_evaluation_mode,
result_path=params['path']['results'])
foot()
# System evaluation with challenge data
elif not args.development and args.challenge:
# Fetch data over internet and setup the data
challenge_dataset = eval(params['general']['challenge_dataset'])()
if params['flow']['initialize']:
challenge_dataset.fetch()
# System testing
if params['flow']['test_system']:
section_header('System testing with challenge data')
do_system_testing(dataset=challenge_dataset,
feature_path=params['path']['features'],
result_path=params['path']['challenge_results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=True
)
foot()
print " "
print "Your results for the challenge data are stored at [" + params['path']['challenge_results'] + "]"
print " "
tot_end = timeit.default_timer()
print " "
print "Train Time : " + str(train_end - train_start)
print " "
print " "
print "Test Time : " + str(test_end - test_start)
print " "
print " "
print "Total Time : " + str(tot_end - tot_start)
print " "
final_result['train_time'] = train_end - train_start
final_result['test_time'] = test_end - test_start
final_result['tot_time'] = tot_end - tot_start
joblib.dump(final_result, 'result.pkl')
return 0
def process_parameters(params):
"""Parameter post-processing.
Parameters
----------
params : dict
parameters in dict
Returns
-------
params : dict
processed parameters
"""
# Convert feature extraction window and hop sizes seconds to samples
params['features']['mfcc']['win_length'] = int(params['features']['win_length_seconds'] * params['features']['fs'])
params['features']['mfcc']['hop_length'] = int(params['features']['hop_length_seconds'] * params['features']['fs'])
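    # for example (illustrative values, not taken from the config file): with
    # fs=44100 and win_length_seconds=0.04, win_length = int(0.04 * 44100) = 1764
    # samples, and hop_length_seconds=0.02 gives hop_length = 882 samples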
# Copy parameters for current classifier method
params['classifier']['parameters'] = params['classifier_parameters'][params['classifier']['method']]
# Hash
params['features']['hash'] = get_parameter_hash(params['features'])
params['classifier']['hash'] = get_parameter_hash(params['classifier'])
# Paths
params['path']['data'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), params['path']['data'])
params['path']['base'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), params['path']['base'])
# Features
params['path']['features_'] = params['path']['features']
params['path']['features'] = os.path.join(params['path']['base'],
params['path']['features'],
params['features']['hash'])
# Feature normalizers
params['path']['feature_normalizers_'] = params['path']['feature_normalizers']
params['path']['feature_normalizers'] = os.path.join(params['path']['base'],
params['path']['feature_normalizers'],
params['features']['hash'])
# Models
params['path']['models_'] = params['path']['models']
params['path']['models'] = os.path.join(params['path']['base'],
params['path']['models'],
params['features']['hash'], params['classifier']['hash'])
# Results
params['path']['results_'] = params['path']['results']
params['path']['results'] = os.path.join(params['path']['base'],
params['path']['results'],
params['features']['hash'], params['classifier']['hash'])
return params
def make_folders(params, parameter_filename='parameters.yaml'):
"""Create all needed folders, and saves parameters in yaml-file for easier manual browsing of data.
Parameters
----------
params : dict
parameters in dict
parameter_filename : str
filename to save parameters used to generate the folder name
Returns
-------
nothing
"""
# Check that target path exists, create if not
check_path(params['path']['features'])
check_path(params['path']['feature_normalizers'])
check_path(params['path']['models'])
check_path(params['path']['results'])
# Save parameters into folders to help manual browsing of files.
# Features
feature_parameter_filename = os.path.join(params['path']['features'], parameter_filename)
if not os.path.isfile(feature_parameter_filename):
save_parameters(feature_parameter_filename, params['features'])
# Feature normalizers
feature_normalizer_parameter_filename = os.path.join(params['path']['feature_normalizers'], parameter_filename)
if not os.path.isfile(feature_normalizer_parameter_filename):
save_parameters(feature_normalizer_parameter_filename, params['features'])
# Models
model_features_parameter_filename = os.path.join(params['path']['base'],
params['path']['models_'],
params['features']['hash'],
parameter_filename)
if not os.path.isfile(model_features_parameter_filename):
save_parameters(model_features_parameter_filename, params['features'])
model_models_parameter_filename = os.path.join(params['path']['base'],
params['path']['models_'],
params['features']['hash'],
params['classifier']['hash'],
parameter_filename)
if not os.path.isfile(model_models_parameter_filename):
save_parameters(model_models_parameter_filename, params['classifier'])
# Results
# Save parameters into folders to help manual browsing of files.
result_features_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
parameter_filename)
if not os.path.isfile(result_features_parameter_filename):
save_parameters(result_features_parameter_filename, params['features'])
result_models_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
params['classifier']['hash'],
parameter_filename)
if not os.path.isfile(result_models_parameter_filename):
save_parameters(result_models_parameter_filename, params['classifier'])
def get_feature_filename(audio_file, path, extension='cpickle'):
"""Get feature filename
Parameters
----------
audio_file : str
audio file name from which the features are extracted
path : str
feature path
extension : str
file extension
(Default value='cpickle')
Returns
-------
feature_filename : str
full feature filename
"""
audio_filename = os.path.split(audio_file)[1]
return os.path.join(path, os.path.splitext(audio_filename)[0] + '.' + extension)
def get_feature_normalizer_filename(fold, path, extension='cpickle'):
"""Get normalizer filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
normalizer path
extension : str
file extension
(Default value='cpickle')
Returns
-------
normalizer_filename : str
full normalizer filename
"""
return os.path.join(path, 'scale_fold' + str(fold) + '.' + extension)
def get_model_filename(fold, path, extension='cpickle'):
"""Get model filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
model path
extension : str
file extension
(Default value='cpickle')
Returns
-------
model_filename : str
full model filename
"""
return os.path.join(path, 'model_fold' + str(fold) + '.' + extension)
def get_result_filename(fold, path, extension='txt'):
"""Get result filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
result path
extension : str
file extension
        (Default value='txt')
Returns
-------
result_filename : str
full result filename
"""
if fold == 0:
return os.path.join(path, 'results.' + extension)
else:
return os.path.join(path, 'results_fold' + str(fold) + '.' + extension)
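# Minimal usage sketch for the filename helpers above (the file name and fold number
# are hypothetical; in the script they come from the processed params dict):
#
#   feature_file = get_feature_filename('a001_10_20.wav', params['path']['features'])
#   normalizer_file = get_feature_normalizer_filename(1, params['path']['feature_normalizers'])
#   model_file = get_model_filename(1, params['path']['models'])
#   result_file = get_result_filename(1, params['path']['results'])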
def do_feature_extraction(files, dataset, feature_path, params, overwrite=False):
"""Feature extraction
Parameters
----------
files : list
file list
dataset : class
dataset class
feature_path : str
path where the features are saved
params : dict
parameter dict
overwrite : bool
overwrite existing feature files
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Audio file not found.
"""
# Check that target path exists, create if not
check_path(feature_path)
for file_id, audio_filename in enumerate(files):
# Get feature filename
current_feature_file = get_feature_filename(audio_file=os.path.split(audio_filename)[1], path=feature_path)
progress(title_text='Extracting',
percentage=(float(file_id) / len(files)),
note=os.path.split(audio_filename)[1])
if not os.path.isfile(current_feature_file) or overwrite:
# Load audio data
if os.path.isfile(dataset.relative_to_absolute_path(audio_filename)):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(audio_filename), mono=True,
fs=params['fs'])
else:
raise IOError("Audio file not found [%s]" % audio_filename)
# Extract features
if params['method'] == 'lfcc':
feature_file_txt = get_feature_filename(audio_file=os.path.split(audio_filename)[1],
path=feature_path,
extension='txt')
feature_data = feature_extraction_lfcc(feature_file_txt)
elif params['method'] == 'traps':
feature_data = feature_extraction_traps(y=y,
fs=fs,
traps_params=params['traps'],
mfcc_params=params['mfcc'])
else:
# feature_data['feat'].shape is (1501, 60)
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=params['include_mfcc0'],
include_delta=params['include_delta'],
include_acceleration=params['include_acceleration'],
mfcc_params=params['mfcc'],
delta_params=params['mfcc_delta'],
acceleration_params=params['mfcc_acceleration'])
# Save
save_data(current_feature_file, feature_data)
def do_feature_normalization(dataset, feature_normalizer_path, feature_path, dataset_evaluation_mode='folds',
overwrite=False):
"""Feature normalization
    Calculates normalization factors for each evaluation fold based on the available training material.
Parameters
----------
dataset : class
dataset class
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is considered to belong to one fold.
(Default value='folds')
overwrite : bool
overwrite existing normalizers
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Feature file not found.
"""
# Check that target path exists, create if not
check_path(feature_normalizer_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_normalizer_file = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if not os.path.isfile(current_normalizer_file) or overwrite:
# Initialize statistics
file_count = len(dataset.train(fold))
normalizer = FeatureNormalizer()
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
if os.path.isfile(get_feature_filename(audio_file=item['file'], path=feature_path)):
feature_data = load_data(get_feature_filename(audio_file=item['file'], path=feature_path))['stat']
else:
raise IOError("Feature file not found [%s]" % (item['file']))
# Accumulate statistics
normalizer.accumulate(feature_data)
# Calculate normalization factors
normalizer.finalize()
# Save
save_data(current_normalizer_file, normalizer)
def do_system_training(dataset, model_path, feature_normalizer_path, feature_path, classifier_params,
dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):
"""System training
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
dataset : class
dataset class
model_path : str
path where the models are saved.
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
classifier_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is considered to belong to one fold.
(Default value='folds')
    classifier_method : str ['gmm', 'dnn']
        classifier method, GMM and DNN are supported
(Default value='gmm')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Feature normalizer not found.
Feature file not found.
"""
if classifier_method != 'gmm' and classifier_method != 'dnn':
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Check that target path exists, create if not
check_path(model_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_model_file = get_model_filename(fold=fold, path=model_path)
if not os.path.isfile(current_model_file) or overwrite:
# Load normalizer
feature_normalizer_filename = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if os.path.isfile(feature_normalizer_filename):
normalizer = load_data(feature_normalizer_filename)
else:
raise IOError("Feature normalizer not found [%s]" % feature_normalizer_filename)
# Initialize model container
model_container = {'normalizer': normalizer, 'models': {}}
# Collect training examples
file_count = len(dataset.train(fold))
data = {}
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
raise IOError("Features not found [%s]" % (item['file']))
# Scale features
feature_data = model_container['normalizer'].normalize(feature_data)
# Store features per class label
if item['scene_label'] not in data:
data[item['scene_label']] = feature_data
else:
data[item['scene_label']] = numpy.vstack((data[item['scene_label']], feature_data))
le = pp.LabelEncoder()
tot_data = {}
# Train models for each class
for label in data:
progress(title_text='Train models',
fold=fold,
note=label)
if classifier_method == 'gmm':
model_container['models'][label] = mixture.GMM(**classifier_params).fit(data[label])
elif classifier_method == 'dnn':
if 'x' not in tot_data:
tot_data['x'] = data[label]
tot_data['y'] = numpy.repeat(label, len(data[label]), axis=0)
else:
tot_data['x'] = numpy.vstack((tot_data['x'], data[label]))
tot_data['y'] = numpy.hstack((tot_data['y'], numpy.repeat(label, len(data[label]), axis=0)))
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
            # Build and train the DNN only when it is requested; constructing it with
            # GMM parameters would fail, and the GMM branch needs no extra step here.
            if classifier_method == 'dnn':
                clf = skflow.TensorFlowDNNClassifier(**classifier_params)
                tot_data['y'] = le.fit_transform(tot_data['y'])
                clf.fit(tot_data['x'], tot_data['y'])
                clf.save('dnn/dnnmodel1')
# Save models
save_data(current_model_file, model_container)
def do_system_testing(dataset, result_path, feature_path, model_path, feature_params,
dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):
"""System testing.
    If extracted features are not found on disk, they are extracted but not saved.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
feature_path : str
path where the features are saved.
model_path : str
path where the models are saved.
feature_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is considered to belong to one fold.
(Default value='folds')
    classifier_method : str ['gmm', 'dnn']
        classifier method, GMM and DNN are supported
(Default value='gmm')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Model file not found.
Audio file not found.
"""
if classifier_method != 'gmm' and classifier_method != 'dnn':
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Check that target path exists, create if not
check_path(result_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_result_file = get_result_filename(fold=fold, path=result_path)
if not os.path.isfile(current_result_file) or overwrite:
results = []
# Load class model container
model_filename = get_model_filename(fold=fold, path=model_path)
if os.path.isfile(model_filename):
model_container = load_data(model_filename)
else:
raise IOError("Model file not found [%s]" % model_filename)
file_count = len(dataset.test(fold))
for file_id, item in enumerate(dataset.test(fold)):
progress(title_text='Testing',
fold=fold,
percentage=(float(file_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
# Load audio
if os.path.isfile(dataset.relative_to_absolute_path(item['file'])):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(item['file']), mono=True,
fs=feature_params['fs'])
else:
raise IOError("Audio file not found [%s]" % (item['file']))
if feature_params['method'] == 'lfcc':
feature_file_txt = get_feature_filename(audio_file=os.path.split(item['file'])[1],
path=feature_path,
extension='txt')
feature_data = feature_extraction_lfcc(feature_file_txt)
elif feature_params['method'] == 'traps':
                        feature_data = feature_extraction_traps(y=y,
                                                                fs=fs,
                                                                traps_params=feature_params['traps'],
                                                                mfcc_params=feature_params['mfcc'],
                                                                statistics=False)['feat']
else:
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=feature_params['include_mfcc0'],
include_delta=feature_params['include_delta'],
include_acceleration=feature_params['include_acceleration'],
mfcc_params=feature_params['mfcc'],
delta_params=feature_params['mfcc_delta'],
acceleration_params=feature_params['mfcc_acceleration'],
statistics=False)['feat']
# Normalize features
feature_data = model_container['normalizer'].normalize(feature_data)
# Do classification for the block
if classifier_method == 'gmm':
current_result = do_classification_gmm(feature_data, model_container)
current_class = current_result['class']
elif classifier_method == 'dnn':
current_result = do_classification_dnn(feature_data, model_container)
current_class = dataset.scene_labels[current_result['class_id']]
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Store the result
if classifier_method == 'gmm':
results.append((dataset.absolute_to_relative(item['file']),
current_class))
elif classifier_method == 'dnn':
logs_in_tuple = tuple(lo for lo in current_result['logls'])
results.append((dataset.absolute_to_relative(item['file']),
current_class) + logs_in_tuple)
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Save testing results
with open(current_result_file, 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
writer.writerow(result_item)
def do_classification_dnn(feature_data, model_container):
    """DNN classification for a given feature matrix.
    The trained DNN is restored from disk ('dnn/dnnmodel1'); model_container is kept
    only for interface compatibility with do_classification_gmm and is not used here.
    Returns a dict with the winning 'class_id' and per-class summed log-likelihoods 'logls'.
    """
    model_clf = skflow.TensorFlowEstimator.restore('dnn/dnnmodel1')
    # Sum frame-wise log-probabilities over time to get per-class log-likelihoods
    logls = numpy.sum(numpy.log(model_clf.predict_proba(feature_data)), 0)
classification_result_id = numpy.argmax(logls)
return {'class_id': classification_result_id,
'logls': logls}
def do_classification_gmm(feature_data, model_container):
"""GMM classification for give feature matrix
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
feature_data : numpy.ndarray [shape=(t, feature vector length)]
feature matrix
model_container : dict
model container
Returns
-------
    result : dict
        dict with keys 'class' (classification result as scene label) and 'logls'
        (per-class summed log-likelihoods)
"""
# Initialize log-likelihood matrix to -inf
logls = numpy.empty(len(model_container['models']))
logls.fill(-numpy.inf)
for label_id, label in enumerate(model_container['models']):
logls[label_id] = numpy.sum(model_container['models'][label].score(feature_data))
classification_result_id = numpy.argmax(logls)
return {'class': model_container['models'].keys()[classification_result_id],
'logls': logls}
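# Minimal usage sketch (assumes a model container produced by do_system_training and
# an existing feature file; the variable names here are illustrative placeholders):
#
#   model_container = load_data(get_model_filename(fold=1, path=model_path))
#   feature_data = load_data(feature_filename)['feat']
#   feature_data = model_container['normalizer'].normalize(feature_data)
#   result = do_classification_gmm(feature_data, model_container)
#   print result['class'], result['logls']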
def do_system_evaluation(dataset, result_path, dataset_evaluation_mode='folds'):
"""System evaluation. Testing outputs are collected and evaluated. Evaluation results are printed.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is considered to belong to one fold.
(Default value='folds')
Returns
-------
nothing
Raises
-------
IOError
Result file not found
"""
    dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
    results_fold = []
    final_result = {}  # accumulates the overall confusion matrix and accuracies
    tot_cm = numpy.zeros((dataset.scene_label_count, dataset.scene_label_count))
for fold in dataset.folds(mode=dataset_evaluation_mode):
dcase2016_scene_metric_fold = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results = []
result_filename = get_result_filename(fold=fold, path=result_path)
if os.path.isfile(result_filename):
with open(result_filename, 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
results.append(row)
else:
raise IOError("Result file not found [%s]" % result_filename)
# Rewrite the result file
if os.path.isfile(result_filename):
with open(result_filename+'2', 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
y_true = (dataset.file_meta(result_item[0])[0]['scene_label'],)
writer.writerow(y_true + tuple(result_item))
y_true = []
y_pred = []
for result in results:
y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
y_pred.append(result[1])
dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
dcase2016_scene_metric_fold.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
results_fold.append(dcase2016_scene_metric_fold.results())
tot_cm += confusion_matrix(y_true, y_pred)
final_result['tot_cm'] = tot_cm
final_result['tot_cm_acc'] = numpy.sum(numpy.diag(tot_cm)) / numpy.sum(tot_cm)
results = dcase2016_scene_metric.results()
final_result['result'] = results
print " File-wise evaluation, over %d folds" % dataset.fold_count
fold_labels = ''
separator = ' =====================+======+======+==========+ +'
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_labels += " {:8s} |".format('Fold' + str(fold))
separator += "==========+"
print " {:20s} | {:4s} : {:4s} | {:8s} | |".format('Scene label', 'Nref', 'Nsys', 'Accuracy') + fold_labels
print separator
for label_id, label in enumerate(sorted(results['class_wise_accuracy'])):
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold - 1]['class_wise_accuracy'][label] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format(label,
results['class_wise_data'][label]['Nref'],
results['class_wise_data'][label]['Nsys'],
results['class_wise_accuracy'][
label] * 100) + fold_values
print separator
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold - 1]['overall_accuracy'] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format('Overall accuracy',
results['Nref'],
results['Nsys'],
results['overall_accuracy'] * 100) + fold_values
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e)
|
|
""" implement the TimedeltaIndex """
from pandas._libs import index as libindex, lib
from pandas._libs.tslibs import Timedelta, to_offset
from pandas._typing import DtypeObj
from pandas.errors import InvalidIndexError
from pandas.core.dtypes.common import TD64NS_DTYPE, is_scalar, is_timedelta64_dtype
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays.timedeltas import TimedeltaArray
import pandas.core.common as com
from pandas.core.indexes.base import Index, maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
from pandas.core.indexes.extension import inherit_names
@inherit_names(
["__neg__", "__pos__", "__abs__", "total_seconds", "round", "floor", "ceil"]
+ TimedeltaArray._field_ops,
TimedeltaArray,
wrap=True,
)
@inherit_names(
[
"_bool_ops",
"_object_ops",
"_field_ops",
"_datetimelike_ops",
"_datetimelike_methods",
"_other_ops",
"components",
"to_pytimedelta",
"sum",
"std",
"median",
"_format_native_types",
],
TimedeltaArray,
)
class TimedeltaIndex(DatetimeTimedeltaMixin):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects.
Parameters
----------
data : array-like (1-dimensional), optional
Optional timedelta-like data to construct index with.
    unit : str, optional
        The unit of ``data`` (D, h, m, s, ms, us, ns) when it is given as an
        integer/float array.
freq : str or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation.
copy : bool
Make a copy of input ndarray.
name : object
Name to be stored in the index.
Attributes
----------
days
seconds
microseconds
nanoseconds
components
inferred_freq
Methods
-------
to_pytimedelta
to_series
round
floor
ceil
to_frame
mean
See Also
--------
Index : The base pandas Index type.
Timedelta : Represents a duration between two dates or times.
DatetimeIndex : Index of datetime64 data.
PeriodIndex : Index of Period data.
timedelta_range : Create a fixed-frequency TimedeltaIndex.
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
"""
_typ = "timedeltaindex"
_data_cls = TimedeltaArray
_engine_type = libindex.TimedeltaEngine
_comparables = ["name", "freq"]
_attributes = ["name", "freq"]
_is_numeric_dtype = False
_data: TimedeltaArray
# -------------------------------------------------------------------
# Constructors
def __new__(
cls,
data=None,
unit=None,
freq=lib.no_default,
closed=None,
dtype=TD64NS_DTYPE,
copy=False,
name=None,
):
name = maybe_extract_name(name, data, cls)
if is_scalar(data):
raise TypeError(
f"{cls.__name__}() must be called with a "
f"collection of some kind, {repr(data)} was passed"
)
if unit in {"Y", "y", "M"}:
raise ValueError(
"Units 'M', 'Y', and 'y' are no longer supported, as they do not "
"represent unambiguous timedelta values durations."
)
if isinstance(data, TimedeltaArray) and freq is lib.no_default:
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
if isinstance(data, TimedeltaIndex) and freq is lib.no_default and name is None:
if copy:
return data.copy()
else:
return data._shallow_copy()
# - Cases checked above all return/raise before reaching here - #
tdarr = TimedeltaArray._from_sequence_not_strict(
data, freq=freq, unit=unit, dtype=dtype, copy=copy
)
return cls._simple_new(tdarr, name=name)
# -------------------------------------------------------------------
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
return is_timedelta64_dtype(dtype)
# -------------------------------------------------------------------
# Indexing Methods
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int, slice, or ndarray[int]
"""
if not is_scalar(key):
raise InvalidIndexError(key)
try:
key = self._data._validate_scalar(key, unbox=False)
except TypeError as err:
raise KeyError(key) from err
return Index.get_loc(self, key, method, tolerance)
def _maybe_cast_slice_bound(self, label, side: str, kind):
"""
If label is a string, cast it to timedelta according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'} or None
Returns
-------
label : object
"""
assert kind in ["loc", "getitem", None]
if isinstance(label, str):
parsed = Timedelta(label)
lbound = parsed.round(parsed.resolution_string)
if side == "left":
return lbound
else:
return lbound + to_offset(parsed.resolution_string) - Timedelta(1, "ns")
elif not isinstance(label, self._data._recognized_scalars):
raise self._invalid_indexer("slice", label)
return label
# -------------------------------------------------------------------
@property
def inferred_type(self) -> str:
return "timedelta64"
def timedelta_range(
start=None, end=None, periods=None, freq=None, name=None, closed=None
) -> TimedeltaIndex:
"""
Return a fixed frequency TimedeltaIndex, with day as the default
frequency.
Parameters
----------
start : str or timedelta-like, default None
Left bound for generating timedeltas.
end : str or timedelta-like, default None
Right bound for generating timedeltas.
periods : int, default None
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'.
name : str, default None
Name of the resulting TimedeltaIndex.
closed : str, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None).
Returns
-------
rng : TimedeltaIndex
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.timedelta_range(start='1 day', periods=4)
TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``closed`` parameter specifies which endpoint is included. The default
behavior is to include both endpoints.
>>> pd.timedelta_range(start='1 day', periods=4, closed='right')
TimedeltaIndex(['2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
Only fixed frequencies can be passed, non-fixed frequencies such as
'M' (month end) will raise.
>>> pd.timedelta_range(start='1 day', end='2 days', freq='6H')
TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
'1 days 18:00:00', '2 days 00:00:00'],
dtype='timedelta64[ns]', freq='6H')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
'5 days 00:00:00'],
dtype='timedelta64[ns]', freq=None)
"""
if freq is None and com.any_none(periods, start, end):
freq = "D"
freq, _ = dtl.maybe_infer_freq(freq)
tdarr = TimedeltaArray._generate_range(start, end, periods, freq, closed=closed)
return TimedeltaIndex._simple_new(tdarr, name=name)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.training.python.training import training
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_lib2
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
# pylint: enable=g-import-not-at-top
def logistic_classifier(inputs):
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
def batchnorm_classifier(inputs):
inputs = layers.batch_norm(inputs, decay=0.1)
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
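# Illustrative sketch (not an actual test case): training.create_train_op wires the
# optimizer step together with the graph's UPDATE_OPS collection (e.g. the batch-norm
# moving averages created by batchnorm_classifier), unless update_ops=[] is passed.
# The CreateTrainOpTest cases below exercise both behaviors.
#
#   loss = losses.log_loss(tf_labels, batchnorm_classifier(tf_inputs))
#   optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
#   train_op = training.create_train_op(loss, optimizer)                   # runs UPDATE_OPS
#   train_op_no_updates = training.create_train_op(loss, optimizer, update_ops=[])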
class ClipGradsTest(test.TestCase):
def testClipGrads(self):
xs = variables_lib2.Variable(0.0)
ys = xs * 4.0
grads = gradients_impl.gradients([ys], [xs])
gradients_to_variables = list(zip(grads, [xs]))
clipped_gradients_to_variables = training.clip_gradient_norms(
gradients_to_variables, 3.0)
with self.test_session() as session:
session.run(variables_lib2.global_variables_initializer())
self.assertAlmostEqual(4.0, gradients_to_variables[0][0].eval())
self.assertAlmostEqual(3.0, clipped_gradients_to_variables[0][0].eval())
def testClipGradsFn(self):
xs = variables_lib2.Variable(0.0)
ys = xs * 4.0
grads = gradients_impl.gradients([ys], [xs])
gradients_to_variables = list(zip(grads, [xs]))
clipped_gradients_to_variables = training.clip_gradient_norms_fn(3.0)(
gradients_to_variables)
with self.test_session() as session:
session.run(variables_lib2.global_variables_initializer())
self.assertAlmostEqual(4.0, gradients_to_variables[0][0].eval())
self.assertAlmostEqual(3.0, clipped_gradients_to_variables[0][0].eval())
class CreateTrainOpTest(test.TestCase):
def setUp(self):
np.random.seed(0)
# Create an easy training set:
self._inputs = np.random.rand(16, 4).astype(np.float32)
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
def testTrainOpInCollection(self):
with ops.Graph().as_default():
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
# Make sure the training op was recorded in the proper collection
self.assertTrue(train_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))
def testUseUpdateOps(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
expected_mean = np.mean(self._inputs, axis=(0))
expected_var = np.var(self._inputs, axis=(0))
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
moving_variance = variables_lib.get_variables_by_name('moving_variance')[
0]
with self.test_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
mean, variance = session.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
session.run(train_op)
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testEmptyUpdateOps(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer, update_ops=[])
moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
moving_variance = variables_lib.get_variables_by_name('moving_variance')[
0]
with self.test_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
mean, variance = session.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
session.run(train_op)
mean = moving_mean.eval()
variance = moving_variance.eval()
# Since we skip update_ops the moving_vars are not updated.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
def testGlobalStepIsIncrementedByDefault(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
global_step = variables_lib.get_or_create_global_step()
with self.test_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
for _ in range(10):
session.run(train_op)
# After 10 updates global_step should be 10.
self.assertAllClose(global_step.eval(), 10)
def testGlobalStepNotIncrementedWhenSetToNone(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer, global_step=None)
global_step = variables_lib.get_or_create_global_step()
with self.test_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
for _ in range(10):
session.run(train_op)
        # Since train_op doesn't use global_step it shouldn't change.
self.assertAllClose(global_step.eval(), 0)
class TrainBatchNormClassifierTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
None,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
save_summaries_steps=None,
save_checkpoint_secs=None)
self.assertLess(loss, .1)
class TrainTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testCanAchieveZeroLoss(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
None,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
save_summaries_steps=None,
save_checkpoint_secs=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithLocalVariable(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
local_multiplier = variables_lib.local_variable(1.0)
tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
None,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
save_summaries_steps=None,
save_checkpoint_secs=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testResumeTrainAchievesRoughlyTheSameLoss(self):
number_of_steps = [300, 1, 5]
logdir = os.path.join(self.get_temp_dir(), 'resume_train_same_loss')
for i in range(len(number_of_steps)):
with ops.Graph().as_default():
random_seed.set_random_seed(i)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.StopAtStepHook(
num_steps=number_of_steps[i]),
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=50, saver=saver),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=learning_rate)
def transform_grads_fn(grads):
if gradient_multiplier != 1.0:
variables = variables_lib2.trainable_variables()
gradient_multipliers = {var: gradient_multiplier for var in variables}
with ops.name_scope('multiply_grads'):
return training.multiply_gradients(grads, gradient_multipliers)
else:
return grads
return training.create_train_op(
total_loss, optimizer, transform_grads_fn=transform_grads_fn)
def testTrainWithInitFromCheckpoint(self):
logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs1/')
logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
if gfile.Exists(logdir1): # For running on jenkins.
gfile.DeleteRecursively(logdir1)
if gfile.Exists(logdir2): # For running on jenkins.
gfile.DeleteRecursively(logdir2)
# First, train the model one step (make sure the error is high).
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op()
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir1,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir1, save_steps=1, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=1),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertGreater(loss, .5)
# Next, train the model to convergence.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
train_op = self.create_train_op()
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir1,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir1, save_steps=300, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=300),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
# Finally, advance the model a single step and validate that the loss is
# still low.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
train_op = self.create_train_op()
model_variables = variables_lib2.global_variables()
model_path = saver_lib.latest_checkpoint(logdir1)
assign_fn = variables_lib.assign_from_checkpoint_fn(
model_path, model_variables)
def init_fn(_, session):
assign_fn(session)
loss = training.train(
train_op,
None,
scaffold=monitored_session.Scaffold(init_fn=init_fn),
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=1)],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
def ModelLoss(self):
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
return losses.get_total_loss()
def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
logdir = os.path.join(self.get_temp_dir(), 'tmp_logs3/')
if gfile.Exists(logdir): # For running on jenkins.
gfile.DeleteRecursively(logdir)
# First, train only the weights of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
weights = variables_lib.get_variables_by_name('weights')
train_op = training.create_train_op(
total_loss, optimizer, variables_to_train=weights)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=200, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=200),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Next, train the biases of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
biases = variables_lib.get_variables_by_name('biases')
train_op = training.create_train_op(
total_loss, optimizer, variables_to_train=biases)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=300, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=300),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Finally, train both weights and bias to get lower loss.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.StopAtStepHook(num_steps=400),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self):
# First, train only the weights of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
weights, biases = variables_lib.get_variables()
train_op = training.create_train_op(total_loss, optimizer)
train_weights = training.create_train_op(
total_loss, optimizer, variables_to_train=[weights])
train_biases = training.create_train_op(
total_loss, optimizer, variables_to_train=[biases])
with self.test_session() as session:
# Initialize the variables.
session.run(variables_lib2.global_variables_initializer())
# Get the initial weights and biases values.
weights_values, biases_values = session.run([weights, biases])
self.assertGreater(np.linalg.norm(weights_values), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values), 0)
# Update weights and biases.
loss = session.run(train_op)
self.assertGreater(loss, .5)
new_weights, new_biases = session.run([weights, biases])
# Check that the weights and biases have been updated.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
weights_values, biases_values = new_weights, new_biases
# Update only weights.
loss = session.run(train_weights)
self.assertGreater(loss, .5)
new_weights, new_biases = session.run([weights, biases])
# Check that the weights have been updated, but biases have not.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values - new_biases), 0)
weights_values = new_weights
# Update only biases.
loss = session.run(train_biases)
self.assertGreater(loss, .5)
new_weights, new_biases = session.run([weights, biases])
# Check that the biases have been updated, but weights have not.
self.assertAlmostEqual(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
def testTrainWithAlteredGradients(self):
# Use the same learning rate but different gradient multipliers
# to train two models. Model with equivalently larger learning
# rate (i.e., learning_rate * gradient_multiplier) has smaller
# training loss.
multipliers = [1., 1000.]
number_of_steps = 10
learning_rate = 0.001
# First, train the model with equivalently smaller learning rate.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate, gradient_multiplier=multipliers[0])
loss0 = training.train(
train_op,
None,
hooks=[
basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss0)
self.assertGreater(loss0, .5)
# Second, train the model with equivalently larger learning rate.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate, gradient_multiplier=multipliers[1])
loss1 = training.train(
train_op,
None,
hooks=[
basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss1)
self.assertLess(loss1, .5)
# The loss of the model trained with larger learning rate should
# be smaller.
self.assertGreater(loss0, loss1)
if __name__ == '__main__':
test.main()
|
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
import subprocess
import unittest
from contextlib import contextmanager
from textwrap import dedent
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import environment_as, temporary_dir
from pants.util.dirutil import safe_mkdir, safe_open, touch
from pants_test.base_test import TestGenerator
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_engine
from pants_test.testutils.git_util import initialize_repo
def lines_to_set(str_or_list):
if isinstance(str_or_list, list):
return set(str_or_list)
else:
return set(x for x in str(str_or_list).split('\n') if x)
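# Illustrative only (hypothetical inputs): both forms normalize to a set of lines.
#
#   lines_to_set('a\nb\n')   == {'a', 'b'}
#   lines_to_set(['a', 'b']) == {'a', 'b'}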
@contextmanager
def mutated_working_copy(files_to_mutate, to_append='\n '):
"""Given a list of files, append whitespace to each of them to trigger a git diff - then reset."""
assert to_append, 'to_append may not be empty'
for f in files_to_mutate:
with open(f, 'ab') as fh:
fh.write(to_append)
try:
yield
finally:
seek_point = len(to_append) * -1
for f in files_to_mutate:
with open(f, 'ab') as fh:
fh.seek(seek_point, os.SEEK_END)
fh.truncate()
@contextmanager
def create_isolated_git_repo():
# Isolated Git Repo Structure:
# worktree
# |--README
# |--pants.ini
# |--3rdparty
# |--BUILD
# |--src
# |--resources
# |--org/pantsbuild/resourceonly
# |--BUILD
# |--README.md
# |--java
# |--org/pantsbuild/helloworld
# |--BUILD
# |--helloworld.java
# |--python
# |--python_targets
# |--BUILD
# |--test_binary.py
# |--test_library.py
# |--test_unclaimed_src.py
# |--sources
# |--BUILD
# |--sources.py
# |--sources.txt
# |--tests
# |--scala
# |--org/pantsbuild/cp-directories
# |--BUILD
# |--ClasspathDirectoriesSpec.scala
with temporary_dir(root_dir=get_buildroot()) as worktree:
with safe_open(os.path.join(worktree, 'README'), 'w') as fp:
fp.write('Just a test tree.')
# Create an empty pants config file.
touch(os.path.join(worktree, 'pants.ini'))
# Copy .gitignore to new repo.
shutil.copyfile('.gitignore', os.path.join(worktree, '.gitignore'))
with initialize_repo(worktree=worktree, gitdir=os.path.join(worktree, '.git')) as git:
# Resource File
resource_file = os.path.join(worktree, 'src/resources/org/pantsbuild/resourceonly/README.md')
with safe_open(resource_file, 'w') as fp:
fp.write('Just resource.')
resource_build_file = os.path.join(worktree, 'src/resources/org/pantsbuild/resourceonly/BUILD')
with safe_open(resource_build_file, 'w') as fp:
fp.write(dedent("""
resources(
name='resource',
sources=['README.md'],
)
"""))
git.add(resource_file, resource_build_file)
git.commit('Check in a resource target.')
# Java Program
src_file = os.path.join(worktree, 'src/java/org/pantsbuild/helloworld/helloworld.java')
with safe_open(src_file, 'w') as fp:
fp.write(dedent("""
package org.pantsbuild.helloworld;
class HelloWorld {
public static void main(String[] args) {
System.out.println("Hello, World!\n");
}
}
"""))
src_build_file = os.path.join(worktree, 'src/java/org/pantsbuild/helloworld/BUILD')
with safe_open(src_build_file, 'w') as fp:
fp.write(dedent("""
jvm_binary(
dependencies=[
'{}',
],
source='helloworld.java',
main='org.pantsbuild.helloworld.HelloWorld',
)
""".format('src/resources/org/pantsbuild/resourceonly:resource')))
git.add(src_file, src_build_file)
git.commit('hello world java program with a dependency on a resource file.')
# Scala Program
scala_src_dir = os.path.join(worktree, 'tests/scala/org/pantsbuild/cp-directories')
safe_mkdir(os.path.dirname(scala_src_dir))
shutil.copytree('testprojects/tests/scala/org/pantsbuild/testproject/cp-directories', scala_src_dir)
git.add(scala_src_dir)
git.commit('Check in a scala test target.')
# Python library and binary
python_src_dir = os.path.join(worktree, 'src/python/python_targets')
safe_mkdir(os.path.dirname(python_src_dir))
shutil.copytree('testprojects/src/python/python_targets', python_src_dir)
git.add(python_src_dir)
git.commit('Check in python targets.')
# A `python_library` with `resources=['file.name']`.
python_src_dir = os.path.join(worktree, 'src/python/sources')
safe_mkdir(os.path.dirname(python_src_dir))
shutil.copytree('testprojects/src/python/sources', python_src_dir)
git.add(python_src_dir)
git.commit('Check in a python library with resource dependency.')
# Copy 3rdparty/BUILD.
_3rdparty_build = os.path.join(worktree, '3rdparty/BUILD')
safe_mkdir(os.path.dirname(_3rdparty_build))
shutil.copyfile('3rdparty/BUILD', _3rdparty_build)
git.add(_3rdparty_build)
git.commit('Check in 3rdparty/BUILD.')
with environment_as(PANTS_BUILDROOT_OVERRIDE=worktree):
yield worktree
class ChangedIntegrationTest(PantsRunIntegrationTest, TestGenerator):
TEST_MAPPING = {
# A `jvm_binary` with `source='file.name'`.
'src/java/org/pantsbuild/helloworld/helloworld.java': dict(
none=['src/java/org/pantsbuild/helloworld:helloworld'],
direct=['src/java/org/pantsbuild/helloworld:helloworld'],
transitive=['src/java/org/pantsbuild/helloworld:helloworld']
),
# A `python_binary` with `source='file.name'`.
'src/python/python_targets/test_binary.py': dict(
none=['src/python/python_targets:test'],
direct=['src/python/python_targets:test'],
transitive=['src/python/python_targets:test']
),
# A `python_library` with `sources=['file.name']`.
'src/python/python_targets/test_library.py': dict(
none=['src/python/python_targets:test_library'],
direct=['src/python/python_targets:test',
'src/python/python_targets:test_library',
'src/python/python_targets:test_library_direct_dependee'],
transitive=['src/python/python_targets:test',
'src/python/python_targets:test_library',
'src/python/python_targets:test_library_direct_dependee',
'src/python/python_targets:test_library_transitive_dependee',
'src/python/python_targets:test_library_transitive_dependee_2',
'src/python/python_targets:test_library_transitive_dependee_3',
'src/python/python_targets:test_library_transitive_dependee_4']
),
# A `resources` target with `sources=['file.name']` referenced by a `java_library` target.
'src/resources/org/pantsbuild/resourceonly/README.md': dict(
none=['src/resources/org/pantsbuild/resourceonly:resource'],
direct=['src/java/org/pantsbuild/helloworld:helloworld',
'src/resources/org/pantsbuild/resourceonly:resource'],
transitive=['src/java/org/pantsbuild/helloworld:helloworld',
'src/resources/org/pantsbuild/resourceonly:resource'],
),
    # A `python_library` with `sources=['file.name']`.
'src/python/sources/sources.py': dict(
none=['src/python/sources:sources'],
direct=['src/python/sources:sources'],
transitive=['src/python/sources:sources']
),
# A `scala_library` with `sources=['file.name']`.
'tests/scala/org/pantsbuild/cp-directories/ClasspathDirectoriesSpec.scala': dict(
none=['tests/scala/org/pantsbuild/cp-directories:cp-directories'],
direct=['tests/scala/org/pantsbuild/cp-directories:cp-directories'],
transitive=['tests/scala/org/pantsbuild/cp-directories:cp-directories']
),
# An unclaimed source file.
'src/python/python_targets/test_unclaimed_src.py': dict(
none=[],
direct=[],
transitive=[]
)
}
@classmethod
def generate_tests(cls):
"""Generates tests on the class for better reporting granularity than an opaque for loop test."""
def safe_filename(f):
return f.replace('/', '_').replace('.', '_')
for filename, dependee_mapping in cls.TEST_MAPPING.items():
for dependee_type in dependee_mapping.keys():
# N.B. The parameters here are used purely to close over the respective loop variables.
def inner_integration_coverage_test(self, filename=filename, dependee_type=dependee_type):
with create_isolated_git_repo() as worktree:
# Mutate the working copy so we can do `--changed-parent=HEAD` deterministically.
with mutated_working_copy([os.path.join(worktree, filename)]):
stdout = self.assert_changed_new_equals_old(
['--changed-include-dependees={}'.format(dependee_type), '--changed-parent=HEAD'],
test_list=True
)
self.assertEqual(
lines_to_set(self.TEST_MAPPING[filename][dependee_type]),
lines_to_set(stdout)
)
cls.add_test(
'test_changed_coverage_{}_{}'.format(dependee_type, safe_filename(filename)),
inner_integration_coverage_test
)
def assert_changed_new_equals_old(self, extra_args, success=True, test_list=False):
args = ['-q', 'changed'] + extra_args
changed_run = self.do_command(*args, success=success, enable_v2_engine=False)
engine_changed_run = self.do_command(*args, success=success, enable_v2_engine=True)
self.assertEqual(
lines_to_set(changed_run.stdout_data), lines_to_set(engine_changed_run.stdout_data)
)
if test_list:
# In the v2 engine, `--changed-*` options can alter the specs of any goal - test with `list`.
list_args = ['-q', 'list'] + extra_args
engine_list_run = self.do_command(*list_args, success=success, enable_v2_engine=True)
self.assertEqual(
lines_to_set(changed_run.stdout_data), lines_to_set(engine_list_run.stdout_data)
)
# If we get to here without asserting, we know all copies of stdout are identical - return one.
return changed_run.stdout_data
@ensure_engine
def test_changed_options_scope_shadowing(self):
"""Tests that the `test-changed` scope overrides `changed` scope."""
changed_src = 'src/python/python_targets/test_library.py'
expected_target = self.TEST_MAPPING[changed_src]['none'][0]
expected_set = {expected_target}
not_expected_set = set(self.TEST_MAPPING[changed_src]['transitive']).difference(expected_set)
with create_isolated_git_repo() as worktree:
with mutated_working_copy([os.path.join(worktree, changed_src)]):
pants_run = self.run_pants([
'-ldebug', # This ensures the changed target name shows up in the pants output.
'test-changed',
'--test-changed-changes-since=HEAD',
'--test-changed-include-dependees=none', # This option should be used.
'--changed-include-dependees=transitive' # This option should be stomped on.
])
self.assert_success(pants_run)
for expected_item in expected_set:
self.assertIn(expected_item, pants_run.stdout_data)
for not_expected_item in not_expected_set:
if expected_target.startswith(not_expected_item):
continue # Ignore subset matches.
self.assertNotIn(not_expected_item, pants_run.stdout_data)
@ensure_engine
def test_changed_options_scope_positional(self):
changed_src = 'src/python/python_targets/test_library.py'
expected_set = set(self.TEST_MAPPING[changed_src]['transitive'])
with create_isolated_git_repo() as worktree:
with mutated_working_copy([os.path.join(worktree, changed_src)]):
pants_run = self.run_pants([
'-ldebug', # This ensures the changed target names show up in the pants output.
'test-changed',
'--changes-since=HEAD',
'--include-dependees=transitive'
])
self.assert_success(pants_run)
for expected_item in expected_set:
self.assertIn(expected_item, pants_run.stdout_data)
@ensure_engine
def test_test_changed_exclude_target(self):
changed_src = 'src/python/python_targets/test_library.py'
exclude_target_regexp = r'_[0-9]'
excluded_set = {'src/python/python_targets:test_library_transitive_dependee_2',
'src/python/python_targets:test_library_transitive_dependee_3',
'src/python/python_targets:test_library_transitive_dependee_4'}
expected_set = set(self.TEST_MAPPING[changed_src]['transitive']) - excluded_set
with create_isolated_git_repo() as worktree:
with mutated_working_copy([os.path.join(worktree, changed_src)]):
pants_run = self.run_pants([
'-ldebug', # This ensures the changed target names show up in the pants output.
'--exclude-target-regexp={}'.format(exclude_target_regexp),
'test-changed',
'--changes-since=HEAD',
'--include-dependees=transitive'
])
self.assert_success(pants_run)
for expected_item in expected_set:
self.assertIn(expected_item, pants_run.stdout_data)
for excluded_item in excluded_set:
self.assertNotIn(excluded_item, pants_run.stdout_data)
@ensure_engine
def test_changed_changed_since_and_files(self):
with create_isolated_git_repo():
stdout = self.assert_changed_new_equals_old(['--changed-since=HEAD^^', '--files'])
# The output should be the files added in the last 2 commits.
self.assertEqual(
lines_to_set(stdout),
{'src/python/sources/BUILD',
'src/python/sources/sources.py',
'src/python/sources/sources.txt',
'3rdparty/BUILD'}
)
@ensure_engine
def test_changed_diffspec_and_files(self):
with create_isolated_git_repo():
git_sha = subprocess.check_output(['git', 'rev-parse', 'HEAD^^']).strip()
stdout = self.assert_changed_new_equals_old(['--changed-diffspec={}'.format(git_sha), '--files'])
# The output should be the files added in the last 2 commits.
self.assertEqual(
lines_to_set(stdout),
{'src/python/python_targets/BUILD',
'src/python/python_targets/test_binary.py',
'src/python/python_targets/test_library.py',
'src/python/python_targets/test_unclaimed_src.py'}
)
@ensure_engine
def test_changed_with_multiple_build_files(self):
new_build_file = 'src/python/python_targets/BUILD.new'
with create_isolated_git_repo() as worktree:
touch(os.path.join(worktree, new_build_file))
pants_run = self.run_pants(['changed'])
self.assert_success(pants_run)
self.assertEqual(pants_run.stdout_data.strip(), '')
# The following 4 tests do not run in an isolated repo because they don't mutate the working copy.
def test_changed(self):
self.assert_changed_new_equals_old([])
@unittest.skip("Pending fix for https://github.com/pantsbuild/pants/issues/4010")
def test_changed_with_changes_since(self):
self.assert_changed_new_equals_old(['--changes-since=HEAD^^'])
@unittest.skip("Pending fix for https://github.com/pantsbuild/pants/issues/4010")
def test_changed_with_changes_since_direct(self):
self.assert_changed_new_equals_old(['--changes-since=HEAD^^', '--include-dependees=direct'])
@unittest.skip("Pending fix for https://github.com/pantsbuild/pants/issues/4010")
def test_changed_with_changes_since_transitive(self):
self.assert_changed_new_equals_old(['--changes-since=HEAD^^', '--include-dependees=transitive'])
ChangedIntegrationTest.generate_tests()
|
|
#!/usr/bin/env python
################################################################################
# Copyright (c) 2010 Michael Fairley
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
################################################################################
import asynchat
import asyncore
import pickle
import hashlib
import hmac
import logging
import marshal
import optparse
import os
import random
import socket
import sys
import types
import binascii
VERSION = "0.1.2"
DEFAULT_PORT = 11235
class Protocol(asynchat.async_chat):
def __init__(self, conn=None):
if conn:
asynchat.async_chat.__init__(self, conn)
else:
asynchat.async_chat.__init__(self)
self.set_terminator(b"\n")
self.buffer = []
self.auth = None
self.mid_command = False
def collect_incoming_data(self, data):
self.buffer.append(data)
def send_command(self, command, data=None):
if not b":" in command:
command += b":"
if data:
pdata = pickle.dumps(data)
command += bytes(str(len(pdata)), 'utf-8')
logging.debug( "<- %s" % command)
self.push(command + b"\n" + pdata)
else:
logging.debug( "<- %s" % command)
self.push(command + b"\n")
def found_terminator(self):
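# Each frame is a "command:length" header terminated by "\n"; when a length is
# present, the terminator switches to that many bytes so the pickled payload of
# the command can be collected on the next pass through this handler.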
if not self.auth == b"Done":
command, data = (b''.join(self.buffer).split(b":", 1))
self.process_unauthed_command(command, data)
elif not self.mid_command:
logging.debug("-> %s" % b''.join(self.buffer))
command, length = (b''.join(self.buffer)).split(b":", 1)
if command == b"challenge":
self.process_command(command, length)
elif length:
self.set_terminator(int(length))
self.mid_command = command
else:
self.process_command(command)
else: # Read the data segment from the previous command
if not self.auth == b"Done":
logging.fatal("Recieved pickled data from unauthed source")
sys.exit(1)
data = pickle.loads(b''.join(self.buffer))
self.set_terminator(b"\n")
command = self.mid_command
self.mid_command = None
self.process_command(command, data)
self.buffer = []
def send_challenge(self):
self.auth = binascii.hexlify(os.urandom(20))
self.send_command(b":".join([b"challenge", self.auth]))
def respond_to_challenge(self, command, data):
mac = hmac.new(self.password, data, hashlib.sha1)
self.send_command(b":".join([b"auth", binascii.hexlify(mac.digest())]))
self.post_auth_init()
def verify_auth(self, command, data):
mac = hmac.new(self.password, self.auth, hashlib.sha1)
if data == binascii.hexlify(mac.digest()):
self.auth = b"Done"
logging.info("Authenticated other end")
else:
self.handle_close()
def process_command(self, command, data=None):
commands = {
b'challenge': self.respond_to_challenge,
b'disconnect': lambda x, y: self.handle_close(),
}
if command in commands:
commands[command](command, data)
else:
logging.critical("Unknown command received: %s" % (command,))
self.handle_close()
def process_unauthed_command(self, command, data=None):
commands = {
b'challenge': self.respond_to_challenge,
b'auth': self.verify_auth,
b'disconnect': lambda x, y: self.handle_close(),
}
if command in commands:
commands[command](command, data)
else:
logging.critical("Unknown unauthed command received: %s" % (command,))
self.handle_close()
class Client(Protocol):
def __init__(self):
Protocol.__init__(self)
self.mapfn = self.reducefn = self.collectfn = None
def conn(self, server, port):
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((server, port))
asyncore.loop()
def handle_connect(self):
pass
def handle_close(self):
self.close()
def set_mapfn(self, command, mapfn):
self.mapfn = types.FunctionType(marshal.loads(mapfn), globals(), 'mapfn')
def set_collectfn(self, command, collectfn):
self.collectfn = types.FunctionType(marshal.loads(collectfn), globals(), 'collectfn')
def set_reducefn(self, command, reducefn):
self.reducefn = types.FunctionType(marshal.loads(reducefn), globals(), 'reducefn')
def call_mapfn(self, command, data):
logging.info("Mapping %s" % str(data[0]))
results = {}
for k, v in self.mapfn(data[0], data[1]):
if k not in results:
results[k] = []
results[k].append(v)
if self.collectfn:
for k in results:
results[k] = [self.collectfn(k, results[k])]
self.send_command(b'mapdone', (data[0], results))
def call_reducefn(self, command, data):
logging.info("Reducing %s" % str(data[0]))
results = self.reducefn(data[0], data[1])
self.send_command(b'reducedone', (data[0], results))
def process_command(self, command, data=None):
commands = {
b'mapfn': self.set_mapfn,
b'collectfn': self.set_collectfn,
b'reducefn': self.set_reducefn,
b'map': self.call_mapfn,
b'reduce': self.call_reducefn,
}
if command in commands:
commands[command](command, data)
else:
Protocol.process_command(self, command, data)
def post_auth_init(self):
if not self.auth:
self.send_challenge()
class Server(asyncore.dispatcher, object):
def __init__(self):
asyncore.dispatcher.__init__(self)
self.mapfn = None
self.reducefn = None
self.collectfn = None
self.datasource = None
self.password = None
def run_server(self, password=b"", port=DEFAULT_PORT):
if isinstance(password, str):
password = bytes(password, "utf-8")
self.password = password
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(("", port))
self.listen(1)
try:
asyncore.loop()
except:
self.close_all()
raise
return self.taskmanager.results
# On Python 3.2+ asyncore calls handle_accepted(conn, addr) with the already-accepted socket.
def handle_accepted(self, conn, addr):
sc = ServerChannel(conn, self)
sc.password = self.password
def handle_close(self):
self.close()
def set_datasource(self, ds):
self._datasource = ds
self.taskmanager = TaskManager(self._datasource, self)
def get_datasource(self):
return self._datasource
datasource = property(get_datasource, set_datasource)
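# A minimal usage sketch for this Server (illustrative only, not part of the
# original module). It assumes a word-count style job where the datasource is a
# dict and mapfn/reducefn are plain functions whose code objects can be
# marshalled to clients:
#
#   s = Server()
#   s.datasource = {'doc1': 'a b a', 'doc2': 'b c'}
#   s.mapfn = lambda key, text: [(word, 1) for word in text.split()]
#   s.reducefn = lambda word, counts: sum(counts)
#   results = s.run_server(password='changeme')  # blocks until clients finish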
class ServerChannel(Protocol):
def __init__(self, conn, server):
Protocol.__init__(self, conn)
self.server = server
self.start_auth()
def handle_close(self):
logging.info("Client disconnected")
self.close()
def start_auth(self):
self.send_challenge()
def start_new_task(self):
command, data = self.server.taskmanager.next_task(self)
if command is None:
return
self.send_command(command, data)
def map_done(self, command, data):
self.server.taskmanager.map_done(data)
self.start_new_task()
def reduce_done(self, command, data):
self.server.taskmanager.reduce_done(data)
self.start_new_task()
def process_command(self, command, data=None):
commands = {
b'mapdone': self.map_done,
b'reducedone': self.reduce_done,
}
if command in commands:
commands[command](command, data)
else:
Protocol.process_command(self, command, data)
def post_auth_init(self):
if self.server.mapfn:
self.send_command(b'mapfn', marshal.dumps(self.server.mapfn.__code__))
if self.server.reducefn:
self.send_command(b'reducefn', marshal.dumps(self.server.reducefn.__code__))
if self.server.collectfn:
self.send_command(b'collectfn', marshal.dumps(self.server.collectfn.__code__))
self.start_new_task()
class TaskManager:
START = 0
MAPPING = 1
REDUCING = 2
FINISHED = 3
def __init__(self, datasource, server):
self.datasource = datasource
self.server = server
self.state = TaskManager.START
def next_task(self, channel):
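# Hand the requesting channel its next unit of work, advancing the state
# machine START -> MAPPING -> REDUCING -> FINISHED as the datasource and the
# intermediate map results are exhausted. Outstanding work is re-issued to
# idle channels so a slow or dead client cannot stall the job.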
if self.state == TaskManager.START:
self.map_iter = iter(self.datasource)
self.working_maps = {}
self.map_results = {}
#self.waiting_for_maps = []
self.state = TaskManager.MAPPING
if self.state == TaskManager.MAPPING:
try:
map_key = next(self.map_iter)
map_item = map_key, self.datasource[map_key]
self.working_maps[map_item[0]] = map_item[1]
return (b'map', map_item)
except StopIteration:
if len(self.working_maps) > 0:
key = random.choice(list(self.working_maps.keys()))
return (b'map', (key, self.working_maps[key]))
self.state = TaskManager.REDUCING
self.reduce_iter = iter(self.map_results.items())
self.working_reduces = {}
self.results = {}
if self.state == TaskManager.REDUCING:
try:
reduce_item = next(self.reduce_iter)
self.working_reduces[reduce_item[0]] = reduce_item[1]
return (b'reduce', reduce_item)
except StopIteration:
if len(self.working_reduces) > 0:
key = random.choice(list(self.working_reduces.keys()))
return (b'reduce', (key, self.working_reduces[key]))
self.state = TaskManager.FINISHED
if self.state == TaskManager.FINISHED:
self.server.handle_close()
return (b'disconnect', None)
def map_done(self, data):
# Don't use the results if they've already been counted
if data[0] not in self.working_maps:
return
for (key, values) in data[1].items():
if key not in self.map_results:
self.map_results[key] = []
self.map_results[key].extend(values)
del self.working_maps[data[0]]
def reduce_done(self, data):
# Don't use the results if they've already been counted
if data[0] not in self.working_reduces:
return
self.results[data[0]] = data[1]
del self.working_reduces[data[0]]
def run_client():
parser = optparse.OptionParser(usage="%prog [options]", version="%%prog %s"%VERSION)
parser.add_option("-p", "--password", dest="password", default="", help="password")
parser.add_option("-P", "--port", dest="port", type="int", default=DEFAULT_PORT, help="port")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true")
parser.add_option("-V", "--loud", dest="loud", action="store_true")
(options, args) = parser.parse_args()
if options.verbose:
logging.basicConfig(level=logging.INFO)
if options.loud:
logging.basicConfig(level=logging.DEBUG)
client = Client()
if isinstance(options.password, str):
options.password = bytes(options.password, "utf-8")
client.password = options.password
client.conn(args[0], options.port)
if __name__ == '__main__':
run_client()
|
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# suppressions.py
"""Post-process Valgrind suppression matcher.
Suppressions are defined as follows:
# optional one-line comments anywhere in the suppressions file.
{
<Short description of the error>
Toolname:Errortype
fun:function_name
obj:object_filename
fun:wildcarded_fun*_name
# an ellipsis wildcards zero or more functions in a stack.
...
fun:some_other_function_name
}
If run from the command line, suppressions.py does a self-test
of the Suppression class.
"""
import re
ELLIPSIS = '...'
class Suppression(object):
"""This class represents a single stack trace suppression.
Attributes:
description: A string representing the error description.
type: A string representing the error type, e.g. Memcheck:Leak.
stack: a list of "fun:" or "obj:" or ellipsis lines.
"""
def __init__(self, description, type, stack):
"""Inits Suppression.
Args: Same as class attributes.
"""
self.description = description
self.type = type
self._stack = stack
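# Build one multi-line regex for the whole suppression: an ellipsis line
# becomes '(.*\n)*' (zero or more stack frames), a '*' inside a fun:/obj:
# line becomes '.*', and everything else is escaped literally.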
re_line = '{\n.*\n%s\n' % self.type
re_bucket = ''
for line in stack:
if line == ELLIPSIS:
re_line += re.escape(re_bucket)
re_bucket = ''
re_line += '(.*\n)*'
else:
for char in line:
if char == '*':
re_line += re.escape(re_bucket)
re_bucket = ''
re_line += '.*'
else: # there can't be any '\*'s in a stack trace
re_bucket += char
re_line += re.escape(re_bucket)
re_bucket = ''
re_line += '\n'
re_line += '(.*\n)*'
re_line += '}'
self._re = re.compile(re_line, re.MULTILINE)
def Match(self, suppression_from_report):
"""Returns bool indicating whether this suppression matches
the suppression generated from Valgrind error report.
We match our suppressions against generated suppressions
(not against reports) since they have the same format
while the reports are taken from XML, contain filenames,
they are demangled, etc.
Args:
suppression_from_report: list of strings (function names).
Returns:
True if the suppression is not empty and matches the report.
"""
if not self._stack:
return False
lines = [f.strip() for f in suppression_from_report]
if self._re.match('\n'.join(lines) + '\n'):
return True
else:
return False
class SuppressionError(Exception):
def __init__(self, filename, line, message=''):
Exception.__init__(self, filename, line, message)
self._file = filename
self._line = line
self._message = message
def __str__(self):
return 'Error reading suppressions from "%s" (line %d): %s.' % (
self._file, self._line, self._message)
def ReadSuppressionsFromFile(filename):
"""Read suppressions from the given file and return them as a list"""
input_file = open(filename, 'r')
try:
return ReadSuppressions(input_file, filename)
except SuppressionError:
input_file.close()
raise
def ReadSuppressions(lines, supp_descriptor):
"""Given a list of lines, returns a list of suppressions.
Args:
lines: a list of lines containing suppressions.
supp_descriptor: should typically be a filename.
Used only when parsing errors happen.
"""
result = []
cur_descr = ''
cur_type = ''
cur_stack = []
in_suppression = False
nline = 0
for line in lines:
nline += 1
line = line.strip()
if line.startswith('#'):
continue
if not in_suppression:
if not line:
# empty lines between suppressions
pass
elif line.startswith('{'):
in_suppression = True
pass
else:
raise SuppressionError(supp_descriptor, nline,
'Expected: "{"')
elif line.startswith('}'):
result.append(Suppression(cur_descr, cur_type, cur_stack))
cur_descr = ''
cur_type = ''
cur_stack = []
in_suppression = False
elif not cur_descr:
cur_descr = line
continue
elif not cur_type:
if (not line.startswith("Memcheck:")) and \
(line != "Heapcheck:Leak"):
raise SuppressionError(supp_descriptor, nline,
'"Memcheck:TYPE" or "Heapcheck:Leak is expected, got "%s"' % line)
if not line.split(':')[1] in ["Addr1", "Addr2", "Addr4", "Addr8",
"Cond", "Free", "Jump", "Leak", "Overlap", "Param",
"Value1", "Value2", "Value4", "Value8"]:
raise SuppressionError(supp_descriptor, nline,
'Unknown suppression type "%s"' % line[9:])
cur_type = line
continue
elif re.match("^fun:.*|^obj:.*|^\.\.\.$", line):
cur_stack.append(line.strip())
elif len(cur_stack) == 0 and cur_type == "Memcheck:Param":
cur_stack.append(line.strip())
else:
raise SuppressionError(supp_descriptor, nline,
'"fun:function_name" or "obj:object_file" ' \
'or "..." expected')
return result
def TestStack(stack, positive, negative):
"""A helper function for SelfTest() that checks a single stack.
Args:
stack: the stack to match the suppressions.
positive: the list of suppressions that must match the given stack.
negative: the list of suppressions that should not match.
"""
for supp in positive:
assert ReadSuppressions(supp.split("\n"), "")[0].Match(stack), \
"Suppression:\n%s\ndidn't match stack:\n%s" % (supp, stack)
for supp in negative:
assert not ReadSuppressions(supp.split("\n"), "")[0].Match(stack), \
"Suppression:\n%s\ndidn't match stack:\n%s" % (supp, stack)
def SelfTest():
"""Tests the Suppression.Match() capabilities."""
test_stack1 = """{
test
Memcheck:Leak
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}""".split("\n")
test_stack2 = """{
test
Heapcheck:Leak
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}""".split("\n")
positive_memcheck_suppressions = [
"{\nzzz\nMemcheck:Leak\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Leak\nfun:ab*ly\n}",
"{\nzzz\nMemcheck:Leak\nfun:absolutly\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\n...\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\n...\nfun:detection\n}",
"{\nzzz\nMemcheck:Leak\nfun:absolutly\n...\nfun:detection\n}",
"{\nzzz\nMemcheck:Leak\nfun:ab*ly\n...\nfun:detection\n}",
"{\nzzz\nMemcheck:Leak\n...\nobj:condition\n}",
"{\nzzz\nMemcheck:Leak\n...\nobj:condition\nfun:detection\n}",
"{\nzzz\nMemcheck:Leak\n...\nfun:brilliant\nobj:condition\n}",
]
positive_heapcheck_suppressions = [
"{\nzzz\nHeapcheck:Leak\n...\nobj:condition\n}",
"{\nzzz\nHeapcheck:Leak\nfun:absolutly\n}",
]
negative_memcheck_suppressions = [
"{\nzzz\nMemcheck:Leak\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Leak\nfun:ab*liant\n}",
"{\nzzz\nMemcheck:Leak\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\nobj:condition\n}",
"{\nzzz\nMemcheck:Addr8\nfun:brilliant\n}",
]
negative_heapcheck_suppressions = [
"{\nzzz\nMemcheck:Leak\nfun:absolutly\n}",
"{\nzzz\nHeapcheck:Leak\nfun:brilliant\n}",
]
TestStack(test_stack1, positive_memcheck_suppressions,
negative_memcheck_suppressions)
TestStack(test_stack2, positive_heapcheck_suppressions,
negative_heapcheck_suppressions)
if __name__ == '__main__':
SelfTest()
print 'PASS'
|
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import unittest
from asciicanvas import colors
from asciicanvas.style import Style
from asciicanvas.asciicanvas import AsciiCanvas
class TestAsciiCanvas(unittest.TestCase):
"""
Test cases for AsciiCanvas
"""
def test_canvas_size(self):
ascii_canvas = AsciiCanvas(10, 10)
self.assertEqual(ascii_canvas.cols, 10)
self.assertEqual(ascii_canvas.lines, 10)
# check ranges
with self.assertRaises(Exception):
ascii_canvas = AsciiCanvas(-1, -1)
with self.assertRaises(Exception):
ascii_canvas = AsciiCanvas(0, 0)
with self.assertRaises(Exception):
ascii_canvas = AsciiCanvas(-10, 10)
with self.assertRaises(Exception):
ascii_canvas = AsciiCanvas(10, -100)
with self.assertRaises(Exception):
ascii_canvas = AsciiCanvas(10, 1001)
with self.assertRaises(Exception):
ascii_canvas = AsciiCanvas(1001, 1000)
def test_canvas(self):
ascii_canvas = AsciiCanvas(10, 10)
canvas_str = (' ' * 10 + '\n') * 9 + ' ' * 10
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str)
ascii_canvas = AsciiCanvas(1, 1, Style('#'))
canvas_str = '#'
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str)
ascii_canvas = AsciiCanvas(2, 1, Style('XYZ'))
canvas_str = 'XX'
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str)
ascii_canvas.clear()
# must be the same as before clear
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str)
def test_point_draw(self):
ascii_canvas = AsciiCanvas(2, 2)
ascii_canvas.add_point(0, 0)
canvas_with_points_str = \
line('o ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_points_str, 'Incorrect canvas with lines')
ascii_canvas = AsciiCanvas(2, 2)
ascii_canvas.add_point(-5, -5)
canvas_with_points_str = \
line(' ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_points_str, 'Incorrect canvas with lines')
ascii_canvas = AsciiCanvas(5, 5)
ascii_canvas.add_point(1, 1, Style('Ooo'))
ascii_canvas.add_point(3, 3, Style('*'))
ascii_canvas.add_point(0, 4, Style('.'))
ascii_canvas.add_point(4, 0, Style(''))
ascii_canvas.add_point(4, 1, Style(' '))
canvas_with_lines_str = \
line(' o') + \
line(' O ') + \
line(' ') + \
line(' * ') + \
last('. ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_lines_str, 'Incorrect canvas with lines')
def test_line_draw(self):
ascii_canvas = AsciiCanvas(5, 2)
ascii_canvas.add_line(0, 0, 0, 0)
canvas_with_lines_str = \
line('o ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_lines_str, 'Incorrect canvas with lines')
ascii_canvas = AsciiCanvas(5, 5)
ascii_canvas.add_line(-5, -5, 10, 10, Style('****'))
ascii_canvas.add_line(4, 0, 0, 4, Style('#'))
canvas_with_lines_str = \
line('* #') + \
line(' * # ') + \
line(' # ') + \
line(' # * ') + \
last('# *')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_lines_str, 'Incorrect canvas with lines')
ascii_canvas = AsciiCanvas(5, 5)
ascii_canvas.add_line(0, 0, 4, 0, style=Style('-'))
ascii_canvas.add_line(0, 0, 0, 3, style=Style('|'))
canvas_with_lines_str = \
line('|----') + \
line('| ') + \
line('| ') + \
line('| ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_lines_str, 'Incorrect canvas with lines')
def test_text_draw(self):
ascii_canvas = AsciiCanvas(5, 5)
ascii_canvas.add_text(-3, 2, 'hello world!!!!!')
canvas_with_text_str = \
line(' ') + \
line(' ') + \
line('lo wo') + \
line(' ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_text_str, 'Incorrect canvas with text')
ascii_canvas = AsciiCanvas(5, 1)
ascii_canvas.add_text(2, 0, '')
canvas_with_text_str = \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_text_str, 'Incorrect canvas with text')
ascii_canvas = AsciiCanvas(5, 1)
ascii_canvas.add_text(2, 0, '\xFF')
canvas_with_text_str = \
last(' \xFF ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_text_str, 'Incorrect canvas with text')
def test_rect_draw(self):
ascii_canvas = AsciiCanvas(5, 5)
ascii_canvas.add_rect(0, 0, 5, 5)
canvas_with_rect_str = \
line('ooooo') + \
line('o o') + \
line('o o') + \
line('o o') + \
last('ooooo')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_rect_str, 'Incorrect canvas with rect')
ascii_canvas = AsciiCanvas(5, 5)
ascii_canvas.add_rect(0, 0, 5, 5, fill_style=Style('.'), outline_style=Style('#'))
canvas_with_rect_str = \
line('#####') + \
line('#...#') + \
line('#...#') + \
line('#...#') + \
last('#####')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_rect_str, 'Incorrect canvas with rect')
ascii_canvas = AsciiCanvas(5, 5)
ascii_canvas.add_rect(4, 4, 1, 1)
canvas_with_rect_str = \
line(' ') + \
line(' ') + \
line(' ') + \
line(' ') + \
last(' o')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_rect_str, 'Incorrect canvas with rect')
ascii_canvas = AsciiCanvas(2, 2)
ascii_canvas.add_rect(1, 1, 0, 0)
canvas_with_rect_str = \
line(' ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_rect_str, 'Incorrect canvas with rect')
ascii_canvas = AsciiCanvas(2, 2)
ascii_canvas.add_rect(1, 1, -1, -1)
canvas_with_rect_str = \
line(' ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_rect_str, 'Incorrect canvas with rect')
def test_nine_patch_rect_draw(self):
ascii_canvas = AsciiCanvas(5, 5)
ascii_canvas.add_nine_patch_rect(0, 0, 5, 5)
canvas_with_rect_str = \
line('.---.') + \
line('| |') + \
line('| |') + \
line('| |') + \
last("`---'")
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_rect_str, 'Incorrect canvas with 9-patch rect')
ascii_canvas = AsciiCanvas(5, 5)
nine_patch_style = (
Style('1'), Style('2'), Style('3'),
Style('4'), Style('5'), Style('6'),
Style('7'), Style('8'), Style('9')
)
ascii_canvas.add_nine_patch_rect(0, 0, 5, 5, nine_patch_style)
canvas_with_rect_str = \
line('12223') + \
line('45556') + \
line('45556') + \
line('45556') + \
last('78889')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_rect_str, 'Incorrect canvas with 9-patch rect')
ascii_canvas = AsciiCanvas(2, 2)
ascii_canvas.add_nine_patch_rect(0, 0, 2, 2)
canvas_with_rect_str = \
line('..') + \
last("`'")
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_rect_str, 'Incorrect canvas with 9-patch rect')
ascii_canvas = AsciiCanvas(2, 2)
ascii_canvas.add_nine_patch_rect(0, 0, 0, 0)
canvas_with_rect_str = \
line(' ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_rect_str, 'Incorrect canvas with 9-patch rect')
ascii_canvas = AsciiCanvas(2, 2)
ascii_canvas.add_nine_patch_rect(1, 1, 1, 1, nine_patch_style)
canvas_with_rect_str = \
line(' ') + \
last(' 1')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_rect_str, 'Incorrect canvas with 9-patch rect')
ascii_canvas = AsciiCanvas(2, 2)
ascii_canvas.add_nine_patch_rect(1, 1, -1, -1)
canvas_with_rect_str = \
line(' ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_with_rect_str, 'Incorrect canvas with 9-patch rect')
def test_ellipse_draw(self):
ascii_canvas = AsciiCanvas(3, 3)
ascii_canvas.add_ellipse(0, 0, 2, 2)
canvas_str = \
line('oo ') + \
line('oo ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str, 'Incorrect canvas with ellipse')
ascii_canvas = AsciiCanvas(3, 3)
ascii_canvas.add_ellipse(0, 0, 1, 1)
canvas_str = \
line('o ') + \
line(' ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str, 'Incorrect canvas with ellipse')
ascii_canvas = AsciiCanvas(3, 3)
ascii_canvas.add_ellipse(0, 0, 0, 0)
canvas_str = \
line(' ') + \
line(' ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str, 'Incorrect canvas with ellipse')
ascii_canvas = AsciiCanvas(3, 3)
ascii_canvas.add_ellipse(3, 3, -2, -2)
canvas_str = \
line(' ') + \
line(' ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str, 'Incorrect canvas with ellipse')
ascii_canvas = AsciiCanvas(3, 3)
ascii_canvas.add_ellipse(0, 2, 3, 1)
canvas_str = \
line(' ') + \
line(' ') + \
last('ooo')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str, 'Incorrect canvas with ellipse')
ascii_canvas = AsciiCanvas(5, 5)
ascii_canvas.add_ellipse(1, 1, 3, 3, Style('o'), Style('O'))
canvas_str = \
line(' ') + \
line(' O ') + \
line(' OoO ') + \
line(' O ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str, 'Incorrect canvas with ellipse')
ascii_canvas = AsciiCanvas(5, 5)
ascii_canvas.add_ellipse(0, 0, 4, 5)
canvas_str = \
line(' oo ') + \
line('o o ') + \
line('o o ') + \
line('o o ') + \
last(' oo ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str, 'Incorrect canvas with ellipse')
ascii_canvas = AsciiCanvas(5, 5)
ascii_canvas.add_ellipse(0, 0, 3, 5)
canvas_str = \
line(' o ') + \
line('o o ') + \
line('o o ') + \
line('o o ') + \
last(' o ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str, 'Incorrect canvas with ellipse')
ascii_canvas = AsciiCanvas(5, 5)
ascii_canvas.add_ellipse(0, -1, 2, 7)
canvas_str = \
line('oo ') + \
line('oo ') + \
line('oo ') + \
line('oo ') + \
last('oo ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str, 'Incorrect canvas with ellipse')
ascii_canvas = AsciiCanvas(5, 5)
ascii_canvas.add_ellipse(0, 0, 5, 3)
canvas_str = \
line(' ooo ') + \
line('o o') + \
line(' ooo ') + \
line(' ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str, 'Incorrect canvas with ellipse')
ascii_canvas = AsciiCanvas(15, 5)
ascii_canvas.add_ellipse(0, 0, 15, 5, Style('.'))
canvas_str = \
line(' ooooooooo ') + \
line(' oo.........oo ') + \
line('o.............o') + \
line(' oo.........oo ') + \
last(' ooooooooo ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str, 'Incorrect canvas with ellipse')
def test_draw_order(self):
ascii_canvas = AsciiCanvas(3, 3)
ascii_canvas.add_text(0, 0, 'TE')
ascii_canvas.add_text(0, 1, 'XT')
canvas_str = \
line('TE ') + \
line('XT ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str, 'Incorrect canvas with text')
# must overlap the text
ascii_canvas.add_rect(0, 0, 3, 3)
canvas_str = \
line('ooo') + \
line('o o') + \
last('ooo')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str, 'Incorrect canvas with rect')
def test_output(self):
ascii_canvas = AsciiCanvas(5, 5)
ascii_canvas.add_point(2, 2)
canvas_str = \
line(' ') + \
line(' ') + \
line(' o ') + \
line(' ') + \
last(' ')
self.assertEqual(ascii_canvas.get_canvas_as_str(), canvas_str)
self.assertEqual(str(ascii_canvas), canvas_str)
class TestColors(unittest.TestCase):
"""
Test cases for Colors
"""
def test_rgb_to_terminal_color(self):
self.assertEqual(colors.rgb_to_terminal_color((0, 0, 0)), colors.black)
self.assertEqual(colors.rgb_to_terminal_color((255, 0, 0)), colors.light_red)
self.assertEqual(colors.rgb_to_terminal_color((0, 255, 0)), colors.light_green)
self.assertEqual(colors.rgb_to_terminal_color((0, 0, 255)), colors.light_blue)
self.assertEqual(colors.rgb_to_terminal_color((44, 0, 22)), colors.black)
self.assertEqual(colors.rgb_to_terminal_color((65, 65, 0)), colors.yellow)
self.assertEqual(colors.rgb_to_terminal_color((140, 140, 140)), colors.dark_gray)
self.assertEqual(colors.rgb_to_terminal_color((190, 190, 190)), colors.gray)
self.assertEqual(colors.rgb_to_terminal_color((255, 255, 255)), colors.white)
def test_rgb_to_terminal_rgb_ranges(self):
with self.assertRaises(Exception):
colors.rgb_to_terminal_color((333, 0, 0))
with self.assertRaises(Exception):
colors.rgb_to_terminal_color((-1, 0, 0))
with self.assertRaises(Exception):
colors.rgb_to_terminal_color((0, 1111, 0))
with self.assertRaises(Exception):
colors.rgb_to_terminal_color((0, 0, 555))
with self.assertRaises(Exception):
colors.rgb_to_terminal_color((0, -123, 0))
with self.assertRaises(Exception):
colors.rgb_to_terminal_color((0, 0, -123))
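# Helpers used by the tests above to build expected canvas strings: line() is a
# canvas row followed by a newline, last() is the final row without one.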
def line(s):
return s + '\n'
def last(s):
return s
if __name__ == '__main__':
unittest.main(verbosity=1)
|
|
import bcrypt
import datetime
import hashlib
import math
from collections import defaultdict
from flask import (
abort, render_template, flash, g, request, redirect, make_response, url_for)
from . import app
from .lib import current_page, authenticate, authenticated_endpoint, validate_signup, create_user, hash_password, validate_password, send_reset_password_email
from .models import (
AREA_ORDER_MAP,
db,
DEFAULT_ORDER,
PasswordReset,
QSTATUS,
Quote,
QuoteToTag,
Tag,
User,
VoteToUser
)
MEMBER_AREAS = ['favourites', 'disapproved']
ADMIN_AREAS = ['unapproved', 'reported', 'deleted']
@app.before_request
def before_request():
g.current_page = current_page()
g.user = None
auth = request.cookies.get('auth')
username = request.cookies.get('username')
level = request.cookies.get('level')
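# The auth cookie is an MD5 hex digest of 'COOKIE_SECRET:username:level';
# recompute it from the other cookies and compare to detect tampering.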
if auth:
value = '{}:{}:{}'.format(app.config['COOKIE_SECRET'], username, level)
if auth == hashlib.md5(value).hexdigest():
user = User.query.filter(User.username == username).first()
if user:
g.user = user
if g.user and g.user.is_admin:
g.unapproved_quotes = Quote.query.filter(
Quote.status == QSTATUS['unapproved']).count()
@app.route('/')
def landing_page():
return render_template('/index.html')
@app.route('/browse')
@app.route('/browse/<int:quote_id>')
@app.route('/browse/<area>')
def browse(area=None, quote_id=None):
if area in MEMBER_AREAS and not g.user:
flash('You must be logged in to view that page.', 'info')
return redirect(url_for('login', redirect_url=request.path))
if area in ADMIN_AREAS and (not g.user or not g.user.is_admin):
abort(404)
g.page = area or 'browse'
quotes = Quote.query
if quote_id is not None:
quotes = quotes.filter(Quote.id == quote_id)
if not quotes or quotes[0].status != QSTATUS['approved']:
abort(404)
else:
# Filtering
if area == 'favourites':
quotes = quotes.filter(Quote.id.in_([quote.id for quote in g.user.favourites]))
elif area == 'controversial':
quotes = quotes.filter(Quote.votes > 0)
else:
try:
quotes = quotes.filter(Quote.status == QSTATUS[area])
except KeyError:
# This is the default case, for areas like "best" and "worst", that
# don't have a specific filter.
quotes = quotes.filter(Quote.status == QSTATUS['approved'])
# Ordering
quotes = quotes.order_by(*AREA_ORDER_MAP.get(area, DEFAULT_ORDER))
pagination = quotes.paginate(
g.current_page, app.config['QUOTES_PER_PAGE'], error_out=True)
if quote_id or area == 'random':
pagination.items = pagination.items[:1]
return render_template('/browse.html', pagination=pagination)
def _generate_tagcloud(tags):
counts = defaultdict(int)
quote_to_tag = db.session.query(QuoteToTag).all()
tags = {t.id: t.tag for t in Tag.query.all()}
for quote_id, tag_id in quote_to_tag:
counts[tag_id] += 1
cloud = {}
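# Weight each tag by the logarithm (base e/2) of its usage count so heavily
# used tags don't dwarf the rest of the cloud.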
for tag_id, count in counts.iteritems():
tag = tags.get(tag_id)
if tag:
cloud[tag] = math.log(count, math.e/2)
return cloud
@app.route('/browse/tags')
@app.route('/browse/tags/<tag>')
def browse_by_tags(tag=None, page=None):
if not tag:
tags = Tag.query.all()
return render_template('tagcloud.html', tagcloud=_generate_tagcloud(tags))
else:
tag = Tag.query.filter(Tag.tag == tag).one()
q = Quote.query
q = q.filter(Quote.tags.contains(tag))
q = q.filter(Quote.status == QSTATUS['approved'])
q = q.order_by(Quote.submitted.desc())
pagination = q.paginate(
g.current_page,
app.config['QUOTES_PER_PAGE'],
error_out=True
)
return render_template('/browse.html', pagination=pagination)
@app.route('/search', methods=['POST'])
def search():
term = request.form['term']
return redirect(url_for('display_search_results', term=term))
@app.route('/search/<term>')
def display_search_results(term=None, page=None):
quotes = Quote.query.filter(Quote.body.like('%' + term + '%')).filter(
Quote.status == QSTATUS['approved']).order_by(Quote.submitted.desc())
pagination = quotes.paginate(
g.current_page, app.config['QUOTES_PER_PAGE'], error_out=True)
g.page = 'search: %s' % term
return render_template('/browse.html', pagination=pagination)
@app.route('/create', methods=['GET', 'POST'])
@authenticated_endpoint
def new_quote():
if request.method == 'GET':
g.page = 'new quote'
return render_template('/create_quote.html')
else:
quote_body = request.form.get('quote_body')
if not quote_body:
abort(400)
notes = request.form.get('notes', '')
tags = filter(None, request.form.get('tags', '').replace(',', ' ').split(' '))
quote = Quote()
quote.body = quote_body
quote.notes = notes
quote.submitted_by = g.user
quote.voters.append(VoteToUser(direction='up', user=g.user))
quote.votes = 1
quote.rating = 1
quote.tags = []
for tagname in tags:
tag = Tag.query.filter(Tag.tag == tagname).first()
if not tag:
tag = Tag()
tag.tag = tagname
db.session.add(tag)
quote.tags.append(tag)
db.session.add(quote)
db.session.commit()
flash("Success! Your quote will appear once it's been approved.", 'info')
return redirect(url_for('browse'))
@app.route('/signup', methods=['GET', 'POST'])
def signup():
g.page = 'sign up'
if request.method == 'GET':
return render_template('/signup.html')
username = request.form['username']
password = request.form['password']
password_confirm = request.form['password_confirm']
email = request.form['email']
validity = validate_signup(username, password, password_confirm, email)
if not validity['status']:
flash(validity['msg'], 'error')
return render_template('/signup.html')
try:
create_user(username, password, email)
authenticate(username, password)
g.user = User.query.filter(User.username == username).first()
return render_template('/signup_success.html')
except NameError, e:
flash(e.__str__(), 'error')
return render_template('/signup.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
g.page = 'log in'
if request.method == 'GET':
return render_template('/login.html', redirect_url=request.args.get('redirect_url', ''))
user = authenticate(request.form['username'], request.form['password'])
if not user:
flash('Incorrect username / password', 'error')
return render_template('/login.html')
cleartext_value = '{}:{}:{}'.format(
app.config['COOKIE_SECRET'], user.username, user.level)
auth = hashlib.md5(cleartext_value).hexdigest()
if request.args.get('redirect_url') not in [None, '/signup', '/logout', '/reset_password']:
response = make_response(redirect(request.args.get('redirect_url')))
else:
response = make_response(redirect(url_for('browse')))
expiry = datetime.datetime.now() + datetime.timedelta(
days=app.config['COOKIE_LIFETIME'])
response.set_cookie('auth', auth, expires=expiry)
response.set_cookie('username', user.username, expires=expiry)
response.set_cookie('level', str(user.level), expires=expiry)
return response
@app.route('/logout')
def logout():
g.page = 'logout'
response = make_response(redirect(url_for('landing_page')))
response.set_cookie('auth', '', expires=0)
response.set_cookie('username', '', expires=0)
response.set_cookie('level', '', expires=0)
g.user = None
flash('Logged out successfully!', 'info')
return response
@app.route('/reset_password', methods=['GET', 'POST'])
def reset_password():
if request.method == 'GET':
if not request.args.get('key'):
return render_template('/pw_reset/request.html')
token = PasswordReset.query.filter_by(key=request.args['key']).first()
if token and token.is_valid:
return render_template('/pw_reset/set.html', key=token.key)
else:
flash('Invalid reset token', 'error')
return render_template('/index.html')
elif request.method == 'POST':
if not request.args.get('key'):
# The user has requested a new token
email = request.form['email']
user = User.query.filter_by(email=email).first()
if not user:
flash('Invalid email address provided.', 'error')
return render_template('/pw_reset/request.html')
# If this user already has a valid reset token, don't
# let them create a new one until it has expired
existing_token = PasswordReset.query.filter_by(user_id=user.id).first()
if existing_token:
if existing_token.is_valid:
flash('A password reset has already been requested for this user.', 'error')
return render_template('/pw_reset/request.html')
else:
db.session.delete(existing_token)
db.session.commit()
token = PasswordReset()
token.user = user
db.session.add(token)
db.session.commit()
send_reset_password_email(email, token.key)
flash('Password reset email sent!', 'success')
return render_template('/index.html')
else:
# Reset the password to what they provided
token = PasswordReset.query.filter_by(key=request.args['key']).first()
if not token or not token.is_valid:
flash('Invalid reset token.', 'error')
return render_template('/pw_reset/request.html')
password = request.form['password']
confirm_password = request.form['password_confirm']
valid_password = validate_password(password, confirm_password)
if valid_password['status']:
user = User.query.get(token.user_id)
user.password = hash_password(password)
db.session.delete(token)
db.session.commit()
flash('Password successfully reset. You should now be able to log in.', 'success')
return render_template('/login.html')
else:
flash(valid_password['msg'], 'error')
return render_template('/pw_reset/set.html', key=token.key)
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Helper CGI for logins/logout in the development application server.
This CGI has these parameters:
continue: URL to redirect to after a login or logout has completed.
email: Email address to set for the client.
admin: If 'True', the client should be logged in as an admin.
action: What action to take ('Login' or 'Logout').
To view the current user information and a form for logging in and out,
supply no parameters.
"""
import cgi
import Cookie
import md5
import os
import sys
import urllib
CONTINUE_PARAM = 'continue'
EMAIL_PARAM = 'email'
ADMIN_PARAM = 'admin'
ACTION_PARAM = 'action'
LOGOUT_ACTION = 'Logout'
LOGIN_ACTION = 'Login'
LOGOUT_PARAM = 'action=%s' % LOGOUT_ACTION
COOKIE_NAME = 'dev_appserver_login'
def GetUserInfo(http_cookie, cookie_name=COOKIE_NAME):
"""Get the requestor's user info from the HTTP cookie in the CGI environment.
Args:
http_cookie: Value of the HTTP_COOKIE environment variable.
cookie_name: Name of the cookie that stores the user info.
Returns:
Tuple (email, admin) where:
email: The user's email address, if any.
admin: True if the user is an admin; False otherwise.
"""
cookie = Cookie.SimpleCookie(http_cookie)
cookie_value = ''
if cookie_name in cookie:
cookie_value = cookie[cookie_name].value
email, admin, user_id = (cookie_value.split(':') + ['', '', ''])[:3]
return email, (admin == 'True'), user_id
def CreateCookieData(email, admin):
"""Creates cookie payload data.
Args:
email, admin: Parameters to incorporate into the cookie.
Returns:
String containing the cookie payload.
"""
admin_string = 'False'
if admin:
admin_string = 'True'
if email:
user_id_digest = md5.new(email.lower()).digest()
user_id = '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20]
else:
user_id = ''
return '%s:%s:%s' % (email, admin_string, user_id)
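# For example, CreateCookieData('test@example.com', True) returns a string of
# the form 'test@example.com:True:1<20 digits derived from the MD5 of the
# lower-cased email>'.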
def SetUserInfoCookie(email, admin, cookie_name=COOKIE_NAME):
"""Creates a cookie to set the user information for the requestor.
Args:
email: Email to set for the user.
admin: True if the user should be admin; False otherwise.
cookie_name: Name of the cookie that stores the user info.
Returns:
'Set-Cookie' header for setting the user info of the requestor.
"""
cookie_value = CreateCookieData(email, admin)
set_cookie = Cookie.SimpleCookie()
set_cookie[cookie_name] = cookie_value
set_cookie[cookie_name]['path'] = '/'
return '%s\r\n' % set_cookie
def ClearUserInfoCookie(cookie_name=COOKIE_NAME):
"""Clears the user info cookie from the requestor, logging them out.
Args:
cookie_name: Name of the cookie that stores the user info.
Returns:
'Set-Cookie' header for clearing the user info of the requestor.
"""
set_cookie = Cookie.SimpleCookie()
set_cookie[cookie_name] = ''
set_cookie[cookie_name]['path'] = '/'
set_cookie[cookie_name]['max-age'] = '0'
return '%s\r\n' % set_cookie
LOGIN_TEMPLATE = """<html>
<head>
<title>Login</title>
</head>
<body>
<form method='get' action='%(login_url)s'
style='text-align:center; font: 13px sans-serif'>
<div style='width: 20em; margin: 1em auto;
text-align:left;
padding: 0 2em 1.25em 2em;
background-color: #d6e9f8;
border: 2px solid #67a7e3'>
<h3>%(login_message)s</h3>
<p style='padding: 0; margin: 0'>
<label for='email' style="width: 3em">Email:</label>
<input name='email' type='text' value='%(email)s' id='email'/>
</p>
<p style='margin: .5em 0 0 3em; font-size:12px'>
<input name='admin' type='checkbox' value='True'
%(admin_checked)s id='admin'/>
<label for='admin'>Sign in as Administrator</label>
</p>
<p style='margin-left: 3em'>
<input name='action' value='Login' type='submit'
id='submit-login' />
<input name='action' value='Logout' type='submit'
id='submit-logout' />
</p>
</div>
<input name='continue' type='hidden' value='%(continue_url)s'/>
</form>
</body>
</html>
"""
def RenderLoginTemplate(login_url, continue_url, email, admin):
"""Renders the login page.
Args:
login_url, continue_url, email, admin: Parameters passed to
LoginCGI.
Returns:
String containing the contents of the login page.
"""
login_message = 'Not logged in'
if email:
login_message = 'Logged in'
admin_checked = ''
if admin:
admin_checked = 'checked'
template_dict = {
'email': email or 'test\x40example.com',
'admin_checked': admin_checked,
'login_message': login_message,
'login_url': login_url,
'continue_url': continue_url
}
return LOGIN_TEMPLATE % template_dict
def LoginRedirect(login_url,
hostname,
port,
relative_url,
outfile):
"""Writes a login redirection URL to a user.
Args:
login_url: Relative URL which should be used for handling user logins.
hostname: Name of the host on which the webserver is running.
port: Port on which the webserver is running.
relative_url: String containing the URL accessed.
outfile: File-like object to which the response should be written.
"""
dest_url = "http://%s:%s%s" % (hostname, port, relative_url)
redirect_url = 'http://%s:%s%s?%s=%s' % (hostname,
port,
login_url,
CONTINUE_PARAM,
urllib.quote(dest_url))
outfile.write('Status: 302 Requires login\r\n')
outfile.write('Location: %s\r\n\r\n' % redirect_url)
def LoginCGI(login_url,
email,
admin,
action,
set_email,
set_admin,
continue_url,
outfile):
"""Runs the login CGI.
This CGI does not care about the method at all. For both POST and GET the
client will be redirected to the continue URL.
Args:
login_url: URL used to run the CGI.
email: Current email address of the requesting user.
admin: True if the requesting user is an admin; False otherwise.
action: The action used to run the CGI; 'Login' for a login action, 'Logout'
for when a logout should occur.
set_email: Email to set for the user; Empty if no email should be set.
set_admin: True if the user should be an admin; False otherwise.
continue_url: URL to which the user should be redirected when the CGI
finishes loading; defaults to the login_url with no parameters (showing
current status) if not supplied.
outfile: File-like object to which all output data should be written.
"""
redirect_url = ''
output_headers = []
if action:
if action.lower() == LOGOUT_ACTION.lower():
output_headers.append(ClearUserInfoCookie())
elif set_email:
output_headers.append(SetUserInfoCookie(set_email, set_admin))
redirect_url = continue_url or login_url
if redirect_url:
outfile.write('Status: 302 Redirecting to continue URL\r\n')
for header in output_headers:
outfile.write(header)
outfile.write('Location: %s\r\n' % redirect_url)
outfile.write('\r\n')
else:
outfile.write('Status: 200\r\n')
outfile.write('Content-Type: text/html\r\n')
outfile.write('\r\n')
outfile.write(RenderLoginTemplate(login_url,
continue_url,
email,
admin))
def main():
"""Runs the login and logout CGI script."""
form = cgi.FieldStorage()
login_url = os.environ['PATH_INFO']
email = os.environ.get('USER_EMAIL', '')
admin = os.environ.get('USER_IS_ADMIN', '0') == '1'
action = form.getfirst(ACTION_PARAM)
set_email = form.getfirst(EMAIL_PARAM, '')
set_admin = form.getfirst(ADMIN_PARAM, '') == 'True'
continue_url = form.getfirst(CONTINUE_PARAM, '')
LoginCGI(login_url,
email,
admin,
action,
set_email,
set_admin,
continue_url,
sys.stdout)
return 0
if __name__ == '__main__':
main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from django.utils.html import escape
from horizon.workflows import views
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.networks import workflows
INDEX_URL = reverse('horizon:project:networks:index')
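# The helpers below convert a subnet API object into the POST data the network
# create/update workflows expect; list-valued fields (allocation pools, DNS
# nameservers, host routes) are rendered one entry per line.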
def form_data_subnet(subnet,
name=None, cidr=None, ip_version=None,
gateway_ip='', enable_dhcp=None,
allocation_pools=None,
dns_nameservers=None,
host_routes=None):
def get_value(value, default):
return default if value is None else value
data = {}
data['subnet_name'] = get_value(name, subnet.name)
data['cidr'] = get_value(cidr, subnet.cidr)
data['ip_version'] = get_value(ip_version, subnet.ip_version)
gateway_ip = subnet.gateway_ip if gateway_ip == '' else gateway_ip
data['gateway_ip'] = gateway_ip or ''
data['no_gateway'] = (gateway_ip is None)
data['enable_dhcp'] = get_value(enable_dhcp, subnet.enable_dhcp)
pools = get_value(allocation_pools, subnet.allocation_pools)
data['allocation_pools'] = _str_allocation_pools(pools)
nameservers = get_value(dns_nameservers, subnet.dns_nameservers)
data['dns_nameservers'] = _str_dns_nameservers(nameservers)
routes = get_value(host_routes, subnet.host_routes)
data['host_routes'] = _str_host_routes(routes)
return data
def form_data_no_subnet():
return {'subnet_name': '',
'cidr': '',
'ip_version': 4,
'gateway_ip': '',
'no_gateway': False,
'enable_dhcp': True,
'allocation_pools': '',
'dns_nameservers': '',
'host_routes': ''}
def _str_allocation_pools(allocation_pools):
if isinstance(allocation_pools, str):
return allocation_pools
return '\n'.join(['%s,%s' % (pool['start'], pool['end'])
for pool in allocation_pools])
def _str_dns_nameservers(dns_nameservers):
if isinstance(dns_nameservers, str):
return dns_nameservers
return '\n'.join(dns_nameservers)
def _str_host_routes(host_routes):
if isinstance(host_routes, str):
return host_routes
return '\n'.join(['%s,%s' % (route['destination'], route['nexthop'])
for route in host_routes])
class NetworkTests(test.TestCase):
@test.create_stubs({api.neutron: ('network_list',)})
def test_index(self):
api.neutron.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/networks/index.html')
networks = res.context['networks_table'].data
self.assertItemsEqual(networks, self.networks.list())
@test.create_stubs({api.neutron: ('network_list',)})
def test_index_network_list_exception(self):
api.neutron.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/networks/index.html')
self.assertEqual(len(res.context['networks_table'].data), 0)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',)})
def test_network_detail(self):
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertItemsEqual(subnets, [self.subnets.first()])
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',)})
def test_network_detail_network_exception(self):
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:detail', args=[network_id])
res = self.client.get(url)
redir_url = INDEX_URL
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',)})
def test_network_detail_subnet_exception(self):
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
AndRaise(self.exceptions.neutron)
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id).\
AndReturn([self.ports.first()])
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertEqual(len(subnets), 0)
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('network_get',
'subnet_list',
'port_list',)})
def test_network_detail_port_exception(self):
network_id = self.networks.first().id
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
AndReturn([self.subnets.first()])
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id).\
AndRaise(self.exceptions.neutron)
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'project/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertItemsEqual(subnets, [self.subnets.first()])
self.assertEqual(len(ports), 0)
@test.create_stubs({api.neutron: ('profile_list',)})
def test_network_create_get(self):
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
net_profiles = self.net_profiles.list()
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:create')
res = self.client.get(url)
workflow = res.context['workflow']
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertEqual(workflow.name, workflows.CreateNetwork.name)
expected_objs = ['<CreateNetworkInfo: createnetworkinfoaction>',
'<CreateSubnetInfo: createsubnetinfoaction>',
'<CreateSubnetDetail: createsubnetdetailaction>']
self.assertQuerysetEqual(workflow.steps, expected_objs)
@test.create_stubs({api.neutron: ('network_create',
'profile_list',)})
def test_network_create_post(self):
network = self.networks.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndReturn(network)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
# subnet
'with_subnet': False}
if api.neutron.is_port_profiles_supported():
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_no_subnet())
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_create',
'subnet_create',
'profile_list',)})
def test_network_create_post_with_subnet(self):
network = self.networks.first()
subnet = self.subnets.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if api.neutron.is_port_profiles_supported():
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_create',
'profile_list',)})
def test_network_create_post_network_exception(self):
network = self.networks.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
# subnet
'with_subnet': False}
if api.neutron.is_port_profiles_supported():
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_no_subnet())
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_create',
'profile_list')})
def test_network_create_post_with_subnet_network_exception(self):
network = self.networks.first()
subnet = self.subnets.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if api.neutron.is_port_profiles_supported():
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_create',
'network_delete',
'subnet_create',
'profile_list')})
def test_network_create_post_with_subnet_subnet_exception(self):
network = self.networks.first()
subnet = self.subnets.first()
params = {'name': network.name,
'admin_state_up': network.admin_state_up}
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
params['net_profile_id'] = net_profile_id
api.neutron.network_create(IsA(http.HttpRequest),
**params).AndReturn(network)
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp)\
.AndRaise(self.exceptions.neutron)
api.neutron.network_delete(IsA(http.HttpRequest),
network.id)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if api.neutron.is_port_profiles_supported():
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('profile_list',)})
def test_network_create_post_with_subnet_nocidr(self):
network = self.networks.first()
subnet = self.subnets.first()
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if api.neutron.is_port_profiles_supported():
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, cidr='',
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertContains(res, escape('Specify "Network Address" or '
'clear "Create Subnet" checkbox.'))
@test.create_stubs({api.neutron: ('profile_list',)})
def test_network_create_post_with_subnet_cidr_without_mask(self):
network = self.networks.first()
subnet = self.subnets.first()
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if api.neutron.is_port_profiles_supported():
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, cidr='10.0.0.0',
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
expected_msg = "The subnet in the Network Address is too small (/32)."
self.assertContains(res, expected_msg)
@test.create_stubs({api.neutron: ('profile_list',)})
def test_network_create_post_with_subnet_cidr_inconsistent(self):
network = self.networks.first()
subnet = self.subnets.first()
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
self.mox.ReplayAll()
# dummy IPv6 address
cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if api.neutron.is_port_profiles_supported():
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, cidr=cidr,
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
expected_msg = 'Network Address and IP version are inconsistent.'
self.assertContains(res, expected_msg)
@test.create_stubs({api.neutron: ('profile_list',)})
def test_network_create_post_with_subnet_gw_inconsistent(self):
network = self.networks.first()
subnet = self.subnets.first()
# TODO(absubram): Remove if clause and create separate
# test stubs for when profile_support is being used.
# Additionally ensure those are always run even in default setting
if api.neutron.is_port_profiles_supported():
net_profiles = self.net_profiles.list()
net_profile_id = self.net_profiles.first().id
api.neutron.profile_list(IsA(http.HttpRequest),
'network').AndReturn(net_profiles)
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = {'net_name': network.name,
'admin_state': network.admin_state_up,
'with_subnet': True}
if api.neutron.is_port_profiles_supported():
form_data['net_profile_id'] = net_profile_id
form_data.update(form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[]))
url = reverse('horizon:project:networks:create')
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
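# Note on the "dummy IPv6 address" cases above: the subnet fixture
# (self.subnets.first()) used to build the form data is IPv4, so passing a
# syntactically valid IPv6 CIDR or gateway is what triggers the
# "... and IP version are inconsistent." form errors asserted in these tests.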
@test.create_stubs({api.neutron: ('network_get',)})
def test_network_update_get(self):
network = self.networks.first()
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/networks/update.html')
@test.create_stubs({api.neutron: ('network_get',)})
def test_network_update_get_exception(self):
network = self.networks.first()
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.get(url)
redir_url = INDEX_URL
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_update',
'network_get',)})
def test_network_update_post(self):
network = self.networks.first()
api.neutron.network_update(IsA(http.HttpRequest), network.id,
name=network.name,
admin_state_up=network.admin_state_up)\
.AndReturn(network)
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
form_data = {'network_id': network.id,
'name': network.name,
'admin_state': network.admin_state_up,
'tenant_id': network.tenant_id}
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_update',
'network_get',)})
def test_network_update_post_exception(self):
network = self.networks.first()
api.neutron.network_update(IsA(http.HttpRequest), network.id,
name=network.name,
admin_state_up=network.admin_state_up)\
.AndRaise(self.exceptions.neutron)
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
form_data = {'network_id': network.id,
'name': network.name,
'admin_state': network.admin_state_up,
'tenant_id': network.tenant_id}
url = reverse('horizon:project:networks:update', args=[network.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_list',
'subnet_list',
'network_delete')})
def test_delete_network_no_subnet(self):
network = self.networks.first()
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True)\
.AndReturn([])
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network.id)\
.AndReturn([])
api.neutron.network_delete(IsA(http.HttpRequest), network.id)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_list',
'subnet_list',
'network_delete',
'subnet_delete')})
def test_delete_network_with_subnet(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.neutron.network_list(IsA(http.HttpRequest), shared=True)\
.AndReturn([])
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network.id)\
.AndReturn([subnet])
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
api.neutron.network_delete(IsA(http.HttpRequest), network.id)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_list',
'subnet_list',
'network_delete',
'subnet_delete')})
def test_delete_network_exception(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True)\
.AndReturn([])
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network.id)\
.AndReturn([subnet])
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
api.neutron.network_delete(IsA(http.HttpRequest), network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
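# All of the tests in these classes follow the same mox lifecycle: the
# @test.create_stubs decorator swaps the listed api.neutron functions for
# mocks, each call recorded before self.mox.ReplayAll() becomes an expected
# call with its AndReturn()/AndRaise() result, the view is then exercised
# through self.client, and the base test case verifies and unstubs the mocks
# in tearDown. A minimal sketch of the pattern with placeholder values:
#
#     api.neutron.network_get(IsA(http.HttpRequest), 'net-id') \
#         .AndReturn(network)          # record one expected call
#     self.mox.ReplayAll()             # switch from record to replay mode
#     res = self.client.get(url)       # exercise the view under test
#     # verification/unstubbing happens implicitly in tearDown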
class NetworkSubnetTests(test.TestCase):
@test.create_stubs({api.neutron: ('subnet_get',)})
def test_subnet_detail(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(self.subnets.first())
self.mox.ReplayAll()
url = reverse('horizon:project:networks:subnets:detail',
args=[subnet.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/networks/subnets/detail.html')
self.assertEqual(res.context['subnet'].id, subnet.id)
@test.create_stubs({api.neutron: ('subnet_get',)})
def test_subnet_detail_exception(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:subnets:detail',
args=[subnet.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_get(self):
network = self.networks.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
url = reverse('horizon:project:networks:addsubnet',
args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_with_additional_attributes(self):
network = self.networks.list()[1]
subnet = self.subnets.list()[1]
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools,
dns_nameservers=subnet.dns_nameservers,
host_routes=subnet.host_routes)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_with_additional_attributes_no_gateway(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=None,
enable_dhcp=subnet.enable_dhcp,
allocation_pools=subnet.allocation_pools)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet, gateway_ip=None)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_network_exception(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('network_get',
'subnet_create',)})
def test_subnet_create_post_subnet_exception(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.neutron.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip,
enable_dhcp=subnet.enable_dhcp)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_cidr_inconsistent(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
# dummy IPv6 address
cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
form_data = form_data_subnet(subnet, cidr=cidr,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
expected_msg = 'Network Address and IP version are inconsistent.'
self.assertFormErrors(res, 1, expected_msg)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_gw_inconsistent(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_pools_start_only(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# Start only allocation_pools
allocation_pools = '10.0.0.2'
form_data = form_data_subnet(subnet,
allocation_pools=allocation_pools)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Start and end addresses must be specified '
'(value=%s)' % allocation_pools)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_pools_three_entries(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# pool with three entries
allocation_pools = '10.0.0.2,10.0.0.3,10.0.0.4'
form_data = form_data_subnet(subnet,
allocation_pools=allocation_pools)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Start and end addresses must be specified '
'(value=%s)' % allocation_pools)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_pools_invalid_address(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# end address is not a valid IP address
allocation_pools = '10.0.0.2,invalid_address'
form_data = form_data_subnet(subnet,
allocation_pools=allocation_pools)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'allocation_pools: Invalid IP address '
'(value=%s)' % allocation_pools.split(',')[1])
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_pools_ip_network(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# start address is CIDR
allocation_pools = '10.0.0.2/24,10.0.0.5'
form_data = form_data_subnet(subnet,
allocation_pools=allocation_pools)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'allocation_pools: Invalid IP address '
'(value=%s)' % allocation_pools.split(',')[0])
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_pools_start_larger_than_end(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# start address is larger than end address
allocation_pools = '10.0.0.254,10.0.0.2'
form_data = form_data_subnet(subnet,
allocation_pools=allocation_pools)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Start address is larger than end address '
'(value=%s)' % allocation_pools)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_nameservers(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# invalid DNS server address
dns_nameservers = ['192.168.0.2', 'invalid_address']
form_data = form_data_subnet(subnet, dns_nameservers=dns_nameservers,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'dns_nameservers: Invalid IP address '
'(value=%s)' % dns_nameservers[1])
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_routes_destination_only(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# Destination CIDR only, no nexthop, in host_routes
host_routes = '192.168.0.0/24'
form_data = form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_routes_three_entries(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# host_route with three entries
host_routes = 'aaaa,bbbb,cccc'
form_data = form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes)
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_routes_invalid_destination(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# invalid destination network
host_routes = '172.16.0.0/64,10.0.0.253'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[0])
@test.create_stubs({api.neutron: ('network_get',)})
def test_subnet_create_post_invalid_routes_nexthop_ip_network(self):
network = self.networks.first()
subnet = self.subnets.first()
api.neutron.network_get(IsA(http.HttpRequest),
network.id).AndReturn(network)
self.mox.ReplayAll()
# nexthop is not an IP address
host_routes = '172.16.0.0/24,10.0.0.253/24'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[1])
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
enable_dhcp=subnet.enable_dhcp,
dns_nameservers=[],
host_routes=[])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_with_gateway_ip(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
gateway_ip = '10.0.0.100'
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
gateway_ip=gateway_ip,
enable_dhcp=subnet.enable_dhcp,
dns_nameservers=[],
host_routes=[])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
gateway_ip=gateway_ip,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_no_gateway(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
gateway_ip=None,
enable_dhcp=subnet.enable_dhcp,
dns_nameservers=[],
host_routes=[])\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
gateway_ip=None,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_with_additional_attributes(self):
subnet = self.subnets.list()[1]
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
enable_dhcp=False,
dns_nameservers=subnet.dns_nameservers,
host_routes=subnet.host_routes)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = form_data_subnet(subnet,
enable_dhcp=False)
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_gw_inconsistent(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = form_data_subnet(subnet, gateway_ip=gateway_ip,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_nameservers(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# invalid DNS server address
dns_nameservers = ['192.168.0.2', 'invalid_address']
form_data = form_data_subnet(subnet, dns_nameservers=dns_nameservers,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'dns_nameservers: Invalid IP address '
'(value=%s)' % dns_nameservers[1])
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_destination_only(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# Destination CIDR only, no nexthop, in host_routes
host_routes = '192.168.0.0/24'
form_data = form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes)
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_three_entries(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# host_route with three entries
host_routes = 'aaaa,bbbb,cccc'
form_data = form_data_subnet(subnet,
allocation_pools=[],
host_routes=host_routes)
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)' % host_routes)
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_invalid_destination(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# invalid destination network
host_routes = '172.16.0.0/64,10.0.0.253'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[0])
@test.create_stubs({api.neutron: ('subnet_update',
'subnet_get',)})
def test_subnet_update_post_invalid_routes_nexthop_ip_network(self):
subnet = self.subnets.first()
api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# nexthop is not an IP address
host_routes = '172.16.0.0/24,10.0.0.253/24'
form_data = form_data_subnet(subnet,
host_routes=host_routes,
allocation_pools=[])
url = reverse('horizon:project:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, form_data)
self.assertContains(res,
'host_routes: Invalid IP address '
'(value=%s)' % host_routes.split(',')[1])
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',)})
def test_subnet_delete(self):
subnet = self.subnets.first()
network_id = subnet.network_id
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
form_data = {'action': 'subnets__delete__%s' % subnet.id}
url = reverse('horizon:project:networks:detail',
args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
@test.create_stubs({api.neutron: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',)})
def test_subnet_delete_exception(self):
subnet = self.subnets.first()
network_id = subnet.network_id
api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)\
.AndRaise(self.exceptions.neutron)
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
# Called from SubnetTable
api.neutron.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
form_data = {'action': 'subnets__delete__%s' % subnet.id}
url = reverse('horizon:project:networks:detail',
args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
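# The allocation_pools and host_routes values exercised above are plain
# multi-line strings with one comma-separated pair per line: "start,end" for
# an allocation pool and "destination_cidr,nexthop" for a host route. The
# error messages asserted here come from Horizon's subnet workflow validation;
# the values below are only illustrative examples of well-formed input, not
# taken from that code:
#
#     allocation_pools = '10.0.0.2,10.0.0.254'     # start,end
#     host_routes = '172.16.0.0/24,10.0.0.253'     # destination_cidr,nexthop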
class NetworkPortTests(test.TestCase):
@test.create_stubs({api.neutron: ('port_get',)})
def test_port_detail(self):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(self.ports.first())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:ports:detail',
args=[port.id]))
self.assertTemplateUsed(res, 'project/networks/ports/detail.html')
self.assertEqual(res.context['port'].id, port.id)
@test.create_stubs({api.neutron: ('port_get',)})
def test_port_detail_exception(self):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:networks:ports:detail',
args=[port.id]))
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('port_get',)})
def test_port_update_get(self):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest),
port.id)\
.AndReturn(port)
self.mox.ReplayAll()
url = reverse('horizon:project:networks:editport',
args=[port.network_id, port.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/networks/ports/update.html')
@test.create_stubs({api.neutron: ('port_get',
'port_update')})
def test_port_update_post(self):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
api.neutron.port_update(IsA(http.HttpRequest), port.id,
name=port.name,
admin_state_up=port.admin_state_up)\
.AndReturn(port)
self.mox.ReplayAll()
form_data = {'network_id': port.network_id,
'port_id': port.id,
'name': port.name,
'admin_state': port.admin_state_up}
url = reverse('horizon:project:networks:editport',
args=[port.network_id, port.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[port.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('port_get',
'port_update')})
def test_port_update_post_exception(self):
port = self.ports.first()
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
api.neutron.port_update(IsA(http.HttpRequest), port.id,
name=port.name,
admin_state_up=port.admin_state_up)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'network_id': port.network_id,
'port_id': port.id,
'name': port.name,
'admin_state': port.admin_state_up}
url = reverse('horizon:project:networks:editport',
args=[port.network_id, port.id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:project:networks:detail',
args=[port.network_id])
self.assertRedirectsNoFollow(res, redir_url)
import re
import os
import sys
import inspect
import time
import functools
import shutil
import imp
from six import exec_
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.button import Button
from kivy.base import runTouchApp
from kivy.factory import Factory, FactoryException
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.uix.sandbox import Sandbox
from kivy.clock import Clock
from designer.helper_functions import get_indentation, get_indent_str,\
get_line_start_pos, get_kivy_designer_dir
from designer.proj_watcher import ProjectWatcher
PROJ_DESIGNER = '.designer'
KV_PROJ_FILE_NAME = os.path.join(PROJ_DESIGNER, 'kvproj')
PROJ_FILE_CONFIG = os.path.join(PROJ_DESIGNER, 'file_config.ini')
class Comment(object):
def __init__(self, string, path, _file):
super(Comment, self).__init__()
self.string = string
self.path = path
self.kv_file = _file
class WidgetRule(object):
'''WidgetRule is an abstract class for representing a rule of a Widget.
'''
def __init__(self, widget, parent):
super(WidgetRule, self).__init__()
self.name = widget
self.parent = parent
self.file = None
self.kv_file = None
self.module = None
class ClassRule(WidgetRule):
'''ClassRule is a class for representing a class rule in kv
'''
def __init__(self, class_name):
super(ClassRule, self).__init__(class_name, None)
class CustomWidgetRule(ClassRule):
'''CustomWidgetRule is a class for representing a custom widget's rule in kv
'''
def __init__(self, class_name, kv_file, py_file):
super(CustomWidgetRule, self).__init__(class_name)
self.class_name = class_name
self.kv_file = kv_file
self.py_file = py_file
class RootRule(ClassRule):
'''RootRule is a class for representing root rule in kv.
'''
def __init__(self, class_name, widget):
super(RootRule, self).__init__(class_name)
self.widget = widget
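# A brief illustration (values invented for the example) of how these rule
# objects are populated by ProjectLoader below: one ClassRule per "<Name>:"
# rule found in a kv file, and at most one RootRule for the top-level rule.
#
#     rule = ClassRule('MyWidget')
#     rule.kv_file = '/path/to/project/my.kv'
#     root = RootRule('MyRoot', root_widget_instance)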
class ProjectLoaderException(Exception):
pass
class ProjectLoader(object):
'''ProjectLoader class, used to load Project
'''
def __init__(self, proj_watcher):
super(ProjectLoader, self).__init__()
self._dir_list = []
self.proj_watcher = proj_watcher
self.class_rules = []
# custom widget rules appended by add_custom_widget() below
self.custom_widgets = []
self.root_rule = None
self.new_project = None
self.dict_file_type_and_path = {}
self.kv_file_list = []
self.kv_code_input = None
self.tab_pannel = None
self._root_rule = None
self.file_list = []
self.proj_dir = ""
self._is_root_already_in_factory = False
def _get_file_list(self, path):
'''This function is called recursively to collect all the .py and .spec files
in the current directory.
'''
file_list = []
if '.designer' in path:
return []
sys.path.insert(0, path)
self._dir_list.append(path)
for _file in os.listdir(path):
file_path = os.path.join(path, _file)
if os.path.isdir(file_path):
file_list += self._get_file_list(file_path)
else:
# Consider only .py and buildozer (.spec) files
if file_path[file_path.rfind('.'):] in [".py", ".spec"]:
if os.path.dirname(file_path) == self.proj_dir:
file_list.insert(0, file_path)
else:
file_list.append(file_path)
return file_list
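# Note: directories whose path contains '.designer' are skipped entirely, and
# .py/.spec files located directly in the project directory are inserted at
# the front of the returned list so they are considered before files found in
# sub-directories.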
def add_custom_widget(self, py_path):
'''This function is used to add a custom widget, given the path to its
py file.
'''
f = open(py_path, 'r')
py_string = f.read()
f.close()
# Find path to kv. py file will have Builder.load_file('path/to/kv')
_r = re.findall(r'Builder\.load_file\s*\(\s*.+\s*\)', py_string)
if _r == []:
raise ProjectLoaderException('Cannot find widget\'s kv file.')
py_string = py_string.replace(_r[0], '')
kv_path = _r[0][_r[0].find('(') + 1: _r[0].find(')')]
py_string = py_string.replace(kv_path, '')
kv_path = kv_path.replace("'", '').replace('"', '')
f = open(kv_path, 'r')
kv_string = f.read()
f.close()
# Remove all the 'app' lines
for app_str in re.findall(r'.+app+.+', kv_string):
kv_string = kv_string.replace(
app_str,
app_str[:get_indentation(app_str)] + '#' + app_str.lstrip())
Builder.load_string(kv_string)
sys.path.insert(0, os.path.dirname(kv_path))
_to_check = []
# Get all the class_rules
for class_str in re.findall(r'<+([\w_]+)>', kv_string):
if re.search(r'\bclass\s+%s+.+:' % class_str, py_string):
module = imp.new_module('CustomWidget')
exec_(py_string, module.__dict__)
sys.modules['AppModule'] = module
class_rule = CustomWidgetRule(class_str, kv_path, py_path)
class_rule.file = py_path
class_rule.module = module
self.custom_widgets.append(class_rule)
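# The regex above expects the custom widget's .py file to register its kv
# rules with a line like the following (illustrative path only); that call is
# stripped from the source, and the quoted path is read directly instead:
#
#     Builder.load_file('ui/my_widget.kv')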
def get_root_str(self, kv_str=''):
'''This function will get the root widget's rule either from kv_str
or, if kv_str is an empty string, from the root widget's kv file.
'''
if kv_str == '':
f = open(self.root_rule.kv_file, 'r')
kv_str = f.read()
f.close()
# Find the start position of root_rule
start_pos = kv_str.find(self.root_rule.name)
if start_pos == -1:
raise ProjectLoaderException(
'Cannot find root rule in its file')
# Get line for start_pos
_line = 0
_line_pos = 0
_line_pos = kv_str.find('\n', _line_pos + 1)
while _line_pos != -1 and _line_pos < start_pos:
_line_pos = kv_str.find('\n', _line_pos + 1)
_line += 1
# Find the end position of root_rule, where indentation becomes 0
# or file ends
_line += 1
lines = kv_str.splitlines()
_total_lines = len(lines)
while _line < _total_lines and (lines[_line].strip() == '' or
get_indentation(lines[_line]) != 0):
_line_pos = kv_str.find('\n', _line_pos + 1)
_line += 1
end_pos = _line_pos
root_old_str = kv_str[start_pos: end_pos]
for _rule in self.class_rules:
if _rule.name == self.root_rule.name:
root_old_str = "<" + root_old_str
return root_old_str
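# For reference, a root rule in kv has no angle brackets and starts at column
# zero, for example (illustrative):
#
#     MyRoot:
#         Button:
#             text: 'hello'
#
# get_root_str() returns that whole block, from the rule name up to the next
# line whose indentation returns to zero; if the root rule name also exists as
# a class rule, the leading '<' is re-attached.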
def get_full_str(self):
'''This function will give the full string of all detected kv files.
'''
text = ''
for _file in self.kv_file_list:
f = open(_file, 'r')
text += f.read() + '\n'
f.close()
return text
def load_new_project(self, kv_path):
'''To load a new project given by kv_path
'''
self.new_project = True
self._load_project(kv_path)
def load_project(self, kv_path):
'''To load a project given by kv_path
'''
ret = self._load_project(kv_path)
self.new_project = False
# Add project_dir to watch
self.proj_watcher.start_watching(self.proj_dir)
return ret
def _load_project(self, kv_path):
'''Private function to load any project given by kv_path
'''
if os.path.isdir(kv_path):
self.proj_dir = kv_path
else:
self.proj_dir = os.path.dirname(kv_path)
parent_proj_dir = os.path.dirname(self.proj_dir)
sys.path.insert(0, parent_proj_dir)
self.class_rules = []
all_files_loaded = True
_file = None
for _file in os.listdir(self.proj_dir):
# Load each kv file in the directory
_file = os.path.join(self.proj_dir, _file)
if _file[_file.rfind('.'):] != '.kv':
continue
self.kv_file_list.append(_file)
f = open(_file, 'r')
kv_string = f.read()
f.close()
# Remove all the 'app' lines
for app_str in re.findall(r'.+app+.+', kv_string):
kv_string = kv_string.replace(
app_str,
app_str[:get_indentation(app_str)] +
'#' + app_str.lstrip())
# Get all the class_rules
for class_str in re.findall(r'<+([\w_]+)>', kv_string):
class_rule = ClassRule(class_str)
class_rule.kv_file = _file
self.class_rules.append(class_rule)
try:
root_name = re.findall(r'^([\w\d_]+)\:', kv_string,
re.MULTILINE)
if root_name != []:
# It will occur when there is a root rule and it can't
# be loaded by Builder because its file
# has been imported
root_name = root_name[0]
if not hasattr(Factory, root_name):
match = re.search(r'^([\w\d_]+)\:', kv_string,
re.MULTILINE)
kv_string = kv_string[:match.start()] + \
'<' + root_name + '>:' + kv_string[match.end():]
self.root_rule = RootRule(root_name, None)
self.root_rule.kv_file = _file
self._root_rule = self.root_rule
self._is_root_already_in_factory = False
else:
self._is_root_already_in_factory = True
else:
self._is_root_already_in_factory = False
root_rule = Builder.load_string(re.sub(r'\s+on_\w+:\w+',
'', kv_string))
if root_rule:
self.root_rule = RootRule(root_rule.__class__.__name__,
root_rule)
self.root_rule.kv_file = _file
self._root_rule = self.root_rule
except Exception as e:
all_files_loaded = False
if not all_files_loaded:
raise ProjectLoaderException('Cannot load file "%s"' % (_file))
if os.path.exists(os.path.join(self.proj_dir, KV_PROJ_FILE_NAME)):
projdir_mtime = os.path.getmtime(self.proj_dir)
f = open(os.path.join(self.proj_dir, KV_PROJ_FILE_NAME), 'r')
proj_str = f.read()
f.close()
_file_is_valid = True
# Checking if the file is valid
if proj_str == '' or\
proj_str.count('<files>') != proj_str.count('</files>') or\
proj_str.count('<file>') != proj_str.count('</file>') or\
proj_str.count('<class>') != proj_str.count('</class>'):
_file_is_valid = False
if _file_is_valid:
projdir_time = proj_str[
proj_str.find('<time>') + len('<time>'):
proj_str.find('</time>')]
projdir_time = float(projdir_time.strip())
if _file_is_valid and projdir_mtime <= projdir_time:
# Project Directory folder hasn't been modified,
# file list will remain same
self.file_list = []
un_modified_files = []
start_pos = proj_str.find('<files>')
end_pos = proj_str.find('</files>')
if start_pos != -1 and end_pos != -1:
start_pos = proj_str.find('<file>', start_pos)
end_pos1 = proj_str.find('</file>', start_pos)
while start_pos < end_pos and start_pos != -1:
_file = proj_str[
start_pos + len('<file>'):end_pos1].strip()
self.file_list.append(_file)
if os.path.getmtime(_file) <= projdir_time:
un_modified_files.append(_file)
start_pos = proj_str.find('<file>', end_pos1)
end_pos1 = proj_str.find('</file>', start_pos)
for _file in self.file_list:
_dir = os.path.dirname(_file)
if _dir not in sys.path:
sys.path.insert(0, _dir)
# Reload information for app
start_pos = proj_str.find('<app>')
end_pos = proj_str.find('</app>')
if start_pos != -1 and end_pos != -1:
self._app_class = proj_str[
proj_str.find('<class>', start_pos) + len('<class>'):
proj_str.find('</class>', start_pos)].strip()
self._app_file = proj_str[
proj_str.find('<file>', start_pos) + len('<file>'):
proj_str.find('</file>', start_pos)].strip()
f = open(self._app_file, 'r')
self._app_module = self._import_module(f.read(),
self._app_file)
f.close()
# Reload information for the files which haven't been modified
start_pos = proj_str.find('<classes>')
end_pos = proj_str.find('</classes>')
if start_pos != -1 and end_pos != -1:
while start_pos < end_pos and start_pos != -1:
start_pos = proj_str.find('<class>', start_pos) +\
len('<class>')
end_pos1 = proj_str.find('</class>', start_pos)
_file = proj_str[
proj_str.find('<file>', start_pos) + len('<file>'):
proj_str.find('</file>', start_pos)].strip()
if _file in un_modified_files:
# If _file is unmodified then assign it to
# class rule with _name
_name = proj_str[
proj_str.find('<name>', start_pos) +
len('<name>'):
proj_str.find('</name>', start_pos)].strip()
for _rule in self.class_rules:
if _name == _rule.name:
_rule.file = _file
f = open(_file, 'r')
_rule.module = self._import_module(
f.read(), _file, _fromlist=[_name])
f.close()
start_pos = proj_str.find('<class>', start_pos)
end_pos1 = proj_str.find('</class>', start_pos)
if self.file_list == []:
self.file_list = self._get_file_list(self.proj_dir)
# Get all files corresponding to each class
self._get_class_files()
# If root widget is not created but root class is known
# then create widget
if self.root_rule and not self.root_rule.widget and \
self.root_rule.name:
self.root_rule.widget = self.get_widget_of_class(
self.root_rule.name)
self.load_proj_config()
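# The .designer/kvproj cache parsed above is a simple tag-delimited text file.
# A rough illustration of the layout the string searches expect (not a formal
# schema, all values invented for the example):
#
#     <time> 1389216000.0 </time>
#     <files>
#         <file> /path/to/project/main.py </file>
#     </files>
#     <app>
#         <class> MyApp </class>
#         <file> /path/to/project/main.py </file>
#     </app>
#     <classes>
#         <class>
#             <name> MyWidget </name>
#             <file> /path/to/project/widget.py </file>
#         </class>
#     </classes>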
def load_proj_config(self):
'''To load the project's config file. The config file is stored in the
.designer directory inside the project directory.
'''
try:
f = open(os.path.join(self.proj_dir, PROJ_FILE_CONFIG), 'r')
s = f.read()
f.close()
start_pos = -1
end_pos = -1
start_pos = s.find('<file_type_and_dirs>\n')
end_pos = s.find('</file_type_and_dirs>\n')
if start_pos != -1 and end_pos != -1:
for searchiter in re.finditer(r'<file_type=.+', s):
if searchiter.start() < start_pos:
continue
if searchiter.start() > end_pos:
break
found_str = searchiter.group(0)
file_type = found_str[found_str.find('"') + 1:
found_str.find(
'"', found_str.find('"') + 1)]
folder = found_str[
found_str.find('"', found_str.find('dir=') + 1) + 1:
found_str.rfind('"')]
self.dict_file_type_and_path[file_type] = folder
except IOError:
pass
def save_proj_config(self):
'''To save project's config file.
'''
string = '<file_type_and_dirs>\n'
for file_type in self.dict_file_type_and_path.keys():
string += ' <file_type="' + file_type + '"' + ' dir="' + \
self.dict_file_type_and_path[file_type] + '">\n'
string += '</file_type_and_dirs>\n'
f = open(os.path.join(self.proj_dir, PROJ_FILE_CONFIG), 'w')
f.write(string)
f.close()
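# The file written above (and read back by load_proj_config) looks roughly
# like this, with one line per known file type (illustrative values):
#
#     <file_type_and_dirs>
#         <file_type="image" dir="data/images">
#     </file_type_and_dirs>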
def add_dir_for_file_type(self, file_type, folder):
'''To add directory for specified file_type. More information in
add_file.py
'''
self.dict_file_type_and_path[file_type] = folder
self.save_proj_config()
def perform_auto_save(self, *args):
'''To perform auto save. Auto Save is done after every 5 min.
'''
if not self.root_rule:
return
auto_save_dir = os.path.join(self.proj_dir, '.designer')
auto_save_dir = os.path.join(auto_save_dir, 'auto_save')
if not os.path.exists(auto_save_dir):
os.makedirs(auto_save_dir)
else:
shutil.rmtree(auto_save_dir)
os.mkdir(auto_save_dir)
for _file in os.listdir(self.proj_dir):
if '.designer' in _file:
continue
old_file = os.path.join(self.proj_dir, _file)
new_file = os.path.join(auto_save_dir, _file)
if os.path.isdir(old_file):
shutil.copytree(old_file, new_file)
else:
shutil.copy(old_file, new_file)
root_rule_file = os.path.join(auto_save_dir,
os.path.basename(self.root_rule.kv_file))
f = open(root_rule_file, 'r')
_file_str = f.read()
f.close()
text = self.kv_code_input.text
root_str = self.get_root_str()
f = open(root_rule_file, 'w')
_file_str = _file_str.replace(root_str, text)
f.write(_file_str)
f.close()
# For custom widgets copy py and kv file
for widget in self.custom_widgets:
custom_kv = os.path.join(auto_save_dir,
os.path.basename(widget.kv_file))
if not os.path.exists(custom_kv):
shutil.copy(widget.kv_file, custom_kv)
custom_py = os.path.join(auto_save_dir,
os.path.basename(widget.py_file))
if not os.path.exists(custom_py):
shutil.copy(widget.py_file, custom_py)
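# perform_auto_save accepts *args so it can be used directly as a Clock
# callback; scheduling the five-minute auto save mentioned in the docstring
# would look like this (illustrative, the actual scheduling lives elsewhere
# in the designer):
#
#     Clock.schedule_interval(proj_loader.perform_auto_save, 5 * 60)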
def save_project(self, proj_dir=''):
'''To save the project to proj_dir. If proj_dir is a non-empty string the
project is saved to that directory instead of its current one; otherwise
it is saved in its current directory.
'''
# To stop ProjectWatcher from emitting event when project is saved
self.proj_watcher.allow_event_dispatch = False
proj_dir_changed = False
if self.new_project:
# Create dir and copy new_proj.kv and new_proj.py to new directory
if not os.path.exists(proj_dir):
os.mkdir(proj_dir)
kivy_designer_dir = get_kivy_designer_dir()
kivy_designer_new_proj_dir = os.path.join(kivy_designer_dir,
"new_proj")
for _file in os.listdir(kivy_designer_new_proj_dir):
old_file = os.path.join(kivy_designer_new_proj_dir, _file)
new_file = os.path.join(proj_dir, _file)
if os.path.isdir(old_file):
shutil.copytree(old_file, new_file)
else:
shutil.copy(old_file, new_file)
self.file_list = self._get_file_list(proj_dir)
new_kv_file = os.path.join(proj_dir, "main.kv")
new_py_file = os.path.join(proj_dir, "main.py")
self.proj_dir = proj_dir
if self.root_rule:
self.root_rule.kv_file = new_kv_file
self.root_rule.py_file = new_py_file
if self.class_rules:
self.class_rules[0].py_file = new_py_file
self.class_rules[0].kv_file = new_kv_file
self.new_project = False
else:
if proj_dir != '' and proj_dir != self.proj_dir:
proj_dir_changed = True
# Remove previous project directories from sys.path
for _dir in self._dir_list:
try:
sys.path.remove(_dir)
except:
pass
# if proj_dir and self.proj_dir differ then the user wants to save
# an already opened project to somewhere else
# Copy all the files
if not os.path.exists(proj_dir):
os.mkdir(proj_dir)
for _file in os.listdir(self.proj_dir):
old_file = os.path.join(self.proj_dir, _file)
new_file = os.path.join(proj_dir, _file)
if os.path.isdir(old_file):
shutil.copytree(old_file, new_file)
else:
shutil.copy(old_file, new_file)
self.file_list = self._get_file_list(proj_dir)
# Change the path of all files in the class rules,
# root rule and app
relative_path = self._app_file[
self._app_file.find(self.proj_dir):]
self._app_file = os.path.join(proj_dir, relative_path)
f = open(self._app_file, 'r')
s = f.read()
f.close()
self._import_module(s, self._app_file,
_fromlist=[self._app_class])
for _rule in self.class_rules:
relative_path = _rule.kv_file[
_rule.kv_file.find(self.proj_dir):]
_rule.kv_file = os.path.join(proj_dir, relative_path)
relative_path = _rule.file[_rule.file.find(self.proj_dir):]
_rule.file = os.path.join(proj_dir, relative_path)
f = open(_rule.file, 'r')
s = f.read()
f.close()
self._import_module(s, _rule.file, _fromlist=[_rule.name])
relative_path = self.root_rule.kv_file[
self.root_rule.kv_file.find(self.proj_dir):]
self.root_rule.kv_file = os.path.join(proj_dir, relative_path)
relative_path = self.root_rule.file[
self.root_rule.file.find(self.proj_dir):]
self.root_rule.file = os.path.join(proj_dir, relative_path)
self.proj_dir = proj_dir
# For custom widgets copy py and kv file to project directory
for widget in self.custom_widgets:
custom_kv = os.path.join(self.proj_dir,
os.path.basename(widget.kv_file))
if not os.path.exists(custom_kv):
shutil.copy(widget.kv_file, custom_kv)
custom_py = os.path.join(self.proj_dir,
os.path.basename(widget.py_file))
if not os.path.exists(custom_py):
shutil.copy(widget.py_file, custom_py)
# Saving all opened py files and also reimport them
for _code_input in self.tab_pannel.list_py_code_inputs:
path = os.path.join(self.proj_dir, _code_input.rel_file_path)
f = open(path, 'w')
f.write(_code_input.text)
f.close()
_from_list = []
for rule in self.class_rules:
if rule.file == path:
_from_list.append(rule.file)
if not self.is_root_a_class_rule():
if self.root_rule.file == path:
_from_list.append(self.root_rule.name)
# Ignore all types that are not .py
if path.endswith(".py"):
self._import_module(_code_input.text, path,
_fromlist=_from_list)
# Save all class rules
text = self.kv_code_input.text
for _rule in self.class_rules:
# Get the kv text from KVLangArea and write it to class rule's file
f = open(_rule.kv_file, 'r')
_file_str = f.read()
f.close()
old_str = self.get_class_str_from_text(_rule.name, _file_str)
new_str = self.get_class_str_from_text(_rule.name, text)
f = open(_rule.kv_file, 'w')
_file_str = _file_str.replace(old_str, new_str)
f.write(_file_str)
f.close()
# If root widget is not changed
if self._root_rule.name == self.root_rule.name:
# Save root widget's rule
is_root_class = False
for _rule in self.class_rules:
if _rule.name == self.root_rule.name:
is_root_class = True
break
if not is_root_class:
f = open(self.root_rule.kv_file, 'r')
_file_str = f.read()
f.close()
old_str = self.get_class_str_from_text(self.root_rule.name,
_file_str,
is_class=False)
new_str = self.get_class_str_from_text(self.root_rule.name,
text, is_class=False)
f = open(self.root_rule.kv_file, 'w')
_file_str = _file_str.replace(old_str, new_str)
f.write(_file_str)
f.close()
else:
            # The root widget has changed; handle the following cases:
root_name = self.root_rule.name
f = open(self._app_file, 'r')
file_str = f.read()
f.close()
self._root_rule = self.root_rule
if self.is_root_a_class_rule() and self._app_file:
                # Root Widget's class rule is a custom class
                # and its rule is a class rule, so it has already been saved.
                # The string of App's build() function will be changed to
                # return the new root widget's class.
if self._app_class != 'runTouchApp':
s = re.search(r'class\s+%s.+:' % self._app_class, file_str)
if s:
build_searchiter = None
for searchiter in re.finditer(
r'[ \ \t]+def\s+build\s*\(\s*self.+\s*:',
file_str):
if searchiter.start() > s.start():
build_searchiter = searchiter
break
if build_searchiter:
indent = get_indentation(build_searchiter.group(0))
file_str = file_str[:build_searchiter.end()] +\
'\n' + get_indent_str(2 * indent) + "return " +\
root_name + "()\n" + \
file_str[build_searchiter.end():]
else:
file_str = file_str[:s.end()] + \
"\n def build(self):\n return " + \
root_name + '()\n' + file_str[s.end():]
else:
file_str = re.sub(r'runTouchApp\s*\(.+\)',
'runTouchApp(' + root_name + '())',
file_str)
f = open(self._app_file, 'w')
f.write(file_str)
f.close()
else:
                # Root Widget's rule is not a custom class;
                # its rule is the root rule.
                # Its kv_file should be named after the App's class,
                # and the App's build() function should be cleared.
if not self.root_rule.kv_file:
s = self._app_class.replace('App', '').lower()
root_file = None
for _file in self.kv_file_list:
if os.path.basename(_file).find(s) == 0:
self.root_rule.kv_file = _file
break
f = open(self.root_rule.kv_file, 'r')
_file_str = f.read()
f.close()
new_str = self.get_class_str_from_text(self.root_rule.name,
text, False)
f = open(self.root_rule.kv_file, 'a')
f.write(new_str)
f.close()
if self._app_class != 'runTouchApp':
s = re.search(r'class\s+%s.+:' % self._app_class, file_str)
if s:
build_searchiter = None
for searchiter in re.finditer(
r'[ \ \t]+def\s+build\s*\(\s*self.+\s*:',
file_str):
if searchiter.start() > s.start():
build_searchiter = searchiter
break
if build_searchiter:
lines = file_str.splitlines()
total_lines = len(lines)
indent = get_indentation(build_searchiter.group(0))
_line = 0
_line_pos = -1
_line_pos = file_str.find('\n', _line_pos + 1)
while _line_pos <= build_searchiter.start():
_line_pos = file_str.find('\n', _line_pos + 1)
_line += 1
_line += 1
while _line < total_lines:
if lines[_line].strip() != '' and\
get_indentation(lines[_line]) <= \
indent:
break
_line += 1
_line -= 1
end = get_line_start_pos(file_str, _line)
start = build_searchiter.start()
file_str = file_str.replace(file_str[start:end],
' pass')
f = open(self._app_file, 'w')
f.write(file_str)
f.close()
# Allow Project Watcher to emit events
Clock.schedule_once(self._allow_proj_watcher_dispatch, 1)
def get_class_str_from_text(self, class_name, _file_str, is_class=True):
'''To return the full class rule of class_name from _file_str
'''
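        # Example (illustrative): for class_name='MyWidget' with is_class=True,
        # this returns the text from '<MyWidget>:' up to the next line whose
        # indentation drops back to column zero (or the end of the file).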
_file_str += '\n'
start_pos = -1
# Find the start position of class_name
if is_class:
start_pos = _file_str.find('<' + class_name + '>:')
else:
while True:
start_pos = _file_str.find(class_name, start_pos + 1)
if start_pos == 0 or not (_file_str[start_pos - 1].isalnum() and
_file_str[start_pos - 1] != ''):
break
_line = 0
_line_pos = 0
_line_pos = _file_str.find('\n', _line_pos + 1)
while _line_pos != -1 and _line_pos < start_pos:
_line_pos = _file_str.find('\n', _line_pos + 1)
_line += 1
# Find the end position of class_name, where indentation becomes 0
# or file ends
_line += 1
lines = _file_str.splitlines()
_total_lines = len(lines)
hash_pos = 0
while hash_pos == 0 and _line < _total_lines:
hash_pos = lines[_line].find('#')
if hash_pos == 0:
_line_pos += 1 + len(lines[_line])
_line += 1
while _line < _total_lines and (lines[_line].strip() == '' or
get_indentation(lines[_line]) != 0):
_line_pos = _file_str.find('\n', _line_pos + 1)
_line += 1
hash_pos = 0
while hash_pos == 0 and _line < _total_lines:
hash_pos = lines[_line].find('#')
if hash_pos == 0:
_line += 1
end_pos = _line_pos
old_str = _file_str[start_pos: end_pos]
return old_str
def _allow_proj_watcher_dispatch(self, *args):
        '''Allow the project watcher to dispatch events for self.proj_dir again.
'''
self.proj_watcher.allow_event_dispatch = True
# self.proj_watcher.start_watching(self.proj_dir)
def _app_in_string(self, s):
'''To determine if there is an App class or runTouchApp
defined/used in string s.
'''
if 'runTouchApp' in s:
self._app_class = 'runTouchApp'
return True
elif 'kivy.app' in s:
for _class in re.findall(r'\bclass\b.+:', s):
b_index1 = _class.find('(')
b_index2 = _class.find(')')
if _class[b_index1 + 1:b_index2].strip() == 'App':
self._app_class = _class[_class.find(' '):b_index1].strip()
return True
return False
def _get_class_files(self):
        '''To search through all detected class rules, find
           their python files, and locate the App class.
'''
if self._app_file is None:
# Search for main.py
for _file in self.file_list:
if _file[_file.rfind('/') + 1:] == 'main.py':
f = open(_file, 'r')
s = f.read()
f.close()
if self._app_in_string(s):
self._app_module = self._import_module(s, _file)
self._app_file = _file
# Search for a file with app in its name
if not self._app_class:
for _file in self.file_list:
if 'app' in _file[_file.rfind('/'):]:
f = open(_file, 'r')
s = f.read()
f.close()
if self._app_in_string(s):
self._app_module = self._import_module(s, _file)
self._app_file = _file
to_find = []
for _rule in self.class_rules:
if _rule.file is None:
to_find.append(_rule)
if self.root_rule:
to_find.append(self.root_rule)
# If cannot find due to above methods, search every file
for _file in self.file_list:
f = open(_file, 'r')
s = f.read()
f.close()
if not self._app_file and self._app_in_string(s):
self._app_module = self._import_module(s, _file)
self._app_file = _file
for _rule in to_find[:]:
if _rule.file:
continue
                if re.search(r'\bclass\s+%s\b.*:' % (_rule.name), s):
mod = self._import_module(s, _file, _fromlist=[_rule.name])
if hasattr(mod, _rule.name):
_rule.file = _file
to_find.remove(_rule)
_rule.module = mod
# Cannot Find App, So, use default runTouchApp
if not self._app_file:
self._app_class = 'runTouchApp'
# Root Widget may be in Factory not in file
if self.root_rule:
if not self.root_rule.file and\
hasattr(Factory, self.root_rule.name):
to_find.remove(self.root_rule)
# to_find should be empty, if not some class's files are not detected
if to_find != []:
raise ProjectLoaderException(
'Cannot find class files for all classes')
def _import_module(self, s, _file, _fromlist=[]):
module = None
import_from_s = False
_r = re.findall(r'Builder\.load_file\s*\(\s*.+\s*\)', s)
if _r:
s = s.replace(_r[0], '')
import_from_s = True
run_pos = s.rfind('().run()')
if run_pos != -1:
run_pos -= 1
while not s[run_pos].isspace():
run_pos -= 1
i = run_pos - 1
while s[i] == ' ':
i -= 1
if i == run_pos - 1 or _r != []:
if i == run_pos - 1:
s = s.replace('%s().run()' % self._app_class, '')
if 'AppModule' in sys.modules:
del sys.modules['AppModule']
module = imp.new_module('AppModule')
exec_(s, module.__dict__)
sys.modules['AppModule'] = module
return module
module_name = _file[_file.rfind(os.sep) + 1:].replace('.py', '')
if module_name in sys.modules:
del sys.modules[module_name]
module = __import__(module_name, fromlist=_fromlist)
return module
def cleanup(self, stop_watcher=True):
'''To cleanup everything loaded by previous project.
'''
if stop_watcher:
self.proj_watcher.stop()
# Remove all class rules and root rules of previous project
rules = []
try:
rules = Builder.match(self.root_rule.widget)
for _rule in rules:
for _tuple in Builder.rules[:]:
if _tuple[1] == _rule:
Builder.rules.remove(_tuple)
except:
pass
for _tuple in Builder.rules[:]:
for _rule in self.class_rules:
if "<" + _rule.name + ">" == _tuple[1].name:
Builder.rules.remove(_tuple)
if self.root_rule and not self._is_root_already_in_factory and\
hasattr(Factory, self.root_rule.name):
Factory.unregister(self.root_rule.name)
self._app_file = None
self._app_class = None
self._app_module = None
self._app = None
# Remove previous project directories
for _dir in self._dir_list:
try:
sys.path.remove(_dir)
except:
pass
self.kv_file_list = []
self.file_list = []
self._dir_list = []
self.class_rules = []
self.list_comments = []
self.custom_widgets = []
self.dict_file_type_and_path = {}
self.root_rule = None
self._root_rule = None
def get_app(self, reload_app=False):
'''To get the applications app class instance
'''
if not self._app_file or not self._app_class or not self._app_module:
return None
if not reload_app and self._app:
return self._app
for name, obj in inspect.getmembers(self._app_module):
if inspect.isclass(obj) and self._app_class == name:
self._app = obj()
return self._app
# if still couldn't get app, although that shouldn't happen
return None
def reload_from_str(self, root_str):
'''To reload from root_str
'''
rules = []
# Cleaning root rules
try:
rules = Builder.match(self.root_rule.widget)
for _rule in rules:
for _tuple in Builder.rules[:]:
if _tuple[1] == _rule:
Builder.rules.remove(_tuple)
except:
pass
# Cleaning class rules
for _rule in self.class_rules:
for rule in Builder.rules[:]:
if rule[1].name == '<' + _rule.name + '>':
Builder.rules.remove(rule)
break
root_widget = None
# Remove all the 'app' lines
root_str = re.sub(r'.+app+.+', '', root_str)
root_widget = Builder.load_string(root_str)
if not root_widget:
root_widget = self.get_widget_of_class(self.root_rule.name)
self.root_rule.widget = root_widget
if not root_widget:
            root_name = root_str[:root_str.find('\n')]
            root_name = root_name.replace(':', '').replace('<', '')
            root_name = root_name.replace('>', '')
root_widget = self.set_root_widget(root_name)
return root_widget
def is_root_a_class_rule(self):
'''Returns True if root rule is a class rule
'''
for _rule in self.class_rules:
if _rule.name == self.root_rule.name:
return True
return False
def set_root_widget(self, root_name, widget=None):
'''To set root_name as the root rule.
'''
root_widget = None
if not widget:
root_widget = self.get_widget_of_class(root_name)
else:
root_widget = widget
self.root_rule = RootRule(root_name, root_widget)
for _rule in self.class_rules:
if _rule.name == root_name:
self.root_rule.kv_file = _rule.kv_file
self.root_rule.py_file = _rule.file
break
if not self._root_rule:
self._root_rule = self.root_rule
return root_widget
def get_root_widget(self, new_root=False):
'''To get the root widget of the current project.
'''
if not new_root and self.root_rule and self.root_rule.name != '':
return self.root_rule.widget
if self._app_file is None:
return None
f = open(self._app_file, 'r')
s = f.read()
f.close()
current_app = App.get_running_app()
app = self.get_app(reload_app=True)
root_widget = None
if app is not None:
root_widget = app.build()
if not root_widget:
root_widget = app.root
App._running_app = current_app
if root_widget:
self.root_rule = RootRule(root_widget.__class__.__name__,
root_widget)
for _rule in self.class_rules:
if _rule.name == self.root_rule.name:
self.root_rule.kv_file = _rule.kv_file
self.root_rule.file = _rule.file
break
if not self._root_rule:
self._root_rule = self.root_rule
if not self.root_rule.kv_file:
raise ProjectLoaderException("Cannot find root widget's kv file")
return root_widget
def get_widget_of_class(self, class_name):
'''To get instance of the class_name
'''
self.root = getattr(Factory, class_name)()
return self.root
def is_widget_custom(self, widget):
for rule in self.class_rules:
if rule.name == type(widget).__name__:
return True
return False
def record(self):
'''To record all the findings in ./designer/kvproj. These will
be loaded again if project hasn't been modified
outside Kivy Designer
'''
if not os.path.exists(os.path.join(
self.proj_dir, os.path.dirname(KV_PROJ_FILE_NAME))):
os.mkdir(os.path.join(self.proj_dir, ".designer"))
        f = open(os.path.join(self.proj_dir, KV_PROJ_FILE_NAME), 'w')
proj_file_str = '<time>\n' + ' ' + str(time.time()) + '\n</time>\n'
proj_file_str += '<files>\n'
for _file in self.file_list:
proj_file_str += ' <file>\n'
proj_file_str += ' ' + _file
proj_file_str += '\n </file>\n'
proj_file_str += '</files>\n'
proj_file_str += '<classes>\n'
for _rule in self.class_rules:
proj_file_str += ' <class>\n'
proj_file_str += ' <name>\n'
proj_file_str += ' ' + _rule.name
proj_file_str += '\n </name>\n'
proj_file_str += ' <file>\n'
proj_file_str += ' ' + _rule.file
proj_file_str += '\n </file>\n'
proj_file_str += '\n </class>\n'
proj_file_str += '</classes>\n'
if self._app_class and self._app_file:
proj_file_str += '<app>\n'
proj_file_str += ' <class>\n'
proj_file_str += ' ' + self._app_class
proj_file_str += '\n </class>\n'
proj_file_str += ' <file>\n'
proj_file_str += ' ' + self._app_file
proj_file_str += '\n </file>\n'
proj_file_str += '</app>\n'
f.write(proj_file_str)
f.close()
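# Sketch of the kvproj file written by record() (illustrative values; the exact
# indentation follows the string building above):
#
#   <time>
#    1467000000.0
#   </time>
#   <files>
#    <file>
#     /path/to/proj/main.py
#    </file>
#   </files>
#   <classes>
#    <class>
#     <name>
#      MyWidget
#     </name>
#     <file>
#      /path/to/proj/main.py
#     </file>
#    </class>
#   </classes>
#   <app>
#    <class>
#     MyApp
#    </class>
#    <file>
#     /path/to/proj/main.py
#    </file>
#   </app>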
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Clustering Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.factorization.python.ops import gen_clustering_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.factorization.python.ops.gen_clustering_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.embedding_ops import embedding_lookup
from tensorflow.python.platform import resource_loader
_clustering_ops = loader.load_op_library(
resource_loader.get_path_to_datafile('_clustering_ops.so'))
# Euclidean distance between vectors U and V is defined as ||U - V||_F which is
# the square root of the sum of the absolute squares of the elementwise differences.
SQUARED_EUCLIDEAN_DISTANCE = 'squared_euclidean'
# Cosine distance between vectors U and V is defined as
# 1 - (U \dot V) / (||U||_F ||V||_F)
COSINE_DISTANCE = 'cosine'
RANDOM_INIT = 'random'
KMEANS_PLUS_PLUS_INIT = 'kmeans_plus_plus'
class KMeans(object):
"""Creates the graph for k-means clustering."""
def __init__(self,
inputs,
num_clusters,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=False,
mini_batch_steps_per_iteration=1,
random_seed=0,
kmeans_plus_plus_num_retries=2):
"""Creates an object for generating KMeans clustering graph.
This class implements the following variants of K-means algorithm:
If use_mini_batch is False, it runs standard full batch K-means. Each step
runs a single iteration of K-Means. This step can be run sharded across
multiple workers by passing a list of sharded inputs to this class. Note
however that a single step needs to process the full input at once.
If use_mini_batch is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of mini_batch_steps_per_iteration steps. Two copies of cluster
centers are maintained: one that is updated at the end of each iteration,
and one that is updated every step. The first copy is used to compute
cluster allocations for each step, and for inference, while the second copy
is the one updated each step using the mini-batch update rule. After each
    iteration is complete, this second copy is copied back to the first copy.
Note that for use_mini_batch=True, when mini_batch_steps_per_iteration=1,
the algorithm reduces to the standard mini-batch algorithm. Also by setting
mini_batch_steps_per_iteration = num_inputs / batch_size, the algorithm
becomes an asynchronous version of the full-batch algorithm. Note however
that there is no guarantee by this implementation that each input is seen
exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not behave
exactly like a full-batch version.
Args:
inputs: An input tensor or list of input tensors
num_clusters: number of clusters.
initial_clusters: Specifies the clusters used during initialization. Can
be a tensor or numpy array, or a function that generates the clusters.
Can also be "random" to specify that clusters should be chosen randomly
from input data.
distance_metric: distance metric used for clustering.
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: number of steps after which the updated
cluster centers are synced back to a master copy.
      random_seed: Seed for PRNG used to initialize the cluster centers.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
"""
self._inputs = inputs if isinstance(inputs, list) else [inputs]
assert num_clusters > 0, num_clusters
self._num_clusters = num_clusters
if initial_clusters is None:
initial_clusters = RANDOM_INIT
self._initial_clusters = initial_clusters
assert distance_metric in [SQUARED_EUCLIDEAN_DISTANCE, COSINE_DISTANCE]
self._distance_metric = distance_metric
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = int(mini_batch_steps_per_iteration)
self._random_seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
@classmethod
def _distance_graph(cls, inputs, clusters, distance_metric):
"""Computes distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
distance_metric: distance metric used for clustering
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
Currently only Euclidean distance and cosine distance are supported.
"""
assert isinstance(inputs, list)
if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:
return cls._compute_euclidean_distance(inputs, clusters)
elif distance_metric == COSINE_DISTANCE:
return cls._compute_cosine_distance(
inputs, clusters, inputs_normalized=True)
else:
assert False, ('Unsupported distance metric passed to Kmeans %s' %
str(distance_metric))
@classmethod
def _compute_euclidean_distance(cls, inputs, clusters):
"""Computes Euclidean distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
"""
output = []
for inp in inputs:
with ops.colocate_with(inp):
# Computes Euclidean distance. Note the first and third terms are
# broadcast additions.
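        # This uses the identity ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2,
        # evaluated row-wise against every cluster center.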
squared_distance = (math_ops.reduce_sum(
math_ops.square(inp), 1, keep_dims=True) - 2 * math_ops.matmul(
inp, clusters, transpose_b=True) + array_ops.transpose(
math_ops.reduce_sum(
math_ops.square(clusters), 1, keep_dims=True)))
output.append(squared_distance)
return output
@classmethod
def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):
"""Computes cosine distance between each input and each cluster center.
Args:
inputs: list of input Tensor.
clusters: cluster Tensor
inputs_normalized: if True, it assumes that inp and clusters are
normalized and computes the dot product which is equivalent to the cosine
distance. Else it L2 normalizes the inputs first.
Returns:
list of Tensors, where each element corresponds to each element in inp.
The value is the distance of each row to all the cluster centers.
"""
output = []
if not inputs_normalized:
with ops.colocate_with(clusters):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp in inputs:
with ops.colocate_with(inp):
if not inputs_normalized:
inp = nn_impl.l2_normalize(inp, dim=1)
output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))
return output
def _infer_graph(self, inputs, clusters):
"""Maps input to closest cluster and the score.
Args:
inputs: list of input Tensors.
clusters: Tensor of cluster centers.
Returns:
List of tuple, where each value in tuple corresponds to a value in inp.
The tuple has following three elements:
all_scores: distance of each input to each cluster center.
score: distance of each input to closest cluster center.
cluster_idx: index of cluster center closest to the corresponding input.
"""
assert isinstance(inputs, list)
# Pairwise distances are used only by transform(). In all other cases, this
# sub-graph is not evaluated.
scores = self._distance_graph(inputs, clusters, self._distance_metric)
output = []
if (self._distance_metric == COSINE_DISTANCE and
not self._clusters_l2_normalized()):
      # The cosine distance between normalized vectors x and y is half their
      # squared Euclidean distance: ||x - y||^2 = 2 * (1 - x.y). We are using
      # this fact and reusing the nearest_neighbors op.
# TODO(ands): Support COSINE distance in nearest_neighbors and remove
# this.
with ops.colocate_with(clusters):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp, score in zip(inputs, scores):
with ops.colocate_with(inp):
(indices,
distances) = gen_clustering_ops.nearest_neighbors(inp, clusters, 1)
if self._distance_metric == COSINE_DISTANCE:
distances *= 0.5
output.append(
(score, array_ops.squeeze(distances), array_ops.squeeze(indices)))
return zip(*output)
def _init_clusters_random(self):
"""Does random initialization of clusters.
Returns:
Tensor of randomly initialized clusters.
"""
num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in self._inputs])
# Note that for mini-batch k-means, we should ensure that the batch size of
# data used during initialization is sufficiently large to avoid duplicated
# clusters.
with ops.control_dependencies(
[check_ops.assert_less_equal(self._num_clusters, num_data)]):
indices = random_ops.random_uniform(
array_ops.reshape(self._num_clusters, [-1]),
minval=0,
maxval=math_ops.cast(num_data, dtypes.int64),
seed=self._random_seed,
dtype=dtypes.int64)
clusters_init = embedding_lookup(
self._inputs, indices, partition_strategy='div')
return clusters_init
def _clusters_l2_normalized(self):
"""Returns True if clusters centers are kept normalized."""
return (self._distance_metric == COSINE_DISTANCE and
(not self._use_mini_batch or
self._mini_batch_steps_per_iteration > 1))
def _initialize_clusters(self,
cluster_centers,
cluster_centers_initialized,
cluster_centers_updated):
"""Returns an op to initialize the cluster centers."""
init = self._initial_clusters
if init == RANDOM_INIT:
clusters_init = self._init_clusters_random()
elif init == KMEANS_PLUS_PLUS_INIT:
# Points from only the first shard are used for initializing centers.
# TODO(ands): Use all points.
inp = self._inputs[0]
if self._distance_metric == COSINE_DISTANCE:
inp = nn_impl.l2_normalize(inp, dim=1)
clusters_init = gen_clustering_ops.kmeans_plus_plus_initialization(
inp, self._num_clusters, self._random_seed,
self._kmeans_plus_plus_num_retries)
elif callable(init):
clusters_init = init(self._inputs, self._num_clusters)
elif not isinstance(init, str):
clusters_init = init
else:
assert False, 'Unsupported init passed to Kmeans %s' % str(init)
if self._distance_metric == COSINE_DISTANCE and clusters_init is not None:
clusters_init = nn_impl.l2_normalize(clusters_init, dim=1)
with ops.colocate_with(cluster_centers_initialized):
initialized = control_flow_ops.with_dependencies(
[clusters_init],
array_ops.identity(cluster_centers_initialized))
with ops.colocate_with(cluster_centers):
assign_centers = state_ops.assign(cluster_centers, clusters_init,
validate_shape=False)
if cluster_centers_updated != cluster_centers:
assign_centers = control_flow_ops.group(
assign_centers,
state_ops.assign(cluster_centers_updated, clusters_init,
validate_shape=False))
assign_centers = control_flow_ops.with_dependencies(
[assign_centers],
state_ops.assign(cluster_centers_initialized, True))
return control_flow_ops.cond(initialized,
control_flow_ops.no_op,
lambda: assign_centers).op
def _create_variables(self):
"""Creates variables.
Returns:
Tuple with following elements:
cluster_centers: a Tensor for storing cluster centers
cluster_centers_initialized: bool Variable indicating whether clusters
are initialized.
cluster_counts: a Tensor for storing counts of points assigned to this
cluster. This is used by mini-batch training.
cluster_centers_updated: Tensor representing copy of cluster centers that
are updated every step.
update_in_steps: numbers of steps left before we sync
cluster_centers_updated back to cluster_centers.
"""
init_value = array_ops.constant([], dtype=dtypes.float32)
cluster_centers = variable_scope.variable(init_value,
name='clusters',
validate_shape=False)
cluster_centers_initialized = variable_scope.variable(False,
dtype=dtypes.bool,
name='initialized')
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
# Copy of cluster centers actively updated each step according to
# mini-batch update rule.
cluster_centers_updated = variable_scope.variable(init_value,
name='clusters_updated',
validate_shape=False)
# How many steps till we copy the updated clusters to cluster_centers.
update_in_steps = variable_scope.variable(
self._mini_batch_steps_per_iteration,
dtype=dtypes.int64,
name='update_in_steps')
# Count of points assigned to cluster_centers_updated.
cluster_counts = variable_scope.variable(
array_ops.zeros([self._num_clusters],
dtype=dtypes.int64))
else:
cluster_centers_updated = cluster_centers
update_in_steps = None
cluster_counts = (variable_scope.variable(array_ops.ones(
[self._num_clusters],
dtype=dtypes.int64))
if self._use_mini_batch else None)
return (cluster_centers,
cluster_centers_initialized,
cluster_counts,
cluster_centers_updated,
update_in_steps)
@classmethod
def _l2_normalize_data(cls, inputs):
"""Normalized the input data."""
output = []
for inp in inputs:
with ops.colocate_with(inp):
output.append(nn_impl.l2_normalize(inp, dim=1))
return output
def training_graph(self):
"""Generate a training graph for kmeans algorithm.
Returns:
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
num_clusters) where the value is the distance of an input vector and a
cluster center.
cluster_idx: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to cluster_idx but specifies the distance to the
assigned cluster instead.
cluster_centers_initialized: scalar indicating whether clusters have been
initialized.
init_op: an op to initialize the clusters.
training_op: an op that runs an iteration of training.
"""
# Implementation of kmeans.
inputs = self._inputs
(cluster_centers_var,
cluster_centers_initialized,
total_counts,
cluster_centers_updated,
update_in_steps) = self._create_variables()
init_op = self._initialize_clusters(cluster_centers_var,
cluster_centers_initialized,
cluster_centers_updated)
cluster_centers = cluster_centers_var
if self._distance_metric == COSINE_DISTANCE:
inputs = self._l2_normalize_data(inputs)
if not self._clusters_l2_normalized():
cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1)
all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers)
if self._use_mini_batch:
sync_updates_op = self._mini_batch_sync_updates_op(
update_in_steps,
cluster_centers_var, cluster_centers_updated,
total_counts)
assert sync_updates_op is not None
with ops.control_dependencies([sync_updates_op]):
training_op = self._mini_batch_training_op(
inputs, cluster_idx, cluster_centers_updated, total_counts)
else:
assert cluster_centers == cluster_centers_var
training_op = self._full_batch_training_op(inputs, cluster_idx,
cluster_centers_var)
return (all_scores, cluster_idx, scores,
cluster_centers_initialized, init_op, training_op)
def _mini_batch_sync_updates_op(self, update_in_steps,
cluster_centers_var, cluster_centers_updated,
total_counts):
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
assert update_in_steps is not None
with ops.colocate_with(update_in_steps):
def _f():
# Note that there is a race condition here, so we do a best effort
# updates here. We reset update_in_steps first so that other workers
# don't duplicate the updates. Also we update cluster_center_vars
# before resetting total_counts to avoid large updates to
# cluster_centers_updated based on partially updated
# cluster_center_vars.
with ops.control_dependencies([state_ops.assign(
update_in_steps,
self._mini_batch_steps_per_iteration - 1)]):
with ops.colocate_with(cluster_centers_updated):
if self._distance_metric == COSINE_DISTANCE:
cluster_centers = nn_impl.l2_normalize(cluster_centers_updated,
dim=1)
else:
cluster_centers = cluster_centers_updated
with ops.colocate_with(cluster_centers_var):
with ops.control_dependencies([state_ops.assign(
cluster_centers_var,
cluster_centers)]):
with ops.colocate_with(cluster_centers_var):
with ops.control_dependencies([
state_ops.assign(total_counts,
array_ops.zeros_like(total_counts))]):
return array_ops.identity(update_in_steps)
return control_flow_ops.cond(
update_in_steps <= 0,
_f,
lambda: state_ops.assign_sub(update_in_steps, 1))
else:
return control_flow_ops.no_op()
def _mini_batch_training_op(self, inputs, cluster_idx_list,
cluster_centers, total_counts):
"""Creates an op for training for mini batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
total_counts: Tensor Ref of cluster counts.
Returns:
An op for doing an update of mini-batch k-means.
"""
update_ops = []
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp):
assert total_counts is not None
cluster_idx = array_ops.reshape(cluster_idx, [-1])
# Dedupe the unique ids of cluster_centers being updated so that updates
# can be locally aggregated.
unique_ids, unique_idx = array_ops.unique(cluster_idx)
num_unique_cluster_idx = array_ops.size(unique_ids)
# Fetch the old values of counts and cluster_centers.
with ops.colocate_with(total_counts):
old_counts = array_ops.gather(total_counts, unique_ids)
# TODO(agarwal): This colocation seems to run into problems. Fix it.
# with ops.colocate_with(cluster_centers):
old_cluster_centers = array_ops.gather(cluster_centers, unique_ids)
# Locally aggregate the increment to counts.
count_updates = math_ops.unsorted_segment_sum(
array_ops.ones_like(
unique_idx, dtype=total_counts.dtype),
unique_idx,
num_unique_cluster_idx)
# Locally compute the sum of inputs mapped to each id.
# For a cluster with old cluster value x, old count n, and with data
# d_1,...d_k newly assigned to it, we recompute the new value as
# x += (sum_i(d_i) - k * x) / (n + k).
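        # Worked example (illustrative): old center x = 4.0 with count n = 3 and
        # new points 6.0, 8.0 (k = 2) gives x += (14.0 - 2*4.0) / (3 + 2) = 1.2,
        # i.e. x = 5.2, the running mean of all five points.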
# Compute sum_i(d_i), see comment above.
cluster_center_updates = math_ops.unsorted_segment_sum(
inp, unique_idx, num_unique_cluster_idx)
# Shape to enable broadcasting count_updates and learning_rate to inp.
# It extends the shape with 1's to match the rank of inp.
broadcast_shape = array_ops.concat(
[
array_ops.reshape(num_unique_cluster_idx, [1]), array_ops.ones(
array_ops.reshape(array_ops.rank(inp) - 1, [1]),
dtype=dtypes.int32)
],
0)
# Subtract k * x, see comment above.
cluster_center_updates -= math_ops.cast(
array_ops.reshape(count_updates, broadcast_shape),
inp.dtype) * old_cluster_centers
learning_rate = math_ops.reciprocal(
math_ops.cast(old_counts + count_updates, inp.dtype))
learning_rate = array_ops.reshape(learning_rate, broadcast_shape)
# scale by 1 / (n + k), see comment above.
cluster_center_updates *= learning_rate
# Apply the updates.
update_counts = state_ops.scatter_add(
total_counts,
unique_ids,
count_updates)
update_cluster_centers = state_ops.scatter_add(
cluster_centers,
unique_ids,
cluster_center_updates)
update_ops.extend([update_counts, update_cluster_centers])
return control_flow_ops.group(*update_ops)
def _full_batch_training_op(self, inputs, cluster_idx_list, cluster_centers):
"""Creates an op for training for full batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
Returns:
      An op for doing an update of full-batch k-means.
"""
cluster_sums = []
cluster_counts = []
epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype)
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp):
cluster_sums.append(
math_ops.unsorted_segment_sum(inp, cluster_idx, self._num_clusters))
cluster_counts.append(
math_ops.unsorted_segment_sum(
array_ops.reshape(
array_ops.ones(
array_ops.reshape(array_ops.shape(inp)[0], [-1])),
[-1, 1]), cluster_idx, self._num_clusters))
with ops.colocate_with(cluster_centers):
new_clusters_centers = math_ops.add_n(cluster_sums) / (math_ops.cast(
math_ops.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon)
if self._clusters_l2_normalized():
new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)
return state_ops.assign(cluster_centers, new_clusters_centers)
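# Usage sketch (illustrative, not executed; assumes numpy is available as np):
#
#   points = constant_op.constant(np.random.rand(1000, 2), dtype=dtypes.float32)
#   kmeans = KMeans(points, num_clusters=5, initial_clusters=RANDOM_INIT)
#   (all_scores, cluster_idx, scores, cluster_centers_initialized,
#    init_op, training_op) = kmeans.training_graph()
#   # After init_op has run once, each run of training_op performs one
#   # full-batch k-means iteration (or one mini-batch step if use_mini_batch=True).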
|
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleParserError, AnsibleAssertionError
from ansible.module_utils._text import to_native
from ansible.module_utils.six import string_types
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.block import Block
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
from ansible.vars.manager import preprocess_vars
from ansible.utils.display import Display
display = Display()
__all__ = ['Play']
class Play(Base, Taggable, CollectionSearch):
"""
A play is a language feature that represents a list of roles and/or
task/handler blocks to execute on a given set of hosts.
Usage:
Play.load(datastructure) -> Play
Play.something(...)
"""
# =================================================================================
_hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True)
# Facts
_gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True)
_gather_subset = FieldAttribute(isa='list', default=(lambda: C.DEFAULT_GATHER_SUBSET), listof=string_types, always_post_validate=True)
_gather_timeout = FieldAttribute(isa='int', default=C.DEFAULT_GATHER_TIMEOUT, always_post_validate=True)
_fact_path = FieldAttribute(isa='string', default=C.DEFAULT_FACT_PATH)
# Variable Attributes
_vars_files = FieldAttribute(isa='list', default=list, priority=99)
_vars_prompt = FieldAttribute(isa='list', default=list, always_post_validate=False)
# Role Attributes
_roles = FieldAttribute(isa='list', default=list, priority=90)
# Block (Task) Lists Attributes
_handlers = FieldAttribute(isa='list', default=list)
_pre_tasks = FieldAttribute(isa='list', default=list)
_post_tasks = FieldAttribute(isa='list', default=list)
_tasks = FieldAttribute(isa='list', default=list)
# Flag/Setting Attributes
_force_handlers = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('force_handlers'), always_post_validate=True)
_max_fail_percentage = FieldAttribute(isa='percent', always_post_validate=True)
_serial = FieldAttribute(isa='list', default=list, always_post_validate=True)
_strategy = FieldAttribute(isa='string', default=C.DEFAULT_STRATEGY, always_post_validate=True)
_order = FieldAttribute(isa='string', always_post_validate=True)
# =================================================================================
def __init__(self):
super(Play, self).__init__()
self._included_conditional = None
self._included_path = None
self._removed_hosts = []
self.ROLE_CACHE = {}
self.only_tags = set(context.CLIARGS.get('tags', [])) or frozenset(('all',))
self.skip_tags = set(context.CLIARGS.get('skip_tags', []))
def __repr__(self):
return self.get_name()
def get_name(self):
''' return the name of the Play '''
return self.name
@staticmethod
def load(data, variable_manager=None, loader=None, vars=None):
if ('name' not in data or data['name'] is None) and 'hosts' in data:
if data['hosts'] is None or all(host is None for host in data['hosts']):
raise AnsibleParserError("Hosts list cannot be empty - please check your playbook")
if isinstance(data['hosts'], list):
data['name'] = ','.join(data['hosts'])
else:
data['name'] = data['hosts']
p = Play()
if vars:
p.vars = vars.copy()
return p.load_data(data, variable_manager=variable_manager, loader=loader)
def preprocess_data(self, ds):
'''
Adjusts play datastructure to cleanup old/legacy items
'''
if not isinstance(ds, dict):
raise AnsibleAssertionError('while preprocessing data (%s), ds should be a dict but was a %s' % (ds, type(ds)))
# The use of 'user' in the Play datastructure was deprecated to
# line up with the same change for Tasks, due to the fact that
# 'user' conflicted with the user module.
if 'user' in ds:
# this should never happen, but error out with a helpful message
# to the user if it does...
if 'remote_user' in ds:
raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. "
"The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds)
ds['remote_user'] = ds['user']
del ds['user']
return super(Play, self).preprocess_data(ds)
def _load_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading tasks: %s" % to_native(e), obj=self._ds, orig_exc=e)
def _load_pre_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading pre_tasks", obj=self._ds, orig_exc=e)
def _load_post_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading post_tasks", obj=self._ds, orig_exc=e)
def _load_handlers(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed handlers/blocks.
Bare handlers outside of a block are given an implicit block.
'''
try:
return self._extend_value(
self.handlers,
load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader),
prepend=True
)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading handlers", obj=self._ds, orig_exc=e)
def _load_roles(self, attr, ds):
'''
Loads and returns a list of RoleInclude objects from the datastructure
list of role definitions and creates the Role from those objects
'''
if ds is None:
ds = []
try:
role_includes = load_list_of_roles(ds, play=self, variable_manager=self._variable_manager,
loader=self._loader, collection_search_list=self.collections)
except AssertionError as e:
raise AnsibleParserError("A malformed role declaration was encountered.", obj=self._ds, orig_exc=e)
roles = []
for ri in role_includes:
roles.append(Role.load(ri, play=self))
return self._extend_value(
self.roles,
roles,
prepend=True
)
def _load_vars_prompt(self, attr, ds):
new_ds = preprocess_vars(ds)
vars_prompts = []
if new_ds is not None:
for prompt_data in new_ds:
if 'name' not in prompt_data:
raise AnsibleParserError("Invalid vars_prompt data structure", obj=ds)
else:
vars_prompts.append(prompt_data)
return vars_prompts
def _compile_roles(self):
'''
Handles the role compilation step, returning a flat list of tasks
with the lowest level dependencies first. For example, if a role R
has a dependency D1, which also has a dependency D2, the tasks from
        D2 are merged first, followed by D1, and lastly by the tasks from
        the parent role R. This is done for all roles in the Play.
'''
block_list = []
if len(self.roles) > 0:
for r in self.roles:
# Don't insert tasks from ``import/include_role``, preventing
# duplicate execution at the wrong time
if r.from_include:
continue
block_list.extend(r.compile(play=self))
return block_list
def compile_roles_handlers(self):
'''
Handles the role handler compilation step, returning a flat list of Handlers
This is done for all roles in the Play.
'''
block_list = []
if len(self.roles) > 0:
for r in self.roles:
if r.from_include:
continue
block_list.extend(r.get_handler_blocks(play=self))
return block_list
def compile(self):
'''
Compiles and returns the task list for this play, compiled from the
roles (which are themselves compiled recursively) and/or the list of
tasks specified in the play.
'''
# create a block containing a single flush handlers meta
# task, so we can be sure to run handlers at certain points
# of the playbook execution
flush_block = Block.load(
data={'meta': 'flush_handlers'},
play=self,
variable_manager=self._variable_manager,
loader=self._loader
)
block_list = []
block_list.extend(self.pre_tasks)
block_list.append(flush_block)
block_list.extend(self._compile_roles())
block_list.extend(self.tasks)
block_list.append(flush_block)
block_list.extend(self.post_tasks)
block_list.append(flush_block)
return block_list
def get_vars(self):
return self.vars.copy()
def get_vars_files(self):
if self.vars_files is None:
return []
elif not isinstance(self.vars_files, list):
return [self.vars_files]
return self.vars_files
def get_handlers(self):
return self.handlers[:]
def get_roles(self):
return self.roles[:]
def get_tasks(self):
tasklist = []
for task in self.pre_tasks + self.tasks + self.post_tasks:
if isinstance(task, Block):
tasklist.append(task.block + task.rescue + task.always)
else:
tasklist.append(task)
return tasklist
def serialize(self):
data = super(Play, self).serialize()
roles = []
for role in self.get_roles():
roles.append(role.serialize())
data['roles'] = roles
data['included_path'] = self._included_path
return data
def deserialize(self, data):
super(Play, self).deserialize(data)
self._included_path = data.get('included_path', None)
if 'roles' in data:
role_data = data.get('roles', [])
roles = []
for role in role_data:
r = Role()
r.deserialize(role)
roles.append(r)
setattr(self, 'roles', roles)
del data['roles']
def copy(self):
new_me = super(Play, self).copy()
new_me.ROLE_CACHE = self.ROLE_CACHE.copy()
new_me._included_conditional = self._included_conditional
new_me._included_path = self._included_path
return new_me
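# Usage sketch (illustrative): loading a minimal play from a datastructure.
# variable_manager and loader are optional and omitted here.
#
#   ds = {'name': 'demo', 'hosts': 'localhost', 'gather_facts': False,
#         'tasks': [{'debug': {'msg': 'hello'}}]}
#   play = Play.load(ds)
#   blocks = play.compile()  # pre_tasks, roles, tasks and post_tasks interleaved
#                            # with 'flush_handlers' meta blocks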
|
|
#Main thread:
# Lock NewFrameAvailable lock.
# Loop:
# See if the face detector has new faces to add.
# Camera takes a picture.
# The main thread waits until all other threads have finished using the previous frame.
# The previous frame is rendered (with any modifications).
# Initiate new face trackers.
# Replace old frame.
# Release NewFrameAvailable lock.
# Process network messages
# Lock NewFrameAvailable lock.
# It turned out that locks don't allow for queuing in order
# (threads are notified in random order)
# For this reason, some network processing is done between
# releasing and reacquiring the NewFrameAvailable lock to allow
# trackers and detectors to work with the frame
from threading import Lock, Condition, Thread
import socket
import numpy as np
import messager
import cv2
import bbprocessors
from time import sleep, clock
import random
class TrackerMain():
def __init__(self, address, port, trackerMaker, detectorMaker, bbProcessor, multiDetect=0, width=320, height=240, detectorFrameSize=1, trackerFrameSize=1, backwardsConnection=False):
self.newFrameAvailable = Lock() # Queue for usage of new frame
self.usingCurrentFrame = Condition() # Lock when writing to frame
self.netLock = Lock() # Lock for sending network messages
self.frameUsers = 0 # Number of threads wanting to write eventually
self.faceTrackers = []
self.faceDetectors = []
self.bbProcessor = bbProcessor(self)
self.newFrameAvailable.acquire()
self.messager = messager.Messager()
# Connect to remote messager
# A possibility for backwards connection was necessary for security reasons
if not backwardsConnection:
#self.messageSock = socket.socket()
#self.messageSock.connect((address, port))
self.messageSock = self.messager.connect(address, port)
else:
# With backwards connection we need to wait for the connection
self.messager.setupHost(address, port)
self.start = False
self.messager.register('connect', self.setHost)
while not self.start:
self.messager.processHost()
self.messager.unregister('connect')
if multiDetect > 0:
# Setup queue for multiple detectors
# Detectors will be notified one at a time
# at intervals of average detection time
self.detectionQueue = Condition()
self.avgDetTime = 0 # average time to run a detector
self.avgDetSamples = 0 # N of samples in the average
self.multiDetect = True
else:
multiDetect = 1
self.multiDetect = False
# Make the face detectors
for i in range(0, multiDetect):
newDetector = detectorMaker(self)
self.faceDetectors.append(newDetector)
Thread(None, newDetector).start()
# Setup camera
self.videoCapture = cv2.VideoCapture(0)
self.videoCapture.set(3, width) # Width
self.videoCapture.set(4, height) # Height
self.maxw = self.videoCapture.get(3)
self.maxh = self.videoCapture.get(4)
ret, frame = self.videoCapture.read()
self.readFrame = frame
self.writeFrame = np.copy(frame)
# Setup preview window
self.windowName = 'Video'
self.window = cv2.namedWindow(self.windowName)
# Register trackerMaker
# This creates new trackers, e.g. starts tracking threads
# or does nothing if tracker threads are not wanted
# It also defines the type of tracker to use
self.trackerMaker = trackerMaker
self.detectorFrameSize = detectorFrameSize
self.trackerFrameSize = trackerFrameSize
self.startTime = clock()
# FPS calculation
self.fps = 0.0
self.fpsSamples = 100
self.frameTimes = [0.0] * self.fpsSamples
self.curSample = 0
self.lastTime = clock()
def mainLoop(self):
        # Note: this thread should have acquired the newFrameAvailable
# lock before entering this method.
# The first acquisition is in the class constructor.
# See if there are any new faces waiting
averageTime = 0
for detector in self.faceDetectors:
detector.processFaces(self.bbProcessor)
averageTime += detector.lastTime
averageTime /= len(self.faceDetectors)
ret, frame = self.videoCapture.read()
# Wait until all trackers are finished with the previous frame
with self.usingCurrentFrame:
if self.frameUsers > 0:
self.usingCurrentFrame.wait()
# Draw boundingboxes and labels (when available) on the image
for tr in self.faceTrackers:
tr.drawBB(self.writeFrame, 1 / self.trackerFrameSize)
# Calculate and display FPS
newTime = clock()
fps = 1.0 / float(newTime - self.lastTime)
self.lastTime = newTime
self.fps -= self.frameTimes[self.curSample]
self.fps += fps
self.frameTimes[self.curSample] = fps
self.curSample = (self.curSample + 1) % self.fpsSamples
cv2.putText(self.writeFrame, str(float(self.fps)/float(self.fpsSamples)), (0, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
# Show previous frame
cv2.imshow(self.windowName, self.writeFrame)
# Replace old frames
self.fullFrame = frame
self.readFrame = cv2.resize( frame
, (0, 0)
, fx=self.trackerFrameSize
, fy=self.trackerFrameSize
)
self.writeFrame = np.copy(frame)
# See if we should notify a detector
if self.multiDetect:
elapsedTime = clock() - self.startTime
if elapsedTime >= averageTime / len(self.faceDetectors):
self.startTime = clock()
with self.detectionQueue:
self.detectionQueue.notify()
# Let other threads use the current frame for a while
self.newFrameAvailable.release()
self.messager.processHost()
tic = clock()
self.newFrameAvailable.acquire()
def stop(self):
if self.multiDetect:
with self.detectionQueue:
self.detectionQueue.notify_all()
for fd in self.faceDetectors:
fd.stop()
for ft in self.faceTrackers:
ft.stop()
self.newFrameAvailable.release()
def getCopyOfFrame(self):
with self.newFrameAvailable:
return cv2.resize( self.fullFrame
, (0, 0)
, fx=self.detectorFrameSize
, fy=self.detectorFrameSize
)
def getReadWriteFrame(self):
with self.newFrameAvailable:
with self.usingCurrentFrame:
self.frameUsers += 1
return self.readFrame
def freeReadWriteFrame(self):
with self.usingCurrentFrame:
self.frameUsers -= 1
if self.frameUsers == 0:
self.usingCurrentFrame.notifyAll()
def applyToFrame(self, f):
with self.usingCurrentFrame:
f(self.writeFrame)
def makeNewFaceTracker(self, bb):
# Invoke closure
self.trackerMaker(self, bb)
def sendMessage(self, messagetype, message):
tic = clock()
with self.netLock:
messager.send_message(self.messageSock, messagetype, message)
def register(self, tracker, add):
if add:
self.messager.register(tracker.id, tracker.setLabel)
else:
self.messager.unregister(tracker.id)
def getWindowSize(self):
return (self.maxw, self.maxh)
def setHost(self, sock, msg):
self.messageSock = sock
self.start = True
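# Usage sketch (illustrative): trackerMaker, detectorMaker and the bbProcessor
# class below are placeholders for factories defined elsewhere in this project.
#
#   main = TrackerMain('127.0.0.1', 5000, trackerMaker=make_tracker,
#                      detectorMaker=make_detector,
#                      bbProcessor=bbprocessors.BBProcessor)
#   try:
#       while True:
#           main.mainLoop()   # one camera frame per call
#   finally:
#       main.stop()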
|
|
print 'start'
from dolfin import *
import sys
from random import gauss, expovariate
import math
from math import atan, pi, atan2, sqrt, cos, sin
import numpy as np
import nanopores as nano
import nanopores.geometries.pughpore as pughpore
from get_F import Force, Current
from get_D import Dx, Dy, Dz, dxDx, dyDy, dzDz, dis
import os
from time import time as timer
print 'ok'
HOME = os.path.expanduser("~")
PAPERDIR = os.path.join(HOME, "papers", "paper-howorka")
FIGDIR = os.path.join(PAPERDIR, "figures", "")
DATADIR = os.path.join(HOME,"Dropbox", "nanopores", "fields")
import nanopores.tools.fields as fields
fields.set_dir(DATADIR)
def argument(x,y,z):
return np.array([float(x),float(y),float(z)])
geop = nano.Params(pughpore.params)
physp = nano.Physics(name="pore_mol")
kT = physp.kT
eta = physp.eta
#H = geop.H
#R = geop.R
H = 100.
R = 50.
l0 = geop.l0
l1 = geop.l1
l2 = geop.l2
l3 = geop.l3
l4 = geop.l4
hpore = geop.hpore
hmem = geop.hmem
h2 = geop.h2
h1 = geop.h1
h4 = geop.h4
rMolecule = geop.rMolecule
eps = 0.1
beps = (l3 - rMolecule)*1e-1
r = rMolecule + eps
p0=hpore/2.
p1=p0-h1
p2=p0-h2
p3=-hpore/2.
def R_(z):
if z>=p3 and z<=p2:
return l3/2.
elif z>=p2 and z<=p1:
return l2/2.
elif z>=p1 and z<=p0:
return l0/2.
else: return R/2.
#def fac(z):
# if z>=p3 and z<=p2:
# return l3/2.-r
# elif z>p2 and z<p2+r:
# x=z-p2
# return -sqrt(r**2-x**2)+l3/2.
# elif z>=p2+r and z<=p1:
# return l2/2.-r
# elif z>p1 and z<p1+r:
# x=z-p1
# return -sqrt(r**2-x**2)+l2/2.
# elif z>=p1+r and z<=p0:
# return l0/2.-r
# elif z>p0 and z<p0+r:
# x=z-p0
# return -sqrt(r**2-x**2)+l0/2.
# elif z<p3 and z>p3-r:
# x=z-p3
# return -sqrt(r**2-x**2)+l3/2.
# else: return R/2.
#params=dict(avgbind1=7e6,avgbind2=1e2,P_bind1=5.e-3,P_bind2=1e-1,z0=hpore/2.+5.)
Dmol = kT/(6.*math.pi*eta*rMolecule*1e-9) # [m^2/s]
gamma = (6.*math.pi*eta*rMolecule) #friction [microgramm/s]
maxiter = 1e6 # [ns]
tau = .05 # [ns]
C = tau/gamma*1e9 # [s^2/kg * 1e9 nm/m]
coeff = math.sqrt(2*Dmol*1e9*tau) # [nm]
#avgbinding = 10000000.
#P_bind = 3.e-4
#F=[0.,0.,-1e-11]
#F=[0.,0.,0.]
def hatfct(ang):
x=(ang+2*pi)%(pi/2.)
if x<=pi/4.:
return x
else:
return pi/2.-x
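# D(x, y, z) below (sketch of intent): outside the pore it returns unit
# diffusivities with zero derivatives; inside, the radial position is normalised
# by the local pore radius R_(z), using the four-fold symmetry folded by hatfct,
# and the tabulated Dx/Dy/Dz profiles and their derivatives are rotated back
# into the x/y frame.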
def D(x,y,z):
if z>hpore/2. or z<-hpore/2.:
return [[1.,1.,1.],[0.,0.,0.]]
else:
if x==0 and y==0:
            return [[Dx(0.),Dy(0.),Dz(0.)],[dxDx(0.),dyDy(0.),dzDz(0.)]]
else:
ang=atan2(y,x)
ang2=hatfct(ang)
A=np.array([[cos(ang),-sin(ang),0.],[sin(ang),cos(ang),0.],[0.,0.,1.]])
dist=sqrt(x**2+y**2)*cos(ang2)/(R_(z))
vec1=A.dot(np.array([Dx(dist),Dy(dist),Dz(dist)]))
            vec2=A.dot(np.array([dxDx(dist),dyDy(dist),dzDz(dist)]))
return [list(vec1),list(vec2)]
def run(params,fieldsname):
z0 = params["z0"]
X = np.array([0.])
Y = np.array([0.])
Z = np.array([z0])
J1 = np.array([])
T = np.array([])
bind1 = 0
bind2 = 0
avgbind1=params["avgbind1"]
P_bind1=params["P_bind1"]
avgbind2=params["avgbind2"]
P_bind2=params["P_bind2"]
ffa = True
i=0
ood = False
while i<maxiter and Z[-1]>=-hpore/2.-2.:
if ood:
bind1 = 0
bind2 = 0
i=0
ood = False
ffa = True
X = np.array([0.])
Y = np.array([0.])
Z = np.array([z0])
T = np.array([])
J1 = np.array([])
add=tau
xi_x=gauss(0.,1.)
xi_y=gauss(0.,1.)
xi_z=gauss(0.,1.)
arg = argument(X[-1],Y[-1],Z[-1])
F = Force(X[-1],Y[-1],Z[-1])
D = [Dx(arg)*1e9,Dy(arg)*1e9,Dz(arg)*1e9]
dD = [dxDx(arg)*1e9,dyDy(arg)*1e9,dzDz(arg)*1e9]
# x_new = X[-1] + coeff*xi_x*math.sqrt(abs(Dxfac)) + C*Force[0]*Dxfac + DDx*tau*Dmol
# y_new = Y[-1] + coeff*xi_y*math.sqrt(abs(Dyfac)) + C*Force[1]*Dyfac + DDy*tau*Dmol
# z_new = Z[-1] + coeff*xi_z*math.sqrt(abs(Dzfac)) + C*Force[2]*Dzfac + DDz*tau*Dmol
# x_new = X[-1] + coeff*xi_x + C*Force[0]
# y_new = Y[-1] + coeff*xi_y + C*Force[1]
# z_new = Z[-1] + coeff*xi_z + C*Force[2]
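        # Euler-Maruyama step of the overdamped Langevin dynamics with a
        # position-dependent diffusivity: dx = (F*D/kT + dD)*tau + sqrt(2*D*tau)*xi,
        # where the 1e9/1e-9 factors convert between SI units and nm/ns.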
x_new = X[-1] + sqrt(2*D[0]*tau)*xi_x + F[0]*D[0]*1e-9*tau/kT+dD[0]*tau
y_new = Y[-1] + sqrt(2*D[1]*tau)*xi_y + F[1]*D[1]*1e-9*tau/kT+dD[1]*tau
z_new = Z[-1] + sqrt(2*D[2]*tau)*xi_z + F[2]*D[2]*1e-9*tau/kT+dD[2]*tau
if dis(argument(x_new,y_new,z_new)) < rMolecule:
x_new = X[-1]
y_new = Y[-1]
z_new = Z[-1]
if ffa and np.random.binomial(1,P_bind1)==1 and Z[-1]<=hpore/2.-h2-5 and Z[-1]>=-hpore/2.+h4:
add+=expovariate(lambd=1./avgbind1)
# print add
bind1+=1
elif ffa and np.random.binomial(1,P_bind2)==1 and ((Z[-1]<=-hpore/2.+h4 and Z[-1]>=-hpore/2.+0.) or (Z[-1]<=hpore/2.-h2 and Z[-1]>=hpore/2.-h2-5.)):
add+=expovariate(lambd=1./avgbind2)
bind2+=1
else:
add+=0.
ffa = False
elif dis(argument(x_new,y_new,z_new)) < rMolecule + beps:
pass
else:
ffa = True
X = np.append(X,x_new)
Y = np.append(Y,y_new)
Z = np.append(Z,z_new)
if abs(Z[-1])>35. or abs(X[-1])>10. or abs(Y[-1])>10.:
print 'Out of domain!'
ood = True
X[-1]=0.
Y[-1]=0.
Z[-1]=0.
Jx=Current(X[-1],Y[-1],Z[-1])
if math.isnan(Jx):
if add<=tau:
Jx = J1[-1]
else:
print 'current at binding position is NaN!!!'
print 'current = %.1e A'%Jx
print 'X = %.8f'%X[-1]
print 'Y = %.8f'%Y[-1]
print 'Z = %.8f'%Z[-1]
print 'add = %.2f nanoseconds'%add
exit()
J1=np.append(J1,Jx)
T =np.append(T,add)
i+=1
if not (Z[i]<=H/2. and Z[i]>=-H/2 and X[i] <=R/2 and X[i] >=-R/2 and Y[i] <=R/2 and Y[i] >=-R/2):
break
if i>=maxiter:
print 'randomwalk: more than 1e6 steps!'
# tau_off = np.sum(T)*1e-6
# curr = 7.523849e-10
# amp = (curr-np.inner(T*1e-6,J1)/tau_off)/curr*100.
# if tau_off<1.:
# t1 = [tau_off]
# a1 = [amp]
# t2 = []
# a2 = []
# else:
# t2 = [tau_off]
# a2 = [amp]
# t1 = []
# a1 = []
X=[list(X)]
Y=[list(Y)]
Z=[list(Z)]
T=[list(T)]
J1=[list(J1)]
fields.save_fields(fieldsname,params,X=X, Y=Y, Z=Z, T=T, J=J1)
|
|
"""
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
def env(key, default=None):
"""Retrieves env vars and makes Python boolean replacements"""
val = os.getenv(key, default)
if val == 'True':
val = True
elif val == 'False':
val = False
return val
def env_list(key, default=""):
val = os.getenv(key, default)
return val.split(",")
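# A usage sketch with hypothetical environment values:
#     DJANGO_DEBUG="False"                -> env("DJANGO_DEBUG", True) == False
#     DJANGO_ALLOWED_HOSTS="a.com,b.com"  -> env_list("DJANGO_ALLOWED_HOSTS") == ["a.com", "b.com"]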
PROJECT_DIR = env(
"DJANGO_PROJECT_DIR",
os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("DJANGO_SECRET_KEY", 'changeme')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env("DJANGO_DEBUG", True)
ALLOWED_HOSTS = env_list("DJANGO_ALLOWED_HOSTS", '*')
# Application definition
INSTALLED_APPS = [
'core', # our hack to override templates
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'analytical',
'anymail',
'raven.contrib.django.raven_compat',
'localflavor',
'django_extensions',
'fancy_cache',
'material.theme.lightblue',
'material',
'dbbackup',
'material.frontend',
'django_admin_row_actions',
'hijack',
'compat',
'djgeojson',
'leaflet',
    # our apps
'elecciones',
'fiscales',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'escrutinio_social.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'escrutinio_social.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# FORMAT: postgres://USER:PASSWORD@HOST:PORT/NAME
DATABASES = {'default': dj_database_url.parse(
env("DJANGO_DATABASE_URL",
"sqlite://" + os.path.join(BASE_DIR, 'db.sqlite3')))}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'es-ar'
TIME_ZONE = 'America/Argentina/Cordoba'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
DBBACKUP_STORAGE = 'django.core.files.storage.FileSystemStorage'
DBBACKUP_STORAGE_OPTIONS = {'location': os.path.join(BASE_DIR, 'backups')}
HIJACK_LOGIN_REDIRECT_URL = 'home' # Where admins are redirected to after hijacking a user
HIJACK_ALLOW_GET_REQUESTS = True
HIJACK_LOGOUT_REDIRECT_URL = 'admin:fiscales_voluntario_changelist'
LEAFLET_CONFIG = {
'DEFAULT_CENTER': (-32.3108144, -63.7066957),
'DEFAULT_ZOOM': 7,
'MIN_ZOOM': 4,
'MAX_ZOOM': 18,
'PLUGINS': {
'awesome-markers': {
'css': ['https://cdn.rawgit.com/lvoogdt/Leaflet.awesome-markers/2.0/develop/dist/leaflet.awesome-markers.css'],
'js': 'https://cdn.rawgit.com/lvoogdt/Leaflet.awesome-markers/2.0/develop/dist/leaflet.awesome-markers.min.js',
'auto-include': True,
},
}
}
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAdminUser',
],
'PAGE_SIZE': 10
}
ANYMAIL = {
# (exact settings here depend on your ESP...)
"MAILGUN_API_KEY": "",
"MAILGUN_SENDER_DOMAIN": '', # your Mailgun domain, if needed
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend" # or sendgrid.EmailBackend, or...
DEFAULT_FROM_EMAIL = "[email protected]" # if you don't already have this in settings
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
# 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/escrutinio_social_cache',
}
}
DEFAULT_PASS_PREFIX = ''
GOOGLE_ANALYTICS_PROPERTY_ID = env("GOOGLE_ANALYTICS_PROPERTY_ID", 'UA-123456-7')
|
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import errno
import http.client
import mmap
import operator
import io
import os
import sys
import tempfile
import warnings
import zipfile
import re
from functools import reduce
import numpy as np
from .util import (isreadable, iswritable, isfile, fileobj_open, fileobj_name,
fileobj_closed, fileobj_mode, _array_from_file,
_array_to_file, _write_string)
from astropy.utils.data import download_file, _is_url
from astropy.utils.decorators import classproperty
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import NOT_OVERWRITING_MSG
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2
if HAS_BZ2:
import bz2
# Maps astropy.io.fits-specific file mode names to the appropriate file
# modes to use for the underlying raw files.
IO_FITS_MODES = {
'readonly': 'rb',
'copyonwrite': 'rb',
'update': 'rb+',
'append': 'ab+',
'ostream': 'wb',
'denywrite': 'rb'}
# Maps OS-level file modes to the appropriate astropy.io.fits specific mode
# to use when given file objects but no mode specified; obviously in
# IO_FITS_MODES there are overlaps; for example 'readonly' and 'denywrite'
# both require the file to be opened in 'rb' mode. But 'readonly' is the
# default behavior for such files if not otherwise specified.
# Note: 'ab' is only supported for 'ostream' which is output-only.
FILE_MODES = {
'rb': 'readonly', 'rb+': 'update',
'wb': 'ostream', 'wb+': 'update',
'ab': 'ostream', 'ab+': 'append'}
# A match indicates the file was opened in text mode, which is not allowed
TEXT_RE = re.compile(r'^[rwa]((t?\+?)|(\+?t?))$')
# readonly actually uses copyonwrite for mmap so that readonly with mmap and
# without mmap have the same behavior with regard to updating the array. To
# get a truly readonly mmap use denywrite
# the name 'denywrite' comes from a deprecated flag to mmap() on Linux--it
# should be clarified that 'denywrite' mode is not directly analogous to the
# use of that flag; it was just taken, for lack of anything better, as a name
# that means something like "read only" but isn't readonly.
MEMMAP_MODES = {'readonly': mmap.ACCESS_COPY,
'copyonwrite': mmap.ACCESS_COPY,
'update': mmap.ACCESS_WRITE,
'append': mmap.ACCESS_COPY,
'denywrite': mmap.ACCESS_READ}
# TODO: Eventually raise a warning, and maybe even later disable the use of
# 'copyonwrite' and 'denywrite' modes unless memmap=True. For now, however,
# that would generate too many warnings for too many users. If nothing else,
# wait until the new logging system is in place.
GZIP_MAGIC = b'\x1f\x8b\x08'
PKZIP_MAGIC = b'\x50\x4b\x03\x04'
BZIP2_MAGIC = b'\x42\x5a'
def _is_bz2file(fileobj):
if HAS_BZ2:
return isinstance(fileobj, bz2.BZ2File)
else:
return False
def _normalize_fits_mode(mode):
if mode is not None and mode not in IO_FITS_MODES:
if TEXT_RE.match(mode):
raise ValueError(
"Text mode '{}' not supported: "
"files must be opened in binary mode".format(mode))
new_mode = FILE_MODES.get(mode)
if new_mode not in IO_FITS_MODES:
raise ValueError(f"Mode '{mode}' not recognized")
mode = new_mode
return mode
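# A quick sketch of the mapping above (values follow from IO_FITS_MODES and
# FILE_MODES as defined in this module):
#     _normalize_fits_mode('readonly')  -> 'readonly'   (already a FITS mode)
#     _normalize_fits_mode('rb+')       -> 'update'
#     _normalize_fits_mode('rt')        -> ValueError (text mode not allowed)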
class _File:
"""
Represents a FITS file on disk (or in some other file-like object).
"""
def __init__(self, fileobj=None, mode=None, memmap=None, overwrite=False,
cache=True):
self.strict_memmap = bool(memmap)
memmap = True if memmap is None else memmap
self._file = None
self.closed = False
self.binary = True
self.mode = mode
self.memmap = memmap
self.compression = None
self.readonly = False
self.writeonly = False
# Should the object be closed on error: see
# https://github.com/astropy/astropy/issues/6168
self.close_on_error = False
# Holds mmap instance for files that use mmap
self._mmap = None
if fileobj is None:
self.simulateonly = True
return
else:
self.simulateonly = False
if isinstance(fileobj, os.PathLike):
fileobj = os.fspath(fileobj)
if mode is not None and mode not in IO_FITS_MODES:
raise ValueError(f"Mode '{mode}' not recognized")
if isfile(fileobj):
objmode = _normalize_fits_mode(fileobj_mode(fileobj))
if mode is not None and mode != objmode:
raise ValueError(
"Requested FITS mode '{}' not compatible with open file "
"handle mode '{}'".format(mode, objmode))
mode = objmode
if mode is None:
mode = 'readonly'
# Handle raw URLs
if (isinstance(fileobj, (str, bytes)) and
mode not in ('ostream', 'append', 'update') and _is_url(fileobj)):
self.name = download_file(fileobj, cache=cache)
# Handle responses from URL requests that have already been opened
elif isinstance(fileobj, http.client.HTTPResponse):
if mode in ('ostream', 'append', 'update'):
raise ValueError(
f"Mode {mode} not supported for HTTPResponse")
fileobj = io.BytesIO(fileobj.read())
else:
self.name = fileobj_name(fileobj)
self.mode = mode
        # Whether the underlying fileobj is a generic file-like object rather
        # than an actual file object
self.file_like = False
# Initialize the internal self._file object
if isfile(fileobj):
self._open_fileobj(fileobj, mode, overwrite)
elif isinstance(fileobj, (str, bytes)):
self._open_filename(fileobj, mode, overwrite)
else:
self._open_filelike(fileobj, mode, overwrite)
self.fileobj_mode = fileobj_mode(self._file)
if isinstance(fileobj, gzip.GzipFile):
self.compression = 'gzip'
elif isinstance(fileobj, zipfile.ZipFile):
# Reading from zip files is supported but not writing (yet)
self.compression = 'zip'
elif _is_bz2file(fileobj):
self.compression = 'bzip2'
if (mode in ('readonly', 'copyonwrite', 'denywrite') or
(self.compression and mode == 'update')):
self.readonly = True
elif (mode == 'ostream' or
(self.compression and mode == 'append')):
self.writeonly = True
# For 'ab+' mode, the pointer is at the end after the open in
# Linux, but is at the beginning in Solaris.
if (mode == 'ostream' or self.compression or
not hasattr(self._file, 'seek')):
# For output stream start with a truncated file.
# For compressed files we can't really guess at the size
self.size = 0
else:
pos = self._file.tell()
self._file.seek(0, 2)
self.size = self._file.tell()
self._file.seek(pos)
if self.memmap:
if not isfile(self._file):
self.memmap = False
elif not self.readonly and not self._mmap_available:
# Test mmap.flush--see
# https://github.com/astropy/astropy/issues/968
self.memmap = False
def __repr__(self):
return f'<{self.__module__}.{self.__class__.__name__} {self._file}>'
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def readable(self):
if self.writeonly:
return False
return isreadable(self._file)
def read(self, size=None):
if not hasattr(self._file, 'read'):
raise EOFError
try:
return self._file.read(size)
except OSError:
# On some versions of Python, it appears, GzipFile will raise an
# OSError if you try to read past its end (as opposed to just
# returning '')
if self.compression == 'gzip':
return ''
raise
def readarray(self, size=None, offset=0, dtype=np.uint8, shape=None):
"""
Similar to file.read(), but returns the contents of the underlying
file as a numpy array (or mmap'd array if memmap=True) rather than a
string.
Usually it's best not to use the `size` argument with this method, but
it's provided for compatibility.
"""
if not hasattr(self._file, 'read'):
raise EOFError
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if size and size % dtype.itemsize != 0:
raise ValueError(f'size {size} not a multiple of {dtype}')
if isinstance(shape, int):
shape = (shape,)
if not (size or shape):
warnings.warn('No size or shape given to readarray(); assuming a '
'shape of (1,)', AstropyUserWarning)
shape = (1,)
if size and not shape:
shape = (size // dtype.itemsize,)
if size and shape:
actualsize = np.prod(shape) * dtype.itemsize
if actualsize > size:
raise ValueError('size {} is too few bytes for a {} array of '
'{}'.format(size, shape, dtype))
elif actualsize < size:
raise ValueError('size {} is too many bytes for a {} array of '
'{}'.format(size, shape, dtype))
filepos = self._file.tell()
try:
if self.memmap:
if self._mmap is None:
# Instantiate Memmap array of the file offset at 0 (so we
# can return slices of it to offset anywhere else into the
# file)
access_mode = MEMMAP_MODES[self.mode]
# For reasons unknown the file needs to point to (near)
# the beginning or end of the file. No idea how close to
# the beginning or end.
# If I had to guess there is some bug in the mmap module
                    # of CPython or perhaps in Microsoft's underlying code
# for generating the mmap.
self._file.seek(0, 0)
# This would also work:
# self._file.seek(0, 2) # moves to the end
try:
self._mmap = mmap.mmap(self._file.fileno(), 0,
access=access_mode,
offset=0)
except OSError as exc:
# NOTE: mode='readonly' results in the memory-mapping
# using the ACCESS_COPY mode in mmap so that users can
# modify arrays. However, on some systems, the OS raises
# a '[Errno 12] Cannot allocate memory' OSError if the
# address space is smaller than the file. The solution
# is to open the file in mode='denywrite', which at
# least allows the file to be opened even if the
# resulting arrays will be truly read-only.
if exc.errno == errno.ENOMEM and self.mode == 'readonly':
warnings.warn("Could not memory map array with "
"mode='readonly', falling back to "
"mode='denywrite', which means that "
"the array will be read-only",
AstropyUserWarning)
self._mmap = mmap.mmap(self._file.fileno(), 0,
access=MEMMAP_MODES['denywrite'],
offset=0)
else:
raise
return np.ndarray(shape=shape, dtype=dtype, offset=offset,
buffer=self._mmap)
else:
count = reduce(operator.mul, shape)
self._file.seek(offset)
data = _array_from_file(self._file, dtype, count)
data.shape = shape
return data
finally:
# Make sure we leave the file in the position we found it; on
# some platforms (e.g. Windows) mmaping a file handle can also
# reset its file pointer
self._file.seek(filepos)
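    # A usage sketch (hypothetical ``f = _File('example.fits')``): FITS data
    # are organized in 2880-byte blocks, so a single block could be read with
    #     f.readarray(shape=(2880,), dtype=np.uint8)
    # or, equivalently, with size=2880 and the shape inferred from the dtype.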
def writable(self):
if self.readonly:
return False
return iswritable(self._file)
def write(self, string):
if self.simulateonly:
return
if hasattr(self._file, 'write'):
_write_string(self._file, string)
def writearray(self, array):
"""
Similar to file.write(), but writes a numpy array instead of a string.
Also like file.write(), a flush() or close() may be needed before
the file on disk reflects the data written.
"""
if self.simulateonly:
return
if hasattr(self._file, 'write'):
_array_to_file(array, self._file)
def flush(self):
if self.simulateonly:
return
if hasattr(self._file, 'flush'):
self._file.flush()
def seek(self, offset, whence=0):
if not hasattr(self._file, 'seek'):
return
self._file.seek(offset, whence)
pos = self._file.tell()
if self.size and pos > self.size:
warnings.warn('File may have been truncated: actual file length '
'({}) is smaller than the expected size ({})'
.format(self.size, pos), AstropyUserWarning)
def tell(self):
if self.simulateonly:
raise OSError
if not hasattr(self._file, 'tell'):
raise EOFError
return self._file.tell()
def truncate(self, size=None):
if hasattr(self._file, 'truncate'):
self._file.truncate(size)
def close(self):
"""
Close the 'physical' FITS file.
"""
if hasattr(self._file, 'close'):
self._file.close()
self._maybe_close_mmap()
        # Set self._mmap to None anyway since no new .data attributes can be
        # loaded after the file is closed
self._mmap = None
self.closed = True
self.close_on_error = False
def _maybe_close_mmap(self, refcount_delta=0):
"""
When mmap is in use these objects hold a reference to the mmap of the
file (so there is only one, shared by all HDUs that reference this
file).
This will close the mmap if there are no arrays referencing it.
"""
if (self._mmap is not None and
sys.getrefcount(self._mmap) == 2 + refcount_delta):
self._mmap.close()
self._mmap = None
def _overwrite_existing(self, overwrite, fileobj, closed):
"""Overwrite an existing file if ``overwrite`` is ``True``, otherwise
raise an OSError. The exact behavior of this method depends on the
_File object state and is only meant for use within the ``_open_*``
internal methods.
"""
# The file will be overwritten...
if ((self.file_like and hasattr(fileobj, 'len') and fileobj.len > 0) or
(os.path.exists(self.name) and os.path.getsize(self.name) != 0)):
if overwrite:
if self.file_like and hasattr(fileobj, 'truncate'):
fileobj.truncate(0)
else:
if not closed:
fileobj.close()
os.remove(self.name)
else:
raise OSError(NOT_OVERWRITING_MSG.format(self.name))
def _try_read_compressed(self, obj_or_name, magic, mode, ext=''):
"""Attempt to determine if the given file is compressed"""
is_ostream = mode == 'ostream'
if (is_ostream and ext == '.gz') or magic.startswith(GZIP_MAGIC):
if mode == 'append':
                raise OSError("'append' mode is not supported with gzip files. "
                    "Use 'update' mode instead")
# Handle gzip files
kwargs = dict(mode=IO_FITS_MODES[mode])
if isinstance(obj_or_name, str):
kwargs['filename'] = obj_or_name
else:
kwargs['fileobj'] = obj_or_name
self._file = gzip.GzipFile(**kwargs)
self.compression = 'gzip'
elif (is_ostream and ext == '.zip') or magic.startswith(PKZIP_MAGIC):
# Handle zip files
self._open_zipfile(self.name, mode)
self.compression = 'zip'
elif (is_ostream and ext == '.bz2') or magic.startswith(BZIP2_MAGIC):
# Handle bzip2 files
if mode in ['update', 'append']:
raise OSError("update and append modes are not supported "
"with bzip2 files")
if not HAS_BZ2:
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module.")
# bzip2 only supports 'w' and 'r' modes
bzip2_mode = 'w' if is_ostream else 'r'
self._file = bz2.BZ2File(obj_or_name, mode=bzip2_mode)
self.compression = 'bzip2'
return self.compression is not None
def _open_fileobj(self, fileobj, mode, overwrite):
"""Open a FITS file from a file object (including compressed files)."""
closed = fileobj_closed(fileobj)
# FIXME: this variable was unused, check if it was useful
# fmode = fileobj_mode(fileobj) or IO_FITS_MODES[mode]
if mode == 'ostream':
self._overwrite_existing(overwrite, fileobj, closed)
if not closed:
self._file = fileobj
elif isfile(fileobj):
self._file = fileobj_open(self.name, IO_FITS_MODES[mode])
# Attempt to determine if the file represented by the open file object
# is compressed
try:
# We need to account for the possibility that the underlying file
# handle may have been opened with either 'ab' or 'ab+', which
# means that the current file position is at the end of the file.
if mode in ['ostream', 'append']:
self._file.seek(0)
magic = self._file.read(4)
# No matter whether the underlying file was opened with 'ab' or
# 'ab+', we need to return to the beginning of the file in order
# to properly process the FITS header (and handle the possibility
# of a compressed file).
self._file.seek(0)
except OSError:
return
self._try_read_compressed(fileobj, magic, mode)
def _open_filelike(self, fileobj, mode, overwrite):
"""Open a FITS file from a file-like object, i.e. one that has
read and/or write methods.
"""
self.file_like = True
self._file = fileobj
if fileobj_closed(fileobj):
raise OSError("Cannot read from/write to a closed file-like "
"object ({!r}).".format(fileobj))
if isinstance(fileobj, zipfile.ZipFile):
self._open_zipfile(fileobj, mode)
# We can bypass any additional checks at this point since now
# self._file points to the temp file extracted from the zip
return
        # If there are no seek or tell methods then set the mode to
        # output streaming.
if (not hasattr(self._file, 'seek') or
not hasattr(self._file, 'tell')):
self.mode = mode = 'ostream'
if mode == 'ostream':
self._overwrite_existing(overwrite, fileobj, False)
# Any "writeable" mode requires a write() method on the file object
if (self.mode in ('update', 'append', 'ostream') and
not hasattr(self._file, 'write')):
raise OSError("File-like object does not have a 'write' "
"method, required for mode '{}'.".format(self.mode))
# Any mode except for 'ostream' requires readability
if self.mode != 'ostream' and not hasattr(self._file, 'read'):
raise OSError("File-like object does not have a 'read' "
"method, required for mode {!r}.".format(self.mode))
def _open_filename(self, filename, mode, overwrite):
"""Open a FITS file from a filename string."""
if mode == 'ostream':
self._overwrite_existing(overwrite, None, True)
if os.path.exists(self.name):
with fileobj_open(self.name, 'rb') as f:
magic = f.read(4)
else:
magic = b''
ext = os.path.splitext(self.name)[1]
if not self._try_read_compressed(self.name, magic, mode, ext=ext):
self._file = fileobj_open(self.name, IO_FITS_MODES[mode])
self.close_on_error = True
# Make certain we're back at the beginning of the file
# BZ2File does not support seek when the file is open for writing, but
# when opening a file for write, bz2.BZ2File always truncates anyway.
if not (_is_bz2file(self._file) and mode == 'ostream'):
self._file.seek(0)
@classproperty(lazy=True)
def _mmap_available(cls):
"""Tests that mmap, and specifically mmap.flush works. This may
        not be the case on some uncommon platforms (see
https://github.com/astropy/astropy/issues/968).
If mmap.flush is found not to work, ``self.memmap = False`` is
set and a warning is issued.
"""
tmpfd, tmpname = tempfile.mkstemp()
try:
# Windows does not allow mappings on empty files
os.write(tmpfd, b' ')
os.fsync(tmpfd)
try:
mm = mmap.mmap(tmpfd, 1, access=mmap.ACCESS_WRITE)
except OSError as exc:
warnings.warn('Failed to create mmap: {}; mmap use will be '
'disabled'.format(str(exc)), AstropyUserWarning)
del exc
return False
try:
mm.flush()
except OSError:
warnings.warn('mmap.flush is unavailable on this platform; '
'using mmap in writeable mode will be disabled',
AstropyUserWarning)
return False
finally:
mm.close()
finally:
os.close(tmpfd)
os.remove(tmpname)
return True
def _open_zipfile(self, fileobj, mode):
"""Limited support for zipfile.ZipFile objects containing a single
        file. Allows reading only for now by extracting the file to a
tempfile.
"""
if mode in ('update', 'append'):
raise OSError(
"Writing to zipped fits files is not currently "
"supported")
if not isinstance(fileobj, zipfile.ZipFile):
zfile = zipfile.ZipFile(fileobj)
close = True
else:
zfile = fileobj
close = False
namelist = zfile.namelist()
if len(namelist) != 1:
raise OSError(
"Zip files with multiple members are not supported.")
self._file = tempfile.NamedTemporaryFile(suffix='.fits')
self._file.write(zfile.read(namelist[0]))
if close:
zfile.close()
# We just wrote the contents of the first file in the archive to a new
# temp file, which now serves as our underlying file object. So it's
# necessary to reset the position back to the beginning
self._file.seek(0)
|
|
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Utils
"""
import contextlib
import uuid
import mock
from mox3 import mox
from oslo_config import cfg
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import db
from nova import exception
from nova import objects
from nova import rpc
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
CONF = cfg.CONF
class SchedulerUtilsTestCase(test.NoDBTestCase):
"""Test case for scheduler utils methods."""
def setUp(self):
super(SchedulerUtilsTestCase, self).setUp()
self.context = 'fake-context'
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
def test_build_request_spec_without_image(self, mock_get):
image = None
instance = {'uuid': 'fake-uuid'}
instance_type = objects.Flavor(**test_flavor.fake_flavor)
mock_get.return_value = objects.Flavor(extra_specs={})
self.mox.StubOutWithMock(flavors, 'extract_flavor')
flavors.extract_flavor(mox.IgnoreArg()).AndReturn(instance_type)
self.mox.ReplayAll()
request_spec = scheduler_utils.build_request_spec(self.context, image,
[instance])
self.assertEqual({}, request_spec['image'])
def test_build_request_spec_with_object(self):
instance_type = objects.Flavor()
instance = fake_instance.fake_instance_obj(self.context)
with mock.patch.object(instance, 'get_flavor') as mock_get:
mock_get.return_value = instance_type
request_spec = scheduler_utils.build_request_spec(self.context,
None,
[instance])
mock_get.assert_called_once_with()
self.assertIsInstance(request_spec['instance_properties'], dict)
@mock.patch.object(rpc, 'get_notifier', return_value=mock.Mock())
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(objects.Instance, 'save')
def test_set_vm_state_and_notify(self, mock_save, mock_add, mock_get):
expected_uuid = 'fake-uuid'
request_spec = dict(instance_properties=dict(uuid='other-uuid'))
updates = dict(vm_state='fake-vm-state')
service = 'fake-service'
method = 'fake-method'
exc_info = 'exc_info'
payload = dict(request_spec=request_spec,
instance_properties=request_spec.get(
'instance_properties', {}),
instance_id=expected_uuid,
state='fake-vm-state',
method=method,
reason=exc_info)
event_type = '%s.%s' % (service, method)
scheduler_utils.set_vm_state_and_notify(self.context,
expected_uuid,
service,
method,
updates,
exc_info,
request_spec,
db)
mock_save.assert_called_once_with()
mock_add.assert_called_once_with(self.context, mock.ANY,
exc_info, mock.ANY)
self.assertIsInstance(mock_add.call_args[0][1], objects.Instance)
self.assertIsInstance(mock_add.call_args[0][3], tuple)
mock_get.return_value.error.assert_called_once_with(self.context,
event_type,
payload)
def _test_populate_filter_props(self, host_state_obj=True,
with_retry=True,
force_hosts=None,
force_nodes=None):
if force_hosts is None:
force_hosts = []
if force_nodes is None:
force_nodes = []
if with_retry:
if ((len(force_hosts) == 1 and len(force_nodes) <= 1)
or (len(force_nodes) == 1 and len(force_hosts) <= 1)):
filter_properties = dict(force_hosts=force_hosts,
force_nodes=force_nodes)
elif len(force_hosts) > 1 or len(force_nodes) > 1:
filter_properties = dict(retry=dict(hosts=[]),
force_hosts=force_hosts,
force_nodes=force_nodes)
else:
filter_properties = dict(retry=dict(hosts=[]))
else:
filter_properties = dict()
if host_state_obj:
class host_state(object):
host = 'fake-host'
nodename = 'fake-node'
limits = 'fake-limits'
else:
host_state = dict(host='fake-host',
nodename='fake-node',
limits='fake-limits')
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
enable_retry_force_hosts = not force_hosts or len(force_hosts) > 1
enable_retry_force_nodes = not force_nodes or len(force_nodes) > 1
if with_retry or enable_retry_force_hosts or enable_retry_force_nodes:
# So we can check for 2 hosts
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
if force_hosts:
expected_limits = None
else:
expected_limits = 'fake-limits'
self.assertEqual(expected_limits,
filter_properties.get('limits'))
if (with_retry and enable_retry_force_hosts
and enable_retry_force_nodes):
self.assertEqual([['fake-host', 'fake-node'],
['fake-host', 'fake-node']],
filter_properties['retry']['hosts'])
else:
self.assertNotIn('retry', filter_properties)
def test_populate_filter_props(self):
self._test_populate_filter_props()
def test_populate_filter_props_host_dict(self):
self._test_populate_filter_props(host_state_obj=False)
def test_populate_filter_props_no_retry(self):
self._test_populate_filter_props(with_retry=False)
def test_populate_filter_props_force_hosts_no_retry(self):
self._test_populate_filter_props(force_hosts=['force-host'])
def test_populate_filter_props_force_nodes_no_retry(self):
self._test_populate_filter_props(force_nodes=['force-node'])
def test_populate_filter_props_multi_force_hosts_with_retry(self):
self._test_populate_filter_props(force_hosts=['force-host1',
'force-host2'])
def test_populate_filter_props_multi_force_nodes_with_retry(self):
self._test_populate_filter_props(force_nodes=['force-node1',
'force-node2'])
@mock.patch.object(scheduler_utils, '_max_attempts')
def test_populate_retry_exception_at_max_attempts(self, _max_attempts):
_max_attempts.return_value = 2
msg = 'The exception text was preserved!'
filter_properties = dict(retry=dict(num_attempts=2, hosts=[],
exc=[msg]))
nvh = self.assertRaises(exception.MaxRetriesExceeded,
scheduler_utils.populate_retry,
filter_properties, 'fake-uuid')
# make sure 'msg' is a substring of the complete exception text
self.assertIn(msg, nvh.message)
def _check_parse_options(self, opts, sep, converter, expected):
good = scheduler_utils.parse_options(opts,
sep=sep,
converter=converter)
for item in expected:
self.assertIn(item, good)
def test_parse_options(self):
# check normal
self._check_parse_options(['foo=1', 'bar=-2.1'],
'=',
float,
[('foo', 1.0), ('bar', -2.1)])
# check convert error
self._check_parse_options(['foo=a1', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
# check separator missing
self._check_parse_options(['foo', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
# check key missing
self._check_parse_options(['=5', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
def test_validate_filters_configured(self):
self.flags(scheduler_default_filters='FakeFilter1,FakeFilter2')
self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
def _create_server_group(self, policy='anti-affinity'):
instance = fake_instance.fake_instance_obj(self.context,
params={'host': 'hostA'})
group = objects.InstanceGroup()
group.name = 'pele'
group.uuid = str(uuid.uuid4())
group.members = [instance.uuid]
group.policies = [policy]
return group
def _get_group_details(self, group, policy=None):
group_hosts = ['hostB']
with contextlib.nested(
mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
return_value=group),
mock.patch.object(objects.InstanceGroup, 'get_hosts',
return_value=['hostA']),
) as (get_group, get_hosts):
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
group_info = scheduler_utils._get_group_details(
self.context, 'fake_uuid', group_hosts)
self.assertEqual(
(set(['hostA', 'hostB']), [policy]),
group_info)
def test_get_group_details(self):
for policy in ['affinity', 'anti-affinity']:
group = self._create_server_group(policy)
self._get_group_details(group, policy=policy)
def test_get_group_details_with_no_affinity_filters(self):
self.flags(scheduler_default_filters=['fake'])
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
group_info = scheduler_utils._get_group_details(self.context,
'fake-uuid')
self.assertIsNone(group_info)
def test_get_group_details_with_no_instance_uuid(self):
self.flags(scheduler_default_filters=['fake'])
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
group_info = scheduler_utils._get_group_details(self.context, None)
self.assertIsNone(group_info)
def _get_group_details_with_filter_not_configured(self, policy):
wrong_filter = {
'affinity': 'ServerGroupAntiAffinityFilter',
'anti-affinity': 'ServerGroupAffinityFilter',
}
self.flags(scheduler_default_filters=[wrong_filter[policy]])
instance = fake_instance.fake_instance_obj(self.context,
params={'host': 'hostA'})
group = objects.InstanceGroup()
group.uuid = str(uuid.uuid4())
group.members = [instance.uuid]
group.policies = [policy]
with contextlib.nested(
mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
return_value=group),
mock.patch.object(objects.InstanceGroup, 'get_hosts',
return_value=['hostA']),
) as (get_group, get_hosts):
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
self.assertRaises(exception.UnsupportedPolicyException,
scheduler_utils._get_group_details,
self.context, 'fake-uuid')
def test_get_group_details_with_filter_not_configured(self):
policies = ['anti-affinity', 'affinity']
for policy in policies:
self._get_group_details_with_filter_not_configured(policy)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_in_filter_properties(self, mock_ggd):
mock_ggd.return_value = scheduler_utils.GroupDetails(
hosts=set(['hostA', 'hostB']), policies=['policy'])
spec = {'instance_properties': {'uuid': 'fake-uuid'}}
filter_props = {'group_hosts': ['hostC']}
scheduler_utils.setup_instance_group(self.context, spec, filter_props)
mock_ggd.assert_called_once_with(self.context, 'fake-uuid',
['hostC'])
expected_filter_props = {'group_updated': True,
'group_hosts': set(['hostA', 'hostB']),
'group_policies': ['policy']}
self.assertEqual(expected_filter_props, filter_props)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_with_no_group(self, mock_ggd):
mock_ggd.return_value = None
spec = {'instance_properties': {'uuid': 'fake-uuid'}}
filter_props = {'group_hosts': ['hostC']}
scheduler_utils.setup_instance_group(self.context, spec, filter_props)
mock_ggd.assert_called_once_with(self.context, 'fake-uuid',
['hostC'])
self.assertNotIn('group_updated', filter_props)
self.assertNotIn('group_policies', filter_props)
self.assertEqual(['hostC'], filter_props['group_hosts'])
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_with_filter_not_configured(self, mock_ggd):
mock_ggd.side_effect = exception.NoValidHost(reason='whatever')
spec = {'instance_properties': {'uuid': 'fake-uuid'}}
filter_props = {'group_hosts': ['hostC']}
self.assertRaises(exception.NoValidHost,
scheduler_utils.setup_instance_group,
self.context, spec, filter_props)
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from code import Code
from model import PropertyType
import cpp_util
import model
import os
class HGenerator(object):
"""A .h generator for a namespace.
"""
def __init__(self, namespace, cpp_type_generator):
self._cpp_type_generator = cpp_type_generator
self._namespace = namespace
self._target_namespace = (
self._cpp_type_generator.GetCppNamespaceName(self._namespace))
def Generate(self):
"""Generates a Code object with the .h for a single namespace.
"""
c = Code()
(c.Append(cpp_util.CHROMIUM_LICENSE)
.Append()
.Append(cpp_util.GENERATED_FILE_MESSAGE % self._namespace.source_file)
.Append()
)
ifndef_name = cpp_util.GenerateIfndefName(self._namespace.source_file_dir,
self._target_namespace)
(c.Append('#ifndef %s' % ifndef_name)
.Append('#define %s' % ifndef_name)
.Append('#pragma once')
.Append()
.Append('#include <string>')
.Append('#include <vector>')
.Append()
.Append('#include "base/basictypes.h"')
.Append('#include "base/memory/linked_ptr.h"')
.Append('#include "base/memory/scoped_ptr.h"')
.Append('#include "base/values.h"')
.Append('#include "tools/json_schema_compiler/any.h"')
.Append()
)
c.Concat(self._cpp_type_generator.GetRootNamespaceStart())
# TODO(calamity): These forward declarations should be #includes to allow
# $ref types from other files to be used as required params. This requires
# some detangling of windows and tabs which will currently lead to circular
# #includes.
forward_declarations = (
self._cpp_type_generator.GenerateForwardDeclarations())
if not forward_declarations.IsEmpty():
(c.Append()
.Concat(forward_declarations)
.Append()
)
c.Concat(self._cpp_type_generator.GetNamespaceStart())
c.Append()
if self._namespace.properties:
(c.Append('//')
.Append('// Properties')
.Append('//')
.Append()
)
for property in self._namespace.properties.values():
property_code = self._cpp_type_generator.GeneratePropertyValues(
property,
'extern const %(type)s %(name)s;')
if property_code:
c.Concat(property_code).Append()
if self._namespace.types:
(c.Append('//')
.Append('// Types')
.Append('//')
.Append()
)
for type_ in self._FieldDependencyOrder():
(c.Concat(self._GenerateType(type_))
.Append()
)
if self._namespace.functions:
(c.Append('//')
.Append('// Functions')
.Append('//')
.Append()
)
for function in self._namespace.functions.values():
(c.Concat(self._GenerateFunction(function))
.Append()
)
(c.Concat(self._cpp_type_generator.GetNamespaceEnd())
.Concat(self._cpp_type_generator.GetRootNamespaceEnd())
.Append()
.Append('#endif // %s' % ifndef_name)
.Append()
)
return c
def _FieldDependencyOrder(self):
"""Generates the list of types in the current namespace in an order in which
depended-upon types appear before types which depend on them.
"""
dependency_order = []
def ExpandType(path, type_):
if type_ in path:
raise ValueError("Illegal circular dependency via cycle " +
", ".join(map(lambda x: x.name, path + [type_])))
for prop in type_.properties.values():
if not prop.optional and prop.type_ == PropertyType.REF:
ExpandType(path + [type_], self._namespace.types[prop.ref_type])
if not type_ in dependency_order:
dependency_order.append(type_)
for type_ in self._namespace.types.values():
ExpandType([], type_)
return dependency_order
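  # For example (hypothetical types): if TypeB has a required $ref property
  # pointing at TypeA, ExpandType visits TypeA first, so the returned order is
  # [TypeA, TypeB] and the generated struct for TypeA precedes TypeB's.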
def _GenerateEnumDeclaration(self, enum_name, prop, values):
"""Generate the declaration of a C++ enum for the given property and
values.
"""
c = Code()
c.Sblock('enum %s {' % enum_name)
if prop.optional:
c.Append(self._cpp_type_generator.GetEnumNoneValue(prop) + ',')
for value in values:
c.Append(self._cpp_type_generator.GetEnumValue(prop, value) + ',')
(c.Eblock('};')
.Append()
)
return c
def _GenerateFields(self, props):
"""Generates the field declarations when declaring a type.
"""
c = Code()
# Generate the enums needed for any fields with "choices"
for prop in props:
if prop.type_ == PropertyType.CHOICES:
enum_name = self._cpp_type_generator.GetChoicesEnumType(prop)
c.Append('%s %s_type;' % (enum_name, prop.unix_name))
c.Append()
for prop in self._cpp_type_generator.GetExpandedChoicesInParams(props):
if prop.description:
c.Comment(prop.description)
c.Append('%s %s;' % (
self._cpp_type_generator.GetType(prop, wrap_optional=True),
prop.unix_name))
c.Append()
return c
def _GenerateType(self, type_):
"""Generates a struct for a type.
"""
classname = cpp_util.Classname(type_.name)
c = Code()
if type_.functions:
# Types with functions are not instantiable in C++ because they are
# handled in pure Javascript and hence have no properties or
# additionalProperties.
if type_.properties:
raise NotImplementedError('\n'.join(model.GetModelHierarchy(type_)) +
'\nCannot generate both functions and properties on a type')
c.Sblock('namespace %(classname)s {')
for function in type_.functions.values():
(c.Concat(self._GenerateFunction(function))
.Append()
)
c.Eblock('}')
elif type_.type_ == PropertyType.ARRAY:
if type_.description:
c.Comment(type_.description)
c.Append('typedef std::vector<%(item_type)s> %(classname)s;')
c.Substitute({'classname': classname, 'item_type':
self._cpp_type_generator.GetType(type_.item_type,
wrap_optional=True)})
elif type_.type_ == PropertyType.STRING:
if type_.description:
c.Comment(type_.description)
c.Append('typedef std::string %(classname)s;')
c.Substitute({'classname': classname})
else:
if type_.description:
c.Comment(type_.description)
(c.Sblock('struct %(classname)s {')
.Append('~%(classname)s();')
.Append('%(classname)s();')
.Append()
.Concat(self._GeneratePropertyStructures(type_.properties.values()))
.Concat(self._GenerateFields(type_.properties.values()))
)
if type_.from_json:
(c.Comment('Populates a %s object from a Value. Returns'
' whether |out| was successfully populated.' % classname)
.Append(
'static bool Populate(const Value& value, %(classname)s* out);')
.Append()
)
if type_.from_client:
(c.Comment('Returns a new DictionaryValue representing the'
' serialized form of this %s object. Passes '
'ownership to caller.' % classname)
.Append('scoped_ptr<DictionaryValue> ToValue() const;')
)
(c.Eblock()
.Sblock(' private:')
.Append('DISALLOW_COPY_AND_ASSIGN(%(classname)s);')
.Eblock('};')
)
c.Substitute({'classname': classname})
return c
def _GenerateFunction(self, function):
"""Generates the structs for a function.
"""
c = Code()
(c.Sblock('namespace %s {' % cpp_util.Classname(function.name))
.Concat(self._GenerateFunctionParams(function))
.Append()
)
if function.callback:
(c.Concat(self._GenerateFunctionResult(function))
.Append()
)
c.Eblock('};')
return c
def _GenerateFunctionParams(self, function):
"""Generates the struct for passing parameters into a function.
"""
c = Code()
if function.params:
(c.Sblock('struct Params {')
.Concat(self._GeneratePropertyStructures(function.params))
.Concat(self._GenerateFields(function.params))
.Append('~Params();')
.Append()
.Append('static scoped_ptr<Params> Create(const ListValue& args);')
.Eblock()
.Sblock(' private:')
.Append('Params();')
.Append()
.Append('DISALLOW_COPY_AND_ASSIGN(Params);')
.Eblock('};')
)
return c
def _GeneratePropertyStructures(self, props):
"""Generate the structures required by a property such as OBJECT classes
and enums.
"""
c = Code()
for prop in props:
if prop.type_ == PropertyType.OBJECT:
c.Concat(self._GenerateType(prop))
c.Append()
elif prop.type_ == PropertyType.CHOICES:
c.Concat(self._GenerateEnumDeclaration(
self._cpp_type_generator.GetChoicesEnumType(prop),
prop,
[choice.type_.name for choice in prop.choices.values()]))
c.Concat(self._GeneratePropertyStructures(prop.choices.values()))
elif prop.type_ == PropertyType.ENUM:
enum_name = self._cpp_type_generator.GetType(prop)
c.Concat(self._GenerateEnumDeclaration(
enum_name,
prop,
prop.enum_values))
c.Append('static scoped_ptr<Value> CreateEnumValue(%s %s);' %
(enum_name, prop.unix_name))
return c
def _GenerateFunctionResult(self, function):
"""Generates functions for passing a function's result back.
"""
c = Code()
c.Sblock('namespace Result {')
params = function.callback.params
if not params:
c.Append('Value* Create();')
else:
c.Concat(self._GeneratePropertyStructures(params))
# If there is a single parameter, this is straightforward. However, if
# the callback parameter is of 'choices', this generates a Create method
# for each choice. This works because only 1 choice can be returned at a
# time.
for param in self._cpp_type_generator.GetExpandedChoicesInParams(params):
if param.description:
c.Comment(param.description)
if param.type_ == PropertyType.ANY:
c.Comment("Value* Result::Create(Value*) not generated "
"because it's redundant.")
continue
c.Append('Value* Create(const %s);' % cpp_util.GetParameterDeclaration(
param, self._cpp_type_generator.GetType(param)))
c.Eblock('};')
return c
|
|
# Copyright (c) 2016 Simon van Heeringen <[email protected]>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
""" Module for motif activity prediction """
from __future__ import print_function
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import os
import sys
import shutil
from functools import partial
try:
from itertools import izip
except ImportError:
izip = zip
import itertools
import logging
from multiprocessing import Pool
import pandas as pd
import numpy as np
from scipy.stats import ks_2samp, hypergeom, mannwhitneyu
from scipy.cluster.hierarchy import linkage, fcluster
from statsmodels.sandbox.stats.multicomp import multipletests
from tqdm import tqdm
# scikit-learn
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import Ridge, MultiTaskLasso, BayesianRidge
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.preprocessing import scale, LabelEncoder
from lightning.classification import CDClassifier
from lightning.regression import CDRegressor
import xgboost
from gimmemotifs import __version__
from gimmemotifs.motif import read_motifs
from gimmemotifs.scanner import Scanner
from gimmemotifs.config import MotifConfig
from gimmemotifs.utils import pwmfile_location
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
logger = logging.getLogger("gimme.maelstrom")
class Moap(object):
"""Moap base class.
Motif activity prediction.
"""
_predictors = {}
name = None
@classmethod
def create(cls, name, ncpus=None):
"""Create a Moap instance based on the predictor name.
Parameters
----------
name : str
            Name of the predictor (e.g. Xgboost, BayesianRidge, ...)
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Returns
-------
moap : Moap instance
moap instance.
"""
try:
return cls._predictors[name.lower()](ncpus=ncpus)
except KeyError:
raise Exception("Unknown class")
@classmethod
def register_predictor(cls, name):
"""Register method to keep list of predictors."""
def decorator(subclass):
"""Register as decorator function."""
cls._predictors[name.lower()] = subclass
subclass.name = name.lower()
return subclass
return decorator
@classmethod
def list_predictors(self):
"""List available predictors."""
return list(self._predictors.keys())
@classmethod
def list_classification_predictors(self):
"""List available classification predictors."""
preds = [self.create(x) for x in self._predictors.keys()]
return [x.name for x in preds if x.ptype == "classification"]
@classmethod
def list_regression_predictors(self):
"""List available regression predictors."""
preds = [self.create(x) for x in self._predictors.keys()]
return [x.name for x in preds if x.ptype == "regression"]
register_predictor = Moap.register_predictor
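# A minimal registration/creation sketch (hypothetical predictor; lookup is
# case-insensitive because names are stored lower-cased, and Moap.create
# passes an ``ncpus`` keyword to the constructor):
#
#     @register_predictor('MyPredictor')
#     class MyPredictorMoap(Moap):
#         def __init__(self, ncpus=None):
#             self.act_ = None
#             self.ptype = "regression"
#
#     moap = Moap.create("mypredictor", ncpus=4)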
def br_fit(X, y):
model = BayesianRidge()
model.fit(X, y)
return model.coef_
def br_fit_star(args):
return br_fit(*args)
@register_predictor('BayesianRidge')
class BayesianRidgeMoap(Moap):
def __init__(self, scale=True, ncpus=None):
"""Predict motif activities using Bayesian Ridge Regression.
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
            before fitting.
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
Coefficients of the regression model.
"""
        self.act_description = ("activity values: coefficients of the "
                                "regression model")
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.scale = scale
self.act_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y):
logger.info("Fitting BayesianRidge")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if self.scale:
logger.debug("Scaling motif scores")
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
#logger.debug("Scaling y")
# Normalize across samples and features
#y = df_y.apply(scale, 1).apply(scale, 0)
y = df_y
X = df_X.loc[y.index]
logger.debug("Fitting model")
pool = Pool(self.ncpus)
coefs = [x for x in tqdm(pool.imap(br_fit_star, izip(itertools.repeat(X), [y[col] for col in y.columns])), total=len(y.columns))]
logger.info("Done")
self.act_ = pd.DataFrame(coefs, columns=X.columns, index=y.columns).T
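# A usage sketch for the regression predictors (hypothetical DataFrames: rows
# are regions, df_X columns are motif scores, df_y columns are experiments):
#
#     moap = Moap.create("bayesianridge", ncpus=4)
#     moap.fit(df_X, df_y)
#     coefficients = moap.act_   # DataFrame, motifs x experiments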
@register_predictor('Xgboost')
class XgboostRegressionMoap(Moap):
def __init__(self, scale=True, ncpus=None):
"""Predict motif activities using XGBoost.
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
            before fitting
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
Feature scores.
"""
        self.act_description = ("activity values: feature scores from "
                                "fitted model")
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.scale = scale
self.act_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y):
logger.info("Fitting XGBoostRegression")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if self.scale:
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
# Normalize across samples and features
#y = df_y.apply(scale, 1).apply(scale, 0)
y = df_y
X = df_X.loc[y.index]
# Define model
xgb = xgboost.XGBRegressor(
n_estimators=100,
learning_rate=0.1,
nthread=self.ncpus,
min_child_weight=2,
max_depth=3,
subsample=0.75,
colsample_bytree=0.75,
objective='reg:linear')
logger.debug("xgb: 0%")
self.act_ = pd.DataFrame(index=X.columns)
# Fit model
for i,col in enumerate(tqdm(y.columns)):
xgb.fit(X, y[col].values)
d = xgb.get_booster().get_fscore()
self.act_[col] = [d.get(m, 0) for m in X.columns]
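            # xgboost feature scores are non-negative; assign a sign by
            # comparing the mean target value for regions in the top vs.
            # bottom quartile of the motif score and flip the score when the
            # association is negative.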
for motif in self.act_.index:
if self.act_.loc[motif, col] != 0:
high = df_y.loc[df_X[motif] >= df_X[motif].quantile(0.75), col].mean()
low = df_y.loc[df_X[motif] <= df_X[motif].quantile(0.25), col].mean()
if low > high:
self.act_.loc[motif, col] *= -1
logger.debug("..{}%".format(int(float(i + 1)/ len(y.columns) * 100)))
logger.info("Done")
@register_predictor('LightningRegressor')
class LightningRegressionMoap(Moap):
def __init__(self, scale=True, cv=3, ncpus=None):
"""Predict motif activities using lightning CDRegressor
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
            before fitting
cv : int, optional, default 3
Cross-validation k-fold parameter.
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
fitted coefficients
sig_ : DataFrame, shape (n_motifs,)
            boolean values indicating whether a coefficient is higher/lower
            than the 99th/1st percentile obtained from random permutations
"""
self.act_description = ("activity values: coefficients from "
"fitted model")
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.kfolds = cv
self.scale = scale
self.act_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y, batch_size=50, shuffle=True, tmpdir=None):
logger.info("Fitting LightningRegression")
if self.scale:
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
# Normalize across samples and features
#y = df_y.apply(scale, 1).apply(scale, 0)
y = df_y
X = df_X.loc[y.index]
if not y.shape[0] == X.shape[0]:
raise ValueError("number of regions is not equal")
# Define model
cd = CDRegressor(penalty="l1/l2", C=1.0/X.shape[0])
parameters = {
"alpha": [np.exp(-x) for x in np.arange(0, 8, 1/2.5)],
}
clf = GridSearchCV(cd, parameters, n_jobs=self.ncpus)
nsplits = int(y.shape[1] / batch_size)
if shuffle:
idx = list(y.sample(y.shape[1], axis=1, random_state=42).columns)
else:
idx = list(y.columns)
if tmpdir:
if not os.path.exists(tmpdir):
os.mkdir(tmpdir)
coefs = pd.DataFrame(index=X.columns)
start_i = 0
if tmpdir:
for i in range(0, len(idx), batch_size):
fname = os.path.join(tmpdir, "{}.feather".format(i))
if os.path.exists(fname) and os.path.exists(fname + ".done"):
tmp = pd.read_feather(fname)
tmp = tmp.set_index(tmp.columns[0])
coefs = coefs.join(tmp)
else:
logger.info("Resuming at batch {}".format(i))
start_i = i
break
for i in tqdm(range(start_i, len(idx), batch_size)):
split_y = y[idx[i:i+batch_size]]
# Fit model
clf.fit(X.values, split_y.values)
tmp = pd.DataFrame(clf.best_estimator_.coef_.T,
index=X.columns, columns = split_y.columns)
if tmpdir:
fname = os.path.join(tmpdir, "{}.feather".format(i))
tmp.reset_index().rename(columns=str).to_feather(fname)
# Make sure we don't read corrupted files
open(fname + ".done", "a").close()
# Get coefficients
coefs = coefs.join(tmp)
# Get coefficients
self.act_ = coefs[y.columns]
logger.info("Done")
@register_predictor('LightningClassification')
class LightningClassificationMoap(Moap):
def __init__(self, scale=True, permute=False, ncpus=None):
"""Predict motif activities using lightning CDClassifier
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
fitted coefficients
sig_ : DataFrame, shape (n_motifs,)
            boolean values indicating whether a coefficient is higher/lower
            than the 99th/1st percentile obtained from random permutations
"""
self.act_description = ("activity values: coefficients from "
"fitted model")
#self.cdc = CDClassifier(random_state=args.seed)
self.cdc = CDClassifier()
self.parameters = {
"penalty": ["l1/l2"],
"loss": ["squared_hinge"],
"multiclass":[True],
"max_iter":[20],
"alpha": [np.exp(-x) for x in np.arange(0, 10, 1/3.0)],
"C":[0.001, 0.01, 0.1, 0.5, 1.0],
"tol":[1e-3]
}
self.kfolds = 10
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.clf = GridSearchCV(self.cdc, self.parameters,
cv=self.kfolds, n_jobs=ncpus)
self.scale = scale
self.permute = permute
self.act_ = None
self.sig_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting LightningClassification")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
if self.scale:
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
idx = list(range(df_y.shape[0]))
y = df_y.iloc[idx]
X = df_X.loc[y.index].values
y = y.values.flatten()
# Convert (putative) string labels
l = LabelEncoder()
y = l.fit_transform(y)
# Split data
X_train,X_test,y_train,y_test = train_test_split(X,y)
logger.debug("Setting parameters through cross-validation")
# Determine best parameters based on CV
self.clf.fit(X_train,y_train)
logger.debug("Average score ({} fold CV): {}".format(
self.kfolds,
self.clf.score(X_test, y_test)
))
logger.debug("Estimate coefficients using bootstrapping")
        # Estimate coefficients using bootstrapping
#b = BaggingClassifier(self.clf.best_estimator_,
# max_samples=0.75, n_jobs=-1, random_state=state)
b = BaggingClassifier(self.clf.best_estimator_,
max_samples=0.75, n_jobs=-1)
b.fit(X,y)
# Get mean coefficients
coeffs = np.array([e.coef_ for e in b.estimators_]).mean(axis=0)
# Create dataframe of predicted coefficients
if len(l.classes_) == 2:
self.act_ = pd.DataFrame(np.hstack((-coeffs.T, coeffs.T)))
else:
self.act_ = pd.DataFrame(coeffs.T)
# Convert labels back to original names
self.act_.columns = l.inverse_transform(range(len(l.classes_)))
self.act_.index = df_X.columns
if self.permute:
# Permutations
logger.debug("Permutations")
random_dfs = []
for _ in range(10):
y_random = np.random.permutation(y)
b.fit(X,y_random)
coeffs = np.array([e.coef_ for e in b.estimators_]).mean(axis=0)
if len(l.classes_) == 2:
random_dfs.append(pd.DataFrame(np.hstack((-coeffs.T, coeffs.T))))
else:
random_dfs.append(pd.DataFrame(coeffs.T))
random_df = pd.concat(random_dfs)
# Select cutoff based on percentile
high_cutoffs = random_df.quantile(0.99)
low_cutoffs = random_df.quantile(0.01)
# Set significance
self.sig_ = pd.DataFrame(index=df_X.columns)
self.sig_["sig"] = False
for col,c_high,c_low in zip(
self.act_.columns, high_cutoffs, low_cutoffs):
self.sig_["sig"].loc[self.act_[col] >= c_high] = True
self.sig_["sig"].loc[self.act_[col] <= c_low] = True
logger.info("Done")
@register_predictor('MWU')
class MWUMoap(Moap):
def __init__(self, *args, **kwargs):
"""Predict motif activities using Mann-Whitney U p-value
This method compares the motif score distribution of each
cluster versus the motif score distribution of all other
clusters.
Parameters
----------
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
-log10 of the Mann-Whitney U p-value, corrected for multiple
testing using the Benjamini-Hochberg correction
"""
self.act_ = None
self.act_description = ("activity values: BH-corrected "
"-log10 Mann-Whitney U p-value")
self.pref_table = "score"
self.supported_tables = ["score"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting MWU")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
# calculate Mann-Whitney U p-values
pvals = []
clusters = df_y[df_y.columns[0]].unique()
for cluster in clusters:
pos = df_X[df_y.iloc[:,0] == cluster]
neg = df_X[df_y.iloc[:,0] != cluster]
p = []
for m in pos:
try:
p.append(mannwhitneyu(pos[m], neg[m], alternative="greater")[1])
except Exception as e:
sys.stderr.write(str(e) + "\n")
sys.stderr.write("motif {} failed, setting to p = 1\n".format(m))
p.append(1)
pvals.append(p)
        # correct for multiple testing
pvals = np.array(pvals)
fpr = multipletests(pvals.flatten(),
method="fdr_bh")[1].reshape(pvals.shape)
# create output DataFrame
self.act_ = pd.DataFrame(-np.log10(fpr.T),
columns=clusters, index=df_X.columns)
logger.info("Done")
@register_predictor('Hypergeom')
class HypergeomMoap(Moap):
def __init__(self, *args, **kwargs):
"""Predict motif activities using hypergeometric p-value
Parameters
----------
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
-log10 of the hypergeometric p-value, corrected for multiple
testing using the Benjamini-Hochberg correction
"""
self.act_ = None
self.act_description = ("activity values: BH-corrected "
"hypergeometric p-values")
self.pref_table = "count"
self.supported_tables = ["count"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting Hypergeom")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
if set(df_X.dtypes) != set([np.dtype(int)]):
raise ValueError("need motif counts, not scores")
# calculate hypergeometric p-values
pvals = []
clusters = df_y[df_y.columns[0]].unique()
M = df_X.shape[0]
for cluster in clusters:
pos = df_X[df_y.iloc[:,0] == cluster]
neg = df_X[df_y.iloc[:,0] != cluster]
pos_true = (pos > 0).sum(0)
pos_false = (pos == 0).sum(0)
neg_true = (neg > 0).sum(0)
p = []
for pt, pf, nt in zip(pos_true, pos_false, neg_true):
n = pt + nt
N = pt + pf
x = pt - 1
p.append(hypergeom.sf(x, M, n, N))
pvals.append(p)
        # correct for multiple testing
pvals = np.array(pvals)
fpr = multipletests(pvals.flatten(),
method="fdr_bh")[1].reshape(pvals.shape)
# create output DataFrame
self.act_ = pd.DataFrame(-np.log10(fpr.T),
columns=clusters, index=df_X.columns)
logger.info("Done")
@register_predictor('RF')
class RFMoap(Moap):
def __init__(self, ncpus=None):
"""Predict motif activities using a random forest classifier
Parameters
----------
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
feature importances from the model
"""
self.act_ = None
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.act_description = ("activity values: feature importances "
"from fitted Random Forest model")
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting RF")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
le = LabelEncoder()
y = le.fit_transform(df_y.iloc[:,0].values)
clf = RandomForestClassifier(n_estimators=100, n_jobs=self.ncpus)
# Multiclass
if len(le.classes_) > 2:
orc = OneVsRestClassifier(clf)
orc.fit(df_X.values, y)
importances = np.array([c.feature_importances_ for c in orc.estimators_]).T
else: # Only two classes
clf.fit(df_X.values, y)
importances = np.array([
clf.feature_importances_,
clf.feature_importances_
]).T
for i,c in enumerate(le.classes_):
diff = df_X.loc[y == i].quantile(q=0.75) - df_X.loc[y != i].quantile(q=0.75)
sign = (diff >= 0) * 2 - 1
importances[:,i] *= sign
# create output DataFrame
self.act_ = pd.DataFrame(importances,
columns=le.inverse_transform(range(len(le.classes_))),
index=df_X.columns)
logger.info("Done")
@register_predictor('Lasso')
class LassoMoap(Moap):
def __init__(self, scale=True, kfolds=4, alpha_stepsize=1.0, ncpus=None):
"""Predict motif activities using Lasso MultiTask regression
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification
        kfolds : integer, optional, default 4
number of kfolds for parameter search
alpha_stepsize : float, optional, default 1.0
stepsize for use in alpha gridsearch
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
fitted motif activities
sig_ : DataFrame, shape (n_motifs,)
            boolean values indicating whether a coefficient is higher or lower
            than the 1st/99th percentile obtained from random permutations
"""
self.kfolds = kfolds
self.act_description = ("activity values: coefficients from "
"fitted model")
self.scale = scale
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
# initialize attributes
self.act_ = None
self.sig_ = None
mtk = MultiTaskLasso()
parameters = {
"alpha": [np.exp(-x) for x in np.arange(0, 10, alpha_stepsize)],
}
self.clf = GridSearchCV(mtk, parameters, cv=kfolds, n_jobs=self.ncpus)
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y, permute=False):
logger.info("Fitting Lasso")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if self.scale:
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
idx = list(range(df_y.shape[0]))
y = df_y.iloc[idx]
X = df_X.loc[y.index].values
y = y.values
# fit coefficients
coefs = self._get_coefs(X, y)
self.act_ = pd.DataFrame(coefs.T)
# convert labels back to original names
self.act_.columns = df_y.columns
self.act_.index = df_X.columns
if permute:
# Permutations
logger.info("permutations\n")
random_dfs = []
for _ in range(10):
y_random = y[np.random.permutation(range(y.shape[0]))]
coefs = self._get_coefs(X, y_random)
random_dfs.append(pd.DataFrame(coefs.T))
random_df = pd.concat(random_dfs)
# Select cutoff based on percentile
high_cutoffs = random_df.quantile(0.99)
low_cutoffs = random_df.quantile(0.01)
# Set significance
self.sig_ = pd.DataFrame(index=df_X.columns)
self.sig_["sig"] = False
for col,c_high,c_low in zip(
self.act_.columns, high_cutoffs, low_cutoffs):
self.sig_["sig"].loc[self.act_[col] >= c_high] = True
self.sig_["sig"].loc[self.act_[col] <= c_low] = True
logger.info("Done")
def _get_coefs(self, X, y):
logger.info("set alpha through cross-validation\n")
# Determine best parameters based on CV
self.clf.fit(X, y)
logger.debug("average score ({} fold CV): {}".format(
self.kfolds,
self.clf.best_score_
))
logger.info("Estimate coefficients using bootstrapping\n")
n_samples = 0.75 * X.shape[0]
max_samples = X.shape[0]
m = self.clf.best_estimator_
coefs = []
for _ in range(10):
idx = np.random.randint(0, n_samples, max_samples)
m.fit(X[idx], y[idx])
coefs.append(m.coef_)
coefs = np.array(coefs).mean(axis=0)
return coefs
def moap(inputfile, method="hypergeom", scoring=None, outfile=None, motiffile=None, pwmfile=None, genome=None, fpr=0.01, ncpus=None,
subsample=None):
"""Run a single motif activity prediction algorithm.
Parameters
----------
inputfile : str
        File with regions (chr:start-end) in first column and either cluster
name in second column or a table with values.
method : str, optional
Motif activity method to use. Any of 'hypergeom', 'lasso',
'lightningclassification', 'lightningregressor', 'bayesianridge',
'rf', 'xgboost'. Default is 'hypergeom'.
    scoring : str, optional
Either 'score' or 'count'
outfile : str, optional
Name of outputfile to save the fitted activity values.
motiffile : str, optional
Table with motif scan results. First column should be exactly the same
regions as in the inputfile.
pwmfile : str, optional
File with motifs in pwm format. Required when motiffile is not
supplied.
genome : str, optional
Genome name, as indexed by gimme. Required when motiffile is not
supplied
fpr : float, optional
FPR for motif scanning
ncpus : int, optional
Number of threads to use. Default is the number specified in the config.
Returns
-------
pandas DataFrame with motif activity
"""
if scoring and scoring not in ['score', 'count']:
raise ValueError("valid values are 'score' and 'count'")
config = MotifConfig()
if inputfile.endswith("feather"):
df = pd.read_feather(inputfile)
df = df.set_index(df.columns[0])
else:
# read data
df = pd.read_table(inputfile, index_col=0, comment="#")
clf = Moap.create(method, ncpus=ncpus)
if clf.ptype == "classification":
if df.shape[1] != 1:
raise ValueError("1 column expected for {}".format(method))
else:
if np.dtype('object') in set(df.dtypes):
raise ValueError(
"columns should all be numeric for {}".format(method))
if motiffile is None:
if genome is None:
raise ValueError("need a genome")
pwmfile = pwmfile_location(pwmfile)
try:
motifs = read_motifs(pwmfile)
except:
sys.stderr.write("can't read motifs from {}".format(pwmfile))
raise
# initialize scanner
s = Scanner(ncpus=ncpus)
sys.stderr.write(pwmfile + "\n")
s.set_motifs(pwmfile)
s.set_genome(genome)
s.set_background(genome=genome)
# scan for motifs
sys.stderr.write("scanning for motifs\n")
motif_names = [m.id for m in read_motifs(pwmfile)]
scores = []
if method == 'classic' or scoring == "count":
s.set_threshold(fpr=fpr)
for row in s.count(list(df.index)):
scores.append(row)
else:
for row in s.best_score(list(df.index), normalize=True):
scores.append(row)
motifs = pd.DataFrame(scores, index=df.index, columns=motif_names)
else:
motifs = pd.read_table(motiffile, index_col=0, comment="#")
if outfile and os.path.exists(outfile):
out = pd.read_table(outfile, index_col=0, comment="#")
ncols = df.shape[1]
if ncols == 1:
ncols = len(df.iloc[:,0].unique())
if out.shape[0] == motifs.shape[1] and out.shape[1] == ncols:
logger.warn("%s output already exists... skipping", method)
return out
if subsample is not None:
n = int(subsample * df.shape[0])
logger.debug("Subsampling %d regions", n)
df = df.sample(n)
motifs = motifs.loc[df.index]
if method == "lightningregressor":
outdir = os.path.dirname(outfile)
tmpname = os.path.join(outdir, ".lightning.tmp")
clf.fit(motifs, df, tmpdir=tmpname)
shutil.rmtree(tmpname)
else:
clf.fit(motifs, df)
if outfile:
with open(outfile, "w") as f:
f.write("# maelstrom - GimmeMotifs version {}\n".format(__version__))
f.write("# method: {} with motif {}\n".format(method, scoring))
if genome:
f.write("# genome: {}\n".format(genome))
if motiffile:
f.write("# motif table: {}\n".format(motiffile))
f.write("# {}\n".format(clf.act_description))
with open(outfile, "a") as f:
clf.act_.to_csv(f, sep="\t")
return clf.act_
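# Hypothetical usage sketch of moap() as defined above; the file names, genome
# and output path are placeholders, not files shipped with the package.
def _moap_usage_example():
    return moap(
        "clusters.txt",          # regions in column 1, cluster labels in column 2
        method="hypergeom",      # a classification method, so one label column is expected
        scoring="count",         # Hypergeom needs motif counts rather than scores
        genome="hg38",           # used to scan for motifs because motiffile is None
        outfile="activities.txt",
    )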
|
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from astropy import units as u
from astropy.time import Time, TimeDelta
from astropy.tests.helper import assert_quantity_allclose
from astropy.timeseries.periodograms.bls import BoxLeastSquares
from astropy.timeseries.periodograms.lombscargle.core import has_units
def assert_allclose_blsresults(blsresult, other, **kwargs):
"""Assert that another BoxLeastSquaresResults object is consistent
This method loops over all attributes and compares the values using
:func:`~astropy.tests.helper.assert_quantity_allclose` function.
Parameters
----------
other : BoxLeastSquaresResults
The other results object to compare.
"""
for k, v in blsresult.items():
if k not in other:
raise AssertionError(f"missing key '{k}'")
if k == "objective":
assert v == other[k], (
"Mismatched objectives. Expected '{}', got '{}'"
.format(v, other[k])
)
continue
assert_quantity_allclose(v, other[k], **kwargs)
@pytest.fixture
def data():
rand = np.random.RandomState(123)
t = rand.uniform(0, 10, 500)
y = np.ones_like(t)
dy = rand.uniform(0.005, 0.01, len(t))
period = 2.0
transit_time = 0.5
duration = 0.16
depth = 0.2
m = np.abs((t-transit_time+0.5*period) % period-0.5*period) < 0.5*duration
y[m] = 1.0 - depth
y += dy * rand.randn(len(t))
return t, y, dy, dict(period=period, transit_time=transit_time,
duration=duration, depth=depth)
def test_32bit_bug():
rand = np.random.RandomState(42)
t = rand.uniform(0, 10, 500)
y = np.ones_like(t)
y[np.abs((t + 1.0) % 2.0-1) < 0.08] = 1.0 - 0.1
y += 0.01 * rand.randn(len(t))
model = BoxLeastSquares(t, y)
results = model.autopower(0.16)
assert np.allclose(results.period[np.argmax(results.power)],
1.9923406038842544)
periods = np.linspace(1.9, 2.1, 5)
results = model.power(periods, 0.16)
assert np.allclose(
results.power,
np.array([0.01421067, 0.02842475, 0.10867671, 0.05117755, 0.01783253])
)
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
def test_correct_model(data, objective):
t, y, dy, params = data
model = BoxLeastSquares(t, y, dy)
periods = np.exp(np.linspace(np.log(params["period"]) - 0.1,
np.log(params["period"]) + 0.1, 1000))
results = model.power(periods, params["duration"], objective=objective)
ind = np.argmax(results.power)
for k, v in params.items():
assert_allclose(results[k][ind], v, atol=0.01)
chi = (results.depth[ind]-params["depth"]) / results.depth_err[ind]
assert np.abs(chi) < 1
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
@pytest.mark.parametrize("offset", [False, True])
def test_fast_method(data, objective, offset):
t, y, dy, params = data
if offset:
t = t - params["transit_time"] + params["period"]
model = BoxLeastSquares(t, y, dy)
periods = np.exp(np.linspace(np.log(params["period"]) - 1,
np.log(params["period"]) + 1, 10))
durations = params["duration"]
results = model.power(periods, durations, objective=objective)
assert_allclose_blsresults(results, model.power(periods, durations,
method="slow",
objective=objective))
def test_input_units(data):
t, y, dy, params = data
t_unit = u.day
y_unit = u.mag
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y * y_unit, dy * u.one)
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y * u.one, dy * y_unit)
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y, dy * y_unit)
model = BoxLeastSquares(t*t_unit, y * u.one, dy)
assert model.dy.unit == model.y.unit
model = BoxLeastSquares(t*t_unit, y * y_unit, dy)
assert model.dy.unit == model.y.unit
model = BoxLeastSquares(t*t_unit, y*y_unit)
assert model.dy is None
def test_period_units(data):
t, y, dy, params = data
t_unit = u.day
y_unit = u.mag
model = BoxLeastSquares(t * t_unit, y * y_unit, dy)
p = model.autoperiod(params["duration"])
assert p.unit == t_unit
p = model.autoperiod(params["duration"] * 24 * u.hour)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
model.autoperiod(params["duration"] * u.mag)
p = model.autoperiod(params["duration"], minimum_period=0.5)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
p = model.autoperiod(params["duration"], minimum_period=0.5*u.mag)
p = model.autoperiod(params["duration"], maximum_period=0.5)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
p = model.autoperiod(params["duration"], maximum_period=0.5*u.mag)
p = model.autoperiod(params["duration"], minimum_period=0.5,
maximum_period=1.5)
p2 = model.autoperiod(params["duration"], maximum_period=0.5,
minimum_period=1.5)
assert_quantity_allclose(p, p2)
@pytest.mark.parametrize("method", ["fast", "slow"])
@pytest.mark.parametrize("with_err", [True, False])
@pytest.mark.parametrize("t_unit", [None, u.day])
@pytest.mark.parametrize("y_unit", [None, u.mag])
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
def test_results_units(data, method, with_err, t_unit, y_unit, objective):
t, y, dy, params = data
periods = np.linspace(params["period"]-1.0, params["period"]+1.0, 3)
if t_unit is not None:
t = t * t_unit
if y_unit is not None:
y = y * y_unit
dy = dy * y_unit
if not with_err:
dy = None
model = BoxLeastSquares(t, y, dy)
results = model.power(periods, params["duration"], method=method,
objective=objective)
if t_unit is None:
assert not has_units(results.period)
assert not has_units(results.duration)
assert not has_units(results.transit_time)
else:
assert results.period.unit == t_unit
assert results.duration.unit == t_unit
assert results.transit_time.unit == t_unit
if y_unit is None:
assert not has_units(results.power)
assert not has_units(results.depth)
assert not has_units(results.depth_err)
assert not has_units(results.depth_snr)
assert not has_units(results.log_likelihood)
else:
assert results.depth.unit == y_unit
assert results.depth_err.unit == y_unit
assert results.depth_snr.unit == u.one
if dy is None:
assert results.log_likelihood.unit == y_unit * y_unit
if objective == "snr":
assert results.power.unit == u.one
else:
assert results.power.unit == y_unit * y_unit
else:
assert results.log_likelihood.unit == u.one
assert results.power.unit == u.one
def test_autopower(data):
t, y, dy, params = data
duration = params["duration"] + np.linspace(-0.1, 0.1, 3)
model = BoxLeastSquares(t, y, dy)
period = model.autoperiod(duration)
results1 = model.power(period, duration)
results2 = model.autopower(duration)
assert_allclose_blsresults(results1, results2)
@pytest.mark.parametrize("with_units", [True, False])
def test_model(data, with_units):
t, y, dy, params = data
# Compute the model using linear regression
A = np.zeros((len(t), 2))
p = params["period"]
dt = np.abs((t-params["transit_time"]+0.5*p) % p-0.5*p)
m_in = dt < 0.5*params["duration"]
A[~m_in, 0] = 1.0
A[m_in, 1] = 1.0
w = np.linalg.solve(np.dot(A.T, A / dy[:, None]**2),
np.dot(A.T, y / dy**2))
model_true = np.dot(A, w)
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
model_true = model_true * u.mag
# Compute the model using the periodogram
pgram = BoxLeastSquares(t, y, dy)
model = pgram.model(t, p, params["duration"], params["transit_time"])
# Make sure that the transit mask is consistent with the model
transit_mask = pgram.transit_mask(t, p, params["duration"],
params["transit_time"])
transit_mask0 = (model - model.max()) < 0.0
assert_allclose(transit_mask, transit_mask0)
assert_quantity_allclose(model, model_true)
@pytest.mark.parametrize("shape", [(1,), (2,), (3,), (2, 3)])
def test_shapes(data, shape):
t, y, dy, params = data
duration = params["duration"]
model = BoxLeastSquares(t, y, dy)
period = np.empty(shape)
period.flat = np.linspace(params["period"]-1, params["period"]+1,
period.size)
if len(period.shape) > 1:
with pytest.raises(ValueError):
results = model.power(period, duration)
else:
results = model.power(period, duration)
for k, v in results.items():
if k == "objective":
continue
assert v.shape == shape
@pytest.mark.parametrize("with_units", [True, False])
@pytest.mark.parametrize("with_err", [True, False])
def test_compute_stats(data, with_units, with_err):
t, y, dy, params = data
y_unit = 1
if with_units:
y_unit = u.mag
t = t * u.day
y = y * u.mag
dy = dy * u.mag
params["period"] = params["period"] * u.day
params["duration"] = params["duration"] * u.day
params["transit_time"] = params["transit_time"] * u.day
params["depth"] = params["depth"] * u.mag
if not with_err:
dy = None
model = BoxLeastSquares(t, y, dy)
results = model.power(params["period"], params["duration"],
oversample=1000)
stats = model.compute_stats(params["period"], params["duration"],
params["transit_time"])
# Test the calculated transit times
tt = params["period"] * np.arange(int(t.max() / params["period"]) + 1)
tt += params["transit_time"]
assert_quantity_allclose(tt, stats["transit_times"])
# Test that the other parameters are consistent with the periodogram
assert_allclose(stats["per_transit_count"], np.array([9, 7, 7, 7, 8]))
assert_quantity_allclose(np.sum(stats["per_transit_log_likelihood"]),
results["log_likelihood"])
assert_quantity_allclose(stats["depth"][0], results["depth"])
# Check the half period result
results_half = model.power(0.5*params["period"], params["duration"],
oversample=1000)
assert_quantity_allclose(stats["depth_half"][0], results_half["depth"])
# Skip the uncertainty tests when the input errors are None
if not with_err:
assert_quantity_allclose(stats["harmonic_amplitude"],
0.029945029964964204 * y_unit)
assert_quantity_allclose(stats["harmonic_delta_log_likelihood"],
-0.5875918155223113 * y_unit * y_unit)
return
assert_quantity_allclose(stats["harmonic_amplitude"],
0.033027988742275853 * y_unit)
assert_quantity_allclose(stats["harmonic_delta_log_likelihood"],
-12407.505922833765)
assert_quantity_allclose(stats["depth"][1], results["depth_err"])
assert_quantity_allclose(stats["depth_half"][1], results_half["depth_err"])
for f, k in zip((1.0, 1.0, 1.0, 0.0),
("depth", "depth_even", "depth_odd", "depth_phased")):
assert np.abs((stats[k][0]-f*params["depth"]) / stats[k][1]) < 1.0
def test_negative_times(data):
t, y, dy, params = data
mu = np.mean(t)
duration = params["duration"] + np.linspace(-0.1, 0.1, 3)
model1 = BoxLeastSquares(t, y, dy)
results1 = model1.autopower(duration)
# Compute the periodogram with offset (negative) times
model2 = BoxLeastSquares(t - mu, y, dy)
results2 = model2.autopower(duration)
# Shift the transit times back into the unshifted coordinates
results2.transit_time = (results2.transit_time + mu) % results2.period
assert_allclose_blsresults(results1, results2)
@pytest.mark.parametrize('timedelta', [False, True])
def test_absolute_times(data, timedelta):
# Make sure that we handle absolute times correctly. We also check that
# TimeDelta works properly when timedelta is True.
# The example data uses relative times
t, y, dy, params = data
# FIXME: There seems to be a numerical stability issue in that if we run
# the algorithm with the same values but offset in time, the transit_time
# is not offset by a fixed amount. To avoid this issue in this test, we
# make sure the first time is also the smallest so that internally the
# values of the relative time should be the same.
t[0] = 0.
# Add units
t = t * u.day
y = y * u.mag
dy = dy * u.mag
# We now construct a set of absolute times but keeping the rest the same.
start = Time('2019-05-04T12:34:56')
trel = TimeDelta(t) if timedelta else t
t = trel + start
# and we set up two instances of BoxLeastSquares, one with absolute and one
# with relative times.
bls1 = BoxLeastSquares(t, y, dy)
bls2 = BoxLeastSquares(trel, y, dy)
results1 = bls1.autopower(0.16 * u.day)
results2 = bls2.autopower(0.16 * u.day)
# All the results should match except transit time which should be
# absolute instead of relative in the first case.
for key in results1:
if key == 'transit_time':
assert_quantity_allclose((results1[key] - start).to(u.day), results2[key])
elif key == 'objective':
assert results1[key] == results2[key]
else:
assert_allclose(results1[key], results2[key])
# Check that model evaluation works fine
model1 = bls1.model(t, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
model2 = bls2.model(trel, 0.2 * u.day, 0.05 * u.day, TimeDelta(1 * u.day))
assert_quantity_allclose(model1, model2)
# Check model validation
with pytest.raises(TypeError) as exc:
bls1.model(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls1.model(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('t_model was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.model(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
with pytest.raises(TypeError) as exc:
bls2.model(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('t_model was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
# Check compute_stats
stats1 = bls1.compute_stats(0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
stats2 = bls2.compute_stats(0.2 * u.day, 0.05 * u.day, 1 * u.day)
for key in stats1:
if key == 'transit_times':
assert_quantity_allclose((stats1[key] - start).to(u.day), stats2[key], atol=1e-10 * u.day)
elif key.startswith('depth'):
for value1, value2 in zip(stats1[key], stats2[key]):
assert_quantity_allclose(value1, value2)
else:
assert_allclose(stats1[key], stats2[key])
# Check compute_stats validation
with pytest.raises(TypeError) as exc:
bls1.compute_stats(0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.compute_stats(0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
# Check transit_mask
mask1 = bls1.transit_mask(t, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
mask2 = bls2.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert_equal(mask1, mask2)
# Check transit_mask validation
with pytest.raises(TypeError) as exc:
bls1.transit_mask(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls1.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('t was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
with pytest.raises(TypeError) as exc:
bls2.transit_mask(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('t was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
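# Condensed sketch of the BoxLeastSquares workflow exercised by the tests above
# (an illustration based on those tests, not an additional test case): build the
# periodogram, pick the period with maximum power, then evaluate the box model.
def _bls_workflow_sketch(t, y, dy, duration=0.16):
    model = BoxLeastSquares(t, y, dy)
    results = model.autopower(duration)
    best = np.argmax(results.power)
    return model.model(t, results.period[best], results.duration[best],
                       results.transit_time[best])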
|
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from lxml import etree
import six
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.integrated import integrated_helpers
class NoMatch(test.TestingException):
pass
class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
ctype = 'json'
all_extensions = False
extension_name = None
def _pretty_data(self, data):
if self.ctype == 'json':
data = jsonutils.dumps(jsonutils.loads(data), sort_keys=True,
indent=4)
else:
if data is None:
# Likely from missing XML file.
return ""
xml = etree.XML(data)
data = etree.tostring(xml, encoding="UTF-8",
xml_declaration=True, pretty_print=True)
return '\n'.join(line.rstrip() for line in data.split('\n')).strip()
def _objectify(self, data):
if not data:
return {}
if self.ctype == 'json':
# NOTE(vish): allow non-quoted replacements to survive json
data = re.sub(r'([^"])%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', data)
return jsonutils.loads(data)
else:
def to_dict(node):
ret = {}
if node.items():
ret.update(dict(node.items()))
if node.text:
ret['__content__'] = node.text
if node.tag:
ret['__tag__'] = node.tag
if node.nsmap:
ret['__nsmap__'] = node.nsmap
for element in node:
ret.setdefault(node.tag, [])
ret[node.tag].append(to_dict(element))
return ret
return to_dict(etree.fromstring(data))
@classmethod
def _get_sample_path(cls, name, dirname, suffix=''):
parts = [dirname]
parts.append('api_samples')
if cls.all_extensions:
parts.append('all_extensions')
if cls.extension_name:
alias = importutils.import_class(cls.extension_name).alias
parts.append(alias)
parts.append(name + "." + cls.ctype + suffix)
return os.path.join(*parts)
@classmethod
def _get_sample(cls, name):
dirname = os.path.dirname(os.path.abspath(__file__))
dirname = os.path.normpath(os.path.join(dirname, "../../../doc"))
return cls._get_sample_path(name, dirname)
@classmethod
def _get_template(cls, name):
dirname = os.path.dirname(os.path.abspath(__file__))
return cls._get_sample_path(name, dirname, suffix='.tpl')
def _read_template(self, name):
template = self._get_template(name)
with open(template) as inf:
return inf.read().strip()
def _write_template(self, name, data):
with open(self._get_template(name), 'w') as outf:
outf.write(data)
def _write_sample(self, name, data):
with open(self._get_sample(name), 'w') as outf:
outf.write(data)
def _compare_result(self, subs, expected, result, result_str):
matched_value = None
if isinstance(expected, dict):
if not isinstance(result, dict):
raise NoMatch(_('%(result_str)s: %(result)s is not a dict.')
% {'result_str': result_str, 'result': result})
ex_keys = sorted(expected.keys())
res_keys = sorted(result.keys())
if ex_keys != res_keys:
ex_delta = []
res_delta = []
for key in ex_keys:
if key not in res_keys:
ex_delta.append(key)
for key in res_keys:
if key not in ex_keys:
res_delta.append(key)
raise NoMatch(
_('Dictionary key mismatch:\n'
'Extra key(s) in template:\n%(ex_delta)s\n'
'Extra key(s) in %(result_str)s:\n%(res_delta)s\n') %
{'ex_delta': ex_delta, 'result_str': result_str,
'res_delta': res_delta})
for key in ex_keys:
res = self._compare_result(subs, expected[key], result[key],
result_str)
matched_value = res or matched_value
elif isinstance(expected, list):
if not isinstance(result, list):
raise NoMatch(
_('%(result_str)s: %(result)s is not a list.') %
{'result_str': result_str, 'result': result})
expected = expected[:]
extra = []
for res_obj in result:
for i, ex_obj in enumerate(expected):
try:
matched_value = self._compare_result(subs, ex_obj,
res_obj,
result_str)
del expected[i]
break
except NoMatch:
pass
else:
extra.append(res_obj)
error = []
if expected:
error.append(_('Extra list items in template:'))
error.extend([repr(o) for o in expected])
if extra:
error.append(_('Extra list items in %(result_str)s:') %
{'result_str': result_str})
error.extend([repr(o) for o in extra])
if error:
raise NoMatch('\n'.join(error))
elif isinstance(expected, six.string_types) and '%' in expected:
# NOTE(vish): escape stuff for regex
for char in '[]<>?':
expected = expected.replace(char, '\\%s' % char)
# NOTE(vish): special handling of subs that are not quoted. We are
# expecting an int but we had to pass in a string
# so the json would parse properly.
if expected.startswith("%(int:"):
result = str(result)
expected = expected.replace('int:', '')
expected = expected % subs
expected = '^%s$' % expected
match = re.match(expected, result)
if not match:
raise NoMatch(
_('Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: %(result)s') %
{'expected': expected, 'result_str': result_str,
'result': result})
try:
matched_value = match.group('id')
except IndexError:
if match.groups():
matched_value = match.groups()[0]
else:
if isinstance(expected, six.string_types):
# NOTE(danms): Ignore whitespace in this comparison
expected = expected.strip()
result = result.strip()
if expected != result:
raise NoMatch(
_('Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: '
'%(result)s') % {'expected': expected,
'result_str': result_str,
'result': result})
return matched_value
def generalize_subs(self, subs, vanilla_regexes):
"""Give the test a chance to modify subs after the server response
was verified, and before the on-disk doc/api_samples file is checked.
This may be needed by some tests to convert exact matches expected
from the server into pattern matches to verify what is in the
sample file.
If there are no changes to be made, subs is returned unharmed.
"""
return subs
def _verify_response(self, name, subs, response, exp_code):
self.assertEqual(response.status, exp_code)
response_data = response.read()
response_data = self._pretty_data(response_data)
if not os.path.exists(self._get_template(name)):
self._write_template(name, response_data)
template_data = response_data
else:
template_data = self._read_template(name)
if (self.generate_samples and
not os.path.exists(self._get_sample(name))):
self._write_sample(name, response_data)
sample_data = response_data
else:
with file(self._get_sample(name)) as sample:
sample_data = sample.read()
try:
template_data = self._objectify(template_data)
response_data = self._objectify(response_data)
response_result = self._compare_result(subs, template_data,
response_data, "Response")
# NOTE(danms): replace some of the subs with patterns for the
# doc/api_samples check, which won't have things like the
# correct compute host name. Also let the test do some of its
# own generalization, if necessary
vanilla_regexes = self._get_regexes()
subs['compute_host'] = vanilla_regexes['host_name']
subs['id'] = vanilla_regexes['id']
subs = self.generalize_subs(subs, vanilla_regexes)
sample_data = self._objectify(sample_data)
self._compare_result(subs, template_data, sample_data, "Sample")
return response_result
except NoMatch:
raise
def _get_host(self):
return 'http://openstack.example.com'
def _get_glance_host(self):
return 'http://glance.openstack.example.com'
def _get_regexes(self):
if self.ctype == 'json':
text = r'(\\"|[^"])*'
else:
text = r'[^<]*'
return {
# NOTE(treinish): Could result in a false positive, but it
# shouldn't be an issue for this case.
'timestamp': '\d{4}-[0,1]\d-[0-3]\d[ ,T]'
'\d{2}:\d{2}:\d{2}'
'(Z|(\+|-)\d{2}:\d{2}|\.\d{6}|)',
'password': '[0-9a-zA-Z]{1,12}',
'ip': '[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}',
'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}',
'id': '(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12})',
'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12}',
'reservation_id': 'r-[0-9a-zA-Z]{8}',
'private_key': '-----BEGIN RSA PRIVATE KEY-----'
'[a-zA-Z0-9\n/+=]*'
'-----END RSA PRIVATE KEY-----',
'public_key': 'ssh-rsa[ a-zA-Z0-9/+=]*'
'Generated by Nova',
'fingerprint': '([0-9a-f]{2}:){15}[0-9a-f]{2}',
'host': self._get_host(),
'host_name': '[0-9a-z]{32}',
'glance_host': self._get_glance_host(),
'compute_host': self.compute.host,
'text': text,
'int': '[0-9]+',
}
def _get_response(self, url, method, body=None, strip_version=False):
headers = {}
headers['Content-Type'] = 'application/' + self.ctype
headers['Accept'] = 'application/' + self.ctype
return self.api.api_request(url, body=body, method=method,
headers=headers, strip_version=strip_version)
def _do_get(self, url, strip_version=False):
return self._get_response(url, 'GET', strip_version=strip_version)
def _do_post(self, url, name, subs, method='POST'):
body = self._read_template(name) % subs
sample = self._get_sample(name)
if self.generate_samples and not os.path.exists(sample):
self._write_sample(name, body)
return self._get_response(url, method, body)
def _do_put(self, url, name, subs):
return self._do_post(url, name, subs, method='PUT')
def _do_delete(self, url):
return self._get_response(url, 'DELETE')
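# Hypothetical illustration (not an actual Nova test class) of how a concrete
# sample test uses the helpers above: issue a request, then verify it against
# the template/sample pair using the standard substitutions.
class _ExampleApiSampleTest(ApiSampleTestBase):
    def test_example_get(self):
        # 'example-resources' is a placeholder URL, not a real Nova endpoint.
        response = self._do_get('example-resources')
        subs = self._get_regexes()
        self._verify_response('example-resources-get-resp', subs, response, 200)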
|
|
#!/usr/bin/env python
#
# Copyright 2012 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This software exports a splunk index via the streaming export endpoint,
using a parameterized chunking mechanism.
"""
# installation support files
from __future__ import absolute_import
from __future__ import print_function
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
import time
from os import path
# splunk support files
from splunklib.binding import connect
try:
from utils import parse
except ImportError:
raise Exception("Add the SDK repository to your PYTHONPATH to run the examples "
"(e.g., export PYTHONPATH=~/splunk-sdk-python.")
# hidden file
OUTPUT_FILE = "./export.out"
OUTPUT_MODE = "xml"
OUTPUT_MODES = ["csv", "xml", "json"]
CLIRULES = {
'end': {
'flags': ["--endtime"],
'default': "",
'help': "Start time of export (default is start of index)"
},
'index': {
'flags': ["--index"],
'default': "*",
'help': "Index to export (default is all user defined indices)"
},
'omode': {
'flags': ["--omode"],
'default': OUTPUT_MODE,
'help': "output format %s default is %s" % (OUTPUT_MODES, OUTPUT_MODE)
},
'output': {
'flags': ["--output"],
'default': OUTPUT_FILE,
'help': "Output file name (default is %s)" % OUTPUT_FILE
},
'recover': {
'flags': ["--recover"],
'default': False,
'help': "Export attempts to recover from end of existing export"
},
'search': {
'flags': ["--search"],
'default': "search *",
'help': "search string (default 'search *')"
},
'start': {
'flags': ["--starttime"],
'default': "",
'help': "Start time of export (default is start of index)"
}
}
def get_csv_next_event_start(location, event_buffer):
""" determin the event start and end of *any* valid event """
start = -1
end = -1
event_start = event_buffer.find("\n", location + 1)
event_end = event_buffer.find('"\n', event_start + 1)
while (event_end > 0):
parts = event_buffer[event_start:event_end].split(",")
        # test parts 0 and 1 of the CSV. Format should be time.qqq; anything
        # else is not a timestamp, so keep moving.
try:
int(parts[0].replace('\n',""))
timestamp = parts[1].replace('"', "")
timeparts = timestamp.split('.')
int(timeparts[0])
int(timeparts[1])
return (event_start, event_end)
except:
event_start = event_buffer.find("\n", event_end + 2)
event_end = event_buffer.find('"\n', event_start + 1)
return (start, end)
def get_csv_event_start(event_buffer):
""" get the event start of an event that is different (in time)from the
adjoining event, in CSV format """
(start, end) = get_csv_next_event_start(0, event_buffer)
if start < 0:
return (-1, -1, "")
print(event_buffer[start:end])
tstart = event_buffer.find(",", start)
tend = event_buffer.find(",", tstart+1)
print(event_buffer[tstart:tend])
last_time = event_buffer[tstart+1:tend].replace('"',"")
while end > 0:
(start, end) = get_csv_next_event_start(start, event_buffer)
if end < 0:
return (-1, -1, "")
tstart = event_buffer.find(",", start)
tend = event_buffer.find(",", tstart+1)
this_time = event_buffer[tstart+1:tend].replace('"',"")
if this_time != last_time:
return (start, end + 1, last_time)
return (-1, -1, "")
def get_xml_event_start(event_buffer):
""" get the event start of an event that is different (in time)from the
adjoining event, in XML format """
result_pattern = "<result offset='"
time_key_pattern = "<field k='_time'>"
time_start_pattern = "<value><text>"
time_end_pattern = "<"
event_end_pattern = "</result>"
event_start = event_buffer.find(result_pattern)
event_end = event_buffer.find(event_end_pattern, event_start) + \
len(event_end_pattern)
if event_end < 0:
return (-1, -1, "")
time_key_start = event_buffer.find(time_key_pattern, event_start)
time_start = event_buffer.find(time_start_pattern, time_key_start) + \
len(time_start_pattern)
time_end = event_buffer.find(time_end_pattern, time_start + 1)
last_time = event_buffer[time_start:time_end]
    # walk through events until time changes
event_start = event_end
while event_end > 0:
event_start = event_buffer.find(result_pattern, event_start + 1)
event_end = event_buffer.find(event_end_pattern, event_start) + \
len(event_end_pattern)
if event_end < 0:
return (-1, -1, "")
time_key_start = event_buffer.find(time_key_pattern, event_start)
time_start = event_buffer.find(time_start_pattern, time_key_start)
time_end = event_buffer.find(time_end_pattern, time_start)
this_time = event_buffer[time_start:time_end]
if this_time != last_time:
return (event_start, event_end, last_time)
event_start = event_end
return (-1, -1, "")
def get_json_event_start(event_buffer):
""" get the event start of an event that is different (in time)from the
adjoining event, in XML format """
event_start_pattern = '{"_cd":"'
time_key_pattern = '"_time":"'
time_end_pattern = '"'
event_end_pattern = '"},\n'
event_end_pattern2 = '"}[]' # old json output format bug
event_start = event_buffer.find(event_start_pattern)
event_end = event_buffer.find(event_end_pattern, event_start) + \
len(event_end_pattern)
if event_end < 0:
event_end = event_buffer.find(event_end_pattern2, event_start) + \
len(event_end_pattern2)
if (event_end < 0):
return (-1, -1, "")
time_start = event_buffer.find(time_key_pattern, event_start) + \
len(time_key_pattern)
time_end = event_buffer.find(time_end_pattern, time_start + 1)
last_time = event_buffer[time_start:time_end]
event_start = event_end
while event_end > 0:
event_start = event_buffer.find(event_start_pattern, event_start + 1)
event_end = event_buffer.find(event_end_pattern, event_start) + \
len(event_end_pattern)
if event_end < 0:
event_end = event_buffer.find(event_end_pattern2, event_start) + \
len(event_end_pattern2)
if (event_end < 0):
return (-1, -1, "")
time_start = event_buffer.find(time_key_pattern, event_start) + \
len(time_key_pattern)
time_end = event_buffer.find(time_end_pattern, time_start + 1)
this_time = event_buffer[time_start:time_end]
if this_time != last_time:
return (event_start-2, event_end, last_time)
event_start = event_end
return (-1, -1, "")
def get_event_start(event_buffer, event_format):
""" dispatch event start method based on event format type """
if event_format == "csv":
return get_csv_event_start(event_buffer)
elif event_format == "xml":
return get_xml_event_start(event_buffer)
else:
return get_json_event_start(event_buffer)
def recover(options):
""" recover from an existing export run. We do this by
finding the last time change between events, truncate the file
and restart from there """
event_format = options.kwargs['omode']
buffer_size = 64*1024
fpd = open(options.kwargs['output'], "r+")
fpd.seek(0, 2) # seek to end
fptr = max(fpd.tell() - buffer_size, 0)
fptr_eof = 0
while (fptr > 0):
fpd.seek(fptr)
event_buffer = fpd.read(buffer_size)
(event_start, next_event_start, last_time) = \
get_event_start(event_buffer, event_format)
if (event_start != -1):
fptr_eof = event_start + fptr
break
fptr = fptr - buffer_size
if fptr < 0:
# didn't find a valid event, so start over
fptr_eof = 0
last_time = 0
# truncate file here
fpd.truncate(fptr_eof)
fpd.seek(fptr_eof)
fpd.write("\n")
fpd.close()
return last_time
def cleanup_tail(options):
""" cleanup the tail of a recovery """
if options.kwargs['omode'] == "csv":
options.kwargs['fd'].write("\n")
elif options.kwargs['omode'] == "xml":
options.kwargs['fd'].write("\n</results>\n")
else:
options.kwargs['fd'].write("\n]\n")
def export(options, service):
""" main export method: export any number of indexes """
start = options.kwargs['start']
end = options.kwargs['end']
fixtail = options.kwargs['fixtail']
once = True
squery = options.kwargs['search']
squery = squery + " index=%s" % options.kwargs['index']
if (start != ""):
squery = squery + " earliest_time=%s" % start
if (end != ""):
squery = squery + " latest_time=%s" % end
success = False
while not success:
# issue query to splunkd
# count=0 overrides the maximum number of events
# returned (normally 50K) regardless of what the .conf
# file for splunkd says.
result = service.get('search/jobs/export',
search=squery,
output_mode=options.kwargs['omode'],
timeout=60,
earliest_time="0.000",
time_format="%s.%Q",
count=0)
if result.status != 200:
print("warning: export job failed: %d, sleep/retry" % result.status)
time.sleep(60)
else:
success = True
# write export file
while True:
if fixtail and once:
cleanup_tail(options)
once = False
content = result.body.read()
if len(content) == 0: break
options.kwargs['fd'].write(content)
options.kwargs['fd'].write("\n")
options.kwargs['fd'].flush()
def main():
""" main entry """
options = parse(sys.argv[1:], CLIRULES, ".splunkrc")
if options.kwargs['omode'] not in OUTPUT_MODES:
print("output mode must be one of %s, found %s" % (OUTPUT_MODES,
options.kwargs['omode']))
sys.exit(1)
service = connect(**options.kwargs)
if path.exists(options.kwargs['output']):
if not options.kwargs['recover']:
print("Export file %s exists, and recover option nor specified" % \
options.kwargs['output'])
sys.exit(1)
else:
options.kwargs['end'] = recover(options)
options.kwargs['fixtail'] = True
openmode = "a"
else:
openmode = "w"
options.kwargs['fixtail'] = False
try:
options.kwargs['fd'] = open(options.kwargs['output'], openmode)
except IOError:
print("Failed to open output file %s w/ mode %s" % \
(options.kwargs['output'], openmode))
sys.exit(1)
export(options, service)
if __name__ == '__main__':
main()
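# Illustrative invocations (index name and output path are placeholders):
#
#   python export.py --index=main --omode=csv --output=main_export.csv
#   python export.py --index=main --omode=csv --output=main_export.csv --recover
#
# The second form resumes an interrupted export: recover() truncates the output
# file back to the last complete timestamp boundary and the export restarts from
# that time.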
|
|
#!/usr/bin/env python
import sys, time, gc
from omniORB import CORBA, PortableServer
import omniORB
import TypeTest, TypeTest__POA
failed = []
output = 1
exc_info = 0
def tstart(s):
global current_test, output
current_test = s
if output:
sys.stdout.write(s + ": ")
sys.stdout.flush()
def tresult(s):
if output:
sys.stdout.write(str(s))
sys.stdout.flush()
def tpass():
if output:
sys.stdout.write(" pass\n")
sys.stdout.flush()
def tfail():
global failed, current_test
failed.append(current_test)
if output:
sys.stdout.write(" fail\n")
sys.stdout.flush()
def tdone():
global failed
if len(failed) > 0:
print "\nFailed tests:"
for fail in failed:
print " ", fail
else:
if output:
print "\nAll tests passed."
def exc(ex):
if exc_info:
sys.stdout.write(" %s\n" % ex)
sys.stdout.flush()
op_called = 0
j_deleted = 0
class J_i (TypeTest__POA.J):
def __init__(self):
global op_called, j_deleted
op_called = j_deleted = 0
def op(self):
global op_called
op_called = 1
def __del__(self):
global j_deleted
j_deleted = 1
def doTests(orb, poa, io):
global op_called, failed, exc_info
if "-e" in sys.argv:
exc_info = 1
failed = []
tstart("Void")
r = io.simple1()
if r is None: tpass()
else: tfail()
tstart("Oneway")
r = io.simple2()
if r is None: tpass()
else: tfail()
tstart("Short")
r = io.simple3(42)
if r == 42: tpass()
else: tfail()
tstart("UShort")
r = io.simple4(42)
if r == 42: tpass()
else: tfail()
tstart("Long")
r = io.simple5(42)
if r == 42: tpass()
else: tfail()
tstart("ULong with long")
r = io.simple6(42L)
if r == 42L: tpass()
else: tfail()
tstart("ULong with int")
r = io.simple6(42)
if r == 42: tpass()
else: tfail()
tstart("Float with float")
r = io.simple7(1.234)
tresult(r)
tpass()
tstart("Float with int")
r = io.simple7(42)
if r == 42: tpass()
else: tfail()
tstart("Double with float")
r = io.simple8(1.234)
if r == 1.234: tpass()
else: tfail()
tstart("Double with int")
r = io.simple8(42)
if r == 42: tpass()
else: tfail()
tstart("Boolean")
r = io.simple9(1)
if r == 1: tpass()
else: tfail()
tstart("Char")
r = io.simple10("a")
if r == "a": tpass()
else: tfail()
tstart("Octet")
r = io.simple11(123)
if r == 123: tpass()
else: tfail()
tstart("Invalid arguments")
ok = 1
try:
io.simple1(5)
ok = 0
tresult("-")
except TypeError:
tresult("+")
try:
io.simple3(1.234)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.simple3("Hello")
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.simple3(0x8123)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.simple4(-1)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.simple6(1233456789012345L)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.simple10("Hello")
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.simple10(65)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.simple11(1234)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.simple11(-1)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
if ok: tpass()
else: tfail()
tstart("Combination 1")
r = io.combine1(123, 456, 789L)
if r is None: tpass()
else: tfail()
tstart("Combination 2")
r = io.combine2(123, 456L)
tresult(r)
tpass()
tstart("Short attr")
io._set_sattr1(123)
tresult("+")
r = io._get_sattr1()
if r == 123: tpass()
else: tfail()
tstart("UShort attr")
io._set_sattr2(123)
tresult("+")
r = io._get_sattr2()
if r == 123: tpass()
else: tfail()
tstart("Long attr")
io._set_sattr3(123)
tresult("+")
r = io._get_sattr3()
if r == 123: tpass()
else: tfail()
tstart("ULong attr")
io._set_sattr4(123L)
tresult("+")
r = io._get_sattr4()
if r == 123L: tpass()
else: tfail()
tstart("Float attr")
io._set_sattr5(1.234)
tresult("+")
r = io._get_sattr5()
tresult(r)
tpass()
tstart("Double attr")
io._set_sattr6(1.234)
tresult("+")
r = io._get_sattr6()
if r == 1.234: tpass()
else: tfail()
tstart("Boolean attr")
io._set_sattr7(0)
tresult("+")
r = io._get_sattr7()
if r == 0: tpass()
else: tfail()
tstart("Char attr")
io._set_sattr8("a")
tresult("+")
r = io._get_sattr8()
if r == "a": tpass()
else: tfail()
tstart("Octet attr")
io._set_sattr9(123)
tresult("+")
r = io._get_sattr9()
if r == 123: tpass()
else: tfail()
tstart("Short readonly attr")
r = io._get_rattr1()
if r == 123: tpass()
else: tfail()
tstart("UShort readonly attr")
r = io._get_rattr2()
if r == 123: tpass()
else: tfail()
tstart("Long readonly attr")
r = io._get_rattr3()
if r == 123: tpass()
else: tfail()
tstart("ULong readonly attr")
r = io._get_rattr4()
if r == 123L: tpass()
else: tfail()
tstart("Float readonly attr")
r = io._get_rattr5()
tresult(r)
tpass()
tstart("Double readonly attr")
r = io._get_rattr6()
if r == 1.234: tpass()
else: tfail()
tstart("Boolean readonly attr")
r = io._get_rattr7()
if r == 0: tpass()
else: tfail()
tstart("Char readonly attr")
r = io._get_rattr8()
if r == "a": tpass()
else: tfail()
tstart("Octet readonly attr")
r = io._get_rattr9()
if r == 123: tpass()
else: tfail()
tstart("Invalid attributes")
ok = 1
try:
io._set_sattr1("hello")
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io._set_sattr2(1.234)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io._set_sattr3("hello")
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io._set_sattr4("hello")
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io._set_sattr5("hello")
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io._set_sattr6("hello")
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io._set_sattr7("hello")
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io._set_sattr8("hello")
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io._set_sattr9("hello")
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
if ok: tpass()
else: tfail()
tstart("Attributes by property")
tstart("Short attr")
io.sattr1 = 123
tresult("+")
r = io.sattr1
if r == 123: tpass()
else: tfail()
tstart("UShort attr")
io.sattr2 = 123
tresult("+")
r = io.sattr2
if r == 123: tpass()
else: tfail()
tstart("Long attr")
io.sattr3 = 123
tresult("+")
r = io.sattr3
if r == 123: tpass()
else: tfail()
tstart("ULong attr")
io.sattr4 = 123L
tresult("+")
r = io.sattr4
if r == 123L: tpass()
else: tfail()
tstart("Float attr")
io.sattr5 = 1.234
tresult("+")
r = io.sattr5
tresult(r)
tpass()
tstart("Double attr")
io.sattr6 = 1.234
tresult("+")
r = io.sattr6
if r == 1.234: tpass()
else: tfail()
tstart("Boolean attr")
io.sattr7 = 0
tresult("+")
r = io.sattr7
if r == 0: tpass()
else: tfail()
tstart("Char attr")
io.sattr8 = "a"
tresult("+")
r = io.sattr8
if r == "a": tpass()
else: tfail()
tstart("Octet attr")
io.sattr9 = 123
tresult("+")
r = io.sattr9
if r == 123: tpass()
else: tfail()
tstart("Short readonly attr")
r = io.rattr1
if r == 123: tpass()
else: tfail()
tstart("UShort readonly attr")
r = io.rattr2
if r == 123: tpass()
else: tfail()
tstart("Long readonly attr")
r = io.rattr3
if r == 123: tpass()
else: tfail()
tstart("ULong readonly attr")
r = io.rattr4
if r == 123L: tpass()
else: tfail()
tstart("Float readonly attr")
r = io.rattr5
tresult(r)
tpass()
tstart("Double readonly attr")
r = io.rattr6
if r == 1.234: tpass()
else: tfail()
tstart("Boolean readonly attr")
r = io.rattr7
if r == 0: tpass()
else: tfail()
tstart("Char readonly attr")
r = io.rattr8
if r == "a": tpass()
else: tfail()
tstart("Octet readonly attr")
r = io.rattr9
if r == 123: tpass()
else: tfail()
tstart("Invalid attributes")
ok = 1
try:
io.sattr1 = "hello"
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.sattr2 = 1.234
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.sattr3 = "hello"
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.sattr4 = "hello"
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.sattr5 = "hello"
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.sattr6 = "hello"
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.sattr7 = "hello"
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.sattr8 = "hello"
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
io.sattr9 = "hello"
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
if ok: tpass()
else: tfail()
tstart("Property attribute")
io.propattr = "Test attribute"
tresult("+")
r = io.propattr
if r == "Test attribute": tpass()
else: tfail()
ji = J_i()
jo = ji._this()
tstart("Known Object")
r = io.complex1(jo)
if r._is_equivalent(jo): tpass()
else: tfail()
tstart("Unknown Object")
try:
ns = orb.resolve_initial_references("NameService")
r = io.complex1(ns)
if r._is_equivalent(ns): tpass()
else: tfail()
except CORBA.NO_RESOURCES:
tresult("skip")
tpass()
tstart("Nil Object")
r = io.complex1(None)
if r is None: tpass()
else: tfail()
tstart("Known interface")
r = io.complex2(jo)
ok = 1
if r._is_equivalent(jo):
tresult("+")
else:
ok = 0
tresult("-")
if op_called:
tresult("+")
else:
ok = 0
tresult("-")
if ok: tpass()
else: tfail()
tstart("Deactivate object")
id = poa.reference_to_id(jo)
del ji, jo
r = None
poa.deactivate_object(id)
time.sleep(0.1) # In case a separate thread is doing the deletion
if j_deleted: tpass()
else: tfail()
tstart("Struct S1")
s1 = TypeTest.S1(1, 2, 3, 4, 5.6, 7, 1, "a", 9)
r = io.complex3(s1)
if r.a == 1 and \
r.b == 2 and \
r.c == 3 and \
r.d == 4 and \
r.e > 5.0 and r.e < 6.0 and \
r.f == 7 and \
r.g == 1 and \
r.h == "a" and \
r.i == 9:
tpass()
else:
tfail()
tstart("Invalid Struct S1")
s1 = TypeTest.S1(1, 2, 3, 4, 5.6, 7.8, 1, "ab", 9)
try:
r = io.complex3(s1)
tfail()
except CORBA.BAD_PARAM, ex:
exc(ex)
tpass()
tstart("Struct S1 in class")
class mys1:
a = 1
b = 2
c = 3
d = 4
e = 5.6
f = 7.8
g = 1
h = "a"
i = 9
r = io.complex3(mys1)
if r.a == 1 and \
r.b == 2 and \
r.c == 3 and \
r.d == 4 and \
r.e > 5.0 and r.e < 6.0 and \
r.f == 7.8 and \
r.g == 1 and \
r.h == "a" and \
r.i == 9:
tpass()
else:
tfail()
tstart("Struct S1 in non-S1 instance")
r = io.complex3(mys1())
if r.a == 1 and \
r.b == 2 and \
r.c == 3 and \
r.d == 4 and \
r.e > 5.0 and r.e < 6.0 and \
r.f == 7.8 and \
r.g == 1 and \
r.h == "a" and \
r.i == 9:
tpass()
else:
tfail()
tstart("Union U1")
ok = 1
u = TypeTest.U1(a=123)
r = io.complex4(u)
if r.a == 123:
tresult("+")
else:
ok = 0
tresult("-")
u = TypeTest.U1(b=42)
r = io.complex4(u)
if r.b == 42 and u._d == r._d:
tresult("+" + str(u._d) + str(r._d))
else:
ok = 0
tresult("-" + str(u._d) + str(r._d))
u = TypeTest.U1(1, 42)
r = io.complex4(u)
if r.b == 42 and u._d == r._d:
tresult("+" + str(u._d) + str(r._d))
else:
ok = 0
tresult("-" + str(u._d) + str(r._d))
u = TypeTest.U1(c=5)
r = io.complex4(u)
if r.c == 5:
tresult("+")
else:
ok = 0
tresult("-")
u = TypeTest.U1(42, None)
r = io.complex4(u)
if r._d == 42:
tresult("+" + str(r._v))
else:
ok = 0
tresult("-" + str(r._v))
u = TypeTest.U1(42, "Hello")
r = io.complex4(u)
if r._d == 42:
tresult("+" + str(r._v))
else:
ok = 0
tresult("-" + str(r._v))
if ok: tpass()
else: tfail()
tstart("Union U2")
ok = 1
u = TypeTest.U2(a=123)
r = io.complex5(u)
if r.a == 123:
tresult("+")
else:
ok = 0
tresult("-")
u = TypeTest.U2(b=42)
r = io.complex5(u)
if r.b == 42 and u._d == r._d:
tresult("+" + str(u._d) + str(r._d))
else:
ok = 0
tresult("-" + str(u._d) + str(r._d))
u = TypeTest.U2("b", 42)
r = io.complex5(u)
if r.b == 42 and u._d == r._d:
tresult("+" + str(u._d) + str(r._d))
else:
ok = 0
tresult("-" + str(u._d) + str(r._d))
u = TypeTest.U2(c=5)
r = io.complex5(u)
if r.c == 5:
tresult("+")
else:
ok = 0
tresult("-")
if ok: tpass()
else: tfail()
tstart("Invalid Unions")
s1 = TypeTest.S1(1, 2, 3, 4, 5.6, 7.8, 1, "a", 9)
ok = 1
try:
r = io.complex4(s1)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
u = TypeTest.U2(b = 0x81234)
r = io.complex5(u)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
if ok: tpass()
else: tfail()
tstart("Enum")
ok = 1
r = io.complex6(TypeTest.one)
if r == TypeTest.one:
tresult("+")
else:
tresult("-")
ok = 0
r = io.complex6(TypeTest.four)
if r == TypeTest.four:
tresult("+")
else:
tresult("-")
ok = 0
if ok: tpass()
else: tfail()
tstart("Invalid enum")
ok = 1
try:
r = io.complex6(1234)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
r = io.complex6(TypeTest.five)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
try:
r = io.complex6(TypeTest.nine)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
if ok: tpass()
else: tfail()
tstart("String")
ok = 1
s = "Hello there"
r = io.complex7(s)
if r == s:
tresult("+")
else:
tresult("-")
ok = 0
s = "This is a much longer string with lots of stuff in it. Blah blah blah"
r = io.complex7(s)
if r == s:
tresult("+")
else:
tresult("-")
ok = 0
s = "This is a string with a \0 in it."
try:
r = io.complex7(s)
tresult("-")
ok = 0
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
if ok: tpass()
else: tfail()
tstart("Bounded string")
ok = 1
s = "Hello"
r = io.complex8(s)
if r == s:
tresult("+")
else:
tresult("-")
ok = 0
s = "This is a much longer string with lots of stuff in it. Blah blah blah"
try:
r = io.complex8(s)
tresult("-")
ok = 0
except CORBA.MARSHAL:
tresult("+")
s = "str\0ing"
try:
r = io.complex8(s)
tresult("-")
ok = 0
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
if ok: tpass()
else: tfail()
tstart("Sequences")
ok = 1
s = [1, 2, 3, 4, 5]
r = io.complex9(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = (-6, 7, -8, 9, 10, 11)
r = io.complex9(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = [1, 2, 3, 4, 5]
r = io.complex10(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = (1, 2, -3, 4, 5)
r = io.complex11(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = [1L, 2, 3L, 4L, 5]
r = io.complex12(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = (1.2, 2, -3.4, 4.5, 5.6)
r = io.complex13(s)
if len(r) == len(s):
tresult(r)
else:
ok = 0
tresult("-")
s = [1.2, 2, -3.4, 4.5, 5.6]
r = io.complex14(s)
if len(r) == len(s):
tresult(r)
else:
ok = 0
tresult("-")
s = (1, 0, 1, 1, 0, 3, 1, 1, 1)
r = io.complex15(s)
if len(r) == len(s):
tresult(r)
else:
ok = 0
tresult("-")
s = "This is a sequence of char with a \0 in it"
r = io.complex16(s)
if r == s:
tresult("+")
else:
ok = 0
tresult("-")
s = "This is a sequence of octet, which is remarkably similar to a sequence of char. It also has a \0 in it."
r = io.complex17(s)
if r == s:
tresult("+")
else:
ok = 0
tresult("-")
if ok: tpass()
else: tfail()
tstart("Invalid sequences")
ok = 1
s = [1, 2, "Hi", 4, 5]
try:
r = io.complex9(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = (-6, 7, -8, 9, 10, 11)
try:
r = io.complex10(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = (1, 2, -3.5, 4, 5)
try:
r = io.complex11(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = [1L, 2, -3L, 4L, 5]
try:
r = io.complex12(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = (1.2, 2, -3.4, None, 5.6)
try:
r = io.complex13(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = [1.2, "Hi", -3.4, 4.5, 5.6]
try:
r = io.complex14(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = (1, 0, 1, 1.2, 0, 3, 1, 1, 1)
try:
r = io.complex15(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = [1, 2, 3, 4, 5]
try:
r = io.complex16(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = [1, 2, 3, 4, 5]
try:
r = io.complex17(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
if ok: tpass()
else: tfail()
tstart("Bounded Sequences")
ok = 1
s = [1, 2, 3, 4, 5]
r = io.complex18(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = (-6, 7, -8, 9, 10, 11)
r = io.complex18(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = [1, 2, 3, 4, 5]
r = io.complex19(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = (1, 2, -3, 4, 5)
r = io.complex20(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = [1L, 2, 3L, 4L, 5]
r = io.complex21(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = (1.2, 2, -3.4, 4.5, 5.6)
r = io.complex22(s)
if len(r) == len(s):
tresult(r)
else:
ok = 0
tresult("-")
s = [1.2, 2, -3.4, 4.5, 5.6]
r = io.complex23(s)
if len(r) == len(s):
tresult(r)
else:
ok = 0
tresult("-")
s = (1, 0, 1, 1, 0, 3, 1, 1, 1, 1)
r = io.complex24(s)
if len(r) == len(s):
tresult(r)
else:
ok = 0
tresult("-")
s = "This is a "
r = io.complex25(s)
if r == s:
tresult("+")
else:
ok = 0
tresult("-")
s = "This is a "
r = io.complex26(s)
if r == s:
tresult("+")
else:
ok = 0
tresult("-")
if ok: tpass()
else: tfail()
tstart("Invalid bounded sequences")
ok = 1
s = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
try:
r = io.complex18(s)
ok = 0
tresult("-")
except CORBA.MARSHAL:
tresult("+")
s = (-6, 7, -8, 9, 10, 11)
try:
r = io.complex19(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = (1, 2, -3, 4, 5, 6, 7, 8, 9, 10, 12, 134)
try:
r = io.complex20(s)
ok = 0
tresult("-")
except CORBA.MARSHAL:
tresult("+")
s = [1L, 2, 3L, 4L, 5, 6L, 7L, 8, 9, 10, 11, 12]
try:
r = io.complex21(s)
ok = 0
tresult("-")
except CORBA.MARSHAL:
tresult("+")
s = (1.2, 2, -3.4, 4.5, 5.6, 8, 9, 10, 11, 12, 14)
try:
r = io.complex22(s)
ok = 0
tresult("-")
except CORBA.MARSHAL:
tresult("+")
s = [1.2, 2.3, -3.4, 4.5, 5.6, 7, 8, 9, 10, 11, 12, 145]
try:
r = io.complex23(s)
ok = 0
tresult("-")
except CORBA.MARSHAL:
tresult("+")
s = (1, 0, 1, 1.2, 0, 3, 1, 1, 1)
try:
r = io.complex24(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = "This is a long sequence<char>"
try:
r = io.complex25(s)
ok = 0
tresult("-")
except CORBA.MARSHAL:
tresult("+")
s = "This is a long sequence<octet>"
try:
r = io.complex26(s)
ok = 0
tresult("-")
except CORBA.MARSHAL:
tresult("+")
if ok: tpass()
else: tfail()
tstart("Arrays")
ok = 1
s = [1, 2, -3, 4, 5]
r = io.complex27(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = [1, 2, 3, 4, 5]
r = io.complex28(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = (1, 2, -3, 4, 5)
r = io.complex29(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = [1L, 2, 3L, 4L, 5]
r = io.complex30(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = (1.2, 2, -3.4, 4.5, 5.6)
r = io.complex31(s)
if len(r) == len(s):
tresult(r)
else:
ok = 0
tresult("-")
s = [1.2, 2, -3.4, 4.5, 5.6]
r = io.complex32(s)
if len(r) == len(s):
tresult(r)
else:
ok = 0
tresult("-")
s = (1, 0, 1, 3, 0)
r = io.complex33(s)
if len(r) == len(s):
tresult(r)
else:
ok = 0
tresult("-")
s = "12345"
r = io.complex34(s)
if r == s:
tresult("+")
else:
ok = 0
tresult("-")
s = "ab\0de"
r = io.complex35(s)
if r == s:
tresult("+")
else:
ok = 0
tresult("-")
s = ["12345", "hello", "abc", "aa", "This is a long string"]
r = io.complex36(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
if ok: tpass()
else: tfail()
tstart("Invalid arrays")
ok = 1
s = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
try:
r = io.complex27(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = (-6, 7, -8, 9, 10)
try:
r = io.complex28(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = (1, 2, -3, 4, 5, 6, 7, 8, 9, 10, 12, 134)
try:
r = io.complex29(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = [1L, 2, 3L, 4L, 5, 6L, 7L, 8, 9, 10, 11, 12]
try:
r = io.complex30(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = (1.2, 2, -3.4, 4.5, 5.6, 8, 9, 10, 11, 12, 14)
try:
r = io.complex31(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = [1.2, 2.3, -3.4, 4.5, 5.6, 7, 8, 9, 10, 11, 12, 145]
try:
r = io.complex32(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = (1, 0, 1, 1.2, 0, 3, 1, 1, 1)
try:
r = io.complex33(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = "This is a long sequence<char>"
try:
r = io.complex34(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = "Hi!"
try:
r = io.complex35(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = ["12345", 12345, "abc", "", "This is a long string"]
try:
r = io.complex36(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
if ok: tpass()
else: tfail()
tstart("Short with Long")
r = io.simple3(42L)
if r == 42: tpass()
else: tfail()
tstart("UShort with Long")
r = io.simple4(42L)
if r == 42: tpass()
else: tfail()
tstart("Long with Long")
r = io.simple5(42L)
if r == 42: tpass()
else: tfail()
tstart("Float with Long")
r = io.simple7(42L)
if r == 42: tpass()
else: tfail()
tstart("Double with Long")
r = io.simple8(42L)
if r == 42: tpass()
else: tfail()
tstart("Octet with Long")
r = io.simple11(123L)
if r == 123: tpass()
else: tfail()
tstart("Sequences with Longs")
ok = 1
s = [1L, 2L, 3L, 4L, 5L]
r = io.complex9(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = (-6L, 7L, -8L, 9L, 10L, 11L)
r = io.complex9(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = [1L, 2L, 3L, 4L, 5L]
r = io.complex10(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = (1L, 2L, -3L, 4L, 5L)
r = io.complex11(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = [1L, 2, 3L, 4L, 5]
r = io.complex12(s)
if list(r) == list(s):
tresult("+")
else:
ok = 0
tresult("-")
s = (1L, 2L, -3L, 4L, 5L)
r = io.complex13(s)
if len(r) == len(s):
tresult(r)
else:
ok = 0
tresult("-")
s = [1L, 2L, -3L, 4L, 5L]
r = io.complex14(s)
if len(r) == len(s):
tresult(r)
else:
ok = 0
tresult("-")
s = (1L, 0L, 1L, 1L, 0L, 3L, 1L, 1L, 1L)
r = io.complex15(s)
if len(r) == len(s):
tresult(r)
else:
ok = 0
tresult("-")
if ok: tpass()
else: tfail()
tstart("Struct S2")
ok = 1
jo = J_i()._this()
s2 = TypeTest.S2(TypeTest.S1(1, 2, 3, 4, 5.6, 7, 8, "a", 10),
TypeTest.U1(a = 5),
jo,
[1, 2, 3, 4, 5, 6, 7],
"octet sequence",
"string")
r = io.complex37(s2)
if r.c._is_equivalent(jo):
tresult("+")
else:
ok = 0
tresult("-")
id = poa.reference_to_id(jo)
poa.deactivate_object(id)
s2 = TypeTest.S2(TypeTest.S1(1, 2, 3, 4, 5.6, 7, 8, "a", 10),
TypeTest.U1(a = 5),
None,
[1, 2, 3, 4, 5, 6, 7],
"octet sequence",
"string")
r = io.complex37(s2)
if r.c is None:
tresult("+")
else:
ok = 0
tresult("-")
if ok: tpass()
else: tfail()
tstart("Invalid Struct S2")
s2 = TypeTest.S2(TypeTest.S1(1, 2, 3, 4, 5.6, 7, 8, "a", 10),
TypeTest.U1(a = 5),
12345,
[1, 2, 3, 4, 5, 6, 7],
"octet sequence",
"string")
try:
r = io.complex37(s2)
tfail()
except CORBA.BAD_PARAM, ex:
exc(ex)
tpass()
tstart("Struct S3")
i1 = TypeTest.S3(42, TypeTest.S3.U(0,None))
i2 = TypeTest.S3(5, TypeTest.S3.U(a=[i1]))
i3 = TypeTest.S3(17, TypeTest.S3.U(a=[i2, i1]))
r = io.complex38(i3)
if r.b.a[0].a == 5: tpass()
else: tfail()
tstart("Sequence of struct S1")
ok = 1
s = [TypeTest.S1(1, 2, 3, 4, 5.6, 7, 8, "a", 10),
TypeTest.S1(-10, 0xffff, -1234567, 42L, 1.234, 5.678, 0, "z", 255)]
r = io.complex39(s)
if r[1].b == 0xffff:
tresult("+")
else:
ok = 0
tresult("-")
s = []
r = io.complex39(s)
if r == s:
tresult("+")
else:
ok = 0
tresult("-")
if ok: tpass()
else: tfail()
tstart("Invalid sequence of struct S1")
ok = 1
s = [TypeTest.S1(1, 2, 3, 4, 5.6, 7, 8, "a", 10),
TypeTest.S1(-10, 0x10000, -1234567, 42L, 1.234, 5.678, 0, "z", 255)]
try:
r = io.complex39(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
s = [123]
try:
r = io.complex39(s)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
if ok: tpass()
else: tfail()
tstart("Long sequence of struct S1")
ok = 1
s = [TypeTest.S1(1, 2, 3, 4, 5.6, 7, 8, "a", 10),
TypeTest.S1(-10, 0xffff, -1234567, 42L, 1.234, 5.678, 0, "z", 255)]
s = s * 1000
r = io.complex39(s)
if r[1].b == 0xffff:
tresult("+")
else:
ok = 0
tresult("-")
if ok: tpass()
else: tfail()
tstart("Fixed")
ok = 1
f = TypeTest.F1(123456789)
r = io.complex40(f)
if r == f:
tresult("+")
else:
ok = 0
tresult("-")
try:
f = CORBA.fixed("123456")
r = io.complex40(f)
ok = 0
tresult("-")
except CORBA.DATA_CONVERSION:
tresult("+")
if ok: tpass()
else: tfail()
tstart("WString")
ok = 1
s = u"Hello there"
r = io.complex41(s)
if r == s:
tresult("+")
else:
tresult("-")
ok = 0
s = u"This is a much longer string with lots of stuff in it. Blah blah blah"
r = io.complex41(s)
if r == s:
tresult("+")
else:
tresult("-")
ok = 0
s = u"This is a string with a \0 in it."
try:
r = io.complex41(s)
tresult("-")
ok = 0
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
if ok: tpass()
else: tfail()
tstart("Bounded wstring")
ok = 1
s = u"Hello"
r = io.complex42(s)
if r == s:
tresult("+")
else:
tresult("-")
ok = 0
s = u"This is a much longer string with lots of stuff in it. Blah blah blah"
try:
r = io.complex42(s)
tresult("-")
ok = 0
except CORBA.MARSHAL:
tresult("+")
s = u"str\0ing"
try:
r = io.complex42(s)
tresult("-")
ok = 0
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
if ok: tpass()
else: tfail()
tstart("ValueType")
ok = 1
v1 = TypeTest.V1("hello", 5)
r = io.complex43(v1)
if r.s == v1.s and r.l == v1.l:
tresult("+")
else:
ok = 0
tresult("-")
r = io.complex43(None)
if r is None:
tresult("+")
else:
ok = 0
tresult("-")
v2 = TypeTest.V2("two", 42, v1)
r = io.complex43(v2)
if r.s == v2.s and r.l == v2.l:
tresult("+")
else:
ok = 0
tresult("-")
r = io.complex43(None)
if r is None:
tresult("+")
else:
ok = 0
tresult("-")
r = io.complex44(v2)
if r.s == v2.s and r.l == v2.l and r.v.s == v1.s:
tresult("+")
else:
ok = 0
tresult("-")
v2.v = v2
r = io.complex43(v2)
if r.s == v2.s and r.l == v2.l:
tresult("+")
else:
ok = 0
tresult("-")
r = io.complex44(v2)
if r.s == v2.s and r.l == v2.l and r.v.s == v2.s:
tresult("+")
else:
ok = 0
tresult("-")
r = io.complex44(None)
if r is None:
tresult("+")
else:
ok = 0
tresult("-")
if ok: tpass()
else: tfail()
tstart("ValueBox")
ok = 1
r = io.complex45(1234)
if r == 1234:
tresult("+")
else:
ok = 0
tresult("-")
r = io.complex45(None)
if r is None:
tresult("+")
else:
ok = 0
tresult("-")
if ok: tpass()
else: tfail()
tstart("Exceptions")
ok = 1
try:
io.except1()
ok = 0
tresult("-")
except TypeTest.X1:
tresult("+")
try:
io.except2(1)
ok = 0
tresult("-")
except TypeTest.X1:
tresult("+")
try:
io.except2(2)
ok = 0
tresult("-")
except TypeTest.X2, ex:
tresult("+" + ex.b)
try:
io.except2(3)
tresult("+")
except:
ok = 0
tresult("-")
try:
io.except3(1)
ok = 0
tresult("-")
except CORBA.UNKNOWN:
tresult("+")
try:
io.except3(2)
ok = 0
tresult("-")
except CORBA.NO_PERMISSION:
tresult("+")
try:
io.except3(3)
tresult("+")
except:
ok = 0
tresult("-")
if ok: tpass()
else: tfail()
tstart("BAD_PARAM returns")
try:
io.except4(0)
except:
ok = 0
tresult("-")
for i in range(1, 10):
try:
io.except4(i)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
if ok: tpass()
else: tfail()
tstart("TypeCode")
ok = 1
io.tc1(CORBA._tc_long); tresult("+")
io.tc1(CORBA._tc_short); tresult("+")
io.tc1(CORBA._tc_boolean); tresult("+")
io.tc1(CORBA._tc_string); tresult("+")
io.tc1(TypeTest._tc_J); tresult("+")
io.tc1(TypeTest._tc_S1); tresult("+")
io.tc1(TypeTest._tc_U1); tresult("+")
io.tc1(TypeTest._tc_U2); tresult("+")
io.tc1(TypeTest._tc_E1); tresult("+")
io.tc1(TypeTest._tc_E2); tresult("+")
io.tc1(TypeTest._tc_Q1); tresult("+")
io.tc1(TypeTest._tc_Q2); tresult("+")
io.tc1(TypeTest._tc_Q3); tresult("+")
io.tc1(TypeTest._tc_Q4); tresult("+")
io.tc1(TypeTest._tc_Q5); tresult("+")
io.tc1(TypeTest._tc_Q6); tresult("+")
io.tc1(TypeTest._tc_Q7); tresult("+")
io.tc1(TypeTest._tc_Q8); tresult("+")
io.tc1(TypeTest._tc_Q9); tresult("+")
io.tc1(TypeTest._tc_Q10); tresult("+")
io.tc1(TypeTest._tc_BQ1); tresult("+")
io.tc1(TypeTest._tc_BQ2); tresult("+")
io.tc1(TypeTest._tc_BQ3); tresult("+")
io.tc1(TypeTest._tc_BQ4); tresult("+")
io.tc1(TypeTest._tc_BQ5); tresult("+")
io.tc1(TypeTest._tc_BQ6); tresult("+")
io.tc1(TypeTest._tc_BQ7); tresult("+")
io.tc1(TypeTest._tc_BQ8); tresult("+")
io.tc1(TypeTest._tc_BQ9); tresult("+")
io.tc1(TypeTest._tc_A1); tresult("+")
io.tc1(TypeTest._tc_A2); tresult("+")
io.tc1(TypeTest._tc_A3); tresult("+")
io.tc1(TypeTest._tc_A4); tresult("+")
io.tc1(TypeTest._tc_A5); tresult("+")
io.tc1(TypeTest._tc_A6); tresult("+")
io.tc1(TypeTest._tc_A7); tresult("+")
io.tc1(TypeTest._tc_A8); tresult("+")
io.tc1(TypeTest._tc_A9); tresult("+")
io.tc1(TypeTest._tc_A10); tresult("+")
io.tc1(TypeTest._tc_S2); tresult("+")
r = io.tc1(TypeTest._tc_S3)
if r.equivalent(TypeTest._tc_S3):
tresult("+")
else:
ok = 0
tresult("-")
io.tc1(TypeTest._tc_X1); tresult("+")
io.tc1(TypeTest._tc_X2); tresult("+")
io.tc1(TypeTest._tc_I); tresult("+")
io.tc1(TypeTest._tc_F1); tresult("+")
if ok: tpass()
else: tfail()
tstart("Any")
ok = 1
a = CORBA.Any(TypeTest._tc_S1,
TypeTest.S1(1, 2, 3, 4, 5.6, 7.8, 1, "a", 9))
r = io.any1(a)
if r.value().c == 3:
tresult("+")
else:
ok = 0
tresult("-")
i1 = TypeTest.S3(42, TypeTest.S3.U(0,None))
i2 = TypeTest.S3(5, TypeTest.S3.U(a=[i1]))
i3 = TypeTest.S3(17, TypeTest.S3.U(a=[i2, i1]))
a = CORBA.Any(TypeTest._tc_S3, i3)
r = io.any1(a)
if r.value().b.a[0].a == 5:
tresult("+")
else:
ok = 0
tresult("-")
ji = J_i()
jo = ji._this()
a = CORBA.Any(TypeTest._tc_J, jo)
r = io.any1(a)
if r.value()._is_equivalent(jo):
tresult("+")
else:
ok = 0
tresult("-")
id = poa.reference_to_id(jo)
del ji, jo
r = None
poa.deactivate_object(id)
a = CORBA.Any(CORBA._tc_IMP_LIMIT,
CORBA.IMP_LIMIT(12345,CORBA.COMPLETED_YES))
r = io.any1(a)
tresult(str(r.value()))
a = CORBA.Any(CORBA._tc_OBJECT_NOT_EXIST,
CORBA.OBJECT_NOT_EXIST(omniORB.OBJECT_NOT_EXIST_NoMatch,
CORBA.COMPLETED_YES))
r = io.any1(a)
tresult(str(r.value()))
if ok: tpass()
else: tfail()
tstart("Invalid Any")
ok = 1
a = CORBA.Any(TypeTest._tc_S1,
TypeTest.S1(1, 2, "Hi", 4, 5.6, 7.8, 1, "a", 9))
try:
r = io.any1(a)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
i1 = TypeTest.S3(42, TypeTest.S3.U(a="Hi"))
i2 = TypeTest.S3(5, TypeTest.S3.U(a=[i1]))
i3 = TypeTest.S3(17, TypeTest.S3.U(a=[i2, i1]))
a = CORBA.Any(TypeTest._tc_S3, i3)
try:
r = io.any1(a)
ok = 0
tresult("-")
except CORBA.BAD_PARAM, ex:
exc(ex)
tresult("+")
if ok: tpass()
else: tfail()
tstart("Empty Any")
a = CORBA.Any(CORBA._tc_null, None)
try:
r = io.any1(a)
tpass()
except:
tfail()
tstart("Context")
ok = 1
try:
ctxt = orb.get_default_context()
ctxt.set_one_value("test", "hello")
ctxt.set_one_value("test2", "there")
ctxt.set_values({"foo": "wib", "foo2": "wob", "foobarbaz": "wuz"})
r = io.context1(5, ctxt)
if r == ["test", "hello"]:
tresult("+")
else:
ok = 0
tresult("-")
r = io.context2(5, ctxt)
if len(r) == 10:
tresult("+")
else:
ok = 0
tresult("-")
except:
ok = 0
tresult("!")
if ok:
tpass()
else:
tfail()
tdone()
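# A rough usage sketch for running this test driver by hand. The file names
# below are illustrative only (not part of this script); what the main block
# actually requires is the stringified IOR of the TypeTest server object as
# argv[1], with an optional "-r" flag to repeat the tests in a loop:
#
#   python server.py > ior.txt            # hypothetical server exporting TypeTest::I
#   python client.py `cat ior.txt`        # run the tests once
#   python client.py `cat ior.txt` -r     # keep repeating the tests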
if __name__ == "__main__":
orb = CORBA.ORB_init(sys.argv, CORBA.ORB_ID)
poa = orb.resolve_initial_references("RootPOA")
poa._get_the_POAManager().activate()
io = orb.string_to_object(sys.argv[1])
doTests(orb, poa, io)
if "-r" in sys.argv:
print "\nRepeating tests..."
output = 0
while 1:
doTests(orb, poa, io)
orb.destroy()
|
|
#!/usr/bin/env python
import os
import re
import string
import sys
import time
import posixpath
import subprocess
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class NfsShares (ClusterSetup):
"""
Automatically mounts external NFS shares on StarCluster nodes
"""
def __init__(self, privatekey, publiccert, interval, sourcedirs, mountpoints, portmapport, nfsport, mountdport, cluster):
log.info("Loaded plugin: automount.NfsShares")
log.debug("automount.NfsShares.__init__ Initialising AutoMount plugin.")
log.debug("automount.NfsShares.__init__ privatekey %s" % privatekey)
log.debug("automount.NfsShares.__init__ publiccert %s" % publiccert)
log.debug("automount.NfsShares.__init__ interval %s" % interval)
log.debug("automount.NfsShares.__init__ sourcedirs %s" % sourcedirs)
log.debug("automount.NfsShares.__init__ mountpoints %s" % mountpoints)
log.debug("automount.NfsShares.__init__ portmapport %s" % portmapport)
log.debug("automount.NfsShares.__init__ nfsport %s" % nfsport)
log.debug("automount.NfsShares.__init__ mountdport %s" % mountdport)
log.debug("automount.NfsShares.__init__ cluster %s" % cluster)
self.privatekey = privatekey
self.publiccert = publiccert
self.portmapport = portmapport
self.nfsport = nfsport
self.mountdport = mountdport
self.cluster = cluster
# set default interval
if not interval: interval = 10
self.interval = interval
self.sourcedirs = sourcedirs.split(",")
self.mountpoints = mountpoints.split(",")
if len(self.sourcedirs) != len(self.mountpoints):
log.info("automount.NfsShares.__init__ length of sourcedirs ("
+ len(self.sourcedirs)
+ ") is not the same as length of mountpoints ("
+ len(self.mountpoints)
+ ")"
)
sys.exit(0)
def run(self, nodes, master, user, user_shell, volumes):
"""
Mount NFS shares on master and all nodes
"""
log.info("Running plugin automount")
log.debug("automount.NfsShares.run automount.NfsShares.run(nodes, master, user, user_shell, volumes)")
#### OPEN NFS-RELATED PORTS FOR THIS CLUSTER
self.openNfsPorts("default")
self.openNfsPorts('@sc-' + self.cluster)
#### SET HEAD NODE INTERNAL IP
self.getHeadIp();
#### FIX mountd PORT ON head AND MASTER/NODES
mountdport = "32767"
for node in nodes:
self.setMountdOnNode(node, mountdport)
self.setMountdOnHead(mountdport)
self.restartServicesOnHead()
#### MOUNT ON ALL NODES
for node in nodes:
self.mount(node)
log.info("Completed plugin automount")
def openNfsPorts(self, group):
"""
Open (fixed) NFS-related ports (portmap, nfs and mountd)
"""
portmapport = self.portmapport
nfsport = self.nfsport
mountdport = self.mountdport
log.info("Opening NFS-related ports for group: %s", group)
log.debug("automount.openNfsPorts group; %s", group)
log.debug("automount.openNfsPorts portmapport; %s", portmapport)
log.debug("automount.openNfsPorts nfsport; %s", nfsport)
log.debug("automount.openNfsPorts mountdport; %s", mountdport)
permissions = [
dict(group=group, port=nfsport, type="tcp"),
dict(group=group, port=nfsport, type="udp"),
dict(group=group, port=portmapport, type="tcp"),
dict(group=group, port=portmapport, type="udp"),
dict(group=group, port=mountdport, type="tcp"),
dict(group=group, port=mountdport, type="udp")
]
#### OPEN PORTS FROM HEAD NODE (NO SSH FROM MASTER)
commands = self.setPortCommands(group, permissions)
for command in commands:
self.runSystemCommand(command);
def setPortCommands(self, group, permissions):
groupPermissions = self.getGroupPermissions(group)
log.debug("automount.NfsShares.setPortCommands groupPermissions: %s", groupPermissions)
#### FILTER OUT EXISTING PERMISSIONS
permissions = self.filterPermissions(permissions, groupPermissions)
#### SET EC2 KEY FILE ENVIRONMENT VARIABLES
ec2vars = self.getEC2Vars()
commands = []
for permission in permissions:
command = ec2vars + 'ec2-authorize ' + permission['group'] + ' -p ' + permission['port'] + ' -P ' + permission['type']
commands.append(command)
return commands
def getGroupPermissions(self, group):
ec2vars = self.getEC2Vars()
ec2dgrp = self.runSystemCommand(ec2vars + 'ec2dgrp ' + group)
permissions = ec2dgrp.split("\n")
permissions[:1] = []
return permissions
def filterPermissions(self, permissions, groupPermissions):
log.info("Filtering permissions to exclude existing permissions")
missing = []
for i, v in enumerate(permissions):
found = 0
for index, value in enumerate(groupPermissions):
if value == '': break
elements = value.split("\t")
type = elements[4]
port = elements[5]
if type == '' or value == '': break
if type == v['type'] and port == v['port']:
found = 1
break
if found == 0:
missing.append(v)
return missing
def getEC2Vars(self):
ec2vars = "export EC2_PRIVATE_KEY=" + self.privatekey + "; "
ec2vars += "export EC2_CERT=" + self.publiccert + "; "
return ec2vars
def runSystemCommand(self, command):
log.debug(command)
return subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).stdout.read()
def getHeadIp(self):
log.info("automount.NfsShares.getHeadIp Getting headnode internal IP")
p = os.popen('curl -s http://169.254.169.254/latest/meta-data/instance-id');
instanceid = p.read()
log.debug("automount.NfsShares.getHeadIp instanceid: %s" % instanceid)
command = "ec2-describe-instances -K " + self.privatekey \
+ " -C " + self.publiccert \
+ " " + instanceid
log.debug("automount.NfsShares.getHeadIp command: %s" % command)
p = os.popen(command);
reservation = p.read()
log.debug("automount.NfsShares.getHeadIp reservation: %s" % reservation)
instance = reservation.split("INSTANCE")[1];
log.debug("automount.NfsShares.getHeadIp instance: %s" % instance)
instanceRow = instance.split('\t')
self.head_ip = instanceRow[17]
log.debug("automount.NfsShares.getHeadIp self.head_ip: %s" % self.head_ip)
def mount(self, node):
"""
Mount shares from head node on master and exec nodes
"""
log.info("Mounting shared from head node to %s", node.alias)
log.debug("automount.NfsShares.mount node.private_dns_name: %s" % node.private_dns_name)
log.debug("automount.NfsShares.mount self.head_ip: %s" % self.head_ip)
#### INSERT MOUNT POINT ENTRIES INTO /etc/fstab ON NODE
log.debug("automount.NfsShares.on_add_node Doing self._addToFstab")
for i in range(len(self.sourcedirs)):
self._addToFstab(node, self.sourcedirs[i], self.head_ip, self.mountpoints[i], self.interval)
#### INSERT ENTRIES FOR MASTER/NODES INTO /etc/exports ON HEAD NODE
log.debug("automount.NfsShares.mount Doing self._addToExports")
for i in range(len(self.sourcedirs)):
self._addToExports(node, self.sourcedirs[i])
#### MOUNT THE SHARES
for i in range(len(self.sourcedirs)):
self.mountShares(node, self.sourcedirs[i], self.head_ip, self.mountpoints[i], self.interval)
def _addToFstab(self, node, sourcedir, sourceip, mountpoint, interval):
"""
Add entries to /etc/fstab on master/exec nodes
"""
log.info("Adding /etc/fstab entry (%s on %s)", mountpoint, node.alias)
insert = self.head_ip + ":" + sourcedir + " " + mountpoint + " nfs nfsvers=3,defaults 0 0"
cmd = "echo '" + insert + "' >> /etc/fstab ;"
log.debug(cmd)
node.ssh.execute(cmd)
def _addToExports(self, node, sourcedir):
"""
Add entries to /etc/exports on head node
"""
log.info("Adding /etc/exports entry (%s to %s)", sourcedir, node.alias)
insert = sourcedir + " " + node.private_ip_address + "(async,no_root_squash,no_subtree_check,rw)"
f = open("/etc/exports", 'rb')
contents = f.read()
f.close()
insert = sourcedir + " " + node.private_ip_address + "(async,no_root_squash,no_subtree_check,rw)\n"
contents = string.replace(contents, insert,"")
contents += insert
f = open("/etc/exports", 'w')
f.write(contents)
f.close()
os.system("exportfs -ra")
os.system("service portmap restart")
os.system("service nfs restart")
def _removeFromExports(self, node, sourcedir):
"""
Remove entries from /etc/exports on head node
"""
log.info("Removing from /etc/exports entry (%s to %s)", sourcedir, node.alias)
f = open("/etc/exports", 'rb')
contents = f.read()
f.close()
insert = sourcedir + " " + node.private_ip_address + "(async,no_root_squash,no_subtree_check,rw)\n"
contents = string.replace(contents, insert,"")
f = open("/etc/exports", 'w')
f.write(contents)
f.close()
def setMountdOnNode(self, node, mountdport):
"""
Fix mountd port to same number on all hosts - head, master and exec nodes
"""
log.info("Setting mountd port on %s", node.alias)
cmd = self.mountdCommand(mountdport)
log.debug("Doing node.ssh.execute: " + cmd)
node.ssh.execute(cmd)
def setMountdOnHead(self, mountdport):
cmd = self.mountdCommand(mountdport)
log.debug("Doing os.system: " + cmd)
os.system(cmd)
def restartServicesOnNode(self, node):
node.ssh.execute("service portmap restart")
node.ssh.execute("service nfs restart")
def restartServicesOnHead(self):
os.system("service portmap restart")
os.system("service nfs restart")
def mountdCommand(self, mountdport):
"""
LATER: DETERMINE COMMAND USING uname -a ON NODE
E.G.: centos
nfsconfig = "/etc/sysconfig/nfs"
insert = "MOUNTD_PORT=" + mountdport
"""
#### ubuntu
nfsconfig = "/etc/default/nfs-kernel-server"
insert = "RPCMOUNTDOPTS=\"--port " + mountdport + " --manage-gids\""
return "echo '" + insert + "' >> " + nfsconfig + ";"
def mountShares(self, node, sourcedir, sourceip, mountpoint, interval):
"""
Mount the shares on the local filesystem - wait <interval> seconds between tries
"""
log.info("Mounting NFS shares on %s", node.alias)
cmd = "mount -t nfs " + sourceip + ":" + sourcedir + " " + mountpoint
log.info(cmd)
if not node.ssh.isdir(mountpoint): node.ssh.makedirs(mountpoint)
# TRY REPEATEDLY TO MOUNT
file_list = []
while not file_list:
log.debug("automount.NfsShares.mountShares cmd: %s" % cmd)
node.ssh.execute(cmd)
file_list = node.ssh.ls(mountpoint)
if file_list: break
log.debug("Sleeping %s seconds" % interval)
time.sleep(float(interval))
def on_add_node(self, node, nodes, master, user, user_shell, volumes):
log.info("Doing 'on_add_node' for plugin: automount.NfsShares");
log.info("Adding node %s", node.alias)
log.debug("automount.NfsShares.on_add_node ")
log.debug("automount.NfsShares.on_add_node node.private_dns_name: %s" % node.private_dns_name)
#### SET HEAD NODE INTERNAL IP
self.getHeadIp();
#### INSERT MOUNT POINT ENTRIES INTO /etc/fstab ON NODE
log.debug("automount.NfsShares.on_add_node Doing self._addToFstab")
for i in range(len(self.sourcedirs)):
self._addToFstab(node, self.sourcedirs[i], self.head_ip, self.mountpoints[i], self.interval)
#### INSERT EXPORT ENTRIES FOR NODE INTO /etc/exports ON HEAD NODE
log.debug("automount.NfsShares.on_add_node Doing self._addToExports")
for i in range(len(self.sourcedirs)):
self._addToExports(node, self.sourcedirs[i])
#### FIX mountd PORT ON head AND MASTER/NODES
mountdport = "32767"
self.setMountdOnNode(node, mountdport)
self.setMountdOnHead(mountdport)
self.restartServicesOnHead()
#### MOUNT THE SHARES
for i in range(len(self.sourcedirs)):
self.mountShares(node, self.sourcedirs[i], self.head_ip, self.mountpoints[i], self.interval)
log.info("Completed 'on_add_node' for plugin: automount.NfsShares");
def on_remove_node(self, node, nodes, master, user, user_shell, volumes):
log.info("Doing on_remove_node for plugin: automount.NfsShares")
log.info("Removing %s " % node.alias)
log.debug("automount.NfsShares.on_remove_node Removing %s from cluster" % node.alias)
log.debug("automount.NfsShares.on_remove_node node.private_dns_name: %s" % node.private_dns_name)
# REMOVE ENTRIES FROM /etc/exports ON HEAD NODE
for i in range(len(self.sourcedirs)):
self._removeFromExports(node, self.sourcedirs[i])
# RESTART NFS ON HEAD
log.info("automount.NfsShares.on_remove_node Restarting NFS on head node")
os.system("service portmap restart")
os.system("service nfs restart")
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
import unittest
import telemetry.timeline.async_slice as tracing_async_slice
import telemetry.timeline.bounds as timeline_bounds
from telemetry.perf_tests_helper import FlattenList
from telemetry.timeline import model
from telemetry.util.statistics import DivideIfPossibleOrZero
from telemetry.web_perf.metrics.rendering_stats import (
BEGIN_COMP_NAME,
BEGIN_SCROLL_UPDATE_COMP_NAME,
END_COMP_NAME,
FORWARD_SCROLL_UPDATE_COMP_NAME,
GESTURE_SCROLL_UPDATE_EVENT_NAME,
ORIGINAL_COMP_NAME,
SCROLL_UPDATE_EVENT_NAME,
UI_COMP_NAME)
from telemetry.web_perf.metrics.rendering_stats import (
ComputeInputEventLatencies)
from telemetry.web_perf.metrics.rendering_stats import GetInputLatencyEvents
from telemetry.web_perf.metrics.rendering_stats import HasRenderingStats
from telemetry.web_perf.metrics.rendering_stats import RenderingStats
class MockTimer(object):
"""A mock timer class which can generate random durations.
An instance of this class is used as a global timer to generate random
durations for stats and consistent timestamps for all mock trace events.
The unit of time is milliseconds.
"""
def __init__(self):
self.milliseconds = 0
def Get(self):
return self.milliseconds
def Advance(self, low=0, high=1):
delta = random.uniform(low, high)
self.milliseconds += delta
return delta
class ReferenceRenderingStats(object):
""" Stores expected data for comparison with actual RenderingStats """
def __init__(self):
self.frame_timestamps = []
self.frame_times = []
self.paint_times = []
self.painted_pixel_counts = []
self.record_times = []
self.recorded_pixel_counts = []
self.rasterize_times = []
self.rasterized_pixel_counts = []
self.approximated_pixel_percentages = []
def AppendNewRange(self):
self.frame_timestamps.append([])
self.frame_times.append([])
self.paint_times.append([])
self.painted_pixel_counts.append([])
self.record_times.append([])
self.recorded_pixel_counts.append([])
self.rasterize_times.append([])
self.rasterized_pixel_counts.append([])
self.approximated_pixel_percentages.append([])
class ReferenceInputLatencyStats(object):
""" Stores expected data for comparison with actual input latency stats """
def __init__(self):
self.input_event_latency = []
self.input_event = []
def AddMainThreadRenderingStats(mock_timer, thread, first_frame,
ref_stats = None):
""" Adds a random main thread rendering stats event.
thread: The timeline model thread to which the event will be added.
first_frame: Is this the first frame within the bounds of an action?
ref_stats: A ReferenceRenderingStats object to record expected values.
"""
# Create random data and timestamp for main thread rendering stats.
data = { 'frame_count': 0,
'paint_time': 0.0,
'painted_pixel_count': 0,
'record_time': mock_timer.Advance(2, 4) / 1000.0,
'recorded_pixel_count': 3000*3000 }
timestamp = mock_timer.Get()
# Add a slice with the event data to the given thread.
thread.PushCompleteSlice(
'benchmark', 'BenchmarkInstrumentation::MainThreadRenderingStats',
timestamp, duration=0.0, thread_timestamp=None, thread_duration=None,
args={'data': data})
if not ref_stats:
return
# Add timestamp only if a frame was output
if data['frame_count'] == 1:
if not first_frame:
# Add frame_time if this is not the first frame within the bounds of an
# action.
prev_timestamp = ref_stats.frame_timestamps[-1][-1]
ref_stats.frame_times[-1].append(round(timestamp - prev_timestamp, 2))
ref_stats.frame_timestamps[-1].append(timestamp)
ref_stats.paint_times[-1].append(data['paint_time'] * 1000.0)
ref_stats.painted_pixel_counts[-1].append(data['painted_pixel_count'])
ref_stats.record_times[-1].append(data['record_time'] * 1000.0)
ref_stats.recorded_pixel_counts[-1].append(data['recorded_pixel_count'])
def AddImplThreadRenderingStats(mock_timer, thread, first_frame,
ref_stats = None):
""" Adds a random impl thread rendering stats event.
thread: The timeline model thread to which the event will be added.
first_frame: Is this the first frame within the bounds of an action?
ref_stats: A ReferenceRenderingStats object to record expected values.
"""
# Create random data and timestamp for impl thread rendering stats.
data = { 'frame_count': 1,
'rasterize_time': mock_timer.Advance(5, 10) / 1000.0,
'rasterized_pixel_count': 1280*720,
'visible_content_area': random.uniform(0, 100),
'approximated_visible_content_area': random.uniform(0, 5)}
timestamp = mock_timer.Get()
# Add a slice with the event data to the given thread.
thread.PushCompleteSlice(
'benchmark', 'BenchmarkInstrumentation::ImplThreadRenderingStats',
timestamp, duration=0.0, thread_timestamp=None, thread_duration=None,
args={'data': data})
if not ref_stats:
return
# Add timestamp only if a frame was output
if data['frame_count'] == 1:
if not first_frame:
# Add frame_time if this is not the first frame within the bounds of an
# action.
prev_timestamp = ref_stats.frame_timestamps[-1][-1]
ref_stats.frame_times[-1].append(round(timestamp - prev_timestamp, 2))
ref_stats.frame_timestamps[-1].append(timestamp)
ref_stats.rasterize_times[-1].append(data['rasterize_time'] * 1000.0)
ref_stats.rasterized_pixel_counts[-1].append(data['rasterized_pixel_count'])
ref_stats.approximated_pixel_percentages[-1].append(
round(DivideIfPossibleOrZero(data['approximated_visible_content_area'],
data['visible_content_area']) * 100.0, 3))
def AddInputLatencyStats(mock_timer, start_thread, end_thread,
ref_latency_stats = None):
""" Adds a random input latency stats event.
start_thread: The start thread on which the async slice is added.
end_thread: The end thread on which the async slice is ended.
ref_latency_stats: A ReferenceInputLatencyStats object for expected values.
"""
mock_timer.Advance(2, 4)
original_comp_time = mock_timer.Get() * 1000.0
mock_timer.Advance(2, 4)
ui_comp_time = mock_timer.Get() * 1000.0
mock_timer.Advance(2, 4)
begin_comp_time = mock_timer.Get() * 1000.0
mock_timer.Advance(2, 4)
forward_comp_time = mock_timer.Get() * 1000.0
mock_timer.Advance(10, 20)
end_comp_time = mock_timer.Get() * 1000.0
data = { ORIGINAL_COMP_NAME: {'time': original_comp_time},
UI_COMP_NAME: {'time': ui_comp_time},
BEGIN_COMP_NAME: {'time': begin_comp_time},
END_COMP_NAME: {'time': end_comp_time} }
timestamp = mock_timer.Get()
async_slice = tracing_async_slice.AsyncSlice(
'benchmark', 'InputLatency', timestamp)
async_sub_slice = tracing_async_slice.AsyncSlice(
'benchmark', GESTURE_SCROLL_UPDATE_EVENT_NAME, timestamp)
async_sub_slice.args = {'data': data}
async_sub_slice.parent_slice = async_slice
async_sub_slice.start_thread = start_thread
async_sub_slice.end_thread = end_thread
async_slice.sub_slices.append(async_sub_slice)
async_slice.start_thread = start_thread
async_slice.end_thread = end_thread
start_thread.AddAsyncSlice(async_slice)
# Add scroll update latency info.
scroll_update_data = {
BEGIN_SCROLL_UPDATE_COMP_NAME: {'time': begin_comp_time},
FORWARD_SCROLL_UPDATE_COMP_NAME: {'time': forward_comp_time},
END_COMP_NAME: {'time': end_comp_time} }
scroll_async_slice = tracing_async_slice.AsyncSlice(
'benchmark', 'InputLatency', timestamp)
scroll_async_sub_slice = tracing_async_slice.AsyncSlice(
'benchmark', SCROLL_UPDATE_EVENT_NAME, timestamp)
scroll_async_sub_slice.args = {'data': scroll_update_data}
scroll_async_sub_slice.parent_slice = scroll_async_slice
scroll_async_sub_slice.start_thread = start_thread
scroll_async_sub_slice.end_thread = end_thread
scroll_async_slice.sub_slices.append(scroll_async_sub_slice)
scroll_async_slice.start_thread = start_thread
scroll_async_slice.end_thread = end_thread
start_thread.AddAsyncSlice(scroll_async_slice)
# Also add some dummy frame statistics so we can feed the resulting timeline
# to RenderingStats.
AddMainThreadRenderingStats(mock_timer, start_thread, False)
AddImplThreadRenderingStats(mock_timer, end_thread, False)
if not ref_latency_stats:
return
ref_latency_stats.input_event.append(async_sub_slice)
ref_latency_stats.input_event.append(scroll_async_sub_slice)
ref_latency_stats.input_event_latency.append((
GESTURE_SCROLL_UPDATE_EVENT_NAME,
(data[END_COMP_NAME]['time'] -
data[ORIGINAL_COMP_NAME]['time']) / 1000.0))
ref_latency_stats.input_event_latency.append((
SCROLL_UPDATE_EVENT_NAME,
(scroll_update_data[END_COMP_NAME]['time'] -
scroll_update_data[BEGIN_SCROLL_UPDATE_COMP_NAME]['time']) / 1000.0))
class RenderingStatsUnitTest(unittest.TestCase):
def testHasRenderingStats(self):
timeline = model.TimelineModel()
timer = MockTimer()
# A process without rendering stats
process_without_stats = timeline.GetOrCreateProcess(pid = 1)
thread_without_stats = process_without_stats.GetOrCreateThread(tid = 11)
process_without_stats.FinalizeImport()
self.assertFalse(HasRenderingStats(thread_without_stats))
# A process with rendering stats, but no frames in them
process_without_frames = timeline.GetOrCreateProcess(pid = 2)
thread_without_frames = process_without_frames.GetOrCreateThread(tid = 21)
AddMainThreadRenderingStats(timer, thread_without_frames, True, None)
process_without_frames.FinalizeImport()
self.assertFalse(HasRenderingStats(thread_without_frames))
# A process with rendering stats and frames in them
process_with_frames = timeline.GetOrCreateProcess(pid = 3)
thread_with_frames = process_with_frames.GetOrCreateThread(tid = 31)
AddImplThreadRenderingStats(timer, thread_with_frames, True, None)
process_with_frames.FinalizeImport()
self.assertTrue(HasRenderingStats(thread_with_frames))
def testRangeWithoutFrames(self):
timer = MockTimer()
timeline = model.TimelineModel()
# Create a renderer process, with a main thread and impl thread.
renderer = timeline.GetOrCreateProcess(pid = 2)
renderer_main = renderer.GetOrCreateThread(tid = 21)
renderer_compositor = renderer.GetOrCreateThread(tid = 22)
# Create 10 main and impl rendering stats events for Action A.
timer.Advance(2, 4)
renderer_main.BeginSlice('webkit.console', 'ActionA', timer.Get(), '')
for i in xrange(0, 10):
first = (i == 0)
AddMainThreadRenderingStats(timer, renderer_main, first, None)
AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
timer.Advance(2, 4)
renderer_main.EndSlice(timer.Get())
# Create 5 main and impl rendering stats events not within any action.
for i in xrange(0, 5):
first = (i == 0)
AddMainThreadRenderingStats(timer, renderer_main, first, None)
AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
# Create Action B without any frames. This should trigger
# NotEnoughFramesError when the RenderingStats object is created.
timer.Advance(2, 4)
renderer_main.BeginSlice('webkit.console', 'ActionB', timer.Get(), '')
timer.Advance(2, 4)
renderer_main.EndSlice(timer.Get())
renderer.FinalizeImport()
timeline_markers = timeline.FindTimelineMarkers(['ActionA', 'ActionB'])
timeline_ranges = [ timeline_bounds.Bounds.CreateFromEvent(marker)
for marker in timeline_markers ]
stats = RenderingStats(renderer, None, timeline_ranges)
self.assertEquals(0, len(stats.frame_timestamps[1]))
def testFromTimeline(self):
timeline = model.TimelineModel()
# Create a browser process and a renderer process, and a main thread and
# impl thread for each.
browser = timeline.GetOrCreateProcess(pid = 1)
browser_main = browser.GetOrCreateThread(tid = 11)
browser_compositor = browser.GetOrCreateThread(tid = 12)
renderer = timeline.GetOrCreateProcess(pid = 2)
renderer_main = renderer.GetOrCreateThread(tid = 21)
renderer_compositor = renderer.GetOrCreateThread(tid = 22)
timer = MockTimer()
renderer_ref_stats = ReferenceRenderingStats()
browser_ref_stats = ReferenceRenderingStats()
# Create 10 main and impl rendering stats events for Action A.
timer.Advance(2, 4)
renderer_main.BeginSlice('webkit.console', 'ActionA', timer.Get(), '')
renderer_ref_stats.AppendNewRange()
browser_ref_stats.AppendNewRange()
for i in xrange(0, 10):
first = (i == 0)
AddMainThreadRenderingStats(
timer, renderer_main, first, renderer_ref_stats)
AddImplThreadRenderingStats(
timer, renderer_compositor, first, renderer_ref_stats)
AddMainThreadRenderingStats(
timer, browser_main, first, browser_ref_stats)
AddImplThreadRenderingStats(
timer, browser_compositor, first, browser_ref_stats)
timer.Advance(2, 4)
renderer_main.EndSlice(timer.Get())
# Create 5 main and impl rendering stats events not within any action.
for i in xrange(0, 5):
first = (i == 0)
AddMainThreadRenderingStats(timer, renderer_main, first, None)
AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
AddMainThreadRenderingStats(timer, browser_main, first, None)
AddImplThreadRenderingStats(timer, browser_compositor, first, None)
# Create 10 main and impl rendering stats events for Action B.
timer.Advance(2, 4)
renderer_main.BeginSlice('webkit.console', 'ActionB', timer.Get(), '')
renderer_ref_stats.AppendNewRange()
browser_ref_stats.AppendNewRange()
for i in xrange(0, 10):
first = (i == 0)
AddMainThreadRenderingStats(
timer, renderer_main, first, renderer_ref_stats)
AddImplThreadRenderingStats(
timer, renderer_compositor, first, renderer_ref_stats)
AddMainThreadRenderingStats(
timer, browser_main, first, browser_ref_stats)
AddImplThreadRenderingStats(
timer, browser_compositor, first, browser_ref_stats)
timer.Advance(2, 4)
renderer_main.EndSlice(timer.Get())
# Create 10 main and impl rendering stats events for Action A.
timer.Advance(2, 4)
renderer_main.BeginSlice('webkit.console', 'ActionA', timer.Get(), '')
renderer_ref_stats.AppendNewRange()
browser_ref_stats.AppendNewRange()
for i in xrange(0, 10):
first = (i == 0)
AddMainThreadRenderingStats(
timer, renderer_main, first, renderer_ref_stats)
AddImplThreadRenderingStats(
timer, renderer_compositor, first, renderer_ref_stats)
AddMainThreadRenderingStats(
timer, browser_main, first, browser_ref_stats)
AddImplThreadRenderingStats(
timer, browser_compositor, first, browser_ref_stats)
timer.Advance(2, 4)
renderer_main.EndSlice(timer.Get())
browser.FinalizeImport()
renderer.FinalizeImport()
timeline_markers = timeline.FindTimelineMarkers(
['ActionA', 'ActionB', 'ActionA'])
timeline_ranges = [ timeline_bounds.Bounds.CreateFromEvent(marker)
for marker in timeline_markers ]
stats = RenderingStats(renderer, browser, timeline_ranges)
# Compare rendering stats to reference.
self.assertEquals(stats.frame_timestamps,
browser_ref_stats.frame_timestamps)
self.assertEquals(stats.frame_times, browser_ref_stats.frame_times)
self.assertEquals(stats.rasterize_times, renderer_ref_stats.rasterize_times)
self.assertEquals(stats.rasterized_pixel_counts,
renderer_ref_stats.rasterized_pixel_counts)
self.assertEquals(stats.approximated_pixel_percentages,
renderer_ref_stats.approximated_pixel_percentages)
self.assertEquals(stats.paint_times, renderer_ref_stats.paint_times)
self.assertEquals(stats.painted_pixel_counts,
renderer_ref_stats.painted_pixel_counts)
self.assertEquals(stats.record_times, renderer_ref_stats.record_times)
self.assertEquals(stats.recorded_pixel_counts,
renderer_ref_stats.recorded_pixel_counts)
def testInputLatencyFromTimeline(self):
timeline = model.TimelineModel()
# Create a browser process and a renderer process.
browser = timeline.GetOrCreateProcess(pid = 1)
browser_main = browser.GetOrCreateThread(tid = 11)
renderer = timeline.GetOrCreateProcess(pid = 2)
renderer_main = renderer.GetOrCreateThread(tid = 21)
timer = MockTimer()
ref_latency = ReferenceInputLatencyStats()
# Create 10 input latency stats events for Action A.
timer.Advance(2, 4)
renderer_main.BeginSlice('webkit.console', 'ActionA', timer.Get(), '')
for _ in xrange(0, 10):
AddInputLatencyStats(timer, browser_main, renderer_main, ref_latency)
timer.Advance(2, 4)
renderer_main.EndSlice(timer.Get())
# Create 5 input latency stats events not within any action.
timer.Advance(2, 4)
for _ in xrange(0, 5):
AddInputLatencyStats(timer, browser_main, renderer_main, None)
# Create 10 input latency stats events for Action B.
timer.Advance(2, 4)
renderer_main.BeginSlice('webkit.console', 'ActionB', timer.Get(), '')
for _ in xrange(0, 10):
AddInputLatencyStats(timer, browser_main, renderer_main, ref_latency)
timer.Advance(2, 4)
renderer_main.EndSlice(timer.Get())
# Create 10 input latency stats events for Action A.
timer.Advance(2, 4)
renderer_main.BeginSlice('webkit.console', 'ActionA', timer.Get(), '')
for _ in xrange(0, 10):
AddInputLatencyStats(timer, browser_main, renderer_main, ref_latency)
timer.Advance(2, 4)
renderer_main.EndSlice(timer.Get())
browser.FinalizeImport()
renderer.FinalizeImport()
input_events = []
timeline_markers = timeline.FindTimelineMarkers(
['ActionA', 'ActionB', 'ActionA'])
timeline_ranges = [timeline_bounds.Bounds.CreateFromEvent(marker)
for marker in timeline_markers]
for timeline_range in timeline_ranges:
if timeline_range.is_empty:
continue
input_events.extend(GetInputLatencyEvents(browser, timeline_range))
self.assertEquals(input_events, ref_latency.input_event)
input_event_latency_result = ComputeInputEventLatencies(input_events)
self.assertEquals(input_event_latency_result,
ref_latency.input_event_latency)
stats = RenderingStats(renderer, browser, timeline_ranges)
self.assertEquals(FlattenList(stats.input_event_latency), [
latency for name, latency in ref_latency.input_event_latency
if name != SCROLL_UPDATE_EVENT_NAME])
self.assertEquals(FlattenList(stats.scroll_update_latency), [
latency for name, latency in ref_latency.input_event_latency
if name == SCROLL_UPDATE_EVENT_NAME])
self.assertEquals(FlattenList(stats.gesture_scroll_update_latency), [
latency for name, latency in ref_latency.input_event_latency
if name == GESTURE_SCROLL_UPDATE_EVENT_NAME])
|
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_firewall_shaper_per_ip_shaper
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_firewall_shaper_per_ip_shaper.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_firewall_shaper_per_ip_shaper_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_shaper_per_ip_shaper': {
'bandwidth_unit': 'kbps',
'diffserv_forward': 'enable',
'diffserv_reverse': 'enable',
'diffservcode_forward': 'test_value_6',
'diffservcode_rev': 'test_value_7',
'max_bandwidth': '8',
'max_concurrent_session': '9',
'name': 'default_name_10'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_shaper_per_ip_shaper.fortios_firewall_shaper(input_data, fos_instance)
expected_data = {
'bandwidth-unit': 'kbps',
'diffserv-forward': 'enable',
'diffserv-reverse': 'enable',
'diffservcode-forward': 'test_value_6',
'diffservcode-rev': 'test_value_7',
'max-bandwidth': '8',
'max-concurrent-session': '9',
'name': 'default_name_10'
}
set_method_mock.assert_called_with('firewall.shaper', 'per-ip-shaper', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_firewall_shaper_per_ip_shaper_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_shaper_per_ip_shaper': {
'bandwidth_unit': 'kbps',
'diffserv_forward': 'enable',
'diffserv_reverse': 'enable',
'diffservcode_forward': 'test_value_6',
'diffservcode_rev': 'test_value_7',
'max_bandwidth': '8',
'max_concurrent_session': '9',
'name': 'default_name_10'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_shaper_per_ip_shaper.fortios_firewall_shaper(input_data, fos_instance)
expected_data = {
'bandwidth-unit': 'kbps',
'diffserv-forward': 'enable',
'diffserv-reverse': 'enable',
'diffservcode-forward': 'test_value_6',
'diffservcode-rev': 'test_value_7',
'max-bandwidth': '8',
'max-concurrent-session': '9',
'name': 'default_name_10'
}
set_method_mock.assert_called_with('firewall.shaper', 'per-ip-shaper', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_firewall_shaper_per_ip_shaper_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'firewall_shaper_per_ip_shaper': {
'bandwidth_unit': 'kbps',
'diffserv_forward': 'enable',
'diffserv_reverse': 'enable',
'diffservcode_forward': 'test_value_6',
'diffservcode_rev': 'test_value_7',
'max_bandwidth': '8',
'max_concurrent_session': '9',
'name': 'default_name_10'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_shaper_per_ip_shaper.fortios_firewall_shaper(input_data, fos_instance)
delete_method_mock.assert_called_with('firewall.shaper', 'per-ip-shaper', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_firewall_shaper_per_ip_shaper_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'firewall_shaper_per_ip_shaper': {
'bandwidth_unit': 'kbps',
'diffserv_forward': 'enable',
'diffserv_reverse': 'enable',
'diffservcode_forward': 'test_value_6',
'diffservcode_rev': 'test_value_7',
'max_bandwidth': '8',
'max_concurrent_session': '9',
'name': 'default_name_10'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_shaper_per_ip_shaper.fortios_firewall_shaper(input_data, fos_instance)
delete_method_mock.assert_called_with('firewall.shaper', 'per-ip-shaper', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_firewall_shaper_per_ip_shaper_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_shaper_per_ip_shaper': {
'bandwidth_unit': 'kbps',
'diffserv_forward': 'enable',
'diffserv_reverse': 'enable',
'diffservcode_forward': 'test_value_6',
'diffservcode_rev': 'test_value_7',
'max_bandwidth': '8',
'max_concurrent_session': '9',
'name': 'default_name_10'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_shaper_per_ip_shaper.fortios_firewall_shaper(input_data, fos_instance)
expected_data = {
'bandwidth-unit': 'kbps',
'diffserv-forward': 'enable',
'diffserv-reverse': 'enable',
'diffservcode-forward': 'test_value_6',
'diffservcode-rev': 'test_value_7',
'max-bandwidth': '8',
'max-concurrent-session': '9',
'name': 'default_name_10'
}
set_method_mock.assert_called_with('firewall.shaper', 'per-ip-shaper', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_firewall_shaper_per_ip_shaper_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_shaper_per_ip_shaper': {
'random_attribute_not_valid': 'tag',
'bandwidth_unit': 'kbps',
'diffserv_forward': 'enable',
'diffserv_reverse': 'enable',
'diffservcode_forward': 'test_value_6',
'diffservcode_rev': 'test_value_7',
'max_bandwidth': '8',
'max_concurrent_session': '9',
'name': 'default_name_10'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_shaper_per_ip_shaper.fortios_firewall_shaper(input_data, fos_instance)
expected_data = {
'bandwidth-unit': 'kbps',
'diffserv-forward': 'enable',
'diffserv-reverse': 'enable',
'diffservcode-forward': 'test_value_6',
'diffservcode-rev': 'test_value_7',
'max-bandwidth': '8',
'max-concurrent-session': '9',
'name': 'default_name_10'
}
set_method_mock.assert_called_with('firewall.shaper', 'per-ip-shaper', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
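# Illustrative sketch (not part of the original test module): the tests above
# feed playbook-style keys with underscores and expect the FortiOS payload to
# use hyphens, with unknown keys dropped (see the foreign-attribute test).
# A minimal version of that key mapping, assuming a flat dict, could be:
def _example_underscore_to_hyphen(data):
    """Convert underscore keys to hyphenated keys for a flat dict."""
    return dict((key.replace('_', '-'), value) for key, value in data.items())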
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implementations based on AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
from oslo.config import cfg
from savanna.openstack.common import excutils
from savanna.openstack.common.gettextutils import _ # noqa
from savanna.openstack.common import local
from savanna.openstack.common import log as logging
from savanna.openstack.common.rpc import common as rpc_common
amqp_opts = [
cfg.BoolOpt('amqp_durable_queues',
default=False,
deprecated_name='rabbit_durable_queues',
deprecated_group='DEFAULT',
help='Use durable queues in amqp.'),
cfg.BoolOpt('amqp_auto_delete',
default=False,
help='Auto-delete queues in amqp.'),
]
cfg.CONF.register_opts(amqp_opts)
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, conf, connection_cls, *args, **kwargs):
self.connection_cls = connection_cls
self.conf = conf
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
self.reply_proxy = None
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug(_('Pool creating new connection'))
return self.connection_cls(self.conf)
def empty(self):
while self.free_items:
self.get().close()
# Force a new connection pool to be created.
# Note that this was added due to failing unit test cases. The issue
# is the above "while loop" gets all the cached connections from the
# pool and closes them, but never returns them to the pool, a pool
# leak. The unit tests hang waiting for an item to be returned to the
# pool. The unit tests get here via the tearDown() method. In the run
# time code, it gets here via cleanup() and only appears in service.py
# just before doing a sys.exit(), so cleanup() only happens once and
# the leakage is not a problem.
self.connection_cls.pool = None
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
with _pool_create_sem:
# Make sure only one thread tries to create the connection pool.
if not connection_cls.pool:
connection_cls.pool = Pool(conf, connection_cls)
return connection_cls.pool
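# Illustrative sketch (not part of the original module): get_connection_pool()
# lazily creates one shared Pool per connection class. Assuming a connection
# class that exposes a class-level `pool` attribute (as the impl_kombu and
# impl_qpid connection classes do), usage looks like:
def _example_get_connection_pool(conf, connection_cls):
    """Borrow a connection from the shared pool and give it back."""
    pool = get_connection_pool(conf, connection_cls)
    connection = pool.get()
    pool.put(connection)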
class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the create_connection() caller.
This is essentially a wrapper around Connection that supports 'with'.
It can also return a new Connection, or one from a pool.
    The class will also catch when an instance of this class is to be
    deleted. With that we can return Connections to the pool on exceptions
    and so forth without making the caller be responsible for catching them.
    If possible the class makes sure to return a connection to the pool.
"""
def __init__(self, conf, connection_pool, pooled=True, server_params=None):
"""Create a new connection, or get one from the pool."""
self.connection = None
self.conf = conf
self.connection_pool = connection_pool
if pooled:
self.connection = connection_pool.get()
else:
self.connection = connection_pool.connection_cls(
conf,
server_params=server_params)
self.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self."""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
self.connection.reset()
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def create_consumer(self, topic, proxy, fanout=False):
self.connection.create_consumer(topic, proxy, fanout)
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
ack_on_error=True):
self.connection.join_consumer_pool(callback,
pool_name,
topic,
exchange_name,
ack_on_error)
def consume_in_thread(self):
self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance."""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
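# Illustrative sketch (not part of the original module): ConnectionContext is
# normally used as a context manager so a pooled connection is returned to the
# pool even when an exception escapes; `topic` and `msg` are hypothetical.
def _example_connection_context(conf, connection_pool, topic, msg):
    """Send one topic message over a pooled connection."""
    with ConnectionContext(conf, connection_pool) as conn:
        conn.topic_send(topic, rpc_common.serialize_msg(msg))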
class ReplyProxy(ConnectionContext):
"""Connection class for RPC replies / callbacks."""
def __init__(self, conf, connection_pool):
self._call_waiters = {}
self._num_call_waiters = 0
self._num_call_waiters_wrn_threshhold = 10
self._reply_q = 'reply_' + uuid.uuid4().hex
super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
self.declare_direct_consumer(self._reply_q, self._process_data)
self.consume_in_thread()
def _process_data(self, message_data):
msg_id = message_data.pop('_msg_id', None)
waiter = self._call_waiters.get(msg_id)
if not waiter:
LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
', message : %(data)s'), {'msg_id': msg_id,
'data': message_data})
LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
else:
waiter.put(message_data)
def add_call_waiter(self, waiter, msg_id):
self._num_call_waiters += 1
if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
            LOG.warn(_('Number of call waiters is greater than warning '
                       'threshold: %d. There could be a MulticallProxyWaiter '
                       'leak.') % self._num_call_waiters_wrn_threshhold)
self._num_call_waiters_wrn_threshhold *= 2
self._call_waiters[msg_id] = waiter
def del_call_waiter(self, msg_id):
self._num_call_waiters -= 1
del self._call_waiters[msg_id]
def get_reply_q(self):
return self._reply_q
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
failure=None, ending=False, log_failure=True):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
"""
with ConnectionContext(conf, connection_pool) as conn:
if failure:
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
msg = {'result': reply, 'failure': failure}
if ending:
msg['ending'] = True
_add_unique_id(msg)
# If a reply_q exists, add the msg_id to the reply and pass the
# reply_q to direct_send() to use it as the response queue.
        # Otherwise use the msg_id for backward compatibility.
if reply_q:
msg['_msg_id'] = msg_id
conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
else:
conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
self.reply_q = kwargs.pop('reply_q', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
values['reply_q'] = self.reply_q
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None, log_failure=True):
if self.msg_id:
msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
reply, failure, ending, log_failure)
if ending:
self.msg_id = None
def unpack_context(conf, msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
# NOTE(vish): Some versions of python don't like unicode keys
# in kwargs.
key = str(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
return ctx
def pack_context(msg, context):
"""Pack context into msg.
Values for message keys need to be less than 255 chars, so we pull
context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same
for args at some point.
"""
context_d = dict([('_context_%s' % key, value)
for (key, value) in context.to_dict().iteritems()])
msg.update(context_d)
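# Illustrative sketch (not part of the original module): pack_context() copies
# each context value into its own '_context_*' message key, and
# unpack_context() strips those keys back off on the consumer side.
def _example_pack_unpack_context(conf, context):
    """Round-trip a context through a message dict (illustration only)."""
    msg = {'method': 'echo', 'args': {'value': 42}}
    pack_context(msg, context)
    return unpack_context(conf, msg)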
class _MsgIdCache(object):
"""This class checks any duplicate messages."""
# NOTE: This value is considered can be a configuration item, but
# it is not necessary to change its value in most cases,
# so let this value as static for now.
DUP_MSG_CHECK_SIZE = 16
def __init__(self, **kwargs):
self.prev_msgids = collections.deque([],
maxlen=self.DUP_MSG_CHECK_SIZE)
def check_duplicate_message(self, message_data):
"""AMQP consumers may read same message twice when exceptions occur
before ack is returned. This method prevents doing it.
"""
if UNIQUE_ID in message_data:
msg_id = message_data[UNIQUE_ID]
if msg_id not in self.prev_msgids:
self.prev_msgids.append(msg_id)
else:
raise rpc_common.DuplicateMessageError(msg_id=msg_id)
def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id})
LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
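# Illustrative sketch (not part of the original module): outgoing messages are
# stamped with a UNIQUE_ID, and _MsgIdCache rejects a message whose stamp has
# already been seen (e.g. a redelivery that happened before the ack arrived).
def _example_duplicate_detection(msg):
    """Stamp a message and show that a second delivery would be rejected."""
    cache = _MsgIdCache()
    _add_unique_id(msg)
    cache.check_duplicate_message(msg)       # first delivery: accepted
    try:
        cache.check_duplicate_message(msg)   # redelivery: rejected
    except rpc_common.DuplicateMessageError:
        pass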
class _ThreadPoolWithWait(object):
"""Base class for a delayed invocation manager.
Used by the Connection class to start up green threads
to handle incoming messages.
"""
def __init__(self, conf, connection_pool):
self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
self.connection_pool = connection_pool
self.conf = conf
def wait(self):
"""Wait for all callback threads to exit."""
self.pool.waitall()
class CallbackWrapper(_ThreadPoolWithWait):
"""Wraps a straight callback.
Allows it to be invoked in a green thread.
"""
def __init__(self, conf, callback, connection_pool):
"""Initiates CallbackWrapper object.
:param conf: cfg.CONF instance
:param callback: a callable (probably a function)
:param connection_pool: connection pool as returned by
get_connection_pool()
"""
super(CallbackWrapper, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.callback = callback
def __call__(self, message_data):
self.pool.spawn_n(self.callback, message_data)
class ProxyCallback(_ThreadPoolWithWait):
"""Calls methods on a proxy object based on method and args."""
def __init__(self, conf, proxy, connection_pool):
super(ProxyCallback, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.proxy = proxy
self.msg_id_cache = _MsgIdCache()
def __call__(self, message_data):
"""Consumer callback to call a method on a proxy object.
Parses the message for validity and fires off a thread to call the
proxy object method.
Message data should be a dictionary with two keys:
method: string representing the method to call
args: dictionary of arg: value
Example: {'method': 'echo', 'args': {'value': 42}}
"""
# It is important to clear the context here, because at this point
# the previous context is stored in local.store.context
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
self.msg_id_cache.check_duplicate_message(message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
args = message_data.get('args', {})
version = message_data.get('version')
namespace = message_data.get('namespace')
if not method:
LOG.warn(_('no method for message: %s') % message_data)
ctxt.reply(_('No method for message: %s') % message_data,
connection_pool=self.connection_pool)
return
self.pool.spawn_n(self._process_data, ctxt, version, method,
namespace, args)
def _process_data(self, ctxt, version, method, namespace, args):
"""Process a message in a new thread.
If the proxy object we have has a dispatch method
(see rpc.dispatcher.RpcDispatcher), pass it the version,
method, and args and let it dispatch as appropriate. If not, use
the old behavior of magically calling the specified method on the
proxy we have here.
"""
ctxt.update_store()
try:
rval = self.proxy.dispatch(ctxt, version, method, namespace,
**args)
# Check if the result was a generator
if inspect.isgenerator(rval):
for x in rval:
ctxt.reply(x, None, connection_pool=self.connection_pool)
else:
ctxt.reply(rval, None, connection_pool=self.connection_pool)
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
except rpc_common.ClientException as e:
LOG.debug(_('Expected exception during message handling (%s)') %
e._exc_info[1])
ctxt.reply(None, e._exc_info,
connection_pool=self.connection_pool,
log_failure=False)
except Exception:
# sys.exc_info() is deleted by LOG.exception().
exc_info = sys.exc_info()
LOG.error(_('Exception during message handling'),
exc_info=exc_info)
ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
class MulticallProxyWaiter(object):
def __init__(self, conf, msg_id, timeout, connection_pool):
self._msg_id = msg_id
self._timeout = timeout or conf.rpc_response_timeout
self._reply_proxy = connection_pool.reply_proxy
self._done = False
self._got_ending = False
self._conf = conf
self._dataqueue = queue.LightQueue()
# Add this caller to the reply proxy's call_waiters
self._reply_proxy.add_call_waiter(self, self._msg_id)
self.msg_id_cache = _MsgIdCache()
def put(self, data):
self._dataqueue.put(data)
def done(self):
if self._done:
return
self._done = True
# Remove this caller from reply proxy's call_waiters
self._reply_proxy.del_call_waiter(self._msg_id)
def _process_data(self, data):
result = None
self.msg_id_cache.check_duplicate_message(data)
if data['failure']:
failure = data['failure']
result = rpc_common.deserialize_remote_exception(self._conf,
failure)
elif data.get('ending', False):
self._got_ending = True
else:
result = data['result']
return result
def __iter__(self):
"""Return a result until we get a reply with an 'ending' flag."""
if self._done:
raise StopIteration
while True:
try:
data = self._dataqueue.get(timeout=self._timeout)
result = self._process_data(data)
except queue.Empty:
self.done()
raise rpc_common.Timeout()
except Exception:
with excutils.save_and_reraise_exception():
self.done()
if self._got_ending:
self.done()
raise StopIteration
if isinstance(result, Exception):
self.done()
raise result
yield result
def create_connection(conf, new, connection_pool):
"""Create a connection."""
return ConnectionContext(conf, connection_pool, pooled=not new)
_reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
LOG.debug(_('Making synchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
_add_unique_id(msg)
pack_context(msg, context)
with _reply_proxy_create_sem:
if not connection_pool.reply_proxy:
connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
return wait_msg
def call(conf, context, topic, msg, timeout, connection_pool):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout, connection_pool)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
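# Illustrative sketch (not part of the original module): call() is multicall()
# drained to its last reply, while cast() below fires and forgets; the topic
# and message used here are hypothetical placeholders.
def _example_call(conf, context, connection_pool):
    """Perform a blocking RPC call and return its single result."""
    msg = {'method': 'echo', 'args': {'value': 42}}
    return call(conf, context, 'example_topic', msg,
                timeout=None, connection_pool=connection_pool)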
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
"""Sends a message on a topic to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast_to_server(conf, context, server_params, topic, msg,
connection_pool):
"""Sends a message on a fanout exchange to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def notify(conf, context, topic, msg, connection_pool, envelope):
"""Sends a notification event on a topic."""
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
dict(event_type=msg.get('event_type'),
topic=topic))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
if envelope:
msg = rpc_common.serialize_msg(msg)
conn.notify_send(topic, msg)
def cleanup(connection_pool):
if connection_pool:
connection_pool.empty()
def get_control_exchange(conf):
return conf.control_exchange
|
|
# -*- coding: utf-8 -*-
"""Module providing user dashboard content type and functionality"""
import json
import datetime
import os
import time
import uuid as uuid_tool
from Acquisition import aq_inner
from Acquisition import aq_parent
from five import grok
from plone import api
from plone.app.contentlisting.interfaces import IContentListing
from plone.app.uuid.utils import uuidToObject
from plone.dexterity.content import Container
from plone.directives import form
from plone.event.utils import pydt
from plone.keyring import django_random
from plone.namedfile.field import NamedBlobImage
from plone.namedfile.interfaces import IImageScaleTraversable
from string import Template
from zope import schema
from zope.lifecycleevent import modified
from xpose.seodash.project import IProject
from xpose.seodash.report import IReport
from xpose.seodash import MessageFactory as _
class IDashboard(form.Schema, IImageScaleTraversable):
"""
A project dashboard
"""
ga_id = schema.TextLine(
title=_(u"GA Site ID"),
required=False,
)
projects = schema.List(
title=_(u"Dashboard settings"),
description=_(u"Settings containing a project list"),
value_type=schema.TextLine(
title=_(u"Project"),
),
required=False,
)
logo = NamedBlobImage(
title=_(u"Logo Image"),
description=_(u"Upload optional customer logo"),
required=False,
)
class Dashboard(Container):
grok.implements(IDashboard)
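# Illustrative sketch (not part of the original module): a dashboard is a
# plain dexterity container, so it can be created programmatically with
# plone.api. The portal type name 'xpose.seodash.dashboard' is an assumption
# inferred from the report type name used further below.
def _example_create_dashboard(container):
    """Create a dashboard object inside the given container."""
    return api.content.create(
        type='xpose.seodash.dashboard',  # assumed portal_type
        id='dashboard',
        title=u'Customer Dashboard',
        container=container,
    )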
class View(grok.View):
grok.context(IDashboard)
grok.require('zope2.View')
grok.name('view')
def update(self):
self.has_reports = self.report_idx() > 0
def reports(self):
context = aq_inner(self.context)
catalog = api.portal.get_tool(name='portal_catalog')
items = catalog(object_provides=IReport.__identifier__,
path=dict(query='/'.join(context.getPhysicalPath()),
depth=2),
sort_on='created',
sort_order='reverse')
return IContentListing(items)
def report_idx(self):
return len(self.reports())
def timestamp(self, uid):
item = api.content.get(UID=uid)
date = item.created()
date = pydt(date)
timestamp = {}
timestamp['day'] = date.strftime("%d")
timestamp['month'] = date.strftime("%m")
timestamp['year'] = date.strftime("%Y")
timestamp['date'] = date
return timestamp
def can_edit(self):
context = aq_inner(self.context)
is_adm = False
if not api.user.is_anonymous():
user = api.user.get_current()
roles = api.user.get_roles(username=user.getId(), obj=context)
            if 'Manager' in roles or 'Site Administrator' in roles:
is_adm = True
return is_adm
class Reports(grok.View):
grok.context(IDashboard)
grok.require('zope2.View')
grok.name('reports')
def update(self):
self.has_reports = self.report_idx() > 0
def reports(self):
context = aq_inner(self.context)
catalog = api.portal.get_tool(name='portal_catalog')
items = catalog(object_provides=IReport.__identifier__,
path=dict(query='/'.join(context.getPhysicalPath()),
depth=2),
sort_on='modified',
sort_order='reverse')
return items
def report_idx(self):
return len(self.reports())
def timestamp(self, uid):
item = api.content.get(UID=uid)
date = item.created()
date = pydt(date)
timestamp = {}
timestamp['day'] = date.strftime("%d")
timestamp['month'] = date.strftime("%m")
timestamp['year'] = date.strftime("%Y")
timestamp['date'] = date
return timestamp
def can_edit(self):
context = aq_inner(self.context)
is_adm = False
if not api.user.is_anonymous():
user = api.user.get_current()
roles = api.user.get_roles(username=user.getId(), obj=context)
            if 'Manager' in roles or 'Site Administrator' in roles:
is_adm = True
return is_adm
class ReportView(grok.View):
grok.context(IDashboard)
grok.require('cmf.ModifyPortalContent')
grok.name('report')
@property
def traverse_subpath(self):
return self.subpath
def publishTraverse(self, request, name):
if not hasattr(self, 'subpath'):
self.subpath = []
self.subpath.append(name)
return self
def report(self):
uuid = self.traverse_subpath[0]
return api.content.get(UID=uuid)
def report_data(self):
data = {}
if self.report():
item = self.report()
data = getattr(item, 'report')
return data
class ReportLayout(grok.View):
grok.context(IDashboard)
grok.require('zope2.View')
grok.name('report-layout')
def update(self):
self.has_projects = len(self.projects()) > 0
self.show_projectlist = len(self.projects()) > 1
self.has_reports = len(self.reports()) > 0
def reports(self):
context = aq_inner(self.context)
catalog = api.portal.get_tool(name='portal_catalog')
items = catalog(object_provides=IReport.__identifier__,
path=dict(query='/'.join(context.getPhysicalPath()),
depth=2),
sort_on='modified',
sort_order='reverse')
return items
def get_latest_report(self, uuid):
item = uuidToObject(uuid)
results = item.restrictedTraverse('@@folderListing')(
portal_type='xpose.seodash.report',
sort_on='modified',
sort_order='reverse')
return results[0]
def render_report(self, uuid):
item = uuidToObject(uuid)
return item.restrictedTraverse('@@content-view')()
def active_project(self):
return self.projects()[0]
def projects(self):
context = aq_inner(self.context)
catalog = api.portal.get_tool(name='portal_catalog')
items = catalog(object_provides=IProject.__identifier__,
path=dict(query='/'.join(context.getPhysicalPath()),
depth=1),
sort_on='getObjPositionInParent')
return items
def can_edit(self):
context = aq_inner(self.context)
is_adm = False
if not api.user.is_anonymous():
user = api.user.get_current()
roles = api.user.get_roles(username=user.getId(), obj=context)
            if 'Manager' in roles or 'Site Administrator' in roles:
is_adm = True
return is_adm
class AddReport(grok.View):
grok.context(IDashboard)
grok.require('cmf.ModifyPortalContent')
grok.name('add-report')
def render(self):
context = aq_inner(self.context)
uuid = self._create_report()
next_url = '{0}/report/{1}'.format(context.absolute_url(), uuid)
return self.request.response.redirect(next_url)
def project_info(self):
context = aq_inner(self.context)
parent = aq_parent(context)
from xpose.seodash.dashboard import IDashboard
if IDashboard.providedBy(parent):
container = parent
else:
container = aq_parent(parent)
return getattr(container, 'projects')
def timestamp(self):
date = datetime.datetime.now()
timestamp = {}
timestamp['month'] = date.strftime("%m")
timestamp['year'] = date.strftime("%Y")
return timestamp
def _create_report(self):
context = aq_inner(self.context)
project_list = getattr(context, 'projects')
date = datetime.datetime.now()
token = django_random.get_random_string(length=24)
item = api.content.create(
type='xpose.seodash.report',
id=token,
title='Report {0} {1}'.format(date.strftime("%B"),
date.strftime("%Y")),
container=context,
safe_id=True
)
uuid = api.content.get_uuid(obj=item)
template_file = os.path.join(os.path.dirname(__file__),
'report.json')
        with open(template_file) as template_json:
            template = Template(template_json.read())
template_vars = {
'id': uuid_tool.uuid4(),
'uid': uuid,
'timestamp': int(time.time()),
'created': datetime.datetime.now(),
'dashboard': api.content.get_uuid(obj=context),
'project': json.dumps(project_list[0]),
'xd1uid': uuid_tool.uuid4(),
'xd2uid': uuid_tool.uuid4(),
'xd3uid': uuid_tool.uuid4(),
'xd4uid': uuid_tool.uuid4(),
'xd5uid': uuid_tool.uuid4(),
'xd6uid': uuid_tool.uuid4(),
'xd7uid': uuid_tool.uuid4()
}
report = template.substitute(template_vars)
tmpl = report.replace('\n', '')
setattr(item, 'report', tmpl)
projects = self.project_info()
project = projects[0]
pid = project['title']
setattr(item, 'projectId', pid)
modified(item)
        item.reindexObject(idxs=['modified'])
return uuid
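# Illustrative sketch (not part of the original module): the report body built
# by _create_report() above is plain string.Template substitution over
# report.json, where each '$placeholder' is replaced by the assembled values.
# The same mechanism in isolation:
def _example_template_substitution():
    """Show the string.Template mechanism used by _create_report()."""
    tmpl = Template('{"id": "$id", "uid": "$uid"}')
    return tmpl.substitute({'id': uuid_tool.uuid4(), 'uid': 'abc123'})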
|
|
import asyncio
import functools
import json
import os
import string
import escapism
from jupyterhub.proxy import Proxy
from jupyterhub.utils import exponential_backoff
from kubernetes_asyncio import client
from traitlets import Unicode
from .clients import load_config
from .clients import shared_client
from .objects import make_ingress
from .reflector import ResourceReflector
from .utils import generate_hashed_slug
class IngressReflector(ResourceReflector):
kind = 'ingresses'
api_group_name = 'ExtensionsV1beta1Api'
@property
def ingresses(self):
return self.resources
class ServiceReflector(ResourceReflector):
kind = 'services'
@property
def services(self):
return self.resources
class EndpointsReflector(ResourceReflector):
kind = 'endpoints'
@property
def endpoints(self):
return self.resources
class KubeIngressProxy(Proxy):
namespace = Unicode(
config=True,
help="""
Kubernetes namespace to spawn ingresses for single-user servers in.
If running inside a kubernetes cluster with service accounts enabled,
defaults to the current namespace. If not, defaults to 'default'
""",
)
def _namespace_default(self):
"""
Set namespace default to current namespace if running in a k8s cluster
If not in a k8s cluster with service accounts enabled, default to
'default'
"""
ns_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'
if os.path.exists(ns_path):
with open(ns_path) as f:
return f.read().strip()
return 'default'
component_label = Unicode(
'singleuser-server',
config=True,
help="""
The component label used to tag the user pods. This can be used to override
the spawner behavior when dealing with multiple hub instances in the same
namespace. Usually helpful for CI workflows.
""",
)
k8s_api_ssl_ca_cert = Unicode(
"",
config=True,
help="""
Location (absolute filepath) for CA certs of the k8s API server.
        Typically this is unnecessary; CA certs are picked up by
config.load_incluster_config() or config.load_kube_config.
In rare non-standard cases, such as using custom intermediate CA
for your cluster, you may need to mount root CA's elsewhere in
your Pod/Container and point this variable to that filepath
""",
)
k8s_api_host = Unicode(
"",
config=True,
help="""
Full host name of the k8s API server ("https://hostname:port").
        Typically this is unnecessary; the hostname is picked up by
config.load_incluster_config() or config.load_kube_config.
""",
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
load_config(host=self.k8s_api_host, ssl_ca_cert=self.k8s_api_ssl_ca_cert)
self.core_api = shared_client('CoreV1Api')
self.extension_api = shared_client('ExtensionsV1beta1Api')
labels = {
'component': self.component_label,
'hub.jupyter.org/proxy-route': 'true',
}
self.ingress_reflector = IngressReflector(
parent=self, namespace=self.namespace, labels=labels
)
self.service_reflector = ServiceReflector(
parent=self, namespace=self.namespace, labels=labels
)
self.endpoint_reflector = EndpointsReflector(
self, namespace=self.namespace, labels=labels
)
        # schedule our reflectors to start in the event loop;
        # a reflector's first load can be awaited with:
        #
        #   await some_reflector._first_load_future
        #
asyncio.ensure_future(self.ingress_reflector.start())
asyncio.ensure_future(self.service_reflector.start())
asyncio.ensure_future(self.endpoint_reflector.start())
def safe_name_for_routespec(self, routespec):
safe_chars = set(string.ascii_lowercase + string.digits)
safe_name = generate_hashed_slug(
'jupyter-'
+ escapism.escape(routespec, safe=safe_chars, escape_char='-')
+ '-route'
)
return safe_name
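    # Illustrative note (not part of the original class): for example,
    #
    #     proxy.safe_name_for_routespec('/user/alice/')
    #
    # returns a name of the form 'jupyter-<escaped-routespec>-route' where all
    # characters outside [a-z0-9] are escaped with '-' and the whole string is
    # shortened by generate_hashed_slug so it is a valid Kubernetes object
    # name (the exact value depends on the hash and is not reproduced here).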
async def delete_if_exists(self, kind, safe_name, future):
try:
await future
self.log.info('Deleted %s/%s', kind, safe_name)
except client.rest.ApiException as e:
if e.status != 404:
raise
self.log.warn("Could not delete %s/%s: does not exist", kind, safe_name)
async def add_route(self, routespec, target, data):
# Create a route with the name being escaped routespec
# Use full routespec in label
# 'data' is JSON encoded and put in an annotation - we don't need to query for it
safe_name = self.safe_name_for_routespec(routespec).lower()
labels = {
'heritage': 'jupyterhub',
'component': self.component_label,
'hub.jupyter.org/proxy-route': 'true',
}
endpoint, service, ingress = make_ingress(
safe_name, routespec, target, labels, data
)
async def ensure_object(create_func, patch_func, body, kind):
try:
resp = await create_func(namespace=self.namespace, body=body)
self.log.info('Created %s/%s', kind, safe_name)
except client.rest.ApiException as e:
if e.status == 409:
# This object already exists, we should patch it to make it be what we want
self.log.warn(
"Trying to patch %s/%s, it already exists", kind, safe_name
)
resp = await patch_func(
namespace=self.namespace,
body=body,
name=body.metadata.name,
)
else:
raise
if endpoint is not None:
await ensure_object(
self.core_api.create_namespaced_endpoints,
self.core_api.patch_namespaced_endpoints,
body=endpoint,
kind='endpoints',
)
await exponential_backoff(
lambda: f'{self.namespace}/{safe_name}'
in self.endpoint_reflector.endpoints.keys(),
'Could not find endpoints/%s after creating it' % safe_name,
)
else:
delete_endpoint = await self.core_api.delete_namespaced_endpoints(
name=safe_name,
namespace=self.namespace,
body=client.V1DeleteOptions(grace_period_seconds=0),
)
await self.delete_if_exists('endpoint', safe_name, delete_endpoint)
await ensure_object(
self.core_api.create_namespaced_service,
self.core_api.patch_namespaced_service,
body=service,
kind='service',
)
await exponential_backoff(
lambda: f'{self.namespace}/{safe_name}'
in self.service_reflector.services.keys(),
'Could not find service/%s after creating it' % safe_name,
)
await ensure_object(
self.extension_api.create_namespaced_ingress,
self.extension_api.patch_namespaced_ingress,
body=ingress,
kind='ingress',
)
await exponential_backoff(
lambda: f'{self.namespace}/{safe_name}'
in self.ingress_reflector.ingresses.keys(),
'Could not find ingress/%s after creating it' % safe_name,
)
async def delete_route(self, routespec):
# We just ensure that these objects are deleted.
# This means if some of them are already deleted, we just let it
# be.
safe_name = self.safe_name_for_routespec(routespec).lower()
delete_options = client.V1DeleteOptions(grace_period_seconds=0)
delete_endpoint = await self.core_api.delete_namespaced_endpoints(
name=safe_name,
namespace=self.namespace,
body=delete_options,
)
delete_service = await self.core_api.delete_namespaced_service(
name=safe_name,
namespace=self.namespace,
body=delete_options,
)
delete_ingress = await self.extension_api.delete_namespaced_ingress(
name=safe_name,
namespace=self.namespace,
body=delete_options,
grace_period_seconds=0,
)
# This seems like cleanest way to parallelize all three of these while
# also making sure we only ignore the exception when it's a 404.
# The order matters for endpoint & service - deleting the service deletes
# the endpoint in the background. This can be racy however, so we do so
# explicitly ourselves as well. In the future, we can probably try a
# foreground cascading deletion (https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#foreground-cascading-deletion)
# instead, but for now this works well enough.
await asyncio.gather(
self.delete_if_exists('endpoint', safe_name, delete_endpoint),
self.delete_if_exists('service', safe_name, delete_service),
self.delete_if_exists('ingress', safe_name, delete_ingress),
)
async def get_all_routes(self):
if not self.ingress_reflector.first_load_future.done():
await self.ingress_reflector.first_load_future
routes = {
ingress["metadata"]["annotations"]['hub.jupyter.org/proxy-routespec']: {
'routespec': ingress["metadata"]["annotations"][
'hub.jupyter.org/proxy-routespec'
],
'target': ingress["metadata"]["annotations"][
'hub.jupyter.org/proxy-target'
],
'data': json.loads(
ingress["metadata"]["annotations"]['hub.jupyter.org/proxy-data']
),
}
for ingress in self.ingress_reflector.ingresses.values()
}
return routes
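# Illustrative sketch (not part of the original module): the create-or-patch
# pattern used inside add_route() above, shown as a stand-alone coroutine. The
# create_func/patch_func pair would be e.g. core_api.create_namespaced_service
# and core_api.patch_namespaced_service.
async def _example_ensure_object(create_func, patch_func, namespace, body):
    """Create a namespaced object, falling back to a patch if it exists."""
    try:
        await create_func(namespace=namespace, body=body)
    except client.rest.ApiException as e:
        if e.status != 409:
            raise
        # 409 Conflict: the object already exists, so patch it into shape.
        await patch_func(namespace=namespace, body=body,
                         name=body.metadata.name)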
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import routes
from trove.backup.service import BackupController
from trove.cluster.service import ClusterController
from trove.common import wsgi
from trove.configuration.service import ConfigurationsController
from trove.configuration.service import ParametersController
from trove.datastore.service import DatastoreController
from trove.flavor.service import FlavorController
from trove.instance.service import InstanceController
from trove.limits.service import LimitsController
from trove.module.service import ModuleController
from trove.versions import VersionsController
from trove.volume_types.service import VolumeTypesController
class API(wsgi.Router):
"""Defines the API routes."""
def __init__(self):
mapper = routes.Mapper()
super(API, self).__init__(mapper)
self._instance_router(mapper)
self._cluster_router(mapper)
self._datastore_router(mapper)
self._flavor_router(mapper)
self._volume_type_router(mapper)
self._versions_router(mapper)
self._limits_router(mapper)
self._backups_router(mapper)
self._configurations_router(mapper)
self._modules_router(mapper)
def _versions_router(self, mapper):
versions_resource = VersionsController().create_resource()
mapper.connect("/",
controller=versions_resource,
action="show",
conditions={'method': ['GET']})
def _datastore_router(self, mapper):
datastore_resource = DatastoreController().create_resource()
mapper.resource("datastore", "/{tenant_id}/datastores",
controller=datastore_resource)
mapper.connect("/{tenant_id}/datastores/{datastore}/versions",
controller=datastore_resource,
action="version_index")
mapper.connect("/{tenant_id}/datastores/{datastore}/versions/{id}",
controller=datastore_resource,
action="version_show")
mapper.connect(
"/{tenant_id}/datastores/{datastore}/versions/"
"{version_id}/flavors",
controller=datastore_resource,
action="list_associated_flavors",
conditions={'method': ['GET']}
)
mapper.connect(
"/{tenant_id}/datastores/{datastore}/versions/"
"{version_id}/volume-types",
controller=datastore_resource,
action="list_associated_volume_types",
conditions={'method': ['GET']}
)
mapper.connect("/{tenant_id}/datastores/versions/{uuid}",
controller=datastore_resource,
action="version_show_by_uuid")
def _instance_router(self, mapper):
instance_resource = InstanceController().create_resource()
mapper.connect("/{tenant_id}/instances",
controller=instance_resource,
action="index",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/instances",
controller=instance_resource,
action="create",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/instances/{id}",
controller=instance_resource,
action="show",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/instances/{id}/action",
controller=instance_resource,
action="action",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/instances/{id}",
controller=instance_resource,
action="update",
conditions={'method': ['PUT']})
mapper.connect("/{tenant_id}/instances/{id}",
controller=instance_resource,
action="edit",
conditions={'method': ['PATCH']})
mapper.connect("/{tenant_id}/instances/{id}",
controller=instance_resource,
action="delete",
conditions={'method': ['DELETE']})
mapper.connect("/{tenant_id}/instances/{id}/backups",
controller=instance_resource,
action="backups",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/instances/{id}/configuration",
controller=instance_resource,
action="configuration",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/instances/{id}/log",
controller=instance_resource,
action="guest_log_list",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/instances/{id}/log",
controller=instance_resource,
action="guest_log_action",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/instances/{id}/modules",
controller=instance_resource,
action="module_list",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/instances/{id}/modules",
controller=instance_resource,
action="module_apply",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/instances/{id}/modules/{module_id}",
controller=instance_resource,
action="module_remove",
conditions={'method': ['DELETE']})
def _cluster_router(self, mapper):
cluster_resource = ClusterController().create_resource()
mapper.connect("/{tenant_id}/clusters",
controller=cluster_resource,
action="index",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/clusters/{id}",
controller=cluster_resource,
action="show",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/clusters",
controller=cluster_resource,
action="create",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/clusters/{id}",
controller=cluster_resource,
action="action",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/clusters/{cluster_id}/instances/"
"{instance_id}",
controller=cluster_resource,
action="show_instance",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/clusters/{id}",
controller=cluster_resource,
action="delete",
conditions={'method': ['DELETE']})
def _flavor_router(self, mapper):
flavor_resource = FlavorController().create_resource()
mapper.connect("/{tenant_id}/flavors",
controller=flavor_resource,
action="index",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/flavors/{id}",
controller=flavor_resource,
action="show",
conditions={'method': ['GET']})
def _volume_type_router(self, mapper):
volume_type_resource = VolumeTypesController().create_resource()
mapper.connect("/{tenant_id}/volume-types",
controller=volume_type_resource,
action="index",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/volume-types/{id}",
controller=volume_type_resource,
action="show",
conditions={'method': ['GET']})
def _limits_router(self, mapper):
limits_resource = LimitsController().create_resource()
mapper.connect("/{tenant_id}/limits",
controller=limits_resource,
action="index",
conditions={'method': ['GET']})
def _backups_router(self, mapper):
backups_resource = BackupController().create_resource()
mapper.connect("/{tenant_id}/backups",
controller=backups_resource,
action="index",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/backups",
controller=backups_resource,
action="create",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/backups/{id}",
controller=backups_resource,
action="show",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/backups/{id}",
controller=backups_resource,
action="action",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/backups/{id}",
controller=backups_resource,
action="delete",
conditions={'method': ['DELETE']})
def _modules_router(self, mapper):
modules_resource = ModuleController().create_resource()
mapper.resource("modules", "/{tenant_id}/modules",
controller=modules_resource)
mapper.connect("/{tenant_id}/modules",
controller=modules_resource,
action="index",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/modules",
controller=modules_resource,
action="create",
conditions={'method': ['POST']})
mapper.connect("/{tenant_id}/modules/{id}",
controller=modules_resource,
action="show",
conditions={'method': ['GET']})
mapper.connect("/{tenant_id}/modules/{id}",
controller=modules_resource,
action="update",
conditions={'method': ['PUT']})
mapper.connect("/{tenant_id}/modules/{id}",
controller=modules_resource,
action="delete",
conditions={'method': ['DELETE']})
mapper.connect("/{tenant_id}/modules/{id}/instances",
controller=modules_resource,
action="instances",
conditions={'method': ['GET']})
def _configurations_router(self, mapper):
parameters_resource = ParametersController().create_resource()
path = '/{tenant_id}/datastores/versions/{version}/parameters'
mapper.connect(path,
controller=parameters_resource,
action='index_by_version',
conditions={'method': ['GET']})
path = '/{tenant_id}/datastores/versions/{version}/parameters/{name}'
mapper.connect(path,
controller=parameters_resource,
action='show_by_version',
conditions={'method': ['GET']})
path = '/{tenant_id}/datastores/{datastore}/versions/{id}'
mapper.connect(path + '/parameters',
controller=parameters_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect(path + '/parameters/{name}',
controller=parameters_resource,
action='show',
conditions={'method': ['GET']})
configuration_resource = ConfigurationsController().create_resource()
mapper.connect('/{tenant_id}/configurations',
controller=configuration_resource,
action='index',
conditions={'method': ['GET']})
mapper.connect('/{tenant_id}/configurations',
controller=configuration_resource,
action='create',
conditions={'method': ['POST']})
mapper.connect('/{tenant_id}/configurations/{id}',
controller=configuration_resource,
action='show',
conditions={'method': ['GET']})
mapper.connect('/{tenant_id}/configurations/{id}/instances',
controller=configuration_resource,
action='instances',
conditions={'method': ['GET']})
mapper.connect('/{tenant_id}/configurations/{id}',
controller=configuration_resource,
action='edit',
conditions={'method': ['PATCH']})
mapper.connect('/{tenant_id}/configurations/{id}',
controller=configuration_resource,
action='update',
conditions={'method': ['PUT']})
mapper.connect('/{tenant_id}/configurations/{id}',
controller=configuration_resource,
action='delete',
conditions={'method': ['DELETE']})
def app_factory(global_conf, **local_conf):
return API()
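# Illustrative note (not part of the original module): the mapper above wires
# URL patterns to controller actions, for example:
#
#   GET    /{tenant_id}/instances       -> InstanceController.index
#   POST   /{tenant_id}/instances       -> InstanceController.create
#   GET    /{tenant_id}/instances/{id}  -> InstanceController.show
#   DELETE /{tenant_id}/instances/{id}  -> InstanceController.delete
#
# app_factory() is the paste.deploy entry point that returns this router.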
|
|
'''
Kivy framework
==============
Kivy is an open source library for developing multi-touch applications. It is
cross-platform (Linux/OSX/Windows/Android/iOS) and released under
the terms of the `MIT License <https://en.wikipedia.org/wiki/MIT_License>`_.
It comes with native support for many multi-touch input devices, a growing
library of multi-touch aware widgets and hardware accelerated OpenGL drawing.
Kivy is designed to let you focus on building custom and highly interactive
applications as quickly and easily as possible.
With Kivy, you can take full advantage of the dynamic nature of Python. There
are thousands of high-quality, free libraries that can be integrated in your
application. At the same time, performance-critical parts are implemented
using `Cython <http://cython.org/>`_.
See http://kivy.org for more information.
'''
__all__ = (
'require',
'kivy_configure', 'kivy_register_post_configuration',
'kivy_options', 'kivy_base_dir',
'kivy_modules_dir', 'kivy_data_dir', 'kivy_shader_dir',
'kivy_icons_dir', 'kivy_home_dir',
'kivy_config_fn', 'kivy_usermodules_dir',
)
import sys
import shutil
from getopt import getopt, GetoptError
from os import environ, mkdir
from os.path import dirname, join, basename, exists, expanduser
import pkgutil
from kivy.compat import PY2
from kivy.logger import Logger, LOG_LEVELS
from kivy.utils import platform
MAJOR = 1
MINOR = 11
MICRO = 0
RELEASE = False
__version__ = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
if not RELEASE and '.dev0' not in __version__:
__version__ += '.dev0'
try:
from kivy.version import __hash__, __date__
__hash__ = __hash__[:7]
except ImportError:
__hash__ = __date__ = ''
# internals for post-configuration
__kivy_post_configuration = []
if platform == 'macosx' and sys.maxsize < 9223372036854775807:
r = '''Unsupported Python version detected!:
Kivy requires a 64 bit version of Python to run on OS X. We strongly
advise you to use the version of Python that is provided by Apple
(don't use ports, fink or homebrew unless you know what you're
doing).
See http://kivy.org/docs/installation/installation-macosx.html for
details.
'''
Logger.critical(r)
def require(version):
'''Require can be used to check the minimum version required to run a Kivy
application. For example, you can start your application code like this::
import kivy
kivy.require('1.0.1')
If a user attempts to run your application with a version of Kivy that is
older than the specified version, an Exception is raised.
The Kivy version string is built like this::
X.Y.Z[-tag[-tagrevision]]
X is the major version
Y is the minor version
Z is the bugfixes revision
The tag is optional, but may be one of 'dev', 'alpha', or 'beta'.
The tagrevision is the revision of the tag.
.. warning::
You must not ask for a version with a tag, except -dev. Asking for a
'dev' version will just warn the user if the current Kivy
version is not a -dev, but it will never raise an exception.
You must not ask for a version with a tagrevision.
'''
def parse_version(version):
# check for tag
tag = None
tagrev = None
if '-' in version:
v = version.split('-')
if len(v) == 2:
version, tag = v
elif len(v) == 3:
version, tag, tagrev = v
else:
raise Exception('Revision format must be X.Y.Z[-tag]')
# check x y z
v = version.split('.')
if len(v) != 3:
if 'dev0' in v:
tag = v.pop()
else:
raise Exception('Revision format must be X.Y.Z[-tag]')
return [int(x) for x in v], tag, tagrev
# user version
revision, tag, tagrev = parse_version(version)
# current version
sysrevision, systag, systagrev = parse_version(__version__)
    # ensure that the required version doesn't contain a tag, except dev
if tag not in (None, 'dev'):
raise Exception('Revision format must not have any tag except "dev"')
if tag == 'dev' and systag != 'dev':
Logger.warning('Application requested a -dev version of Kivy. '
'(You have %s, but the application requires %s)' % (
__version__, version))
    # no tagrevision (-alpha-1, -beta-x) allowed.
if tagrev is not None:
raise Exception('Revision format must not contain any tagrevision')
# finally, checking revision
if sysrevision < revision:
raise Exception('The version of Kivy installed on this system '
'is too old. '
'(You have %s, but the application requires %s)' % (
__version__, version))
def kivy_configure():
'''Call post-configuration of Kivy.
This function must be called if you create the window yourself.
'''
for callback in __kivy_post_configuration:
callback()
def get_includes():
'''Retrieves the directories containing includes needed to build new Cython
modules with Kivy as a dependency. Currently returns the location of the
kivy.graphics module.
.. versionadded:: 1.9.1
'''
root_dir = dirname(__file__)
return [join(root_dir, 'graphics'), join(root_dir, 'tools', 'gles_compat'),
join(root_dir, 'include')]
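# Illustrative sketch (not part of the original module): get_includes() is
# meant to feed a setuptools/Cython build, e.g. in a hypothetical setup.py:
#
#     from setuptools import Extension
#     import kivy
#
#     ext = Extension('mymodule', ['mymodule.pyx'],
#                     include_dirs=kivy.get_includes())
#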
def kivy_register_post_configuration(callback):
'''Register a function to be called when kivy_configure() is called.
.. warning::
Internal use only.
'''
__kivy_post_configuration.append(callback)
def kivy_usage():
'''Kivy Usage: %s [OPTION...]::
-h, --help
Prints this help message.
-d, --debug
Shows debug log.
-a, --auto-fullscreen
Force 'auto' fullscreen mode (no resolution change).
Uses your display's resolution. This is most likely what you want.
-c, --config section:key[:value]
Set a custom [section] key=value in the configuration object.
-f, --fullscreen
Force running in fullscreen mode.
-k, --fake-fullscreen
Force 'fake' fullscreen mode (no window border/decoration).
Uses the resolution specified by width and height in your config.
-w, --windowed
Force running in a window.
-p, --provider id:provider[,options]
Add an input provider (eg: ccvtable1:tuio,192.168.0.1:3333).
-m mod, --module=mod
Activate a module (use "list" to get a list of available modules).
-r, --rotation
Rotate the window's contents (0, 90, 180, 270).
-s, --save
Save current Kivy configuration.
--size=640x480
Size of window geometry.
--dpi=96
        Manually override the Window DPI (for testing only).
'''
print(kivy_usage.__doc__ % (basename(sys.argv[0])))
#: Global settings options for kivy
kivy_options = {
'window': ('egl_rpi', 'sdl2', 'pygame', 'sdl', 'x11'),
'text': ('pil', 'sdl2', 'pygame', 'sdlttf'),
'video': (
'gstplayer', 'ffmpeg', 'ffpyplayer', 'null'),
'audio': (
'gstplayer', 'pygame', 'ffpyplayer', 'sdl2',
'avplayer'),
'image': ('tex', 'imageio', 'dds', 'sdl2', 'pygame', 'pil', 'ffpy', 'gif'),
'camera': ('opencv', 'gi', 'avfoundation',
'android', 'picamera'),
'spelling': ('enchant', 'osxappkit', ),
'clipboard': (
'android', 'winctypes', 'xsel', 'xclip', 'dbusklipper', 'nspaste',
'sdl2', 'pygame', 'dummy', 'gtk3', )}
# Read environment
for option in kivy_options:
key = 'KIVY_%s' % option.upper()
if key in environ:
try:
if type(kivy_options[option]) in (list, tuple):
kivy_options[option] = environ[key].split(',')
else:
kivy_options[option] = environ[key].lower() in \
('true', '1', 'yes')
except Exception:
Logger.warning('Core: Wrong value for %s environment key' % key)
Logger.exception('')
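# Illustrative note (not part of the original module): each option above can
# be overridden with a KIVY_<OPTION> environment variable holding a
# comma-separated provider list, for example:
#
#     KIVY_WINDOW=sdl2 KIVY_AUDIO=ffpyplayer,sdl2 python main.py
#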
# Extract all needed paths in kivy
#: Kivy directory
kivy_base_dir = dirname(sys.modules[__name__].__file__)
#: Kivy modules directory
kivy_modules_dir = environ.get('KIVY_MODULES_DIR',
join(kivy_base_dir, 'modules'))
#: Kivy data directory
kivy_data_dir = environ.get('KIVY_DATA_DIR',
join(kivy_base_dir, 'data'))
#: Kivy binary deps directory
kivy_binary_deps_dir = environ.get('KIVY_BINARY_DEPS',
join(kivy_base_dir, 'binary_deps'))
#: Kivy glsl shader directory
kivy_shader_dir = join(kivy_data_dir, 'glsl')
#: Kivy icons config path (don't remove the last '')
kivy_icons_dir = join(kivy_data_dir, 'icons', '')
#: Kivy user-home storage directory
kivy_home_dir = ''
#: Kivy configuration filename
kivy_config_fn = ''
#: Kivy user modules directory
kivy_usermodules_dir = ''
# if there are deps, import them so they can do their magic.
import kivy.deps
_packages = []
for importer, modname, ispkg in pkgutil.iter_modules(kivy.deps.__path__):
if not ispkg:
continue
if modname.startswith('gst'):
_packages.insert(0, (importer, modname))
else:
_packages.append((importer, modname))
for importer, modname in _packages:
try:
importer.find_module(modname).load_module(modname)
except ImportError as e:
Logger.warning("deps: Error importing dependency: {}".format(str(e)))
# Don't go further if we generate documentation
if any(name in sys.argv[0] for name in ('sphinx-build', 'autobuild.py')):
environ['KIVY_DOC'] = '1'
if 'sphinx-build' in sys.argv[0]:
environ['KIVY_DOC_INCLUDE'] = '1'
if any('nosetests' in arg for arg in sys.argv):
environ['KIVY_UNITTEST'] = '1'
if any('pyinstaller' in arg.lower() for arg in sys.argv):
environ['KIVY_PACKAGING'] = '1'
if not environ.get('KIVY_DOC_INCLUDE'):
# Configuration management
if 'KIVY_HOME' in environ:
kivy_home_dir = expanduser(environ['KIVY_HOME'])
else:
user_home_dir = expanduser('~')
if platform == 'android':
user_home_dir = environ['ANDROID_APP_PATH']
elif platform == 'ios':
user_home_dir = join(expanduser('~'), 'Documents')
kivy_home_dir = join(user_home_dir, '.kivy')
if PY2:
kivy_home_dir = kivy_home_dir.decode(sys.getfilesystemencoding())
kivy_config_fn = join(kivy_home_dir, 'config.ini')
kivy_usermodules_dir = join(kivy_home_dir, 'mods')
icon_dir = join(kivy_home_dir, 'icon')
if 'KIVY_NO_CONFIG' not in environ:
if not exists(kivy_home_dir):
mkdir(kivy_home_dir)
if not exists(kivy_usermodules_dir):
mkdir(kivy_usermodules_dir)
if not exists(icon_dir):
try:
shutil.copytree(join(kivy_data_dir, 'logo'), icon_dir)
except:
Logger.exception('Error when copying logo directory')
# configuration
from kivy.config import Config
# Set level of logger
level = LOG_LEVELS.get(Config.get('kivy', 'log_level'))
Logger.setLevel(level=level)
    # Can be overridden on the command line
if ('KIVY_UNITTEST' not in environ and
'KIVY_PACKAGING' not in environ and
'KIVY_NO_ARGS' not in environ):
        # save sys.argv; otherwise gstreamer uses it and displays help..
sys_argv = sys.argv
sys.argv = sys.argv[:1]
try:
opts, args = getopt(sys_argv[1:], 'hp:fkawFem:sr:dc:', [
'help', 'fullscreen', 'windowed', 'fps', 'event',
'module=', 'save', 'fake-fullscreen', 'auto-fullscreen',
'multiprocessing-fork', 'display=', 'size=', 'rotate=',
'config=', 'debug', 'dpi='])
except GetoptError as err:
Logger.error('Core: %s' % str(err))
kivy_usage()
sys.exit(2)
mp_fork = None
try:
for opt, arg in opts:
if opt == '--multiprocessing-fork':
mp_fork = True
break
except:
pass
# set argv to the non-read args
sys.argv = sys_argv[0:1] + args
if mp_fork is not None:
# Needs to be first opt for support_freeze to work
sys.argv.insert(1, '--multiprocessing-fork')
else:
opts = []
args = []
need_save = False
for opt, arg in opts:
if opt in ('-h', '--help'):
kivy_usage()
sys.exit(0)
elif opt in ('-p', '--provider'):
try:
pid, args = arg.split(':', 1)
Config.set('input', pid, args)
except ValueError:
# when we are doing an executable on macosx with
# pyinstaller, they are passing information with -p. so
# it will conflict with our current -p option. since the
# format is not the same, just avoid it.
pass
elif opt in ('-a', '--auto-fullscreen'):
Config.set('graphics', 'fullscreen', 'auto')
elif opt in ('-c', '--config'):
ol = arg.split(':', 2)
if len(ol) == 2:
Config.set(ol[0], ol[1], '')
elif len(ol) == 3:
Config.set(ol[0], ol[1], ol[2])
else:
raise Exception('Invalid --config value')
if ol[0] == 'kivy' and ol[1] == 'log_level':
level = LOG_LEVELS.get(Config.get('kivy', 'log_level'))
Logger.setLevel(level=level)
elif opt in ('-k', '--fake-fullscreen'):
Config.set('graphics', 'fullscreen', 'fake')
elif opt in ('-f', '--fullscreen'):
Config.set('graphics', 'fullscreen', '1')
elif opt in ('-w', '--windowed'):
Config.set('graphics', 'fullscreen', '0')
elif opt in ('--size', ):
w, h = str(arg).split('x')
Config.set('graphics', 'width', w)
Config.set('graphics', 'height', h)
elif opt in ('--display', ):
Config.set('graphics', 'display', str(arg))
elif opt in ('-m', '--module'):
if str(arg) == 'list':
from kivy.modules import Modules
Modules.usage_list()
sys.exit(0)
args = arg.split(':', 1)
if len(args) == 1:
args += ['']
Config.set('modules', args[0], args[1])
elif opt in ('-s', '--save'):
need_save = True
        elif opt in ('-r', '--rotate'):  # long option must match 'rotate=' in the getopt list above
Config.set('graphics', 'rotation', arg)
elif opt in ('-d', '--debug'):
level = LOG_LEVELS.get('debug')
Logger.setLevel(level=level)
elif opt == '--dpi':
environ['KIVY_DPI'] = arg
if need_save and 'KIVY_NO_CONFIG' not in environ:
try:
with open(kivy_config_fn, 'w') as fd:
Config.write(fd)
except Exception as e:
            Logger.exception('Core: error while saving default '
                             'configuration file: %s', str(e))
Logger.info('Core: Kivy configuration saved.')
sys.exit(0)
# configure all activated modules
from kivy.modules import Modules
Modules.configure()
# android hooks: force fullscreen and add android touch input provider
if platform in ('android', 'ios'):
from kivy.config import Config
Config.set('graphics', 'fullscreen', 'auto')
Config.remove_section('input')
Config.add_section('input')
if platform == 'android':
Config.set('input', 'androidtouch', 'android')
if RELEASE:
Logger.info('Kivy: v%s' % (__version__))
elif not RELEASE and __hash__ and __date__:
Logger.info('Kivy: v%s, git-%s, %s' % (__version__, __hash__, __date__))
Logger.info('Python: v{}'.format(sys.version))
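# A minimal illustrative sketch (not part of the upstream module): the effect of
# a typical command line on Config, assuming Kivy was imported without
# KIVY_NO_ARGS set. The function is illustrative only and is never called here.
def _example_cli_option_mapping():
    # python main.py -f --size 800x600 -c kivy:log_level:debug
    # is roughly equivalent to:
    from kivy.config import Config
    Config.set('graphics', 'fullscreen', '1')   # -f / --fullscreen
    Config.set('graphics', 'width', '800')      # --size WxH
    Config.set('graphics', 'height', '600')
    Config.set('kivy', 'log_level', 'debug')    # -c section:key:value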
|
|
"""Test the Opentherm Gateway config flow."""
import asyncio
from pyotgw.vars import OTGW_ABOUT
from serial import SerialException
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.opentherm_gw.const import (
CONF_FLOOR_TEMP,
CONF_PRECISION,
DOMAIN,
)
from homeassistant.const import CONF_DEVICE, CONF_ID, CONF_NAME, PRECISION_HALVES
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_form_user(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.opentherm_gw.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.opentherm_gw.async_setup_entry",
return_value=True,
) as mock_setup_entry, patch(
"pyotgw.pyotgw.connect",
return_value={OTGW_ABOUT: "OpenTherm Gateway 4.2.5"},
) as mock_pyotgw_connect, patch(
"pyotgw.pyotgw.disconnect", return_value=None
) as mock_pyotgw_disconnect:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_NAME: "Test Entry 1", CONF_DEVICE: "/dev/ttyUSB0"}
)
assert result2["type"] == "create_entry"
assert result2["title"] == "Test Entry 1"
assert result2["data"] == {
CONF_NAME: "Test Entry 1",
CONF_DEVICE: "/dev/ttyUSB0",
CONF_ID: "test_entry_1",
}
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert len(mock_pyotgw_connect.mock_calls) == 1
assert len(mock_pyotgw_disconnect.mock_calls) == 1
async def test_form_import(hass):
"""Test import from existing config."""
await setup.async_setup_component(hass, "persistent_notification", {})
with patch(
"homeassistant.components.opentherm_gw.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.opentherm_gw.async_setup_entry",
return_value=True,
) as mock_setup_entry, patch(
"pyotgw.pyotgw.connect",
return_value={OTGW_ABOUT: "OpenTherm Gateway 4.2.5"},
) as mock_pyotgw_connect, patch(
"pyotgw.pyotgw.disconnect", return_value=None
) as mock_pyotgw_disconnect:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_ID: "legacy_gateway", CONF_DEVICE: "/dev/ttyUSB1"},
)
assert result["type"] == "create_entry"
assert result["title"] == "legacy_gateway"
assert result["data"] == {
CONF_NAME: "legacy_gateway",
CONF_DEVICE: "/dev/ttyUSB1",
CONF_ID: "legacy_gateway",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert len(mock_pyotgw_connect.mock_calls) == 1
assert len(mock_pyotgw_disconnect.mock_calls) == 1
async def test_form_duplicate_entries(hass):
"""Test duplicate device or id errors."""
flow1 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
flow2 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
flow3 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.opentherm_gw.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.opentherm_gw.async_setup_entry",
return_value=True,
) as mock_setup_entry, patch(
"pyotgw.pyotgw.connect",
return_value={OTGW_ABOUT: "OpenTherm Gateway 4.2.5"},
) as mock_pyotgw_connect, patch(
"pyotgw.pyotgw.disconnect", return_value=None
) as mock_pyotgw_disconnect:
result1 = await hass.config_entries.flow.async_configure(
flow1["flow_id"], {CONF_NAME: "Test Entry 1", CONF_DEVICE: "/dev/ttyUSB0"}
)
result2 = await hass.config_entries.flow.async_configure(
flow2["flow_id"], {CONF_NAME: "Test Entry 1", CONF_DEVICE: "/dev/ttyUSB1"}
)
result3 = await hass.config_entries.flow.async_configure(
flow3["flow_id"], {CONF_NAME: "Test Entry 2", CONF_DEVICE: "/dev/ttyUSB0"}
)
assert result1["type"] == "create_entry"
assert result2["type"] == "form"
assert result2["errors"] == {"base": "id_exists"}
assert result3["type"] == "form"
assert result3["errors"] == {"base": "already_configured"}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert len(mock_pyotgw_connect.mock_calls) == 1
assert len(mock_pyotgw_disconnect.mock_calls) == 1
async def test_form_connection_timeout(hass):
"""Test we handle connection timeout."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"pyotgw.pyotgw.connect", side_effect=(asyncio.TimeoutError)
) as mock_connect:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_NAME: "Test Entry 1", CONF_DEVICE: "socket://192.0.2.254:1234"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "timeout"}
assert len(mock_connect.mock_calls) == 1
async def test_form_connection_error(hass):
"""Test we handle serial connection error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("pyotgw.pyotgw.connect", side_effect=(SerialException)) as mock_connect:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_NAME: "Test Entry 1", CONF_DEVICE: "/dev/ttyUSB0"}
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "serial_error"}
assert len(mock_connect.mock_calls) == 1
async def test_options_form(hass):
"""Test the options form."""
entry = MockConfigEntry(
domain=DOMAIN,
title="Mock Gateway",
data={
CONF_NAME: "Mock Gateway",
CONF_DEVICE: "/dev/null",
CONF_ID: "mock_gateway",
},
options={},
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(
entry.entry_id, context={"source": "test"}, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_FLOOR_TEMP: True, CONF_PRECISION: PRECISION_HALVES},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_PRECISION] == PRECISION_HALVES
assert result["data"][CONF_FLOOR_TEMP] is True
result = await hass.config_entries.options.async_init(
entry.entry_id, context={"source": "test"}, data=None
)
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_PRECISION: 0}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_PRECISION] is None
assert result["data"][CONF_FLOOR_TEMP] is True
|
|
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
from __future__ import print_function
from rez.config import config
from rez.vendor.memcache.memcache import Client as Client_, \
SERVER_MAX_KEY_LENGTH, __version__ as memcache_client_version
from rez.utils import py23
from threading import local
from contextlib import contextmanager
from functools import update_wrapper
from inspect import isgeneratorfunction
from hashlib import md5
from uuid import uuid4
from rez.vendor.six import six
basestring = six.string_types[0]
# this version should be changed if and when the caching interface changes
cache_interface_version = 2
class Client(object):
"""Wrapper for memcache.Client instance.
Adds the features:
- unlimited key length;
- hard/soft flushing;
- ability to cache None.
"""
class _Miss(object):
def __nonzero__(self):
return False
__bool__ = __nonzero__ # py3 compat
miss = _Miss()
logger = config.debug_printer("memcache")
def __init__(self, servers, debug=False):
"""Create a memcached client.
Args:
servers (str or list of str): Server URI(s), eg '127.0.0.1:11211'.
            debug (bool): If True, quasi-human-readable keys are used. This helps
                debugging - run 'memcached -vv' in the foreground to see the keys
                being set and fetched.
"""
self.servers = [servers] if isinstance(servers, basestring) else servers
self.key_hasher = self._debug_key_hash if debug else self._key_hash
self._client = None
self.debug = debug
self.current = ''
def __nonzero__(self):
return bool(self.servers)
__bool__ = __nonzero__ # py3 compat
@property
def client(self):
"""Get the native memcache client.
Returns:
`memcache.Client` instance.
"""
if self._client is None:
self._client = Client_(self.servers)
return self._client
def test_servers(self):
"""Test that memcached servers are servicing requests.
Returns:
set: URIs of servers that are responding.
"""
responders = set()
for server in self.servers:
client = Client_([server])
key = uuid4().hex
client.set(key, 1)
if client.get(key) == 1:
responders.add(server)
return responders
def set(self, key, val, time=0, min_compress_len=0):
"""See memcache.Client."""
if not self.servers:
return
key = self._qualified_key(key)
hashed_key = self.key_hasher(key)
val = (key, val)
self.client.set(key=hashed_key,
val=val,
time=time,
min_compress_len=min_compress_len)
self.logger("SET: %s", key)
def get(self, key):
"""See memcache.Client.
Returns:
object: A value if cached, else `self.miss`. Note that this differs
from `memcache.Client`, which returns None on cache miss, and thus
cannot cache the value None itself.
"""
if not self.servers:
return self.miss
key = self._qualified_key(key)
hashed_key = self.key_hasher(key)
entry = self.client.get(hashed_key)
if isinstance(entry, tuple) and len(entry) == 2:
key_, result = entry
if key_ == key:
self.logger("HIT: %s", key)
return result
self.logger("MISS: %s", key)
return self.miss
def delete(self, key):
"""See memcache.Client."""
if self.servers:
key = self._qualified_key(key)
hashed_key = self.key_hasher(key)
self.client.delete(hashed_key)
def flush(self, hard=False):
"""Drop existing entries from the cache.
Args:
hard (bool): If True, all current entries are flushed from the
server(s), which affects all users. If False, only the local
process is affected.
"""
if not self.servers:
return
if hard:
self.client.flush_all()
self.reset_stats()
else:
from uuid import uuid4
tag = uuid4().hex
if self.debug:
tag = "flushed" + tag
self.current = tag
def get_stats(self):
"""Get server statistics.
Returns:
A list of tuples (server_identifier, stats_dictionary).
"""
return self._get_stats()
def reset_stats(self):
"""Reset the server stats."""
self._get_stats("reset")
def disconnect(self):
"""Disconnect from server(s). Behaviour is undefined after this call."""
if self.servers and self._client:
self._client.disconnect_all()
# print("Disconnected memcached client %s" % str(self))
def _qualified_key(self, key):
"""
Qualify cache key so that:
* changes to schemas don't break compatibility (cache_interface_version)
* we're shielded from potential compatibility bugs in newer versions of
python-memcached
"""
return "%s:%s:%s:%s" % (
memcache_client_version,
cache_interface_version,
self.current,
key
)
def _get_stats(self, stat_args=None):
return self.client.get_stats(stat_args=stat_args)
@classmethod
def _key_hash(cls, key):
return md5(key.encode("utf-8")).hexdigest()
@classmethod
def _debug_key_hash(cls, key):
import re
h = cls._key_hash(key)[:16]
value = "%s:%s" % (h, key)
value = value[:SERVER_MAX_KEY_LENGTH]
value = re.sub("[^0-9a-zA-Z]+", '_', value)
return value
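# A minimal illustrative sketch (not part of the original module): basic use of
# the wrapper above. Assumes a memcached server is reachable at the given URI;
# the key and value are placeholders.
def _example_client_usage():
    client = Client(["127.0.0.1:11211"], debug=True)
    client.set("resolve:foo-1.2.0", {"variants": 3})
    value = client.get("resolve:foo-1.2.0")
    if value is client.miss:
        pass  # cache miss; note that a cached None is *not* reported as a miss
    client.flush()  # soft flush: only this process stops seeing existing entries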
class _ScopedInstanceManager(local):
def __init__(self):
self.clients = {}
def acquire(self, servers, debug=False):
key = (tuple(servers or []), debug)
entry = self.clients.get(key)
if entry:
entry[1] += 1
return entry[0], key
else:
client = Client(servers, debug=debug)
self.clients[key] = [client, 1]
return client, key
def release(self, key):
entry = self.clients.get(key)
assert entry
entry[1] -= 1
if not entry[1]:
client = entry[0]
del self.clients[key]
client.disconnect()
scoped_instance_manager = _ScopedInstanceManager()
@contextmanager
def memcached_client(servers=config.memcached_uri, debug=config.debug_memcache):
"""Get a shared memcached instance.
This function shares the same memcached instance across nested invocations.
This is done so that memcached connections can be kept to a minimum, but at
the same time unnecessary extra reconnections are avoided. Typically an
initial scope (using 'with' construct) is made around parts of code that hit
the cache server many times - such as a resolve, or executing a context. On
exit of the topmost scope, the memcached client is disconnected.
Returns:
`Client`: Memcached instance.
"""
key = None
try:
client, key = scoped_instance_manager.acquire(servers, debug=debug)
yield client
finally:
if key:
scoped_instance_manager.release(key)
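# A minimal illustrative sketch (not part of the original module): nested scopes
# share a single client; the connection is closed when the outermost scope exits.
def _example_scoped_client():
    with memcached_client() as client:
        client.set("outer", 1)
        with memcached_client() as inner:
            # 'inner' is the same Client instance as 'client'
            inner.get("outer")
    # on exiting the outermost 'with', the shared client is disconnected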
def pool_memcached_connections(func):
"""Function decorator to pool memcached connections.
Use this to wrap functions that might make multiple calls to memcached. This
will cause a single memcached client to be shared for all connections.
"""
if isgeneratorfunction(func):
def wrapper(*nargs, **kwargs):
with memcached_client():
for result in func(*nargs, **kwargs):
yield result
else:
def wrapper(*nargs, **kwargs):
with memcached_client():
return func(*nargs, **kwargs)
return update_wrapper(wrapper, func)
def memcached(servers, key=None, from_cache=None, to_cache=None, time=0,
min_compress_len=0, debug=False):
"""memcached memoization function decorator.
The wrapped function is expected to return a value that is stored to a
memcached server, first translated by `to_cache` if provided. In the event
of a cache hit, the data is translated by `from_cache` if provided, before
being returned. If you do not want a result to be cached, wrap the return
value of your function in a `DoNotCache` object.
Example:
@memcached('127.0.0.1:11211')
def _listdir(path):
return os.path.listdir(path)
Note:
If using the default key function, ensure that repr() is implemented on
all your arguments and that they are hashable.
Note:
`from_cache` and `to_cache` both accept the value as first parameter,
then the target function's arguments follow.
Args:
servers (str or list of str): memcached server uri(s), eg '127.0.0.1:11211'.
This arg can be None also, in which case memcaching is disabled.
key (callable, optional): Function that, given the target function's args,
returns the string key to use in memcached.
from_cache (callable, optional): If provided, and a cache hit occurs, the
cached value will be translated by this function before being returned.
to_cache (callable, optional): If provided, and a cache miss occurs, the
function's return value will be translated by this function before
being cached.
        time (int): Tells memcached the time at which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
min_compress_len (int): The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
            attempt at compression yields a larger string than the input, then it is
            discarded. For backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress.
debug (bool): If True, memcache keys are kept human readable, so you can
read them if running a foreground memcached proc with 'memcached -vv'.
However this increases chances of key clashes so should not be left
turned on.
"""
def default_key(func, *nargs, **kwargs):
parts = [func.__module__]
argnames = py23.get_function_arg_names(func)
if argnames:
if argnames[0] == "cls":
cls_ = nargs[0]
parts.append(cls_.__name__)
nargs = nargs[1:]
elif argnames[0] == "self":
cls_ = nargs[0].__class__
parts.append(cls_.__name__)
nargs = nargs[1:]
parts.append(func.__name__)
value = ('.'.join(parts), nargs, tuple(sorted(kwargs.items())))
# make sure key is hashable. We don't strictly need it to be, but this
# is a way of hopefully avoiding object types that are not ordered (these
# would give an unreliable key). If you need to key on unhashable args,
# you should provide your own `key` functor.
#
_ = hash(value) # noqa
return repr(value)
def identity(value, *nargs, **kwargs):
return value
from_cache = from_cache or identity
to_cache = to_cache or identity
def decorator(func):
if servers:
def wrapper(*nargs, **kwargs):
with memcached_client(servers, debug=debug) as client:
if key:
cache_key = key(*nargs, **kwargs)
else:
cache_key = default_key(func, *nargs, **kwargs)
# get
result = client.get(cache_key)
if result is not client.miss:
return from_cache(result, *nargs, **kwargs)
# cache miss - run target function
result = func(*nargs, **kwargs)
if isinstance(result, DoNotCache):
return result.result
# store
cache_result = to_cache(result, *nargs, **kwargs)
client.set(key=cache_key,
val=cache_result,
time=time,
min_compress_len=min_compress_len)
return result
else:
def wrapper(*nargs, **kwargs):
result = func(*nargs, **kwargs)
if isinstance(result, DoNotCache):
return result.result
return result
def forget():
"""Forget entries in the cache.
Note that this does not delete entries from a memcached server - that
would be slow and error-prone. Calling this function only ensures
that entries set by the current process will no longer be seen during
this process.
"""
with memcached_client(servers, debug=debug) as client:
client.flush()
wrapper.forget = forget
wrapper.__wrapped__ = func
return update_wrapper(wrapper, func)
return decorator
class DoNotCache(object):
def __init__(self, result):
self.result = result
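# A minimal illustrative sketch (not part of the original module): memoizing a
# function with the decorator above and opting out of caching selected results.
# The server URI and expiry time are placeholders.
def _example_memcached_decorator():
    import os

    @memcached("127.0.0.1:11211", time=300)
    def listdir(path):
        try:
            return os.listdir(path)
        except OSError:
            return DoNotCache([])  # don't cache transient failures

    listdir("/tmp")   # first call runs the function and stores the result
    listdir("/tmp")   # second call is served from memcached (if reachable)
    listdir.forget()  # entries set by this process are no longer seen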
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
subscription_id: str,
resource_group_name: str,
server_name: str,
service_objective_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2014-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/serviceObjectives/{serviceObjectiveName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
"serviceObjectiveName": _SERIALIZER.url("service_objective_name", service_objective_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_server_request(
subscription_id: str,
resource_group_name: str,
server_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2014-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/serviceObjectives')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class ServiceObjectivesOperations(object):
"""ServiceObjectivesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
server_name: str,
service_objective_name: str,
**kwargs: Any
) -> "_models.ServiceObjective":
"""Gets a database service objective.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param service_objective_name: The name of the service objective to retrieve.
:type service_objective_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceObjective, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.ServiceObjective
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceObjective"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
server_name=server_name,
service_objective_name=service_objective_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceObjective', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/serviceObjectives/{serviceObjectiveName}'} # type: ignore
@distributed_trace
def list_by_server(
self,
resource_group_name: str,
server_name: str,
**kwargs: Any
) -> Iterable["_models.ServiceObjectiveListResult"]:
"""Returns database service objectives.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceObjectiveListResult or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.sql.models.ServiceObjectiveListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceObjectiveListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_server_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
server_name=server_name,
template_url=self.list_by_server.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_server_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
server_name=server_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ServiceObjectiveListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/serviceObjectives'} # type: ignore
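# A minimal illustrative sketch (not part of this generated module): how this
# operation group is typically reached from a service client. Assumes the
# azure-identity and azure-mgmt-sql packages are installed; the subscription id
# and resource names below are placeholders.
def _example_list_service_objectives():
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.sql import SqlManagementClient

    client = SqlManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    # list_by_server returns an ItemPaged iterator; get returns a single model.
    for objective in client.service_objectives.list_by_server("my-rg", "my-server"):
        print(objective.name)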
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers that act as activation functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.LeakyReLU')
class LeakyReLU(Layer):
"""Leaky version of a Rectified Linear Unit.
It allows a small gradient when the unit is not active:
```
f(x) = alpha * x if x < 0
f(x) = x if x >= 0
```
Usage:
>>> layer = tf.keras.layers.LeakyReLU()
>>> output = layer([-3.0, -1.0, 0.0, 2.0])
>>> list(output.numpy())
[-0.9, -0.3, 0.0, 2.0]
>>> layer = tf.keras.layers.LeakyReLU(alpha=0.1)
>>> output = layer([-3.0, -1.0, 0.0, 2.0])
>>> list(output.numpy())
[-0.3, -0.1, 0.0, 2.0]
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the batch axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
    alpha: Float >= 0. Negative slope coefficient. Defaults to 0.3.
"""
def __init__(self, alpha=0.3, **kwargs):
super(LeakyReLU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha = K.cast_to_floatx(alpha)
def call(self, inputs):
return K.relu(inputs, alpha=self.alpha)
def get_config(self):
config = {'alpha': float(self.alpha)}
base_config = super(LeakyReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@keras_export('keras.layers.PReLU')
class PReLU(Layer):
"""Parametric Rectified Linear Unit.
It follows:
```
f(x) = alpha * x for x < 0
f(x) = x for x >= 0
```
where `alpha` is a learned array with the same shape as x.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
alpha_initializer: Initializer function for the weights.
alpha_regularizer: Regularizer for the weights.
alpha_constraint: Constraint for the weights.
shared_axes: The axes along which to share learnable
parameters for the activation function.
For example, if the incoming feature maps
are from a 2D convolution
with output shape `(batch, height, width, channels)`,
and you wish to share parameters across space
so that each filter only has one set of parameters,
set `shared_axes=[1, 2]`.
"""
def __init__(self,
alpha_initializer='zeros',
alpha_regularizer=None,
alpha_constraint=None,
shared_axes=None,
**kwargs):
super(PReLU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha_initializer = initializers.get(alpha_initializer)
self.alpha_regularizer = regularizers.get(alpha_regularizer)
self.alpha_constraint = constraints.get(alpha_constraint)
if shared_axes is None:
self.shared_axes = None
elif not isinstance(shared_axes, (list, tuple)):
self.shared_axes = [shared_axes]
else:
self.shared_axes = list(shared_axes)
@tf_utils.shape_type_conversion
def build(self, input_shape):
param_shape = list(input_shape[1:])
if self.shared_axes is not None:
for i in self.shared_axes:
param_shape[i - 1] = 1
self.alpha = self.add_weight(
shape=param_shape,
name='alpha',
initializer=self.alpha_initializer,
regularizer=self.alpha_regularizer,
constraint=self.alpha_constraint)
# Set input spec
axes = {}
if self.shared_axes:
for i in range(1, len(input_shape)):
if i not in self.shared_axes:
axes[i] = input_shape[i]
self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
self.built = True
def call(self, inputs):
pos = K.relu(inputs)
neg = -self.alpha * K.relu(-inputs)
return pos + neg
def get_config(self):
config = {
'alpha_initializer': initializers.serialize(self.alpha_initializer),
'alpha_regularizer': regularizers.serialize(self.alpha_regularizer),
'alpha_constraint': constraints.serialize(self.alpha_constraint),
'shared_axes': self.shared_axes
}
base_config = super(PReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
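# A minimal illustrative sketch (not part of the original module): PReLU with its
# alphas shared across the spatial axes of an NHWC feature map. Assumes TF 2.x
# with eager execution; the function is illustrative only and is never called here.
def _example_prelu_shared_axes():
  import numpy as np
  import tensorflow as tf
  layer = tf.keras.layers.PReLU(shared_axes=[1, 2])
  x = np.random.randn(1, 8, 8, 16).astype('float32')
  y = layer(x)    # building the layer creates alpha with shape (1, 1, 16)
  return y.shape  # same shape as the input: (1, 8, 8, 16)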
@keras_export('keras.layers.ELU')
class ELU(Layer):
"""Exponential Linear Unit.
It follows:
```
f(x) = alpha * (exp(x) - 1.) for x < 0
f(x) = x for x >= 0
```
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
alpha: Scale for the negative factor.
"""
def __init__(self, alpha=1.0, **kwargs):
super(ELU, self).__init__(**kwargs)
self.supports_masking = True
self.alpha = K.cast_to_floatx(alpha)
def call(self, inputs):
return K.elu(inputs, self.alpha)
def get_config(self):
config = {'alpha': float(self.alpha)}
base_config = super(ELU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
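# A minimal illustrative sketch (not part of the original module): ELU on a few
# scalar inputs. Assumes TF 2.x with eager execution.
def _example_elu():
  import tensorflow as tf
  layer = tf.keras.layers.ELU(alpha=1.0)
  out = layer([-3.0, -1.0, 0.0, 2.0])
  # negatives map to alpha * (exp(x) - 1), roughly [-0.95, -0.63, 0.0, 2.0]
  return list(out.numpy())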
@keras_export('keras.layers.ThresholdedReLU')
class ThresholdedReLU(Layer):
"""Thresholded Rectified Linear Unit.
It follows:
```
f(x) = x for x > theta
  f(x) = 0 otherwise
```
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
theta: Float >= 0. Threshold location of activation.
"""
def __init__(self, theta=1.0, **kwargs):
super(ThresholdedReLU, self).__init__(**kwargs)
self.supports_masking = True
self.theta = K.cast_to_floatx(theta)
def call(self, inputs):
theta = math_ops.cast(self.theta, inputs.dtype)
return inputs * math_ops.cast(math_ops.greater(inputs, theta), inputs.dtype)
def get_config(self):
config = {'theta': float(self.theta)}
base_config = super(ThresholdedReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
def _large_compatible_negative(tensor_type):
"""Large negative number as Tensor.
  This function is necessary because the large negative constant used for
  masking in this module (-1e9) cannot be represented using tf.float16.
Args:
tensor_type: a dtype to determine the type.
Returns:
a large negative number.
"""
if tensor_type == dtypes.float16:
return dtypes.float16.min
return -1e9
@keras_export('keras.layers.Softmax')
class Softmax(Layer):
"""Softmax activation function.
Example without mask:
>>> inp = np.asarray([1., 2., 1.])
>>> layer = tf.keras.layers.Softmax()
>>> layer(inp).numpy()
array([0.21194157, 0.5761169 , 0.21194157], dtype=float32)
>>> mask = np.asarray([True, False, True], dtype=bool)
>>> layer(inp, mask).numpy()
array([0.5, 0. , 0.5], dtype=float32)
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
axis: Integer, or list of Integers, axis along which the softmax
normalization is applied.
Call arguments:
inputs: The inputs, or logits to the softmax layer.
mask: A boolean mask of the same shape as `inputs`. Defaults to `None`.
Returns:
softmaxed output with the same shape as `inputs`.
"""
def __init__(self, axis=-1, **kwargs):
super(Softmax, self).__init__(**kwargs)
self.supports_masking = True
self.axis = axis
def call(self, inputs, mask=None):
if mask is not None:
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
      # positions we want to attend and -1e9 for masked positions.
adder = (1.0 - math_ops.cast(mask, inputs.dtype)) * (
_large_compatible_negative(inputs.dtype))
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return math_ops.exp(inputs - math_ops.reduce_logsumexp(
inputs, axis=self.axis, keepdims=True))
else:
return K.softmax(inputs, axis=self.axis[0])
return K.softmax(inputs, axis=self.axis)
def get_config(self):
config = {'axis': self.axis}
base_config = super(Softmax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@keras_export('keras.layers.ReLU')
class ReLU(Layer):
"""Rectified Linear Unit activation function.
With default values, it returns element-wise `max(x, 0)`.
Otherwise, it follows:
```
f(x) = max_value if x >= max_value
f(x) = x if threshold <= x < max_value
f(x) = negative_slope * (x - threshold) otherwise
```
Usage:
>>> layer = tf.keras.layers.ReLU()
>>> output = layer([-3.0, -1.0, 0.0, 2.0])
>>> list(output.numpy())
[0.0, 0.0, 0.0, 2.0]
>>> layer = tf.keras.layers.ReLU(max_value=1.0)
>>> output = layer([-3.0, -1.0, 0.0, 2.0])
>>> list(output.numpy())
[0.0, 0.0, 0.0, 1.0]
>>> layer = tf.keras.layers.ReLU(negative_slope=1.0)
>>> output = layer([-3.0, -1.0, 0.0, 2.0])
>>> list(output.numpy())
[-3.0, -1.0, 0.0, 2.0]
>>> layer = tf.keras.layers.ReLU(threshold=1.5)
>>> output = layer([-3.0, -1.0, 1.0, 2.0])
>>> list(output.numpy())
[0.0, 0.0, 0.0, 2.0]
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the batch axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as the input.
Arguments:
    max_value: Float >= 0. Maximum activation value. Defaults to None, which
      means unlimited.
    negative_slope: Float >= 0. Negative slope coefficient. Defaults to 0.
    threshold: Float. Threshold value for thresholded activation. Defaults to 0.
"""
def __init__(self, max_value=None, negative_slope=0, threshold=0, **kwargs):
super(ReLU, self).__init__(**kwargs)
if max_value is not None and max_value < 0.:
raise ValueError('max_value of Relu layer '
'cannot be negative value: ' + str(max_value))
if negative_slope < 0.:
raise ValueError('negative_slope of Relu layer '
'cannot be negative value: ' + str(negative_slope))
if threshold is None:
raise ValueError('threshold of Relu layer '
'cannot be None. Required a float')
    self.supports_masking = True
if max_value is not None:
max_value = K.cast_to_floatx(max_value)
self.max_value = max_value
self.negative_slope = K.cast_to_floatx(negative_slope)
self.threshold = K.cast_to_floatx(threshold)
def call(self, inputs):
# alpha is used for leaky relu slope in activations instead of
# negative_slope.
return K.relu(inputs,
alpha=self.negative_slope,
max_value=self.max_value,
threshold=self.threshold)
def get_config(self):
config = {
'max_value': self.max_value,
'negative_slope': self.negative_slope,
'threshold': self.threshold
}
base_config = super(ReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
|
|
"""
A place for internal code
Some things are more easily handled in Python.
"""
import ast
import re
import sys
import platform
from .multiarray import dtype, array, ndarray
try:
import ctypes
except ImportError:
ctypes = None
IS_PYPY = platform.python_implementation() == 'PyPy'
if sys.byteorder == 'little':
_nbo = '<'
else:
_nbo = '>'
def _makenames_list(adict, align):
allfields = []
for fname, obj in adict.items():
n = len(obj)
if not isinstance(obj, tuple) or n not in (2, 3):
raise ValueError("entry not a 2- or 3- tuple")
if n > 2 and obj[2] == fname:
continue
num = int(obj[1])
if num < 0:
raise ValueError("invalid offset.")
format = dtype(obj[0], align=align)
if n > 2:
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
# sort by offsets
allfields.sort(key=lambda x: x[2])
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
try:
names = adict[-1]
except KeyError:
names = None
if names is None:
names, formats, offsets, titles = _makenames_list(adict, align)
else:
formats = []
offsets = []
titles = []
for name in names:
res = adict[name]
formats.append(res[0])
offsets.append(res[1])
if len(res) > 2:
titles.append(res[2])
else:
titles.append(None)
return dtype({"names": names,
"formats": formats,
"offsets": offsets,
"titles": titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
fields = descriptor.fields
if fields is None:
subdtype = descriptor.subdtype
if subdtype is None:
if descriptor.metadata is None:
return descriptor.str
else:
new = descriptor.metadata.copy()
if new:
return (descriptor.str, new)
else:
return descriptor.str
else:
return (_array_descr(subdtype[0]), subdtype[1])
names = descriptor.names
ordered_fields = [fields[x] + (x,) for x in names]
result = []
offset = 0
for field in ordered_fields:
if field[1] > offset:
num = field[1] - offset
result.append(('', f'|V{num}'))
offset += num
elif field[1] < offset:
raise ValueError(
"dtype.descr is not defined for types with overlapping or "
"out-of-order fields")
if len(field) > 3:
name = (field[2], field[3])
else:
name = field[2]
if field[0].subdtype:
tup = (name, _array_descr(field[0].subdtype[0]),
field[0].subdtype[1])
else:
tup = (name, _array_descr(field[0]))
offset += field[0].itemsize
result.append(tup)
if descriptor.itemsize > offset:
num = descriptor.itemsize - offset
result.append(('', f'|V{num}'))
return result
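# A minimal illustrative sketch (not part of the original module): padding
# between fields shows up as unnamed void entries in the descriptor list
# produced above (this is what backs dtype.descr).
def _example_array_descr():
    dt = dtype({'names': ['a', 'b'],
                'formats': ['<i4', '<f8'],
                'offsets': [0, 8],
                'itemsize': 16})
    # expected: [('a', '<i4'), ('', '|V4'), ('b', '<f8')]
    return _array_descr(dt)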
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
return ndarray.__new__(subtype, shape, dtype)
# format_re was originally from numarray by J. Todd Miller
format_re = re.compile(r'(?P<order1>[<>|=]?)'
r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
r'(?P<order2>[<>|=]?)'
r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(r'\s*,\s*')
space_re = re.compile(r'\s+$')
# astr is a string (perhaps comma separated)
_convorder = {'=': _nbo}
def _commastring(astr):
startindex = 0
result = []
while startindex < len(astr):
mo = format_re.match(astr, pos=startindex)
try:
(order1, repeats, order2, dtype) = mo.groups()
except (TypeError, AttributeError):
raise ValueError(
f'format number {len(result)+1} of "{astr}" is not recognized'
) from None
startindex = mo.end()
# Separator or ending padding
if startindex < len(astr):
if space_re.match(astr, pos=startindex):
startindex = len(astr)
else:
mo = sep_re.match(astr, pos=startindex)
if not mo:
raise ValueError(
'format number %d of "%s" is not recognized' %
(len(result)+1, astr))
startindex = mo.end()
if order2 == '':
order = order1
elif order1 == '':
order = order2
else:
order1 = _convorder.get(order1, order1)
order2 = _convorder.get(order2, order2)
if (order1 != order2):
raise ValueError(
'inconsistent byte-order specification %s and %s' %
(order1, order2))
order = order1
if order in ('|', '=', _nbo):
order = ''
dtype = order + dtype
if (repeats == ''):
newitem = dtype
else:
newitem = (dtype, ast.literal_eval(repeats))
result.append(newitem)
return result
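# A minimal illustrative sketch (not part of the original module): parsing a
# comma-separated format string into plain type strings and (type, shape) pairs.
def _example_commastring():
    # 'i4, (2,3)f8' -> ['i4', ('f8', (2, 3))]
    return _commastring('i4, (2,3)f8')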
class dummy_ctype:
def __init__(self, cls):
self._cls = cls
def __mul__(self, other):
return self
def __call__(self, *other):
return self._cls(other)
def __eq__(self, other):
return self._cls == other._cls
def __ne__(self, other):
return self._cls != other._cls
def _getintp_ctype():
val = _getintp_ctype.cache
if val is not None:
return val
if ctypes is None:
import numpy as np
val = dummy_ctype(np.intp)
else:
char = dtype('p').char
if char == 'i':
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes:
def cast(self, num, obj):
return num.value
class c_void_p:
def __init__(self, ptr):
self.value = ptr
class _ctypes:
def __init__(self, array, ptr=None):
self._arr = array
if ctypes:
self._ctypes = ctypes
self._data = self._ctypes.c_void_p(ptr)
else:
# fake a pointer-like object that holds onto the reference
self._ctypes = _missing_ctypes()
self._data = self._ctypes.c_void_p(ptr)
self._data._objects = array
if self._arr.ndim == 0:
self._zerod = True
else:
self._zerod = False
def data_as(self, obj):
"""
Return the data pointer cast to a particular c-types object.
For example, calling ``self._as_parameter_`` is equivalent to
``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
pointer to a ctypes array of floating-point data:
``self.data_as(ctypes.POINTER(ctypes.c_double))``.
The returned pointer will keep a reference to the array.
"""
# _ctypes.cast function causes a circular reference of self._data in
# self._data._objects. Attributes of self._data cannot be released
# until gc.collect is called. Make a copy of the pointer first then let
# it hold the array reference. This is a workaround to circumvent the
# CPython bug https://bugs.python.org/issue12836
ptr = self._ctypes.cast(self._data, obj)
ptr._arr = self._arr
return ptr
def shape_as(self, obj):
"""
Return the shape tuple as an array of some other c-types
type. For example: ``self.shape_as(ctypes.c_short)``.
"""
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.shape)
def strides_as(self, obj):
"""
Return the strides tuple as an array of some other
c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
"""
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.strides)
@property
def data(self):
"""
A pointer to the memory area of the array as a Python integer.
This memory area may contain data that is not aligned, or not in correct
byte-order. The memory area may not even be writeable. The array
flags and data-type of this array should be respected when passing this
attribute to arbitrary C-code to avoid trouble that can include Python
crashing. User Beware! The value of this attribute is exactly the same
as ``self._array_interface_['data'][0]``.
Note that unlike ``data_as``, a reference will not be kept to the array:
code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
pointer to a deallocated array, and should be spelt
``(a + b).ctypes.data_as(ctypes.c_void_p)``
"""
return self._data.value
@property
def shape(self):
"""
(c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the C-integer corresponding to ``dtype('p')`` on this
platform. This base-type could be `ctypes.c_int`, `ctypes.c_long`, or
`ctypes.c_longlong` depending on the platform.
The c_intp type is defined accordingly in `numpy.ctypeslib`.
The ctypes array contains the shape of the underlying array.
"""
return self.shape_as(_getintp_ctype())
@property
def strides(self):
"""
(c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array.
"""
return self.strides_as(_getintp_ctype())
@property
def _as_parameter_(self):
"""
Overrides the ctypes semi-magic method
Enables `c_func(some_array.ctypes)`
"""
return self.data_as(ctypes.c_void_p)
# kept for compatibility
get_data = data.fget
get_shape = shape.fget
get_strides = strides.fget
get_as_parameter = _as_parameter_.fget
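# A minimal illustrative sketch (not part of the original module): accessing an
# array's memory through the .ctypes attribute backed by the class above.
# Requires the ctypes module; numpy is imported locally to keep this self-contained.
def _example_ctypes_access():
    import ctypes
    import numpy as np
    a = np.arange(3.0)
    ptr = a.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    first = ptr[0]                                        # 0.0; ptr keeps 'a' alive
    shape = tuple(a.ctypes.shape_as(ctypes.c_longlong))   # (3,)
    return first, shape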
def _newnames(datatype, order):
"""
Given a datatype and an order object, return a new names tuple, with the
order indicated
"""
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
seen = set()
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
if name in seen:
raise ValueError(f"duplicate field name: {name}") from None
else:
raise ValueError(f"unknown field name: {name}") from None
seen.add(name)
return tuple(list(order) + nameslist)
raise ValueError(f"unsupported order value: {order}")
def _copy_fields(ary):
"""Return copy of structured array with padding between fields removed.
Parameters
----------
ary : ndarray
Structured array from which to remove padding bytes
Returns
-------
ary_copy : ndarray
Copy of ary with padding bytes removed
"""
dt = ary.dtype
copy_dtype = {'names': dt.names,
'formats': [dt.fields[name][0] for name in dt.names]}
return array(ary, dtype=copy_dtype, copy=True)
def _getfield_is_safe(oldtype, newtype, offset):
""" Checks safety of getfield for object arrays.
As in _view_is_safe, we need to check that memory containing objects is not
reinterpreted as a non-object datatype and vice versa.
Parameters
----------
oldtype : data-type
Data type of the original ndarray.
newtype : data-type
Data type of the field being accessed by ndarray.getfield
offset : int
Offset of the field being accessed by ndarray.getfield
Raises
------
TypeError
If the field access is invalid
"""
if newtype.hasobject or oldtype.hasobject:
if offset == 0 and newtype == oldtype:
return
if oldtype.names is not None:
for name in oldtype.names:
if (oldtype.fields[name][1] == offset and
oldtype.fields[name][0] == newtype):
return
raise TypeError("Cannot get/set field of an object array")
return
def _view_is_safe(oldtype, newtype):
""" Checks safety of a view involving object arrays, for example when
doing::
np.zeros(10, dtype=oldtype).view(newtype)
Parameters
----------
oldtype : data-type
Data type of original ndarray
newtype : data-type
Data type of the view
Raises
------
TypeError
If the new type is incompatible with the old type.
"""
# if the types are equivalent, there is no problem.
# for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
if oldtype == newtype:
return
if newtype.hasobject or oldtype.hasobject:
raise TypeError("Cannot change data-type for object array.")
return
# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype
_pep3118_native_map = {
'?': '?',
'c': 'S1',
'b': 'b',
'B': 'B',
'h': 'h',
'H': 'H',
'i': 'i',
'I': 'I',
'l': 'l',
'L': 'L',
'q': 'q',
'Q': 'Q',
'e': 'e',
'f': 'f',
'd': 'd',
'g': 'g',
'Zf': 'F',
'Zd': 'D',
'Zg': 'G',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
_pep3118_standard_map = {
'?': '?',
'c': 'S1',
'b': 'b',
'B': 'B',
'h': 'i2',
'H': 'u2',
'i': 'i4',
'I': 'u4',
'l': 'i4',
'L': 'u4',
'q': 'i8',
'Q': 'u8',
'e': 'f2',
'f': 'f',
'd': 'd',
'Zf': 'F',
'Zd': 'D',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
_pep3118_unsupported_map = {
'u': 'UCS-2 strings',
'&': 'pointers',
't': 'bitfields',
'X': 'function pointers',
}
class _Stream:
def __init__(self, s):
self.s = s
self.byteorder = '@'
def advance(self, n):
res = self.s[:n]
self.s = self.s[n:]
return res
def consume(self, c):
if self.s[:len(c)] == c:
self.advance(len(c))
return True
return False
def consume_until(self, c):
if callable(c):
i = 0
while i < len(self.s) and not c(self.s[i]):
i = i + 1
return self.advance(i)
else:
i = self.s.index(c)
res = self.advance(i)
self.advance(len(c))
return res
@property
def next(self):
return self.s[0]
def __bool__(self):
return bool(self.s)
def _dtype_from_pep3118(spec):
stream = _Stream(spec)
dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
return dtype
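# A minimal illustrative sketch (not part of the original module): round-trip a
# structured dtype through the PEP 3118 format string that numpy itself emits
# for the array's buffer.
def _example_dtype_from_pep3118():
    import numpy as np
    a = np.zeros(2, dtype=[('x', '<i4'), ('y', '<f8')])
    spec = memoryview(a).format       # e.g. 'T{<i:x:<d:y:}' (exact form may vary)
    return _dtype_from_pep3118(spec)  # expected to be equivalent to a.dtype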
def __dtype_from_pep3118(stream, is_subdtype):
field_spec = dict(
names=[],
formats=[],
offsets=[],
itemsize=0
)
offset = 0
common_alignment = 1
is_padding = False
# Parse spec
while stream:
value = None
# End of structure, bail out to upper level
if stream.consume('}'):
break
# Sub-arrays (1)
shape = None
if stream.consume('('):
shape = stream.consume_until(')')
shape = tuple(map(int, shape.split(',')))
# Byte order
if stream.next in ('@', '=', '<', '>', '^', '!'):
byteorder = stream.advance(1)
if byteorder == '!':
byteorder = '>'
stream.byteorder = byteorder
# Byte order characters also control native vs. standard type sizes
if stream.byteorder in ('@', '^'):
type_map = _pep3118_native_map
type_map_chars = _pep3118_native_typechars
else:
type_map = _pep3118_standard_map
type_map_chars = _pep3118_standard_typechars
# Item sizes
itemsize_str = stream.consume_until(lambda c: not c.isdigit())
if itemsize_str:
itemsize = int(itemsize_str)
else:
itemsize = 1
# Data types
is_padding = False
if stream.consume('T{'):
value, align = __dtype_from_pep3118(
stream, is_subdtype=True)
elif stream.next in type_map_chars:
if stream.next == 'Z':
typechar = stream.advance(2)
else:
typechar = stream.advance(1)
is_padding = (typechar == 'x')
dtypechar = type_map[typechar]
if dtypechar in 'USV':
dtypechar += '%d' % itemsize
itemsize = 1
numpy_byteorder = {'@': '=', '^': '='}.get(
stream.byteorder, stream.byteorder)
value = dtype(numpy_byteorder + dtypechar)
align = value.alignment
elif stream.next in _pep3118_unsupported_map:
desc = _pep3118_unsupported_map[stream.next]
raise NotImplementedError(
"Unrepresentable PEP 3118 data type {!r} ({})"
.format(stream.next, desc))
else:
raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)
#
# Native alignment may require padding
#
        # Here we assume that the presence of a '@' character implies
# that the start of the array is *already* aligned.
#
extra_offset = 0
if stream.byteorder == '@':
start_padding = (-offset) % align
intra_padding = (-value.itemsize) % align
offset += start_padding
if intra_padding != 0:
if itemsize > 1 or (shape is not None and _prod(shape) > 1):
# Inject internal padding to the end of the sub-item
value = _add_trailing_padding(value, intra_padding)
else:
# We can postpone the injection of internal padding,
# as the item appears at most once
extra_offset += intra_padding
# Update common alignment
common_alignment = _lcm(align, common_alignment)
# Convert itemsize to sub-array
if itemsize != 1:
value = dtype((value, (itemsize,)))
# Sub-arrays (2)
if shape is not None:
value = dtype((value, shape))
# Field name
if stream.consume(':'):
name = stream.consume_until(':')
else:
name = None
if not (is_padding and name is None):
if name is not None and name in field_spec['names']:
raise RuntimeError(f"Duplicate field name '{name}' in PEP3118 format")
field_spec['names'].append(name)
field_spec['formats'].append(value)
field_spec['offsets'].append(offset)
offset += value.itemsize
offset += extra_offset
field_spec['itemsize'] = offset
# extra final padding for aligned types
if stream.byteorder == '@':
field_spec['itemsize'] += (-offset) % common_alignment
# Check if this was a simple 1-item type, and unwrap it
if (field_spec['names'] == [None]
and field_spec['offsets'][0] == 0
and field_spec['itemsize'] == field_spec['formats'][0].itemsize
and not is_subdtype):
ret = field_spec['formats'][0]
else:
_fix_names(field_spec)
ret = dtype(field_spec)
# Finished
return ret, common_alignment
def _fix_names(field_spec):
""" Replace names which are None with the next unused f%d name """
names = field_spec['names']
for i, name in enumerate(names):
if name is not None:
continue
j = 0
while True:
name = f'f{j}'
if name not in names:
break
j = j + 1
names[i] = name
def _add_trailing_padding(value, padding):
"""Inject the specified number of padding bytes at the end of a dtype"""
if value.fields is None:
field_spec = dict(
names=['f0'],
formats=[value],
offsets=[0],
itemsize=value.itemsize
)
else:
fields = value.fields
names = value.names
field_spec = dict(
names=names,
formats=[fields[name][0] for name in names],
offsets=[fields[name][1] for name in names],
itemsize=value.itemsize
)
field_spec['itemsize'] += padding
return dtype(field_spec)
def _prod(a):
p = 1
for x in a:
p *= x
return p
def _gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
while b:
a, b = b, a % b
return a
def _lcm(a, b):
return a // _gcd(a, b) * b
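# Illustrative sketch (not part of the original module; assumes `dtype` is
# numpy's dtype, imported elsewhere in this file): a bare format code unwraps
# to a plain dtype, e.g.
#
#     _dtype_from_pep3118("<d")   # -> dtype('<f8')
#
# while a struct spec such as "=T{i:x:d:y:}" yields a structured dtype with
# fields 'x' (int32) and 'y' (float64); with the default '@' byte order the
# alignment-padding rules handled above also apply.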
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
""" Format the error message for when __array_ufunc__ gives up. """
args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
['{}={!r}'.format(k, v)
for k, v in kwargs.items()])
args = inputs + kwargs.get('out', ())
types_string = ', '.join(repr(type(arg).__name__) for arg in args)
return ('operand type(s) all returned NotImplemented from '
'__array_ufunc__({!r}, {!r}, {}): {}'
.format(ufunc, method, args_string, types_string))
def array_function_errmsg_formatter(public_api, types):
""" Format the error message for when __array_ufunc__ gives up. """
func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
return ("no implementation found for '{}' on types that implement "
'__array_function__: {}'.format(func_name, list(types)))
def _ufunc_doc_signature_formatter(ufunc):
"""
Builds a signature string which resembles PEP 457
This is used to construct the first line of the docstring
"""
# input arguments are simple
if ufunc.nin == 1:
in_args = 'x'
else:
in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))
# output arguments are both keyword or positional
if ufunc.nout == 0:
out_args = ', /, out=()'
elif ufunc.nout == 1:
out_args = ', /, out=None'
else:
out_args = '[, {positional}], / [, out={default}]'.format(
positional=', '.join(
'out{}'.format(i+1) for i in range(ufunc.nout)),
default=repr((None,)*ufunc.nout)
)
# keyword only args depend on whether this is a gufunc
kwargs = (
", casting='same_kind'"
", order='K'"
", dtype=None"
", subok=True"
"[, signature"
", extobj]"
)
if ufunc.signature is None:
kwargs = ", where=True" + kwargs
# join all the parts together
return '{name}({in_args}{out_args}, *{kwargs})'.format(
name=ufunc.__name__,
in_args=in_args,
out_args=out_args,
kwargs=kwargs
)
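# Illustrative sketch (not part of the original module): for a plain binary
# ufunc such as np.add (nin=2, nout=1, signature=None) the formatter above
# produces a first docstring line roughly like
#
#     add(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K',
#         dtype=None, subok=True[, signature, extobj])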
def npy_ctypes_check(cls):
# determine if a class comes from ctypes, in order to work around
# a bug in the buffer protocol for those objects, bpo-10746
try:
        # ctypes classes are new-style, so they have an __mro__. This probably
        # fails for ctypes classes with multiple inheritance.
if IS_PYPY:
# (..., _ctypes.basics._CData, Bufferable, object)
ctype_base = cls.__mro__[-3]
else:
            # (..., _ctypes._CData, object)
ctype_base = cls.__mro__[-2]
# right now, they're part of the _ctypes module
return '_ctypes' in ctype_base.__module__
except Exception:
return False
class recursive:
'''
A decorator class for recursive nested functions.
Naive recursive nested functions hold a reference to themselves:
def outer(*args):
def stringify_leaky(arg0, *arg1):
if len(arg1) > 0:
return stringify_leaky(*arg1) # <- HERE
return str(arg0)
stringify_leaky(*args)
This design pattern creates a reference cycle that is difficult for a
garbage collector to resolve. The decorator class prevents the
cycle by passing the nested function in as an argument `self`:
def outer(*args):
@recursive
def stringify(self, arg0, *arg1):
if len(arg1) > 0:
return self(*arg1)
return str(arg0)
stringify(*args)
'''
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
return self.func(self, *args, **kwargs)
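# Illustrative usage (a sketch, not part of the original module): the nested
# function receives itself as `self`, so no closure cell keeps it alive.
#
#     def nested_sum(*args):
#         @recursive
#         def _sum(self, arg0, *rest):
#             if rest:
#                 return arg0 + self(*rest)
#             return arg0
#         return _sum(*args)
#
#     nested_sum(1, 2, 3)   # -> 6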
|
|
"""
Tests for scalar Timedelta arithmetic ops
"""
from datetime import (
datetime,
timedelta,
)
import operator
import numpy as np
import pytest
from pandas.errors import OutOfBoundsTimedelta
import pandas as pd
from pandas import (
NaT,
Timedelta,
Timestamp,
offsets,
)
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
# datetime + Timedelta does _not_ call Timedelta.__radd__,
# so we get a datetime back instead of a Timestamp
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
def test_td_add_timestamp_overflow(self):
msg = "int too (large|big) to convert"
with pytest.raises(OverflowError, match=msg):
Timestamp("1700-01-01") + Timedelta(13 * 19999, unit="D")
with pytest.raises(OutOfBoundsTimedelta, match=msg):
Timestamp("1700-01-01") + timedelta(days=13 * 19999)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
msg = "unsupported operand type"
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError, match=msg):
td + other
with pytest.raises(TypeError, match=msg):
other + td
with pytest.raises(TypeError, match=msg):
td - other
with pytest.raises(TypeError, match=msg):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
exp = np.array(
[
now - Timedelta("1D"),
Timedelta("0D"),
np.timedelta64(2, "h") - Timedelta("1D"),
]
)
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
msg = r"unsupported operand type\(s\) for \-: 'Timedelta' and 'Timestamp'"
with pytest.raises(TypeError, match=msg):
Timedelta("1D") - arr
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedeltalike_object_dtype_array(self, op):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D")])
exp = np.array([now + Timedelta("1D"), Timedelta("2D")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
# TODO: moved from index tests following #24365, may need de-duplication
def test_ops_ndarray(self):
td = Timedelta("1 day")
# timedelta, timedelta
other = pd.to_timedelta(["1 day"]).values
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td + np.array([1])
msg = "|".join(
[
(
r"unsupported operand type\(s\) for \+: 'numpy.ndarray' "
"and 'Timedelta'"
),
"Concatenation operation is not implemented for NumPy arrays",
]
)
with pytest.raises(TypeError, match=msg):
np.array([1]) + td
expected = pd.to_timedelta(["0 days"]).values
tm.assert_numpy_array_equal(td - other, expected)
tm.assert_numpy_array_equal(-other + td, expected)
msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td - np.array([1])
msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) - td
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
msg = (
"ufunc '?multiply'? cannot use operands with types "
r"dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
with pytest.raises(TypeError, match=msg):
other * td
tm.assert_numpy_array_equal(td / other, np.array([1], dtype=np.float64))
tm.assert_numpy_array_equal(other / td, np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(["2000-01-01"]).values
expected = pd.to_datetime(["2000-01-02"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(["1999-12-31"]).values
tm.assert_numpy_array_equal(-td + other, expected)
tm.assert_numpy_array_equal(other - td, expected)
class TestTimedeltaMultiplicationDivision:
"""
Tests for Timedelta methods:
__mul__, __rmul__,
__div__, __rdiv__,
__truediv__, __rtruediv__,
__floordiv__, __rfloordiv__,
__mod__, __rmod__,
__divmod__, __rdivmod__
"""
# ---------------------------------------------------------------
# Timedelta.__mul__, __rmul__
@pytest.mark.parametrize(
"td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")]
)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nat(self, op, td_nat):
# GH#19819
td = Timedelta(10, unit="d")
typs = "|".join(["numpy.timedelta64", "NaTType", "Timedelta"])
msg = "|".join(
[
rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'",
r"ufunc '?multiply'? cannot use operands with types",
]
)
with pytest.raises(TypeError, match=msg):
op(td, td_nat)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nan(self, op, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = op(td, nan)
assert result is NaT
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_scalar(self, op):
# GH#19738
td = Timedelta(minutes=3)
result = op(td, 2)
assert result == Timedelta(minutes=6)
result = op(td, 1.5)
assert result == Timedelta(minutes=4, seconds=30)
assert op(td, np.nan) is NaT
assert op(-1, td).value == -1 * td.value
assert op(-1.0, td).value == -1.0 * td.value
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
# timedelta * datetime is gibberish
op(td, Timestamp(2016, 1, 2))
with pytest.raises(TypeError, match=msg):
# invalid multiply with another timedelta
op(td, td)
# ---------------------------------------------------------------
# Timedelta.__div__, __truediv__
def test_td_div_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / offsets.Hour(1)
assert result == 240
assert td / td == 1
assert td / np.timedelta64(60, "h") == 4
assert np.isnan(td / NaT)
def test_td_div_td64_non_nano(self):
# truediv
td = Timedelta("1 days 2 hours 3 ns")
result = td / np.timedelta64(1, "D")
assert result == td.value / (86400 * 10 ** 9)
result = td / np.timedelta64(1, "s")
assert result == td.value / 10 ** 9
result = td / np.timedelta64(1, "ns")
assert result == td.value
# floordiv
td = Timedelta("1 days 2 hours 3 ns")
result = td // np.timedelta64(1, "D")
assert result == 1
result = td // np.timedelta64(1, "s")
assert result == 93600
result = td // np.timedelta64(1, "ns")
assert result == td.value
def test_td_div_numeric_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / 2
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
result = td / 5
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
@pytest.mark.parametrize(
"nan",
[
np.nan,
np.float64("NaN"),
float("nan"),
],
)
def test_td_div_nan(self, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = td / nan
assert result is NaT
result = td // nan
assert result is NaT
# ---------------------------------------------------------------
# Timedelta.__rdiv__
def test_td_rdiv_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = offsets.Hour(1) / td
assert result == 1 / 240.0
assert np.timedelta64(60, "h") / td == 0.25
def test_td_rdiv_na_scalar(self):
# GH#31869 None gets cast to NaT
td = Timedelta(10, unit="d")
result = NaT / td
assert np.isnan(result)
result = None / td
assert np.isnan(result)
result = np.timedelta64("NaT") / td
assert np.isnan(result)
msg = r"unsupported operand type\(s\) for /: 'numpy.datetime64' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.datetime64("NaT") / td
msg = r"unsupported operand type\(s\) for /: 'float' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.nan / td
def test_td_rdiv_ndarray(self):
td = Timedelta(10, unit="d")
arr = np.array([td], dtype=object)
result = arr / td
expected = np.array([1], dtype=np.float64)
tm.assert_numpy_array_equal(result, expected)
arr = np.array([None])
result = arr / td
expected = np.array([np.nan])
tm.assert_numpy_array_equal(result, expected)
arr = np.array([np.nan], dtype=object)
msg = r"unsupported operand type\(s\) for /: 'float' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
arr / td
arr = np.array([np.nan], dtype=np.float64)
msg = "cannot use operands with types dtype"
with pytest.raises(TypeError, match=msg):
arr / td
# ---------------------------------------------------------------
# Timedelta.__floordiv__
def test_td_floordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
assert td // scalar == 1
assert -td // scalar.to_pytimedelta() == -2
assert (2 * td) // scalar.to_timedelta64() == 2
def test_td_floordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
assert td // np.nan is NaT
assert np.isnan(td // NaT)
assert np.isnan(td // np.timedelta64("NaT"))
def test_td_floordiv_offsets(self):
# GH#19738
td = Timedelta(hours=3, minutes=4)
assert td // offsets.Hour(1) == 3
assert td // offsets.Minute(2) == 92
def test_td_floordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
msg = "|".join(
[
r"Invalid dtype datetime64\[D\] for __floordiv__",
"'dtype' is an invalid keyword argument for this function",
r"ufunc '?floor_divide'? cannot use operands with types",
]
)
with pytest.raises(TypeError, match=msg):
td // np.datetime64("2016-01-01", dtype="datetime64[us]")
def test_td_floordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
expected = Timedelta(hours=1, minutes=32)
assert td // 2 == expected
assert td // 2.0 == expected
assert td // np.float64(2.0) == expected
assert td // np.int32(2.0) == expected
assert td // np.uint8(2.0) == expected
def test_td_floordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
# Array-like others
assert td // np.array(scalar.to_timedelta64()) == 1
res = (3 * td) // np.array([scalar.to_timedelta64()])
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
res = (10 * td) // np.array([scalar.to_timedelta64(), np.timedelta64("NaT")])
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_floordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
ser = pd.Series([1], dtype=np.int64)
res = td // ser
assert res.dtype.kind == "m"
# ---------------------------------------------------------------
# Timedelta.__rfloordiv__
def test_td_rfloordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# scalar others
# x // Timedelta is defined only for timedelta-like x. int-like,
# float-like, and date-like, in particular, should all either
# a) raise TypeError directly or
# b) return NotImplemented, following which the reversed
# operation will raise TypeError.
assert td.__rfloordiv__(scalar) == 1
assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2
assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0
def test_td_rfloordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert np.isnan(td.__rfloordiv__(NaT))
assert np.isnan(td.__rfloordiv__(np.timedelta64("NaT")))
def test_td_rfloordiv_offsets(self):
# GH#19738
assert offsets.Hour(1) // Timedelta(minutes=25) == 2
def test_td_rfloordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
dt64 = np.datetime64("2016-01-01", "us")
assert td.__rfloordiv__(dt64) is NotImplemented
msg = (
r"unsupported operand type\(s\) for //: 'numpy.datetime64' and 'Timedelta'"
)
with pytest.raises(TypeError, match=msg):
dt64 // td
def test_td_rfloordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert td.__rfloordiv__(np.nan) is NotImplemented
assert td.__rfloordiv__(3.5) is NotImplemented
assert td.__rfloordiv__(2) is NotImplemented
assert td.__rfloordiv__(np.float64(2.0)) is NotImplemented
assert td.__rfloordiv__(np.uint8(9)) is NotImplemented
assert td.__rfloordiv__(np.int32(2.0)) is NotImplemented
msg = r"unsupported operand type\(s\) for //: '.*' and 'Timedelta"
with pytest.raises(TypeError, match=msg):
np.float64(2.0) // td
with pytest.raises(TypeError, match=msg):
np.uint8(9) // td
with pytest.raises(TypeError, match=msg):
# deprecated GH#19761, enforced GH#29797
np.int32(2.0) // td
def test_td_rfloordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# Array-like others
assert td.__rfloordiv__(np.array(scalar.to_timedelta64())) == 1
res = td.__rfloordiv__(np.array([(3 * scalar).to_timedelta64()]))
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
arr = np.array([(10 * scalar).to_timedelta64(), np.timedelta64("NaT")])
res = td.__rfloordiv__(arr)
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_rfloordiv_intarray(self):
# deprecated GH#19761, enforced GH#29797
ints = np.array([1349654400, 1349740800, 1349827200, 1349913600]) * 10 ** 9
msg = "Invalid dtype"
with pytest.raises(TypeError, match=msg):
ints // Timedelta(1, unit="s")
def test_td_rfloordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
ser = pd.Series([1], dtype=np.int64)
res = td.__rfloordiv__(ser)
assert res is NotImplemented
msg = "Invalid dtype"
with pytest.raises(TypeError, match=msg):
# Deprecated GH#19761, enforced GH#29797
ser // td
# ----------------------------------------------------------------
# Timedelta.__mod__, __rmod__
def test_mod_timedeltalike(self):
# GH#19365
td = Timedelta(hours=37)
# Timedelta-like others
result = td % Timedelta(hours=6)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
result = td % timedelta(minutes=60)
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % NaT
assert result is NaT
def test_mod_timedelta64_nat(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64("NaT", "ns")
assert result is NaT
def test_mod_timedelta64(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64(2, "h")
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
def test_mod_offset(self):
# GH#19365
td = Timedelta(hours=37)
result = td % offsets.Hour(5)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=2)
def test_mod_numeric(self):
# GH#19365
td = Timedelta(hours=37)
# Numeric Others
result = td % 2
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % 1e12
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
result = td % int(1e12)
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
def test_mod_invalid(self):
# GH#19365
td = Timedelta(hours=37)
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
td % Timestamp("2018-01-22")
with pytest.raises(TypeError, match=msg):
td % []
def test_rmod_pytimedelta(self):
# GH#19365
td = Timedelta(minutes=3)
result = timedelta(minutes=4) % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=1)
def test_rmod_timedelta64(self):
# GH#19365
td = Timedelta(minutes=3)
result = np.timedelta64(5, "m") % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=2)
def test_rmod_invalid(self):
# GH#19365
td = Timedelta(minutes=3)
msg = "unsupported operand"
with pytest.raises(TypeError, match=msg):
Timestamp("2018-01-22") % td
with pytest.raises(TypeError, match=msg):
15 % td
with pytest.raises(TypeError, match=msg):
16.0 % td
msg = "Invalid dtype int"
with pytest.raises(TypeError, match=msg):
np.array([22, 24]) % td
# ----------------------------------------------------------------
# Timedelta.__divmod__, __rdivmod__
def test_divmod_numeric(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, 53 * 3600 * 1e9)
assert result[0] == Timedelta(1, unit="ns")
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=1)
assert result
result = divmod(td, np.nan)
assert result[0] is NaT
assert result[1] is NaT
def test_divmod(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
result = divmod(td, 54)
assert result[0] == Timedelta(hours=1)
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(0)
result = divmod(td, NaT)
assert np.isnan(result[0])
assert result[1] is NaT
def test_divmod_offset(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, offsets.Hour(-4))
assert result[0] == -14
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=-2)
def test_divmod_invalid(self):
# GH#19365
td = Timedelta(days=2, hours=6)
msg = r"unsupported operand type\(s\) for //: 'Timedelta' and 'Timestamp'"
with pytest.raises(TypeError, match=msg):
divmod(td, Timestamp("2018-01-22"))
def test_rdivmod_pytimedelta(self):
# GH#19365
result = divmod(timedelta(days=2, hours=6), Timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
def test_rdivmod_offset(self):
result = divmod(offsets.Hour(54), Timedelta(hours=-4))
assert result[0] == -14
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=-2)
def test_rdivmod_invalid(self):
# GH#19365
td = Timedelta(minutes=3)
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
divmod(Timestamp("2018-01-22"), td)
with pytest.raises(TypeError, match=msg):
divmod(15, td)
with pytest.raises(TypeError, match=msg):
divmod(16.0, td)
msg = "Invalid dtype int"
with pytest.raises(TypeError, match=msg):
divmod(np.array([22, 24]), td)
# ----------------------------------------------------------------
@pytest.mark.parametrize(
"op", [operator.mul, ops.rmul, operator.truediv, ops.rdiv, ops.rsub]
)
@pytest.mark.parametrize(
"arr",
[
np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")]),
np.array([Timestamp.now(), Timedelta("1D")]),
],
)
def test_td_op_timedelta_timedeltalike_array(self, op, arr):
msg = "unsupported operand type|cannot use operands with types"
with pytest.raises(TypeError, match=msg):
op(arr, Timedelta("1D"))
class TestTimedeltaComparison:
def test_compare_tick(self, tick_classes):
cls = tick_classes
off = cls(4)
td = off.delta
assert isinstance(td, Timedelta)
assert td == off
assert not td != off
assert td <= off
assert td >= off
assert not td < off
assert not td > off
assert not td == 2 * off
assert td != 2 * off
assert td <= 2 * off
assert td < 2 * off
assert not td >= 2 * off
assert not td > 2 * off
def test_comparison_object_array(self):
# analogous to GH#15183
td = Timedelta("2 days")
other = Timedelta("3 hours")
arr = np.array([other, td], dtype=object)
res = arr == td
expected = np.array([False, True], dtype=bool)
assert (res == expected).all()
# 2D case
arr = np.array([[other, td], [td, other]], dtype=object)
res = arr != td
expected = np.array([[True, False], [False, True]], dtype=bool)
assert res.shape == expected.shape
assert (res == expected).all()
def test_compare_timedelta_ndarray(self):
# GH#11835
periods = [Timedelta("0 days 01:00:00"), Timedelta("0 days 01:00:00")]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_compare_td64_ndarray(self):
        # GH#33441
arr = np.arange(5).astype("timedelta64[ns]")
td = Timedelta(arr[1])
expected = np.array([False, True, False, False, False], dtype=bool)
result = td == arr
tm.assert_numpy_array_equal(result, expected)
result = arr == td
tm.assert_numpy_array_equal(result, expected)
result = td != arr
tm.assert_numpy_array_equal(result, ~expected)
result = arr != td
tm.assert_numpy_array_equal(result, ~expected)
@pytest.mark.skip(reason="GH#20829 is reverted until after 0.24.0")
def test_compare_custom_object(self):
"""
Make sure non supported operations on Timedelta returns NonImplemented
and yields to other operand (GH#20829).
"""
class CustomClass:
def __init__(self, cmp_result=None):
self.cmp_result = cmp_result
def generic_result(self):
if self.cmp_result is None:
return NotImplemented
else:
return self.cmp_result
def __eq__(self, other):
return self.generic_result()
def __gt__(self, other):
return self.generic_result()
t = Timedelta("1s")
assert not (t == "string")
assert not (t == 1)
assert not (t == CustomClass())
assert not (t == CustomClass(cmp_result=False))
assert t < CustomClass(cmp_result=True)
assert not (t < CustomClass(cmp_result=False))
assert t == CustomClass(cmp_result=True)
@pytest.mark.parametrize("val", ["string", 1])
def test_compare_unknown_type(self, val):
# GH#20829
t = Timedelta("1s")
msg = "not supported between instances of 'Timedelta' and '(int|str)'"
with pytest.raises(TypeError, match=msg):
t >= val
with pytest.raises(TypeError, match=msg):
t > val
with pytest.raises(TypeError, match=msg):
t <= val
with pytest.raises(TypeError, match=msg):
t < val
def test_ops_notimplemented():
class Other:
pass
other = Other()
td = Timedelta("1 day")
assert td.__add__(other) is NotImplemented
assert td.__sub__(other) is NotImplemented
assert td.__truediv__(other) is NotImplemented
assert td.__mul__(other) is NotImplemented
assert td.__floordiv__(other) is NotImplemented
def test_ops_error_str():
# GH#13624
td = Timedelta("1 day")
for left, right in [(td, "a"), ("a", td)]:
msg = "|".join(
[
"unsupported operand type",
r'can only concatenate str \(not "Timedelta"\) to str',
"must be str, not Timedelta",
]
)
with pytest.raises(TypeError, match=msg):
left + right
msg = "not supported between instances of"
with pytest.raises(TypeError, match=msg):
left > right
assert not left == right
assert left != right
|
|
import lrparsing
from lrparsing import Keyword, List, Prio, Ref, Token, Opt
from arithmeticExpressions import ALitteral, Addition, Subtraction, Product, Division, EuclideanDivision, Modulo, \
Power, Func, UndefinedLitteral, Min, Max, globalsHeightExpression, globalsWidthExpression, globalsFpsExpression
from triggerExpressions import BLitteral, Timer, eLock, \
Equals, GreaterThan, LowerThan, GeqThan, LeqThan, \
NotEquals, And, Or, Not, Is, AnyEval, RandomEval, Del, \
SelectMinEval, SelectMaxEval, UniqueEval, PropertyTriggerExpression, \
EventTriggerExpression, SpriteTriggerExpression, TextTriggerExpression, \
LineTriggerExpression, OvalTriggerExpression, RectTriggerExpression, PolygonTriggerExpression
from database import Variable
from keywords import KEYWORD_ID, KEYWORD_FILENAME, KEYWORD_COLOR, KEYWORD_FONT_NAME, KEYWORD_FONT_SIZE, KEYWORD_H, \
KEYWORD_TEXT, KEYWORD_WIDTH, KEYWORD_W, KEYWORD_X_INT, KEYWORD_X, KEYWORD_Y_INT, KEYWORD_Y, KEYWORD_Z, \
KEYWORD_ROTATE, KEYWORD_SCALE
from utils.mathutils import sign
from random import random, randint
from math import cos, sin, tan, exp, log, floor, ceil, acos, asin, atan, cosh, sinh, tanh, acosh, atanh, asinh
class TriggerParser(lrparsing.Grammar):
class T(lrparsing.TokenRegistry):
integer = Token(re='[0-9]+')
float = Token(re='[0-9]+\.[0-9]+')
string = Token(re='\'[^\']*\'')
true = Token('true')
false = Token('false')
variable = Token(re='[A-Z][A-Z_0-9]*')
uvariable = Token('_')
prop = Token(re='p[A-Z][A-Za-z_0-9]*')
event = Token(re='e[A-Z][A-Za-z_0-9]*')
graphicsSprite = Token(re='gs[A-Z][A-Za-z_0-9]*')
graphicsLine = Token(re='gl[A-Z][A-Za-z_0-9]*')
graphicsOval = Token(re='go[A-Z][A-Za-z_0-9]*')
graphicsRect = Token(re='gr[A-Z][A-Za-z_0-9]*')
graphicsPolygon = Token(re='gp[A-Z][A-Za-z_0-9]*')
graphicsText = Token(re='gt[A-Z][A-Za-z_0-9]*')
idkw = Token('id')
coordX = Token('x')
coordY = Token('y')
coordZ = Token('z')
coordXInt = Token(re='x[1-9][0-9]*')
coordYInt = Token(re='y[1-9][0-9]*')
coordW = Token('w')
coordH = Token('h')
rotate = Token('rotate')
scale = Token('scale')
fileName = Token('fileName')
color = Token('color')
width = Token('width')
text = Token('text')
fontName = Token('fontName')
fontSize = Token('fontSize')
cosf = Token('cos')
sinf = Token('sin')
tanf = Token('tan')
expf = Token('exp')
logf = Token('log')
absf = Token('abs')
signf = Token('sign')
floorf = Token('floor')
ceilf = Token('ceil')
roundf = Token('round')
acosf = Token('acos')
asinf = Token('asin')
atanf = Token('atan')
chf = Token('ch')
shf = Token('sh')
thf = Token('th')
achf = Token('ach')
ashf = Token('ash')
athf = Token('ath')
rand = Token('rand')
randint = Token('randint')
lenf = Token('len')
minf = Token('min')
maxf = Token('max')
globalsKw = Token('globals')
globalsFpsKw = Token('fps')
globalsHeightKw = Token('screenHeight')
globalsWidthKw = Token('screenWidth')
elock = Keyword('eLock')
timer = Token('timer')
iskw = Token('is')
delkw = Token('del')
andkw = Token('and')
orkw = Token('or')
notkw = Token('not')
anyEval = Token('anyEval')
randomEval = Token('randomEval')
minEvalKw = Token('minEval')
maxEvalKw = Token('maxEval')
uniqueEval = Token('uniqueEval')
arithmExpr = Ref('arithmExpr')
boolExpr = Ref('boolExpr')
litExpr = T.true | T.false
timerExpr = T.timer + '(' + arithmExpr + ')'
eLockParameters = List(arithmExpr, Token(','), min=1)
eLockExpr = T.elock + '(' + arithmExpr + Opt(',' + eLockParameters) + ')'
parameter = Prio(T.variable, arithmExpr) | T.uvariable
namedParameterKW = arithmExpr | T.idkw | \
T.coordX | T.coordY | T.coordZ | \
T.coordXInt | T.coordYInt | \
T.coordH | T.coordW | \
T.rotate | T.scale | \
T.fileName | \
T.color | T.width | \
T.text | T.fontName | T.fontSize
namedParameter = namedParameterKW + '=' + parameter
parameters = \
Prio(List(parameter, Token(',')) + Opt(',' + List(namedParameter, Token(','))),
List(namedParameter, Token(',')))
parameterizedType = T.prop | T.event | T.graphicsSprite | T.graphicsText | T.graphicsLine | \
T.graphicsOval | T.graphicsRect | T.graphicsPolygon
parameterizedExpr = parameterizedType + '(' + parameters + ')'
compareArithmExpr = arithmExpr << (Token('==') | Token('>') | Token('<') | Token('<=') |
Token('>=') | Token('!=')) << arithmExpr
andExpr = boolExpr >> T.andkw >> boolExpr
orExpr = boolExpr >> T.orkw >> boolExpr
notExpr = T.notkw + boolExpr
isExpr = T.variable + T.iskw + arithmExpr
delExpr = T.delkw + T.variable
parExpr = '(' + boolExpr + ')'
anyEvalExpr = T.anyEval + parExpr
randomEvalExpr = T.randomEval + parExpr
minEvalExpr = T.minEvalKw + '[' + arithmExpr + ']' + parExpr
maxEvalExpr = T.maxEvalKw + '[' + arithmExpr + ']' + parExpr
uniqueEvalExpr = T.uniqueEval + parExpr
boolExpr = Prio(litExpr,
timerExpr,
eLockExpr,
parameterizedExpr,
parExpr,
isExpr,
delExpr,
compareArithmExpr,
notExpr,
andExpr,
orExpr,
anyEvalExpr,
randomEvalExpr,
minEvalExpr,
maxEvalExpr,
uniqueEvalExpr
)
addArithmExpr = arithmExpr << Token('+') << arithmExpr
minusArithmExpr = Opt(arithmExpr) << Token('-') << arithmExpr
multArithmExpr = arithmExpr << (Token('*') | Token('/') | Token('//') | Token('%')) << arithmExpr
powerArithmExpr = arithmExpr << Token('**') << arithmExpr
constantArithmExpr = Token('pi') | Token('e')
parArithmExpr = '(' + arithmExpr + ')'
unaryFuncArithmExpr = (T.cosf | T.sinf | T.tanf | T.expf | T.logf | T.absf | T.signf | T.floorf | T.ceilf | T.roundf
| T.acosf | T.asinf | T.atanf | T.shf | T.chf | T.thf | T.ashf | T.achf | T.athf | T.lenf
| T.rand | T.randint) \
+ parArithmExpr
binaryFuncArithmExpr = (T.minf | T.maxf) + '(' + arithmExpr + ',' + arithmExpr + ')'
globalsKeyWord = T.globalsFpsKw | T.globalsHeightKw | T.globalsWidthKw
globalsExpr = T.globalsKw + '(' + globalsKeyWord + ')'
arithmExpr = Prio(T.integer, T.float, T.variable, T.string, constantArithmExpr,
globalsExpr, parArithmExpr,
unaryFuncArithmExpr, binaryFuncArithmExpr,
powerArithmExpr, multArithmExpr, minusArithmExpr, addArithmExpr)
START = boolExpr
COMMENTS = ( # Allow C and Python comments
Token(re="#(?:[^\r\n]*(?:\r\n?|\n\r?))") |
Token(re="/[*](?:[^*]|[*][^/])*[*]/"))
@classmethod
def parse(cls, expr, tree_factory=None, on_error=None, log=None):
tree = super(TriggerParser, cls).parse(expr, tree_factory, on_error, log)
return cls.buildExpression(tree)
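    # Illustrative usage (a sketch, not part of the original file): parse()
    # runs the lrparsing grammar and then folds the parse tree into the
    # trigger-expression objects imported above, e.g.
    #     TriggerParser.parse("timer(10) and X is 3")
    # would build an And(Timer(...), Is(Variable('X'), ALitteral(3))) tree.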
@classmethod
def buildExpression(cls, tree):
rootName = tree[0]
def buildAnd():
a1 = cls.buildExpression((tree[1]))
a2 = cls.buildExpression((tree[3]))
return And(a1, a2)
def buildAnyEval():
expr = cls.buildExpression(tree[2])
return AnyEval(expr)
def buildArithmetic():
return cls.buildArithmeticExpression(tree)
def buildCompare():
a1 = cls.buildExpression(tree[1])
a2 = cls.buildExpression(tree[3])
if tree[2][1] == '==':
return Equals(a1, a2)
elif tree[2][1] == '>':
return GreaterThan(a1, a2)
elif tree[2][1] == '<':
return LowerThan(a1, a2)
elif tree[2][1] == '>=':
return GeqThan(a1, a2)
elif tree[2][1] == '<=':
return LeqThan(a1, a2)
elif tree[2][1] == '!=':
return NotEquals(a1, a2)
def buildDel():
variable = cls.buildExpression(tree[2])
return Del(variable)
def buildDoubleNext():
return cls.buildExpression(tree[2])
def buildElock():
priority = cls.buildExpression(tree[3])
if len(tree) >= 6:
args = cls.buildExpression(tree[5])
else:
args = []
return eLock(priority, args)
def buildELockParameters():
return [cls.buildExpression(arg) for arg in tree[1::2]]
def buildIs():
variable = cls.buildExpression(tree[1])
function = cls.buildExpression(tree[3])
return Is(variable, function)
def buildLitteral():
return BLitteral(tree[1][1] == 'true')
def buildMaxEvalExpr():
arithmExpr = cls.buildExpression(tree[3])
expr = cls.buildExpression(tree[5])
return SelectMaxEval(expr, arithmExpr)
def buildMinEvalExpr():
arithmExpr = cls.buildExpression(tree[3])
expr = cls.buildExpression(tree[5])
return SelectMinEval(expr, arithmExpr)
def buildNamedParameter():
name = cls.buildExpression(tree[1])
parameter = cls.buildExpression(tree[3])
return name, parameter
def buildNext():
return cls.buildExpression(tree[1])
def buildNot():
a1 = cls.buildExpression((tree[2]))
return Not(a1)
def buildOr():
a1 = cls.buildExpression((tree[1]))
a2 = cls.buildExpression((tree[3]))
return Or(a1, a2)
def buildParameterized():
exprType, exprValue = cls.buildExpression(tree[1])
exprTypeAction = {
TriggerParser.T.prop: (PropertyTriggerExpression, 1),
TriggerParser.T.event: (EventTriggerExpression, 1),
TriggerParser.T.graphicsSprite: (SpriteTriggerExpression, 2),
TriggerParser.T.graphicsLine: (LineTriggerExpression, 2),
TriggerParser.T.graphicsOval: (OvalTriggerExpression, 2),
TriggerParser.T.graphicsRect: (RectTriggerExpression, 2),
TriggerParser.T.graphicsPolygon: (PolygonTriggerExpression, 2),
TriggerParser.T.graphicsText: (TextTriggerExpression, 2)
}
clsCons, offset = exprTypeAction[exprType]
args, kwargs = cls.buildExpression(tree[3])
if offset > 0:
name = exprValue[offset:]
return clsCons(name, args, kwargs)
else:
return clsCons(args, kwargs)
def buildParameterizedType():
return tree[1][0], tree[1][1]
def buildParameters():
buildArgs = [cls.buildExpression(arg) for arg in tree[1::2]]
args = [arg for arg in buildArgs if not isinstance(arg, tuple)]
kwargs = {kwarg[0]: kwarg[1] for kwarg in buildArgs if isinstance(kwarg, tuple)}
return args, kwargs
def buildRandomEval():
expr = cls.buildExpression(tree[2])
return RandomEval(expr)
def buildTimer():
nbFrames = cls.buildExpression((tree[3]))
return Timer(nbFrames)
def buildUniqueEvalExpr():
expr = cls.buildExpression(tree[2])
return UniqueEval(expr)
def keywordColorValue():
return KEYWORD_COLOR
def keywordFileNameValue():
return KEYWORD_FILENAME
def keywordFontNameValue():
return KEYWORD_FONT_NAME
def keywordFontSizeValue():
return KEYWORD_FONT_SIZE
def keywordHValue():
return KEYWORD_H
def keywordIdValue():
return KEYWORD_ID
def keywordRotateValue():
return KEYWORD_ROTATE
def keywordScaleValue():
return KEYWORD_SCALE
def keywordTextValue():
return KEYWORD_TEXT
def keywordWidthValue():
return KEYWORD_WIDTH
def keywordWValue():
return KEYWORD_W
def keywordXIntValue():
value = int(tree[1][1:])
return KEYWORD_X_INT[value]
def keywordXValue():
return KEYWORD_X
def keywordYIntValue():
value = int(tree[1][1:])
return KEYWORD_Y_INT[value]
def keywordYValue():
return KEYWORD_Y
def keywordZValue():
return KEYWORD_Z
def unnamedVariableValue():
return UndefinedLitteral()
def value():
return tree[1]
def variableValue():
return Variable(tree[1])
booleanSymbols = {
TriggerParser.T.variable: variableValue,
TriggerParser.T.uvariable: unnamedVariableValue,
TriggerParser.T.idkw: keywordIdValue,
TriggerParser.T.coordX: keywordXValue,
TriggerParser.T.coordY: keywordYValue,
TriggerParser.T.coordZ: keywordZValue,
TriggerParser.T.coordXInt: keywordXIntValue,
TriggerParser.T.coordYInt: keywordYIntValue,
TriggerParser.T.coordW: keywordWValue,
TriggerParser.T.coordH: keywordHValue,
TriggerParser.T.rotate: keywordRotateValue,
TriggerParser.T.scale: keywordScaleValue,
TriggerParser.T.fileName: keywordFileNameValue,
TriggerParser.T.color: keywordColorValue,
TriggerParser.T.width: keywordWidthValue,
TriggerParser.T.text: keywordTextValue,
TriggerParser.T.fontName: keywordFontNameValue,
TriggerParser.T.fontSize: keywordFontSizeValue,
TriggerParser.arithmExpr: buildArithmetic,
TriggerParser.boolExpr: buildNext,
TriggerParser.litExpr: buildLitteral,
TriggerParser.timerExpr: buildTimer,
TriggerParser.eLockParameters: buildELockParameters,
TriggerParser.eLockExpr: buildElock,
TriggerParser.parameter: buildNext,
TriggerParser.namedParameterKW: buildNext,
TriggerParser.namedParameter: buildNamedParameter,
TriggerParser.parameters: buildParameters,
TriggerParser.parameterizedType: buildParameterizedType,
TriggerParser.parameterizedExpr: buildParameterized,
TriggerParser.compareArithmExpr: buildCompare,
TriggerParser.andExpr: buildAnd,
TriggerParser.orExpr: buildOr,
TriggerParser.notExpr: buildNot,
TriggerParser.isExpr: buildIs,
TriggerParser.delExpr: buildDel,
TriggerParser.parExpr: buildDoubleNext,
TriggerParser.anyEvalExpr: buildAnyEval,
TriggerParser.randomEvalExpr: buildRandomEval,
TriggerParser.minEvalExpr: buildMinEvalExpr,
TriggerParser.maxEvalExpr: buildMaxEvalExpr,
TriggerParser.uniqueEvalExpr: buildUniqueEvalExpr,
TriggerParser.parArithmExpr: buildArithmetic,
TriggerParser.START: buildNext,
}
return booleanSymbols[rootName]()
@classmethod
def buildArithmeticExpression(cls, tree):
rootName = tree[0]
def buildBinaryExpression():
a1 = cls.buildArithmeticExpression(tree[1])
a3 = cls.buildArithmeticExpression(tree[3])
if tree[2][1] == '+':
return Addition(a1, a3)
elif tree[2][1] == '-':
return Subtraction(a1, a3)
elif tree[2][1] == '*':
return Product(a1, a3)
elif tree[2][1] == '/':
return Division(a1, a3)
elif tree[2][1] == '//':
return EuclideanDivision(a1, a3)
elif tree[2][1] == '%':
return Modulo(a1, a3)
elif tree[2][1] == '**':
return Power(a1, a3)
def buildBinaryFunctionExpression():
x1 = cls.buildArithmeticExpression(tree[3])
x2 = cls.buildArithmeticExpression(tree[5])
if tree[1][1] == 'min':
return Min(x1, x2)
elif tree[1][1] == 'max':
return Max(x1, x2)
def buildConstant():
from math import pi, e
if tree[1][1] == 'pi':
value = pi
else:
value = e
return ALitteral(value)
def buildGlobalFpsKeyWord():
return globalsFpsExpression
def buildGlobalWidthKeyWord():
return globalsWidthExpression
def buildGlobalHeightKeyWord():
return globalsHeightExpression
def buildMinusExpression():
if len(tree) == 4:
return buildBinaryExpression()
else:
a1 = cls.buildArithmeticExpression(tree[2])
return Subtraction(ALitteral(0), a1)
def buildNext(i):
def _buildNext():
return cls.buildArithmeticExpression(tree[i])
return _buildNext
def buildUnaryFunctionExpression():
a = cls.buildArithmeticExpression(tree[2])
if tree[1][1] == 'cos':
return Func(a, cos)
elif tree[1][1] == 'sin':
return Func(a, sin)
elif tree[1][1] == 'tan':
return Func(a, tan)
elif tree[1][1] == 'acos':
return Func(a, acos)
elif tree[1][1] == 'asin':
return Func(a, asin)
elif tree[1][1] == 'atan':
return Func(a, atan)
elif tree[1][1] == 'ch':
return Func(a, cosh)
elif tree[1][1] == 'sh':
return Func(a, sinh)
elif tree[1][1] == 'th':
return Func(a, tanh)
            elif tree[1][1] == 'ach':
                return Func(a, acosh)
elif tree[1][1] == 'ash':
return Func(a, asinh)
elif tree[1][1] == 'ath':
return Func(a, atanh)
elif tree[1][1] == 'exp':
return Func(a, exp)
elif tree[1][1] == 'log':
return Func(a, log)
elif tree[1][1] == 'abs':
return Func(a, abs)
elif tree[1][1] == 'sign':
return Func(a, sign)
elif tree[1][1] == 'ceil':
return Func(a, ceil)
elif tree[1][1] == 'floor':
return Func(a, floor)
elif tree[1][1] == 'round':
return Func(a, round)
elif tree[1][1] == 'len':
return Func(a, len)
elif tree[1][1] == 'rand':
def _random(x):
return random() * x
return Func(a, _random)
elif tree[1][1] == 'randint':
def _randint(x):
return randint(0, x - 1)
return Func(a, _randint)
def intvalue():
return ALitteral(int(tree[1]))
def floatvalue():
return ALitteral(float(tree[1]))
def stringWithoutQuotes():
return ALitteral(tree[1][1:-1])
def variableValue():
return ALitteral(Variable(tree[1]))
arithmeticSymbols = {
TriggerParser.T.integer: intvalue,
TriggerParser.T.float: floatvalue,
TriggerParser.T.string: stringWithoutQuotes,
TriggerParser.T.variable: variableValue,
TriggerParser.T.globalsFpsKw: buildGlobalFpsKeyWord,
TriggerParser.T.globalsHeightKw: buildGlobalHeightKeyWord,
TriggerParser.T.globalsWidthKw: buildGlobalWidthKeyWord,
TriggerParser.arithmExpr: buildNext(1),
TriggerParser.addArithmExpr: buildBinaryExpression,
TriggerParser.minusArithmExpr: buildMinusExpression,
TriggerParser.multArithmExpr: buildBinaryExpression,
TriggerParser.powerArithmExpr: buildBinaryExpression,
TriggerParser.constantArithmExpr: buildConstant,
TriggerParser.parArithmExpr: buildNext(2),
TriggerParser.unaryFuncArithmExpr: buildUnaryFunctionExpression,
TriggerParser.binaryFuncArithmExpr: buildBinaryFunctionExpression,
TriggerParser.globalsKeyWord: buildNext(1),
TriggerParser.globalsExpr: buildNext(3)
}
return arithmeticSymbols[rootName]()
if __name__ == '__main__':
# print BooleanExpressionParser.pre_compile_grammar()
# from database import Property
# from triggerExpressions import BExpression
#
# Property.add('Test', [1, 2], {})
# Property.add('Test', [1, 3], {})
# Property.add('Test', [2, 4], {})
# Property.add('Test', [1, 5], {})
#
# expr = 'pTest(X,Y)'
# expr = BExpression(TriggerParser.parse(expr))
# print expr
# for evaluation in expr.eval(None):
# print evaluation
#
# print
#
# expr = 'anyEval(pTest(X,Y))'
# expr = BExpression(TriggerParser.parse(expr))
# print expr
# for evaluation in expr.eval(None):
# print evaluation
#
# print
#
# expr = 'randomEval(pTest(X,Y))'
# expr = BExpression(TriggerParser.parse(expr))
# print expr
# for evaluation in expr.eval(None):
# print evaluation
#
# print
#
# expr = 'minEval(pTest(X,Y) or Z is 3 and X is 2 or Z is 2 and X is 1)[X + Z]'
# expr = BExpression(TriggerParser.parse(expr))
# print expr
# for evaluation in expr.eval(None):
# print evaluation
#
# print
#
# expr = 'maxEval(pTest(X,Y) or Z is 8 and X is 2 or Z is 2 and X is 1)[X + Y]'
# expr = BExpression(TriggerParser.parse(expr))
# print expr
# for evaluation in expr.eval(None):
# print evaluation
from database import Property
from triggerExpressions import BExpression
expr = '(A + B ** 2) != (A + B ** 2)'
expr = BExpression(TriggerParser.parse(expr))
print expr
|
|
import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)  # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
print(len(cw154))
print(len(trito))
totalfiles = normalB + mcell + pcell + cd19cell + cw154 + trito
print(len(totalfiles))
df_list = []
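# For each sample file: read it, drop the leftover "Unnamed: 0" index column,
# keep only chr14 rows (the first five characters of the position string), and
# collect the per-sample frames so they can be aligned on position below.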
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr14"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
# reset_index() restores the shared index as a column named "position", so drop it by that name
total_matrix = total_matrix.drop("position", axis=1)
len(total_matrix.columns)
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG",
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GACACG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC',
'RRBS_trito_pool_2_CGTACTAG.ACAACC',
'RRBS_trito_pool_2_CGTACTAG.ACGTGG',
'RRBS_trito_pool_2_CGTACTAG.ACTCAC',
'RRBS_trito_pool_2_CGTACTAG.AGGATG',
'RRBS_trito_pool_2_CGTACTAG.ATAGCG',
'RRBS_trito_pool_2_CGTACTAG.ATCGAC',
'RRBS_trito_pool_2_CGTACTAG.CAAGAG',
'RRBS_trito_pool_2_CGTACTAG.CATGAC',
'RRBS_trito_pool_2_CGTACTAG.CCTTCG',
'RRBS_trito_pool_2_CGTACTAG.CGGTAG',
'RRBS_trito_pool_2_CGTACTAG.CTATTG',
'RRBS_trito_pool_2_CGTACTAG.GACACG',
'RRBS_trito_pool_2_CGTACTAG.GCATTC',
'RRBS_trito_pool_2_CGTACTAG.GCTGCC',
'RRBS_trito_pool_2_CGTACTAG.GGCATC',
'RRBS_trito_pool_2_CGTACTAG.GTGAGG',
'RRBS_trito_pool_2_CGTACTAG.GTTGAG',
'RRBS_trito_pool_2_CGTACTAG.TAGCGG',
'RRBS_trito_pool_2_CGTACTAG.TATCTC',
'RRBS_trito_pool_2_CGTACTAG.TCTCTG',
'RRBS_trito_pool_2_CGTACTAG.TGACAG']
print(total_matrix.shape)
# Encode each cell as an integer state, using "?" for missing values.
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else "?")
# Concatenate the encoded states down each column, giving one state string per sample.
total_matrix = total_matrix.astype(str).apply(''.join)
# Pair each sample name with its state string and write a PHYLIP-style file.
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str), ' '))
tott.to_csv("total_chrom14.phy", header=False, index=False)
print(tott.shape)
|
|
#!/usr/bin/env python3
"""
Grole is a Python (3.5+) nano web framework based on asyncio. Its goals are to be simple, embeddable (single file and standard library only), and easy to use.
"""
import asyncio
import socket
import json
import re
import urllib
import traceback
import inspect
import io
import mimetypes
import pathlib
import html
import sys
import argparse
import logging
from collections import defaultdict
__author__ = 'witchard'
__version__ = '0.3.0'
class Request:
"""
Represents a single HTTP request
The following members are populated with the request details:
* method: The request method
* location: The request location as it is sent
* path: The unescaped path part of the location
* query: The query string part of the location (if present)
* version: The request version, e.g. HTTP/1.1
* headers: Dictionary of headers from the request
* data: Raw data from the request body
* match: The re.MatchObject from the successful path matching
"""
async def _read(self, reader):
"""
Parses HTTP request into member variables
"""
start_line = await self._readline(reader)
self.method, self.location, self.version = start_line.decode().split()
path_query = urllib.parse.unquote(self.location).split('?', 1)
self.path = path_query[0]
self.query = {}
if len(path_query) > 1:
for q in path_query[1].split('&'):
try:
k, v = q.split('=', 1)
self.query[k] = v
except ValueError:
self.query[q] = None
self.headers = {}
while True:
header_raw = await self._readline(reader)
if header_raw.strip() == b'':
break
header = header_raw.decode().split(':', 1)
self.headers[header[0]] = header[1].strip()
# TODO implement chunked handling
self.data = b''
await self._buffer_body(reader)
async def _readline(self, reader):
"""
Readline helper
"""
ret = await reader.readline()
if len(ret) == 0 and reader.at_eof():
raise EOFError()
return ret
async def _buffer_body(self, reader):
"""
Buffers the body of the request
"""
remaining = int(self.headers.get('Content-Length', 0))
if remaining > 0:
try:
self.data = await reader.readexactly(remaining)
except asyncio.IncompleteReadError:
raise EOFError()
def body(self):
"""
Decodes body as string
"""
return self.data.decode()
def json(self):
"""
Decodes json object from the body
"""
return json.loads(self.body())
class ResponseBody:
"""
Response body from a byte string
"""
def __init__(self, data=b'', content_type='text/plain'):
"""
Initialise object, data is the data to send
Parameters:
* data: Byte data to send
* content_type: Value of Content-Type header, default text/plain
"""
self._headers = {'Content-Length': len(data),
'Content-Type': content_type}
self._data = data
def _set_headers(self, headers):
"""
Merge internal headers into the passed in dictionary
"""
headers.update(self._headers)
async def _write(self, writer):
"""
Write out the data
"""
writer.write(self._data)
await writer.drain()
class ResponseString(ResponseBody):
"""
Response body from a string
"""
def __init__(self, data='', content_type='text/html'):
"""
Initialise object, data is the data to send
Parameters:
* data: String data to send
* content_type: Value of Content-Type header, default text/html
"""
super().__init__(data.encode(), content_type)
class ResponseJSON(ResponseString):
"""
Response body encoded in json
"""
def __init__(self, data='', content_type='application/json'):
"""
Initialise object, data is the data to send
Parameters:
* data: Object to encode as json for sending
* content_type: Value of Content-Type header, default application/json
"""
super().__init__(json.dumps(data), content_type)
class ResponseFile(ResponseBody):
"""
Respond with a file
Content type is guessed if not provided
"""
def __init__(self, filename, content_type=None):
"""
Initialise object, data is the data to send
Parameters:
* filename: Name of file to read and send
* content_type: Value of Content-Type header, default is to guess from file extension
"""
if content_type is None:
content_type = mimetypes.guess_type(filename)[0]
self.filename = filename
self._headers = {'Transfer-Encoding': 'chunked',
'Content-Type': content_type}
async def _write(self, writer):
f = io.FileIO(self.filename)
while True:
data = f.read(io.DEFAULT_BUFFER_SIZE)
header = format(len(data), 'x') + '\r\n'
writer.write(header.encode())
writer.write(data)
writer.write(b'\r\n')
await writer.drain()
if len(data) == 0:
f.close()
return # EOF
class Response:
"""
Represents a single HTTP response
"""
def __init__(self, data=None, code=200, reason='OK', headers={},
version='HTTP/1.1'):
"""
Create a response
Parameters:
* data: Object to send e.g. ResponseBody / ResponseJSON.
* code: The response code, default 200
* reason: The response reason, default OK
* version: The response version, default HTTP/1.1
* headers: Dictionary of response headers, default is a Server header and those from the response body
Note, data is intelligently converted to an appropriate ResponseXYZ object depending on its type.
"""
self.version = version
self.code = code
self.reason = reason
self.data = self._create_body(data)
self.headers = {'Server': 'grole/' + __version__}
self.data._set_headers(self.headers) # Update headers from data
self.headers.update(headers) # Update headers from user
async def _write(self, writer):
start_line = '{} {} {}\r\n'.format(self.version, self.code, self.reason)
headers = ['{}: {}'.format(x[0], x[1]) for x in self.headers.items()]
header = start_line + '\r\n'.join(headers) + '\r\n\r\n'
writer.write(header.encode())
await writer.drain()
await self.data._write(writer)
def _create_body(self, data):
if isinstance(data, ResponseBody):
return data
elif data is None:
return ResponseBody()
elif isinstance(data, bytes):
return ResponseBody(data)
elif isinstance(data, str):
return ResponseString(data)
else:
return ResponseJSON(data)
def serve_static(app, base_url, base_path, index=False):
"""
Serve a directory statically
Parameters:
* app: Grole application object
* base_url: Base URL to serve from, e.g. /static
* base_path: Base path to look for files in
* index: Provide simple directory indexes if True
"""
@app.route(base_url + '/(.*)')
def serve(env, req):
"""
Static files
"""
try:
base = pathlib.Path(base_path).resolve()
path = (base / req.match.group(1)).resolve()
except FileNotFoundError:
return Response(None, 404, 'Not Found')
# Don't let bad paths through
if base == path or base in path.parents:
if path.is_file():
return ResponseFile(str(path))
if index and path.is_dir():
if base == path:
ret = ''
else:
ret = '<a href="../">../</a><br/>\r\n'
for item in path.iterdir():
name = item.parts[-1]
if item.is_dir():
name += '/'
ret += '<a href="{}">{}</a><br/>\r\n'.format(urllib.parse.quote(name), html.escape(name))
ret = ResponseString(ret, 'text/html')
return ret
return Response(None, 404, 'Not Found')
def serve_doc(app, url):
"""
Serve API documentation extracted from request handler docstrings
Parameters:
* app: Grole application object
* url: URL to serve at
"""
@app.route(url, doc=False)
def index(env, req):
ret = ''
for d in env['doc']:
ret += 'URL: {url}, supported methods: {methods}{doc}\n'.format(**d)
return ret
class Grole:
"""
A Grole Webserver
"""
def __init__(self, env={}):
"""
Initialise a server
env is passed to request handlers to provide shared state.
Note, env by default contains doc which is populated from
registered route docstrings.
"""
self._handlers = defaultdict(list)
self.env = {'doc': []}
self.env.update(env)
self._logger = logging.getLogger('grole')
def route(self, path_regex, methods=['GET'], doc=True):
"""
Decorator to register a handler
Parameters:
* path_regex: Request path regex to match against for running the handler
* methods: HTTP methods to use this handler for
* doc: Add to internal doc structure
"""
def register_func(func):
"""
Decorator implementation
"""
if doc:
self.env['doc'].append({'url': path_regex, 'methods': ', '.join(methods), 'doc': func.__doc__})
for method in methods:
self._handlers[method].append((re.compile(path_regex), func))
return func # Return the original function
return register_func # Decorator
async def _handle(self, reader, writer):
"""
Handle a single TCP connection
Parses requests, finds appropriate handlers and returns responses
"""
peer = writer.get_extra_info('peername')
self._logger.debug('New connection from {}'.format(peer))
try:
# Loop handling requests
while True:
# Read the request
req = Request()
await req._read(reader)
# Find and execute handler
res = None
for path_regex, handler in self._handlers.get(req.method, []):
match = path_regex.fullmatch(req.path)
if match:
req.match = match
try:
if inspect.iscoroutinefunction(handler):
res = await handler(self.env, req)
else:
res = handler(self.env, req)
if not isinstance(res, Response):
res = Response(data=res)
except:
# Error - log it and return 500
self._logger.error(traceback.format_exc())
res = Response(code=500, reason='Internal Server Error')
break
# No handler - send 404
if res is None:
res = Response(code=404, reason='Not Found')
# Respond
await res._write(writer)
self._logger.info('{}: {} -> {}'.format(peer, req.path, res.code))
except EOFError:
self._logger.debug('Connection closed from {}'.format(peer))
except Exception as e:
self._logger.error('Connection error ({}) from {}'.format(e, peer))
writer.close()
def run(self, host='localhost', port=1234, ssl_context=None):
"""
Launch the server. Will run forever accepting connections until interrupted.
Parameters:
* host: The host to listen on
* port: The port to listen on
* ssl_context: The SSL context passed to asyncio
"""
# Setup loop
loop = asyncio.get_event_loop()
coro = asyncio.start_server(self._handle, host, port, loop=loop, ssl=ssl_context)
try:
server = loop.run_until_complete(coro)
except Exception as e:
self._logger.error('Could not launch server: {}'.format(e))
return
# Run the server
self._logger.info('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
def parse_args(args=sys.argv[1:]):
"""
Parse command line arguments for Grole server running as static file server
"""
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--address', help='address to listen on, default localhost',
default='localhost')
parser.add_argument('-p', '--port', help='port to listen on, default 1234',
default=1234, type=int)
parser.add_argument('-d', '--directory', help='directory to serve, default .',
default='.')
parser.add_argument('-n', '--noindex', help='do not show directory indexes',
default=False, action='store_true')
loglevel = parser.add_mutually_exclusive_group()
loglevel.add_argument('-v', '--verbose', help='verbose logging',
default=False, action='store_true')
loglevel.add_argument('-q', '--quiet', help='quiet logging',
default=False, action='store_true')
return parser.parse_args(args)
def main(args=sys.argv[1:]):
"""
Run Grole static file server
"""
args = parse_args(args)
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
elif args.quiet:
logging.basicConfig(level=logging.ERROR)
else:
logging.basicConfig(level=logging.INFO)
app = Grole()
serve_static(app, '', args.directory, not args.noindex)
app.run(args.address, args.port)
if __name__ == '__main__':
main()
|
|
'''
Defines the main analysis functions which are called by the
PyGeoNS executable.
'''
from __future__ import division
import numpy as np
import logging
import subprocess as sp
from pygeons.main.fit import fit
from pygeons.main.reml import reml
from pygeons.main.strain import strain
from pygeons.main.autoclean import autoclean
from pygeons.main.gptools import composite_units
from pygeons.main import gpnetwork
from pygeons.main import gpstation
from pygeons.mjd import mjd_inv,mjd
from pygeons.basemap import make_basemap
from pygeons.io.convert import dict_from_hdf5,hdf5_from_dict
logger = logging.getLogger(__name__)
def _params_dict(b):
'''
Coerce the list *b* into a dictionary of hyperparameters for each
direction. The dictionary keys are 'east', 'north', and 'vertical'.
The dictionary values are each a length-N array of hyperparameters.
>>> b1 = [1.0,2.0]
>>> b2 = ['1.0','2.0']
>>> b3 = ['east','1.0','2.0','north','1.0','2.0','vertical','1.0','2.0']
'''
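# For example (a sketch of the behaviour implemented below): b1 or b2 map the
# same two values to all of 'east', 'north' and 'vertical', whereas the b3
# form assigns a separate pair of values to each direction.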
b = list(b)
msg = ('the hyperparameters must be a list of N floats or 3 lists '
'of N floats where each list is preceded by "east", "north", '
'or "vertical"')
if ('east' in b) and ('north' in b) and ('vertical' in b):
if (len(b) % 3) != 0:
raise ValueError(msg)
arr = np.reshape(b,(3,-1))
dirs = arr[:,0].astype(str) # directions
vals = arr[:,1:].astype(float) # hyperparameter array
out = dict(zip(dirs,vals))
# make sure the keys contain 'east', 'north', and 'vertical'
if set(out.keys()) != set(['east','north','vertical']):
raise ValueError(msg)
else:
try:
arr = np.array(b,dtype=float)
except ValueError:
raise ValueError(msg)
out = {'east':arr,
'north':arr,
'vertical':arr}
return out
def _remove_extension(f):
'''remove file extension if one exists'''
if '.' not in f:
return f
else:
return '.'.join(f.split('.')[:-1])
def _log_fit(input_file,
network_model,network_params,
station_model,station_params,
output_file):
msg = '\n'
msg += '---------------- PYGEONS FIT RUN INFORMATION -----------------\n\n'
msg += 'input file : %s\n' % input_file
msg += 'network :\n'
msg += ' model : %s\n' % ', '.join(network_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(network_model,gpnetwork.CONSTRUCTORS))
msg += ' east parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['east']])
msg += ' north parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['north']])
msg += ' vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['vertical']])
msg += 'station :\n'
msg += ' model : %s\n' % ', '.join(station_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(station_model,gpstation.CONSTRUCTORS))
msg += ' east parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['east']])
msg += ' north parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['north']])
msg += ' vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['vertical']])
msg += 'output file : %s\n\n' % output_file
msg += '--------------------------------------------------------------\n'
logger.info(msg)
return
def _log_autoclean(input_file,
network_model,network_params,
station_model,station_params,
outlier_tol,
output_file):
msg = '\n'
msg += '------------- PYGEONS AUTOCLEAN RUN INFORMATION --------------\n\n'
msg += 'input file : %s\n' % input_file
msg += 'network :\n'
msg += ' model : %s\n' % ', '.join(network_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(network_model,gpnetwork.CONSTRUCTORS))
msg += ' east parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['east']])
msg += ' north parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['north']])
msg += ' vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['vertical']])
msg += 'station :\n'
msg += ' model : %s\n' % ', '.join(station_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(station_model,gpstation.CONSTRUCTORS))
msg += ' east parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['east']])
msg += ' north parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['north']])
msg += ' vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['vertical']])
msg += 'outlier tolerance : %s\n' % outlier_tol
msg += 'output file : %s\n\n' % output_file
msg += '--------------------------------------------------------------\n'
logger.info(msg)
return
def _log_reml(input_file,
network_model,network_params,network_fix,
station_model,station_params,station_fix,
output_file):
msg = '\n'
msg += '---------------- PYGEONS REML RUN INFORMATION ----------------\n\n'
msg += 'input file : %s\n' % input_file
msg += 'network :\n'
msg += ' model : %s\n' % ', '.join(network_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(network_model,gpnetwork.CONSTRUCTORS))
msg += ' fixed parameters : %s\n' % ', '.join(network_fix.astype(str))
msg += ' initial east parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['east']])
msg += ' initial north parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['north']])
msg += ' initial vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['vertical']])
msg += 'station :\n'
msg += ' model : %s\n' % ', '.join(station_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(station_model,gpstation.CONSTRUCTORS))
msg += ' fixed parameters : %s\n' % ', '.join(station_fix.astype(str))
msg += ' initial east parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['east']])
msg += ' initial north parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['north']])
msg += ' initial vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['vertical']])
msg += 'output file : %s\n\n' % output_file
msg += '--------------------------------------------------------------\n'
logger.info(msg)
return msg
def _log_reml_results(input_file,
network_model,network_params,network_fix,
station_model,station_params,station_fix,
likelihood,output_file):
msg = '\n'
msg += '-------------------- PYGEONS REML RESULTS --------------------\n\n'
msg += 'input file : %s\n' % input_file
msg += 'network :\n'
msg += ' model : %s\n' % ' '.join(network_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(network_model,gpnetwork.CONSTRUCTORS))
msg += ' fixed parameters : %s\n' % ', '.join(network_fix.astype(str))
msg += ' optimal east parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['east']])
msg += ' optimal north parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['north']])
msg += ' optimal vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['vertical']])
msg += 'station :\n'
msg += ' model : %s\n' % ' '.join(station_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(station_model,gpstation.CONSTRUCTORS))
msg += ' fixed parameters : %s\n' % ', '.join(station_fix.astype(str))
msg += ' optimal east parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['east']])
msg += ' optimal north parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['north']])
msg += ' optimal vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['vertical']])
msg += 'log likelihood :\n'
msg += ' east : %s\n' % likelihood['east']
msg += ' north : %s\n' % likelihood['north']
msg += ' vertical : %s\n' % likelihood['vertical']
msg += 'output file : %s\n\n' % output_file
msg += '--------------------------------------------------------------\n'
logger.info(msg)
return msg
def _log_strain(input_file,
network_prior_model,network_prior_params,
network_noise_model,network_noise_params,
station_noise_model,station_noise_params,
start_date,stop_date,output_id,rate,vertical,
covariance,output_dx_file,output_dy_file):
msg = '\n'
msg += '--------------- PYGEONS STRAIN RUN INFORMATION ---------------\n\n'
msg += 'input file : %s\n' % input_file
msg += 'network prior :\n'
msg += ' model : %s\n' % ', '.join(network_prior_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(network_prior_model,gpnetwork.CONSTRUCTORS))
msg += ' east parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_prior_params['east']])
msg += ' north parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_prior_params['north']])
msg += ' vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_prior_params['vertical']])
msg += 'network noise :\n'
msg += ' model : %s\n' % ' '.join(network_noise_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(network_noise_model,gpnetwork.CONSTRUCTORS))
msg += ' east parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_noise_params['east']])
msg += ' north parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_noise_params['north']])
msg += ' vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_noise_params['vertical']])
msg += 'station noise :\n'
msg += ' model : %s\n' % ', '.join(station_noise_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(station_noise_model,gpstation.CONSTRUCTORS))
msg += ' east parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_noise_params['east']])
msg += ' north parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_noise_params['north']])
msg += ' vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_noise_params['vertical']])
msg += 'start date for output : %s\n' % start_date
msg += 'stop date for output : %s\n' % stop_date
msg += 'number of output positions : %s\n' % len(output_id)
msg += 'ignore vertical deformation : %s\n' % (not vertical)
msg += 'return strain rates : %s\n' % rate
msg += 'return covariances : %s\n' % covariance
msg += 'output east derivative file : %s\n' % output_dx_file
msg += 'output north derivative file : %s\n\n' % output_dy_file
msg += '--------------------------------------------------------------\n'
logger.info(msg)
def pygeons_fit(input_file,
network_model=('spwen12-se',),
network_params=(1.0,0.1,100.0),
station_model=('linear',),
station_params=(),
output_stem=None):
'''
Condition the Gaussian process to the observations and evaluate the
posterior at the observation points.
'''
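# Example call (a sketch; the input file name is hypothetical):
#   pygeons_fit('data.h5',
#               network_model=('spwen12-se',),
#               network_params=(1.0, 0.1, 100.0),
#               station_model=('linear',),
#               station_params=())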
logger.info('Running pygeons fit ...')
data = dict_from_hdf5(input_file)
if data['time_exponent'] != 0:
raise ValueError('input dataset must have units of displacement')
if data['space_exponent'] != 1:
raise ValueError('input dataset must have units of displacement')
# create output dictionary
out = dict((k,np.copy(v)) for k,v in data.iteritems())
# convert params to a dictionary of hyperparameters for each direction
network_params = _params_dict(network_params)
station_params = _params_dict(station_params)
# make output file name
if output_stem is None:
output_stem = _remove_extension(input_file) + '.fit'
output_file = output_stem + '.h5'
# convert geodetic positions to cartesian
bm = make_basemap(data['longitude'],data['latitude'])
x,y = bm(data['longitude'],data['latitude'])
xy = np.array([x,y]).T
_log_fit(input_file,
network_model,network_params,
station_model,station_params,
output_file)
for dir in ['east','north','vertical']:
u,su = fit(t=data['time'][:,None],
x=xy,
d=data[dir],
sd=data[dir+'_std_dev'],
network_model=network_model,
network_params=network_params[dir],
station_model=station_model,
station_params=station_params[dir])
out[dir] = u
out[dir+'_std_dev'] = su
hdf5_from_dict(output_file,out)
logger.info('Posterior fit written to %s' % output_file)
return
def pygeons_autoclean(input_file,
network_model=('spwen12-se',),
network_params=(1.0,0.1,100.0),
station_model=('linear',),
station_params=(),
output_stem=None,
outlier_tol=4.0):
'''
Remove outliers with a data editing algorithm
'''
logger.info('Running pygeons autoclean ...')
data = dict_from_hdf5(input_file)
if data['time_exponent'] != 0:
raise ValueError('input dataset must have units of displacement')
if data['space_exponent'] != 1:
raise ValueError('input dataset must have units of displacement')
# dictionary which will contain the edited data
out = dict((k,np.copy(v)) for k,v in data.iteritems())
# convert params to a dictionary of hyperparameters for each direction
network_params = _params_dict(network_params)
station_params = _params_dict(station_params)
# make output file name
if output_stem is None:
output_stem = _remove_extension(input_file) + '.autoclean'
output_file = output_stem + '.h5'
# convert geodetic positions to cartesian
bm = make_basemap(data['longitude'],data['latitude'])
x,y = bm(data['longitude'],data['latitude'])
xy = np.array([x,y]).T
_log_autoclean(input_file,
network_model,network_params,
station_model,station_params,
outlier_tol,
output_file)
for dir in ['east','north','vertical']:
de,sde = autoclean(t=data['time'][:,None],
x=xy,
d=data[dir],
sd=data[dir+'_std_dev'],
network_model=network_model,
network_params=network_params[dir],
station_model=station_model,
station_params=station_params[dir],
tol=outlier_tol)
out[dir] = de
out[dir+'_std_dev'] = sde
hdf5_from_dict(output_file,out)
logger.info('Edited data written to %s' % output_file)
return
def pygeons_reml(input_file,
network_model=('spwen12-se',),
network_params=(1.0,0.1,100.0),
network_fix=(),
station_model=('linear',),
station_params=(),
station_fix=(),
output_stem=None):
'''
Restricted maximum likelihood estimation
'''
logger.info('Running pygeons reml ...')
data = dict_from_hdf5(input_file)
if data['time_exponent'] != 0:
raise ValueError('input dataset must have units of displacement')
if data['space_exponent'] != 1:
raise ValueError('input dataset must have units of displacement')
# convert params to a dictionary of hyperparameters for each direction
network_params = _params_dict(network_params)
network_fix = np.asarray(network_fix,dtype=int)
station_params = _params_dict(station_params)
station_fix = np.asarray(station_fix,dtype=int)
# make output file name
if output_stem is None:
output_stem = _remove_extension(input_file) + '.reml'
output_file = output_stem + '.txt'
# convert geodetic positions to cartesian
bm = make_basemap(data['longitude'],data['latitude'])
x,y = bm(data['longitude'],data['latitude'])
xy = np.array([x,y]).T
# call "pygeons info" on the input data file. pipe the results to
# the output file
sp.call('pygeons info %s > %s' % (input_file,output_file),shell=True)
msg = _log_reml(input_file,
network_model,network_params,network_fix,
station_model,station_params,station_fix,
output_file)
# write log entry to file
with open(output_file,'a') as fout:
fout.write(msg)
# make a dictionary storing likelihoods
likelihood = {}
for dir in ['east','north','vertical']:
net_opt,sta_opt,like = reml(t=data['time'][:,None],
x=xy,
d=data[dir],
sd=data[dir+'_std_dev'],
network_model=network_model,
network_params=network_params[dir],
network_fix=network_fix,
station_model=station_model,
station_params=station_params[dir],
station_fix=station_fix)
# update the parameter dict with the optimal values
network_params[dir] = net_opt
station_params[dir] = sta_opt
likelihood[dir] = like
msg = _log_reml_results(input_file,
network_model,network_params,network_fix,
station_model,station_params,station_fix,
likelihood,output_file)
# write log entry to file
with open(output_file,'a') as fout:
fout.write(msg)
logger.info('Optimal parameters written to %s' % output_file)
return
def pygeons_strain(input_file,
network_prior_model=('spwen12-se',),
network_prior_params=(1.0,0.1,100.0),
network_noise_model=(),
network_noise_params=(),
station_noise_model=('linear',),
station_noise_params=(),
start_date=None,stop_date=None,
positions=None,positions_file=None,
rate=True,vertical=True,covariance=False,
output_stem=None):
'''
Calculates strain by computing the posterior deformation gradients (du/dx and du/dy).
'''
logger.info('Running pygeons strain ...')
data = dict_from_hdf5(input_file)
if data['time_exponent'] != 0:
raise ValueError('input dataset must have units of displacement')
if data['space_exponent'] != 1:
raise ValueError('input dataset must have units of displacement')
out_dx = dict((k,np.copy(v)) for k,v in data.iteritems())
out_dy = dict((k,np.copy(v)) for k,v in data.iteritems())
# convert params to a dictionary of hyperparameters for each direction
network_prior_params = _params_dict(network_prior_params)
network_noise_params = _params_dict(network_noise_params)
station_noise_params = _params_dict(station_noise_params)
# convert geodetic input positions to cartesian
bm = make_basemap(data['longitude'],data['latitude'])
x,y = bm(data['longitude'],data['latitude'])
xy = np.array([x,y]).T
# set output positions
if (positions is None) and (positions_file is None):
# no output positions were specified so return the solution at the
# input data positions
output_id = np.array(data['id'],copy=True)
output_lon = np.array(data['longitude'],copy=True)
output_lat = np.array(data['latitude'],copy=True)
else:
output_id = np.zeros((0,),dtype=str)
output_lon = np.zeros((0,),dtype=float)
output_lat = np.zeros((0,),dtype=float)
if positions_file is not None:
# if positions file was specified
pos = np.loadtxt(positions_file,dtype=str,ndmin=2)
if pos.shape[1] != 3:
raise ValueError(
'positions file must contain a column for IDs, longitudes, '
'and latitudes')
output_id = np.hstack((output_id,pos[:,0]))
output_lon = np.hstack((output_lon,pos[:,1].astype(float)))
output_lat = np.hstack((output_lat,pos[:,2].astype(float)))
if positions is not None:
# if positions were specified via the command line
pos = np.array(positions,dtype=str).reshape((-1,3))
output_id = np.hstack((output_id,pos[:,0]))
output_lon = np.hstack((output_lon,pos[:,1].astype(float)))
output_lat = np.hstack((output_lat,pos[:,2].astype(float)))
# convert geodetic output positions to cartesian
output_x,output_y = bm(output_lon,output_lat)
output_xy = np.array([output_x,output_y]).T
# set output times
if start_date is None:
start_date = mjd_inv(np.min(data['time']),'%Y-%m-%d')
if stop_date is None:
stop_date = mjd_inv(np.max(data['time']),'%Y-%m-%d')
start_time = mjd(start_date,'%Y-%m-%d')
stop_time = mjd(stop_date,'%Y-%m-%d')
output_time = np.arange(start_time,stop_time+1)
# set output file names
if output_stem is None:
output_stem = _remove_extension(input_file) + '.strain'
output_dx_file = output_stem + '.dudx.h5'
output_dy_file = output_stem + '.dudy.h5'
_log_strain(input_file,
network_prior_model,network_prior_params,
network_noise_model,network_noise_params,
station_noise_model,station_noise_params,
start_date,stop_date,output_id,rate,vertical,
covariance,output_dx_file,output_dy_file)
for dir in ['east','north','vertical']:
if (dir == 'vertical') and (not vertical):
logger.debug('Not computing vertical deformation gradients')
# do not compute the deformation gradients for vertical. Just
# return zeros.
dx = np.zeros((output_time.shape[0],output_xy.shape[0]))
sdx = np.zeros((output_time.shape[0],output_xy.shape[0]))
dy = np.zeros((output_time.shape[0],output_xy.shape[0]))
sdy = np.zeros((output_time.shape[0],output_xy.shape[0]))
if covariance:
# if covariance is True then create an empty array of
# covariances
cdx = np.zeros((output_time.shape[0],output_xy.shape[0],
output_time.shape[0],output_xy.shape[0]))
cdy = np.zeros((output_time.shape[0],output_xy.shape[0],
output_time.shape[0],output_xy.shape[0]))
soln = (dx,sdx,cdx,dy,sdy,cdy)
else:
soln = (dx,sdx,dy,sdy)
else:
soln = strain(t=data['time'][:,None],
x=xy,
d=data[dir],
sd=data[dir+'_std_dev'],
network_prior_model=network_prior_model,
network_prior_params=network_prior_params[dir],
network_noise_model=network_noise_model,
network_noise_params=network_noise_params[dir],
station_noise_model=station_noise_model,
station_noise_params=station_noise_params[dir],
out_t=output_time[:,None],
out_x=output_xy,
rate=rate,
covariance=covariance)
if covariance:
# soln contains six entries when covariance is True
dx,sdx,cdx,dy,sdy,cdy = soln
out_dx[dir] = dx
out_dx[dir+'_std_dev'] = sdx
out_dx[dir+'_covariance'] = cdx
out_dy[dir] = dy
out_dy[dir+'_std_dev'] = sdy
out_dy[dir+'_covariance'] = cdy
else:
# soln contains four entries when covariance is False
dx,sdx,dy,sdy = soln
out_dx[dir] = dx
out_dx[dir+'_std_dev'] = sdx
out_dy[dir] = dy
out_dy[dir+'_std_dev'] = sdy
out_dx['time'] = output_time
out_dx['longitude'] = output_lon
out_dx['latitude'] = output_lat
out_dx['id'] = output_id
out_dx['time_exponent'] = -int(rate)
out_dx['space_exponent'] = 0
out_dy['time'] = output_time
out_dy['longitude'] = output_lon
out_dy['latitude'] = output_lat
out_dy['id'] = output_id
out_dy['time_exponent'] = -int(rate)
out_dy['space_exponent'] = 0
hdf5_from_dict(output_dx_file,out_dx)
hdf5_from_dict(output_dy_file,out_dy)
if rate:
logger.info('Posterior velocity gradients written to %s and %s' % (output_dx_file,output_dy_file))
else:
logger.info('Posterior displacement gradients written to %s and %s' % (output_dx_file,output_dy_file))
return
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import tensorflow as tf
from IPython import display
from google.colab import output
from witwidget.notebook import base
# Python functions for requests from javascript.
def infer_examples(wit_id):
WitWidget.widgets[wit_id].infer()
output.register_callback('notebook.InferExamples', infer_examples)
def delete_example(wit_id, index):
WitWidget.widgets[wit_id].delete_example(index)
output.register_callback('notebook.DeleteExample', delete_example)
def duplicate_example(wit_id, index):
WitWidget.widgets[wit_id].duplicate_example(index)
output.register_callback('notebook.DuplicateExample', duplicate_example)
def update_example(wit_id, index, example):
WitWidget.widgets[wit_id].update_example(index, example)
output.register_callback('notebook.UpdateExample', update_example)
def get_eligible_features(wit_id):
WitWidget.widgets[wit_id].get_eligible_features()
output.register_callback('notebook.GetEligibleFeatures', get_eligible_features)
def sort_eligible_features(wit_id, details):
WitWidget.widgets[wit_id].sort_eligible_features(details)
output.register_callback('notebook.SortEligibleFeatures', sort_eligible_features)
def infer_mutants(wit_id, details):
WitWidget.widgets[wit_id].infer_mutants(details)
output.register_callback('notebook.InferMutants', infer_mutants)
def compute_custom_distance(wit_id, index, callback_name, params):
WitWidget.widgets[wit_id].compute_custom_distance(index, callback_name,
params)
output.register_callback('notebook.ComputeCustomDistance',
compute_custom_distance)
# HTML/javascript for the WIT frontend.
WIT_HTML = """
<script>
(function() {{
const id = {id};
const wit = document.querySelector("#wit");
wit.style.height = '{height}px';
let mutantFeature = null;
let stagedExamples = [];
let prevExampleCountdown = 0;
let stagedInferences = {{}};
let prevInferencesCountdown = 0;
// Listeners from WIT element events which pass requests to python.
wit.addEventListener("infer-examples", e => {{
google.colab.kernel.invokeFunction(
'notebook.InferExamples', [id], {{}});
}});
wit.addEventListener("compute-custom-distance", e => {{
google.colab.kernel.invokeFunction(
'notebook.ComputeCustomDistance',
[id, e.detail.index, e.detail.callback, e.detail.params],
{{}});
}});
wit.addEventListener("delete-example", e => {{
google.colab.kernel.invokeFunction(
'notebook.DeleteExample', [id, e.detail.index], {{}});
}});
wit.addEventListener("duplicate-example", e => {{
google.colab.kernel.invokeFunction(
'notebook.DuplicateExample', [id, e.detail.index], {{}});
}});
wit.addEventListener("update-example", e => {{
google.colab.kernel.invokeFunction(
'notebook.UpdateExample',
[id, e.detail.index, e.detail.example],
{{}});
}});
wit.addEventListener('get-eligible-features', e => {{
google.colab.kernel.invokeFunction(
'notebook.GetEligibleFeatures', [id], {{}});
}});
wit.addEventListener('infer-mutants', e => {{
mutantFeature = e.detail.feature_name;
google.colab.kernel.invokeFunction(
'notebook.InferMutants', [id, e.detail], {{}});
}});
wit.addEventListener('sort-eligible-features', e => {{
google.colab.kernel.invokeFunction(
'notebook.SortEligibleFeatures', [id, e.detail], {{}});
}});
// Javascript callbacks called by python code to communicate with WIT
// Polymer element.
window.backendError = error => {{
wit.handleError(error.msg);
}};
window.inferenceCallback = res => {{
// If starting a new set of data, reset the staged results.
if (res.countdown >= prevInferencesCountdown) {{
stagedInferences = res.inferences;
}}
prevInferencesCountdown = res.countdown;
for (let i = 0; i < res.results.length; i++) {{
if (wit.modelType == 'classification') {{
stagedInferences.inferences.results[i].classificationResult.classifications.push(...res.results[i]);
}}
else {{
stagedInferences.inferences.results[i].regressionResult.regressions.push(...res.results[i]);
}}
const extras = res.extra[i];
for (let key of Object.keys(extras)) {{
stagedInferences.extra_outputs[i][key].push(...extras[key]);
}}
}}
stagedInferences.inferences.indices.push(...res.indices);
// If this is the final chunk, set the staged results.
if (res.countdown === 0) {{
wit.labelVocab = stagedInferences.label_vocab;
wit.inferences = stagedInferences.inferences;
wit.extraOutputs = {{indices: wit.inferences.indices,
extra: stagedInferences.extra_outputs}};
}}
}};
window.distanceCallback = callbackDict => {{
wit.invokeCustomDistanceCallback(callbackDict);
}};
window.spriteCallback = spriteUrl => {{
if (!wit.updateSprite) {{
requestAnimationFrame(() => window.spriteCallback(spriteUrl));
return;
}}
wit.hasSprite = true;
wit.localAtlasUrl = spriteUrl;
wit.updateSprite();
}};
window.eligibleFeaturesCallback = features => {{
wit.partialDepPlotEligibleFeatures = features;
}};
window.sortEligibleFeaturesCallback = features => {{
wit.partialDepPlotEligibleFeatures = features;
}};
window.inferMutantsCallback = chartInfo => {{
wit.makeChartForFeature(chartInfo.chartType, mutantFeature,
chartInfo.data);
}};
window.configCallback = config => {{
if (!wit.updateNumberOfModels) {{
requestAnimationFrame(() => window.configCallback(config));
return;
}}
if ('inference_address' in config) {{
let addresses = config['inference_address'];
if ('inference_address_2' in config) {{
addresses += ',' + config['inference_address_2'];
}}
wit.inferenceAddress = addresses;
}}
if ('model_name' in config) {{
let names = config['model_name'];
if ('model_name_2' in config) {{
names += ',' + config['model_name_2'];
}}
wit.modelName = names;
}}
if ('model_type' in config) {{
wit.modelType = config['model_type'];
}}
if ('are_sequence_examples' in config) {{
wit.sequenceExamples = config['are_sequence_examples'];
}}
if ('max_classes' in config) {{
wit.maxInferenceEntriesPerRun = config['max_classes'];
}}
if ('multiclass' in config) {{
wit.multiClass = config['multiclass'];
}}
wit.updateNumberOfModels();
if ('target_feature' in config) {{
wit.selectedLabelFeature = config['target_feature'];
}}
if ('uses_custom_distance_fn' in config) {{
wit.customDistanceFunctionSet = true;
}} else {{
wit.customDistanceFunctionSet = false;
}}
}};
window.updateExamplesCallback = res => {{
// If starting a new set of data, reset the staged examples.
if (res.countdown >= prevExampleCountdown) {{
stagedExamples = [];
}}
prevExampleCountdown = res.countdown;
stagedExamples.push(...res.examples);
if (res.countdown === 0) {{
// If this is the final chunk, set the staged examples.
window.commitUpdatedExamples();
}}
}};
window.commitUpdatedExamples = () => {{
if (!wit.updateExampleContents) {{
requestAnimationFrame(() => window.commitUpdatedExamples());
return;
}}
wit.updateExampleContents(stagedExamples, false);
if (wit.localAtlasUrl) {{
window.spriteCallback(wit.localAtlasUrl);
}}
}};
// BroadcastChannels allows examples to be updated by a call from an
// output cell that isn't the cell hosting the WIT widget.
const channelName = 'updateExamples' + id;
const updateExampleListener = new BroadcastChannel(channelName);
updateExampleListener.onmessage = msg => {{
window.updateExamplesCallback(msg.data);
}};
}})();
</script>
"""
class WitWidget(base.WitWidgetBase):
"""WIT widget for colab."""
# Static instance list of constructed WitWidgets so python global functions
# can call into instances of this object
widgets = []
# Static instance index to keep track of ID number of each constructed
# WitWidget.
index = 0
def __init__(self, config_builder, height=1000, delay_rendering=False):
"""Constructor for colab notebook WitWidget.
Args:
config_builder: WitConfigBuilder object containing settings for WIT.
height: Optional height in pixels for WIT to occupy. Defaults to 1000.
delay_rendering: Optional. If true, then do not render WIT on
construction. Instead, only render when render method is called. Defaults
to False.
"""
self._rendering_complete = False
self.id = WitWidget.index
self.height = height
self.set_examples_in_progress = False
# Maximum number of examples to send to the front-end per message, in order
# to avoid kernel crashes caused by overly large messages.
self.SLICE_SIZE = 10000
base.WitWidgetBase.__init__(self, config_builder)
# Add this instance to the static instance list.
WitWidget.widgets.append(self)
if not delay_rendering:
self.render()
# Increment the static instance WitWidget index counter
WitWidget.index += 1
def render(self):
"""Render the widget to the display."""
# Display WIT Polymer element.
display.display(display.HTML(self._get_element_html()))
display.display(display.HTML(
WIT_HTML.format(height=self.height, id=self.id)))
# Send the provided config and examples to JS.
output.eval_js("""configCallback({config})""".format(
config=json.dumps(self.config)))
self.set_examples_in_progress = True
self._set_examples_looper('updateExamplesCallback({data})')
self.set_examples_in_progress = False
self._generate_sprite()
self._rendering_complete = True
def _get_element_html(self):
return tf.io.gfile.GFile(
'/usr/local/share/jupyter/nbextensions/wit-widget/wit_jupyter.html'
).read()
def set_examples(self, examples):
if self.set_examples_in_progress:
print('Cannot set examples while transfer is in progress.')
return
self.set_examples_in_progress = True
base.WitWidgetBase.set_examples(self, examples)
# If this is called after rendering, use a BroadcastChannel to send
# the updated examples to the visualization. Inside of the ctor, no action
# is necessary as the rendering handles all communication.
if self._rendering_complete:
# Use BroadcastChannel to allow this call to be made in a separate colab
# cell from the cell that displays WIT.
channel_str = """(new BroadcastChannel('updateExamples{}'))""".format(
self.id)
eval_js_str = channel_str + '.postMessage({data})'
self._set_examples_looper(eval_js_str)
self._generate_sprite()
self.set_examples_in_progress = False
def _set_examples_looper(self, eval_js_str):
# Send the set examples to JS in chunks.
num_pieces = math.ceil(len(self.examples) / self.SLICE_SIZE)
i = 0
while num_pieces > 0:
num_pieces -= 1
exs = self.examples[i : i + self.SLICE_SIZE]
piece = {'examples': exs, 'countdown': num_pieces}
output.eval_js(eval_js_str.format(data=json.dumps(piece)))
i += self.SLICE_SIZE
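# Illustrative note on the chunking above (a sketch, not executed by the
# widget): with SLICE_SIZE = 3 and 7 examples, three pieces are sent with
# countdown values 2, 1 and 0. The front-end (updateExamplesCallback in
# WIT_HTML) resets its staging buffer when the countdown stops decreasing and
# commits the staged examples once it reaches 0. In plain Python the same
# slicing could be written as:
#
#   examples = list(range(7))
#   slice_size = 3
#   pieces = [examples[i:i + slice_size]
#             for i in range(0, len(examples), slice_size)]
#   countdowns = list(range(len(pieces) - 1, -1, -1))  # [2, 1, 0]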
def infer(self):
try:
inferences = base.WitWidgetBase.infer_impl(self)
# Parse out the inferences from the returned structure and empty the
# structure of contents, keeping its nested structure.
# Chunks of the inference results will be sent to the front-end and
# re-assembled.
indices = inferences['inferences']['indices'][:]
inferences['inferences']['indices'] = []
res2 = []
extra = {}
extra2 = {}
model_inference = inferences['inferences']['results'][0]
if ('extra_outputs' in inferences and len(inferences['extra_outputs']) and
inferences['extra_outputs'][0]):
for key in inferences['extra_outputs'][0]:
extra[key] = inferences['extra_outputs'][0][key][:]
inferences['extra_outputs'][0][key] = []
if 'classificationResult' in model_inference:
res = model_inference['classificationResult']['classifications'][:]
model_inference['classificationResult']['classifications'] = []
else:
res = model_inference['regressionResult']['regressions'][:]
model_inference['regressionResult']['regressions'] = []
if len(inferences['inferences']['results']) > 1:
if ('extra_outputs' in inferences and
len(inferences['extra_outputs']) > 1 and
inferences['extra_outputs'][1]):
for key in inferences['extra_outputs'][1]:
extra2[key] = inferences['extra_outputs'][1][key][:]
inferences['extra_outputs'][1][key] = []
model_2_inference = inferences['inferences']['results'][1]
if 'classificationResult' in model_2_inference:
res2 = model_2_inference['classificationResult']['classifications'][:]
model_2_inference['classificationResult']['classifications'] = []
else:
res2 = model_2_inference['regressionResult']['regressions'][:]
model_2_inference['regressionResult']['regressions'] = []
i = 0
num_pieces = math.ceil(len(indices) / self.SLICE_SIZE)
# Loop over each piece to send.
while num_pieces > 0:
num_pieces -= 1
piece = [res[i : i + self.SLICE_SIZE]]
extra_piece = [{}]
for key in extra:
extra_piece[0][key] = extra[key][i : i + self.SLICE_SIZE]
if res2:
piece.append(res2[i : i + self.SLICE_SIZE])
extra_piece.append({})
for key in extra2:
extra_piece[1][key] = extra2[key][i : i + self.SLICE_SIZE]
ind_piece = indices[i : i + self.SLICE_SIZE]
data = {'results': piece, 'indices': ind_piece, 'extra': extra_piece,
'countdown': num_pieces}
# For the first segment to send, also send the blank inferences
# structure to be filled in. This was cleared of contents above but is
# used to maintain the nested structure of the results.
if i == 0:
data['inferences'] = inferences
output.eval_js("""inferenceCallback({data})""".format(
data=json.dumps(data)))
i += self.SLICE_SIZE
except Exception as e:
output.eval_js("""backendError({error})""".format(
error=json.dumps({'msg': repr(e)})))
def delete_example(self, index):
self.examples.pop(index)
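# Shift the indices of previously-updated examples that sit after the deleted
# position down by one so they keep pointing at the same examples
# (for example, deleting index 2 maps the set {1, 3, 5} to {1, 2, 4}).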
self.updated_example_indices = set([
i if i < index else i - 1 for i in self.updated_example_indices])
self._generate_sprite()
def update_example(self, index, example):
self.updated_example_indices.add(index)
self.examples[index] = example
self._generate_sprite()
def duplicate_example(self, index):
self.examples.append(self.examples[index])
self.updated_example_indices.add(len(self.examples) - 1)
self._generate_sprite()
def compute_custom_distance(self, index, callback_fn, params):
try:
distances = base.WitWidgetBase.compute_custom_distance_impl(
self, index, params['distanceParams'])
callback_dict = {
'distances': distances,
'exInd': index,
'funId': callback_fn,
'params': params['callbackParams']
}
output.eval_js("""distanceCallback({callback_dict})""".format(
callback_dict=json.dumps(callback_dict)))
except Exception as e:
output.eval_js(
"""backendError({error})""".format(
error=json.dumps({'msg': repr(e)})))
def get_eligible_features(self):
features_list = base.WitWidgetBase.get_eligible_features_impl(self)
output.eval_js("""eligibleFeaturesCallback({features_list})""".format(
features_list=json.dumps(features_list)))
def infer_mutants(self, info):
try:
json_mapping = base.WitWidgetBase.infer_mutants_impl(self, info)
output.eval_js("""inferMutantsCallback({json_mapping})""".format(
json_mapping=json.dumps(json_mapping)))
except Exception as e:
output.eval_js("""backendError({error})""".format(
error=json.dumps({'msg': repr(e)})))
def sort_eligible_features(self, info):
try:
features_list = base.WitWidgetBase.sort_eligible_features_impl(self, info)
output.eval_js("""sortEligibleFeaturesCallback({features_list})""".format(
features_list=json.dumps(features_list)))
except Exception as e:
output.eval_js("""backendError({error})""".format(
error=json.dumps({'msg': repr(e)})))
def _generate_sprite(self):
sprite = base.WitWidgetBase.create_sprite(self)
if sprite is not None:
output.eval_js("""spriteCallback('{sprite}')""".format(sprite=sprite))
|
|
#!/usr/bin/env python
#
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for Closure Library dependency calculation.
ClosureBuilder scans source files to build dependency info. From the
dependencies, the script can produce a manifest in dependency order,
a concatenated script, or compiled output from the Closure Compiler.
Paths to files can be expressed as individual arguments to the tool (intended
for use with find and xargs). As a convenience, --root can be used to specify
all JS files below a directory.
usage: %prog [options] [file1.js file2.js ...]
"""
__author__ = '[email protected] (Nathan Naze)'
import logging
import optparse
import os
import sys
import depstree
import jscompiler
import source
import treescan
def _GetOptionsParser():
"""Get the options parser."""
parser = optparse.OptionParser(__doc__)
parser.add_option('-i',
'--input',
dest='inputs',
action='append',
default=[],
help='One or more input files to calculate dependencies '
'for. The namespaces in these files will be combined with '
'those given with the -n flag to form the set of '
'namespaces to find dependencies for.')
parser.add_option('-n',
'--namespace',
dest='namespaces',
action='append',
default=[],
help='One or more namespaces to calculate dependencies '
'for. These namespaces will be combined with those given '
'with the -i flag to form the set of namespaces to find '
'dependencies for. A Closure namespace is a '
'dot-delimited path expression declared with a call to '
'goog.provide() (e.g. "goog.array" or "foo.bar").')
parser.add_option('--root',
dest='roots',
action='append',
default=[],
help='The paths that should be traversed to build the '
'dependencies.')
parser.add_option('-o',
'--output_mode',
dest='output_mode',
type='choice',
action='store',
choices=['list', 'script', 'compiled'],
default='list',
help='The type of output to generate from this script. '
'Options are "list" for a list of filenames, "script" '
'for a single script containing the contents of all the '
'files, or "compiled" to produce compiled output with '
'the Closure Compiler. Default is "list".')
parser.add_option('-c',
'--compiler_jar',
dest='compiler_jar',
action='store',
help='The location of the Closure compiler .jar file.')
parser.add_option('-f',
'--compiler_flags',
dest='compiler_flags',
default=[],
action='append',
help='Additional flags to pass to the Closure compiler. '
'To pass multiple flags, --compiler_flags has to be '
'specified multiple times.')
parser.add_option('-j',
'--jvm_flags',
dest='jvm_flags',
default=[],
action='append',
help='Additional flags to pass to the JVM running the Closure compiler. '
'To pass multiple flags, --jvm_flags has to be '
'specified multiple times.')
parser.add_option('--output_file',
dest='output_file',
action='store',
help=('If specified, write output to this path instead of '
'writing to standard output.'))
return parser
def _GetInputByPath(path, sources):
"""Get the source identified by a path.
Args:
path: str, A path to a file that identifies a source.
sources: An iterable collection of source objects.
Returns:
The source from sources identified by path, if found. Converts to
real paths for comparison.
"""
for js_source in sources:
# Convert both to real paths for comparison.
if os.path.realpath(path) == os.path.realpath(js_source.GetPath()):
return js_source
def _GetClosureBaseFile(sources):
"""Given a set of sources, returns the one base.js file.
Note that if zero, or two or more, base.js files are found, an error
message is written and the program exits.
Args:
sources: An iterable of _PathSource objects.
Returns:
The _PathSource representing the base Closure file.
"""
base_files = [
js_source for js_source in sources if _IsClosureBaseFile(js_source)]
if not base_files:
logging.error('No Closure base.js file found.')
sys.exit(1)
if len(base_files) > 1:
logging.error('More than one Closure base.js file found at these paths:')
for base_file in base_files:
logging.error(base_file.GetPath())
sys.exit(1)
return base_files[0]
def _IsClosureBaseFile(js_source):
"""Returns true if the given _PathSource is the Closure base.js source."""
return (os.path.basename(js_source.GetPath()) == 'base.js' and
js_source.provides == set(['goog']))
class _PathSource(source.Source):
"""Source file subclass that remembers its file path."""
def __init__(self, path):
"""Initialize a source.
Args:
path: str, Path to a JavaScript file. The source string will be read
from this file.
"""
super(_PathSource, self).__init__(source.GetFileContents(path))
self._path = path
def __str__(self):
return 'PathSource %s' % self._path
def GetPath(self):
"""Returns the path."""
return self._path
def _WrapGoogModuleSource(src):
return ('goog.loadModule(function(exports) {{'
'"use strict";'
'{0}'
'\n' # terminate any trailing single line comment.
';return exports'
'}});\n').format(src)
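# Illustrative example of the wrapper above (a sketch, not used elsewhere in
# this file): _WrapGoogModuleSource('goog.module("a.b");var x = 1;') returns
#
#   goog.loadModule(function(exports) {"use strict";goog.module("a.b");var x = 1;
#   ;return exports});
#
# i.e. the module body is evaluated in strict mode and its exports object is
# handed back to goog.loadModule.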
def main():
logging.basicConfig(format=(sys.argv[0] + ': %(message)s'),
level=logging.INFO)
options, args = _GetOptionsParser().parse_args()
# Make our output pipe.
if options.output_file:
out = open(options.output_file, 'w')
else:
out = sys.stdout
sources = set()
logging.info('Scanning paths...')
for path in options.roots:
for js_path in treescan.ScanTreeForJsFiles(path):
sources.add(_PathSource(js_path))
# Add scripts specified on the command line.
for js_path in args:
sources.add(_PathSource(js_path))
logging.info('%s sources scanned.', len(sources))
# Though deps output doesn't need to query the tree, we still build it
# to validate dependencies.
logging.info('Building dependency tree..')
tree = depstree.DepsTree(sources)
input_namespaces = set()
inputs = options.inputs or []
for input_path in inputs:
js_input = _GetInputByPath(input_path, sources)
if not js_input:
logging.error('No source matched input %s', input_path)
sys.exit(1)
input_namespaces.update(js_input.provides)
input_namespaces.update(options.namespaces)
if not input_namespaces:
logging.error('No namespaces found. At least one namespace must be '
'specified with the --namespace or --input flags.')
sys.exit(2)
# The Closure Library base file must go first.
base = _GetClosureBaseFile(sources)
deps = [base] + tree.GetDependencies(input_namespaces)
output_mode = options.output_mode
if output_mode == 'list':
out.writelines([js_source.GetPath() + '\n' for js_source in deps])
elif output_mode == 'script':
for js_source in deps:
src = js_source.GetSource()
if js_source.is_goog_module:
src = _WrapGoogModuleSource(src)
out.write(src.encode('utf-8') + '\n')
elif output_mode == 'compiled':
logging.warning("""\
Closure Compiler now natively understands and orders Closure dependencies and
is preferred over using this script for performing JavaScript compilation.
Please migrate your codebase.
See:
https://github.com/google/closure-compiler/wiki/Managing-Dependencies
""")
# Make sure a .jar is specified.
if not options.compiler_jar:
logging.error('--compiler_jar flag must be specified if --output_mode is '
'"compiled"')
sys.exit(2)
# Will throw an error if the compilation fails.
compiled_source = jscompiler.Compile(
options.compiler_jar,
[js_source.GetPath() for js_source in deps],
jvm_flags=options.jvm_flags,
compiler_flags=options.compiler_flags)
logging.info('JavaScript compilation succeeded.')
out.write(str(str(compiled_source).encode('utf-8')))
else:
logging.error('Invalid value for --output_mode flag.')
sys.exit(2)
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Daniel Lundin <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <[email protected]>
from trac.db import Table, Column, Index
# Database version identifier. Used for automatic upgrades.
db_version = 29
def __mkreports(reports):
"""Utility function used to create report data in same syntax as the
default data. This extra step is done to simplify editing the default
reports."""
result = []
for report in reports:
result.append((None, report[0], report[2], report[1]))
return result
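# Illustrative example (hypothetical report): __mkreports([('Active Tickets',
# 'List all active tickets.', 'SELECT ...')]) returns
# [(None, 'Active Tickets', 'SELECT ...', 'List all active tickets.')],
# i.e. rows ordered as (author, title, query, description) to match the
# 'report' table columns used in get_data() below.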
##
## Database schema
##
schema = [
# Common
Table('system', key='name')[
Column('name'),
Column('value')],
Table('permission', key=('username', 'action'))[
Column('username'),
Column('action')],
Table('auth_cookie', key=('cookie', 'ipnr', 'name'))[
Column('cookie'),
Column('name'),
Column('ipnr'),
Column('time', type='int')],
Table('session', key=('sid', 'authenticated'))[
Column('sid'),
Column('authenticated', type='int'),
Column('last_visit', type='int'),
Index(['last_visit']),
Index(['authenticated'])],
Table('session_attribute', key=('sid', 'authenticated', 'name'))[
Column('sid'),
Column('authenticated', type='int'),
Column('name'),
Column('value')],
Table('cache', key='id')[
Column('id', type='int'),
Column('generation', type='int'),
Column('key')],
# Attachments
Table('attachment', key=('type', 'id', 'filename'))[
Column('type'),
Column('id'),
Column('filename'),
Column('size', type='int'),
Column('time', type='int64'),
Column('description'),
Column('author'),
Column('ipnr')],
# Wiki system
Table('wiki', key=('name', 'version'))[
Column('name'),
Column('version', type='int'),
Column('time', type='int64'),
Column('author'),
Column('ipnr'),
Column('text'),
Column('comment'),
Column('readonly', type='int'),
Index(['time'])],
# Version control cache
Table('repository', key=('id', 'name'))[
Column('id', type='int'),
Column('name'),
Column('value')],
Table('revision', key=('repos', 'rev'))[
Column('repos', type='int'),
Column('rev', key_size=20),
Column('time', type='int64'),
Column('author'),
Column('message'),
Index(['repos', 'time'])],
Table('node_change', key=('repos', 'rev', 'path', 'change_type'))[
Column('repos', type='int'),
Column('rev', key_size=20),
Column('path', key_size=255),
Column('node_type', size=1),
Column('change_type', size=1, key_size=2),
Column('base_path'),
Column('base_rev'),
Index(['repos', 'rev'])],
# Ticket system
Table('ticket', key='id')[
Column('id', auto_increment=True),
Column('type'),
Column('time', type='int64'),
Column('changetime', type='int64'),
Column('component'),
Column('severity'),
Column('priority'),
Column('owner'),
Column('reporter'),
Column('cc'),
Column('version'),
Column('milestone'),
Column('status'),
Column('resolution'),
Column('summary'),
Column('description'),
Column('keywords'),
Index(['time']),
Index(['status'])],
Table('ticket_change', key=('ticket', 'time', 'field'))[
Column('ticket', type='int'),
Column('time', type='int64'),
Column('author'),
Column('field'),
Column('oldvalue'),
Column('newvalue'),
Index(['ticket']),
Index(['time'])],
Table('ticket_custom', key=('ticket', 'name'))[
Column('ticket', type='int'),
Column('name'),
Column('value')],
Table('enum', key=('type', 'name'))[
Column('type'),
Column('name'),
Column('value')],
Table('component', key='name')[
Column('name'),
Column('owner'),
Column('description')],
Table('milestone', key='name')[
Column('name'),
Column('due', type='int64'),
Column('completed', type='int64'),
Column('description')],
Table('version', key='name')[
Column('name'),
Column('time', type='int64'),
Column('description')],
# Report system
Table('report', key='id')[
Column('id', auto_increment=True),
Column('author'),
Column('title'),
Column('query'),
Column('description')],
]
##
## Default Reports
##
def get_reports(db):
return (
('Active Tickets',
"""\
* List all active tickets by priority.
* Color each row based on priority.
""",
"""\
SELECT p.value AS __color__,
id AS ticket, summary, component, version, milestone, t.type AS type,
owner, status,
time AS created,
changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
WHERE status <> 'closed'
ORDER BY """ + db.cast('p.value', 'int') + """, milestone, t.type, time
"""),
#----------------------------------------------------------------------------
('Active Tickets by Version',
"""\
This report shows how to color results by priority,
while grouping results by version.
Last modification time, description and reporter are included as hidden fields
for useful RSS export.
""",
"""\
SELECT p.value AS __color__,
version AS __group__,
id AS ticket, summary, component, version, t.type AS type,
owner, status,
time AS created,
changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
WHERE status <> 'closed'
ORDER BY (version IS NULL),version, """ + db.cast('p.value', 'int') +
""", t.type, time
"""),
#----------------------------------------------------------------------------
('Active Tickets by Milestone',
"""\
This report shows how to color results by priority,
while grouping results by milestone.
Last modification time, description and reporter are included as hidden fields
for useful RSS export.
""",
"""\
SELECT p.value AS __color__,
%s AS __group__,
id AS ticket, summary, component, version, t.type AS type,
owner, status,
time AS created,
changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
WHERE status <> 'closed'
ORDER BY (milestone IS NULL),milestone, %s, t.type, time
""" % (db.concat("'Milestone '", 'milestone'), db.cast('p.value', 'int'))),
#----------------------------------------------------------------------------
('Accepted, Active Tickets by Owner',
"""\
List accepted tickets, group by ticket owner, sorted by priority.
""",
"""\
SELECT p.value AS __color__,
owner AS __group__,
id AS ticket, summary, component, milestone, t.type AS type, time AS created,
changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
WHERE status = 'accepted'
ORDER BY owner, """ + db.cast('p.value', 'int') + """, t.type, time
"""),
#----------------------------------------------------------------------------
('Accepted, Active Tickets by Owner (Full Description)',
"""\
List tickets accepted, group by ticket owner.
This report demonstrates the use of full-row display.
""",
"""\
SELECT p.value AS __color__,
owner AS __group__,
id AS ticket, summary, component, milestone, t.type AS type, time AS created,
description AS _description_,
changetime AS _changetime, reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
WHERE status = 'accepted'
ORDER BY owner, """ + db.cast('p.value', 'int') + """, t.type, time
"""),
#----------------------------------------------------------------------------
('All Tickets By Milestone (Including closed)',
"""\
A more complex example to show how to make advanced reports.
""",
"""\
SELECT p.value AS __color__,
t.milestone AS __group__,
(CASE status
WHEN 'closed' THEN 'color: #777; background: #ddd; border-color: #ccc;'
ELSE
(CASE owner WHEN $USER THEN 'font-weight: bold' END)
END) AS __style__,
id AS ticket, summary, component, status,
resolution,version, t.type AS type, priority, owner,
changetime AS modified,
time AS _time,reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
ORDER BY (milestone IS NULL), milestone DESC, (status = 'closed'),
(CASE status WHEN 'closed' THEN changetime ELSE (-1) * %s END) DESC
""" % db.cast('p.value', 'int')),
#----------------------------------------------------------------------------
('My Tickets',
"""\
This report demonstrates the use of the automatically set
USER dynamic variable, replaced with the username of the
logged in user when executed.
""",
"""\
SELECT p.value AS __color__,
(CASE
WHEN owner = $USER AND status = 'accepted' THEN 'Accepted'
WHEN owner = $USER THEN 'Owned'
WHEN reporter = $USER THEN 'Reported'
ELSE 'Commented' END) AS __group__,
t.id AS ticket, summary, component, version, milestone,
t.type AS type, priority, t.time AS created,
t.changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
WHERE t.status <> 'closed' AND
(owner = $USER OR reporter = $USER OR
EXISTS (SELECT * FROM ticket_change tc
WHERE tc.ticket = t.id AND tc.author = $USER AND
tc.field = 'comment'))
ORDER BY (COALESCE(owner, '') = $USER AND status = 'accepted') DESC,
COALESCE(owner, '') = $USER DESC,
COALESCE(reporter, '') = $USER DESC,
""" + db.cast('p.value', 'int') + """, milestone, t.type, t.time
"""),
#----------------------------------------------------------------------------
('Active Tickets, Mine first',
"""\
* List all active tickets by priority.
* Show all tickets owned by the logged in user in a group first.
""",
"""\
SELECT p.value AS __color__,
(CASE owner
WHEN $USER THEN 'My Tickets'
ELSE 'Active Tickets'
END) AS __group__,
id AS ticket, summary, component, version, milestone, t.type AS type,
owner, status,
time AS created,
changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
WHERE status <> 'closed'
ORDER BY (COALESCE(owner, '') = $USER) DESC, """
+ db.cast('p.value', 'int') + """, milestone, t.type, time
"""))
##
## Default database values
##
# (table, (column1, column2), ((row1col1, row1col2), (row2col1, row2col2)))
def get_data(db):
return (('component',
('name', 'owner'),
(('component1', 'somebody'),
('component2', 'somebody'))),
('milestone',
('name', 'due', 'completed'),
(('milestone1', 0, 0),
('milestone2', 0, 0),
('milestone3', 0, 0),
('milestone4', 0, 0))),
('version',
('name', 'time'),
(('1.0', 0),
('2.0', 0))),
('enum',
('type', 'name', 'value'),
(('resolution', 'fixed', 1),
('resolution', 'invalid', 2),
('resolution', 'wontfix', 3),
('resolution', 'duplicate', 4),
('resolution', 'worksforme', 5),
('priority', 'blocker', 1),
('priority', 'critical', 2),
('priority', 'major', 3),
('priority', 'minor', 4),
('priority', 'trivial', 5),
('ticket_type', 'defect', 1),
('ticket_type', 'enhancement', 2),
('ticket_type', 'task', 3))),
('permission',
('username', 'action'),
(('anonymous', 'LOG_VIEW'),
('anonymous', 'FILE_VIEW'),
('anonymous', 'WIKI_VIEW'),
('authenticated', 'WIKI_CREATE'),
('authenticated', 'WIKI_MODIFY'),
('anonymous', 'SEARCH_VIEW'),
('anonymous', 'REPORT_VIEW'),
('anonymous', 'REPORT_SQL_VIEW'),
('anonymous', 'TICKET_VIEW'),
('authenticated', 'TICKET_CREATE'),
('authenticated', 'TICKET_MODIFY'),
('anonymous', 'BROWSER_VIEW'),
('anonymous', 'TIMELINE_VIEW'),
('anonymous', 'CHANGESET_VIEW'),
('anonymous', 'ROADMAP_VIEW'),
('anonymous', 'MILESTONE_VIEW'))),
('system',
('name', 'value'),
(('database_version', str(db_version)),
('initial_database_version', str(db_version)))),
('report',
('author', 'title', 'query', 'description'),
__mkreports(get_reports(db))))
|
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.base_status.base_stage import base_stage
import crm
from datetime import datetime
from operator import itemgetter
from openerp.osv import fields, osv, orm
import time
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.tools.translate import _
from openerp.tools import html2plaintext
from openerp.addons.base.res.res_partner import format_address
CRM_LEAD_FIELDS_TO_MERGE = ['name',
'partner_id',
'channel_id',
'company_id',
'country_id',
'section_id',
'state_id',
'stage_id',
'type_id',
'user_id',
'title',
'city',
'contact_name',
'description',
'email',
'fax',
'mobile',
'partner_name',
'phone',
'probability',
'planned_revenue',
'street',
'street2',
'zip',
'create_date',
'date_action_last',
'date_action_next',
'email_from',
'email_cc',
'partner_name']
CRM_LEAD_PENDING_STATES = (
crm.AVAILABLE_STATES[2][0], # Cancelled
crm.AVAILABLE_STATES[3][0], # Done
crm.AVAILABLE_STATES[4][0], # Pending
)
class crm_lead(base_stage, format_address, osv.osv):
""" CRM Lead Case """
_name = "crm.lead"
_description = "Lead/Opportunity"
_order = "priority,date_action,id desc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_track = {
'state': {
'crm.mt_lead_create': lambda self, cr, uid, obj, ctx=None: obj['state'] in ['new', 'draft'],
'crm.mt_lead_won': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'done',
'crm.mt_lead_lost': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'cancel',
},
'stage_id': {
'crm.mt_lead_stage': lambda self, cr, uid, obj, ctx=None: obj['state'] not in ['new', 'draft', 'cancel', 'done'],
},
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('type') and not context.get('default_type'):
context['default_type'] = vals.get('type')
if vals.get('section_id') and not context.get('default_section_id'):
context['default_section_id'] = vals.get('section_id')
# context: no_log, because the subtype already handles this
create_context = dict(context, mail_create_nolog=True)
return super(crm_lead, self).create(cr, uid, vals, context=create_context)
def _get_default_section_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
return self._resolve_section_id_from_context(cr, uid, context=context) or False
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
section_id = self._get_default_section_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], section_id, [('state', '=', 'draft')], context=context)
def _resolve_section_id_from_context(self, cr, uid, context=None):
""" Returns ID of section based on the value of 'section_id'
context key, or None if it cannot be resolved to a single
Sales Team.
"""
if context is None:
context = {}
if type(context.get('default_section_id')) in (int, long):
return context.get('default_section_id')
if isinstance(context.get('default_section_id'), basestring):
section_name = context['default_section_id']
section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=section_name, context=context)
if len(section_ids) == 1:
return int(section_ids[0][0])
return None
def _resolve_type_from_context(self, cr, uid, context=None):
""" Returns the type (lead or opportunity) from the type context
key. Returns None if it cannot be resolved.
"""
if context is None:
context = {}
return context.get('default_type')
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
access_rights_uid = access_rights_uid or uid
stage_obj = self.pool.get('crm.case.stage')
order = stage_obj._order
# lame hack to allow reverting search, should just work in the trivial case
if read_group_order == 'stage_id desc':
order = "%s desc" % order
# retrieve section_id from the context and write the domain
# - ('id', 'in', 'ids'): add columns that should be present
# - OR ('case_default', '=', True), ('fold', '=', False): add default columns that are not folded
# - OR ('section_ids', '=', section_id), ('fold', '=', False) if section_id: add section columns that are not folded
search_domain = []
section_id = self._resolve_section_id_from_context(cr, uid, context=context)
if section_id:
search_domain += ['|', ('section_ids', '=', section_id)]
search_domain += [('id', 'in', ids)]
else:
search_domain += ['|', ('id', 'in', ids), ('case_default', '=', True)]
# retrieve type from the context (if set: choose 'type' or 'both')
type = self._resolve_type_from_context(cr, uid, context=context)
if type:
search_domain += ['|', ('type', '=', type), ('type', '=', 'both')]
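# Illustrative example: with section_id = 3 and type = 'lead', the domain
# built above is
# ['|', ('section_ids', '=', 3), ('id', 'in', ids),
#  '|', ('type', '=', 'lead'), ('type', '=', 'both')].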
# perform search
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(crm_lead,self).fields_view_get(cr, user, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
if view_type == 'form':
res['arch'] = self.fields_view_get_address(cr, user, res['arch'], context=context)
return res
_group_by_full = {
'stage_id': _read_group_stage_ids
}
def _compute_day(self, cr, uid, ids, fields, args, context=None):
"""
Compute the number of days between a lead's creation date and the date it
was opened / closed, using the sales team's working calendar when available.
:return dict: maps each lead id to its computed 'day_open' / 'day_close' value
"""
cal_obj = self.pool.get('resource.calendar')
res_obj = self.pool.get('resource.resource')
res = {}
for lead in self.browse(cr, uid, ids, context=context):
for field in fields:
res[lead.id] = {}
duration = 0
ans = False
if field == 'day_open':
if lead.date_open:
date_create = datetime.strptime(lead.create_date, "%Y-%m-%d %H:%M:%S")
date_open = datetime.strptime(lead.date_open, "%Y-%m-%d %H:%M:%S")
ans = date_open - date_create
date_until = lead.date_open
elif field == 'day_close':
if lead.date_closed:
date_create = datetime.strptime(lead.create_date, "%Y-%m-%d %H:%M:%S")
date_close = datetime.strptime(lead.date_closed, "%Y-%m-%d %H:%M:%S")
date_until = lead.date_closed
ans = date_close - date_create
if ans:
resource_id = False
if lead.user_id:
resource_ids = res_obj.search(cr, uid, [('user_id','=',lead.user_id.id)])
if len(resource_ids):
resource_id = resource_ids[0]
duration = float(ans.days)
if lead.section_id and lead.section_id.resource_calendar_id:
duration = float(ans.days) * 24
new_dates = cal_obj.interval_get(cr,
uid,
lead.section_id.resource_calendar_id and lead.section_id.resource_calendar_id.id or False,
datetime.strptime(lead.create_date, '%Y-%m-%d %H:%M:%S'),
duration,
resource=resource_id
)
no_days = []
date_until = datetime.strptime(date_until, '%Y-%m-%d %H:%M:%S')
for in_time, out_time in new_dates:
if in_time.date() not in no_days:
no_days.append(in_time.date())
if out_time > date_until:
break
duration = len(no_days)
res[lead.id][field] = abs(int(duration))
return res
def _history_search(self, cr, uid, obj, name, args, context=None):
res = []
msg_obj = self.pool.get('mail.message')
message_ids = msg_obj.search(cr, uid, [('email_from','!=',False), ('subject', args[0][1], args[0][2])], context=context)
lead_ids = self.search(cr, uid, [('message_ids', 'in', message_ids)], context=context)
if lead_ids:
return [('id', 'in', lead_ids)]
else:
return [('id', '=', '0')]
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner', ondelete='set null', track_visibility='onchange',
select=True, help="Linked partner (optional). Usually created when converting the lead."),
'id': fields.integer('ID', readonly=True),
'name': fields.char('Subject', size=64, required=True, select=1),
'active': fields.boolean('Active', required=False),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'email_from': fields.char('Email', size=128, help="Email address of the contact", select=1),
'section_id': fields.many2one('crm.case.section', 'Sales Team',
select=True, track_visibility='onchange', help='When sending mails, the default email address is taken from the sales team.'),
'create_date': fields.datetime('Creation Date' , readonly=True),
'email_cc': fields.text('Global CC', size=252 , help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'description': fields.text('Notes'),
'write_date': fields.datetime('Update Date' , readonly=True),
'categ_ids': fields.many2many('crm.case.categ', 'crm_lead_category_rel', 'lead_id', 'category_id', 'Categories', \
domain="['|',('section_id','=',section_id),('section_id','=',False), ('object_id.model', '=', 'crm.lead')]"),
'type_id': fields.many2one('crm.case.resource.type', 'Campaign', \
domain="['|',('section_id','=',section_id),('section_id','=',False)]", help="From which campaign (seminar, marketing campaign, mass mailing, ...) did this contact come from?"),
'channel_id': fields.many2one('crm.case.channel', 'Channel', help="Communication channel (mail, direct, phone, ...)"),
'contact_name': fields.char('Contact Name', size=64),
'partner_name': fields.char("Customer Name", size=64,help='The name of the future partner company that will be created while converting the lead into opportunity', select=1),
'opt_out': fields.boolean('Opt-Out', oldname='optout',
help="If opt-out is checked, this contact has refused to receive emails for mass mailing and marketing campaign. "
"Filter 'Available for Mass Mailing' allows users to filter the leads when performing mass mailing."),
'type':fields.selection([ ('lead','Lead'), ('opportunity','Opportunity'), ],'Type', help="Type is used to separate Leads and Opportunities"),
'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority', select=True),
'date_closed': fields.datetime('Closed', readonly=True),
'stage_id': fields.many2one('crm.case.stage', 'Stage', track_visibility='onchange',
domain="['&', '&', ('fold', '=', False), ('section_ids', '=', section_id), '|', ('type', '=', type), ('type', '=', 'both')]"),
'user_id': fields.many2one('res.users', 'Salesperson', select=True, track_visibility='onchange'),
'referred': fields.char('Referred By', size=64),
'date_open': fields.datetime('Opened', readonly=True),
'day_open': fields.function(_compute_day, string='Days to Open', \
multi='day_open', type="float", store=True),
'day_close': fields.function(_compute_day, string='Days to Close', \
multi='day_close', type="float", store=True),
'state': fields.related('stage_id', 'state', type="selection", store=True,
selection=crm.AVAILABLE_STATES, string="Status", readonly=True,
help='The Status is set to \'Draft\', when a case is created. If the case is in progress the Status is set to \'Open\'. When the case is over, the Status is set to \'Done\'. If the case needs to be reviewed then the Status is set to \'Pending\'.'),
# Only used for type opportunity
'probability': fields.float('Success Rate (%)',group_operator="avg"),
'planned_revenue': fields.float('Expected Revenue', track_visibility='always'),
'ref': fields.reference('Reference', selection=crm._links_get, size=128),
'ref2': fields.reference('Reference 2', selection=crm._links_get, size=128),
'phone': fields.char("Phone", size=64),
'date_deadline': fields.date('Expected Closing', help="Estimate of the date on which the opportunity will be won."),
'date_action': fields.date('Next Action Date', select=True),
'title_action': fields.char('Next Action', size=64),
'color': fields.integer('Color Index'),
'partner_address_name': fields.related('partner_id', 'name', type='char', string='Partner Contact Name', readonly=True),
'partner_address_email': fields.related('partner_id', 'email', type='char', string='Partner Contact Email', readonly=True),
'company_currency': fields.related('company_id', 'currency_id', type='many2one', string='Currency', readonly=True, relation="res.currency"),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'user_login': fields.related('user_id', 'login', type='char', string='User Login', readonly=True),
# Fields for address, due to separation from crm and res.partner
'street': fields.char('Street', size=128),
'street2': fields.char('Street2', size=128),
'zip': fields.char('Zip', change_default=True, size=24),
'city': fields.char('City', size=128),
'state_id': fields.many2one("res.country.state", 'State'),
'country_id': fields.many2one('res.country', 'Country'),
'phone': fields.char('Phone', size=64),
'fax': fields.char('Fax', size=64),
'mobile': fields.char('Mobile', size=64),
'function': fields.char('Function', size=128),
'title': fields.many2one('res.partner.title', 'Title'),
'company_id': fields.many2one('res.company', 'Company', select=1),
'payment_mode': fields.many2one('crm.payment.mode', 'Payment Mode', \
domain="[('section_id','=',section_id)]"),
'planned_cost': fields.float('Planned Costs'),
}
_defaults = {
'active': 1,
'type': 'lead',
'user_id': lambda s, cr, uid, c: s._get_default_user(cr, uid, c),
'email_from': lambda s, cr, uid, c: s._get_default_email(cr, uid, c),
'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c),
'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, c),
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.lead', context=c),
'priority': lambda *a: crm.AVAILABLE_PRIORITIES[2][0],
'color': 0,
}
_sql_constraints = [
('check_probability', 'check(probability >= 0 and probability <= 100)', 'The probability of closing the deal should be between 0% and 100%!')
]
def onchange_stage_id(self, cr, uid, ids, stage_id, context=None):
if not stage_id:
return {'value':{}}
stage = self.pool.get('crm.case.stage').browse(cr, uid, stage_id, context)
if not stage.on_change:
return {'value':{}}
return {'value':{'probability': stage.probability}}
def on_change_partner(self, cr, uid, ids, partner_id, context=None):
result = {}
values = {}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
values = {
'partner_name' : partner.name,
'street' : partner.street,
'street2' : partner.street2,
'city' : partner.city,
'state_id' : partner.state_id and partner.state_id.id or False,
'country_id' : partner.country_id and partner.country_id.id or False,
'email_from' : partner.email,
'phone' : partner.phone,
'mobile' : partner.mobile,
'fax' : partner.fax,
}
return {'value' : values}
def on_change_user(self, cr, uid, ids, user_id, context=None):
""" When changing the user, also set a section_id or restrict section id
to the ones user_id is member of. """
if user_id:
section_ids = self.pool.get('crm.case.section').search(cr, uid, ['|', ('user_id', '=', user_id), ('member_ids', '=', user_id)], context=context)
if section_ids:
return {'value': {'section_id': section_ids[0]}}
return {'value': {}}
def _check(self, cr, uid, ids=False, context=None):
""" Override of the base.stage method.
Function called by the scheduler to process cases for date actions.
Only works on cases that are not done or cancelled.
"""
cr.execute('select * from crm_case \
where (date_action_last<%s or date_action_last is null) \
and (date_action_next<=%s or date_action_next is null) \
and state not in (\'cancel\',\'done\')',
(time.strftime("%Y-%m-%d %H:%M:%S"),
time.strftime('%Y-%m-%d %H:%M:%S')))
ids2 = map(lambda x: x[0], cr.fetchall() or [])
cases = self.browse(cr, uid, ids2, context=context)
return self._action(cr, uid, cases, False, context=context)
def stage_find(self, cr, uid, cases, section_id, domain=None, order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- type: stage type must be the same or 'both'
- section_id: if set, stages must belong to this section or
be a default stage; if not set, stages must be default
stages
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
types = ['both']
if not cases :
type = context.get('default_type')
types += [type]
if section_id:
section_ids.append(section_id)
for lead in cases:
if lead.section_id:
section_ids.append(lead.section_id.id)
if lead.type not in types:
types.append(lead.type)
# OR all section_ids and OR with case_default
search_domain = []
if section_ids:
search_domain += [('|')] * len(section_ids)
for section_id in section_ids:
search_domain.append(('section_ids', '=', section_id))
else:
search_domain.append(('case_default', '=', True))
# AND with cases types
search_domain.append(('type', 'in', types))
# AND with the domain in parameter
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('crm.case.stage').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
def case_cancel(self, cr, uid, ids, context=None):
""" Overrides case_cancel from base_stage to set probability """
res = super(crm_lead, self).case_cancel(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'probability' : 0.0}, context=context)
return res
def case_reset(self, cr, uid, ids, context=None):
""" Overrides case_reset from base_stage to set probability """
res = super(crm_lead, self).case_reset(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'probability': 0.0}, context=context)
return res
def case_mark_lost(self, cr, uid, ids, context=None):
""" Mark the case as lost: state=cancel and probability=0 """
for lead in self.browse(cr, uid, ids):
stage_id = self.stage_find(cr, uid, [lead], lead.section_id.id or False, [('probability', '=', 0.0),('on_change','=',True)], context=context)
if stage_id:
self.case_set(cr, uid, [lead.id], values_to_update={'probability': 0.0}, new_stage_id=stage_id, context=context)
return True
def case_mark_won(self, cr, uid, ids, context=None):
""" Mark the case as won: state=done and probability=100 """
for lead in self.browse(cr, uid, ids):
stage_id = self.stage_find(cr, uid, [lead], lead.section_id.id or False, [('probability', '=', 100.0),('on_change','=',True)], context=context)
if stage_id:
self.case_set(cr, uid, [lead.id], values_to_update={'probability': 100.0}, new_stage_id=stage_id, context=context)
return True
def set_priority(self, cr, uid, ids, priority):
""" Set lead priority
"""
return self.write(cr, uid, ids, {'priority' : priority})
def set_high_priority(self, cr, uid, ids, context=None):
""" Set lead priority to high
"""
return self.set_priority(cr, uid, ids, '1')
def set_normal_priority(self, cr, uid, ids, context=None):
""" Set lead priority to normal
"""
return self.set_priority(cr, uid, ids, '3')
def _merge_get_result_type(self, cr, uid, opps, context=None):
"""
Define the type of the result of the merge. If at least one of the
elements to merge is an opp, the resulting new element will be an opp.
Otherwise it will be a lead.
We'll directly use a list of browse records instead of a list of ids
for performance's sake: it will spare a second browse of the
leads/opps.
:param list opps: list of browse records containing the leads/opps to process
:return string type: the type of the final element
"""
for opp in opps:
if (opp.type == 'opportunity'):
return 'opportunity'
return 'lead'
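# Illustrative example: merging records of types ['lead', 'opportunity',
# 'lead'] yields 'opportunity'; merging only leads yields 'lead'.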
def _merge_data(self, cr, uid, ids, oldest, fields, context=None):
"""
Prepare lead/opp data into a dictionary for merging. Different types
of fields are processed in different ways:
- text: all the values are concatenated
- m2m and o2m: those fields aren't processed
- m2o: the first not null value prevails (the other are dropped)
- any other type of field: same as m2o
:param list ids: list of ids of the leads to process
:param list fields: list of leads' fields to process
:return dict data: contains the merged values
"""
opportunities = self.browse(cr, uid, ids, context=context)
def _get_first_not_null(attr):
for opp in opportunities:
if hasattr(opp, attr) and bool(getattr(opp, attr)):
return getattr(opp, attr)
return False
def _get_first_not_null_id(attr):
res = _get_first_not_null(attr)
return res and res.id or False
def _concat_all(attr):
return '\n\n'.join(filter(lambda x: x, [getattr(opp, attr) or '' for opp in opportunities if hasattr(opp, attr)]))
# Process the fields' values
data = {}
for field_name in fields:
field_info = self._all_columns.get(field_name)
if field_info is None:
continue
field = field_info.column
if field._type in ('many2many', 'one2many'):
continue
elif field._type == 'many2one':
data[field_name] = _get_first_not_null_id(field_name) # !!
elif field._type == 'text':
data[field_name] = _concat_all(field_name) #not lost
else:
data[field_name] = _get_first_not_null(field_name) #not lost
# Define the resulting type ('lead' or 'opportunity')
data['type'] = self._merge_get_result_type(cr, uid, opportunities, context)
return data
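# Illustrative sketch of the merge rules above (hypothetical values): merging
# lead A (description 'First note', no partner_id) with lead B (description
# 'Second note', partner_id 7) produces
# {'description': 'First note\n\nSecond note', 'partner_id': 7, ...}:
# text fields are concatenated, many2one fields keep the first non-null id,
# and m2m/o2m fields are left untouched.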
def _mail_body(self, cr, uid, lead, fields, title=False, context=None):
body = []
if title:
body.append("%s\n" % (title))
for field_name in fields:
field_info = self._all_columns.get(field_name)
if field_info is None:
continue
field = field_info.column
value = ''
if field._type == 'selection':
if hasattr(field.selection, '__call__'):
key = field.selection(self, cr, uid, context=context)
else:
key = field.selection
value = dict(key).get(lead[field_name], lead[field_name])
elif field._type == 'many2one':
if lead[field_name]:
value = lead[field_name].name_get()[0][1]
elif field._type == 'many2many':
if lead[field_name]:
for val in lead[field_name]:
field_value = val.name_get()[0][1]
value += field_value + ","
else:
value = lead[field_name]
body.append("%s: %s" % (field.string, value or ''))
return "<br/>".join(body + ['<br/>'])
def _merge_notify(self, cr, uid, opportunity_id, opportunities, context=None):
"""
Create a message gathering merged leads/opps information.
"""
# TOFIX: a mail template should be used instead of a fixed body and subject text
details = []
result_type = self._merge_get_result_type(cr, uid, opportunities, context)
if result_type == 'lead':
merge_message = _('Merged leads')
else:
merge_message = _('Merged opportunities')
subject = [merge_message]
for opportunity in opportunities:
subject.append(opportunity.name)
title = "%s : %s" % (opportunity.type == 'opportunity' and _('Merged opportunity') or _('Merged lead'), opportunity.name)
fields = list(CRM_LEAD_FIELDS_TO_MERGE)
details.append(self._mail_body(cr, uid, opportunity, fields, title=title, context=context))
# Chatter message's subject
subject = subject[0] + ": " + ", ".join(subject[1:])
details = "\n\n".join(details)
return self.message_post(cr, uid, [opportunity_id], body=details, subject=subject, context=context)
def _merge_opportunity_history(self, cr, uid, opportunity_id, opportunities, context=None):
message = self.pool.get('mail.message')
for opportunity in opportunities:
for history in opportunity.message_ids:
message.write(cr, uid, history.id, {
'res_id': opportunity_id,
'subject' : _("From %s : %s") % (opportunity.name, history.subject)
}, context=context)
return True
def _merge_opportunity_attachments(self, cr, uid, opportunity_id, opportunities, context=None):
attach_obj = self.pool.get('ir.attachment')
# return attachments of opportunity
def _get_attachments(opportunity_id):
attachment_ids = attach_obj.search(cr, uid, [('res_model', '=', self._name), ('res_id', '=', opportunity_id)], context=context)
return attach_obj.browse(cr, uid, attachment_ids, context=context)
first_attachments = _get_attachments(opportunity_id)
# Counter of all attachments to move. Used to make sure renamed attachments get distinct names.
count = 1
for opportunity in opportunities:
attachments = _get_attachments(opportunity.id)
for attachment in attachments:
values = {'res_id': opportunity_id}
for attachment_in_first in first_attachments:
if attachment.name == attachment_in_first.name:
# Rename the moved attachment so its stored filename ('datas_fname') does not
# clash with an attachment already linked to the destination record.
values['datas_fname'] = "%s (%s)" % (attachment.name, count)
count += 1
attachment.write(values)
return True
def merge_opportunity(self, cr, uid, ids, context=None):
"""
Different cases of merge:
- merge leads together = 1 new lead
- merge at least 1 opp with anything else (lead or opp) = 1 new opp
:param list ids: leads/opportunities ids to merge
:return int id: id of the resulting lead/opp
"""
if context is None:
context = {}
if len(ids) <= 1:
raise osv.except_osv(_('Warning!'), _('Please select more than one element (lead or opportunity) from the list view.'))
opportunities = self.browse(cr, uid, ids, context=context)
sequenced_opps = []
for opportunity in opportunities:
sequence = -1
if opportunity.stage_id and opportunity.stage_id.state != 'cancel':
sequence = opportunity.stage_id.sequence
sequenced_opps.append(((int(sequence != -1 and opportunity.type == 'opportunity'), sequence, -opportunity.id), opportunity))
sequenced_opps.sort(reverse=True)
opportunities = map(itemgetter(1), sequenced_opps)
ids = [opportunity.id for opportunity in opportunities]
highest = opportunities[0]
opportunities_rest = opportunities[1:]
tail_opportunities = opportunities_rest
fields = list(CRM_LEAD_FIELDS_TO_MERGE)
merged_data = self._merge_data(cr, uid, ids, highest, fields, context=context)
# Merge messages and attachements into the first opportunity
self._merge_opportunity_history(cr, uid, highest.id, tail_opportunities, context=context)
self._merge_opportunity_attachments(cr, uid, highest.id, tail_opportunities, context=context)
# Merge notifications about loss of information
opportunities = [highest]
opportunities.extend(opportunities_rest)
self._merge_notify(cr, uid, highest.id, opportunities, context=context)
# Check if the stage is in the stages of the sales team. If not, assign the stage with the lowest sequence
if merged_data.get('section_id'):
section_stage_ids = self.pool.get('crm.case.stage').search(cr, uid, [('section_ids', 'in', merged_data['section_id']), ('type', '=', merged_data.get('type'))], order='sequence', context=context)
if merged_data.get('stage_id') not in section_stage_ids:
merged_data['stage_id'] = section_stage_ids and section_stage_ids[0] or False
# Write merged data into first opportunity
self.write(cr, uid, [highest.id], merged_data, context=context)
# Delete tail opportunities
# Use SUPERUSER_ID to avoid access rights issues: since the user was allowed to see these records, it is safe to delete them here.
self.unlink(cr, SUPERUSER_ID, [x.id for x in tail_opportunities], context=context)
return highest.id
def _convert_opportunity_data(self, cr, uid, lead, customer, section_id=False, context=None):
crm_stage = self.pool.get('crm.case.stage')
contact_id = False
if customer:
contact_id = self.pool.get('res.partner').address_get(cr, uid, [customer.id])['default']
if not section_id:
section_id = lead.section_id and lead.section_id.id or False
val = {
'planned_revenue': lead.planned_revenue,
'probability': lead.probability,
'name': lead.name,
'partner_id': customer and customer.id or False,
'user_id': (lead.user_id and lead.user_id.id),
'type': 'opportunity',
'date_action': fields.datetime.now(),
'date_open': fields.datetime.now(),
'email_from': customer and customer.email or lead.email_from,
'phone': customer and customer.phone or lead.phone,
}
if not lead.stage_id or lead.stage_id.type=='lead':
val['stage_id'] = self.stage_find(cr, uid, [lead], section_id, [('state', '=', 'draft'),('type', 'in', ('opportunity','both'))], context=context)
return val
def convert_opportunity(self, cr, uid, ids, partner_id, user_ids=False, section_id=False, context=None):
customer = False
if partner_id:
partner = self.pool.get('res.partner')
customer = partner.browse(cr, uid, partner_id, context=context)
for lead in self.browse(cr, uid, ids, context=context):
if lead.state in ('done', 'cancel'):
continue
vals = self._convert_opportunity_data(cr, uid, lead, customer, section_id, context=context)
self.write(cr, uid, [lead.id], vals, context=context)
self.message_post(cr, uid, ids, body=_("Lead <b>converted into an Opportunity</b>"), subtype="crm.mt_lead_convert_to_opportunity", context=context)
if user_ids or section_id:
self.allocate_salesman(cr, uid, ids, user_ids, section_id, context=context)
return True
def _lead_create_contact(self, cr, uid, lead, name, is_company, parent_id=False, context=None):
partner = self.pool.get('res.partner')
vals = {'name': name,
'user_id': lead.user_id.id,
'comment': lead.description,
'section_id': lead.section_id.id or False,
'parent_id': parent_id,
'phone': lead.phone,
'mobile': lead.mobile,
'email': tools.email_split(lead.email_from) and tools.email_split(lead.email_from)[0] or False,
'fax': lead.fax,
'title': lead.title and lead.title.id or False,
'function': lead.function,
'street': lead.street,
'street2': lead.street2,
'zip': lead.zip,
'city': lead.city,
'country_id': lead.country_id and lead.country_id.id or False,
'state_id': lead.state_id and lead.state_id.id or False,
'is_company': is_company,
'type': 'contact'
}
partner = partner.create(cr, uid, vals, context=context)
return partner
def _create_lead_partner(self, cr, uid, lead, context=None):
partner_id = False
if lead.partner_name and lead.contact_name:
partner_id = self._lead_create_contact(cr, uid, lead, lead.partner_name, True, context=context)
partner_id = self._lead_create_contact(cr, uid, lead, lead.contact_name, False, partner_id, context=context)
elif lead.partner_name and not lead.contact_name:
partner_id = self._lead_create_contact(cr, uid, lead, lead.partner_name, True, context=context)
elif not lead.partner_name and lead.contact_name:
partner_id = self._lead_create_contact(cr, uid, lead, lead.contact_name, False, context=context)
elif lead.email_from and self.pool.get('res.partner')._parse_partner_name(lead.email_from, context=context)[0]:
contact_name = self.pool.get('res.partner')._parse_partner_name(lead.email_from, context=context)[0]
partner_id = self._lead_create_contact(cr, uid, lead, contact_name, False, context=context)
else:
raise osv.except_osv(
_('Warning!'),
_('No customer name defined. Please fill one of the following fields: Company Name, Contact Name or Email ("Name <email@address>")')
)
return partner_id
def _lead_set_partner(self, cr, uid, lead, partner_id, context=None):
"""
Assign a partner to a lead.
:param object lead: browse record of the lead to process
:param int partner_id: identifier of the partner to assign
:return bool: True if the partner has properly been assigned
"""
res = False
res_partner = self.pool.get('res.partner')
if partner_id:
res_partner.write(cr, uid, partner_id, {'section_id': lead.section_id and lead.section_id.id or False})
contact_id = res_partner.address_get(cr, uid, [partner_id])['default']
res = lead.write({'partner_id': partner_id}, context=context)
message = _("<b>Partner</b> set to <em>%s</em>." % (lead.partner_id.name))
self.message_post(cr, uid, [lead.id], body=message, context=context)
return res
def handle_partner_assignation(self, cr, uid, ids, action='create', partner_id=False, context=None):
"""
Handle partner assignation during a lead conversion.
if action is 'create', create new partner with contact and assign lead to new partner_id.
otherwise assign lead to the specified partner_id
:param list ids: leads/opportunities ids to process
:param string action: what has to be done regarding partners (create it, assign an existing one, or nothing)
:param int partner_id: partner to assign if any
        :return dict: dictionary organized as follows: {lead_id: partner_assigned_id}
"""
#TODO this is a duplication of the handle_partner_assignation method of crm_phonecall
partner_ids = {}
# If a partner_id is given, force this partner for all elements
force_partner_id = partner_id
for lead in self.browse(cr, uid, ids, context=context):
# If the action is set to 'create' and no partner_id is set, create a new one
if action == 'create':
partner_id = force_partner_id or self._create_lead_partner(cr, uid, lead, context)
self._lead_set_partner(cr, uid, lead, partner_id, context=context)
partner_ids[lead.id] = partner_id
return partner_ids
def allocate_salesman(self, cr, uid, ids, user_ids=None, team_id=False, context=None):
"""
Assign salesmen and salesteam to a batch of leads. If there are more
leads than salesmen, these salesmen will be assigned in round-robin.
E.g.: 4 salesmen (S1, S2, S3, S4) for 6 leads (L1, L2, ... L6). They
        will be assigned as follows: L1 - S1, L2 - S2, L3 - S3, L4 - S4,
        L5 - S1, L6 - S2. (A standalone sketch of this cycling is appended
        at the end of this module.)
:param list ids: leads/opportunities ids to process
:param list user_ids: salesmen to assign
:param int team_id: salesteam to assign
:return bool
"""
index = 0
for lead_id in ids:
value = {}
if team_id:
value['section_id'] = team_id
if user_ids:
value['user_id'] = user_ids[index]
# Cycle through user_ids
index = (index + 1) % len(user_ids)
if value:
self.write(cr, uid, [lead_id], value, context=context)
return True
def schedule_phonecall(self, cr, uid, ids, schedule_time, call_summary, desc, phone, contact_name, user_id=False, section_id=False, categ_id=False, action='schedule', context=None):
"""
:param string action: ('schedule','Schedule a call'), ('log','Log a call')
"""
phonecall = self.pool.get('crm.phonecall')
model_data = self.pool.get('ir.model.data')
phonecall_dict = {}
if not categ_id:
try:
res_id = model_data._get_id(cr, uid, 'crm', 'categ_phone2')
categ_id = model_data.browse(cr, uid, res_id, context=context).res_id
except ValueError:
pass
for lead in self.browse(cr, uid, ids, context=context):
if not section_id:
section_id = lead.section_id and lead.section_id.id or False
if not user_id:
user_id = lead.user_id and lead.user_id.id or False
vals = {
'name': call_summary,
'opportunity_id': lead.id,
'user_id': user_id or False,
'categ_id': categ_id or False,
'description': desc or '',
'date': schedule_time,
'section_id': section_id or False,
'partner_id': lead.partner_id and lead.partner_id.id or False,
'partner_phone': phone or lead.phone or (lead.partner_id and lead.partner_id.phone or False),
'partner_mobile': lead.partner_id and lead.partner_id.mobile or False,
'priority': lead.priority,
}
new_id = phonecall.create(cr, uid, vals, context=context)
phonecall.case_open(cr, uid, [new_id], context=context)
if action == 'log':
phonecall.case_close(cr, uid, [new_id], context=context)
phonecall_dict[lead.id] = new_id
self.schedule_phonecall_send_note(cr, uid, [lead.id], new_id, action, context=context)
return phonecall_dict
def redirect_opportunity_view(self, cr, uid, opportunity_id, context=None):
models_data = self.pool.get('ir.model.data')
# Get opportunity views
dummy, form_view = models_data.get_object_reference(cr, uid, 'crm', 'crm_case_form_view_oppor')
dummy, tree_view = models_data.get_object_reference(cr, uid, 'crm', 'crm_case_tree_view_oppor')
return {
'name': _('Opportunity'),
'view_type': 'form',
'view_mode': 'tree, form',
'res_model': 'crm.lead',
'domain': [('type', '=', 'opportunity')],
'res_id': int(opportunity_id),
'view_id': False,
'views': [(form_view or False, 'form'),
(tree_view or False, 'tree'),
(False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
}
def redirect_lead_view(self, cr, uid, lead_id, context=None):
models_data = self.pool.get('ir.model.data')
# Get lead views
dummy, form_view = models_data.get_object_reference(cr, uid, 'crm', 'crm_case_form_view_leads')
dummy, tree_view = models_data.get_object_reference(cr, uid, 'crm', 'crm_case_tree_view_leads')
return {
'name': _('Lead'),
'view_type': 'form',
'view_mode': 'tree, form',
'res_model': 'crm.lead',
'domain': [('type', '=', 'lead')],
'res_id': int(lead_id),
'view_id': False,
'views': [(form_view or False, 'form'),
(tree_view or False, 'tree'),
(False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
}
def action_makeMeeting(self, cr, uid, ids, context=None):
"""
Open meeting's calendar view to schedule meeting on current opportunity.
:return dict: dictionary value for created Meeting view
"""
opportunity = self.browse(cr, uid, ids[0], context)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'base_calendar', 'action_crm_meeting', context)
res['context'] = {
'default_opportunity_id': opportunity.id,
'default_partner_id': opportunity.partner_id and opportunity.partner_id.id or False,
'default_partner_ids' : opportunity.partner_id and [opportunity.partner_id.id] or False,
'default_user_id': uid,
'default_section_id': opportunity.section_id and opportunity.section_id.id or False,
'default_email_from': opportunity.email_from,
'default_name': opportunity.name,
}
return res
def write(self, cr, uid, ids, vals, context=None):
if vals.get('stage_id') and not vals.get('probability'):
# change probability of lead(s) if required by stage
stage = self.pool.get('crm.case.stage').browse(cr, uid, vals['stage_id'], context=context)
if stage.on_change:
vals['probability'] = stage.probability
return super(crm_lead, self).write(cr, uid, ids, vals, context=context)
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
if not context:
context = {}
lead = self.browse(cr, uid, id, context=context)
local_context = dict(context)
local_context.setdefault('default_type', lead.type)
local_context.setdefault('default_section_id', lead.section_id)
if lead.type == 'opportunity':
default['date_open'] = fields.datetime.now()
else:
default['date_open'] = False
default['date_closed'] = False
default['stage_id'] = self._get_default_stage_id(cr, uid, local_context)
return super(crm_lead, self).copy(cr, uid, id, default, context=context)
def new_mail_send(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi sale template message loaded by default
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'crm', 'email_template_opportunity_mail')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
if context is None:
context = {}
ctx = context.copy()
ctx.update({
'default_model': 'crm.lead',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
# ----------------------------------------
# Mail Gateway
# ----------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
""" Override to get the reply_to of the parent project. """
return [lead.section_id.message_get_reply_to()[0] if lead.section_id else False
for lead in self.browse(cr, SUPERUSER_ID, ids, context=context)]
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(crm_lead, self).message_get_suggested_recipients(cr, uid, ids, context=context)
try:
for lead in self.browse(cr, uid, ids, context=context):
if lead.partner_id:
self._message_add_suggested_recipient(cr, uid, recipients, lead, partner=lead.partner_id, reason=_('Customer'))
elif lead.email_from:
self._message_add_suggested_recipient(cr, uid, recipients, lead, email=lead.email_from, reason=_('Customer Email'))
except (osv.except_osv, orm.except_orm): # no read access rights -> just ignore suggested recipients because this imply modifying followers
pass
return recipients
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
desc = html2plaintext(msg.get('body')) if msg.get('body') else ''
defaults = {
'name': msg.get('subject') or _("No Subject"),
'description': desc,
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'partner_id': msg.get('author_id', False),
'user_id': False,
}
if msg.get('author_id'):
defaults.update(self.on_change_partner(cr, uid, None, msg.get('author_id'), context=context)['value'])
if msg.get('priority') in dict(crm.AVAILABLE_PRIORITIES):
defaults['priority'] = msg.get('priority')
defaults.update(custom_values)
return super(crm_lead, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
def message_update(self, cr, uid, ids, msg, update_vals=None, context=None):
""" Overrides mail_thread message_update that is called by the mailgateway
through message_process.
This method updates the document according to the email.
"""
if isinstance(ids, (str, int, long)):
ids = [ids]
if update_vals is None: update_vals = {}
if msg.get('priority') in dict(crm.AVAILABLE_PRIORITIES):
update_vals['priority'] = msg.get('priority')
maps = {
'cost':'planned_cost',
'revenue': 'planned_revenue',
'probability':'probability',
}
for line in msg.get('body', '').split('\n'):
line = line.strip()
res = tools.command_re.match(line)
if res and maps.get(res.group(1).lower()):
key = maps.get(res.group(1).lower())
update_vals[key] = res.group(2).lower()
return super(crm_lead, self).message_update(cr, uid, ids, msg, update_vals=update_vals, context=context)
# ----------------------------------------
# OpenChatter methods and notifications
# ----------------------------------------
def schedule_phonecall_send_note(self, cr, uid, ids, phonecall_id, action, context=None):
phonecall = self.pool.get('crm.phonecall').browse(cr, uid, [phonecall_id], context=context)[0]
if action == 'log':
message = _('Logged a call for %(date)s. %(description)s')
else:
message = _('Scheduled a call for %(date)s. %(description)s')
phonecall_date = datetime.strptime(phonecall.date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
phonecall_usertime = fields.datetime.context_timestamp(cr, uid, phonecall_date, context=context).strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
html_time = "<time datetime='%s+00:00'>%s</time>" % (phonecall.date, phonecall_usertime)
message = message % dict(date=html_time, description=phonecall.description)
return self.message_post(cr, uid, ids, body=message, context=context)
def log_meeting(self, cr, uid, ids, meeting_subject, meeting_date, duration, context=None):
if not duration:
duration = _('unknown')
else:
duration = str(duration)
message = _("Meeting scheduled at '%s'<br> Subject: %s <br> Duration: %s hour(s)") % (meeting_date, meeting_subject, duration)
return self.message_post(cr, uid, ids, body=message, context=context)
def onchange_state(self, cr, uid, ids, state_id, context=None):
if state_id:
country_id=self.pool.get('res.country.state').browse(cr, uid, state_id, context).country_id.id
return {'value':{'country_id':country_id}}
return {}
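
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the round-robin
# behaviour documented in allocate_salesman() above reduces to cycling an
# index over user_ids. The helper below is a hypothetical, self-contained
# rendering of that idea, assuming plain lists of lead and user identifiers.
# --------------------------------------------------------------------------
def _round_robin_sketch(lead_ids, user_ids):
    # Pair every lead with a salesman, wrapping around when leads outnumber
    # salesmen (L5 -> S1, L6 -> S2 in the docstring example).
    assignments = {}
    index = 0
    for lead_id in lead_ids:
        assignments[lead_id] = user_ids[index]
        index = (index + 1) % len(user_ids)
    return assignments

if __name__ == '__main__':
    print(_round_robin_sketch(['L1', 'L2', 'L3', 'L4', 'L5', 'L6'],
                              ['S1', 'S2', 'S3', 'S4']))
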
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
from django.views.generic import *
from teachers.models import *
from teachers.forms import *
from django.shortcuts import *
from baseapp.models import *
from django.conf import settings
from django.contrib import messages
from django.db import *
from datetime import *
from datetime import datetime, timedelta
from django.contrib.auth import authenticate, login
from django.views.decorators.cache import never_cache
class teacher_leave_entry_create(View):
#@never_cache
def get(self,request,**kwargs):
if request.user.is_authenticated():
import teacher_main_views
            if request.user.account.associated_with in ('state', 'DIPE', 'CIPE', 'Zone', 'IAS', 'IMS'):
AEOENTRY=0
else:
AEOENTRY=teacher_main_views.aeoentrycheck(request.user.account.associated_with)
leave_code=Teacher_leave_type.objects.all()
tid=self.kwargs.get('pk')
unique_id=Teacher_detail.objects.get(id=tid)
basic_det=Basicinfo.objects.get(school_id=unique_id.school_id)
school_id =unique_id.school_id
doj=unique_id.dofsed
try:
records=Teacher_leave_master.objects.get(teacherid_id=unique_id)
timez=records.timestamp
validated=timez.date()
except:
msg = " First Make Leave Master "
messages.warning(request, msg)
return redirect('teacher_personnel_entry_after',pk=tid)
family=Teacher_family_detail.objects.filter(teacherid_id=tid)
edu_list = Teacher_leave.objects.filter(teacherid_id=tid)
if edu_list.count()==0:
messages.success(request, 'No Data')
return render(request,'teachers/leave/teacher_leave_form.html',locals())
else:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
#@never_cache
def post(self,request,**kwargs):
if request.user.is_authenticated():
unique_id=request.POST['unique_id']
unique_name=request.POST['unique_name']
data=Teacher_detail.objects.get(id=unique_id)
doj=data.dofsed
desig=data.designation
doregular=data.doregu
form = Teacher_leaveform(request.POST,request.FILES)
if form.is_valid():
records=Teacher_leave_master.objects.get(teacherid_id=unique_id)
no_of_days1=(form.cleaned_data['leave_to'])-(form.cleaned_data['leave_from'])
no_of_days=no_of_days1.days +1
leave=Teacher_leave(teacherid_id=unique_id,
leave_type=form.cleaned_data['leave_type'],
leave_from=form.cleaned_data['leave_from'],
leave_to=form.cleaned_data['leave_to'],
order_no=form.cleaned_data['order_no'],
order_date=form.cleaned_data['order_date'],
)
b=completed_table.objects.get(teacherid_id=unique_id)
if b.Teacher_leav=='0':
b.id=b.id
b.teacherid_id=b.teacherid_id
b.Teacher_leav=9
b.save()
desig_id=User_desig.objects.get(user_desig=desig)
desig=desig_id.ser_type
record=Teacher_leave_master.objects.get(teacherid_id=unique_id)
if leave.leave_type_id==2:
if no_of_days >(record.el_ob-record.el_taken):
msg = " Leave Exceeds."
messages.warning(request, msg )
return redirect('teacher_leave_entry_create',pk=unique_id)
else:
opening_balance=record.el_ob
if (record.el_taken):
record.el_taken=int(record.el_taken)+int(no_of_days)
else:
record.el_taken=no_of_days
if (record.el_bal):
record.el_bal=int(record.el_bal)-int(no_of_days)
else:
record.el_bal=no_of_days
leave.ob=opening_balance
leave.taken=record.el_taken
leave.bal=record.el_bal
record.save()
leave.save()
msg = str(unique_name) + " Leave details added successfully."
messages.success(request, msg )
elif leave.leave_type_id==3:
try:
if desig==0:
years_of_exp=(leave.leave_from-doj).days/365.25
print 'staff'
if years_of_exp<2:
messages.success(request,'ML Not Eligible')
return redirect('teacher_leave_entry_create',pk=unique_id)
elif years_of_exp>=2 and years_of_exp<5:
eligible_days=90
elif years_of_exp>=5 and years_of_exp<10:
eligible_days=180
elif years_of_exp>=10 and years_of_exp<15:
eligible_days=270
elif years_of_exp>=15 and years_of_exp<20:
eligible_days=360
elif years_of_exp>=20:
eligible_days=540
else:
years_of_exp=(leave.leave_from-doregular).days/365.25
if years_of_exp<1:
messages.success(request,'ML Not Eligible')
return redirect('teacher_leave_entry_create',pk=unique_id)
elif years_of_exp>=1 and years_of_exp<15:
eligible_days=((leave.leave_from-doregular).days/1365.25)*10
elif years_of_exp>=15 and years_of_exp<20:
eligible_days=360
elif years_of_exp>=20:
eligible_days=540
record.uel_mc_ob=eligible_days
if no_of_days>eligible_days:
messages.warning(request,'ML Days Exceeds than available')
return redirect('teacher_leave_entry_create',pk=unique_id)
else:
opening_balance=record.uel_mc_ob
if (record.uel_mc_taken):
record.uel_mc_taken=int(record.uel_mc_taken)+int(no_of_days)
else:
record.uel_mc_taken=no_of_days
leave.ob=opening_balance
leave.taken=record.uel_mc_taken
record.uel_mc_bal=opening_balance-no_of_days
record.save()
leave.save()
msg = str(unique_name) + " Leave details added successfully."
messages.success(request, msg )
except:
messages.warning(request,'ML Days Exceeds than available')
elif leave.leave_type_id==4:
opening_balance=record.llp_mc_ob
if (record.llp_mc_taken):
record.llp_mc_taken=int(record.llp_mc_taken)+int(no_of_days)
else:
record.llp_mc_taken=no_of_days
leave.ob=opening_balance
leave.taken=record.llp_mc_taken
record.save()
leave.save()
msg = str(unique_name) + " Leave details added successfully."
messages.success(request, msg )
elif leave.leave_type_id==10:
if desig==05:
years_of_exp=(leave.leave_from-doj).days/365.25
if years_of_exp>2:
eligible_days=730
else:
messages.success(request,'LLP without MC Not Eligible')
else:
years_of_exp=(leave.leave_from-doregular).days/365.25
if years_of_exp<15:
messages.success(request,'LLP without MC Not Eligible')
elif years_of_exp>=15:
eligible_days=180
record.llp_womc_ob=eligible_days
if no_of_days>eligible_days:
                        messages.warning(request, 'LLP without MC - days exceed the eligible limit of ' + str(eligible_days))
else:
opening_balance=record.llp_womc_ob
if (record.llp_womc_taken):
record.llp_womc_taken=int(record.llp_womc_taken)+int(no_of_days)
else:
record.llp_womc_taken=no_of_days
record.llp_womc_bal=opening_balance-no_of_days
leave.ob=opening_balance
leave.taken=record.llp_womc_taken
record.save()
leave.save()
msg = str(unique_name) + " Leave details added successfully."
messages.success(request, msg )
elif leave.leave_type_id==5:
if desig==05:
years_of_exp=(leave.leave_from-doj).days/365.25
if years_of_exp<10:
eligible_days=90
else:
eligible_days=180
else:
years_of_exp=(leave.leave_from-doregular).days/365.25
if years_of_exp<15:
messages.success(request,'UnEarned Leave Private Affairs Not Eligible')
elif years_of_exp>=15:
eligible_days=180
record.uel_pa_ob=eligible_days
if no_of_days>eligible_days:
messages.warning(request,'UnEarned Leave Private Affairs - Days Exceeds than Eligible. Eligible Days - ' + str(eligible_days))
elif no_of_days>90:
messages.warning(request,'Maximum at any time - 90 days-TNLR 13')
else:
opening_balance=record.uel_pa_ob
if (record.uel_pa_taken):
record.uel_pa_taken=int(record.uel_pa_taken)+int(no_of_days)
else:
record.uel_pa_taken=no_of_days
record.uel_pa_bal=opening_balance-no_of_days
leave.ob=opening_balance
leave.taken=record.uel_pa_taken
record.save()
leave.save()
msg = str(unique_name) + " Leave details added successfully."
messages.success(request, msg )
elif leave.leave_type_id==6:
gender=data.gender
if gender=='Male':
messages.warning(request,'MATERNITY LEAVE only Eligible for Ladies')
else:
fam_details=Teacher_family_detail.objects.filter(teacherid_id=unique_id)
child_count=0
import datetime
leave.leave_to= (leave.leave_from + timedelta(days=180)).isoformat()
for i in fam_details:
if i.relation.id==2 or i.relation.id==3:
child_count=child_count+1
if child_count>=2:
messages.warning(request,'MATERNITY LEAVE only Eligible for 2 Babies')
record.maternity_leave_ob=child_count+1
elif child_count<2:
record.maternity_leave_ob=child_count+1
leave.ob=record.maternity_leave_ob * 180
leave.taken=record.maternity_leave_ob
leave.bal=2-child_count
record.save()
leave.save()
msg = str(unique_name) + " Leave details added successfully."
messages.success(request, msg )
elif leave.leave_type_id==7:
leave_reasons=request.POST['relation1']
if leave_reasons=='infectious disease':
eligible_days=21
elif leave_reasons=='participating in sporting events':
eligible_days=30
elif leave_reasons=='family planning':
gender=data.gender
if gender=='Male':
eligible_days=8
else:
eligible_days=21
leave.leave_to= (leave.leave_from + timedelta(days=eligible_days)).isoformat()
print leave.leave_to
no_of_days=eligible_days
edu = Teacher_leave.objects.filter(teacherid_id=unique_id).filter(leave_type_id=7)
taken_days=0
for i in edu:
if i.leave_type_id==7:
taken_days=taken_days+i.taken
if taken_days:
leave.ob=no_of_days+taken_days
leave.taken=no_of_days+taken_days
leave.bal=no_of_days+taken_days
else:
leave.ob=no_of_days
leave.taken=no_of_days
leave.bal=no_of_days
leave.save()
msg = str(unique_name) + " Leave details added successfully."
messages.success(request, msg )
elif leave.leave_type_id==11:
gender=data.gender
if gender=='Male':
messages.warning(request,'Eligible for Ladies')
else:
import datetime
leave.leave_to= (leave.leave_from + timedelta(days=42)).isoformat()
                        edu = Teacher_leave.objects.filter(teacherid_id=unique_id).filter(leave_type_id=11)
taken_days=0
for i in edu:
if i.leave_type_id==11:
                                taken_days=taken_days+i.taken
if taken_days:
leave.bal=taken_days
leave.taken=taken_days+42
else:
leave.taken=42
record.save()
leave.save()
msg = str(unique_name) + " Leave details added successfully."
messages.success(request, msg )
return redirect('teacher_leave_entry_create',pk=unique_id)
else:
print form.errors
return render(request,'teachers/leave/teacher_leave_form.html',locals())
else:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
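
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original views): the earned-leave (EL)
# bookkeeping in teacher_leave_entry_create.post() keeps an opening balance,
# a running 'taken' total and a remaining balance. The helper below is a
# hypothetical, self-contained rendering of that arithmetic; the parameter
# names mirror the el_ob / el_taken / el_bal fields of Teacher_leave_master.
# --------------------------------------------------------------------------
def _apply_earned_leave_sketch(el_ob, el_taken, el_bal, no_of_days):
    # Reject the request when it would exceed the remaining balance,
    # otherwise return the updated (taken, balance) pair.
    if no_of_days > (el_ob - el_taken):
        raise ValueError("Leave exceeds the available balance")
    return el_taken + no_of_days, el_bal - no_of_days
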
class Teacher_leave_delete(View):
#@never_cache
def get(self, request,**kwargs):
if request.user.is_authenticated():
tid=self.kwargs.get('pk')
staff_name=request.session['staffname']
data=Teacher_leave.objects.get(id=tid)
staff_id=request.session['staffid']
count=Teacher_leave.objects.filter(teacherid_id=staff_id).count()
if count == 1 :
data.delete()
b=completed_table.objects.get(teacherid_id=staff_id)
b.id=b.id
b.teacherid_id=b.teacherid_id
b.Teacher_leav=0
b.save()
else :
data.delete()
msg= str(data.leave_type) + " Removed successfully"
messages.success(request, msg )
return HttpResponseRedirect('/teachers/teacher_leave_create/')
else:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
class teacher_leave_update(View):
#@never_cache
def get(self, request,**kwargs):
if request.user.is_authenticated():
tid=self.kwargs.get('pk')
tid1=self.kwargs.get('pk1')
unique_id=Teacher_detail.objects.get(id=tid)
school_id =unique_id.school_id
instance=Teacher_leave.objects.get(id=tid1)
form=Teacher_leaveform(instance=instance)
edu_list = Teacher_leave.objects.filter(teacherid_id=tid)
if edu_list.count()==0:
messages.success(request,'No Data')
leave_code=Teacher_leave_type.objects.all()
            basic_det=Basicinfo.objects.get(school_id=unique_id.school_id)
teacherid_id = instance.teacherid_id
leave_type = instance.leave_type
leave_from=instance.leave_from
leave_to = instance.leave_to
order_no = instance.order_no
order_date =instance.order_date
return render(request,'teachers/leave/teacher_leave_form.html',locals())
else:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
#@never_cache
def post(self,request,**kwargs):
if request.user.is_authenticated():
tid=self.kwargs.get('pk')
tid1=self.kwargs.get('pk1')
instance=Teacher_leave.objects.get(id=tid1)
form = Teacher_leaveform(request.POST,request.FILES)
data=Teacher_detail.objects.get(id=tid)
doj=data.dofsed
desig=data.designation
doregular=data.doregu
mgnt_edit = Teacher_leave.objects.get(id=tid1)
records=Teacher_leave_master.objects.get(teacherid_id=tid)
if form.is_valid():
no_of_days1=(form.cleaned_data['leave_to'])-(form.cleaned_data['leave_from'])
no_of_days=no_of_days1.days +1
changed_taken=no_of_days-mgnt_edit.taken
mgnt_edit.leave_type=form.cleaned_data['leave_type']
mgnt_edit.leave_from=form.cleaned_data['leave_from']
mgnt_edit.leave_to=form.cleaned_data['leave_to']
mgnt_edit.order_no=form.cleaned_data['order_no']
mgnt_edit.order_date=form.cleaned_data['order_date']
record=Teacher_leave_master.objects.get(teacherid_id=tid)
if mgnt_edit.leave_type_id==2:
if changed_taken >(record.el_ob-record.el_taken):
msg = " Leave Exceeds."
messages.warning(request, msg )
                            return redirect('teacher_leave_entry_create',pk=tid)
else:
opening_balance=record.el_ob
if (record.el_taken):
record.el_taken=int(record.el_taken)+int(changed_taken)
else:
record.el_taken=changed_taken
if (record.el_bal):
record.el_bal=int(record.el_bal)-int(no_of_days)
else:
record.el_bal=no_of_days
mgnt_edit.ob=opening_balance
mgnt_edit.taken=record.el_taken
record.save()
mgnt_edit.save()
elif mgnt_edit.leave_type_id==3:
if desig==05:
years_of_exp=(mgnt_edit.leave_from-doj).days/365.25
print 'staff'
if years_of_exp<2:
messages.success(request,'ML Not Eligible')
elif years_of_exp>=2 and years_of_exp<5:
eligible_days=90
elif years_of_exp>=5 and years_of_exp<10:
eligible_days=180
elif years_of_exp>=10 and years_of_exp<15:
eligible_days=270
elif years_of_exp>=15 and years_of_exp<20:
eligible_days=360
elif years_of_exp>=20:
eligible_days=540
else:
years_of_exp=(mgnt_edit.leave_from-doregular).days/365.25
if years_of_exp<1:
messages.success(request,'ML Not Eligible')
elif years_of_exp>=1 and years_of_exp<15:
eligible_days=((mgnt_edit.leave_from-doregular).days/1365.25)*10
elif years_of_exp>=15 and years_of_exp<20:
eligible_days=360
elif years_of_exp>=20:
eligible_days=540
if changed_taken>eligible_days:
messages.warning(request,'ML Days Exceeds than available')
else:
opening_balance=record.uel_mc_ob
if (record.uel_mc_taken):
record.uel_mc_taken=int(record.uel_mc_taken)+int(changed_taken)
else:
record.uel_mc_taken=changed_taken
mgnt_edit.ob=opening_balance
mgnt_edit.taken=record.uel_mc_taken
record.save()
mgnt_edit.save()
elif mgnt_edit.leave_type_id==4:
opening_balance=record.llp_mc_ob
if (record.llp_mc_taken):
record.llp_mc_taken=int(record.llp_mc_taken)+int(changed_taken)
else:
record.llp_mc_taken=changed_taken
mgnt_edit.ob=opening_balance
mgnt_edit.taken=record.llp_mc_taken
record.save()
mgnt_edit.save()
elif mgnt_edit.leave_type_id==10:
if desig==05:
years_of_exp=(mgnt_edit.leave_from-doj).days/365.25
if years_of_exp>2:
eligible_days=730
else:
messages.success(request,'LLP without MC Not Eligible')
else:
years_of_exp=(mgnt_edit.leave_from-doregular).days/365.25
if years_of_exp<15:
messages.success(request,'LLP without MC Not Eligible')
elif years_of_exp>=15:
eligible_days=180
if changed_taken>eligible_days:
                        messages.warning(request, 'LLP without MC - days exceed the eligible limit of ' + str(eligible_days))
else:
opening_balance=record.llp_womc_ob
if (record.llp_womc_taken):
record.llp_womc_taken=int(record.llp_womc_taken)+int(changed_taken)
else:
record.llp_womc_taken=changed_taken
mgnt_edit.ob=opening_balance
mgnt_edit.taken=record.llp_womc_taken
record.save()
mgnt_edit.save()
elif mgnt_edit.leave_type_id==5:
if desig==05:
years_of_exp=(mgnt_edit.leave_from-doj).days/365.25
print 'staff'
if years_of_exp<10:
eligible_days=90
else:
eligible_days=180
else:
years_of_exp=(mgnt_edit.leave_from-doregular).days/365.25
if years_of_exp<15:
messages.success(request,'UnEarned Leave Private Affairs Not Eligible')
elif years_of_exp>=15:
eligible_days=180
if changed_taken>eligible_days:
messages.warning(request,'UnEarned Leave Private Affairs - Days Exceeds than Eligible. Eligible Days - ' + str(eligible_days))
elif changed_taken>90:
messages.warning(request,'Maximum at any time - 90 days-TNLR 13')
else:
opening_balance=record.uel_pa_ob
if (record.uel_pa_taken):
record.uel_pa_taken=int(record.uel_pa_taken)+int(changed_taken)
else:
record.uel_pa_taken=changed_taken
mgnt_edit.ob=opening_balance
mgnt_edit.taken=record.uel_pa_taken
record.save()
mgnt_edit.save()
elif mgnt_edit.leave_type_id==6:
gender=data.gender
if gender=='Male':
messages.warning(request,'MATERNITY LEAVE only Eligible for Ladies')
else:
fam_details=Teacher_family_detail.objects.filter(teacherid_id=unique_id)
child_count=0
import datetime
mgnt_edit.leave_to= (mgnt_edit.leave_from + timedelta(days=180)).isoformat()
for i in fam_details:
if i.relation.id==2 or i.relation.id==3:
child_count=child_count+1
if child_count>=2:
messages.warning(request,'MATERNITY LEAVE only Eligible for 2 Babies')
record.maternity_leave_ob=child_count+1
elif child_count<2:
record.maternity_leave_ob=child_count+1
mgnt_edit.ob=record.maternity_leave_ob * 180
mgnt_edit.taken=record.maternity_leave_ob
mgnt_edit.bal=2-child_count
record.save()
mgnt_edit.save()
elif mgnt_edit.leave_type_id==7:
leave_reasons=request.POST['relation1']
if leave_reasons=='infectious disease':
eligible_days=21
elif leave_reasons=='participating in sporting events':
eligible_days=30
elif leave_reasons=='family planning':
gender=data.gender
if gender=='Male':
eligible_days=8
else:
eligible_days=21
mgnt_edit.leave_to= (mgnt_edit.leave_from + timedelta(days=eligible_days)).isoformat()
changed_taken=eligible_days
edu = Teacher_leave.objects.filter(teacherid_id=tid).filter(leave_type_id=7)
taken_days=0
for i in edu:
if i.leave_type_id==7:
taken_days=taken_days+i.taken
if taken_days:
mgnt_edit.ob=changed_taken+taken_days
mgnt_edit.taken=changed_taken+taken_days
mgnt_edit.bal=changed_taken+taken_days
else:
mgnt_edit.ob=changed_taken
mgnt_edit.taken=changed_taken
mgnt_edit.bal=changed_taken
mgnt_edit.save()
elif mgnt_edit.leave_type_id==11:
gender=data.gender
if gender=='Male':
messages.warning(request,'Eligible for Ladies')
else:
import datetime
mgnt_edit.leave_to= (mgnt_edit.leave_from + timedelta(days=42)).isoformat()
edu = Teacher_leave.objects.filter(teacherid_id=tid).filter(leave_type_id=11)
taken_days=0
for i in edu:
if i.leave_type_id==11:
                                taken_days=taken_days+i.taken
if taken_days:
mgnt_edit.bal=taken_days
mgnt_edit.taken=taken_days+42
else:
mgnt_edit.taken=42
record.save()
mgnt_edit.save()
messages.success(request,'Leave Details Updated successfully')
return redirect('teacher_leave_entry_create',pk=tid)
else:
print form.errors
return render(request,'teachers/leave/teacher_leave_form.html',locals())
else:
return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))
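
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original views): the inclusive leave-day
# count and the medical-leave (ML) eligibility tiers are computed inline in the
# views above. The helpers below are a hypothetical, self-contained rendering
# of that arithmetic; the tier thresholds mirror the desig == 0 (staff) branch
# of teacher_leave_entry_create.post().
# --------------------------------------------------------------------------
def _leave_days_sketch(leave_from, leave_to):
    # Inclusive count: a leave from Jan 1 to Jan 3 is 3 days, not 2.
    return (leave_to - leave_from).days + 1

def _ml_eligible_days_sketch(years_of_exp):
    # Medical-leave ceiling by years of experience, as encoded in the view.
    if years_of_exp < 2:
        return 0        # not eligible
    elif years_of_exp < 5:
        return 90
    elif years_of_exp < 10:
        return 180
    elif years_of_exp < 15:
        return 270
    elif years_of_exp < 20:
        return 360
    return 540

if __name__ == '__main__':
    from datetime import date
    print(_leave_days_sketch(date(2016, 1, 1), date(2016, 1, 3)))  # 3
    print(_ml_eligible_days_sketch(12))                            # 270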
|
|
from __future__ import absolute_import
import os
import struct
import io
import warnings
import six
from .protocol import Masker
from netlib import tcp
from netlib import utils
MAX_16_BIT_INT = (1 << 16)
MAX_64_BIT_INT = (1 << 64)
DEFAULT=object()
OPCODE = utils.BiDi(
CONTINUE=0x00,
TEXT=0x01,
BINARY=0x02,
CLOSE=0x08,
PING=0x09,
PONG=0x0a
)
class FrameHeader(object):
def __init__(
self,
opcode=OPCODE.TEXT,
payload_length=0,
fin=False,
rsv1=False,
rsv2=False,
rsv3=False,
masking_key=DEFAULT,
mask=DEFAULT,
length_code=DEFAULT
):
if not 0 <= opcode < 2 ** 4:
raise ValueError("opcode must be 0-16")
self.opcode = opcode
self.payload_length = payload_length
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
if length_code is DEFAULT:
self.length_code = self._make_length_code(self.payload_length)
else:
self.length_code = length_code
if mask is DEFAULT and masking_key is DEFAULT:
self.mask = False
self.masking_key = b""
elif mask is DEFAULT:
self.mask = 1
self.masking_key = masking_key
elif masking_key is DEFAULT:
self.mask = mask
self.masking_key = os.urandom(4)
else:
self.mask = mask
self.masking_key = masking_key
if self.masking_key and len(self.masking_key) != 4:
raise ValueError("Masking key must be 4 bytes.")
@classmethod
    def _make_length_code(cls, length):
"""
        A websocket frame contains an initial 7-bit length_code and an optional
        extended length field that carries the actual length when the payload is
        larger than 125 bytes. (A standalone sketch of this scheme is appended
        at the end of this module.)
"""
if length <= 125:
return length
elif length >= 126 and length <= 65535:
return 126
else:
return 127
def __repr__(self):
vals = [
"ws frame:",
OPCODE.get_name(self.opcode, hex(self.opcode)).lower()
]
flags = []
for i in ["fin", "rsv1", "rsv2", "rsv3", "mask"]:
if getattr(self, i):
flags.append(i)
if flags:
vals.extend([":", "|".join(flags)])
if self.masking_key:
vals.append(":key=%s" % repr(self.masking_key))
if self.payload_length:
vals.append(" %s" % utils.pretty_size(self.payload_length))
return "".join(vals)
def human_readable(self):
warnings.warn("FrameHeader.to_bytes is deprecated, use bytes(frame_header) instead.", DeprecationWarning)
return repr(self)
def __bytes__(self):
first_byte = utils.setbit(0, 7, self.fin)
first_byte = utils.setbit(first_byte, 6, self.rsv1)
first_byte = utils.setbit(first_byte, 5, self.rsv2)
first_byte = utils.setbit(first_byte, 4, self.rsv3)
first_byte = first_byte | self.opcode
second_byte = utils.setbit(self.length_code, 7, self.mask)
b = six.int2byte(first_byte) + six.int2byte(second_byte)
if self.payload_length < 126:
pass
elif self.payload_length < MAX_16_BIT_INT:
# '!H' pack as 16 bit unsigned short
# add 2 byte extended payload length
b += struct.pack('!H', self.payload_length)
elif self.payload_length < MAX_64_BIT_INT:
# '!Q' = pack as 64 bit unsigned long long
# add 8 bytes extended payload length
b += struct.pack('!Q', self.payload_length)
if self.masking_key:
b += self.masking_key
return b
if six.PY2:
__str__ = __bytes__
def to_bytes(self):
warnings.warn("FrameHeader.to_bytes is deprecated, use bytes(frame_header) instead.", DeprecationWarning)
return bytes(self)
@classmethod
def from_file(cls, fp):
"""
read a websockets frame header
"""
first_byte = six.byte2int(fp.safe_read(1))
second_byte = six.byte2int(fp.safe_read(1))
fin = utils.getbit(first_byte, 7)
rsv1 = utils.getbit(first_byte, 6)
rsv2 = utils.getbit(first_byte, 5)
rsv3 = utils.getbit(first_byte, 4)
# grab right-most 4 bits
opcode = first_byte & 15
mask_bit = utils.getbit(second_byte, 7)
# grab the next 7 bits
length_code = second_byte & 127
        # a length_code > 125 indicates you need to read more bytes
# to get the actual payload length
if length_code <= 125:
payload_length = length_code
elif length_code == 126:
payload_length, = struct.unpack("!H", fp.safe_read(2))
elif length_code == 127:
payload_length, = struct.unpack("!Q", fp.safe_read(8))
# masking key only present if mask bit set
if mask_bit == 1:
masking_key = fp.safe_read(4)
else:
masking_key = None
return cls(
fin=fin,
rsv1=rsv1,
rsv2=rsv2,
rsv3=rsv3,
opcode=opcode,
mask=mask_bit,
length_code=length_code,
payload_length=payload_length,
masking_key=masking_key,
)
def __eq__(self, other):
if isinstance(other, FrameHeader):
return bytes(self) == bytes(other)
return False
class Frame(object):
"""
Represents one websockets frame.
    The constructor takes human-readable forms of the frame components;
    from_bytes() is also available.
WebSockets Frame as defined in RFC6455
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-------+-+-------------+-------------------------------+
|F|R|R|R| opcode|M| Payload len | Extended payload length |
|I|S|S|S| (4) |A| (7) | (16/64) |
|N|V|V|V| |S| | (if payload len==126/127) |
| |1|2|3| |K| | |
+-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
| Extended payload length continued, if payload len == 127 |
+ - - - - - - - - - - - - - - - +-------------------------------+
| |Masking-key, if MASK set to 1 |
+-------------------------------+-------------------------------+
| Masking-key (continued) | Payload Data |
+-------------------------------- - - - - - - - - - - - - - - - +
: Payload Data continued ... :
+ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
| Payload Data continued ... |
+---------------------------------------------------------------+
"""
def __init__(self, payload=b"", **kwargs):
self.payload = payload
kwargs["payload_length"] = kwargs.get("payload_length", len(payload))
self.header = FrameHeader(**kwargs)
@classmethod
def default(cls, message, from_client=False):
"""
Construct a basic websocket frame from some default values.
Creates a non-fragmented text frame.
"""
if from_client:
mask_bit = 1
masking_key = os.urandom(4)
else:
mask_bit = 0
masking_key = None
return cls(
message,
fin=1, # final frame
opcode=OPCODE.TEXT, # text
mask=mask_bit,
masking_key=masking_key,
)
@classmethod
def from_bytes(cls, bytestring):
"""
        Construct a websocket frame from an in-memory bytestring.
        To construct a frame from a stream of bytes, use from_file() directly.
"""
return cls.from_file(tcp.Reader(io.BytesIO(bytestring)))
def __repr__(self):
ret = repr(self.header)
if self.payload:
ret = ret + "\nPayload:\n" + utils.clean_bin(self.payload).decode("ascii")
return ret
def human_readable(self):
warnings.warn("Frame.to_bytes is deprecated, use bytes(frame) instead.", DeprecationWarning)
return repr(self)
def __bytes__(self):
"""
        Serialize the frame to wire format. Returns bytes.
"""
b = bytes(self.header)
if self.header.masking_key:
b += Masker(self.header.masking_key)(self.payload)
else:
b += self.payload
return b
if six.PY2:
__str__ = __bytes__
def to_bytes(self):
warnings.warn("FrameHeader.to_bytes is deprecated, use bytes(frame_header) instead.", DeprecationWarning)
return bytes(self)
def to_file(self, writer):
warnings.warn("Frame.to_file is deprecated, use wfile.write(bytes(frame)) instead.", DeprecationWarning)
writer.write(bytes(self))
writer.flush()
@classmethod
def from_file(cls, fp):
"""
        Read a websocket frame sent by a server or client.
        fp is a file-like object that could be backed by a network
        stream, a disk, or an in-memory stream reader.
"""
header = FrameHeader.from_file(fp)
payload = fp.safe_read(header.payload_length)
if header.mask == 1 and header.masking_key:
payload = Masker(header.masking_key)(payload)
return cls(
payload,
fin=header.fin,
opcode=header.opcode,
mask=header.mask,
payload_length=header.payload_length,
masking_key=header.masking_key,
rsv1=header.rsv1,
rsv2=header.rsv2,
rsv3=header.rsv3,
length_code=header.length_code
)
def __eq__(self, other):
if isinstance(other, Frame):
return bytes(self) == bytes(other)
return False
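
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the 7-bit length code
# / extended payload length scheme described in _make_length_code() and in the
# Frame docstring above, rendered as a standalone function. It re-uses the
# struct import and MAX_16_BIT_INT constant defined at the top of this file
# and follows RFC 6455 section 5.2; it is a sketch, not the module's own
# serialisation path (__bytes__ already covers that).
# --------------------------------------------------------------------------
def _encode_payload_length_sketch(payload_length):
    # Return the length byte(s) that follow the first frame byte,
    # with the mask bit left unset.
    if payload_length <= 125:
        # small payloads fit directly in the 7-bit length code
        return struct.pack("!B", payload_length)
    elif payload_length < MAX_16_BIT_INT:
        # length code 126 + 2-byte extended payload length
        return struct.pack("!BH", 126, payload_length)
    else:
        # length code 127 + 8-byte extended payload length
        return struct.pack("!BQ", 127, payload_length)

if __name__ == "__main__":
    for n in (5, 125, 126, 65535, 65536):
        print("%d -> %r" % (n, _encode_payload_length_sketch(n)))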
|
|
# -*- coding: utf-8 -*-
from datetime import timedelta
from openprocurement.api.utils import get_now
# TenderContractResourceTest
def patch_tender_contract(self):
response = self.app.get('/tenders/{}/contracts'.format(self.tender_id))
contract = response.json['data'][0]
fake_contractID = "myselfID"
fake_items_data = [{"description": "New Description"}]
fake_suppliers_data = [{"name": "New Name"}]
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {"contractID": fake_contractID,
"items": fake_items_data,
"suppliers": fake_suppliers_data}})
response = self.app.get('/tenders/{}/contracts/{}'.format(self.tender_id, contract['id']))
self.assertNotEqual(fake_contractID, response.json['data']['contractID'])
self.assertNotEqual(fake_items_data, response.json['data']['items'])
self.assertNotEqual(fake_suppliers_data, response.json['data']['suppliers'])
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {"value": {"currency": "USD"}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.body, 'null')
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {"value": {"valueAddedTaxIncluded": False}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.body, 'null')
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {"value": {"amount": 500}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.body, 'null')
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {"value": {"amountPerformance": 500}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.body, 'null')
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {
"value": {
"annualCostsReduction": [300.6] * 21,
"yearlyPaymentsPercentage": 0.9,
"contractDuration": {'years': 5, 'days': 100}
}
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.body, 'null')
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertIn("Can't sign contract before stand-still period end (",
response.json['errors'][0]["description"])
self.set_status('complete', {'status': 'active.awarded'})
token = self.initial_bids_tokens[self.initial_bids[0]['id']]
response = self.app.post_json('/tenders/{}/awards/{}/complaints?acc_token={}'.format(
self.tender_id, self.award_id, token),
{'data': {'title': 'complaint title',
'description': 'complaint description',
'author': self.supplier_info}})
self.assertEqual(response.status, '201 Created')
complaint = response.json['data']
owner_token = response.json['access']['token']
response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}?acc_token={}'.format(
self.tender_id, self.award_id, complaint['id'], owner_token),
{"data": {"status": "pending"}})
self.assertEqual(response.status, '200 OK')
tender = self.db.get(self.tender_id)
for i in tender.get('awards', []):
i['complaintPeriod']['endDate'] = i['complaintPeriod']['startDate']
self.db.save(tender)
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {"dateSigned": i['complaintPeriod']['endDate']}},
status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.json['errors'], [{u'description': [
u'Contract signature date should be after award complaint period end date ({})'.format(
i['complaintPeriod']['endDate'])], u'location': u'body', u'name': u'dateSigned'}])
    one_hour_in_future = (get_now() + timedelta(hours=1)).isoformat()
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {"dateSigned": one_hour_in_furure}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.json['errors'], [
{u'description': [u"Contract signature date can't be in the future"], u'location': u'body',
u'name': u'dateSigned'}])
custom_signature_date = get_now().isoformat()
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {"dateSigned": custom_signature_date}})
self.assertEqual(response.status, '200 OK')
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"],
"Can't sign contract before reviewing all complaints")
response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}?acc_token={}'.format(
self.tender_id, self.award_id, complaint['id'], owner_token),
{"data": {"status": "stopping", "cancellationReason": "reason"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "stopping")
authorization = self.app.authorization
self.app.authorization = ('Basic', ('reviewer', ''))
response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}'.format(
self.tender_id, self.award_id, complaint['id']),
{'data': {'status': 'stopped'}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']["status"], "stopped")
self.app.authorization = authorization
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active")
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {
"value": {
"annualCostsReduction": [780.5] * 21,
"yearlyPaymentsPercentage": 0.9,
"contractDuration": {'years': 10}
},
"contractID": "myselfID",
"title": "New Title",
"items": [{"description": "New Description"}],
"suppliers": [{"name": "New Name"}]}},
status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.json['errors'][0]["description"],
"Can't update contract in current (complete) tender status")
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"],
"Can't update contract in current (complete) tender status")
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token),
{"data": {"status": "pending"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"],
"Can't update contract in current (complete) tender status")
response = self.app.patch_json('/tenders/{}/contracts/some_id'.format(self.tender_id),
{"data": {"status": "active"}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'contract_id'}])
response = self.app.patch_json('/tenders/some_id/contracts/some_id',
{"data": {"status": "active"}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'tender_id'}])
response = self.app.get('/tenders/{}/contracts/{}'.format(self.tender_id, contract['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active")
self.assertEqual(response.json['data']["value"]["amountPerformance"],
self.expected_contract_amountPerformance)
self.assertEqual(response.json['data']["value"]['amount'], self.expected_contract_amount)
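
# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): the two
# dateSigned rules exercised above - the signature date cannot be in the future
# and must fall after the award complaint period end - reduced to a standalone
# check. The function name and return format are hypothetical; only the rules
# and error texts come from the assertions in patch_tender_contract().
# ----------------------------------------------------------------------------
from datetime import datetime

def _validate_date_signed_sketch(date_signed, complaint_period_end, now=None):
    # Return the list of validation errors for a proposed dateSigned value.
    now = now or datetime.utcnow()
    errors = []
    if date_signed > now:
        errors.append(u"Contract signature date can't be in the future")
    if date_signed <= complaint_period_end:
        errors.append(u"Contract signature date should be after award "
                      u"complaint period end date ({})".format(
                          complaint_period_end.isoformat()))
    return errors

if __name__ == '__main__':
    print(_validate_date_signed_sketch(datetime(2016, 1, 1),
                                       datetime(2016, 6, 1)))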
|
|
"""
@file iotvt_integration.py
"""
##
# @addtogroup iotivity iotivity
# @brief This is iotivity component
# @{
# @addtogroup iotvt_integration iotvt_integration
# @brief This is iotvt_integration module
# @{
##
import os
import time
import string
import subprocess
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.helper import get_files_dir
from oeqa.utils.helper import shell_cmd_timeout
from oeqa.utils.helper import run_as, add_group, add_user, remove_user
from oeqa.utils.decorators import tag
@tag(TestType="EFT", FeatureID="IOTOS-754,IOTOS-1019")
class IOtvtIntegration(oeRuntimeTest):
"""
@class IOtvtIntegration
"""
@classmethod
def setUpClass(cls):
        '''Clean up all servers and clients first
@fn setUpClass
@param cls
@return
'''
cls.tc.target.run("killall presenceserver presenceclient devicediscoveryserver devicediscoveryclient")
cls.tc.target.run("killall fridgeserver fridgeclient garageserver garageclient groupserver groupclient")
cls.tc.target.run("killall roomserver roomclient simpleserver simpleclient simpleserverHQ simpleclientHQ")
cls.tc.target.run("killall simpleclientserver threadingsample")
cls.tc.target.run("rm -f /tmp/svr_output")
cls.tc.target.run("rm -f /tmp/output")
# check if image contains iotivity example applications
(status, output) = cls.tc.target.run("ls /opt/iotivity/examples/resource/")
if "cpp" in output:
pass
else:
            assert 1 == 0, 'There is no iotivity example app installed'
# add group and non-root user
add_group("tester")
add_user("iotivity-tester", "tester")
# Setup firewall accept for multicast
(status, output) = cls.tc.target.run("cat /proc/sys/net/ipv4/ip_local_port_range")
port_range = output.split()
cls.tc.target.run("/usr/sbin/iptables -w -A INPUT -p udp --dport 5683 -j ACCEPT")
cls.tc.target.run("/usr/sbin/iptables -w -A INPUT -p udp --dport 5684 -j ACCEPT")
cls.tc.target.run("/usr/sbin/ip6tables -w -A INPUT -s fe80::/10 -p udp -m udp --dport 5683 -j ACCEPT")
cls.tc.target.run("/usr/sbin/ip6tables -w -A INPUT -s fe80::/10 -p udp -m udp --dport 5684 -j ACCEPT")
cls.tc.target.run("/usr/sbin/ip6tables -w -A INPUT -s fe80::/10 -p udp -m udp --dport %s:%s -j ACCEPT" % (port_range[0], port_range[1]))
@classmethod
def tearDownClass(cls):
        '''Remove the test user
        @fn tearDownClass
@param cls
@return
'''
remove_user("iotivity-tester")
def get_ipv6(self):
"""
@fn get_ipv6
@param self
@return
"""
time.sleep(1)
# Check ip address by ifconfig command
interface = "nothing"
(status, interface) = self.target.run("ifconfig | grep '^enp' | awk '{print $1}'")
(status, output) = self.target.run("ifconfig %s | grep 'inet6 addr:' | awk '{print $3}'" % interface)
return output.split('%')[0]
def presence_check(self, para):
'''this is a function used by presence test
@fn presence_check
@param self
@return
'''
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/presenceserver > /tmp/svr_output &"
(status, output) = run_as("iotivity-tester", server_cmd)
time.sleep(1)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/presenceclient -t %d > /tmp/output &" % para
run_as("iotivity-tester", client_cmd)
        # Some platforms are too slow and need a longer sleep, e.g. MinnowMax
time.sleep(60)
(status, output) = run_as("iotivity-tester", "cat /tmp/output")
self.target.run("killall presenceserver presenceclient")
time.sleep(3)
return output.count("Received presence notification from : %s" % self.target.ip) + \
output.count("Received presence notification from : %s" % self.get_ipv6())
def test_devicediscovery(self):
'''
Test devicediscoveryserver and devicediscoveryclient.
The server registers platform info values, the client connects to the
server and fetch the information to print out.
@fn test_devicediscovery
@param self
@return
'''
# ensure env is clean
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/devicediscoveryserver > /tmp/svr_output &"
(status, output) = run_as("iotivity-tester", server_cmd, timeout=20)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/devicediscoveryclient > /tmp/output &"
run_as("iotivity-tester", client_cmd, timeout=20)
time.sleep(5)
(status, output) = run_as("iotivity-tester", 'cat /tmp/output')
# judge if the values are correct
ret = 0
if "Device name" in output and "Bill's Battlestar" in output:
pass
else:
ret = 1
# kill server and client
self.target.run("killall devicediscoveryserver devicediscoveryclient")
time.sleep(3)
##
# TESTPOINT: #1, test_devicediscovery
#
self.assertEqual(ret, 0, msg="Error messages: %s" % output)
def test_fridge(self):
'''
Test fridgeserver and fridgeclient.
The server registers resource with 2 doors and 1 light, client connects to the
server and fetch the information to print out.
@fn test_fridge
@param self
@return
'''
# ensure env is clean
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/fridgeserver > /tmp/svr_output &"
(status, output) = run_as("iotivity-tester", server_cmd, timeout=20)
time.sleep(1)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/fridgeclient > /tmp/output &"
run_as("iotivity-tester", client_cmd, timeout=20)
time.sleep(5)
(status, output) = run_as("iotivity-tester", 'cat /tmp/output')
# judge if the values are correct
ret = 0
if "Name of device: Intel Powered 2 door, 1 light refrigerator" in output and \
"Get ID is 1 and resource URI is /light" in output and \
"Get ID is 2 and resource URI is /door/left" in output and \
"Get ID is 3 and resource URI is /door/right" in output and \
"Get ID is 4 and resource URI is /door/random" in output and \
"Delete ID is 0 and resource URI is /device" in output:
pass
else:
ret = 1
# kill server and client
self.target.run("killall fridgeserver fridgeclient")
time.sleep(3)
##
# TESTPOINT: #1, test_fridge
#
self.assertEqual(ret, 0, msg="Error messages: %s" % output)
def test_garage(self):
'''
Test garageserver and garageclient.
While server and client communication, remove one attribute Name from
OCRepresentation. Then the attribute number of OCRepresentation should
reduce 1.
@fn test_garage
@param self
@return
'''
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/garageserver > /tmp/svr_output &"
(status, output) = run_as("iotivity-tester", server_cmd, timeout=20)
time.sleep(1)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/garageclient > /tmp/output &"
run_as("iotivity-tester", client_cmd, timeout=20)
time.sleep(5)
(status, output) = run_as("iotivity-tester", 'cat /tmp/output')
# judge if the values are correct
ret = 0
if "GET request was successful" in output and \
"attribute: name, was removed successfully from rep2." in output and \
"Number of attributes in rep2: 6" in output and \
"PUT request was successful" in output:
pass
else:
ret = 1
# kill server and client
self.target.run("killall garageserver garageclient")
time.sleep(3)
##
# TESTPOINT: #1, test_garage
#
self.assertEqual(ret, 0, msg="Error messages: %s" % output)
def test_group(self):
'''
        groupclient has 4 main operations; only option 1 is exercised here.
        With option 1 (user inputs 1), it sets the ActionSet value of the rep.
        This case checks that the set operation is done.
@fn test_group
@param self
@return
'''
# start light server and group server
lightserver_cmd = "/opt/iotivity/examples/resource/cpp/lightserver > /tmp/svr_output &"
(status, output) = self.target.run(lightserver_cmd, timeout=20)
time.sleep(2)
ssh_cmd = "ssh root@%s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=ERROR" % self.target.ip
groupserver_cmd = "/opt/iotivity/examples/resource/cpp/groupserver > /dev/null 2>&1"
subprocess.Popen("%s %s" % (ssh_cmd, groupserver_cmd), shell=True)
time.sleep(3)
# start client to get info, here needs user input. So use expect
exp_cmd = os.path.join(os.path.dirname(__file__), "files/group_client.exp")
status, output = shell_cmd_timeout("expect %s %s" % (exp_cmd, self.target.ip), timeout=200)
# kill server and client
self.target.run("killall lightserver groupserver groupclient")
time.sleep(3)
##
# TESTPOINT: #1, test_group
#
if type(output) is bytes:
output = output.decode("ascii")
self.assertEqual(status, 2, msg="expect excution fail\n %s" % output)
def test_presence_unicast(self):
'''
        The presence test is complex and contains 6 sub-tests.
        The client's main goal is to observe the server resource presence status (presence/stop).
        Every change triggers a 'Received presence notification' on the client side.
To each client observation mode:
-t 1 Unicast --- it will receive 7 notifications
-t 2 Unicast with one filter --- it will receive 5 notifications
-t 3 Unicast with two filters --- it will receive 6 notifications
-t 4 Multicast --- it will receive 7 notifications
-t 5 Multicast with one filter --- it will receive 5 notifications
-t 6 Multicast with two filters --- it will receive 6 notifications
@fn test_presence_unicast
@param self
@return
'''
number = self.presence_check(1)
##
# TESTPOINT: #1, test_presence_unicast
#
assert number > 0, "type 1 should have no notifications"
def test_presence_unicast_one_filter(self):
''' See instruction in test_presence_unicast.
@fn test_presence_unicast_one_filter
@param self
@return
'''
number = self.presence_check(2)
##
# TESTPOINT: #1, test_presence_unicast_one_filter
#
assert number > 0, "type 2 should have no notifications"
def test_presence_unicast_two_filters(self):
''' See instruction in test_presence_unicast.
@fn test_presence_unicast_two_filters
@param self
@return
'''
number = self.presence_check(3)
##
# TESTPOINT: #1, test_presence_unicast_two_filters
#
assert number > 0, "type 3 should have no notifications"
def test_presence_multicast(self):
''' See instruction in test_presence_unicast.
@fn test_presence_multicast
@param self
@return
'''
number = self.presence_check(4)
##
# TESTPOINT: #1, test_presence_multicast
#
assert number > 0, "type 4 should have no notifications"
def test_presence_multicast_one_filter(self):
''' See instruction in test_presence_unicast.
@fn test_presence_multicast_one_filter
@param self
@return
'''
number = self.presence_check(5)
##
# TESTPOINT: #1, test_presence_multicast_one_filter
#
        assert number > 0, "type 5 received no notifications"
def test_presence_multicast_two_filters(self):
''' See instruction in test_presence_unicast.
@fn test_presence_multicast_two_filters
@param self
@return
'''
number = self.presence_check(6)
##
# TESTPOINT: #1, test_presence_multicast_two_filters
#
        assert number > 0, "type 6 received no notifications"
def test_room_default_collection(self):
'''
        When number is 1 and the request is PUT, light and fan respond individually,
        so there is no 'In Server CPP entity handler' output. Each response is given
        by the light or the fan itself.
@fn test_room_default_collection
@param self
@return
'''
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/roomserver 1 > /tmp/svr_output &"
(status, output) = run_as("iotivity-tester", server_cmd)
time.sleep(1)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/roomclient > /tmp/output &"
run_as("iotivity-tester", client_cmd)
time.sleep(5)
(status, output) = run_as("iotivity-tester", "cat /tmp/svr_output | grep 'In Server CPP entity handler' -c")
# kill server and client
self.target.run("killall roomserver roomclient")
time.sleep(3)
##
# TESTPOINT: #1, test_room_default_collection
#
self.assertEqual(int(output), 0, msg="CPP entity handler is: %s" % output)
def test_room_application_collection(self):
'''
        When number is 2 and the request is PUT, the room entity handler gives the
        light and fan responses. So 'In Server CPP entity handler' appears 3 times
        in the output; in the middle one, it handles light and fan.
@fn test_room_application_collection
@param self
@return
'''
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/roomserver 2 > /tmp/svr_output &"
run_as("iotivity-tester", server_cmd)
time.sleep(1)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/roomclient > /tmp/output &"
run_as("iotivity-tester", client_cmd)
time.sleep(6)
(status, output) = run_as("iotivity-tester", "cat /tmp/svr_output")
# kill server and client
self.target.run("killall roomserver roomclient")
time.sleep(3)
##
# TESTPOINT: #1, test_room_application_collection
#
self.assertEqual(output.count("In Server CPP entity handler"), 3, msg="CPP entity handler is: %s" % output)
def test_simple(self):
'''
Test simpleserver and simpleclient.
        After finding the resource, simpleclient will do GET, PUT, POST and Observe sequentially.
@fn test_simple
@param self
@return
'''
for i in range(3):
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/simpleserver > /tmp/svr_output &"
(status, output) = run_as("iotivity-tester", server_cmd, timeout=90)
time.sleep(1)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/simpleclient > /tmp/output &"
run_as("iotivity-tester", client_cmd, timeout=90)
print ("\npatient... simpleclient needs long time for its observation")
time.sleep(70)
(status, output) = run_as("iotivity-tester", 'cat /tmp/output')
# kill server and client
self.target.run("killall simpleserver simpleclient")
time.sleep(3)
# judge if the values are correct
ret = 0
if "DISCOVERED Resource" in output and \
"GET request was successful" in output and \
"PUT request was successful" in output and \
"POST request was successful" in output and \
"Observe is used." in output:
break
else:
ret = 1
##
# TESTPOINT: #1, test_simple
#
self.assertEqual(ret, 0, msg="Error messages: %s" % output)
def test_simpleHQ(self):
'''
Test simpleserverHQ and simpleclientHQ.
        Compared to simpleserver, simpleserverHQ removes SlowResponse and gives
        sendResponse (on PUT) / sendPostResponse (on POST). Otherwise they are
        basically the same.
@fn test_simpleHQ
@param self
@return
'''
for i in range(3):
# start server
server_cmd = "/opt/iotivity/examples/resource/cpp/simpleserverHQ > /tmp/svr_output &"
(status, output) = run_as("iotivity-tester", server_cmd, timeout=90)
time.sleep(1)
# start client to get info
client_cmd = "/opt/iotivity/examples/resource/cpp/simpleclientHQ > /tmp/output &"
run_as("iotivity-tester", client_cmd, timeout=90)
print ("\npatient... simpleclientHQ needs long time for its observation")
time.sleep(70)
(status, output) = run_as("iotivity-tester", 'cat /tmp/output')
# kill server and client
self.target.run("killall simpleserverHQ simpleclientHQ")
time.sleep(3)
# judge if the values are correct
ret = 0
if "DISCOVERED Resource" in output and \
"GET request was successful" in output and \
"PUT request was successful" in output and \
"POST request was successful" in output and \
"Observe is used." in output:
break
else:
ret = 1
##
# TESTPOINT: #1, test_simpleHQ
#
self.assertEqual(ret, 0, msg="Error messages: %s" % output)
def test_simpleclientserver(self):
        ''' Test simpleclientserver. It starts a foo resource server, then a client to do GET/PUT.
@fn test_simpleclientserver
@param self
@return
'''
# start test
client_cmd = "/opt/iotivity/examples/resource/cpp/simpleclientserver > /tmp/output &"
run_as("iotivity-tester", client_cmd, timeout=20)
time.sleep(10)
(status, output) = run_as("iotivity-tester", 'cat /tmp/output')
# judge if the values are correct
ret = 0
if "Found Resource" in output and \
"Successful Get" in output and \
"Successful Put" in output and \
"barCount: 211" in output:
pass
else:
ret = 1
# kill test
self.target.run("killall simpleclientserver")
time.sleep(3)
##
# TESTPOINT: #1, test_simpleclientserver
#
self.assertEqual(ret, 0, msg="Error messages: %s" % output)
def test_threadingsample(self):
'''
        Test threadingsample. In its main(), a foo1 server is registered. Then it opens
        three threads:
        1> a second server, foo2
        2> client1 to detect foo1
        3> client2 to detect foo2, which then does GET/PUT
@fn test_threadingsample
@param self
@return
'''
# start test
client_cmd = "/opt/iotivity/examples/resource/cpp/threadingsample > /tmp/output &"
run_as("iotivity-tester", client_cmd, timeout=20)
print ("\n patient, threadingsample needs some time to open 3 threads")
time.sleep(20)
(status, output) = run_as("iotivity-tester", 'cat /tmp/output')
# judge if the values are correct
ret = 0
if "URI: /q/foo1" in output and \
"URI: /q/foo2" in output and \
"Successful Get." in output and \
"Successful Put." in output:
pass
else:
ret = 1
# kill test
self.target.run("killall threadingsample")
time.sleep(3)
##
# TESTPOINT: #1, test_threadingsample
#
self.assertEqual(ret, 0, msg="Error messages: %s" % output)
@tag(TestType="FVT", FeatureID="IOTOS-1004")
def test_neg_register_resource_byroot(self):
'''
        Start an iotivity server under the root account. The root account should also be
        able to perform iotivity operations, so resource registration is expected to succeed.
@fn test_neg_register_resource_byroot
@param self
@return
'''
cmd = "/opt/iotivity/examples/resource/cpp/simpleserver > /tmp/output &"
self.target.run(cmd, timeout=20)
time.sleep(2)
# kill the server process
self.target.run("killall simpleserver")
time.sleep(1)
(status, output) = self.target.run('cat /tmp/output')
# judge if the values are correct
if "Created resource." in output:
pass
else:
# root account should also be able to do iotivity operations
self.assertEqual(1, 0, msg="By root, the simpleserver fails to start: %s" % output)
##
# @}
# @}
##
|
|
#!/usr/bin/env python
# Copyright (C) 2015 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage JJB yaml feature implementation
import copy
import fnmatch
import io
import itertools
import logging
import re
import os
from jenkins_jobs.constants import MAGIC_MANAGE_STRING
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.formatter import deep_format
import jenkins_jobs.local_yaml as local_yaml
from jenkins_jobs import utils
__all__ = [
"YamlParser"
]
logger = logging.getLogger(__name__)
def matches(what, glob_patterns):
"""
Checks if the given string, ``what``, matches any of the glob patterns in
the iterable, ``glob_patterns``
:arg str what: String that we want to test if it matches a pattern
:arg iterable glob_patterns: glob patterns to match (list, tuple, set,
etc.)
"""
return any(fnmatch.fnmatch(what, glob_pattern)
for glob_pattern in glob_patterns)
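# Illustrative examples (not part of the upstream module, kept as comments so
# import-time behaviour is unchanged):
#   >>> matches('gate-tox-py27', ['gate-*'])
#   True
#   >>> matches('gate-tox-py27', ['periodic-*', 'check-*'])
#   False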
def combination_matches(combination, match_combinations):
"""
    Checks if the given combination matches any of the given combination
    globs; a combination glob is a combination where a missing key is
    considered to match anything.
(key1=2, key2=3)
would match the combination match:
(key2=3)
but not:
(key1=2, key2=2)
"""
for cmatch in match_combinations:
for key, val in combination.items():
if cmatch.get(key, val) != val:
break
else:
return True
return False
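# Illustrative examples (not part of the upstream module):
#   >>> combination_matches({'key1': 2, 'key2': 3}, [{'key2': 3}])
#   True
#   >>> combination_matches({'key1': 2, 'key2': 3}, [{'key1': 2, 'key2': 2}])
#   False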
class YamlParser(object):
def __init__(self, jjb_config=None):
self.data = {}
self.jobs = []
self.views = []
self.jjb_config = jjb_config
self.keep_desc = jjb_config.yamlparser['keep_descriptions']
self.path = jjb_config.yamlparser['include_path']
def load_files(self, fn):
# handle deprecated behavior, and check that it's not a file like
# object as these may implement the '__iter__' attribute.
if not hasattr(fn, '__iter__') or hasattr(fn, 'read'):
logger.warning(
'Passing single elements for the `fn` argument in '
'Builder.load_files is deprecated. Please update your code '
'to use a list as support for automatic conversion will be '
'removed in a future version.')
fn = [fn]
files_to_process = []
for path in fn:
if not hasattr(path, 'read') and os.path.isdir(path):
files_to_process.extend([os.path.join(path, f)
for f in sorted(os.listdir(path))
if (f.endswith('.yml') or
f.endswith('.yaml'))])
else:
files_to_process.append(path)
# symlinks used to allow loading of sub-dirs can result in duplicate
# definitions of macros and templates when loading all from top-level
unique_files = []
for f in files_to_process:
if hasattr(f, 'read'):
unique_files.append(f)
continue
rpf = os.path.realpath(f)
if rpf not in unique_files:
unique_files.append(rpf)
else:
logger.warning("File '%s' already added as '%s', ignoring "
"reference to avoid duplicating yaml "
"definitions." % (f, rpf))
for in_file in unique_files:
            # use of ask-for-permission instead of ask-for-forgiveness
            # performs better for these low-frequency cases.
if hasattr(in_file, 'name'):
fname = in_file.name
else:
fname = in_file
logger.debug("Parsing YAML file {0}".format(fname))
if hasattr(in_file, 'read'):
self._parse_fp(in_file)
else:
self.parse(in_file)
def _parse_fp(self, fp):
# wrap provided file streams to ensure correct encoding used
data = local_yaml.load(utils.wrap_stream(fp),
self.jjb_config.yamlparser['retain_anchors'],
search_path=self.path)
if data:
if not isinstance(data, list):
raise JenkinsJobsException(
"The topmost collection in file '{fname}' must be a list,"
" not a {cls}".format(fname=getattr(fp, 'name', fp),
cls=type(data)))
for item in data:
cls, dfn = next(iter(item.items()))
group = self.data.get(cls, {})
if len(item.items()) > 1:
n = None
for k, v in item.items():
if k == "name":
n = v
break
# Syntax error
raise JenkinsJobsException("Syntax error, for item "
"named '{0}'. Missing indent?"
.format(n))
# allow any entry to specify an id that can also be used
_id = dfn.get('id', dfn['name'])
if _id in group:
self._handle_dups(
"Duplicate entry found in '{0}: '{1}' already "
"defined".format(fp.name, _id))
group[_id] = dfn
self.data[cls] = group
def parse(self, fn):
with io.open(fn, 'r', encoding='utf-8') as fp:
self._parse_fp(fp)
def _handle_dups(self, message):
if not self.jjb_config.yamlparser['allow_duplicates']:
logger.error(message)
raise JenkinsJobsException(message)
else:
logger.warning(message)
def _getJob(self, name):
job = self.data.get('job', {}).get(name, None)
if not job:
return job
return self._applyDefaults(job)
def _getJobGroup(self, name):
return self.data.get('job-group', {}).get(name, None)
def _getJobTemplate(self, name):
job = self.data.get('job-template', {}).get(name, None)
if not job:
return job
return self._applyDefaults(job)
def _applyDefaults(self, data, override_dict=None):
if override_dict is None:
override_dict = {}
whichdefaults = data.get('defaults', 'global')
defaults = copy.deepcopy(self.data.get('defaults',
{}).get(whichdefaults, {}))
if defaults == {} and whichdefaults != 'global':
raise JenkinsJobsException("Unknown defaults set: '{0}'"
.format(whichdefaults))
for key in override_dict.keys():
if key in defaults.keys():
defaults[key] = override_dict[key]
newdata = {}
newdata.update(defaults)
newdata.update(data)
return newdata
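    # Illustrative precedence (assumption, not taken from the source): with
    #   defaults: {'global': {'node': 'master', 'concurrent': False}}
    # a job {'name': 'foo', 'node': 'builder'} resolves to
    #   {'node': 'builder', 'concurrent': False, 'name': 'foo'}
    # i.e. values given on the entry itself always win over inherited defaults.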
def _formatDescription(self, job):
if self.keep_desc:
description = job.get("description", None)
else:
description = job.get("description", '')
if description is not None:
job["description"] = description + \
self._get_managed_string().lstrip()
def _getfullname(self, data):
if 'folder' in data:
return "%s/%s" % (data['folder'], data['name'])
return data['name']
def expandYaml(self, registry, jobs_glob=None):
changed = True
while changed:
changed = False
for module in registry.modules:
if hasattr(module, 'handle_data'):
if module.handle_data(self.data):
changed = True
for job in self.data.get('job', {}).values():
job = self._applyDefaults(job)
job['name'] = self._getfullname(job)
if jobs_glob and not matches(job['name'], jobs_glob):
logger.debug("Ignoring job {0}".format(job['name']))
continue
logger.debug("Expanding job '{0}'".format(job['name']))
self._formatDescription(job)
self.jobs.append(job)
for view in self.data.get('view', {}).values():
view['name'] = self._getfullname(view)
logger.debug("Expanding view '{0}'".format(view['name']))
self._formatDescription(view)
self.views.append(view)
for project in self.data.get('project', {}).values():
logger.debug("Expanding project '{0}'".format(project['name']))
# use a set to check for duplicate job references in projects
seen = set()
for jobspec in project.get('jobs', []):
if isinstance(jobspec, dict):
# Singleton dict containing dict of job-specific params
jobname, jobparams = next(iter(jobspec.items()))
if not isinstance(jobparams, dict):
jobparams = {}
else:
jobname = jobspec
jobparams = {}
job = self._getJob(jobname)
if job:
# Just naming an existing defined job
if jobname in seen:
self._handle_dups("Duplicate job '{0}' specified "
"for project '{1}'"
.format(jobname, project['name']))
seen.add(jobname)
continue
# see if it's a job group
group = self._getJobGroup(jobname)
if group:
for group_jobspec in group['jobs']:
if isinstance(group_jobspec, dict):
group_jobname, group_jobparams = \
next(iter(group_jobspec.items()))
if not isinstance(group_jobparams, dict):
group_jobparams = {}
else:
group_jobname = group_jobspec
group_jobparams = {}
job = self._getJob(group_jobname)
if job:
if group_jobname in seen:
self._handle_dups(
"Duplicate job '{0}' specified for "
"project '{1}'".format(group_jobname,
project['name']))
seen.add(group_jobname)
continue
template = self._getJobTemplate(group_jobname)
# Allow a group to override parameters set by a project
d = type(project)(project)
d.update(jobparams)
d.update(group)
d.update(group_jobparams)
# Except name, since the group's name is not useful
d['name'] = project['name']
if template:
self._expandYamlForTemplateJob(d, template,
jobs_glob)
continue
# see if it's a template
template = self._getJobTemplate(jobname)
if template:
d = type(project)(project)
d.update(jobparams)
self._expandYamlForTemplateJob(d, template, jobs_glob)
else:
raise JenkinsJobsException("Failed to find suitable "
"template named '{0}'"
.format(jobname))
for viewspec in project.get('views', []):
if isinstance(viewspec, dict):
# Singleton dict containing dict of view-specific params
viewname, viewparams = next(iter(viewspec.items()))
if not isinstance(viewparams, dict):
viewparams = {}
else:
viewname = viewspec
viewparams = {}
view = self._getView(viewname)
if view:
# Just naming an existing defined view
if viewname in seen:
self._handle_dups("Duplicate view '{0}' specified "
"for project '{1}'"
.format(viewname, project['name']))
seen.add(viewname)
continue
# see if it's a view group
group = self._getViewGroup(viewname)
if group:
for group_viewspec in group['views']:
if isinstance(group_viewspec, dict):
group_viewname, group_viewparams = \
next(iter(group_viewspec.items()))
if not isinstance(group_viewparams, dict):
group_viewparams = {}
else:
group_viewname = group_viewspec
group_viewparams = {}
view = self._getView(group_viewname)
if view:
if group_viewname in seen:
self._handle_dups(
"Duplicate view '{0}' specified for "
"project '{1}'".format(group_viewname,
project['name']))
seen.add(group_viewname)
continue
template = self._getViewTemplate(group_viewname)
# Allow a group to override parameters set by a project
d = type(project)(project)
d.update(viewparams)
d.update(group)
d.update(group_viewparams)
# Except name, since the group's name is not useful
d['name'] = project['name']
if template:
self._expandYamlForTemplateView(
d, template, jobs_glob)
continue
# see if it's a template
template = self._getViewTemplate(viewname)
if template:
d = type(project)(project)
d.update(viewparams)
self._expandYamlForTemplateView(d, template, jobs_glob)
else:
raise JenkinsJobsException("Failed to find suitable "
"template named '{0}'"
.format(viewname))
# check for duplicate generated jobs
seen = set()
# walk the list in reverse so that last definition wins
for job in self.jobs[::-1]:
if job['name'] in seen:
self._handle_dups("Duplicate definitions for job '{0}' "
"specified".format(job['name']))
self.jobs.remove(job)
seen.add(job['name'])
# check for duplicate generated views
seen_views = set()
# walk the list in reverse so that last definition wins
for view in self.views[::-1]:
if view['name'] in seen_views:
self._handle_dups("Duplicate definitions for view '{0}' "
"specified".format(view['name']))
self.views.remove(view)
seen_views.add(view['name'])
return self.jobs, self.views
def _expandYamlForTemplateJob(self, project, template, jobs_glob=None):
dimensions = []
template_name = template['name']
# reject keys that are not useful during yaml expansion
for k in ['jobs']:
project.pop(k)
excludes = project.pop('exclude', [])
for (k, v) in project.items():
tmpk = '{{{0}}}'.format(k)
if tmpk not in template_name:
continue
if type(v) == list:
dimensions.append(zip([k] * len(v), v))
# XXX somewhat hackish to ensure we actually have a single
# pass through the loop
if len(dimensions) == 0:
dimensions = [(("", ""),)]
for values in itertools.product(*dimensions):
params = copy.deepcopy(project)
params = self._applyDefaults(params, template)
params['template-name'] = re.sub(r'({|})', r'\1\1', template_name)
try:
expanded_values = {}
for (k, v) in values:
if isinstance(v, dict):
inner_key = next(iter(v))
expanded_values[k] = inner_key
expanded_values.update(v[inner_key])
else:
expanded_values[k] = v
except TypeError:
project_name = project.pop('name')
logger.error(
"Exception thrown while expanding template '%s' for "
"project '%s', with expansion arguments of:\n%s\n"
"Original project input variables for template:\n%s\n"
"Most likely the inputs have items indented incorrectly "
"to describe how they should be applied.\n\nNote yaml "
"'null' is mapped to python's 'None'", template_name,
project_name,
"".join(local_yaml.dump({k: v}, default_flow_style=False)
for (k, v) in values),
local_yaml.dump(project, default_flow_style=False))
raise
params.update(expanded_values)
try:
params = deep_format(params, params)
except Exception:
logging.error(
"Failure formatting params '%s' with itself", params)
raise
if combination_matches(params, excludes):
logger.debug('Excluding combination %s', str(params))
continue
for key in template.keys():
if key not in params:
params[key] = template[key]
try:
expanded = deep_format(
template, params,
self.jjb_config.yamlparser['allow_empty_variables'])
except Exception:
logging.error(
"Failure formatting template '%s', containing '%s' with "
"params '%s'", template_name, template, params)
raise
expanded['name'] = self._getfullname(expanded)
job_name = expanded.get('name')
if jobs_glob and not matches(job_name, jobs_glob):
continue
self._formatDescription(expanded)
self.jobs.append(expanded)
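    # Illustrative example (assumption, not taken from the source): a project like
    #   {'name': 'foo', 'pyver': ['27', '35'], 'jobs': ['{name}-{pyver}']}
    # combined with a job-template named '{name}-{pyver}' builds one dimension for
    # 'pyver' (the only list-valued key referenced in the template name), and the
    # itertools.product() above then yields two expanded jobs: 'foo-27' and 'foo-35'.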
def _get_managed_string(self):
# The \n\n is not hard coded, because they get stripped if the
# project does not otherwise have a description.
return "\n\n" + MAGIC_MANAGE_STRING
# Views related
def _getView(self, name):
view = self.data.get('view', {}).get(name, None)
if not view:
return view
return self._applyDefaults(view)
def _getViewGroup(self, name):
return self.data.get('view-group', {}).get(name, None)
def _getViewTemplate(self, name):
view = self.data.get('view-template', {}).get(name, None)
if not view:
return view
return self._applyDefaults(view)
def _expandYamlForTemplateView(self, project, template, views_glob=None):
dimensions = []
template_name = template['name']
# reject keys that are not useful during yaml expansion
for k in ['views']:
project.pop(k)
excludes = project.pop('exclude', [])
for (k, v) in project.items():
tmpk = '{{{0}}}'.format(k)
if tmpk not in template_name:
continue
if type(v) == list:
dimensions.append(zip([k] * len(v), v))
# XXX somewhat hackish to ensure we actually have a single
# pass through the loop
if len(dimensions) == 0:
dimensions = [(("", ""),)]
for values in itertools.product(*dimensions):
params = copy.deepcopy(project)
params = self._applyDefaults(params, template)
expanded_values = {}
for (k, v) in values:
if isinstance(v, dict):
inner_key = next(iter(v))
expanded_values[k] = inner_key
expanded_values.update(v[inner_key])
else:
expanded_values[k] = v
params.update(expanded_values)
params = deep_format(params, params)
if combination_matches(params, excludes):
logger.debug('Excluding combination %s', str(params))
continue
for key in template.keys():
if key not in params:
params[key] = template[key]
params['template-name'] = template_name
expanded = deep_format(
template, params,
self.jjb_config.yamlparser['allow_empty_variables'])
view_name = expanded.get('name')
if views_glob and not matches(view_name, views_glob):
continue
self._formatDescription(expanded)
self.views.append(expanded)
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:4370")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:4370")
cmd = sys.argv[1].lower()
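# Example usage (illustrative; the script filename here is an assumption):
#   python hazecoin-rpc.py getinfo
#   python hazecoin-rpc.py getbalance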
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a HazeCoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a HazeCoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|