from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
import datetime
# Create your models here.
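# Schema overview: Article belongs to a Category and a UserProfile author;
# Comment references both an Article and, optionally, another Comment
# (parent_comment) to support threaded replies, with comment_type
# distinguishing text comments (1) from likes (2).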
class Article(models.Model):
title = models.CharField(max_length=255)
brief = models.CharField(null=True,blank=True,max_length=255)
category = models.ForeignKey("Category")
content = models.TextField(u"文章内容")
author = models.ForeignKey("UserProfile")
pub_date = models.DateTimeField(blank=True,null=True)
last_modify = models.DateTimeField(auto_now=True)
priority = models.IntegerField(u"优先级",default=1000)
head_img = models.ImageField(u"文章标题图片",upload_to="uploads")
status_choices = (('draft',u"草稿"),
('published',u"已发布"),
('hidden',u"隐藏"),
)
status = models.CharField(choices=status_choices,default='published',max_length=32)
def __str__(self):
return self.title
def clean(self):
# Don't allow draft entries to have a pub_date.
if self.status == 'draft' and self.pub_date is not None:
            raise ValidationError('Draft entries may not have a publication date.')
        # Set the pub_date for published items if it hasn't been set already.
        if self.status == 'published' and self.pub_date is None:
            self.pub_date = datetime.datetime.now()  # pub_date is a DateTimeField, so use a datetime rather than a date
class Comment(models.Model):
    article = models.ForeignKey(Article, verbose_name=u"owning article")
parent_comment = models.ForeignKey('self',related_name='my_children',blank=True,null=True)
    comment_choices = ((1, u'comment'),
                       (2, u"like"))
comment_type = models.IntegerField(choices=comment_choices,default=1)
user = models.ForeignKey("UserProfile")
comment = models.TextField(blank=True,null=True)
date = models.DateTimeField(auto_now_add=True)
    def clean(self):
        # `not self.comment` is None-safe; len() would raise TypeError when
        # the nullable comment field is unset.
        if self.comment_type == 1 and not self.comment:
            raise ValidationError(u'Comment content cannot be empty.')
def __str__(self):
return "C:%s" %(self.comment)
class Category(models.Model):
name = models.CharField(max_length=64,unique=True)
brief = models.CharField(null=True,blank=True,max_length=255)
set_as_top_menu = models.BooleanField(default=False)
position_index = models.SmallIntegerField()
admins = models.ManyToManyField("UserProfile",blank=True)
def __str__(self):
return self.name
class UserProfile(models.Model):
user = models.OneToOneField(User)
    name = models.CharField(max_length=32)
    signature = models.CharField(max_length=255, blank=True, null=True)
    # height_field/width_field must name model fields that cache the image's
    # dimensions; literal ints are invalid, so they are dropped here.
    head_img = models.ImageField(upload_to="uploads", blank=True, null=True)
def __str__(self):
        return self.name

# ---- repo: XiaJieCom/change | path: stu103151/days20/project/bbs/models.py | license: lgpl-2.1 | size: 2,809 ----
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
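# Registers the level-4 'womprat' creature with the core spawn service:
# combat stats, harvestable resources, the shared .iff appearance template,
# a default unarmed weapon, and its attack list.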
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('womprat')
mobileTemplate.setLevel(4)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Wild Meat")
mobileTemplate.setMeatAmount(4)
mobileTemplate.setHideType("Leathery Hide")
mobileTemplate.setBoneAmount(3)
mobileTemplate.setBoneType("Animal Bone")
mobileTemplate.setHideAmount(2)
mobileTemplate.setSocialGroup("womprat")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_womp_rat.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_bite_1')
attacks.add('bm_bolster_armor_1')
attacks.add('bm_enfeeble_1')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('womprat', mobileTemplate)
    return

# ---- repo: agry/NGECore2 | path: scripts/mobiles/tatooine/womprat.py | license: lgpl-3.0 | size: 1,620 ----
#!/usr/bin/env python3
import os
import json
import logging
import unittest
from pycaching import Geocaching
from pycaching.utfgrid import UTFGrid, GridCoordinateBlock
from pycaching.errors import Error
from test.test_geocaching import _username, _password
_this_folder = os.path.dirname(__file__)
sample_files = {i: os.path.join(_this_folder, i) for i in ["sample_caches.csv", "sample_utfgrid.json"]}
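# Offline fixtures: sample_caches.csv lists expected waypoint,lat,lon rows and
# sample_utfgrid.json is a locally stored UTFGrid tile used by test_parse.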
class TestUTFGrid(unittest.TestCase):
def setUp(self):
self.grid = UTFGrid(Geocaching(), 8800, 5574, 14)
def test_download(self):
"""Test if downloading a tile goes nice without errors"""
self.grid._gc.login(_username, _password)
with self.subTest("Not getting .png tile first"):
list(self.grid.download())
with self.subTest("Getting .png tile first"):
list(self.grid.download(get_png_first=True))
def test_parse(self):
"""Parse locally stored grid and compare to expected results"""
expected_caches = {}
with open(sample_files["sample_caches.csv"]) as f:
for row in f:
wp, lat, lon = row.split(',')
expected_caches[wp] = (float(lat), float(lon))
with open(sample_files["sample_utfgrid.json"]) as f:
j = json.loads(f.read())
caches = self.grid._parse_utfgrid(j)
        for c in caches:
            # label the subtest with the parsed cache's own waypoint; the
            # original used `wp`, a leftover from the CSV loop above
            with self.subTest("Cache " + c.wp):
                self.assertIn(c.wp, expected_caches)
                self.assertAlmostEqual(c.location.latitude, expected_caches[c.wp][0])
                self.assertAlmostEqual(c.location.longitude, expected_caches[c.wp][1])
                expected_caches.pop(c.wp)
self.assertEqual(len(expected_caches), 0)
class TestGridCoordinateBlock(unittest.TestCase):
# {descriptor: [points, midpoint, x_lim, y_lim]}
good_cases = {9: [[(1, 1), (1, 2), (1, 3),
(2, 1), (2, 2), (2, 3),
(3, 1), (3, 2), (3, 3)],
[2.0, 2.0],
(1, 3), (1, 3)],
6: [[(1, 0), (1, 1),
(2, 0), (2, 1),
(3, 0), (3, 1)],
[2.0, 0.0],
(1, 3), (-1, 1)],
4: [[(62, 62), (62, 63),
(63, 62), (63, 63)],
[63.0, 63.0],
(62, 64), (62, 64)],
3: [[(63, 30), (63, 31), (63, 32)],
[64.0, 31.0],
(63, 65), (30, 32)],
2: [[(62, 0),
(63, 0)],
[63.0, -1.0],
(62, 64), (-2, 0)],
1: [[(0, 63)],
[-1.0, 64.0],
(-2, 0), (63, 65)],
}
bad_cases = {'too much points':
[(1, 1), (1, 2), (1, 3),
(2, 1), (2, 2), (2, 3),
(3, 1), (3, 2), (3, 3), (3, 4)],
'still too much points':
[(63, 30), (63, 31), (63, 32), (63, 33)],
'point missing: 9':
[(1, 1), (1, 3),
(2, 1), (2, 2), (2, 3),
(3, 1), (3, 2), (3, 3)],
'point missing: 6':
[(1, 0), (1, 1),
(2, 0),
(3, 0), (3, 1)],
'points not aligned':
[(1, 1), (1, 2), (1, 3),
(2, 1), (2, 3), (2, 4),
(3, 1), (3, 2), (3, 3)],
}
def setUp(self):
self.grid = UTFGrid(Geocaching(), 8800, 5574, 14)
self.grid.size = 64
self.cb = GridCoordinateBlock(self.grid)
    def test_determine_block_size(self):
with self.subTest("Initial value"):
self.assertEqual(GridCoordinateBlock.size, 3)
with self.subTest("Initial value of instance"):
self.assertEqual(GridCoordinateBlock(self.grid).size, 3)
with self.subTest("No changes: same value"):
sizes = [100] * 9 + [4] * 3 + [1]
GridCoordinateBlock.determine_block_size(*sizes)
self.assertEqual(GridCoordinateBlock.size, 3)
with self.subTest("No changes: no input"):
GridCoordinateBlock.determine_block_size()
self.assertEqual(GridCoordinateBlock.size, 3)
with self.subTest("Should change to 16"):
sizes = [16] * 21 + [4]
with self.assertLogs(level=logging.WARNING):
GridCoordinateBlock.determine_block_size(*sizes)
self.assertEqual(GridCoordinateBlock.size, 4)
with self.subTest("New value of instance"):
self.assertEqual(GridCoordinateBlock(self.grid).size, 4)
# Set back to initial value
GridCoordinateBlock.size = 3
def test_add_point(self):
"""Test passing points at initialization"""
with self.subTest("Zero points"):
self.assertEqual(self.cb.points,
GridCoordinateBlock(self.grid).points)
with self.subTest("One point"):
self.cb.points = []
self.cb.add((3, 4))
self.assertEqual(self.cb.points,
GridCoordinateBlock(self.grid, (3, 4)).points)
with self.subTest("Multiple points: pass directly"):
points = [(0, 0), (1, 2), (3, 4), (1, 2), (5, 6)]
self.cb.points = points
self.assertEqual(self.cb.points,
GridCoordinateBlock(self.grid, *points).points)
with self.subTest("Multiple points: update"):
self.cb.points = []
points = [(0, 0), (1, 2), (3, 4), (1, 2), (5, 6)]
self.cb.update(points)
self.assertEqual(self.cb.points,
GridCoordinateBlock(self.grid, *points).points)
def test_get_middle_point(self):
"""Check that correct middle points are returned"""
for case in [self.good_cases, self.bad_cases]:
for i in case:
if case is self.good_cases:
points, mid_point, xlim, ylim = self.good_cases[i]
with self.subTest('{} points'.format(i)):
self.cb.points = points
self.assertEqual(self.cb._get_middle_point(),
mid_point)
else:
with self.subTest('Malformed input: {}'.format(i)):
with self.assertRaises(Error):
self.cb.points = self.bad_cases[i]
self.cb._get_middle_point()
def test_check_block(self):
"""Test block form with various passes and fails"""
for case in [self.good_cases, self.bad_cases]:
for i in case:
if case is self.good_cases:
self.cb.points = case[i][0]
with self.subTest(i):
if i == 9:
self.assertEqual(self.cb._check_block(), 1, i)
else:
self.assertEqual(self.cb._check_block(), 2, i)
else:
self.cb.points = case[i]
with self.subTest(i):
self.assertEqual(self.cb._check_block(), 0, i)
def test_find_limits(self):
"""Check calculation of block limits when going out of the border"""
for i in self.good_cases:
points, mid_point, xlim, ylim = self.good_cases[i]
self.cb.points = points
for axis, limits in zip(['x', 'y'], [xlim, ylim]):
with self.subTest('{} points, {} axis'.format(i, axis)):
self.assertEqual(self.cb._find_limits(axis), limits)
# ---- repo: kumy/pycaching | path: test/test_utfgrid.py | license: lgpl-3.0 | size: 7,914 ----
import unittest
from magnumfe import *
set_log_active(False)
class CacheTest(unittest.TestCase):
def test_initial_update(self):
mesh = UnitCubeMesh(1,1,1)
state = State(mesh)
cache = Cache()
self.assertTrue(cache.requires_update(state))
def test_change_state(self):
mesh = UnitCubeMesh(1,1,1)
state1 = State(mesh)
state2 = State(mesh)
cache = Cache()
count = 0
if cache.requires_update(state1): count += 1
if cache.requires_update(state1): count += 1
self.assertEqual(1, count)
if cache.requires_update(state2): count += 1
self.assertEqual(2, count)
def test_update_required(self):
mesh = UnitCubeMesh(2, 2, 2)
state = State(mesh, m = Constant((1.0, 0.0, 0.0)), j = Constant((0.0, 0.0, 0.0)))
cache = Cache("m", "t")
count = 0
if cache.requires_update(state): count += 1
self.assertEqual(1, count)
if cache.requires_update(state): count += 1
self.assertEqual(1, count)
state.t = 1.0
if cache.requires_update(state): count += 1
self.assertEqual(2, count)
if cache.requires_update(state): count += 1
self.assertEqual(2, count)
state.m = Constant((0.0, 1.0, 0.0))
if cache.requires_update(state): count += 1
self.assertEqual(3, count)
if cache.requires_update(state): count += 1
self.assertEqual(3, count)
state.j = Constant((1.0, 0.0, 0.0))
if cache.requires_update(state): count += 1
self.assertEqual(3, count)
if __name__ == '__main__':
unittest.main()
# ---- repo: micromagnetics/magnum.fe | path: tests/cache_test.py | license: lgpl-3.0 | size: 1,521 ----
# graphicsDisplay.py
# ------------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero ([email protected]) and Dan Klein ([email protected]).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
from graphicsUtils import *
import math, time
from game import Directions
###########################
# GRAPHICS DISPLAY CODE #
###########################
# Most code by Dan Klein and John Denero written or rewritten for cs188, UC Berkeley.
# Some code from a Pacman implementation by LiveWires, and used / modified with permission.
DEFAULT_GRID_SIZE = 30.0
INFO_PANE_HEIGHT = 35
BACKGROUND_COLOR = formatColor(0,0,0)
WALL_COLOR = formatColor(0.0/255.0, 51.0/255.0, 255.0/255.0)
INFO_PANE_COLOR = formatColor(.4,.4,0)
SCORE_COLOR = formatColor(.9, .9, .9)
PACMAN_OUTLINE_WIDTH = 2
PACMAN_CAPTURE_OUTLINE_WIDTH = 4
GHOST_COLORS = []
GHOST_COLORS.append(formatColor(.9,0,0)) # Red
GHOST_COLORS.append(formatColor(0,.3,.9)) # Blue
GHOST_COLORS.append(formatColor(.98,.41,.07)) # Orange
GHOST_COLORS.append(formatColor(.1,.75,.7)) # Green
GHOST_COLORS.append(formatColor(1.0,0.6,0.0)) # Yellow
GHOST_COLORS.append(formatColor(.4,0.13,0.91)) # Purple
TEAM_COLORS = GHOST_COLORS[:2]
GHOST_SHAPE = [
( 0, 0.3 ),
( 0.25, 0.75 ),
( 0.5, 0.3 ),
( 0.75, 0.75 ),
( 0.75, -0.5 ),
( 0.5, -0.75 ),
(-0.5, -0.75 ),
(-0.75, -0.5 ),
(-0.75, 0.75 ),
(-0.5, 0.3 ),
(-0.25, 0.75 )
]
GHOST_SIZE = 0.65
SCARED_COLOR = formatColor(1,1,1)
GHOST_VEC_COLORS = list(map(colorToVector, GHOST_COLORS))
PACMAN_COLOR = formatColor(255.0/255.0,255.0/255.0,61.0/255)
PACMAN_SCALE = 0.5
#pacman_speed = 0.25
# Food
FOOD_COLOR = formatColor(1,1,1)
FOOD_SIZE = 0.1
# Laser
LASER_COLOR = formatColor(1,0,0)
LASER_SIZE = 0.02
# Capsule graphics
CAPSULE_COLOR = formatColor(1,1,1)
CAPSULE_SIZE = 0.25
# Drawing walls
WALL_RADIUS = 0.15
class InfoPane:
def __init__(self, layout, gridSize):
self.gridSize = gridSize
self.width = (layout.width) * gridSize
self.base = (layout.height + 1) * gridSize
self.height = INFO_PANE_HEIGHT
self.fontSize = 24
self.textColor = PACMAN_COLOR
self.drawPane()
def toScreen(self, pos, y = None):
"""
Translates a point relative from the bottom left of the info pane.
"""
if y == None:
x,y = pos
else:
x = pos
x = self.gridSize + x # Margin
y = self.base + y
return x,y
def drawPane(self):
self.scoreText = text( self.toScreen(0, 0 ), self.textColor, "SCORE: 0", "Times", self.fontSize, "bold")
def initializeGhostDistances(self, distances):
self.ghostDistanceText = []
size = 20
if self.width < 240:
size = 12
if self.width < 160:
size = 10
for i, d in enumerate(distances):
t = text( self.toScreen(self.width/2 + self.width/8 * i, 0), GHOST_COLORS[i+1], d, "Times", size, "bold")
self.ghostDistanceText.append(t)
def updateScore(self, score):
changeText(self.scoreText, "SCORE: % 4d" % score)
    def setTeam(self, isBlue):
        # 'label' avoids shadowing the text() drawing helper from graphicsUtils
        label = "RED TEAM"
        if isBlue: label = "BLUE TEAM"
        self.teamText = text(self.toScreen(300, 0), self.textColor, label, "Times", self.fontSize, "bold")
def updateGhostDistances(self, distances):
if len(distances) == 0: return
if 'ghostDistanceText' not in dir(self): self.initializeGhostDistances(distances)
else:
for i, d in enumerate(distances):
changeText(self.ghostDistanceText[i], d)
def drawGhost(self):
pass
def drawPacman(self):
pass
def drawWarning(self):
pass
def clearIcon(self):
pass
def updateMessage(self, message):
pass
def clearMessage(self):
pass
class PacmanGraphics:
def __init__(self, zoom=1.0, frameTime=0.0, capture=False):
self.have_window = 0
self.currentGhostImages = {}
self.pacmanImage = None
self.zoom = zoom
self.gridSize = DEFAULT_GRID_SIZE * zoom
self.capture = capture
self.frameTime = frameTime
def initialize(self, state, isBlue = False):
self.isBlue = isBlue
self.startGraphics(state)
# self.drawDistributions(state)
self.distributionImages = None # Initialized lazily
self.drawStaticObjects(state)
self.drawAgentObjects(state)
# Information
self.previousState = state
def startGraphics(self, state):
self.layout = state.layout
layout = self.layout
self.width = layout.width
self.height = layout.height
self.make_window(self.width, self.height)
self.infoPane = InfoPane(layout, self.gridSize)
self.currentState = layout
def drawDistributions(self, state):
walls = state.layout.walls
dist = []
for x in range(walls.width):
distx = []
dist.append(distx)
for y in range(walls.height):
( screen_x, screen_y ) = self.to_screen( (x, y) )
block = square( (screen_x, screen_y),
0.5 * self.gridSize,
color = BACKGROUND_COLOR,
filled = 1, behind=2)
distx.append(block)
self.distributionImages = dist
def drawStaticObjects(self, state):
layout = self.layout
self.drawWalls(layout.walls)
self.food = self.drawFood(layout.food)
self.capsules = self.drawCapsules(layout.capsules)
refresh()
def drawAgentObjects(self, state):
self.agentImages = [] # (agentState, image)
for index, agent in enumerate(state.agentStates):
if agent.isPacman:
image = self.drawPacman(agent, index)
self.agentImages.append( (agent, image) )
else:
image = self.drawGhost(agent, index)
self.agentImages.append( (agent, image) )
refresh()
def swapImages(self, agentIndex, newState):
"""
        Changes an image from a ghost to a pacman or vice versa (for capture)
"""
prevState, prevImage = self.agentImages[agentIndex]
for item in prevImage: remove_from_screen(item)
if newState.isPacman:
image = self.drawPacman(newState, agentIndex)
self.agentImages[agentIndex] = (newState, image )
else:
image = self.drawGhost(newState, agentIndex)
self.agentImages[agentIndex] = (newState, image )
refresh()
def update(self, newState):
agentIndex = newState._agentMoved
agentState = newState.agentStates[agentIndex]
if self.agentImages[agentIndex][0].isPacman != agentState.isPacman: self.swapImages(agentIndex, agentState)
prevState, prevImage = self.agentImages[agentIndex]
if agentState.isPacman:
self.animatePacman(agentState, prevState, prevImage)
else:
self.moveGhost(agentState, agentIndex, prevState, prevImage)
self.agentImages[agentIndex] = (agentState, prevImage)
        if newState._foodEaten is not None:
            self.removeFood(newState._foodEaten, self.food)
        if newState._capsuleEaten is not None:
            self.removeCapsule(newState._capsuleEaten, self.capsules)
self.infoPane.updateScore(newState.score)
if 'ghostDistances' in dir(newState):
self.infoPane.updateGhostDistances(newState.ghostDistances)
def make_window(self, width, height):
grid_width = (width-1) * self.gridSize
grid_height = (height-1) * self.gridSize
screen_width = 2*self.gridSize + grid_width
screen_height = 2*self.gridSize + grid_height + INFO_PANE_HEIGHT
begin_graphics(screen_width,
screen_height,
BACKGROUND_COLOR,
"AI Pacman")
def drawPacman(self, pacman, index):
position = self.getPosition(pacman)
screen_point = self.to_screen(position)
endpoints = self.getEndpoints(self.getDirection(pacman))
width = PACMAN_OUTLINE_WIDTH
outlineColor = PACMAN_COLOR
fillColor = PACMAN_COLOR
if self.capture:
outlineColor = TEAM_COLORS[index % 2]
fillColor = GHOST_COLORS[index]
width = PACMAN_CAPTURE_OUTLINE_WIDTH
return [circle(screen_point, PACMAN_SCALE * self.gridSize,
fillColor = fillColor, outlineColor = outlineColor,
endpoints = endpoints,
width = width)]
def getEndpoints(self, direction, position=(0,0)):
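        # The mouth aperture oscillates with the fractional grid position via
        # a sine wave, so Pacman appears to chomp as he moves between cells.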
x, y = position
pos = x - int(x) + y - int(y)
width = 30 + 80 * math.sin(math.pi* pos)
delta = width / 2
if (direction == 'West'):
endpoints = (180+delta, 180-delta)
elif (direction == 'North'):
endpoints = (90+delta, 90-delta)
elif (direction == 'South'):
endpoints = (270+delta, 270-delta)
else:
endpoints = (0+delta, 0-delta)
return endpoints
def movePacman(self, position, direction, image):
screenPosition = self.to_screen(position)
endpoints = self.getEndpoints( direction, position )
r = PACMAN_SCALE * self.gridSize
moveCircle(image[0], screenPosition, r, endpoints)
refresh()
def animatePacman(self, pacman, prevPacman, image):
if self.frameTime < 0:
print('Press any key to step forward, "q" to play')
keys = wait_for_keys()
if 'q' in keys:
self.frameTime = 0.1
if self.frameTime > 0.01 or self.frameTime < 0:
start = time.time()
fx, fy = self.getPosition(prevPacman)
px, py = self.getPosition(pacman)
frames = 4.0
for i in range(1,int(frames) + 1):
pos = px*i/frames + fx*(frames-i)/frames, py*i/frames + fy*(frames-i)/frames
self.movePacman(pos, self.getDirection(pacman), image)
refresh()
sleep(abs(self.frameTime) / frames)
else:
self.movePacman(self.getPosition(pacman), self.getDirection(pacman), image)
refresh()
def getGhostColor(self, ghost, ghostIndex):
if ghost.scaredTimer > 0:
return SCARED_COLOR
else:
return GHOST_COLORS[ghostIndex]
def drawGhost(self, ghost, agentIndex):
pos = self.getPosition(ghost)
dir = self.getDirection(ghost)
(screen_x, screen_y) = (self.to_screen(pos) )
coords = []
for (x, y) in GHOST_SHAPE:
coords.append((x*self.gridSize*GHOST_SIZE + screen_x, y*self.gridSize*GHOST_SIZE + screen_y))
colour = self.getGhostColor(ghost, agentIndex)
body = polygon(coords, colour, filled = 1)
WHITE = formatColor(1.0, 1.0, 1.0)
BLACK = formatColor(0.0, 0.0, 0.0)
dx = 0
dy = 0
if dir == 'North':
dy = -0.2
if dir == 'South':
dy = 0.2
if dir == 'East':
dx = 0.2
if dir == 'West':
dx = -0.2
leftEye = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE)
rightEye = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE)
leftPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK)
rightPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK)
ghostImageParts = []
ghostImageParts.append(body)
ghostImageParts.append(leftEye)
ghostImageParts.append(rightEye)
ghostImageParts.append(leftPupil)
ghostImageParts.append(rightPupil)
return ghostImageParts
def moveEyes(self, pos, dir, eyes):
(screen_x, screen_y) = (self.to_screen(pos) )
dx = 0
dy = 0
if dir == 'North':
dy = -0.2
if dir == 'South':
dy = 0.2
if dir == 'East':
dx = 0.2
if dir == 'West':
dx = -0.2
moveCircle(eyes[0],(screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2)
moveCircle(eyes[1],(screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2)
moveCircle(eyes[2],(screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08)
moveCircle(eyes[3],(screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08)
def moveGhost(self, ghost, ghostIndex, prevGhost, ghostImageParts):
old_x, old_y = self.to_screen(self.getPosition(prevGhost))
new_x, new_y = self.to_screen(self.getPosition(ghost))
delta = new_x - old_x, new_y - old_y
for ghostImagePart in ghostImageParts:
move_by(ghostImagePart, delta)
refresh()
if ghost.scaredTimer > 0:
color = SCARED_COLOR
else:
color = GHOST_COLORS[ghostIndex]
edit(ghostImageParts[0], ('fill', color), ('outline', color))
self.moveEyes(self.getPosition(ghost), self.getDirection(ghost), ghostImageParts[-4:])
refresh()
    def getPosition(self, agentState):
        if agentState.configuration is None: return (-1000, -1000)
        return agentState.getPosition()
    def getDirection(self, agentState):
        if agentState.configuration is None: return Directions.STOP
        return agentState.configuration.getDirection()
def finish(self):
end_graphics()
def to_screen(self, point):
( x, y ) = point
#y = self.height - y
x = (x + 1)*self.gridSize
y = (self.height - y)*self.gridSize
return ( x, y )
# Fixes some TK issue with off-center circles
def to_screen2(self, point):
( x, y ) = point
#y = self.height - y
x = (x + 1)*self.gridSize
y = (self.height - y)*self.gridSize
return ( x, y )
def drawWalls(self, wallMatrix):
wallColor = WALL_COLOR
for xNum, x in enumerate(wallMatrix):
if self.capture and (xNum * 2) < wallMatrix.width: wallColor = TEAM_COLORS[0]
if self.capture and (xNum * 2) >= wallMatrix.width: wallColor = TEAM_COLORS[1]
for yNum, cell in enumerate(x):
if cell: # There's a wall here
pos = (xNum, yNum)
screen = self.to_screen(pos)
screen2 = self.to_screen2(pos)
# draw each quadrant of the square based on adjacent walls
wIsWall = self.isWall(xNum-1, yNum, wallMatrix)
eIsWall = self.isWall(xNum+1, yNum, wallMatrix)
nIsWall = self.isWall(xNum, yNum+1, wallMatrix)
sIsWall = self.isWall(xNum, yNum-1, wallMatrix)
nwIsWall = self.isWall(xNum-1, yNum+1, wallMatrix)
swIsWall = self.isWall(xNum-1, yNum-1, wallMatrix)
neIsWall = self.isWall(xNum+1, yNum+1, wallMatrix)
seIsWall = self.isWall(xNum+1, yNum-1, wallMatrix)
# NE quadrant
if (not nIsWall) and (not eIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (0,91), 'arc')
if (nIsWall) and (not eIsWall):
# vertical line
line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor)
if (not nIsWall) and (eIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
if (nIsWall) and (eIsWall) and (not neIsWall):
# outer circle
circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (180,271), 'arc')
line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5))), wallColor)
# NW quadrant
if (not nIsWall) and (not wIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (90,181), 'arc')
if (nIsWall) and (not wIsWall):
# vertical line
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor)
if (not nIsWall) and (wIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5)-1, self.gridSize*(-1)*WALL_RADIUS)), wallColor)
if (nIsWall) and (wIsWall) and (not nwIsWall):
# outer circle
circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (270,361), 'arc')
line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5), self.gridSize*(-1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5))), wallColor)
# SE quadrant
if (not sIsWall) and (not eIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (270,361), 'arc')
if (sIsWall) and (not eIsWall):
# vertical line
line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor)
if (not sIsWall) and (eIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(1)*WALL_RADIUS)), wallColor)
if (sIsWall) and (eIsWall) and (not seIsWall):
# outer circle
circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (90,181), 'arc')
line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5, self.gridSize*(1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5))), wallColor)
# SW quadrant
if (not sIsWall) and (not wIsWall):
# inner circle
circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (180,271), 'arc')
if (sIsWall) and (not wIsWall):
# vertical line
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor)
if (not sIsWall) and (wIsWall):
# horizontal line
line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5)-1, self.gridSize*(1)*WALL_RADIUS)), wallColor)
if (sIsWall) and (wIsWall) and (not swIsWall):
# outer circle
circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (0,91), 'arc')
line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5), self.gridSize*(1)*WALL_RADIUS)), wallColor)
line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5))), wallColor)
def isWall(self, x, y, walls):
if x < 0 or y < 0:
return False
if x >= walls.width or y >= walls.height:
return False
return walls[x][y]
def drawFood(self, foodMatrix ):
foodImages = []
color = FOOD_COLOR
for xNum, x in enumerate(foodMatrix):
if self.capture and (xNum * 2) <= foodMatrix.width: color = TEAM_COLORS[0]
if self.capture and (xNum * 2) > foodMatrix.width: color = TEAM_COLORS[1]
imageRow = []
foodImages.append(imageRow)
for yNum, cell in enumerate(x):
if cell: # There's food here
screen = self.to_screen((xNum, yNum ))
dot = circle( screen,
FOOD_SIZE * self.gridSize,
outlineColor = color, fillColor = color,
width = 1)
imageRow.append(dot)
else:
imageRow.append(None)
return foodImages
def drawCapsules(self, capsules ):
capsuleImages = {}
for capsule in capsules:
( screen_x, screen_y ) = self.to_screen(capsule)
dot = circle( (screen_x, screen_y),
CAPSULE_SIZE * self.gridSize,
outlineColor = CAPSULE_COLOR,
fillColor = CAPSULE_COLOR,
width = 1)
capsuleImages[capsule] = dot
return capsuleImages
def removeFood(self, cell, foodImages ):
x, y = cell
remove_from_screen(foodImages[x][y])
def removeCapsule(self, cell, capsuleImages ):
x, y = cell
remove_from_screen(capsuleImages[(x, y)])
def drawExpandedCells(self, cells):
"""
Draws an overlay of expanded grid positions for search agents
"""
n = float(len(cells))
baseColor = [1.0, 0.0, 0.0]
self.clearExpandedCells()
self.expandedCells = []
for k, cell in enumerate(cells):
screenPos = self.to_screen( cell)
cellColor = formatColor(*[(n-k) * c * .5 / n + .25 for c in baseColor])
block = square(screenPos,
0.5 * self.gridSize,
color = cellColor,
filled = 1, behind=2)
self.expandedCells.append(block)
if self.frameTime < 0:
refresh()
def clearExpandedCells(self):
if 'expandedCells' in dir(self) and len(self.expandedCells) > 0:
for cell in self.expandedCells:
remove_from_screen(cell)
def updateDistributions(self, distributions):
"Draws an agent's belief distributions"
        if self.distributionImages is None:
self.drawDistributions(self.previousState)
for x in range(len(self.distributionImages)):
for y in range(len(self.distributionImages[0])):
image = self.distributionImages[x][y]
weights = [dist[ (x,y) ] for dist in distributions]
if sum(weights) != 0:
pass
# Fog of war
color = [0.0,0.0,0.0]
colors = GHOST_VEC_COLORS[1:] # With Pacman
if self.capture: colors = GHOST_VEC_COLORS
for weight, gcolor in zip(weights, colors):
color = [min(1.0, c + 0.95 * g * weight ** .3) for c,g in zip(color, gcolor)]
changeColor(image, formatColor(*color))
refresh()
class FirstPersonPacmanGraphics(PacmanGraphics):
def __init__(self, zoom = 1.0, showGhosts = True, capture = False, frameTime=0):
PacmanGraphics.__init__(self, zoom, frameTime=frameTime)
self.showGhosts = showGhosts
self.capture = capture
def initialize(self, state, isBlue = False):
self.isBlue = isBlue
PacmanGraphics.startGraphics(self, state)
# Initialize distribution images
walls = state.layout.walls
dist = []
self.layout = state.layout
# Draw the rest
self.distributionImages = None # initialize lazily
self.drawStaticObjects(state)
self.drawAgentObjects(state)
# Information
self.previousState = state
def lookAhead(self, config, state):
if config.getDirection() == 'Stop':
return
else:
pass
# Draw relevant ghosts
allGhosts = state.getGhostStates()
visibleGhosts = state.getVisibleGhosts()
for i, ghost in enumerate(allGhosts):
if ghost in visibleGhosts:
self.drawGhost(ghost, i)
else:
self.currentGhostImages[i] = None
def getGhostColor(self, ghost, ghostIndex):
return GHOST_COLORS[ghostIndex]
def getPosition(self, ghostState):
if not self.showGhosts and not ghostState.isPacman and ghostState.getPosition()[1] > 1:
return (-1000, -1000)
else:
return PacmanGraphics.getPosition(self, ghostState)
def add(x, y):
return (x[0] + y[0], x[1] + y[1])
# Saving graphical output
# -----------------------
# Note: to make an animated gif from this postscript output, try the command:
# convert -delay 7 -loop 1 -compress lzw -layers optimize frame* out.gif
# convert is part of imagemagick (freeware)
SAVE_POSTSCRIPT = False
POSTSCRIPT_OUTPUT_DIR = 'frames'
FRAME_NUMBER = 0
import os
def saveFrame():
"Saves the current graphical output as a postscript file"
global SAVE_POSTSCRIPT, FRAME_NUMBER, POSTSCRIPT_OUTPUT_DIR
if not SAVE_POSTSCRIPT: return
if not os.path.exists(POSTSCRIPT_OUTPUT_DIR): os.mkdir(POSTSCRIPT_OUTPUT_DIR)
name = os.path.join(POSTSCRIPT_OUTPUT_DIR, 'frame_%08d.ps' % FRAME_NUMBER)
FRAME_NUMBER += 1
writePostscript(name) # writes the current canvas
# ---- repo: rio-group/trabalho-ai-pacman | path: graphicsDisplay.py | license: unlicense | size: 24,882 ----
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import pytest
from pants.backend.core.tasks.list_goals import ListGoals
from pants.backend.core.tasks.task import Task
from pants.goal.goal import Goal
from pants.goal.task_registrar import TaskRegistrar
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class ListGoalsTest(ConsoleTaskTestBase):
_INSTALLED_HEADER = 'Installed goals:'
_UNDOCUMENTED_HEADER = 'Undocumented goals:'
_LIST_GOALS_NAME = 'goals'
_LIST_GOALS_DESC = 'List all documented goals.'
_LLAMA_NAME = 'llama'
_LLAMA_DESC = 'With such handsome fiber, no wonder everyone loves Llamas.'
_ALPACA_NAME = 'alpaca'
@classmethod
def task_type(cls):
return ListGoals
class LlamaTask(Task):
pass
class AlpacaTask(Task):
pass
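  # LlamaTask is registered with a description; AlpacaTask is not, so it
  # should only appear under 'Undocumented goals:' when the 'all' option is set.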
def test_list_goals(self):
Goal.clear()
self.assert_console_output(self._INSTALLED_HEADER)
TaskRegistrar(name=self._LIST_GOALS_NAME, action=ListGoals)\
.install().with_description(self._LIST_GOALS_DESC)
self.assert_console_output(
self._INSTALLED_HEADER,
' {0}: {1}'.format(self._LIST_GOALS_NAME, self._LIST_GOALS_DESC),
)
TaskRegistrar(name=self._LLAMA_NAME, action=ListGoalsTest.LlamaTask)\
.install().with_description(self._LLAMA_DESC)
self.assert_console_output(
self._INSTALLED_HEADER,
' {0}: {1}'.format(self._LIST_GOALS_NAME, self._LIST_GOALS_DESC),
' {0}: {1}'.format(self._LLAMA_NAME, self._LLAMA_DESC),
)
TaskRegistrar(name=self._ALPACA_NAME, action=ListGoalsTest.AlpacaTask, dependencies=[self._LLAMA_NAME])\
.install()
self.assert_console_output(
self._INSTALLED_HEADER,
' {0}: {1}'.format(self._LIST_GOALS_NAME, self._LIST_GOALS_DESC),
' {0}: {1}'.format(self._LLAMA_NAME, self._LLAMA_DESC),
)
def test_list_goals_all(self):
Goal.clear()
TaskRegistrar(name=self._LIST_GOALS_NAME, action=ListGoals)\
.install().with_description(self._LIST_GOALS_DESC)
TaskRegistrar(name=self._LLAMA_NAME, action=ListGoalsTest.LlamaTask)\
.install().with_description(self._LLAMA_DESC)
TaskRegistrar(name=self._ALPACA_NAME, action=ListGoalsTest.AlpacaTask, dependencies=[self._LLAMA_NAME])\
.install()
self.assert_console_output(
self._INSTALLED_HEADER,
' {0}: {1}'.format(self._LIST_GOALS_NAME, self._LIST_GOALS_DESC),
' {0}: {1}'.format(self._LLAMA_NAME, self._LLAMA_DESC),
'',
self._UNDOCUMENTED_HEADER,
' {0}'.format(self._ALPACA_NAME),
options={ 'all': True }
)
# TODO(John Sirois): Re-enable when fixing up ListGoals `--graph` in
# https://github.com/pantsbuild/pants/issues/918
@pytest.mark.xfail
def test_list_goals_graph(self):
Goal.clear()
TaskRegistrar(name=self._LIST_GOALS_NAME, action=ListGoals)\
.install().with_description(self._LIST_GOALS_DESC)
TaskRegistrar(name=self._LLAMA_NAME, action=ListGoalsTest.LlamaTask)\
.install().with_description(self._LLAMA_DESC)
TaskRegistrar(name=self._ALPACA_NAME, action=ListGoalsTest.AlpacaTask, dependencies=[self._LLAMA_NAME])\
.install()
self.assert_console_output(
'digraph G {\n rankdir=LR;\n graph [compound=true];',
' subgraph cluster_goals {\n node [style=filled];\n color = blue;\n label = "goals";',
' goals_goals [label="goals"];',
' }',
' subgraph cluster_llama {\n node [style=filled];\n color = blue;\n label = "llama";',
' llama_llama [label="llama"];',
' }',
' subgraph cluster_alpaca {\n node [style=filled];\n color = blue;\n label = "alpaca";',
' alpaca_alpaca [label="alpaca"];',
' }',
' alpaca_alpaca -> llama_llama [ltail=cluster_alpaca lhead=cluster_llama];',
'}',
options={ 'graph': True }
)
# ---- repo: pgroudas/pants | path: tests/python/pants_test/tasks/test_list_goals.py | license: apache-2.0 | size: 4,088 ----
# -*- coding: utf-8 -*-
import os
from flask import request
from website.addons.dataverse.client import get_study, get_files, \
get_dataverse, connect_from_settings
from website.project.decorators import must_be_contributor_or_public
from website.project.decorators import must_have_addon
from website.util import rubeus
def dataverse_hgrid_root(node_addon, auth, state=None, **kwargs):
node = node_addon.owner
user_settings = node_addon.user_settings
default_state = 'released'
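    # Non-contributors can only browse the released version; contributors
    # default to 'released' but may request the 'draft' view.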
state = 'released' if not node.can_edit(auth) else state or default_state
connection = connect_from_settings(user_settings)
# Quit if no study linked
if node_addon.study_hdl is None or connection is None:
return []
dataverse = get_dataverse(connection, node_addon.dataverse_alias)
study = get_study(dataverse, node_addon.study_hdl)
# Quit if hdl does not produce a study
if study is None:
return []
released_files = get_files(study, released=True)
authorized = node.can_edit(auth)
# Produce draft version or quit if no released version is available
if not released_files:
if authorized:
state = 'draft'
else:
return []
study_name = node_addon.study
if len(study_name) > 23:
study_name = u'{0}...'.format(study_name[:20])
permissions = {
'edit': node.can_edit(auth) and not node.is_registration,
'view': node.can_view(auth)
}
urls = {
'upload': node.api_url_for('dataverse_upload_file'),
'fetch': node.api_url_for('dataverse_hgrid_data_contents', state=state),
'state': node.api_url_for('dataverse_root_folder_public'),
'release': node.api_url_for('dataverse_release_study'),
}
buttons = [rubeus.build_addon_button(
'<i class="fa fa-globe"></i> Release Study',
'releaseStudy')] if state == 'draft' else None
return [rubeus.build_addon_root(
node_addon,
study_name,
urls=urls,
permissions=permissions,
buttons=buttons,
study=study_name,
doi=study.doi,
dataverse=dataverse.title,
citation=study.citation,
hasReleasedFiles=bool(released_files),
state=state,
)]
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_root_folder_public(node_addon, auth, **kwargs):
state = request.args['state']
return dataverse_hgrid_root(node_addon, auth=auth, state=state)
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_hgrid_data_contents(node_addon, auth, **kwargs):
node = node_addon.owner
user_settings = node_addon.user_settings
state = request.args.get('state')
default_state = 'released'
state = 'released' if not node.can_edit(auth) else state or default_state
released = state == 'released'
can_edit = node.can_edit(auth) and not node.is_registration and not released
can_view = node.can_view(auth)
connection = connect_from_settings(user_settings)
if node_addon.study_hdl is None or connection is None:
return []
dataverse = get_dataverse(connection, node_addon.dataverse_alias)
study = get_study(dataverse, node_addon.study_hdl)
# Quit if hdl does not produce a study
if study is None:
return []
info = []
for f in get_files(study, released):
item = {
'addon': 'dataverse',
'provider': 'dataverse',
rubeus.KIND: 'file',
'name': f.name,
'path': f.name,
'file_id': f.id,
'ext': os.path.splitext(f.name)[1],
'urls': {
'view': node.web_url_for('dataverse_view_file',
path=f.id),
'download': node.web_url_for('dataverse_download_file',
path=f.id),
'delete': node.api_url_for('dataverse_delete_file',
path=f.id),
},
'permissions': {
'view': can_view,
'edit': can_edit,
},
}
info.append(item)
return {'data': info}
# ---- repo: himanshuo/osf.io | path: website/addons/dataverse/views/hgrid.py | license: apache-2.0 | size: 4,249 ----
from django.contrib.contenttypes.models import ContentType
import json
from django.http import Http404, HttpResponse
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
from guardian.decorators import permission_required
from guardian.shortcuts import get_objects_for_user
from account.models import DepartmentGroup
from backend.tasks import TestConnectionTask
from event.models import NotificationPreferences
from .models import Application, Department, Environment, Server, ServerRole
from task.models import Execution
@login_required
def index(request):
data = {}
executions = Execution.objects.filter(task__application__department_id=request.current_department_id)
if not executions.count():
return redirect(reverse('first_steps_page'))
return render(request, 'page/index.html', data)
@permission_required('core.view_application', (Application, 'id', 'application_id'))
def application_page(request, application_id):
data = {}
data['application'] = get_object_or_404(Application, pk=application_id)
return render(request, 'page/application.html', data)
@permission_required('core.view_environment', (Environment, 'id', 'environment_id'))
def environment_page(request, environment_id):
data = {}
data['environment'] = get_object_or_404(Environment, pk=environment_id)
data['servers'] = list(Server.objects.filter(environment_id=environment_id).prefetch_related('roles'))
return render(request, 'page/environment.html', data)
@permission_required('core.view_environment', (Environment, 'servers__id', 'server_id'))
def server_test(request, server_id):
data = {}
data['server'] = get_object_or_404(Server, pk=server_id)
data['task_id'] = TestConnectionTask().delay(server_id).id
return render(request, 'partial/server_test.html', data)
@login_required
def server_test_ajax(request, task_id):
data = {}
task = TestConnectionTask().AsyncResult(task_id)
if task.status == 'SUCCESS':
status, output = task.get()
data['status'] = status
data['output'] = output
elif task.status == 'FAILED':
data['status'] = False
else:
data['status'] = None
return HttpResponse(json.dumps(data), content_type="application/json")
@login_required
def first_steps_page(request):
data = {}
return render(request, 'page/first_steps.html', data)
@login_required
def settings_page(request, section='user', subsection='profile'):
data = {}
data['section'] = section
data['subsection'] = subsection
data['department'] = Department(pk=request.current_department_id)
data['on_settings'] = True
handler = '_settings_%s_%s' % (section, subsection)
if section == 'system' and request.user.is_superuser is not True:
return redirect('index')
if section == 'department' and not request.user.has_perm('core.change_department', obj=data['department']):
return redirect('index')
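    # Settings subpages dispatch dynamically: the request resolves to a
    # module-level function named _settings_<section>_<subsection>, or 404s.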
if handler in globals():
data = globals()[handler](request, data)
else:
raise Http404
return render(request, 'page/settings.html', data)
def _settings_account_profile(request, data):
data['subsection_template'] = 'partial/account_profile.html'
from account.forms import account_create_form
form = account_create_form('user_profile', request, request.user.id)
form.fields['email'].widget.attrs['readonly'] = True
data['form'] = form
if request.method == 'POST':
if form.is_valid():
form.save()
data['user'] = form.instance
messages.success(request, 'Saved')
return data
def _settings_account_password(request, data):
data['subsection_template'] = 'partial/account_password.html'
from account.forms import account_create_form
form = account_create_form('user_password', request, request.user.id)
data['form'] = form
if request.method == 'POST':
if form.is_valid():
user = form.save(commit=False)
user.set_password(user.password)
user.save()
data['user'] = form.instance
messages.success(request, 'Saved')
return data
def _settings_account_notifications(request, data):
data['subsection_template'] = 'partial/account_notifications.html'
data['applications'] = get_objects_for_user(request.user, 'core.view_application')
content_type = ContentType.objects.get_for_model(Application)
if request.method == 'POST':
for application in data['applications']:
key = 'notification[%s]' % application.id
notification, created = NotificationPreferences.objects.get_or_create(
user=request.user,
event_type='ExecutionFinish',
content_type=content_type,
object_id=application.id)
if notification.is_active != (key in request.POST):
notification.is_active = key in request.POST
notification.save()
messages.success(request, 'Saved')
data['notifications'] = NotificationPreferences.objects.filter(
user=request.user,
event_type='ExecutionFinish',
content_type=content_type.id).values_list('object_id', 'is_active')
data['notifications'] = dict(data['notifications'])
return data
def _settings_department_applications(request, data):
data['subsection_template'] = 'partial/application_list.html'
data['applications'] = Application.objects.filter(department_id=request.current_department_id)
data['empty'] = not bool(data['applications'].count())
return data
def _settings_department_users(request, data):
data['subsection_template'] = 'partial/user_list.html'
from guardian.shortcuts import get_users_with_perms
department = Department.objects.get(pk=request.current_department_id)
data['users'] = get_users_with_perms(department).prefetch_related('groups__departmentgroup').order_by('name')
data['department_user_list'] = True
data['form_name'] = 'user'
return data
def _settings_department_groups(request, data):
data['subsection_template'] = 'partial/group_list.html'
data['groups'] = DepartmentGroup.objects.filter(department_id=request.current_department_id)
return data
def _settings_department_serverroles(request, data):
data['subsection_template'] = 'partial/serverrole_list.html'
data['serverroles'] = ServerRole.objects.filter(department_id=request.current_department_id)
data['empty'] = not bool(data['serverroles'].count())
return data
@user_passes_test(lambda u: u.is_superuser)
def _settings_system_departments(request, data):
data['subsection_template'] = 'partial/department_list.html'
data['departments'] = Department.objects.all()
return data
@user_passes_test(lambda u: u.is_superuser)
def _settings_system_users(request, data):
data['subsection_template'] = 'partial/user_list.html'
data['users'] = get_user_model().objects.exclude(id=-1).prefetch_related('groups__departmentgroup__department').order_by('name')
data['form_name'] = 'usersystem'
return data
def department_switch(request, id):
department = get_object_or_404(Department, pk=id)
if request.user.has_perm('core.view_department', department):
request.session['current_department_id'] = int(id)
else:
messages.error(request, 'Access forbidden')
return redirect('index')
def handle_403(request):
    messages.error(request, 'Access forbidden')
    return redirect('index')

# ---- repo: gunnery/gunnery | path: gunnery/core/views.py | license: apache-2.0 | size: 7,964 ----
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorSignature class and utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
class TensorSignature(collections.namedtuple(
"TensorSignature", ["dtype", "shape", "is_sparse"])):
"""Signature of the `Tensor` object.
Useful to check compatibility of tensors.
Attributes:
dtype: `DType` object.
shape: `TensorShape` object.
"""
def __new__(cls, tensor):
if isinstance(tensor, ops.SparseTensor):
return super(TensorSignature, cls).__new__(
cls, dtype=tensor.values.dtype, shape=None, is_sparse=True)
return super(TensorSignature, cls).__new__(
cls, dtype=tensor.dtype, shape=tensor.get_shape(), is_sparse=False)
def is_compatible_with(self, other):
"""Returns True if signatures are compatible."""
def _shape_is_compatible_0dim(this, other):
other = tensor_shape.as_shape(other)
if this.ndims != other.ndims:
return False
for dim, (x_dim, y_dim) in enumerate(zip(this.dims, other.dims)):
if dim == 0:
continue
if not x_dim.is_compatible_with(y_dim):
return False
return True
if other.is_sparse:
return self.is_sparse and self.dtype.is_compatible_with(other.dtype)
return (self.dtype.is_compatible_with(other.dtype) and
_shape_is_compatible_0dim(self.shape, other.shape) and
not self.is_sparse)
def get_placeholder(self):
if self.is_sparse:
return array_ops.sparse_placeholder(dtype=self.dtype)
return array_ops.placeholder(dtype=self.dtype, shape=self.shape)
def tensors_compatible(tensors, signatures):
"""Check that tensors are compatible with signatures.
Args:
tensors: Dict of `Tensor` objects or single `Tensor` object.
signatures: Dict of `TensorSignature` objects or
single `TensorSignature` object.
Returns:
True if all tensors are compatible, False otherwise.
"""
# Dict of Tensors as input.
if isinstance(tensors, dict):
if not isinstance(signatures, dict):
return False
for key in signatures:
if key not in tensors:
return False
if not TensorSignature(tensors[key]).is_compatible_with(signatures[key]):
return False
return True
# Single tensor as input.
if isinstance(signatures, dict):
return False
return TensorSignature(tensors).is_compatible_with(signatures)
def create_signatures(tensors):
"""Creates TensorSignature objects for given tensors.
Args:
tensors: Dict of `Tensor` objects or single `Tensor`.
Returns:
Dict of `TensorSignature` objects or single `TensorSignature`.
"""
if isinstance(tensors, dict):
return {
key: TensorSignature(tensors[key]) for key in tensors}
return TensorSignature(tensors)
def create_placeholders_from_signatures(signatures):
"""Creates placeholders from given signatures.
Args:
signatures: Dict of `TensorSignature` objects or single `TensorSignature`.
Returns:
Dict of `tf.placeholder` objects or single `tf.placeholder`.
"""
if not isinstance(signatures, dict):
return signatures.get_placeholder()
return {
key: signatures[key].get_placeholder()
for key in signatures}
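
# A minimal usage sketch (illustrative only; assumes TF1-era graph mode where
# `array_ops.placeholder` is exposed as `tf.placeholder`):
#
#   import tensorflow as tf
#   x = tf.placeholder(tf.float32, shape=[None, 3])
#   sig = create_signatures(x)                      # TensorSignature for x
#   assert tensors_compatible(x, sig)               # same dtype/shape
#   x2 = create_placeholders_from_signatures(sig)   # fresh placeholder, same spec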
# ---- repo: ivano666/tensorflow | path: tensorflow/contrib/learn/python/learn/estimators/tensor_signature.py | license: apache-2.0 | size: 4,029 ----
import unittest
class Test0017(unittest.TestCase):
def test_problem(self):
one_to_nine = [3, 3, 5, 4, 4, 3, 5, 5, 4]
ten_to_nineteen = [3, 6, 6, 8, 8, 7, 7, 9, 8, 8]
twenty_to_ninety = [6, 6, 5, 5, 5, 7, 6, 6]
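        # Letter counts: one_to_nine = len('one')..len('nine'),
        # ten_to_nineteen = len('ten')..len('nineteen'),
        # twenty_to_ninety = len('twenty')..len('ninety').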
words_len = 0
sum_1_to_9 = sum(one_to_nine)
sum_10_to_19 = sum(ten_to_nineteen)
sum_20_to_90 = sum(twenty_to_ninety)
        # 1-19: all of 1-9 plus all of 10-19
        sum_1_to_99 = sum_1_to_9 + sum_10_to_19
        # 20-99: each tens word appears 10 times (alone and paired with 1-9),
        # and each units word appears once per tens word
        sum_1_to_99 += len(twenty_to_ninety) * sum_1_to_9 + (len(one_to_nine) +
                                                             1) * sum_20_to_90
        # 1-99
        words_len += sum_1_to_99
        # 100-999: each of the 900 numbers reads "<x> hundred and <y>", adding
        # 'hundredand' (10 letters); subtract the 3-letter 'and' for the 9
        # exact hundreds
        words_len += len(one_to_nine) * sum_1_to_99 + 100 * (
            sum_1_to_9 + 10 * len(one_to_nine)) - 3 * len(one_to_nine)
        # 1000: 'onethousand' = 11 letters
        words_len += 11
        self.assertEqual(words_len, 21124)

# ---- repo: mccxj/leetcode | path: projecteuler/p0017_test.py | license: apache-2.0 | size: 930 ----
# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest_lib import exceptions as lib_exc
from tempest.common import fixed_network
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestLargeOpsScenario(manager.ScenarioTest):
"""
Test large operations.
This test below:
* Spin up multiple instances in one nova call, and repeat three times
* as a regular user
* TODO: same thing for cinder
"""
@classmethod
def skip_checks(cls):
super(TestLargeOpsScenario, cls).skip_checks()
if CONF.scenario.large_ops_number < 1:
raise cls.skipException("large_ops_number not set to multiple "
"instances")
@classmethod
def setup_credentials(cls):
cls.set_network_resources()
super(TestLargeOpsScenario, cls).setup_credentials()
@classmethod
def resource_setup(cls):
super(TestLargeOpsScenario, cls).resource_setup()
# list of cleanup calls to be executed in reverse order
cls._cleanup_resources = []
@classmethod
def resource_cleanup(cls):
while cls._cleanup_resources:
function, args, kwargs = cls._cleanup_resources.pop(-1)
try:
function(*args, **kwargs)
except lib_exc.NotFound:
pass
super(TestLargeOpsScenario, cls).resource_cleanup()
@classmethod
def addCleanupClass(cls, function, *arguments, **keywordArguments):
cls._cleanup_resources.append((function, arguments, keywordArguments))
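    # Cleanups registered here run in reverse (LIFO) order in
    # resource_cleanup(), mirroring unittest's addCleanup at class scope.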
def _wait_for_server_status(self, status):
for server in self.servers:
# Make sure nova list keeps working throughout the build process
self.servers_client.list_servers()
waiters.wait_for_server_status(self.servers_client,
server['id'], status)
def nova_boot(self):
name = data_utils.rand_name('scenario-server')
flavor_id = CONF.compute.flavor_ref
# Explicitly create secgroup to avoid cleanup at the end of testcases.
# Since no traffic is tested, we don't need to actually add rules to
# secgroup
secgroup = self.security_groups_client.create_security_group(
name='secgroup-%s' % name, description='secgroup-desc-%s' % name)
self.addCleanupClass(self.security_groups_client.delete_security_group,
secgroup['id'])
create_kwargs = {
'min_count': CONF.scenario.large_ops_number,
'security_groups': [{'name': secgroup['name']}]
}
network = self.get_tenant_network()
create_kwargs = fixed_network.set_networks_kwarg(network,
create_kwargs)
#self.servers_client.create_server(
self.create_server(
name,
'',
flavor_id,
**create_kwargs)
# needed because of bug 1199788
params = {'name': name}
server_list = self.servers_client.list_servers(**params)
self.servers = server_list['servers']
for server in self.servers:
# after deleting all servers - wait for all servers to clear
# before cleanup continues
self.addCleanupClass(self.servers_client.
wait_for_server_termination,
server['id'])
for server in self.servers:
self.addCleanupClass(self.servers_client.delete_server,
server['id'])
self._wait_for_server_status('ACTIVE')
def _large_ops_scenario(self):
#self.glance_image_create()
self.nova_boot()
@test.idempotent_id('14ba0e78-2ed9-4d17-9659-a48f4756ecb3')
@test.services('compute', 'image')
def test_large_ops_scenario_1(self):
self._large_ops_scenario()
@test.idempotent_id('b9b79b88-32aa-42db-8f8f-dcc8f4b4ccfe')
@test.services('compute', 'image')
def test_large_ops_scenario_2(self):
self._large_ops_scenario()
@test.idempotent_id('3aab7e82-2de3-419a-9da1-9f3a070668fb')
@test.services('compute', 'image')
def test_large_ops_scenario_3(self):
self._large_ops_scenario()
# ---- repo: manasi24/tempest | path: tempest/scenario/test_large_ops.py | license: apache-2.0 | size: 5,090 ----
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import django
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
import six
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
class RouterMixin(object):
@test.create_stubs({
api.neutron: ('router_get', 'port_list',
'network_get', 'is_extension_supported',
'list_l3_agent_hosting_router'),
})
def _get_detail(self, router, extraroute=True, lookup_l3=False):
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'extraroute')\
.MultipleTimes().AndReturn(extraroute)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(router)
if lookup_l3:
agent = self.agents.list()[1]
api.neutron.list_l3_agent_hosting_router(IsA(http.HttpRequest), router.id)\
.AndReturn([agent])
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':routers:detail' % self.DASHBOARD,
args=[router.id]))
return res
def _mock_external_network_list(self, alter_ids=False):
search_opts = {'router:external': True}
ext_nets = [n for n in self.networks.list() if n['router:external']]
if alter_ids:
for ext_net in ext_nets:
ext_net.id += 'some extra garbage'
api.neutron.network_list(
IsA(http.HttpRequest),
**search_opts).AndReturn(ext_nets)
def _mock_external_network_get(self, router):
ext_net_id = router.external_gateway_info['network_id']
ext_net = self.networks.list()[2]
api.neutron.network_get(IsA(http.HttpRequest), ext_net_id,
expand_subnet=False).AndReturn(ext_net)
def _mock_network_list(self, tenant_id):
api.neutron.network_list(
IsA(http.HttpRequest),
shared=False,
tenant_id=tenant_id).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
class RouterTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_index(self):
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
routers = res.context['table'].data
self.assertItemsEqual(routers, self.routers.list())
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_index_router_list_exception(self):
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).MultipleTimes().AndRaise(
self.exceptions.neutron)
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
self.assertEqual(len(res.context['table'].data), 0)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_set_external_network_empty(self):
router = self.routers.first()
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).MultipleTimes().AndReturn([router])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list(alter_ids=True)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
table_data = res.context['table'].data
self.assertEqual(len(table_data), 1)
self.assertIn('(Not Found)',
table_data[0]['external_gateway_info']['network'])
self.assertTemplateUsed(res, '%s/routers/index.html' % self.DASHBOARD)
self.assertMessageCount(res, error=1)
def test_router_detail(self):
router = self.routers.first()
res = self._get_detail(router)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
ports = res.context['interfaces_table'].data
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('router_get',)})
def test_router_detail_exception(self):
router = self.routers.first()
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':routers:detail' % self.DASHBOARD,
args=[router.id]))
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_list', 'network_list',
'port_list', 'router_delete',),
quotas: ('tenant_quota_usages',)})
def test_router_delete(self):
router = self.routers.first()
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id, device_owner=IgnoreArg())\
.AndReturn([])
api.neutron.router_delete(IsA(http.HttpRequest), router.id)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
formData = {'action': 'routers__delete__' + router.id}
res = self.client.post(self.INDEX_URL, formData, follow=True)
self.assertNoFormErrors(res)
self.assertMessageCount(response=res, success=1)
self.assertIn('Deleted Router: ' + router.name,
res.content.decode('utf-8'))
@test.create_stubs({api.neutron: ('router_list', 'network_list',
'port_list', 'router_remove_interface',
'router_delete',),
quotas: ('tenant_quota_usages',)})
def test_router_with_interface_delete(self):
router = self.routers.first()
ports = self.ports.list()
quota_data = self.neutron_quota_usages.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
self._mock_external_network_list()
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id, device_owner=IgnoreArg())\
.AndReturn(ports)
for port in ports:
api.neutron.router_remove_interface(IsA(http.HttpRequest),
router.id, port_id=port.id)
api.neutron.router_delete(IsA(http.HttpRequest), router.id)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
formData = {'action': 'routers__delete__' + router.id}
res = self.client.post(self.INDEX_URL, formData, follow=True)
self.assertNoFormErrors(res)
self.assertMessageCount(response=res, success=1)
self.assertIn('Deleted Router: ' + router.name,
res.content.decode('utf-8'))
class RouterActionTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',
'network_list')})
def test_router_create_post(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(False)
api.neutron.network_list(IsA(http.HttpRequest))\
.AndReturn(self.networks.list())
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',
'network_list')})
def test_router_create_post_mode_server_default(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.AndReturn(True)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(True)
api.neutron.network_list(IsA(http.HttpRequest))\
.AndReturn(self.networks.list())
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'mode': 'server_default',
'ha': 'server_default',
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',
'network_list')})
def test_dvr_ha_router_create_post(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(True)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.MultipleTimes().AndReturn(True)
api.neutron.network_list(IsA(http.HttpRequest))\
.AndReturn(self.networks.list())
param = {'name': router.name,
'distributed': True,
'ha': True,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **param)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'mode': 'distributed',
'ha': 'enabled',
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',
'network_list')})
def test_router_create_post_exception_error_case_409(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.AndReturn(False)
self.exceptions.neutron.status_code = 409
api.neutron.network_list(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(self.networks.list())
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'name': router.name,
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_feature_permission',
'network_list')})
def test_router_create_post_exception_error_case_non_409(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "create")\
.MultipleTimes().AndReturn(False)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"l3-ha", "create")\
.MultipleTimes().AndReturn(False)
self.exceptions.neutron.status_code = 999
api.neutron.network_list(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(self.networks.list())
params = {'name': router.name,
'admin_state_up': str(router.admin_state_up)}
api.neutron.router_create(IsA(http.HttpRequest), **params)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'name': router.name,
'admin_state_up': router.admin_state_up}
url = reverse('horizon:%s:routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_get',
'get_feature_permission')})
def _test_router_update_get(self, dvr_enabled=False,
current_dvr=False,
ha_enabled=False):
router = [r for r in self.routers.list()
if r.distributed == current_dvr][0]
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(dvr_enabled)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(ha_enabled)
self.mox.ReplayAll()
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
return self.client.get(url)
def test_router_update_get_dvr_disabled(self):
res = self._test_router_update_get(dvr_enabled=False)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertNotContains(res, 'Router Type')
self.assertNotContains(res, 'id="id_mode"')
def test_router_update_get_dvr_enabled_mode_centralized(self):
res = self._test_router_update_get(dvr_enabled=True, current_dvr=False)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertContains(res, 'Router Type')
# Check both menu are displayed.
self.assertContains(
res,
'<option value="centralized" selected="selected">'
'Centralized</option>',
html=True)
self.assertContains(
res,
'<option value="distributed">Distributed</option>',
html=True)
def test_router_update_get_dvr_enabled_mode_distributed(self):
res = self._test_router_update_get(dvr_enabled=True, current_dvr=True)
self.assertTemplateUsed(res, 'project/routers/update.html')
self.assertContains(res, 'Router Type')
if django.VERSION >= (1, 10):
pattern = ('<input class="form-control" id="id_mode" name="mode" '
'readonly="readonly" type="text" value="distributed" '
'required/>')
else:
pattern = ('<input class="form-control" id="id_mode" name="mode" '
'readonly="readonly" type="text" '
'value="distributed" />')
self.assertContains(res, pattern, html=True)
self.assertNotContains(res, 'centralized')
@test.create_stubs({api.neutron: ('router_get',
'router_update',
'get_feature_permission')})
def test_router_update_post_dvr_ha_disabled(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(False)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(False)
api.neutron.router_update(IsA(http.HttpRequest), router.id,
name=router.name,
admin_state_up=router.admin_state_up)\
.AndReturn(router)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'name': router.name,
'admin_state': router.admin_state_up}
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_get',
'router_update',
'get_feature_permission')})
def test_router_update_post_dvr_ha_enabled(self):
router = self.routers.first()
api.neutron.get_feature_permission(IsA(http.HttpRequest),
"dvr", "update")\
.AndReturn(True)
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# api.neutron.get_feature_permission(IsA(http.HttpRequest),
# "l3-ha", "update")\
# .AndReturn(True)
api.neutron.router_update(IsA(http.HttpRequest), router.id,
name=router.name,
admin_state_up=router.admin_state_up,
# ha=True,
distributed=True).AndReturn(router)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'name': router.name,
'admin_state': router.admin_state_up,
'mode': 'distributed',
'ha': True}
url = reverse('horizon:%s:routers:update' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
def _test_router_addinterface(self, raise_error=False):
router = self.routers.first()
subnet = self.subnets.first()
port = self.ports.first()
add_interface = api.neutron.router_add_interface(
IsA(http.HttpRequest), router.id, subnet_id=subnet.id)
if raise_error:
add_interface.AndRaise(self.exceptions.neutron)
else:
add_interface.AndReturn({'subnet_id': subnet.id,
'port_id': port.id})
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
self._check_router_addinterface(router, subnet)
def _check_router_addinterface(self, router, subnet, ip_address=''):
# mock APIs used to show router detail
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.port_list(IsA(http.HttpRequest), device_id=router.id)\
.AndReturn([])
self._mock_network_list(router['tenant_id'])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'subnet_id': subnet.id,
'ip_address': ip_address}
url = reverse('horizon:%s:routers:addinterface' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = reverse(self.DETAIL_PATH, args=[router.id])
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_add_interface',
'port_get',
'network_list',
'port_list')})
def test_router_addinterface(self):
self._test_router_addinterface()
@test.create_stubs({api.neutron: ('router_get',
'router_add_interface',
'network_list',
'port_list')})
def test_router_addinterface_exception(self):
self._test_router_addinterface(raise_error=True)
def _test_router_addinterface_ip_addr(self, errors=None):
errors = errors or []
router = self.routers.first()
subnet = self.subnets.first()
port = self.ports.first()
ip_addr = port['fixed_ips'][0]['ip_address']
self._setup_mock_addinterface_ip_addr(router, subnet, port,
ip_addr, errors)
self._check_router_addinterface(router, subnet, ip_addr)
def _setup_mock_addinterface_ip_addr(self, router, subnet, port,
ip_addr, errors=None):
errors = errors or []
subnet_get = api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)
if 'subnet_get' in errors:
subnet_get.AndRaise(self.exceptions.neutron)
return
subnet_get.AndReturn(subnet)
params = {'network_id': subnet.network_id,
'fixed_ips': [{'subnet_id': subnet.id,
'ip_address': ip_addr}]}
port_create = api.neutron.port_create(IsA(http.HttpRequest), **params)
if 'port_create' in errors:
port_create.AndRaise(self.exceptions.neutron)
return
port_create.AndReturn(port)
add_inf = api.neutron.router_add_interface(
IsA(http.HttpRequest), router.id, port_id=port.id)
if 'add_interface' not in errors:
return
add_inf.AndRaise(self.exceptions.neutron)
port_delete = api.neutron.port_delete(IsA(http.HttpRequest), port.id)
if 'port_delete' in errors:
port_delete.AndRaise(self.exceptions.neutron)
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr(self):
self._test_router_addinterface_ip_addr()
@test.create_stubs({api.neutron: ('subnet_get', 'router_get',
'network_list', 'port_list')})
def test_router_addinterface_ip_addr_exception_subnet_get(self):
self._test_router_addinterface_ip_addr(errors=['subnet_get'])
@test.create_stubs({api.neutron: ('subnet_get', 'port_create',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr_exception_port_create(self):
self._test_router_addinterface_ip_addr(errors=['port_create'])
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create', 'port_delete',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr_exception_add_interface(self):
self._test_router_addinterface_ip_addr(errors=['add_interface'])
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create', 'port_delete',
'router_get', 'network_list',
'port_list')})
def test_router_addinterface_ip_addr_exception_port_delete(self):
self._test_router_addinterface_ip_addr(errors=['add_interface',
'port_delete'])
@test.create_stubs({api.neutron: ('router_get',
'router_add_gateway',
'network_list')})
def test_router_add_gateway(self):
router = self.routers.first()
network = self.networks.first()
api.neutron.router_add_gateway(
IsA(http.HttpRequest),
router.id,
network.id).AndReturn(None)
api.neutron.router_get(
IsA(http.HttpRequest), router.id).AndReturn(router)
search_opts = {'router:external': True}
api.neutron.network_list(
IsA(http.HttpRequest), **search_opts).AndReturn([network])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'network_id': network.id}
url = reverse('horizon:%s:routers:setgateway' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = self.INDEX_URL
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_add_gateway',
'network_list')})
def test_router_add_gateway_exception(self):
router = self.routers.first()
network = self.networks.first()
api.neutron.router_add_gateway(
IsA(http.HttpRequest),
router.id,
network.id).AndRaise(self.exceptions.neutron)
api.neutron.router_get(
IsA(http.HttpRequest), router.id).AndReturn(router)
search_opts = {'router:external': True}
api.neutron.network_list(
IsA(http.HttpRequest), **search_opts).AndReturn([network])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'network_id': network.id}
url = reverse('horizon:%s:routers:setgateway' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = self.INDEX_URL
self.assertRedirectsNoFollow(res, detail_url)
class RouterRouteTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:routers:detail' % DASHBOARD
def test_extension_hides_without_routes(self):
router = self.routers_with_routes.first()
res = self._get_detail(router, extraroute=False)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertNotIn('extra_routes_table', res.context)
def test_routerroute_detail(self):
router = self.routers_with_routes.first()
res = self._get_detail(router, extraroute=True)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
routes = res.context['extra_routes_table'].data
routes_dict = [r._apidict for r in routes]
self.assertItemsEqual(routes_dict, router['routes'])
@test.create_stubs({api.neutron: ('router_get', 'router_update')})
def _test_router_addrouterroute(self, raise_error=False):
pre_router = self.routers_with_routes.first()
post_router = copy.deepcopy(pre_router)
route = {'nexthop': '10.0.0.5', 'destination': '40.0.1.0/24'}
post_router['routes'].insert(0, route)
api.neutron.router_get(IsA(http.HttpRequest), pre_router.id)\
.MultipleTimes().AndReturn(pre_router)
params = {}
params['routes'] = post_router['routes']
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
if raise_error:
router_update.AndRaise(self.exceptions.neutron)
else:
router_update.AndReturn({'router': post_router})
self.mox.ReplayAll()
form_data = copy.deepcopy(route)
form_data['router_id'] = pre_router.id
url = reverse('horizon:%s:routers:addrouterroute' % self.DASHBOARD,
args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = reverse(self.DETAIL_PATH, args=[pre_router.id])
self.assertRedirectsNoFollow(res, detail_url)
def test_router_addrouterroute(self):
if self.DASHBOARD == 'project':
self._test_router_addrouterroute()
self.assertMessageCount(success=1)
def test_router_addrouterroute_exception(self):
if self.DASHBOARD == 'project':
self._test_router_addrouterroute(raise_error=True)
self.assertMessageCount(error=1)
@test.create_stubs({api.neutron: ('router_get', 'router_update',
'network_get', 'port_list',
'is_extension_supported')})
def test_router_removeroute(self):
if self.DASHBOARD == 'admin':
return
pre_router = self.routers_with_routes.first()
post_router = copy.deepcopy(pre_router)
route = post_router['routes'].pop()
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'extraroute')\
.MultipleTimes().AndReturn(True)
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
params = {}
params['routes'] = post_router['routes']
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
router_update.AndReturn({'router': post_router})
api.neutron.port_list(IsA(http.HttpRequest),
device_id=pre_router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(pre_router)
self.mox.ReplayAll()
form_route_id = route['nexthop'] + ":" + route['destination']
form_data = {'action': 'extra_routes__delete__%s' % form_route_id}
url = reverse(self.DETAIL_PATH, args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
class RouterViewTests(RouterMixin, test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:routers:index' % DASHBOARD)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_create_button_disabled_when_quota_exceeded(self):
quota_data = self.neutron_quota_usages.first()
quota_data['routers']['available'] = 0
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, 'project/routers/index.html')
routers = res.context['routers_table'].data
self.assertItemsEqual(routers, self.routers.list())
create_action = self.getAndAssertTableAction(res, 'routers', 'create')
self.assertIn('disabled', create_action.classes,
'Create button is not disabled')
self.assertEqual('Create Router (Quota exceeded)',
create_action.verbose_name)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_create_button_shown_when_quota_disabled(self):
quota_data = self.neutron_quota_usages.first()
quota_data['routers'].pop('available')
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, 'project/routers/index.html')
routers = res.context['routers_table'].data
self.assertItemsEqual(routers, self.routers.list())
create_action = self.getAndAssertTableAction(res, 'routers', 'create')
self.assertFalse('disabled' in create_action.classes,
'Create button should not be disabled')
self.assertEqual('Create Router',
create_action.verbose_name)
@test.create_stubs({api.neutron: ('router_list', 'network_list'),
quotas: ('tenant_quota_usages',)})
def test_create_button_attributes(self):
quota_data = self.neutron_quota_usages.first()
quota_data['routers']['available'] = 10
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id).AndReturn(self.routers.list())
quotas.tenant_quota_usages(
IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(quota_data)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, 'project/routers/index.html')
routers = res.context['routers_table'].data
self.assertItemsEqual(routers, self.routers.list())
create_action = self.getAndAssertTableAction(res, 'routers', 'create')
self.assertEqual(set(['ajax-modal']), set(create_action.classes))
self.assertEqual('Create Router',
six.text_type(create_action.verbose_name))
self.assertEqual('horizon:project:routers:create', create_action.url)
self.assertEqual((('network', 'create_router'),),
create_action.policy_rules)
| sandvine/horizon | openstack_dashboard/dashboards/project/routers/tests.py | Python | apache-2.0 | 38,873 |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import absolute_import
from __future__ import print_function
from chaco.label import Label
from six.moves import map
from pychron.core.ui import set_qt
set_qt()
# ============= enthought library imports =======================
from chaco.abstract_overlay import AbstractOverlay
from kiva.fonttools import str_to_font
from traits.api import HasTraits, Instance, Float, File, Property, Str, List
from traitsui.api import View, Controller, UItem
from chaco.api import OverlayPlotContainer
from enable.component_editor import ComponentEditor
from pyface.api import FileDialog, OK
# ============= standard library imports ========================
from lxml.etree import ElementTree, Element
from chaco.plot import Plot
from chaco.array_plot_data import ArrayPlotData
from numpy import linspace, cos, sin, pi
import os
import csv
from chaco.data_label import DataLabel
from pychron.paths import paths
from chaco.plot_graphics_context import PlotGraphicsContext
from traitsui.menu import Action
import math
from pychron.core.helpers.strtools import to_bool
# ============= local library imports ==========================
class myDataLabel(DataLabel):
show_label_coords = False
marker_visible = False
label_position = "center"
border_visible = False
class LabelsOverlay(AbstractOverlay):
labels = List
def overlay(self, other_component, gc, view_bounds=None, mode="normal"):
with gc:
gc.set_font(str_to_font(None, None, "7"))
for x, y, l in self.labels:
ll = Label(x=x, y=y, text=l, font="modern 7")
w, h = ll.get_bounding_box(gc)
x, y = other_component.map_screen([(x, y)])[0]
gc.set_text_position(x - w / 2.0, y + 5)
gc.show_text(l)
class RotatingContainer(OverlayPlotContainer):
rotation = Float(0)
def _draw(self, gc, *args, **kw):
with gc:
w2 = self.width / 2
h2 = self.height / 2
# gc.translate_ctm(w2, h2)
# gc.rotate_ctm(math.radians(self.rotation))
# gc.translate_ctm(-w2, -h2)
super(RotatingContainer, self)._draw(gc, *args, **kw)
class GraphicGeneratorController(Controller):
def save(self, info):
self.model.save()
def traits_view(self):
w, h = 750, 750
v = View(
UItem("srcpath"),
# Item('rotation'),
UItem("container", editor=ComponentEditor(), style="custom"),
width=w + 2,
height=h + 56,
resizable=True,
buttons=[Action(name="Save", action="save"), "OK", "Cancel"],
)
return v
class GraphicModel(HasTraits):
srcpath = File
xmlpath = File
container = Instance(OverlayPlotContainer)
name = Property
_name = Str
rotation = Float(enter_set=True, auto_set=False)
initialized = False
def _get_name(self):
return os.path.splitext(
self._name if self._name else os.path.basename(self.srcpath)
)[0]
def save(self, path=None):
# print self.container.bounds
if path is None:
dlg = FileDialog(action="save as", default_directory=paths.data_dir or "")
if dlg.open() == OK:
path = dlg.path
if path is not None:
_, tail = os.path.splitext(path)
c = self.container
if tail == ".pdf":
from chaco.pdf_graphics_context import PdfPlotGraphicsContext
gc = PdfPlotGraphicsContext(filename=path, pagesize="letter")
else:
if not tail in (".png", ".jpg", ".tiff"):
path = "{}.png".format(path)
gc = PlotGraphicsContext((int(c.outer_width), int(c.outer_height)))
# c.use_backbuffer = False
# for ci in c.components:
# try:
# ci.x_axis.visible = False
# ci.y_axis.visible = False
# except Exception:
# pass
# c.use_backbuffer = False
from reportlab.lib.pagesizes import LETTER
c.do_layout(size=(LETTER[1], LETTER[1]), force=True)
gc.render_component(c)
# c.use_backbuffer = True
gc.save(path)
self._name = os.path.basename(path)
def load(self, path):
parser = ElementTree(file=open(path, "r"))
circles = parser.find("circles")
outline = parser.find("outline")
bb = outline.find("bounding_box")
bs = bb.find("width"), bb.find("height")
w, h = [float(b.text) for b in bs]
use_label = parser.find("use_label")
if use_label is not None:
use_label = to_bool(use_label.text.strip())
else:
use_label = True
data = ArrayPlotData()
p = Plot(data=data, padding=10)
p.x_grid.visible = False
p.y_grid.visible = False
p.x_axis.visible = False
p.y_axis.visible = False
p.x_axis.title = "X cm"
p.y_axis.title = "Y cm"
p.index_range.low_setting = -w / 2
p.index_range.high_setting = w / 2
p.value_range.low_setting = -h / 2
p.value_range.high_setting = h / 2
thetas = linspace(0, 2 * pi)
radius = circles.find("radius").text
radius = float(radius)
face_color = circles.find("face_color")
if face_color is not None:
face_color = face_color.text
else:
face_color = "white"
labels = []
for i, pp in enumerate(circles.findall("point")):
x, y, l = pp.find("x").text, pp.find("y").text, pp.find("label").text
# print i, pp, x, y
# load hole specific attrs
r = pp.find("radius")
if r is None:
r = radius
else:
r = float(r.text)
fc = pp.find("face_color")
if fc is None:
fc = face_color
else:
fc = fc.text
x, y = list(map(float, (x, y)))
xs = x + r * sin(thetas)
ys = y + r * cos(thetas)
xn, yn = "px{:03d}".format(i), "py{:03d}".format(i)
data.set_data(xn, xs)
data.set_data(yn, ys)
plot = p.plot((xn, yn), face_color=fc, type="polygon")[0]
labels.append((x, y, l))
# if use_label:
# label = myDataLabel(component=plot,
# data_point=(x, y),
# label_text=l,
# bgcolor='transparent')
# plot.overlays.append(label)
if use_label:
p.overlays.append(LabelsOverlay(component=plot, labels=labels))
self.container.add(p)
self.container.invalidate_and_redraw()
def _srcpath_changed(self):
# default_radius=radius,
# default_bounds=bounds,
# convert_mm=convert_mm,
# use_label=use_label,
# make=make,
# rotate=rotate)
self._reload()
def _rotation_changed(self):
self._reload()
def _reload(self):
if self.initialized:
self.container = self._container_factory()
print(os.path.isfile(self.srcpath), self.srcpath)
if os.path.isfile(self.srcpath):
p = make_xml(
self.srcpath,
default_bounds=(2.54, 2.54),
default_radius=0.0175 * 2.54,
rotate=self.rotation,
convert_mm=True,
)
self.load(p)
def _container_default(self):
return self._container_factory()
def _container_factory(self):
return RotatingContainer(bgcolor="white")
def make_xml(
path,
offset=100,
default_bounds=(50, 50),
default_radius=3.0,
convert_mm=False,
make=True,
use_label=True,
rotate=0,
):
"""
convert a csv into an xml
use blank line as a group marker
circle labels are offset by ``offset*group_id``
ie. group 0. 1,2,3
group 1. 101,102,103
"""
out = "{}_from_csv.xml".format(os.path.splitext(path)[0])
if not make:
return out
root = Element("root")
ul = Element("use_label")
ul.text = "True" if use_label else "False"
root.append(ul)
outline = Element("outline")
bb = Element("bounding_box")
width, height = Element("width"), Element("height")
width.text, height.text = list(map(str, default_bounds))
bb.append(width)
bb.append(height)
outline.append(bb)
root.append(outline)
circles = Element("circles")
radius = Element("radius")
radius.text = str(default_radius)
circles.append(radius)
face_color = Element("face_color")
face_color.text = "white"
circles.append(face_color)
root.append(circles)
i = 0
off = 0
reader = csv.reader(open(path, "r"), delimiter=",")
# writer = open(path + 'angles.txt', 'w')
nwriter = None
if rotate:
nwriter = csv.writer(open(path + "rotated_{}.txt".format(rotate), "w"))
header = next(reader)
if nwriter:
nwriter.writerow(header)
theta = math.radians(rotate)
for k, row in enumerate(reader):
# print k, row
row = list(map(str.strip, row))
if row:
e = Element("point")
x, y, l = Element("x"), Element("y"), Element("label")
xx, yy = float(row[1]), float(row[2])
try:
r = float(row[4])
rr = Element("radius")
if convert_mm:
r *= 2.54
rr.text = str(r)
e.append(rr)
except IndexError:
r = None
px = math.cos(theta) * xx - math.sin(theta) * yy
py = math.sin(theta) * xx + math.cos(theta) * yy
xx, yy = px, py
if nwriter:
data = ["{:0.4f}".format(xx), "{:0.4f}".format(yy)]
if r is not None:
data.append("{:0.4f}".format(r))
nwriter.writerow(data)
if convert_mm:
xx = xx * 2.54
yy = yy * 2.54
xx *= 1.1
yy *= 1.1
x.text = str(xx)
y.text = str(yy)
# a = math.degrees(math.atan2(yy, xx))
# writer.write('{} {}\n'.format(k + 1, a))
l.text = str(i + 1 + off)
e.append(l)
e.append(x)
e.append(y)
circles.append(e)
i += 1
else:
# use blank rows as group markers
off += offset
i = 0
tree = ElementTree(root)
tree.write(out, xml_declaration=True, method="xml", pretty_print=True)
return out
def open_txt(
p, bounds, radius, use_label=True, convert_mm=False, make=True, rotate=None
):
gm = GraphicModel(srcpath=p, rotation=rotate or 0)
p = make_xml(
p,
offset=0,
default_radius=radius,
default_bounds=bounds,
convert_mm=convert_mm,
use_label=use_label,
make=make,
rotate=rotate,
)
# p = '/Users/ross/Sandbox/graphic_gen_from_csv.xml'
gm.load(p)
gm.initialized = True
gcc = GraphicGeneratorController(model=gm)
return gcc, gm
if __name__ == "__main__":
gm = GraphicModel()
# p = '/Users/ross/Sandbox/2mmirrad.txt'
# p = '/Users/ross/Sandbox/2mmirrad_ordered.txt'
# p = '/Users/ross/Sandbox/1_75mmirrad_ordered.txt'
# p = '/Users/ross/Sandbox/1_75mmirrad_ordered.txt'
# p = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/0_75mmirrad_ordered1.txt'
# p = '/Users/ross/Sandbox/1_75mmirrad.txt'
p = "/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/1_75mmirrad_continuous.txt"
# p = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/0_75mmirrad.txt'
# p = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/0_75mmirrad_continuous.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/newtrays/2mmirrad_continuous.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/newtrays/40_no_spokes.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/newtrays/26_spokes.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/newtrays/26_no_spokes.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/40_spokes.txt'
# p = '/Users/ross/Desktop/72_spokes'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/16_40_ms.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/40_spokes_rev2.txt'
# p = '/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/40_spokes-5.txt'
p = "/Users/ross/Pychrondata_dev/setupfiles/irradiation_tray_maps/construction/newtrays/24_spokes.txt"
p = "/Users/ross/PychronDev/data/o2inch.txt"
p = "/Users/ross/PychronDev/data/421.txt"
gcc, gm = open_txt(p, (51, 51), 0.95, convert_mm=False, make=True, rotate=0)
# p2 = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/newtrays/TX_6-Hole.txt'
# gcc, gm2 = open_txt(p2, (2.54, 2.54), .1, make=False)
# p2 = '/Users/ross/Pychrondata_diode/setupfiles/irradiation_tray_maps/newtrays/TX_20-Hole.txt'
# gcc, gm2 = open_txt(p2, (2.54, 2.54), .1, make=False)
# gm2.container.bgcolor = 'transparent'
# gm2.container.add(gm.container)
gcc.configure_traits()
# ============= EOF =============================================
| USGSDenverPychron/pychron | pychron/entry/graphic_generator.py | Python | apache-2.0 | 14,662 |
# Copyright (c) 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import stat
import warnings
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers import remotefs as remotefs_drv
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('glusterfs_shares_config',
default='/etc/cinder/glusterfs_shares',
help='File with the list of available gluster shares'),
cfg.StrOpt('glusterfs_mount_point_base',
default='$state_path/mnt',
help='Base dir containing mount points for gluster shares.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, driver.CloneableVD,
driver.ExtendVD):
"""Gluster based cinder driver.
Creates file on Gluster share for using it as block device on hypervisor.
Operations such as create/delete/extend volume/snapshot use locking on a
per-process basis to prevent multiple threads from modifying qcow2 chains
or the snapshot .info file simultaneously.
"""
driver_volume_type = 'glusterfs'
driver_prefix = 'glusterfs'
volume_backend_name = 'GlusterFS'
VERSION = '1.3.0'
def __init__(self, execute=processutils.execute, *args, **kwargs):
self._remotefsclient = None
super(GlusterfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
root_helper = utils.get_root_helper()
self.base = getattr(self.configuration,
'glusterfs_mount_point_base',
CONF.glusterfs_mount_point_base)
self._remotefsclient = remotefs_brick.RemoteFsClient(
'glusterfs', root_helper, execute,
glusterfs_mount_point_base=self.base)
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
super(GlusterfsDriver, self).do_setup(context)
config = self.configuration.glusterfs_shares_config
if not config:
msg = (_("There's no Gluster config file configured (%s)") %
'glusterfs_shares_config')
LOG.warning(msg)
raise exception.GlusterfsException(msg)
if not os.path.exists(config):
msg = (_("Gluster config file at %(config)s doesn't exist") %
{'config': config})
LOG.warning(msg)
raise exception.GlusterfsException(msg)
self.shares = {}
try:
self._execute('mount.glusterfs', check_exit_code=False)
except OSError as exc:
if exc.errno == errno.ENOENT:
raise exception.GlusterfsException(
_('mount.glusterfs is not installed'))
else:
raise
self._refresh_mounts()
def _unmount_shares(self):
self._load_shares_config(self.configuration.glusterfs_shares_config)
for share in self.shares.keys():
try:
self._do_umount(True, share)
except Exception as exc:
LOG.warning(_LW('Exception during unmounting %s'), exc)
def _do_umount(self, ignore_not_mounted, share):
mount_path = self._get_mount_point_for_share(share)
command = ['umount', mount_path]
try:
self._execute(*command, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ignore_not_mounted and 'not mounted' in exc.stderr:
LOG.info(_LI("%s is already umounted"), share)
else:
LOG.error(_LE("Failed to umount %(share)s, reason=%(stderr)s"),
{'share': share, 'stderr': exc.stderr})
raise
def _refresh_mounts(self):
try:
self._unmount_shares()
except processutils.ProcessExecutionError as exc:
if 'target is busy' in exc.stderr:
LOG.warning(_LW("Failed to refresh mounts, reason=%s"),
exc.stderr)
else:
raise
self._ensure_shares_mounted()
def _qemu_img_info(self, path, volume_name):
return super(GlusterfsDriver, self)._qemu_img_info_base(
path, volume_name, self.configuration.glusterfs_mount_point_base)
def check_for_setup_error(self):
"""Just to override parent behavior."""
pass
def _local_volume_dir(self, volume):
hashed = self._get_hash_str(volume['provider_location'])
path = '%s/%s' % (self.configuration.glusterfs_mount_point_base,
hashed)
return path
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
super(GlusterfsDriver, self)._update_volume_stats()
data = self._stats
global_capacity = data['total_capacity_gb']
global_free = data['free_capacity_gb']
thin_enabled = self.configuration.nas_volume_prov_type == 'thin'
if thin_enabled:
provisioned_capacity = self._get_provisioned_capacity()
else:
provisioned_capacity = round(global_capacity - global_free, 2)
data['provisioned_capacity_gb'] = provisioned_capacity
data['max_over_subscription_ratio'] = (
self.configuration.max_over_subscription_ratio)
data['thin_provisioning_support'] = thin_enabled
data['thick_provisioning_support'] = not thin_enabled
self._stats = data
@remotefs_drv.locked_volume_id_operation
def create_volume(self, volume):
"""Creates a volume."""
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
LOG.info(_LI('casted to %s'), volume['provider_location'])
self._do_create_volume(volume)
return {'provider_location': volume['provider_location']}
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
"""Copy data from snapshot to destination volume.
This is done with a qemu-img convert to raw/qcow2 from the snapshot
qcow2.
"""
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s",
{'snap': snapshot['id'],
'vol': volume['id'],
'size': volume_size})
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
vol_path = self._local_volume_dir(snapshot['volume'])
forward_file = snap_info[snapshot['id']]
forward_path = os.path.join(vol_path, forward_file)
# Find the file which backs this file, which represents the point
# when this snapshot was created.
img_info = self._qemu_img_info(forward_path,
snapshot['volume']['name'])
path_to_snap_img = os.path.join(vol_path, img_info.backing_file)
path_to_new_vol = self._local_path_volume(volume)
LOG.debug("will copy from snapshot at %s", path_to_snap_img)
if self.configuration.nas_volume_prov_type == 'thin':
out_format = 'qcow2'
else:
out_format = 'raw'
image_utils.convert_image(path_to_snap_img,
path_to_new_vol,
out_format)
self._set_rw_permissions_for_all(path_to_new_vol)
@remotefs_drv.locked_volume_id_operation
def delete_volume(self, volume):
"""Deletes a logical volume."""
if not volume['provider_location']:
LOG.warning(_LW('Volume %s does not have '
'provider_location specified, '
'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
volume_dir = self._local_volume_dir(volume)
mounted_path = os.path.join(volume_dir,
self.get_active_image_from_info(volume))
self._execute('rm', '-f', mounted_path, run_as_root=True)
# If an exception (e.g. timeout) occurred during delete_snapshot, the
# base volume may linger around, so just delete it if it exists
base_volume_path = self._local_path_volume(volume)
fileutils.delete_if_exists(base_volume_path)
info_path = self._local_path_volume_info(volume)
fileutils.delete_if_exists(info_path)
def _get_matching_backing_file(self, backing_chain, snapshot_file):
return next(f for f in backing_chain
if f.get('backing-filename', '') == snapshot_file)
def ensure_export(self, ctx, volume):
"""Synchronously recreates an export for a logical volume."""
self._ensure_share_mounted(volume['provider_location'])
def create_export(self, ctx, volume, connector):
"""Exports the volume."""
pass
def remove_export(self, ctx, volume):
"""Removes an export for a logical volume."""
pass
def validate_connector(self, connector):
pass
@remotefs_drv.locked_volume_id_operation
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
# Find active qcow2 file
active_file = self.get_active_image_from_info(volume)
path = '%s/%s/%s' % (self.configuration.glusterfs_mount_point_base,
self._get_hash_str(volume['provider_location']),
active_file)
data = {'export': volume['provider_location'],
'name': active_file}
if volume['provider_location'] in self.shares:
data['options'] = self.shares[volume['provider_location']]
# Test file for raw vs. qcow2 format
info = self._qemu_img_info(path, volume['name'])
data['format'] = info.file_format
if data['format'] not in ['raw', 'qcow2']:
msg = _('%s must be a valid raw or qcow2 image.') % path
raise exception.InvalidVolume(msg)
return {
'driver_volume_type': 'glusterfs',
'data': data,
'mount_point_base': self._get_mount_point_base()
}
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
pass
@remotefs_drv.locked_volume_id_operation
def extend_volume(self, volume, size_gb):
volume_path = self.local_path(volume)
info = self._qemu_img_info(volume_path, volume['name'])
backing_fmt = info.file_format
if backing_fmt not in ['raw', 'qcow2']:
msg = _('Unrecognized backing format: %s')
raise exception.InvalidVolume(msg % backing_fmt)
# qemu-img can resize both raw and qcow2 files
image_utils.resize_image(volume_path, size_gb)
def _do_create_volume(self, volume):
"""Create a volume on given glusterfs_share.
:param volume: volume reference
"""
volume_path = self.local_path(volume)
volume_size = volume['size']
LOG.debug("creating new volume at %s", volume_path)
if os.path.exists(volume_path):
msg = _('file already exists at %s') % volume_path
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if self.configuration.nas_volume_prov_type == 'thin':
self._create_qcow2_file(volume_path, volume_size)
else:
try:
self._fallocate(volume_path, volume_size)
except processutils.ProcessExecutionError as exc:
if 'Operation not supported' in exc.stderr:
warnings.warn('Fallocate not supported by current version '
'of glusterfs. So falling back to dd.')
self._create_regular_file(volume_path, volume_size)
else:
fileutils.delete_if_exists(volume_path)
raise
self._set_rw_permissions_for_all(volume_path)
def _ensure_shares_mounted(self):
"""Mount all configured GlusterFS shares."""
self._mounted_shares = []
self._load_shares_config(self.configuration.glusterfs_shares_config)
for share in self.shares.keys():
try:
self._ensure_share_mounted(share)
self._mounted_shares.append(share)
except Exception as exc:
LOG.error(_LE('Exception during mounting %s'), exc)
LOG.debug('Available shares: %s', self._mounted_shares)
def _ensure_share_mounted(self, glusterfs_share):
"""Mount GlusterFS share.
:param glusterfs_share: string
"""
mount_path = self._get_mount_point_for_share(glusterfs_share)
self._mount_glusterfs(glusterfs_share)
# Ensure we can write to this share
group_id = os.getegid()
current_group_id = utils.get_file_gid(mount_path)
current_mode = utils.get_file_mode(mount_path)
if group_id != current_group_id:
cmd = ['chgrp', group_id, mount_path]
self._execute(*cmd, run_as_root=True)
if not (current_mode & stat.S_IWGRP):
cmd = ['chmod', 'g+w', mount_path]
self._execute(*cmd, run_as_root=True)
self._ensure_share_writable(mount_path)
def _find_share(self, volume_size_for):
"""Choose GlusterFS share among available ones for given volume size.
Current implementation looks for greatest capacity.
:param volume_size_for: int size in GB
"""
if not self._mounted_shares:
raise exception.GlusterfsNoSharesMounted()
greatest_size = 0
greatest_share = None
for glusterfs_share in self._mounted_shares:
capacity = self._get_available_capacity(glusterfs_share)[0]
if capacity > greatest_size:
greatest_share = glusterfs_share
greatest_size = capacity
if volume_size_for * units.Gi > greatest_size:
raise exception.GlusterfsNoSuitableShareFound(
volume_size=volume_size_for)
return greatest_share
def _mount_glusterfs(self, glusterfs_share):
"""Mount GlusterFS share to mount path."""
mnt_flags = []
if self.shares.get(glusterfs_share) is not None:
mnt_flags = self.shares[glusterfs_share].split()
try:
self._remotefsclient.mount(glusterfs_share, mnt_flags)
except processutils.ProcessExecutionError:
LOG.error(_LE("Mount failure for %(share)s."),
{'share': glusterfs_share})
raise
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume.
Allow a backup to occur only if no snapshots exist.
Check both Cinder and the file on-disk. The latter is only
a safety mechanism to prevent further damage if the snapshot
information is already inconsistent.
"""
snapshots = self.db.snapshot_get_all_for_volume(context,
backup['volume_id'])
snap_error_msg = _('Backup is not supported for GlusterFS '
'volumes with snapshots.')
if len(snapshots) > 0:
raise exception.InvalidVolume(snap_error_msg)
volume = self.db.volume_get(context, backup['volume_id'])
volume_dir = self._local_volume_dir(volume)
active_file_path = os.path.join(
volume_dir,
self.get_active_image_from_info(volume))
info = self._qemu_img_info(active_file_path, volume['name'])
if info.backing_file is not None:
LOG.error(_LE('No snapshots found in database, but %(path)s has '
'backing file %(backing_file)s!'),
{'path': active_file_path,
'backing_file': info.backing_file})
raise exception.InvalidVolume(snap_error_msg)
if info.file_format != 'raw':
msg = _('Backup is only supported for raw-formatted '
'GlusterFS volumes.')
raise exception.InvalidVolume(msg)
return super(GlusterfsDriver, self).backup_volume(
context, backup, backup_service)
| Paul-Ezell/cinder-1 | cinder/volume/drivers/glusterfs.py | Python | apache-2.0 | 17,430 |
import os
import shutil
from subprocess import call
def main():
# Clean the build directory
if os.path.isdir('./build'):
shutil.rmtree('./build')
# Freeze it
call('python setup.py build')
# Zip it up - 7-zip provides better compression than the zipfile module
# Make sure the 7-zip folder is on your path
file_name = 'simulation_standalone'
if os.path.isfile('{}.zip'.format(file_name)):
os.remove('{}.zip'.format(file_name))
call('7z a -tzip {}.zip simulation.xlsm'.format(file_name, file_name))
call('7z a -tzip {}.zip LICENSE.txt'.format(file_name))
call('7z a -tzip {}.zip build'.format(file_name))
if __name__ == '__main__':
main() | alekz112/xlwings | examples/simulation/build_standalone.py | Python | apache-2.0 | 704 |
"""Python API for composing notebook elements
The Python representation of a notebook is a nested structure of
dictionary subclasses that support attribute access
(IPython.utils.ipstruct.Struct). The functions in this module are merely
helpers to build the structs in the right form.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from ..notebooknode import from_dict, NotebookNode
# Change this when incrementing the nbformat version
nbformat = 4
nbformat_minor = 1
nbformat_schema = 'nbformat.v4.schema.json'
def validate(node, ref=None):
"""validate a v4 node"""
from .. import validate
return validate(node, ref=ref, version=nbformat)
def new_output(output_type, data=None, **kwargs):
"""Create a new output, to go in the ``cell.outputs`` list of a code cell."""
output = NotebookNode(output_type=output_type)
# populate defaults:
if output_type == 'stream':
output.name = u'stdout'
output.text = u''
elif output_type in {'execute_result', 'display_data'}:
output.metadata = NotebookNode()
output.data = NotebookNode()
# load from args:
output.update(from_dict(kwargs))
if data is not None:
output.data = from_dict(data)
# validate
validate(output, output_type)
return output
def output_from_msg(msg):
"""Create a NotebookNode for an output from a kernel's IOPub message.
Returns
-------
NotebookNode: the output as a notebook node.
Raises
------
ValueError: if the message is not an output message.
"""
msg_type = msg['header']['msg_type']
content = msg['content']
if msg_type == 'execute_result':
return new_output(output_type=msg_type,
metadata=content['metadata'],
data=content['data'],
execution_count=content['execution_count'],
)
elif msg_type == 'stream':
return new_output(output_type=msg_type,
name=content['name'],
text=content['text'],
)
elif msg_type == 'display_data':
return new_output(output_type=msg_type,
metadata=content['metadata'],
data=content['data'],
)
elif msg_type == 'error':
return new_output(output_type=msg_type,
ename=content['ename'],
evalue=content['evalue'],
traceback=content['traceback'],
)
else:
raise ValueError("Unrecognized output msg type: %r" % msg_type)
def new_code_cell(source='', **kwargs):
"""Create a new code cell"""
cell = NotebookNode(
cell_type='code',
metadata=NotebookNode(),
execution_count=None,
source=source,
outputs=[],
)
cell.update(from_dict(kwargs))
validate(cell, 'code_cell')
return cell
def new_markdown_cell(source='', **kwargs):
"""Create a new markdown cell"""
cell = NotebookNode(
cell_type='markdown',
source=source,
metadata=NotebookNode(),
)
cell.update(from_dict(kwargs))
validate(cell, 'markdown_cell')
return cell
def new_raw_cell(source='', **kwargs):
"""Create a new raw cell"""
cell = NotebookNode(
cell_type='raw',
source=source,
metadata=NotebookNode(),
)
cell.update(from_dict(kwargs))
validate(cell, 'raw_cell')
return cell
def new_worksheet(name=None, cells=None, metadata=None):
"""Create a worksheet by name with with a list of cells."""
ws = NotebookNode()
if cells is None:
ws.cells = []
else:
ws.cells = list(cells)
ws.metadata = NotebookNode(metadata or {})
return ws
def new_notebook(name=None, metadata=None, worksheets=None):
"""Create a notebook by name, id and a list of worksheets."""
nb = NotebookNode()
nb.nbformat = nbformat
nb.nbformat_minor = nbformat_minor
if worksheets is None:
nb.worksheets = []
else:
nb.worksheets = list(worksheets)
if metadata is None:
nb.metadata = new_metadata()
else:
nb.metadata = NotebookNode(metadata)
if name is not None:
nb.metadata.name = cast_unicode(name)
return nb
def new_metadata(name=None, authors=None, license=None, created=None,
modified=None, gistid=None):
"""Create a new metadata node."""
metadata = NotebookNode()
if name is not None:
metadata.name = cast_unicode(name)
if authors is not None:
metadata.authors = list(authors)
if created is not None:
metadata.created = cast_unicode(created)
if modified is not None:
metadata.modified = cast_unicode(modified)
if license is not None:
metadata.license = cast_unicode(license)
if gistid is not None:
metadata.gistid = cast_unicode(gistid)
return metadata
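# Illustrative usage sketch (editorial, not part of the original module):
# assembling cells with the v4 helpers. Each helper validates its node
# against the bundled schema, so jsonschema must be importable.
# cell = new_code_cell(source='print("hi")', execution_count=1)
# cell.outputs.append(new_output('stream', name='stdout', text='hi\n'))
# note = new_markdown_cell(source='# Title')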
| wusung/ipython-notebook-tabs | kyper/nbformat/v40/nbbase.py | Python | apache-2.0 | 4,854 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Show the current failures in the repository."""
import sys
from cliff import command
import testtools
from stestr import output
from stestr.repository import util
from stestr import results
from stestr import user_config
class Failing(command.Command):
"""Show the current failures known by the repository.
Without --subunit, the process exit code will be non-zero if the
previous test run was not successful and test failures are shown. But,
with --subunit, the process exit code is non-zero only if the subunit
stream could not be generated successfully from any failures. The test
results and run status are included in the subunit stream emitted for
    the failed tests, so the stream should be used for interpreting the
failing tests. If no subunit stream is emitted with --subunit and a
zero exit code then there were no failures in the most recent run in
the repository.
"""
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument(
"--subunit", action="store_true",
default=False, help="Show output as a subunit stream.")
parser.add_argument(
"--list", action="store_true",
default=False, help="Show only a list of failing tests.")
return parser
def take_action(self, parsed_args):
user_conf = user_config.get_user_config(self.app_args.user_config)
args = parsed_args
if getattr(user_conf, 'failing', False):
list_opt = args.list or user_conf.failing.get('list', False)
else:
list_opt = args.list
return failing(repo_type=self.app_args.repo_type,
repo_url=self.app_args.repo_url,
list_tests=list_opt, subunit=args.subunit)
def _show_subunit(run):
stream = run.get_subunit_stream()
if getattr(sys.stdout, 'buffer', False):
sys.stdout.buffer.write(stream.read())
else:
sys.stdout.write(stream.read())
return 0
def _make_result(repo, list_tests=False, stdout=sys.stdout):
if list_tests:
list_result = testtools.StreamSummary()
return list_result, list_result
else:
def _get_id():
return repo.get_latest_run().get_id()
output_result = results.CLITestResult(_get_id,
stdout, None)
summary_result = output_result.get_summary()
return output_result, summary_result
def failing(repo_type='file', repo_url=None, list_tests=False, subunit=False,
stdout=sys.stdout):
"""Print the failing tests from the most recent run in the repository
This function will print to STDOUT whether there are any tests that failed
in the last run. It optionally will print the test_ids for the failing
tests if ``list_tests`` is true. If ``subunit`` is true a subunit stream
with just the failed tests will be printed to STDOUT.
    Note this function depends on the cwd for the repository: if `repo_type`
    is set to file and `repo_url` is not specified, it will use the
    repository located at CWD/.stestr.
:param str repo_type: This is the type of repository to use. Valid choices
are 'file' and 'sql'.
:param str repo_url: The url of the repository to use.
    :param bool list_tests: Show only a list of failing tests.
:param bool subunit: Show output as a subunit stream.
:param file stdout: The output file to write all output to. By default
this is sys.stdout
:return return_code: The exit code for the command. 0 for success and > 0
for failures.
:rtype: int
"""
if repo_type not in ['file', 'sql']:
        stdout.write('Repository type %s is not a valid type' % repo_type)
return 1
repo = util.get_repo_open(repo_type, repo_url)
run = repo.get_failing()
if subunit:
return _show_subunit(run)
case = run.get_test()
failed = False
result, summary = _make_result(repo, list_tests=list_tests)
result.startTestRun()
try:
case.run(result)
finally:
result.stopTestRun()
failed = not results.wasSuccessful(summary)
if failed:
result = 1
else:
result = 0
if list_tests:
failing_tests = [
test for test, _ in summary.errors + summary.failures]
output.output_tests(failing_tests, output=stdout)
return result
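# Illustrative sketch (editorial, not part of stestr): invoking failing()
# directly against a file-type repository in the current directory
# (CWD/.stestr must exist, e.g. created by `stestr run`).
# if __name__ == '__main__':
#     sys.exit(failing(repo_type='file', list_tests=True))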
| mtreinish/stestr | stestr/commands/failing.py | Python | apache-2.0 | 4,990 |
#!/usr/bin/env python
# encoding: utf-8
from .user import User
| luke0922/celery_learning | application/models/__init__.py | Python | apache-2.0 | 63 |
'''
1.create private vpc router network with cidr
2.check dhcp ip address
@author Antony WeiJiang
'''
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.net_operations as net_ops
import test_stub_for_dhcp_ip as test_stub_dhcp
import random
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
l2_query_resource = res_ops.L2_NETWORK
type_l2 = ["L2NoVlanNetwork","L2VlanNetwork"]
l3_name = "test_dhcp_server"
ip_range_name = "dhcp_ip_range"
ip_Version = [4,6]
networkcidr = "192.168.1.0/24"
dhcp_ip_for_private_vpc = "192.168.1.3"
dhcp_system_tags = ["flatNetwork::DhcpServer::"+dhcp_ip_for_private_vpc+"::ipUuid::null"]
def test():
test_util.test_logger("start dhcp test for l3 public network")
test_util.test_dsc("get no vlan network uuid")
private_vpcnetwork = test_stub_dhcp.VpcNetwork_IP_For_Dhcp()
private_vpcnetwork.set_l2_query_resource(l2_query_resource)
private_vpcnetwork.set_l2_type(type_l2[1])
l2_no_vlan_uuid = private_vpcnetwork.get_l2uuid()
test_util.test_logger("antony @@@debug : %s" %(l2_no_vlan_uuid))
test_util.test_logger("create l3 network")
private_vpcnetwork.set_ipVersion(ip_Version[0])
private_vpcnetwork.create_l3uuid(l3_name)
test_util.test_logger("antony @@@debug : %s" %(private_vpcnetwork.get_l3uuid()))
private_vpcnetwork.add_service_to_l3_vpcnetwork()
test_util.test_logger("add ip v4 range to l3 network")
private_vpcnetwork.add_ip_by_networkcidr(ip_range_name, networkcidr, dhcp_system_tags)
if private_vpcnetwork.check_dhcp_ipaddress().find(dhcp_ip_for_private_vpc) == -1:
test_util.test_fail("dhcp server ip create fail")
test_util.test_logger("delete l3 network")
private_vpcnetwork.del_l3uuid()
test_util.test_pass("dhcp server ip create successfully")
'''
to be defined
'''
def error_cleanup():
pass
'''
to be defined
'''
def env_recover():
pass
| zstackorg/zstack-woodpecker | integrationtest/vm/simulator/dhcp_server_ip/test_dhcp_for_vpcrouter_cidr.py | Python | apache-2.0 | 2,037 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_BUDDYNS
from libcloud.dns.drivers.buddyns import BuddyNSDNSDriver
from libcloud.utils.py3 import httplib
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
from libcloud.dns.base import Zone
class BuddyNSDNSTests(unittest.TestCase):
def setUp(self):
BuddyNSMockHttp.type = None
BuddyNSDNSDriver.connectionCls.conn_class = BuddyNSMockHttp
self.driver = BuddyNSDNSDriver(*DNS_PARAMS_BUDDYNS)
self.test_zone = Zone(
id="test.com",
type="master",
ttl=None,
domain="test.com",
extra={},
driver=self,
)
def test_list_zones_empty(self):
BuddyNSMockHttp.type = "EMPTY_ZONES_LIST"
zones = self.driver.list_zones()
self.assertEqual(zones, [])
def test_list_zones_success(self):
BuddyNSMockHttp.type = "LIST_ZONES"
zones = self.driver.list_zones()
self.assertEqual(len(zones), 2)
zone = zones[0]
self.assertEqual(zone.id, "microsoft.com")
self.assertIsNone(zone.type)
self.assertEqual(zone.domain, "microsoft.com")
self.assertIsNone(zone.ttl)
zone = zones[1]
self.assertEqual(zone.id, "google.de")
self.assertIsNone(zone.type)
self.assertEqual(zone.domain, "google.de")
self.assertIsNone(zone.ttl)
def test_delete_zone_zone_does_not_exist(self):
BuddyNSMockHttp.type = "DELETE_ZONE_ZONE_DOES_NOT_EXIST"
try:
self.driver.delete_zone(zone=self.test_zone)
except ZoneDoesNotExistError as e:
self.assertEqual(e.zone_id, self.test_zone.id)
else:
self.fail("Exception was not thrown")
def test_delete_zone_success(self):
BuddyNSMockHttp.type = "DELETE_ZONE_SUCCESS"
status = self.driver.delete_zone(zone=self.test_zone)
self.assertTrue(status)
def test_get_zone_zone_does_not_exist(self):
BuddyNSMockHttp.type = "GET_ZONE_ZONE_DOES_NOT_EXIST"
try:
self.driver.get_zone(zone_id="zonedoesnotexist.com")
except ZoneDoesNotExistError as e:
self.assertEqual(e.zone_id, "zonedoesnotexist.com")
else:
self.fail("Exception was not thrown")
def test_get_zone_success(self):
BuddyNSMockHttp.type = "GET_ZONE_SUCCESS"
zone = self.driver.get_zone(zone_id="myexample.com")
self.assertEqual(zone.id, "myexample.com")
self.assertEqual(zone.domain, "myexample.com")
self.assertIsNone(zone.type)
self.assertIsNone(zone.ttl)
self.assertEqual(zone.driver, self.driver)
def test_create_zone_success(self):
BuddyNSMockHttp.type = "CREATE_ZONE_SUCCESS"
zone = self.driver.create_zone(domain="microsoft.com")
self.assertEqual(zone.id, "microsoft.com")
self.assertEqual(zone.domain, "microsoft.com")
self.assertIsNone(zone.type),
self.assertIsNone(zone.ttl)
def test_create_zone_zone_already_exists(self):
BuddyNSMockHttp.type = "CREATE_ZONE_ZONE_ALREADY_EXISTS"
try:
self.driver.create_zone(domain="newzone.com", extra={"master": "13.0.0.1"})
except ZoneAlreadyExistsError as e:
self.assertEqual(e.zone_id, "newzone.com")
else:
self.fail("Exception was not thrown")
class BuddyNSMockHttp(MockHttp):
fixtures = DNSFileFixtures("buddyns")
def _api_v2_zone_EMPTY_ZONES_LIST(self, method, url, body, headers):
body = self.fixtures.load("empty_zones_list.json")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _api_v2_zone_LIST_ZONES(self, method, url, body, headers):
body = self.fixtures.load("list_zones.json")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _api_v2_zone_zonedoesnotexist_com_GET_ZONE_ZONE_DOES_NOT_EXIST(
self, method, url, body, headers
):
body = self.fixtures.load("zone_does_not_exist.json")
return 404, body, {}, httplib.responses[httplib.OK]
def _api_v2_zone_myexample_com_GET_ZONE_SUCCESS(self, method, url, body, headers):
body = self.fixtures.load("get_zone_success.json")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _api_v2_zone_test_com_DELETE_ZONE_SUCCESS(self, method, url, body, headers):
body = self.fixtures.load("delete_zone_success.json")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _api_v2_zone_test_com_DELETE_ZONE_ZONE_DOES_NOT_EXIST(
self, method, url, body, headers
):
body = self.fixtures.load("zone_does_not_exist.json")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _api_v2_zone_CREATE_ZONE_SUCCESS(self, method, url, body, headers):
body = self.fixtures.load("create_zone_success.json")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _api_v2_zone_CREATE_ZONE_ZONE_ALREADY_EXISTS(self, method, url, body, headers):
body = self.fixtures.load("zone_already_exists.json")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
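    # Note (editorial): MockHttp dispatches each request to a method named
    # after the URL path ('/' and '.' mapped to '_') with the current
    # BuddyNSMockHttp.type appended, which is why every test sets `type`
    # before exercising the driver.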
if __name__ == "__main__":
sys.exit(unittest.main())
| apache/libcloud | libcloud/test/dns/test_buddyns.py | Python | apache-2.0 | 6,194 |
# Copyright 2014 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.tests import base
BR_PREFIX = 'test-br'
class BaseLinuxTestCase(base.BaseTestCase):
def setUp(self, root_helper='sudo'):
super(BaseLinuxTestCase, self).setUp()
self.root_helper = root_helper
def check_command(self, cmd, error_text, skip_msg):
try:
utils.execute(cmd)
except RuntimeError as e:
if error_text in str(e):
self.skipTest(skip_msg)
raise
def check_sudo_enabled(self):
if os.environ.get('OS_SUDO_TESTING') not in base.TRUE_STRING:
self.skipTest('testing with sudo is not enabled')
def get_rand_name(self, max_length, prefix='test'):
name = prefix + str(random.randint(1, 0x7fffffff))
return name[:max_length]
def create_resource(self, name_prefix, creation_func, *args, **kwargs):
"""Create a new resource that does not already exist.
:param name_prefix: The prefix for a randomly generated name
:param creation_func: A function taking the name of the resource
            to be created as its first argument. An error is assumed
to indicate a name collision.
:param *args *kwargs: These will be passed to the create function.
"""
while True:
name = self.get_rand_name(n_const.DEV_NAME_MAX_LEN, name_prefix)
try:
return creation_func(name, *args, **kwargs)
except RuntimeError:
continue
class BaseOVSLinuxTestCase(BaseLinuxTestCase):
def setUp(self, root_helper='sudo'):
super(BaseOVSLinuxTestCase, self).setUp(root_helper)
self.ovs = ovs_lib.BaseOVS(self.root_helper)
def create_ovs_bridge(self, br_prefix=BR_PREFIX):
br = self.create_resource(br_prefix, self.ovs.add_bridge)
self.addCleanup(br.destroy)
return br
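# Illustrative sketch (editorial, not part of the original module): a
# functional test can obtain a throwaway bridge with a random 'test-br'
# name; cleanup is registered automatically via addCleanup.
# class MyOVSTestCase(BaseOVSLinuxTestCase):
#     def test_something(self):
#         br = self.create_ovs_bridge()
#         ...  # exercise `br`; it is destroyed on teardown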
| onecloud/neutron | neutron/tests/functional/agent/linux/base.py | Python | apache-2.0 | 2,637 |
import numpy as np
from scipy.io.wavfile import write
# Interpret the raw capture as 16-bit PCM samples (native byte order) and
# wrap them in a WAV container at a 16 kHz sample rate.
a = np.fromfile('/tmp/file.raw', dtype='int16')
write('/tmp/file.wav', 16000, a)
| Jokeren/neon | loader/test/raw_to_wav.py | Python | apache-2.0 | 136 |
# Copyright 2015-2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import git
import os
import string
def latest_commit_sha(repo, path):
"""That the last commit sha for a given path in repo"""
log_message = repo.git.log("-1", path)
commit_sha = log_message.split('\n')[0].split(' ')[1]
return commit_sha
def parse_manifest(manifest, repo, repo_name):
# For each release
for release_name, release_data in list(manifest['release_names'].items()):
print('release_name: ', release_name)
# For each os supported
at_least_one_tag = False
for os_name, os_data in list(release_data['os_names'].items()):
print('os_name: ', os_name)
# For each os code name supported
for os_code_name, os_code_data in list(os_data['os_code_names'].items()):
print('os_code_name: ', os_code_name)
if os_code_data['tag_names']:
at_least_one_tag = True
for tag_name, tag_data in os_code_data['tag_names'].items():
print('tag_name: ', tag_name)
tags = []
for alias_pattern in tag_data['aliases']:
alias_template = string.Template(alias_pattern)
alias = alias_template.substitute(
release_name=release_name,
os_name=os_name,
os_code_name=os_code_name)
tags.append(alias)
commit_path = os.path.join(
repo_name, release_name,
os_name, os_code_name, tag_name)
commit_sha = latest_commit_sha(repo, commit_path)
print('tags: ', tags)
tag_data['Tags'] = tags
tag_data['Architectures'] = os_code_data['archs']
tag_data['GitCommit'] = commit_sha
tag_data['Directory'] = commit_path
if not at_least_one_tag:
del manifest['release_names'][release_name]
return manifest
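# Usage sketch (editorial, not part of the original module): resolving the
# latest commit for a path in a checked-out repository. The repo location
# and path below are hypothetical.
# repo = git.Repo('/path/to/docker_images_checkout')
# sha = latest_commit_sha(repo, 'ros/kinetic/ubuntu/xenial/ros-core')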
| osrf/docker_templates | docker_templates/library.py | Python | apache-2.0 | 2,737 |
"""Test Home Assistant json utility functions."""
from json import JSONEncoder
import os
import sys
from tempfile import mkdtemp
import unittest
from unittest.mock import Mock
import pytest
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util.json import SerializationError, load_json, save_json
# Test data that can be saved as JSON
TEST_JSON_A = {"a": 1, "B": "two"}
TEST_JSON_B = {"a": "one", "B": 2}
# Test data that can not be saved as JSON (keys must be strings)
TEST_BAD_OBJECT = {("A",): 1}
# Test data that can not be loaded as JSON
TEST_BAD_SERIALIZED = "THIS IS NOT JSON\n"
TMP_DIR = None
def setup():
"""Set up for tests."""
global TMP_DIR
TMP_DIR = mkdtemp()
def teardown():
"""Clean up after tests."""
for fname in os.listdir(TMP_DIR):
os.remove(os.path.join(TMP_DIR, fname))
os.rmdir(TMP_DIR)
def _path_for(leaf_name):
return os.path.join(TMP_DIR, leaf_name + ".json")
def test_save_and_load():
"""Test saving and loading back."""
fname = _path_for("test1")
save_json(fname, TEST_JSON_A)
data = load_json(fname)
assert data == TEST_JSON_A
# Skipped on Windows
@unittest.skipIf(
sys.platform.startswith("win"), "private permissions not supported on Windows"
)
def test_save_and_load_private():
"""Test we can load private files and that they are protected."""
fname = _path_for("test2")
save_json(fname, TEST_JSON_A, private=True)
data = load_json(fname)
assert data == TEST_JSON_A
stats = os.stat(fname)
assert stats.st_mode & 0o77 == 0
def test_overwrite_and_reload():
"""Test that we can overwrite an existing file and read back."""
fname = _path_for("test3")
save_json(fname, TEST_JSON_A)
save_json(fname, TEST_JSON_B)
data = load_json(fname)
assert data == TEST_JSON_B
def test_save_bad_data():
"""Test error from trying to save unserialisable data."""
fname = _path_for("test4")
with pytest.raises(SerializationError):
save_json(fname, TEST_BAD_OBJECT)
def test_load_bad_data():
"""Test error from trying to load unserialisable data."""
fname = _path_for("test5")
with open(fname, "w") as fh:
        fh.write(TEST_BAD_SERIALIZED)
with pytest.raises(HomeAssistantError):
load_json(fname)
def test_custom_encoder():
"""Test serializing with a custom encoder."""
class MockJSONEncoder(JSONEncoder):
"""Mock JSON encoder."""
def default(self, o):
"""Mock JSON encode method."""
return "9"
fname = _path_for("test6")
save_json(fname, Mock(), encoder=MockJSONEncoder)
data = load_json(fname)
assert data == "9"
| leppa/home-assistant | tests/util/test_json.py | Python | apache-2.0 | 2,695 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Libvirt volume driver for iSCSI"""
from os_brick import exception as os_brick_exception
from os_brick.initiator import connector
from oslo_log import log as logging
import nova.conf
from nova import utils
from nova.virt.libvirt.volume import volume as libvirt_volume
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class LibvirtISCSIVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, host):
super(LibvirtISCSIVolumeDriver, self).__init__(host,
is_block_dev=True)
# Call the factory here so we can support
# more than x86 architectures.
self.connector = connector.InitiatorConnector.factory(
'ISCSI', utils.get_root_helper(),
use_multipath=CONF.libvirt.volume_use_multipath,
device_scan_attempts=CONF.libvirt.num_volume_scan_tries,
transport=self._get_transport())
def _get_transport(self):
if CONF.libvirt.iscsi_iface:
transport = CONF.libvirt.iscsi_iface
else:
transport = 'default'
return transport
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtISCSIVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
conf.driver_io = "native"
return conf
def connect_volume(self, connection_info, disk_info, instance):
"""Attach the volume to instance_name."""
LOG.debug("Calling os-brick to attach iSCSI Volume")
device_info = self.connector.connect_volume(connection_info['data'])
LOG.debug("Attached iSCSI volume %s", device_info)
connection_info['data']['device_path'] = device_info['path']
def disconnect_volume(self, connection_info, disk_dev, instance):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach iSCSI Volume")
try:
self.connector.disconnect_volume(connection_info['data'], None)
except os_brick_exception.VolumeDeviceNotFound as exc:
LOG.warning('Ignoring VolumeDeviceNotFound: %s', exc)
return
LOG.debug("Disconnected iSCSI Volume %s", disk_dev)
super(LibvirtISCSIVolumeDriver,
self).disconnect_volume(connection_info, disk_dev, instance)
def extend_volume(self, connection_info, instance):
"""Extend the volume."""
LOG.debug("calling os-brick to extend iSCSI Volume", instance=instance)
new_size = self.connector.extend_volume(connection_info['data'])
LOG.debug("Extend iSCSI Volume %s; new_size=%s",
connection_info['data']['device_path'],
new_size, instance=instance)
return new_size
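# Illustrative note (editorial, not part of the driver): the minimal shape
# of the connection_info dict these methods consume; all values below are
# hypothetical.
# connection_info = {'data': {
#     'target_iqn': 'iqn.2010-10.org.openstack:volume-...',
#     'target_portal': '192.168.0.10:3260',
#     'target_lun': 1,
# }}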
| Juniper/nova | nova/virt/libvirt/volume/iscsi.py | Python | apache-2.0 | 3,538 |
revision = 'e966a3afd100'
down_revision = '954c3c4caf32'
branch_labels = None
depends_on = None
import alembic
import sqlalchemy
import requests
import pytz
import dateutil.parser
import datetime
def upgrade():
patreon_users = alembic.op.create_table("patreon_users",
sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("patreon_id", sqlalchemy.Text, unique=True),
sqlalchemy.Column("full_name", sqlalchemy.Text, nullable=False),
sqlalchemy.Column("access_token", sqlalchemy.Text),
sqlalchemy.Column("refresh_token", sqlalchemy.Text),
sqlalchemy.Column("token_expires", sqlalchemy.DateTime(timezone=True)),
sqlalchemy.Column("pledge_start", sqlalchemy.DateTime(timezone=True)),
sqlalchemy.Column("last_announce_month", sqlalchemy.Integer),
)
alembic.op.add_column("users",
sqlalchemy.Column("patreon_user",
sqlalchemy.Integer, sqlalchemy.ForeignKey("patreon_users.id", onupdate="CASCADE", ondelete="SET NULL"),
unique=True,
)
)
# TODO: migrate
conn = alembic.op.get_bind()
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
users = meta.tables["users"]
existing_accounts = conn.execute(sqlalchemy.select([users.c.id, users.c.patreon_access_token, users.c.patreon_refresh_token, users.c.patreon_token_expires])
.where(users.c.patreon_access_token.isnot(None)))
all_patreon_users = []
all_users = []
clientid = alembic.context.config.get_section_option('lrrbot', 'patreon_clientid')
clientsecret = alembic.context.config.get_section_option('lrrbot', 'patreon_clientsecret')
with requests.Session() as session:
for user_id, access_token, refresh_token, expires in existing_accounts:
now = datetime.datetime.now(tz=pytz.utc)
if expires < now:
req = session.post("https://api.patreon.com/oauth2/token", data={
'grant_type': 'refresh_token',
'client_id': clientid,
'client_secret': clientsecret,
'refresh_token': refresh_token
})
req.raise_for_status()
data = req.json()
access_token = data["access_token"]
refresh_token = data["refresh_token"]
expires = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=data["expires_in"])
req = session.get("https://api.patreon.com/oauth2/api/current_user", headers={"Authorization": "Bearer %s" % access_token})
req.raise_for_status()
data = req.json()
user = {
"patreon_id": data["data"]["id"],
"full_name": data["data"]["attributes"]["full_name"],
"access_token": access_token,
"refresh_token": refresh_token,
"token_expires": expires,
}
if 'pledges' in data["data"].get("relationships", {}):
for pledge in data["data"]["relationships"]["pledges"]["data"]:
for obj in data["included"]:
if obj["id"] == pledge["id"] and obj["type"] == pledge["type"]:
user["pledge_start"] = dateutil.parser.parse(obj["attributes"]["created_at"])
all_patreon_users.append(user)
all_users.append((user_id, data["data"]["id"]))
alembic.op.bulk_insert(patreon_users, all_patreon_users)
for user_id, patreon_id in all_users:
conn.execute(users.update()
.values(patreon_user=patreon_users.c.id)
.where(users.c.id == user_id)
.where(patreon_users.c.patreon_id == patreon_id)
)
alembic.op.drop_column("users", "patreon_access_token")
alembic.op.drop_column("users", "patreon_refresh_token")
alembic.op.drop_column("users", "patreon_token_expires")
def downgrade():
alembic.op.add_column("users", sqlalchemy.Column("patreon_access_token", sqlalchemy.Text))
alembic.op.add_column("users", sqlalchemy.Column("patreon_refresh_token", sqlalchemy.Text))
alembic.op.add_column("users", sqlalchemy.Column("patreon_token_expires", sqlalchemy.DateTime(timezone=True)))
conn = alembic.op.get_bind()
meta = sqlalchemy.MetaData(bind=conn)
meta.reflect()
users = meta.tables["users"]
patreon_users = meta.tables["patreon_users"]
    # The column added in upgrade() is "patreon_user"; correlate on it and
    # keep .values() inside the executed statement.
    alembic.op.execute(users.update().where(users.c.patreon_user == patreon_users.c.id).values({
        "patreon_access_token": patreon_users.c.access_token,
        "patreon_refresh_token": patreon_users.c.refresh_token,
        "patreon_token_expires": patreon_users.c.token_expires,
    }))
    alembic.op.drop_column("users", "patreon_user")
    alembic.op.drop_table("patreon_users")
| mrphlip/lrrbot | alembic/versions/e966a3afd100_separate_patreon_user_table.py | Python | apache-2.0 | 4,230 |
##############################################################################
#copyright 2013, Hamid MEDJAHED ([email protected]) Prologue #
#Licensed under the Apache License, Version 2.0 (the "License"); #
#you may not use this file except in compliance with the License. #
#You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
#Unless required by applicable law or agreed to in writing, software #
#distributed under the License is distributed on an "AS IS" BASIS, #
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
#See the License for the specific language governing permissions and #
#limitations under the License. #
##############################################################################
import HTMLParser
data = '''
<table cellspacing="0" class="table table-bordered table-hover table-condensed" id="data">
<thead>
<tr>
<th class="name">Name</th>
<th class="memory">Memory</th>
<th class="computeunits">
<abbr title="One EC2 Compute Unit provides the equivalent CPU capacity of a 1.0-1.2 GHz 2007 Opteron or 2007 Xeon processor.">Compute Units</abbr>
</th>
<th class="storage">Storage</th>
<th class="architecture">Architecture</th>
<th class="ioperf">I/O Performance</th>
<th class="maxips">
<abbr title="Adding additional IPs requires launching the instance in a VPC.">Max IPs</abbr>
</th>
<th class="apiname">API Name</th>
<th class="cost">Linux cost</th>
<th class="cost">Windows cost</th>
</tr>
</thead>
<tbody>
<tr>
<td class="name">M1 Small</td>
<td class="memory"><span sort="1.7">1.70 GB</span></td>
<td class="computeunits"><span sort="1">1</span></td>
<td class="storage"><span sort="160">160 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">8</td>
<td class="apiname">m1.small</td>
<td class="cost" hour_cost="0.060">$0.060 per hour</td>
<td class="cost" hour_cost="0.115">$0.115 per hour</td>
</tr>
<tr>
<td class="name">M1 Medium</td>
<td class="memory"><span sort="3.75">3.75 GB</span></td>
<td class="computeunits"><span sort="2">2</span></td>
<td class="storage"><span sort="410">410 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">12</td>
<td class="apiname">m1.medium</td>
<td class="cost" hour_cost="0.12">$0.12 per hour</td>
<td class="cost" hour_cost="0.23">$0.23 per hour</td>
</tr>
<tr>
<td class="name">M1 Large</td>
<td class="memory"><span sort="7.5">7.50 GB</span></td>
<td class="computeunits"><span sort="4">4</span></td>
<td class="storage"><span sort="850">850 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">30</td>
<td class="apiname">m1.large</td>
<td class="cost" hour_cost="0.24">$0.24 per hour</td>
<td class="cost" hour_cost="0.46">$0.46 per hour</td>
</tr>
<tr>
<td class="name">M1 Extra Large</td>
<td class="memory"><span sort="15">15.00 GB</span></td>
<td class="computeunits"><span sort="8">8</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">60</td>
<td class="apiname">m1.xlarge</td>
<td class="cost" hour_cost="0.48">$0.48 per hour</td>
<td class="cost" hour_cost="0.92">$0.92 per hour</td>
</tr>
<tr>
<td class="name">Micro</td>
<td class="memory"><span sort="0.6">0.60 GB</span></td>
<td class="computeunits"><span sort="2">2</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="0">Low</span></td>
<td class="maxips">1</td>
<td class="apiname">t1.micro</td>
<td class="cost" hour_cost="0.02">$0.02 per hour</td>
<td class="cost" hour_cost="0.02">$0.02 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Extra Large</td>
<td class="memory"><span sort="17.10">17.10 GB</span></td>
<td class="computeunits"><span sort="6.5">6.5</span></td>
<td class="storage"><span sort="420">420 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">60</td>
<td class="apiname">m2.xlarge</td>
<td class="cost" hour_cost="0.41">$0.41 per hour</td>
<td class="cost" hour_cost="0.57">$0.57 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Double Extra Large</td>
<td class="memory"><span sort="34.2">34.20 GB</span></td>
<td class="computeunits"><span sort="13">13</span></td>
<td class="storage"><span sort="850">850 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">120</td>
<td class="apiname">m2.2xlarge</td>
<td class="cost" hour_cost="0.82">$0.82 per hour</td>
<td class="cost" hour_cost="1.14">$1.14 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Quadruple Extra Large</td>
<td class="memory"><span sort="68.4">68.40 GB</span></td>
<td class="computeunits"><span sort="26">26</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">240</td>
<td class="apiname">m2.4xlarge</td>
<td class="cost" hour_cost="1.64">$1.64 per hour</td>
<td class="cost" hour_cost="2.28">$2.28 per hour</td>
</tr>
<tr>
<td class="name">M3 Extra Large</td>
<td class="memory"><span sort="15">15.00 GB</span></td>
<td class="computeunits"><span sort="13">13</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">60</td>
<td class="apiname">m3.xlarge</td>
<td class="cost" hour_cost="0.50">$0.50 per hour</td>
<td class="cost" hour_cost="0.98">$0.98 per hour</td>
</tr>
<tr>
<td class="name">M3 Double Extra Large</td>
<td class="memory"><span sort="30">30.00 GB</span></td>
<td class="computeunits"><span sort="26">26</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">120</td>
<td class="apiname">m3.2xlarge</td>
<td class="cost" hour_cost="1.00">$1.00 per hour</td>
<td class="cost" hour_cost="1.96">$1.96 per hour</td>
</tr>
<tr>
<td class="name">High-CPU Medium</td>
<td class="memory"><span sort="1.7">1.70 GB</span></td>
<td class="computeunits"><span sort="5">5</span></td>
<td class="storage"><span sort="350">350 GB</span></td>
<td class="architecture">32_64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">12</td>
<td class="apiname">c1.medium</td>
<td class="cost" hour_cost="0.145">$0.145 per hour</td>
<td class="cost" hour_cost="0.285">$0.285 per hour</td>
</tr>
<tr>
<td class="name">High-CPU Extra Large</td>
<td class="memory"><span sort="7">7.00 GB</span></td>
<td class="computeunits"><span sort="20">20</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">60</td>
<td class="apiname">c1.xlarge</td>
<td class="cost" hour_cost="0.58">$0.58 per hour</td>
<td class="cost" hour_cost="1.14">$1.14 per hour</td>
</tr>
<tr>
<td class="name">Cluster Compute Quadruple Extra Large</td>
<td class="memory"><span sort="23">23.00 GB</span></td>
<td class="computeunits"><span sort="33.5">33.5</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">cc1.4xlarge</td>
<td class="cost" hour_cost="1.30">$1.30 per hour</td>
<td class="cost" hour_cost="1.61">$1.61 per hour</td>
</tr>
<tr>
<td class="name">Cluster Compute Eight Extra Large</td>
<td class="memory"><span sort="60.5">60.50 GB</span></td>
<td class="computeunits"><span sort="88">88</span></td>
<td class="storage"><span sort="3370">3370 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">240</td>
<td class="apiname">cc2.8xlarge</td>
<td class="cost" hour_cost="2.40">$2.40 per hour</td>
<td class="cost" hour_cost="2.97">$2.97 per hour</td>
</tr>
<tr>
<td class="name">Cluster GPU Quadruple Extra Large</td>
<td class="memory"><span sort="22">22.00 GB</span></td>
<td class="computeunits"><span sort="33.5">33.5</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">cg1.4xlarge</td>
<td class="cost" hour_cost="2.10">$2.10 per hour</td>
<td class="cost" hour_cost="2.60">$2.60 per hour</td>
</tr>
<tr>
<td class="name">High I/O Quadruple Extra Large</td>
<td class="memory"><span sort="60.5">60.50 GB</span></td>
<td class="computeunits"><span sort="35">35</span></td>
<td class="storage"><span sort="2048">2048 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">hi1.4xlarge</td>
<td class="cost" hour_cost="3.10">$3.10 per hour</td>
<td class="cost" hour_cost="3.58">$3.58 per hour</td>
</tr>
<tr>
<td class="name">High Storage Eight Extra Large</td>
<td class="memory"><span sort="117.00">117.00 GB</span></td>
<td class="computeunits"><span sort="35">35</span></td>
<td class="storage"><span sort="49152">48 TB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">hs1.8xlarge</td>
<td class="cost" hour_cost="4.600">$4.600 per hour</td>
<td class="cost" hour_cost="4.931">$4.931 per hour</td>
</tr>
<tr>
<td class="name">High Memory Cluster Eight Extra Large</td>
<td class="memory"><span sort="244.00">244.00 GB</span></td>
<td class="computeunits"><span sort="88">88</span></td>
<td class="storage"><span sort="240">240 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</sort></td>
<td class="maxips">1</td>
<td class="apiname">cr1.8xlarge</td>
<td class="cost" hour_cost="3.500">$3.500 per hour</td>
<td class="cost" hour_cost="3.831">$3.831 per hour</td>
</tr>
</tbody>
</table> '''
class TableParser(HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self.in_td = False
self.flavors = []
def handle_starttag(self, tag, attrs):
if tag == 'td':
self.in_td = True
def handle_data(self, data):
if self.in_td:
self.flavors.append(data)
    def handle_endtag(self, tag):
        # Only leave cell-capture mode when the <td> itself closes, so text
        # following nested </span>/</abbr> tags inside a cell is not dropped.
        if tag == 'td':
            self.in_td = False
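# Usage sketch (editorial, not part of the original script): feed the
# scraped HTML to the parser. handle_data() fires once per text node, so
# nested <span>/<abbr> markup can yield more entries than table cells.
# parser = TableParser()
# parser.feed(data)
# print(parser.flavors[:12])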
| compatibleone/accords-platform | pyaccords/pysrc/ec2instanceinfo.py | Python | apache-2.0 | 13,279 |
# Copyright 2015-2017 F-Secure
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from collections import namedtuple
from see.interfaces import Hook
from see.helpers import lookup_class
HookParameters = namedtuple('HookParameters', ('identifier',
'configuration',
'context'))
def hooks_factory(identifier, configuration, context):
"""
Returns the initialized hooks.
"""
manager = HookManager(identifier, configuration)
manager.load_hooks(context)
return manager
class HookManager(object):
"""
The Hooks Manager takes care the Hooks allocation, configuration
and deallocation.
"""
def __init__(self, identifier, configuration):
self.hooks = []
self.identifier = identifier
self.configuration = configuration
self.logger = logging.getLogger(
'%s.%s' % (self.__module__, self.__class__.__name__))
def load_hooks(self, context):
"""
Initializes the Hooks and loads them within the Environment.
"""
for hook in self.configuration.get('hooks', ()):
config = hook.get('configuration', {})
config.update(self.configuration.get('configuration', {}))
try:
self._load_hook(hook['name'], config, context)
except KeyError:
self.logger.exception('Provided hook has no name: %s.', hook)
def _load_hook(self, name, configuration, context):
self.logger.debug('Loading %s hook.', name)
try:
HookClass = lookup_hook_class(name)
hook = HookClass(HookParameters(self.identifier,
configuration,
context))
self.hooks.append(hook)
except Exception as error:
self.logger.exception('Hook %s initialization failure, error: %s.',
name, error)
def cleanup(self):
for hook in self.hooks:
try:
hook.cleanup()
except NotImplementedError:
pass
except Exception as error:
self.logger.exception('Hook %s cleanup error: %s.',
hook.__class__.__name__, error)
self.hooks = []
def lookup_hook_class(name):
HookClass = lookup_class(name)
if not issubclass(HookClass, Hook):
raise ValueError("%r is not subclass of of %r" % (HookClass, Hook))
else:
return HookClass
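# Illustrative sketch (editorial, not part of the original module): the
# configuration shape hooks_factory() expects. The dotted path below is
# hypothetical; lookup_class() must resolve it to a Hook subclass.
# config = {
#     'hooks': [{'name': 'mypkg.hooks.MyHook',
#                'configuration': {'results_folder': '/tmp/see'}}],
#     'configuration': {'shared_option': True},  # merged into every hook
# }
# manager = hooks_factory('test-identifier', config, context=None)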
| F-Secure/see | see/hooks.py | Python | apache-2.0 | 3,119 |
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=attribute-defined-outside-init
import wlauto.core.signal as signal
from wlauto import Module
from wlauto.exceptions import DeviceError
class CpuidleState(object):
@property
def usage(self):
return self.get('usage')
@property
def time(self):
return self.get('time')
@property
def disable(self):
return self.get('disable')
@disable.setter
def disable(self, value):
self.set('disable', value)
@property
def ordinal(self):
i = len(self.id)
while self.id[i - 1].isdigit():
i -= 1
if not i:
raise ValueError('invalid idle state name: "{}"'.format(self.id))
return int(self.id[i:])
def __init__(self, device, path):
self.device = device
self.path = path
self.id = self.device.path.basename(self.path)
self.cpu = self.device.path.basename(self.device.path.dirname(path))
self.desc = self.get('desc')
self.name = self.get('name')
self.latency = self.get('latency')
self.power = self.get('power')
def get(self, prop):
property_path = self.device.path.join(self.path, prop)
return self.device.get_sysfile_value(property_path)
def set(self, prop, value):
property_path = self.device.path.join(self.path, prop)
self.device.set_sysfile_value(property_path, value)
def __eq__(self, other):
if isinstance(other, CpuidleState):
return (self.name == other.name) and (self.desc == other.desc)
elif isinstance(other, basestring):
return (self.name == other) or (self.desc == other)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
class Cpuidle(Module):
name = 'cpuidle'
description = """
    Adds cpuidle state query and manipulation APIs to a Device interface.
"""
capabilities = ['cpuidle']
root_path = '/sys/devices/system/cpu/cpuidle'
def probe(self, device):
return device.file_exists(self.root_path)
def initialize(self, context):
self.device = self.root_owner
signal.connect(self._on_device_init, signal.RUN_INIT, priority=1)
def get_cpuidle_driver(self):
return self.device.get_sysfile_value(self.device.path.join(self.root_path, 'current_driver')).strip()
def get_cpuidle_governor(self):
return self.device.get_sysfile_value(self.device.path.join(self.root_path, 'current_governor_ro')).strip()
def get_cpuidle_states(self, cpu=0):
if isinstance(cpu, int):
cpu = 'cpu{}'.format(cpu)
states_dir = self.device.path.join(self.device.path.dirname(self.root_path), cpu, 'cpuidle')
idle_states = []
for state in self.device.listdir(states_dir):
if state.startswith('state'):
idle_states.append(CpuidleState(self.device, self.device.path.join(states_dir, state)))
return idle_states
def _on_device_init(self, context): # pylint: disable=unused-argument
if not self.device.file_exists(self.root_path):
raise DeviceError('Device kernel does not appear to have cpuidle enabled.')
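    # Illustrative sketch (editorial, not part of the original module): once
    # loaded on a device, the module exposes calls like the following; the
    # `device` object here is hypothetical.
    # for state in device.get_cpuidle_states(cpu=0):
    #     print(state.id, state.name, state.latency)
    #     state.disable = 1   # writes cpuidle/state<N>/disable via sysfs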
| freedomtan/workload-automation | wlauto/modules/cpuidle.py | Python | apache-2.0 | 3,815 |
'''
Integration Test Teardown case
@author: Youyk
'''
import zstacklib.utils.linux as linux
import zstacklib.utils.http as http
import zstackwoodpecker.setup_actions as setup_actions
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.clean_util as clean_util
import zstackwoodpecker.test_lib as test_lib
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
def test():
clean_util.cleanup_all_vms_violently()
clean_util.cleanup_none_vm_volumes_violently()
clean_util.umount_all_primary_storages_violently()
clean_util.cleanup_backup_storage()
#linux.remove_vlan_eth("eth0", 10)
#linux.remove_vlan_eth("eth0", 11)
cmd = host_plugin.DeleteVlanDeviceCmd()
cmd.vlan_ethname = 'eth0.10'
hosts = test_lib.lib_get_all_hosts_from_plan()
if type(hosts) != type([]):
hosts = [hosts]
for host in hosts:
http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.DELETE_VLAN_DEVICE_PATH), cmd)
cmd.vlan_ethname = 'eth0.11'
for host in hosts:
http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.DELETE_VLAN_DEVICE_PATH), cmd)
test_lib.setup_plan.stop_node()
test_lib.lib_cleanup_host_ip_dict()
test_util.test_pass('VPC Teardown Success')
| zstackio/zstack-woodpecker | integrationtest/vm/vpc_ha/suite_teardown.py | Python | apache-2.0 | 1,332 |
"""passlib.bcrypt -- implementation of OpenBSD's BCrypt algorithm.
TODO:
* support 2x and altered-2a hashes?
http://www.openwall.com/lists/oss-security/2011/06/27/9
* deal with lack of PY3-compatible c-ext implementation
"""
#=============================================================================
# imports
#=============================================================================
from __future__ import with_statement, absolute_import
# core
import os
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
try:
from bcrypt import hashpw as pybcrypt_hashpw
except ImportError: # pragma: no cover
pybcrypt_hashpw = None
try:
from bcryptor.engine import Engine as bcryptor_engine
except ImportError: # pragma: no cover
bcryptor_engine = None
# pkg
from passlib.exc import PasslibHashWarning
from passlib.utils import bcrypt64, safe_crypt, repeat_string, \
classproperty, rng, getrandstr, test_crypt
from passlib.utils.compat import bytes, b, u, uascii_to_str, unicode, str_to_uascii
import passlib.utils.handlers as uh
# local
__all__ = [
"bcrypt",
]
#=============================================================================
# support funcs & constants
#=============================================================================
_builtin_bcrypt = None
def _load_builtin():
global _builtin_bcrypt
if _builtin_bcrypt is None:
from passlib.utils._blowfish import raw_bcrypt as _builtin_bcrypt
IDENT_2 = u("$2$")
IDENT_2A = u("$2a$")
IDENT_2X = u("$2x$")
IDENT_2Y = u("$2y$")
_BNULL = b('\x00')
#=============================================================================
# handler
#=============================================================================
class bcrypt(uh.HasManyIdents, uh.HasRounds, uh.HasSalt, uh.HasManyBackends, uh.GenericHandler):
"""This class implements the BCrypt password hash, and follows the :ref:`password-hash-api`.
It supports a fixed-length salt, and a variable number of rounds.
The :meth:`~passlib.ifc.PasswordHash.encrypt` and :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 22 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 12, must be between 4 and 31, inclusive.
This value is logarithmic, the actual number of iterations used will be :samp:`2**{rounds}`
-- increasing the rounds by +1 will double the amount of time taken.
:type ident: str
:param ident:
Specifies which version of the BCrypt algorithm will be used when creating a new hash.
Typically this option is not needed, as the default (``"2a"``) is usually the correct choice.
If specified, it must be one of the following:
* ``"2"`` - the first revision of BCrypt, which suffers from a minor security flaw and is generally not used anymore.
* ``"2a"`` - latest revision of the official BCrypt algorithm, and the current default.
* ``"2y"`` - format specific to the *crypt_blowfish* BCrypt implementation,
identical to ``"2a"`` in all but name.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~passlib.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
.. versionchanged:: 1.6
This class now supports ``"2y"`` hashes, and recognizes
(but does not support) the broken ``"2x"`` hashes.
(see the :ref:`crypt_blowfish bug <crypt-blowfish-bug>`
for details).
.. versionchanged:: 1.6
Added a pure-python backend.
"""
#===================================================================
# class attrs
#===================================================================
#--GenericHandler--
name = "bcrypt"
setting_kwds = ("salt", "rounds", "ident")
checksum_size = 31
checksum_chars = bcrypt64.charmap
#--HasManyIdents--
default_ident = u("$2a$")
ident_values = (u("$2$"), IDENT_2A, IDENT_2X, IDENT_2Y)
ident_aliases = {u("2"): u("$2$"), u("2a"): IDENT_2A, u("2y"): IDENT_2Y}
#--HasSalt--
min_salt_size = max_salt_size = 22
salt_chars = bcrypt64.charmap
# NOTE: 22nd salt char must be in bcrypt64._padinfo2[1], not full charmap
#--HasRounds--
default_rounds = 12 # current passlib default
min_rounds = 4 # bcrypt spec specified minimum
max_rounds = 31 # 32-bit integer limit (since real_rounds=1<<rounds)
rounds_cost = "log2"
#===================================================================
# formatting
#===================================================================
@classmethod
def from_string(cls, hash):
ident, tail = cls._parse_ident(hash)
if ident == IDENT_2X:
raise ValueError("crypt_blowfish's buggy '2x' hashes are not "
"currently supported")
rounds_str, data = tail.split(u("$"))
rounds = int(rounds_str)
if rounds_str != u('%02d') % (rounds,):
raise uh.exc.MalformedHashError(cls, "malformed cost field")
salt, chk = data[:22], data[22:]
return cls(
rounds=rounds,
salt=salt,
checksum=chk or None,
ident=ident,
)
def to_string(self):
hash = u("%s%02d$%s%s") % (self.ident, self.rounds, self.salt,
self.checksum or u(''))
return uascii_to_str(hash)
def _get_config(self, ident=None):
"internal helper to prepare config string for backends"
if ident is None:
ident = self.ident
if ident == IDENT_2Y:
ident = IDENT_2A
else:
assert ident != IDENT_2X
config = u("%s%02d$%s") % (ident, self.rounds, self.salt)
return uascii_to_str(config)
#===================================================================
# specialized salt generation - fixes passlib issue 25
#===================================================================
@classmethod
def _bind_needs_update(cls, **settings):
return cls._needs_update
@classmethod
def _needs_update(cls, hash, secret):
if isinstance(hash, bytes):
hash = hash.decode("ascii")
# check for incorrect padding bits (passlib issue 25)
if hash.startswith(IDENT_2A) and hash[28] not in bcrypt64._padinfo2[1]:
return True
# TODO: try to detect incorrect $2x$ hashes using *secret*
return False
@classmethod
def normhash(cls, hash):
"helper to normalize hash, correcting any bcrypt padding bits"
if cls.identify(hash):
return cls.from_string(hash).to_string()
else:
return hash
def _generate_salt(self, salt_size):
# override to correct generate salt bits
salt = super(bcrypt, self)._generate_salt(salt_size)
return bcrypt64.repair_unused(salt)
def _norm_salt(self, salt, **kwds):
salt = super(bcrypt, self)._norm_salt(salt, **kwds)
assert salt is not None, "HasSalt didn't generate new salt!"
changed, salt = bcrypt64.check_repair_unused(salt)
if changed:
# FIXME: if salt was provided by user, this message won't be
# correct. not sure if we want to throw error, or use different warning.
warn(
"encountered a bcrypt salt with incorrectly set padding bits; "
"you may want to use bcrypt.normhash() "
"to fix this; see Passlib 1.5.3 changelog.",
PasslibHashWarning)
return salt
def _norm_checksum(self, checksum):
checksum = super(bcrypt, self)._norm_checksum(checksum)
if not checksum:
return None
changed, checksum = bcrypt64.check_repair_unused(checksum)
if changed:
warn(
"encountered a bcrypt hash with incorrectly set padding bits; "
"you may want to use bcrypt.normhash() "
"to fix this; see Passlib 1.5.3 changelog.",
PasslibHashWarning)
return checksum
#===================================================================
# primary interface
#===================================================================
backends = ("pybcrypt", "bcryptor", "os_crypt", "builtin")
@classproperty
def _has_backend_pybcrypt(cls):
return pybcrypt_hashpw is not None
@classproperty
def _has_backend_bcryptor(cls):
return bcryptor_engine is not None
@classproperty
def _has_backend_builtin(cls):
if os.environ.get("PASSLIB_BUILTIN_BCRYPT") not in ["enable","enabled"]:
return False
# look at it cross-eyed, and it loads itself
_load_builtin()
return True
@classproperty
def _has_backend_os_crypt(cls):
# XXX: what to do if only h2 is supported? h1 is *very* rare.
h1 = '$2$04$......................1O4gOrCYaqBG3o/4LnT2ykQUt1wbyju'
h2 = '$2a$04$......................qiOQjkB8hxU8OzRhS.GhRMa4VUnkPty'
return test_crypt("test",h1) and test_crypt("test", h2)
@classmethod
def _no_backends_msg(cls):
return "no bcrypt backends available - please install py-bcrypt"
def _calc_checksum_os_crypt(self, secret):
config = self._get_config()
hash = safe_crypt(secret, config)
if hash:
assert hash.startswith(config) and len(hash) == len(config)+31
return hash[-31:]
else:
# NOTE: it's unlikely any other backend will be available,
# but checking before we bail, just in case.
for name in self.backends:
if name != "os_crypt" and self.has_backend(name):
func = getattr(self, "_calc_checksum_" + name)
return func(secret)
raise uh.exc.MissingBackendError(
"password can't be handled by os_crypt, "
"recommend installing py-bcrypt.",
)
def _calc_checksum_pybcrypt(self, secret):
# py-bcrypt behavior:
# py2: unicode secret/hash encoded as ascii bytes before use,
# bytes taken as-is; returns ascii bytes.
# py3: not supported (patch submitted)
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
if _BNULL in secret:
raise uh.exc.NullPasswordError(self)
config = self._get_config()
hash = pybcrypt_hashpw(secret, config)
assert hash.startswith(config) and len(hash) == len(config)+31
return str_to_uascii(hash[-31:])
def _calc_checksum_bcryptor(self, secret):
# bcryptor behavior:
# py2: unicode secret/hash encoded as ascii bytes before use,
# bytes taken as-is; returns ascii bytes.
# py3: not supported
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
if _BNULL in secret:
# NOTE: especially important to forbid NULLs for bcryptor,
# since it happily accepts them, and then silently truncates
# the password at first one it encounters :(
raise uh.exc.NullPasswordError(self)
if self.ident == IDENT_2:
# bcryptor doesn't support $2$ hashes; but we can fake $2$ behavior
# using the $2a$ algorithm, by repeating the password until
# it's at least 72 chars in length.
if secret:
secret = repeat_string(secret, 72)
config = self._get_config(IDENT_2A)
else:
config = self._get_config()
hash = bcryptor_engine(False).hash_key(secret, config)
assert hash.startswith(config) and len(hash) == len(config)+31
return str_to_uascii(hash[-31:])
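    # Illustrative sketch (not part of the original module): the $2$
    # fallback above relies on repeating the secret out to bcrypt's
    # 72-byte input limit, roughly:
    #
    #     >>> def repeat_to(secret, size):  # hypothetical stand-in for repeat_string()
    #     ...     return (secret * (size // len(secret) + 1))[:size]
    #     >>> repeat_to(b"abc", 8)
    #     'abcabcab'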
def _calc_checksum_builtin(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
if _BNULL in secret:
raise uh.exc.NullPasswordError(self)
chk = _builtin_bcrypt(secret, self.ident.strip("$"),
self.salt.encode("ascii"), self.rounds)
return chk.decode("ascii")
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#=============================================================================
| ioram7/keystone-federado-pgid2013 | build/passlib/passlib/handlers/bcrypt.py | Python | apache-2.0 | 13,180 |
# Copyright (c) 2011 Intel Corporation
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The FilterScheduler is for creating shares.
You can customize this scheduler by specifying your own share Filters and
Weighing Functions.
"""
import operator
from manila import exception
from manila.openstack.common import importutils
from manila.openstack.common import log as logging
from manila.scheduler import driver
from manila.scheduler import scheduler_options
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class FilterScheduler(driver.Scheduler):
"""Scheduler that can be used for filtering and weighing."""
def __init__(self, *args, **kwargs):
super(FilterScheduler, self).__init__(*args, **kwargs)
self.cost_function_cache = None
self.options = scheduler_options.SchedulerOptions()
self.max_attempts = self._max_attempts()
def schedule(self, context, topic, method, *args, **kwargs):
"""The schedule() contract requires we return the one
best-suited host for this request.
"""
self._schedule(context, topic, *args, **kwargs)
def _get_configuration_options(self):
"""Fetch options dictionary. Broken out for testing."""
return self.options.get_configuration()
def _post_select_populate_filter_properties(self, filter_properties,
host_state):
"""Add additional information to the filter properties after a host has
been selected by the scheduling process.
"""
# Add a retry entry for the selected volume backend:
self._add_retry_host(filter_properties, host_state.host)
def _add_retry_host(self, filter_properties, host):
"""Add a retry entry for the selected volume backend. In the event that
the request gets re-scheduled, this entry will signal that the given
backend has already been tried.
"""
retry = filter_properties.get('retry', None)
if not retry:
return
hosts = retry['hosts']
hosts.append(host)
def _max_attempts(self):
max_attempts = CONF.scheduler_max_attempts
if max_attempts < 1:
msg = _("Invalid value for 'scheduler_max_attempts', "
"must be >=1")
raise exception.InvalidParameterValue(err=msg)
return max_attempts
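    # Illustrative manila.conf snippet (the option name comes from the lookup
    # above; the value is just an example):
    #
    #     [DEFAULT]
    #     scheduler_max_attempts = 3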
def schedule_create_share(self, context, request_spec, filter_properties):
weighed_host = self._schedule_share(context,
request_spec,
filter_properties)
if not weighed_host:
raise exception.NoValidHost(reason="")
host = weighed_host.obj.host
share_id = request_spec['share_id']
snapshot_id = request_spec['snapshot_id']
updated_share = driver.share_update_db(context, share_id, host)
self._post_select_populate_filter_properties(filter_properties,
weighed_host.obj)
# context is not serializable
filter_properties.pop('context', None)
self.share_rpcapi.create_share(context, updated_share, host,
request_spec=request_spec,
filter_properties=filter_properties,
snapshot_id=snapshot_id)
def _schedule_share(self, context, request_spec, filter_properties=None):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
share_properties = request_spec['share_properties']
        # Since Manila is using mixed filters from Oslo and its own, which
        # take 'resource_XX' and 'volume_XX' as input respectively, copying
        # 'volume_XX' to 'resource_XX' will make both filters happy.
resource_properties = share_properties.copy()
share_type = request_spec.get("share_type", {})
resource_type = request_spec.get("share_type", {})
request_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()
if filter_properties is None:
filter_properties = {}
self._populate_retry_share(filter_properties, resource_properties)
filter_properties.update({'context': context,
'request_spec': request_spec,
'config_options': config_options,
'share_type': share_type,
'resource_type': resource_type
})
self.populate_filter_properties_share(request_spec, filter_properties)
        # Find our local list of acceptable hosts by filtering and
        # weighing our options. We virtually consume resources on
        # the selected host so that subsequent selections can adjust
        # accordingly.
        # Note: remember, we are using an iterator here, so only
        # traverse this list once.
hosts = self.host_manager.get_all_host_states_share(elevated)
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(hosts,
filter_properties)
if not hosts:
return None
LOG.debug(_("Filtered share %(hosts)s") % locals())
        # Weigh the filtered hosts and pick the best-suited
        # host for the job.
weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
filter_properties)
best_host = weighed_hosts[0]
LOG.debug(_("Choosing for share: %(best_host)s") % locals())
#NOTE(rushiagr): updating the available space parameters at same place
best_host.obj.consume_from_volume(share_properties)
return best_host
def _populate_retry_share(self, filter_properties, properties):
"""Populate filter properties with history of retries for this
request. If maximum retries is exceeded, raise NoValidHost.
"""
max_attempts = self.max_attempts
retry = filter_properties.pop('retry', {})
if max_attempts == 1:
# re-scheduling is disabled.
return
# retry is enabled, update attempt count:
if retry:
retry['num_attempts'] += 1
else:
retry = {
'num_attempts': 1,
'hosts': [] # list of share service hosts tried
}
filter_properties['retry'] = retry
share_id = properties.get('share_id')
self._log_share_error(share_id, retry)
if retry['num_attempts'] > max_attempts:
msg = _("Exceeded max scheduling attempts %(max_attempts)d for "
"share %(share_id)s") % locals()
raise exception.NoValidHost(reason=msg)
def _log_share_error(self, share_id, retry):
"""If the request contained an exception from a previous share
create operation, log it to aid debugging.
"""
exc = retry.pop('exc', None) # string-ified exception from share
if not exc:
return # no exception info from a previous attempt, skip
hosts = retry.get('hosts', None)
if not hosts:
return # no previously attempted hosts, skip
last_host = hosts[-1]
msg = _("Error scheduling %(share_id)s from last share-service: "
"%(last_host)s : %(exc)s") % locals()
LOG.error(msg)
def populate_filter_properties_share(self, request_spec,
filter_properties):
"""Stuff things into filter_properties. Can be overridden in a
subclass to add more data.
"""
shr = request_spec['share_properties']
filter_properties['size'] = shr['size']
filter_properties['availability_zone'] = shr.get('availability_zone')
filter_properties['user_id'] = shr.get('user_id')
filter_properties['metadata'] = shr.get('metadata')
| tucbill/manila | manila/scheduler/filter_scheduler.py | Python | apache-2.0 | 8,816 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Mozilla Firefox history database plugin."""
import collections
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import firefox_history
from tests.parsers.sqlite_plugins import test_lib
class FirefoxHistoryPluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Mozilla Firefox history database plugin."""
def testProcessPriorTo24(self):
"""Tests the Process function on a Firefox History database file."""
# This is probably version 23 but potentially an older version.
plugin = firefox_history.FirefoxHistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['places.sqlite'], plugin)
# The places.sqlite file contains 205 events (1 page visit,
# 2 x 91 bookmark records, 2 x 3 bookmark annotations,
# 2 x 8 bookmark folders).
# However there are three events that do not have a timestamp
# so the test file will show 202 extracted events.
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 202)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
# Check the first page visited event.
expected_event_values = {
'data_type': 'firefox:places:page_visited',
'date_time': '2011-07-01 11:16:21.371935',
'host': 'news.google.com',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
'title': 'Google News',
'url': 'http://news.google.com/',
'visit_count': 1,
'visit_type': 2}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Check the first bookmark event.
expected_event_values = {
'data_type': 'firefox:places:bookmark',
'date_time': '2011-07-01 11:13:59.266344',
'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
# Check the second bookmark event.
expected_event_values = {
'data_type': 'firefox:places:bookmark',
'date_time': '2011-07-01 11:13:59.267198',
'places_title': (
'folder=BOOKMARKS_MENU&folder=UNFILED_BOOKMARKS&folder=TOOLBAR&'
'sort=12&excludeQueries=1&excludeItemIfParentHasAnnotation=livemark'
'%2FfeedURI&maxResults=10&queryType=1'),
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
'title': 'Recently Bookmarked',
'type': 'URL',
'url': (
'place:folder=BOOKMARKS_MENU&folder=UNFILED_BOOKMARKS&folder='
'TOOLBAR&sort=12&excludeQueries=1&excludeItemIfParentHasAnnotation='
'livemark%2FfeedURI&maxResults=10&queryType=1'),
'visit_count': 0}
self.CheckEventValues(storage_writer, events[2], expected_event_values)
# Check the first bookmark annotation event.
expected_event_values = {
'data_type': 'firefox:places:bookmark_annotation',
'date_time': '2011-07-01 11:13:59.267146',
'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED}
self.CheckEventValues(storage_writer, events[183], expected_event_values)
# Check another bookmark annotation event.
expected_event_values = {
'content': 'RecentTags',
'data_type': 'firefox:places:bookmark_annotation',
'date_time': '2011-07-01 11:13:59.267605',
'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED,
'title': 'Recent Tags',
'url': 'place:sort=14&type=6&maxResults=10&queryType=1'}
self.CheckEventValues(storage_writer, events[184], expected_event_values)
# Check the second last bookmark folder event.
expected_event_values = {
'data_type': 'firefox:places:bookmark_folder',
'date_time': '2011-03-21 10:05:01.553774',
'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED}
self.CheckEventValues(storage_writer, events[200], expected_event_values)
# Check the last bookmark folder event.
expected_event_values = {
'data_type': 'firefox:places:bookmark_folder',
'date_time': '2011-07-01 11:14:11.766851',
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
'title': 'Latest Headlines'}
self.CheckEventValues(storage_writer, events[201], expected_event_values)
def testProcessVersion25(self):
"""Tests the Process function on a Firefox History database file v 25."""
plugin = firefox_history.FirefoxHistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['places_new.sqlite'], plugin)
    # The places.sqlite file contains 84 events:
    # 34 page visits,
    # 28 bookmarks,
    # 14 bookmark folders and
    # 8 annotations.
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 84)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
counter = collections.Counter()
for event in events:
event_data = self._GetEventDataOfEvent(storage_writer, event)
counter[event_data.data_type] += 1
self.assertEqual(counter['firefox:places:bookmark'], 28)
self.assertEqual(counter['firefox:places:page_visited'], 34)
self.assertEqual(counter['firefox:places:bookmark_folder'], 14)
self.assertEqual(counter['firefox:places:bookmark_annotation'], 8)
expected_event_values = {
'data_type': 'firefox:places:page_visited',
'date_time': '2013-10-30 21:57:11.281942',
'host': 'code.google.com',
'url': 'http://code.google.com/p/plaso',
'visit_count': 1,
'visit_type': 2}
self.CheckEventValues(storage_writer, events[10], expected_event_values)
if __name__ == '__main__':
unittest.main()
| log2timeline/plaso | tests/parsers/sqlite_plugins/firefox_history.py | Python | apache-2.0 | 6,326 |
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from .factory import toolkit_factory
myTextEditor = toolkit_factory("text_editor", "myTextEditor")
# ============= EOF =============================================
| USGSDenverPychron/pychron | pychron/core/ui/text_editor.py | Python | apache-2.0 | 1,141 |
# Copyright 2015 Google Inc. All Rights Reserved.
"""Context manager to help with Control-C handling during critical commands."""
import signal
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.test.lib import exit_code
class CancellableTestSection(object):
"""Cancel a test matrix if CTRL-C is typed during a section of code.
While within this context manager, the CTRL-C signal is caught and a test
matrix is cancelled. This should only be used with a section of code where
the test matrix is running.
"""
def __init__(self, matrix_id, testing_api_helper):
self._old_handler = None
self._matrix_id = matrix_id
self._testing_api_helper = testing_api_helper
def __enter__(self):
self._old_handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, self._Handler)
return self
def __exit__(self, typ, value, traceback):
signal.signal(signal.SIGINT, self._old_handler)
return False
def _Handler(self, unused_signal, unused_frame):
log.status.write('\n\nCancelling test [{id}]...\n\n'
.format(id=self._matrix_id))
self._testing_api_helper.CancelTestMatrix(self._matrix_id)
raise exceptions.ExitCodeNoError(exit_code=exit_code.MATRIX_CANCELLED)
| wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/test/lib/ctrl_c_handler.py | Python | apache-2.0 | 1,298 |
#!/usr/bin/env python
import sys, random
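# Example invocation (illustrative file name and count):
#
#     ./get_file_list.py filenames.txt 10
#
# prints 10 lines, each a path drawn at random (with replacement) from
# filenames.txt.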
if len(sys.argv) != 3:
sys.stderr.write("Must provide file with list of filenames and number of files to pick\n")
sys.exit(1)
file_list = open(sys.argv[1])
file_array = []
for filepath in file_list:
file_array.append(filepath.strip())
try:
choices = int(sys.argv[2])
except ValueError:
sys.stderr.write("Can't get the number of files to pick\n")
sys.exit(1)
for i in range(choices):
sys.stdout.write("%s\n" % random.choice(file_array))
| DHTC-Tools/parrot-benchmark-tools | Root/parrot-root/get_file_list.py | Python | apache-2.0 | 494 |
from __future__ import division
import json
import os
import copy
import collections
import argparse
import csv
import neuroglancer
import neuroglancer.cli
import numpy as np
class State(object):
def __init__(self, path):
self.path = path
self.body_labels = collections.OrderedDict()
def load(self):
if os.path.exists(self.path):
with open(self.path, 'r') as f:
self.body_labels = collections.OrderedDict(json.load(f))
def save(self):
tmp_path = self.path + '.tmp'
with open(tmp_path, 'w') as f:
            # list() keeps this valid on Python 3, where dict items() is a
            # view that json cannot serialize directly.
            f.write(json.dumps(list(self.body_labels.items())))
os.rename(tmp_path, self.path)
Body = collections.namedtuple('Body', ['segment_id', 'num_voxels', 'bbox_start', 'bbox_size'])
class Tool(object):
def __init__(self, state_path, bodies, labels, segmentation_url, image_url, num_to_prefetch):
self.state = State(state_path)
self.num_to_prefetch = num_to_prefetch
self.viewer = neuroglancer.Viewer()
self.bodies = bodies
self.state.load()
self.total_voxels = sum(x.num_voxels for x in bodies)
self.cumulative_voxels = np.cumsum([x.num_voxels for x in bodies])
with self.viewer.txn() as s:
s.layers['image'] = neuroglancer.ImageLayer(source=image_url)
s.layers['segmentation'] = neuroglancer.SegmentationLayer(source=segmentation_url)
s.show_slices = False
s.concurrent_downloads = 256
s.gpu_memory_limit = 2 * 1024 * 1024 * 1024
s.layout = '3d'
key_bindings = [
['bracketleft', 'prev-index'],
['bracketright', 'next-index'],
['home', 'first-index'],
['end', 'last-index'],
['control+keys', 'save'],
]
label_keys = ['keyd', 'keyf', 'keyg', 'keyh']
for label, label_key in zip(labels, label_keys):
key_bindings.append([label_key, 'label-%s' % label])
def label_func(s, label=label):
self.set_label(s, label)
self.viewer.actions.add('label-%s' % label, label_func)
self.viewer.actions.add('prev-index', self._prev_index)
self.viewer.actions.add('next-index', self._next_index)
self.viewer.actions.add('first-index', self._first_index)
self.viewer.actions.add('last-index', self._last_index)
self.viewer.actions.add('save', self.save)
with self.viewer.config_state.txn() as s:
for key, command in key_bindings:
s.input_event_bindings.viewer[key] = command
s.status_messages['help'] = ('KEYS: ' + ' | '.join('%s=%s' % (key, command)
for key, command in key_bindings))
self.index = -1
self.set_index(self._find_one_after_last_labeled_index())
def _find_one_after_last_labeled_index(self):
body_index = 0
while self.bodies[body_index].segment_id in self.state.body_labels:
body_index += 1
return body_index
def set_index(self, index):
if index == self.index:
return
body = self.bodies[index]
self.index = index
def modify_state_for_body(s, body):
s.layers['segmentation'].segments = frozenset([body.segment_id])
s.voxel_coordinates = body.bbox_start + body.bbox_size // 2
with self.viewer.txn() as s:
modify_state_for_body(s, body)
prefetch_states = []
for i in range(self.num_to_prefetch):
prefetch_index = self.index + i + 1
if prefetch_index >= len(self.bodies):
break
prefetch_state = copy.deepcopy(self.viewer.state)
prefetch_state.layout = '3d'
modify_state_for_body(prefetch_state, self.bodies[prefetch_index])
prefetch_states.append(prefetch_state)
with self.viewer.config_state.txn() as s:
s.prefetch = [
neuroglancer.PrefetchState(state=prefetch_state, priority=-i)
for i, prefetch_state in enumerate(prefetch_states)
]
label = self.state.body_labels.get(body.segment_id, '')
with self.viewer.config_state.txn() as s:
s.status_messages['status'] = (
'[Segment %d/%d : %d/%d voxels labeled = %.3f fraction] label=%s' %
(index, len(self.bodies), self.cumulative_voxels[index], self.total_voxels,
self.cumulative_voxels[index] / self.total_voxels, label))
def save(self, s):
self.state.save()
def set_label(self, s, label):
self.state.body_labels[self.bodies[self.index].segment_id] = label
self.set_index(self.index + 1)
def _first_index(self, s):
self.set_index(0)
def _last_index(self, s):
self.set_index(max(0, self._find_one_after_last_labeled_index() - 1))
def _next_index(self, s):
self.set_index(self.index + 1)
def _prev_index(self, s):
self.set_index(max(0, self.index - 1))
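# The --bodies CSV parsed in main() below must provide at least the columns
# named in the DictReader lookups; the data row here is illustrative:
#
#     id,num_voxels,bbox.start.x,bbox.start.y,bbox.start.z,bbox.size.x,bbox.size.y,bbox.size.z
#     7091,123456,100,200,30,64,64,16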
if __name__ == '__main__':
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
ap.add_argument('--image-url', required=True, help='Neuroglancer data source URL for image')
ap.add_argument('--segmentation-url',
required=True,
help='Neuroglancer data source URL for segmentation')
ap.add_argument('--state', required=True, help='Path to proofreading state file')
ap.add_argument('--bodies', required=True, help='Path to list of bodies to proofread')
ap.add_argument('--labels', nargs='+', help='Labels to use')
ap.add_argument('--prefetch', type=int, default=10, help='Number of bodies to prefetch')
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
bodies = []
with open(args.bodies, 'r') as f:
csv_reader = csv.DictReader(f)
for row in csv_reader:
bodies.append(
Body(
segment_id=int(row['id']),
num_voxels=int(row['num_voxels']),
bbox_start=np.array([
int(row['bbox.start.x']),
int(row['bbox.start.y']),
int(row['bbox.start.z'])
],
dtype=np.int64),
bbox_size=np.array(
[int(row['bbox.size.x']),
int(row['bbox.size.y']),
int(row['bbox.size.z'])],
dtype=np.int64),
))
tool = Tool(
state_path=args.state,
image_url=args.image_url,
segmentation_url=args.segmentation_url,
labels=args.labels,
bodies=bodies,
num_to_prefetch=args.prefetch,
)
print(tool.viewer)
| janelia-flyem/neuroglancer | python/neuroglancer/tool/filter_bodies.py | Python | apache-2.0 | 6,932 |
"""
# Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License.
"""
from __future__ import absolute_import
from .StrStrHashMap import *
from ..msg.Field import *
from ..msg.ImportExportHelper import *
from ..msg.StructValue import *
from ..msg.Type import *
from ..msg.ValueFactory import *
from ..support.Class2TypeMap import *
from ..support.Validator_object import *
class StrStrHashMapSerializer(ImportExportHelper):
"""
etch serializer for StrStrHashMap
"""
FIELD_NAME = "keysAndValues"
@classmethod
def init(cls, typ, class2type):
"""
Defines custom fields in the value factory so that the importer can find them
@param typ
@param class2type
"""
field = typ.getField(cls.FIELD_NAME)
class2type.put( StrStrHashMap , typ )
typ.setComponentType( StrStrHashMap )
typ.setImportExportHelper( StrStrHashMapSerializer(typ, field))
typ.putValidator(field, Validator_object.get(1))
typ.lock()
def __init__(self, typ, field):
self.__type = typ
self.__field = field
def importHelper(self, struct):
m = StrStrHashMap()
keysAndValues = struct.get(self.__field)
for i in range(0, len(keysAndValues), 2):
m[keysAndValues[i]] = keysAndValues[i+1]
return m
def exportValue(self, vf, value):
m = StrStrHashMap(value)
keysAndValues = []
for i in m.keys():
keysAndValues.append(i)
keysAndValues.append(m[i])
struct = StructValue(self.__type, vf)
struct.put(self.__field, keysAndValues)
return struct
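    # Illustrative round trip (not part of the original source): a map such
    # as {"a": "1", "b": "2"} is exported as the flat array
    # ["a", "1", "b", "2"], and importHelper() re-assembles it pairwise.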
| OBIGOGIT/etch | binding-python/runtime/src/main/python/etch/binding/util/StrStrHashMapSerializer.py | Python | apache-2.0 | 2,642 |
"""
Support for MQTT vacuums.
For more details about this platform, please refer to the documentation at
https://www.home-assistant.io/components/vacuum.mqtt/
"""
import logging
import voluptuous as vol
from homeassistant.components.vacuum import DOMAIN
from homeassistant.components.mqtt import ATTR_DISCOVERY_HASH
from homeassistant.components.mqtt.discovery import (
MQTT_DISCOVERY_NEW,
clear_discovery_hash,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .schema import CONF_SCHEMA, LEGACY, STATE, MQTT_VACUUM_SCHEMA
from .schema_legacy import PLATFORM_SCHEMA_LEGACY, async_setup_entity_legacy
from .schema_state import PLATFORM_SCHEMA_STATE, async_setup_entity_state
_LOGGER = logging.getLogger(__name__)
def validate_mqtt_vacuum(value):
"""Validate MQTT vacuum schema."""
schemas = {LEGACY: PLATFORM_SCHEMA_LEGACY, STATE: PLATFORM_SCHEMA_STATE}
return schemas[value[CONF_SCHEMA]](value)
PLATFORM_SCHEMA = vol.All(
MQTT_VACUUM_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA), validate_mqtt_vacuum
)
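# Illustrative configuration.yaml entry (the name is an assumption; the
# schema key selects between the LEGACY and STATE schemas imported above):
#
#     vacuum:
#       - platform: mqtt
#         schema: state
#         name: "Living room vacuum"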
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up MQTT vacuum through configuration.yaml."""
await _async_setup_entity(config, async_add_entities, discovery_info)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT vacuum dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add a MQTT vacuum."""
        # Initialise first so the except clause below cannot hit a NameError
        # when the payload carries no discovery hash.
        discovery_hash = None
        try:
            discovery_hash = discovery_payload.pop(ATTR_DISCOVERY_HASH)
config = PLATFORM_SCHEMA(discovery_payload)
await _async_setup_entity(
config, async_add_entities, config_entry, discovery_hash
)
except Exception:
if discovery_hash:
clear_discovery_hash(hass, discovery_hash)
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(DOMAIN, "mqtt"), async_discover
)
async def _async_setup_entity(
config, async_add_entities, config_entry, discovery_hash=None
):
"""Set up the MQTT vacuum."""
setup_entity = {LEGACY: async_setup_entity_legacy, STATE: async_setup_entity_state}
await setup_entity[config[CONF_SCHEMA]](
config, async_add_entities, config_entry, discovery_hash
)
| joopert/home-assistant | homeassistant/components/mqtt/vacuum/__init__.py | Python | apache-2.0 | 2,361 |
import json
from idpproxy.social.oauth import OAuth
import oauth2 as oauth
#from xml.etree import ElementTree as ET
import logging
logger = logging.getLogger(__name__)
__author__ = 'rohe0002'
class LinkedIn(OAuth):
def __init__(self, client_id, client_secret, **kwargs):
OAuth.__init__(self, client_id, client_secret, **kwargs)
def get_profile(self, info_set):
token = oauth.Token(key=info_set["oauth_token"][0],
secret=info_set["oauth_token_secret"][0])
client = oauth.Client(self.consumer, token)
resp, content = client.request(self.extra["userinfo_endpoint"], "GET")
# # content in XML :-(
# logger.debug("UserInfo XML: %s" % content)
# res = {}
# root = ET.fromstring(content)
# for child in root:
# res[child.tag] = child.text
res = json.loads(content)
logger.debug("userinfo: %s" % res)
res["user_id"] = info_set["oauth_token"]
return resp, res | rohe/IdPproxy | src/idpproxy/social/linkedin/__init__.py | Python | bsd-2-clause | 1,002 |
import angr
class InterlockedExchange(angr.SimProcedure):
    def run(self, target, value): #pylint:disable=arguments-differ
        # Model of the Win32 InterlockedExchange primitive: store `value` at
        # `target` and return the previous 32-bit contents.
        if not self.state.solver.symbolic(target):
            old_value = self.state.memory.load(target, 4, endness=self.state.arch.memory_endness)
            self.state.memory.store(target, value)
        else:
            # A symbolic target address cannot be dereferenced concretely, so
            # fall back to an unconstrained return value.
            old_value = self.state.solver.Unconstrained("unconstrained_ret_%s" % self.display_name, self.state.arch.bits, key=('api', 'InterlockedExchange'))
        return old_value
| angr/angr | angr/procedures/win32/InterlockedExchange.py | Python | bsd-2-clause | 525 |
# -*- coding: UTF-8 -*-
# Copyright 2019-2021 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
import subprocess
from rstgen.utils import confirm
from django.core.management.base import BaseCommand
from django.conf import settings
def runcmd(cmd, **kw): # same code as in getlino.py
"""Run the cmd similar as os.system(), but stop when Ctrl-C."""
# kw.update(stdout=subprocess.PIPE)
# kw.update(stderr=subprocess.STDOUT)
kw.update(shell=True)
kw.update(universal_newlines=True)
kw.update(check=True)
# subprocess.check_output(cmd, **kw)
subprocess.run(cmd, **kw)
# os.system(cmd)
class Command(BaseCommand):
help = "Run 'pip install --upgrade' for all Python packages required by this site."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('--noinput', action='store_false',
dest='interactive', default=True,
help='Do not prompt for input of any kind.')
parser.add_argument('-l', '--list', action='store_true',
dest='list', default=False,
help="Just list the requirements, don't install them.")
def handle(self, *args, **options):
reqs = set(settings.SITE.get_requirements())
if len(reqs) == 0:
print("No requirements")
else:
reqs = sorted(reqs)
if options['list']:
print('\n'.join(reqs))
return
runcmd('pip install --upgrade pip')
# cmd = "pip install --upgrade --trusted-host svn.forge.pallavi.be {}".format(' '.join(reqs))
cmd = "pip install --upgrade {}".format(' '.join(reqs))
if not options['interactive'] or confirm("{} (y/n) ?".format(cmd)):
runcmd(cmd)
| lino-framework/lino | lino/management/commands/install.py | Python | bsd-2-clause | 1,888 |
import shutil
import json
from rest_framework import routers, serializers, viewsets, parsers, filters
from rest_framework.views import APIView
from rest_framework.exceptions import APIException
from rest_framework.response import Response
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile, InMemoryUploadedFile
from django.core.validators import URLValidator
from base.models import Project, SeedsList
from apps.crawl_space.models import Crawl, CrawlModel
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError, NotFoundError
class DataWakeIndexUnavailable(APIException):
status_code = 404
default_detail = "The server failed to find the DataWake index in elasticsearch."
class SlugModelSerializer(serializers.ModelSerializer):
slug = serializers.SlugField(required=False, read_only=True)
class ProjectSerializer(SlugModelSerializer):
url = serializers.CharField(read_only=True)
class Meta:
model = Project
class CrawlSerializer(SlugModelSerializer):
# Expose these fields, but only as read only.
id = serializers.ReadOnlyField()
seeds_list = serializers.FileField(read_only=True, use_url=False)
status = serializers.CharField(read_only=True)
config = serializers.CharField(read_only=True)
index_name = serializers.CharField(read_only=True)
url = serializers.CharField(read_only=True)
pages_crawled = serializers.IntegerField(read_only=True)
harvest_rate = serializers.FloatField(read_only=True)
location = serializers.CharField(read_only=True)
def validate_crawler(self, value):
if value == "ache" and not self.initial_data.get("crawl_model"):
raise serializers.ValidationError("Ache crawls require a Crawl Model.")
return value
class Meta:
model = Crawl
class CrawlModelSerializer(SlugModelSerializer):
model = serializers.FileField(use_url=False)
features = serializers.FileField(use_url=False)
url = serializers.CharField(read_only=True)
def validate_model(self, value):
if value.name != "pageclassifier.model":
raise serializers.ValidationError("File must be named pageclassifier.model")
return value
def validate_features(self, value):
if value.name != "pageclassifier.features":
raise serializers.ValidationError("File must be named pageclassifier.features")
return value
class Meta:
model = CrawlModel
class SeedsListSerializer(SlugModelSerializer):
url = serializers.CharField(read_only=True)
file_string = serializers.CharField(read_only=True)
def validate_seeds(self, value):
try:
seeds = json.loads(value)
except ValueError:
raise serializers.ValidationError("Seeds must be a JSON encoded string.")
if type(seeds) != list:
raise serializers.ValidationError("Seeds must be an array of URLs.")
validator = URLValidator()
errors = []
for index, x in enumerate(seeds):
try:
validator(x)
except ValidationError:
# Add index to make it easier for CodeMirror to select the right
# line.
errors.append({index: x})
if errors:
errors.insert(0, "The seeds list contains invalid urls.")
errors.append({"list": "\n".join(seeds)})
raise serializers.ValidationError(errors)
return value
class Meta:
model = SeedsList
"""
Viewset Classes.
Filtering is provided by django-filter.
Backend settings are in common_settings.py under REST_FRAMEWORK. Setting is:
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',)
This backend is supplied to every viewset by default. Alter query fields by adding
or removing items from filter_fields
"""
class ProjectViewSet(viewsets.ModelViewSet):
queryset = Project.objects.all()
serializer_class = ProjectSerializer
filter_fields = ('id', 'slug', 'name',)
class CrawlViewSet(viewsets.ModelViewSet):
queryset = Crawl.objects.all()
serializer_class = CrawlSerializer
filter_fields = ('id', 'slug', 'name', 'description', 'status', 'project',
'crawl_model', 'crawler', 'seeds_object')
class CrawlModelViewSet(viewsets.ModelViewSet):
queryset = CrawlModel.objects.all()
serializer_class = CrawlModelSerializer
filter_fields = ('id', 'slug', 'name', 'project',)
def destroy(self, request, pk=None):
model = CrawlModel.objects.get(pk=pk)
crawls = Crawl.objects.all().filter(crawl_model=pk)
if crawls:
message = "The Crawl Model is being used by the following Crawls and cannot be deleted: "
raise serializers.ValidationError({
"message": message,
"errors": [x.name for x in crawls],
})
else:
shutil.rmtree(model.get_model_path())
return super(CrawlModelViewSet, self).destroy(request)
class SeedsListViewSet(viewsets.ModelViewSet):
queryset = SeedsList.objects.all()
serializer_class = SeedsListSerializer
filter_fields = ('id', 'name', 'seeds', 'slug',)
def create(self, request):
# If a seeds file or a textseeds exists, then use those. Otherwise, look
# for a string in request.data["seeds"]
seeds_list = request.FILES.get("seeds", False)
textseeds = request.data.get("textseeds", False)
if seeds_list:
request.data["seeds"] = json.dumps(map(str.strip, seeds_list.readlines()))
elif textseeds:
if type(textseeds) == unicode:
request.data["seeds"] = json.dumps(map(unicode.strip, textseeds.split("\n")))
# Get rid of carriage return character.
elif type(textseeds) == str:
request.data["seeds"] = json.dumps(map(str.strip, textseeds.split("\n")))
return super(SeedsListViewSet, self).create(request)
def destroy(self, request, pk=None):
seeds = SeedsList.objects.get(pk=pk)
crawls = Crawl.objects.all().filter(seeds_object=pk)
if crawls:
message = "The Seeds List is being used by the following Crawls and cannot be deleted: "
raise serializers.ValidationError({
"message": message,
"errors": [x.name for x in crawls],
})
else:
return super(SeedsListViewSet, self).destroy(request)
class DataWakeView(APIView):
index = "datawake"
es = Elasticsearch()
def create_trails(self, trail_ids):
trails = []
for x in trail_ids:
url_search = self.es.search(index=self.index, q="trail_id:%d" % x,
fields="url", size=1000)["hits"]["hits"]
new_trail = {"trail_id": x, "urls": [], "domain_name":url_search[0]["_type"]}
for y in url_search:
new_trail["urls"].append(y["fields"]["url"][0])
new_trail.update({"urls_string": "\n".join(new_trail["urls"])})
trails.append(new_trail)
return trails
def get(self, request, format=None):
# TODO: catch all exception. At the very least, deal with 404 not found and
# connection refused exceptions.
# Temporarily remove exceptions for debugging.
try:
trail_ids = [x["key"] for x in self.es.search(index=self.index, body={
"aggs" : {
"trail_id" : {
"terms" : { "field" : "trail_id" }
}
}
})["aggregations"]["trail_id"]["buckets"]]
response = self.create_trails(trail_ids)
except ConnectionError as e:
raise OSError("Failed to connect to local elasticsearch instance.")
except NotFoundError:
raise DataWakeIndexUnavailable
return Response(response)
router = routers.DefaultRouter()
router.register(r"projects", ProjectViewSet)
router.register(r"crawls", CrawlViewSet)
router.register(r"crawl_models", CrawlModelViewSet)
router.register(r"seeds_list", SeedsListViewSet)
| memex-explorer/memex-explorer | source/memex/rest.py | Python | bsd-2-clause | 8,218 |
from . import numeric as _nx
from .numeric import asanyarray, newaxis
def atleast_1d(*arys):
res = []
for ary in arys:
ary = asanyarray(ary)
        if len(ary.shape) == 0:
            result = ary.reshape(1)
        else:
            result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def atleast_2d(*arys):
res = []
for ary in arys:
ary = asanyarray(ary)
        if len(ary.shape) == 0:
            result = ary.reshape(1, 1)
        elif len(ary.shape) == 1:
            result = ary[newaxis, :]
        else:
            result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def vstack(tup):
return _nx.concatenate([atleast_2d(_m) for _m in tup], 0)
def hstack(tup):
arrs = [atleast_1d(_m) for _m in tup]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0)
else:
return _nx.concatenate(arrs, 1)
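# Illustrative behaviour, assuming afnumpy mirrors numpy's array constructor:
#
#     >>> import afnumpy as af
#     >>> af.vstack((af.array([1, 2]), af.array([3, 4]))).shape
#     (2, 2)
#     >>> af.hstack((af.array([1, 2]), af.array([3, 4]))).shape
#     (4,)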
| FilipeMaia/afnumpy | afnumpy/core/shape_base.py | Python | bsd-2-clause | 1,076 |
#!/usr/bin/env python
# Author: Timm Linder, [email protected]
#
# Publishes fake tracked persons and the corresponding detections (if not occluded) at
# /spencer/perception/tracked_persons and /spencer/perception/detected_persons.
import rospy, yaml, tf
from spencer_tracking_msgs.msg import TrackedPersons, TrackedPerson
from nav_msgs.msg import GridCells
from math import cos, sin, tan, pi, radians
def createTrackedPerson(track_id, x, y, theta):
trackedPerson = TrackedPerson()
theta = radians(theta) + pi/2.0
trackedPerson.track_id = track_id
quaternion = tf.transformations.quaternion_from_euler(0, 0, theta)
trackedPerson.pose.pose.position.x = x
trackedPerson.pose.pose.position.y = y
trackedPerson.pose.pose.orientation.x = quaternion[0]
trackedPerson.pose.pose.orientation.y = quaternion[1]
trackedPerson.pose.pose.orientation.z = quaternion[2]
trackedPerson.pose.pose.orientation.w = quaternion[3]
    # The 6x6 covariance matrix is stored row-major: index = row + column * 6.
    trackedPerson.pose.covariance[0 + 0 * 6] = 0.001 # x
    trackedPerson.pose.covariance[1 + 1 * 6] = 0.001 # y
    trackedPerson.pose.covariance[2 + 2 * 6] = 999999 # z
    trackedPerson.pose.covariance[3 + 3 * 6] = 999999 # x rotation
    trackedPerson.pose.covariance[4 + 4 * 6] = 999999 # y rotation
    trackedPerson.pose.covariance[5 + 5 * 6] = 999999 # z rotation
trackedPerson.twist.twist.linear.x = cos(theta)
trackedPerson.twist.twist.linear.y = sin(theta)
for i in range(0, 3):
trackedPerson.twist.covariance[i + i * 6] = 1.0 # linear velocity
for i in range(3, 6):
trackedPerson.twist.covariance[i + i * 6] = float("inf") # rotational velocity
return trackedPerson
def main():
# Main code
trackPublisher = rospy.Publisher('/spencer/perception/tracked_persons', TrackedPersons )
#obstaclesPublisher = rospy.Publisher('/pedsim/static_obstacles', GridCells )
rospy.init_node( 'mock_tracked_persons' )
rate = rospy.Rate(10)
#obstacles = yaml.load(OBSTACLE_YAML)
#obstacles = [ d for d in obstacles]
seqCounter = 0
while not rospy.is_shutdown():
trackedPersons = TrackedPersons()
trackedPersons.header.seq = seqCounter
trackedPersons.header.frame_id = "odom"
trackedPersons.header.stamp = rospy.Time.now()
#trackedPersons.tracks.append( createTrackedPerson( trackId, x, y, theta ) )
trackedPersons.tracks.append( createTrackedPerson( 01, 5, 4, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 02, 6, 5.45878, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 03, 7.22, 5.70, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 04, 2+7.22, 7.33, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 05, 2+8.92, 8.42, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 06, 2+7.92, 10.41, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 07, 2+7.2, 9.44, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 8, 2+7, 14-2, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 9, 2+6, 15.4123-2, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 10, 5-1, 18.595-5, 280 ) )
trackedPersons.tracks.append( createTrackedPerson( 11, 5-1, 20-5, 270 ) )
trackedPersons.tracks.append( createTrackedPerson( 12, 6-1, 21.5491-5, 240 ) )
trackedPersons.tracks.append( createTrackedPerson( 13, 7.48044-1, 19-5, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 14, 6, 24.5463, 45 ) )
trackedPersons.tracks.append( createTrackedPerson( 15, 8, 28, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 16, 10.4458, 23, 68 ) )
trackedPersons.tracks.append( createTrackedPerson( 17, 11.5004, 27, 88 ) )
trackedPersons.tracks.append( createTrackedPerson( 18, 14, 25.4389, 20 ) )
trackedPersons.tracks.append( createTrackedPerson( 19, 15, 21, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 20, 15, 22.4308, 92 ) )
trackedPersons.tracks.append( createTrackedPerson( 21, 15.4676, 24, 91 ) )
trackedPersons.tracks.append( createTrackedPerson( 22, 16.5423, 25.4178, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 23, 18, 20, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 24, 18.5532, 21.5011, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 25, 15.4739, 16.5314, 45 ) )
trackedPersons.tracks.append( createTrackedPerson( 26, 20, 25.5746, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 27, 21.5327, 24, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 28, 22, 26.4632, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 29, 21, 18, 45 ) )
trackedPersons.tracks.append( createTrackedPerson( 30, 23, 20.4335, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 31, 23.4972, 21.4055, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 32, 23.4025, 22.4749, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 33, 24.5281, 18.5868, 54 ) )
trackedPersons.tracks.append( createTrackedPerson( 34, 16.554, 3.40568-2, 94 ) )
trackedPersons.tracks.append( createTrackedPerson( 35, 16, 6-1, 94 ) )
trackedPersons.tracks.append( createTrackedPerson( 36, 20, 4, 0 ) )
trackedPersons.tracks.append( createTrackedPerson( 37, 19, 12, 25 ) )
trackedPersons.tracks.append( createTrackedPerson( 38, 23, 8, 50 ) )
trackedPersons.tracks.append( createTrackedPerson( 39, 24, 10, 90 ) )
trackedPersons.tracks.append( createTrackedPerson( 40, 25, 12, 120 ) )
trackedPersons.tracks.append( createTrackedPerson( 41, 7.51, 22.41, 80 ) )
trackedPersons.tracks.append( createTrackedPerson( 42, 8.21, 25.7, 81 ) )
trackedPersons.tracks.append( createTrackedPerson( 43, 3.31, 27.7, 81 ) )
trackedPersons.tracks.append( createTrackedPerson( 44, 11.421, 18.7, 75 ) )
trackedPersons.tracks.append( createTrackedPerson( 45, 25.21, 27.0, 85 ) )
trackedPersons.tracks.append( createTrackedPerson( 46, 18.23, 6.87, -91 ) )
trackedPersons.tracks.append( createTrackedPerson( 47, 18.6, 8.90, -90 ) )
trackedPersons.tracks.append( createTrackedPerson( 48, 20.4, 7.87, 85 ) )
trackedPersons.tracks.append( createTrackedPerson( 49, 15.684, 10.74, 75 ) )
trackedPersons.tracks.append( createTrackedPerson( 50, 15.72,14.51 , 70 ) )
trackPublisher.publish( trackedPersons )
#obstacles['header'] = trackedPersons.header
#obstaclesPublisher.publish( obstacles )
seqCounter += 1
rate.sleep()
# Constants
OBSTACLE_YAML= """
header:
seq: 48860
stamp:
secs: 0
nsecs: 0
frame_id: world
cell_width: 1.0
cell_height: 1.0
cells:
-
x: -0.5
y: -0.5
z: 0.0
-
x: 0.5
y: -0.5
z: 0.0
-
x: 1.5
y: -0.5
z: 0.0
-
x: 2.5
y: -0.5
z: 0.0
-
x: 3.5
y: -0.5
z: 0.0
-
x: 4.5
y: -0.5
z: 0.0
-
x: 5.5
y: -0.5
z: 0.0
-
x: 6.5
y: -0.5
z: 0.0
-
x: 7.5
y: -0.5
z: 0.0
-
x: 8.5
y: -0.5
z: 0.0
-
x: 9.5
y: -0.5
z: 0.0
-
x: 10.5
y: -0.5
z: 0.0
-
x: 11.5
y: -0.5
z: 0.0
-
x: 12.5
y: -0.5
z: 0.0
-
x: 13.5
y: -0.5
z: 0.0
-
x: 14.5
y: -0.5
z: 0.0
-
x: 15.5
y: -0.5
z: 0.0
-
x: 16.5
y: -0.5
z: 0.0
-
x: 17.5
y: -0.5
z: 0.0
-
x: 18.5
y: -0.5
z: 0.0
-
x: 19.5
y: -0.5
z: 0.0
-
x: 20.5
y: -0.5
z: 0.0
-
x: 21.5
y: -0.5
z: 0.0
-
x: 22.5
y: -0.5
z: 0.0
-
x: 23.5
y: -0.5
z: 0.0
-
x: 24.5
y: -0.5
z: 0.0
-
x: 25.5
y: -0.5
z: 0.0
-
x: 26.5
y: -0.5
z: 0.0
-
x: 27.5
y: -0.5
z: 0.0
-
x: -0.5
y: -0.5
z: 0.0
-
x: -0.5
y: 0.5
z: 0.0
-
x: -0.5
y: 1.5
z: 0.0
-
x: -0.5
y: 2.5
z: 0.0
-
x: -0.5
y: 3.5
z: 0.0
-
x: -0.5
y: 4.5
z: 0.0
-
x: -0.5
y: 5.5
z: 0.0
-
x: -0.5
y: 6.5
z: 0.0
-
x: -0.5
y: 7.5
z: 0.0
-
x: -0.5
y: 8.5
z: 0.0
-
x: -0.5
y: 9.5
z: 0.0
-
x: -0.5
y: 10.5
z: 0.0
-
x: -0.5
y: 11.5
z: 0.0
-
x: -0.5
y: 12.5
z: 0.0
-
x: -0.5
y: 13.5
z: 0.0
-
x: -0.5
y: 14.5
z: 0.0
-
x: -0.5
y: 15.5
z: 0.0
-
x: -0.5
y: 16.5
z: 0.0
-
x: -0.5
y: 17.5
z: 0.0
-
x: -0.5
y: 18.5
z: 0.0
-
x: -0.5
y: 19.5
z: 0.0
-
x: -0.5
y: 20.5
z: 0.0
-
x: -0.5
y: 21.5
z: 0.0
-
x: -0.5
y: 22.5
z: 0.0
-
x: -0.5
y: 23.5
z: 0.0
-
x: -0.5
y: 24.5
z: 0.0
-
x: -0.5
y: 25.5
z: 0.0
-
x: -0.5
y: 26.5
z: 0.0
-
x: -0.5
y: 27.5
z: 0.0
-
x: -0.5
y: 28.5
z: 0.0
-
x: -0.5
y: 29.5
z: 0.0
-
x: -0.5
y: 30.5
z: 0.0
-
x: -0.5
y: 31.5
z: 0.0
-
x: -0.5
y: 31.5
z: 0.0
-
x: 0.5
y: 31.5
z: 0.0
-
x: 1.5
y: 31.5
z: 0.0
-
x: 2.5
y: 31.5
z: 0.0
-
x: 3.5
y: 31.5
z: 0.0
-
x: 4.5
y: 31.5
z: 0.0
-
x: 5.5
y: 31.5
z: 0.0
-
x: 6.5
y: 31.5
z: 0.0
-
x: 7.5
y: 31.5
z: 0.0
-
x: 8.5
y: 31.5
z: 0.0
-
x: 9.5
y: 31.5
z: 0.0
-
x: 10.5
y: 31.5
z: 0.0
-
x: 11.5
y: 31.5
z: 0.0
-
x: 12.5
y: 31.5
z: 0.0
-
x: 13.5
y: 31.5
z: 0.0
-
x: 14.5
y: 31.5
z: 0.0
-
x: 15.5
y: 31.5
z: 0.0
-
x: 16.5
y: 31.5
z: 0.0
-
x: 17.5
y: 31.5
z: 0.0
-
x: 18.5
y: 31.5
z: 0.0
-
x: 19.5
y: 31.5
z: 0.0
-
x: 20.5
y: 31.5
z: 0.0
-
x: 21.5
y: 31.5
z: 0.0
-
x: 22.5
y: 31.5
z: 0.0
-
x: 23.5
y: 31.5
z: 0.0
-
x: 24.5
y: 31.5
z: 0.0
-
x: 25.5
y: 31.5
z: 0.0
-
x: 26.5
y: 31.5
z: 0.0
-
x: 27.5
y: 31.5
z: 0.0
-
x: 27.5
y: -0.5
z: 0.0
-
x: 27.5
y: 0.5
z: 0.0
-
x: 27.5
y: 1.5
z: 0.0
-
x: 27.5
y: 2.5
z: 0.0
-
x: 27.5
y: 3.5
z: 0.0
-
x: 27.5
y: 4.5
z: 0.0
-
x: 27.5
y: 5.5
z: 0.0
-
x: 27.5
y: 6.5
z: 0.0
-
x: 27.5
y: 7.5
z: 0.0
-
x: 27.5
y: 8.5
z: 0.0
-
x: 27.5
y: 9.5
z: 0.0
-
x: 27.5
y: 10.5
z: 0.0
-
x: 27.5
y: 11.5
z: 0.0
-
x: 27.5
y: 12.5
z: 0.0
-
x: 27.5
y: 13.5
z: 0.0
-
x: 27.5
y: 14.5
z: 0.0
-
x: 27.5
y: 15.5
z: 0.0
-
x: 27.5
y: 16.5
z: 0.0
-
x: 27.5
y: 17.5
z: 0.0
-
x: 27.5
y: 18.5
z: 0.0
-
x: 27.5
y: 19.5
z: 0.0
-
x: 27.5
y: 20.5
z: 0.0
-
x: 27.5
y: 21.5
z: 0.0
-
x: 27.5
y: 22.5
z: 0.0
-
x: 27.5
y: 23.5
z: 0.0
-
x: 27.5
y: 24.5
z: 0.0
-
x: 27.5
y: 25.5
z: 0.0
-
x: 27.5
y: 26.5
z: 0.0
-
x: 27.5
y: 27.5
z: 0.0
-
x: 27.5
y: 28.5
z: 0.0
-
x: 27.5
y: 29.5
z: 0.0
-
x: 27.5
y: 30.5
z: 0.0
-
x: 27.5
y: 31.5
z: 0.0
-
x: 26.5
y: 3.5
z: 0.0
-
x: 26.5
y: 4.5
z: 0.0
-
x: 26.5
y: 5.5
z: 0.0
-
x: 26.5
y: 6.5
z: 0.0
-
x: 26.5
y: 7.5
z: 0.0
-
x: 26.5
y: 9.5
z: 0.0
-
x: 26.5
y: 10.5
z: 0.0
-
x: 26.5
y: 11.5
z: 0.0
-
x: 26.5
y: 12.5
z: 0.0
-
x: 26.5
y: 13.5
z: 0.0
"""
if __name__ == '__main__':
main()
| pirobot/pedsim_ros | pedsim_simulator/scripts/mocktracks_rss_scenario_one.py | Python | bsd-2-clause | 12,091 |
"""
Implements a simple, robust, safe, Messenger class that allows one to
register callbacks for a signal/slot (or event/handler) kind of
messaging system. One can basically register a callback
function/method to be called when an object sends a particular event.
The Messenger class is Borg. So it is easy to instantiate and use.
This module is also reload-safe, so if the module is reloaded the
callback information is not lost. Method callbacks do not have a
reference counting problem since weak references are used.
The main functionality of this module is provided by three functions,
`connect`, `disconnect` and `send`.
Here is example usage with VTK::
>>> import messenger, vtk
>>> def cb(obj, evt):
... print obj.__class__.__name__, evt
...
>>> o = vtk.vtkProperty()
>>> o.AddObserver('ModifiedEvent', messenger.send)
1
>>> messenger.connect(o, 'ModifiedEvent', cb)
>>>
>>> o.SetRepresentation(1)
vtkOpenGLProperty ModifiedEvent
>>> messenger.connect(o, 'AnyEvent', cb)
>>> o.SetRepresentation(2)
vtkOpenGLProperty ModifiedEvent
vtkOpenGLProperty ModifiedEvent
>>>
>>> messenger.send(o, 'foo')
vtkOpenGLProperty foo
>>> messenger.disconnect(o, 'AnyEvent')
>>> messenger.send(o, 'foo')
>>>
This approach is necessary if you don't want to be bitten by reference
cycles. If you have a Python object holding a reference to a VTK
object and pass a method of the object to the AddObserver call, you
will get a reference cycle that cannot be collected by the garbage
collector. Using this messenger module gets around the problem.
Also note that adding a connection for 'AnyEvent' will trigger a
callback no matter what event was generated. The code above also
shows how disconnection works.
"""
# Author: Prabhu Ramachandran
# Copyright (c) 2004-2007, Enthought, Inc.
# License: BSD Style.
__all__ = ['Messenger', 'MessengerError',
'connect', 'disconnect', 'send']
import types
import sys
import weakref
#################################################################
# This code makes the module reload-safe.
#################################################################
_saved = {}
for name in ['messenger', 'tvtk.messenger']:
if sys.modules.has_key(name):
mod = sys.modules[name]
if hasattr(mod, 'Messenger'):
_saved = mod.Messenger._shared_data
del mod
break
#################################################################
# `MessengerError` class for exceptions raised by Messenger.
#################################################################
class MessengerError(Exception):
pass
#################################################################
# `Messenger` class.
#################################################################
class Messenger:
"""Implements a messenger class which deals with something like
signals and slots. Basically, an object can register a signal
that it plans to emit. Any other object can decide to handle that
signal (of that particular object) by registering itself with the
messenger. When a signal is emitted the messenger calls all
handlers. This makes it totally easy to deal with communication
between objects. The class is Borg. Rather than use this class,
please use the 'connect' and 'disconnect' functions.
"""
_shared_data = _saved
def __init__(self):
"""Create the messenger. This class is Borg. So all
instances are the same.
"""
self.__dict__ = self._shared_data
if not hasattr(self, '_signals'):
# First instantiation.
self._signals = {}
self._catch_all = ['AnyEvent', 'all']
#################################################################
# 'Messenger' interface.
#################################################################
def connect(self, obj, event, callback):
""" Registers a slot given an object and its signal to slot
into and also given a bound method in `callback` that should
have two arguments. `send` will call the callback
with the object that emitted the signal and the actual
event/signal as arguments.
Parameters
----------
- obj : Python object
Any Python object that will generate the particular event.
- event : An event (can be anything, usually strings)
The event `obj` will generate. If this is in the list
`self._catch_all`, then any event will call this callback.
- callback : `function` or `method`
This callback will be called when the object generates the
particular event. The object, event and any other arguments
and keyword arguments given by the `obj` are passed along to
the callback.
"""
typ = type(callback)
key = hash(obj)
if not self._signals.has_key(key):
self._signals[key] = {}
signals = self._signals[key]
if not signals.has_key(event):
signals[event] = {}
slots = signals[event]
callback_key = hash(callback)
if typ is types.FunctionType:
slots[callback_key] = (None, callback)
elif typ is types.MethodType:
obj = weakref.ref(callback.im_self)
name = callback.__name__
slots[callback_key] = (obj, name)
else:
raise MessengerError, \
"Callback must be a function or method. "\
"You passed a %s."%(str(callback))
def disconnect(self, obj, event=None, callback=None, obj_is_hash=False):
"""Disconnects the object and its event handlers.
Parameters
----------
- obj : Object
The object that generates events.
- event : The event. (defaults to None)
- callback : `function` or `method`
The event handler.
If `event` and `callback` are None (the default) all the
events and handlers for the object are removed. If only
`callback` is None, only this handler is removed. If `obj`
and 'event' alone are specified, all handlers for the event
are removed.
- obj_is_hash : `bool`
Specifies if the object passed is a hash instead of the object itself.
This is needed if the object is gc'd but only the hash exists and one
wants to disconnect the object.
"""
signals = self._signals
if obj_is_hash:
key = obj
else:
key = hash(obj)
if not signals.has_key(key):
return
if callback is None:
if event is None:
del signals[key]
else:
del signals[key][event]
else:
del signals[key][event][hash(callback)]
def send(self, source, event, *args, **kw_args):
"""To be called by the object `source` that desires to
generate a particular event. This function in turn invokes
all the handlers for the event passing the `source` object,
event and any additional arguments and keyword arguments. If
any connected callback is garbage collected without being
disconnected, it is silently removed from the existing slots.
Parameters
----------
- source : Python object
This is the object that generated the event.
- event : The event.
If there are handlers connected to events called 'AnyEvent'
or 'all', then any event will invoke these.
"""
try:
sigs = self._get_signals(source)
except (MessengerError, KeyError):
return
events = self._catch_all[:]
if event not in events:
events.append(event)
for evt in events:
if sigs.has_key(evt):
slots = sigs[evt]
for key in slots.keys():
obj, meth = slots[key]
if obj: # instance method
inst = obj()
if inst:
getattr(inst, meth)(source, event, *args, **kw_args)
else:
# Oops, dead reference.
del slots[key]
else: # normal function
meth(source, event, *args, **kw_args)
def is_registered(self, obj):
"""Returns if the given object has registered itself with the
messenger.
"""
try:
sigs = self._get_signals(obj)
except MessengerError:
return 0
else:
return 1
def get_signal_names(self, obj):
"""Returns a list of signal names the object passed has
registered.
"""
return self._get_signals(obj).keys()
#################################################################
# Non-public interface.
#################################################################
def _get_signals(self, obj):
"""Given an object `obj` it returns the signals of that
object.
"""
ret = self._signals.get(hash(obj))
if ret is None:
raise MessengerError, \
"No such object: %s, has registered itself "\
"with the messenger."%obj
else:
return ret
#################################################################
# Convenience functions.
#################################################################
_messenger = Messenger()
def connect(obj, event, callback):
_messenger.connect(obj, event, callback)
connect.__doc__ = _messenger.connect.__doc__
def disconnect(obj, event=None, callback=None, obj_is_hash=False):
    _messenger.disconnect(obj, event, callback, obj_is_hash)
disconnect.__doc__ = _messenger.disconnect.__doc__
def send(obj, event, *args, **kw_args):
_messenger.send(obj, event, *args, **kw_args)
send.__doc__ = _messenger.send.__doc__
del _saved
| liulion/mayavi | tvtk/messenger.py | Python | bsd-3-clause | 10,137 |
import unittest
from django.db import connection, migrations, models
from django.db.migrations.state import ProjectState
from django.test import override_settings
from .test_operations import OperationTestBase
try:
import sqlparse
except ImportError:
sqlparse = None
class AgnosticRouter(object):
"""
A router that doesn't have an opinion regarding migrating.
"""
def allow_migrate(self, db, model, **hints):
return None
class MigrateNothingRouter(object):
"""
A router that doesn't allow migrating.
"""
def allow_migrate(self, db, model, **hints):
return False
class MigrateEverythingRouter(object):
"""
A router that always allows migrating.
"""
def allow_migrate(self, db, model, **hints):
return True
class MigrateWhenFooRouter(object):
"""
A router that allows migrating depending on a hint.
"""
def allow_migrate(self, db, model, **hints):
return hints.get('foo', False)
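# Note on the contract these routers exercise: allow_migrate() returning True
# permits the operation on the given database, False forbids it, and None
# means "no opinion", deferring to any remaining routers (and ultimately to
# Django's default of running the migration).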
class MultiDBOperationTests(OperationTestBase):
multi_db = True
def _test_create_model(self, app_label, should_run):
"""
Tests that CreateModel honours multi-db settings.
"""
operation = migrations.CreateModel(
"Pony",
[("id", models.AutoField(primary_key=True))],
)
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Test the database alteration
self.assertTableNotExists("%s_pony" % app_label)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
if should_run:
self.assertTableExists("%s_pony" % app_label)
else:
self.assertTableNotExists("%s_pony" % app_label)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertTableNotExists("%s_pony" % app_label)
@override_settings(DATABASE_ROUTERS=[AgnosticRouter()])
def test_create_model(self):
"""
Test when router doesn't have an opinion (i.e. CreateModel should run).
"""
self._test_create_model("test_mltdb_crmo", should_run=True)
@override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
def test_create_model2(self):
"""
Test when router returns False (i.e. CreateModel shouldn't run).
"""
self._test_create_model("test_mltdb_crmo2", should_run=False)
@override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()])
def test_create_model3(self):
"""
Test when router returns True (i.e. CreateModel should run).
"""
self._test_create_model("test_mltdb_crmo3", should_run=True)
def test_create_model4(self):
"""
Test multiple routers.
"""
with override_settings(DATABASE_ROUTERS=[AgnosticRouter(), AgnosticRouter()]):
self._test_create_model("test_mltdb_crmo4", should_run=True)
with override_settings(DATABASE_ROUTERS=[MigrateNothingRouter(), MigrateEverythingRouter()]):
self._test_create_model("test_mltdb_crmo4", should_run=False)
with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter(), MigrateNothingRouter()]):
self._test_create_model("test_mltdb_crmo4", should_run=True)
def _test_run_sql(self, app_label, should_run, hints=None):
with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()]):
project_state = self.set_up_test_model(app_label)
sql = """
INSERT INTO {0}_pony (pink, weight) VALUES (1, 3.55);
INSERT INTO {0}_pony (pink, weight) VALUES (3, 5.0);
""".format(app_label)
operation = migrations.RunSQL(sql, hints=hints or {})
# Test the state alteration does nothing
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(new_state, project_state)
# Test the database alteration
self.assertEqual(project_state.apps.get_model(app_label, "Pony").objects.count(), 0)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony = project_state.apps.get_model(app_label, "Pony")
if should_run:
self.assertEqual(Pony.objects.count(), 2)
else:
self.assertEqual(Pony.objects.count(), 0)
@unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
@override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
def test_run_sql(self):
self._test_run_sql("test_mltdb_runsql", should_run=False)
@unittest.skipIf(sqlparse is None and connection.features.requires_sqlparse_for_splitting, "Missing sqlparse")
@override_settings(DATABASE_ROUTERS=[MigrateWhenFooRouter()])
def test_run_sql2(self):
self._test_run_sql("test_mltdb_runsql2", should_run=False)
self._test_run_sql("test_mltdb_runsql2", should_run=True, hints={'foo': True})
def _test_run_python(self, app_label, should_run, hints=None):
with override_settings(DATABASE_ROUTERS=[MigrateEverythingRouter()]):
project_state = self.set_up_test_model(app_label)
# Create the operation
def inner_method(models, schema_editor):
Pony = models.get_model(app_label, "Pony")
Pony.objects.create(pink=1, weight=3.55)
Pony.objects.create(weight=5)
operation = migrations.RunPython(inner_method, hints=hints or {})
# Test the state alteration does nothing
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(new_state, project_state)
# Test the database alteration
self.assertEqual(project_state.apps.get_model(app_label, "Pony").objects.count(), 0)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony = project_state.apps.get_model(app_label, "Pony")
if should_run:
self.assertEqual(Pony.objects.count(), 2)
else:
self.assertEqual(Pony.objects.count(), 0)
@override_settings(DATABASE_ROUTERS=[MigrateNothingRouter()])
def test_run_python(self):
self._test_run_python("test_mltdb_runpython", should_run=False)
@override_settings(DATABASE_ROUTERS=[MigrateWhenFooRouter()])
def test_run_python2(self):
self._test_run_python("test_mltdb_runpython2", should_run=False)
self._test_run_python("test_mltdb_runpython2", should_run=True, hints={'foo': True})
| memtoko/django | tests/migrations/test_multidb.py | Python | bsd-3-clause | 6,893 |
# -*- coding: utf-8 -*-
import ipaddress
__all__ = [
'config_to_map',
'get_region'
]
def config_to_map(topology_config):
"""
args:
topology_config: dict
{
'region1': [
'10.1.1.0/24',
'10.1.10.0/24',
'172.16.1.0/24'
],
'region2': [
'192.168.1.0/24',
'10.2.0.0/16',
]
}
Region cannot be "_default"
returns:
topology_map: dict
{
ip_network('10.1.1.0/24'): 'region1',
ip_network('10.1.10.0/24'): 'region1',
ip_network('172.16.1.0/24'): 'region1',
ip_network('192.168.1.0/24'): 'region2',
ip_network('10.2.0.0/16'): 'region2',
}
raises:
ValueError: if a region value is "_default"
"""
topology_map = {}
for region in topology_config:
# "_default" cannot be used as a region name
if region == '_default':
raise ValueError('cannot use "_default" as a region name')
for net_str in topology_config[region]:
net = ipaddress.ip_network(net_str)
topology_map[net] = region
return topology_map
def get_region(ip_str, topology_map):
"""Return name of a region from the topology map for
the given IP address, if multiple networks contain the IP,
region of the most specific(longest prefix length) match is returned,
if multiple equal prefix length found the behavior of which
entry is returned is undefined.
args:
ip_str: string representing an IP address
returns:
string: region name
None: if no region has been found
raises:
ValueError: raised by ipaddress if ip_str isn't a valid IP address
"""
ip = ipaddress.ip_address(ip_str)
# find all the matching networks
matches = []
for net in topology_map:
if ip in net:
matches.append(net)
# if only a single match is found return it
if len(matches) == 1:
return topology_map[matches[0]]
# if more than 1 match is found, sort the matches
# by prefixlen, return the longest prefixlen entry
elif len(matches) > 1:
matches.sort(key=lambda net: net.prefixlen)
return topology_map[matches[-1]]
# no matches found
return None
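if __name__ == '__main__':
    # Editor's usage sketch, not part of the original module; the region
    # names and networks below are illustrative only.
    topology_map = config_to_map({
        'region1': ['10.1.1.0/24'],
        'region2': ['10.1.0.0/16', '192.168.1.0/24'],
    })
    # longest-prefix match: the /24 beats the /16 for 10.1.1.5
    assert get_region('10.1.1.5', topology_map) == 'region1'
    assert get_region('10.1.2.5', topology_map) == 'region2'
    assert get_region('8.8.8.8', topology_map) is None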
| polaris-gslb/polaris-core | polaris_common/topology.py | Python | bsd-3-clause | 2,452 |
#!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django.db.models import Q
# from djangobmf.permissions import ModulePermission
from djangobmf.utils import FilterQueryset
class GoalFilter(FilterQueryset):
def filter_queryset(self, qs, user):
if user.has_perm('%s.can_manage' % qs.model._meta.app_label, qs.model):
return qs
qs_filter = Q(referee=user.djangobmf.employee or -1)
qs_filter |= Q(employees=user.djangobmf.employee or -1)
qs_filter |= Q(team__in=user.djangobmf.team)
if hasattr(qs.model, "project"): # pragma: no branch
project = qs.model._meta.get_field_by_name("project")[0].model
if user.has_perm('%s.can_manage' % project._meta.app_label, project):
qs_filter |= Q(project__isnull=False)
else:
qs_filter |= Q(project__isnull=False, project__employees=user.djangobmf.employee or -1)
qs_filter |= Q(project__isnull=False, project__team__in=user.djangobmf.team)
return qs.filter(qs_filter)
class TaskFilter(FilterQueryset):
def filter_queryset(self, qs, user):
qs_filter = Q(project__isnull=True, goal__isnull=True)
qs_filter |= Q(employee=user.djangobmf.employee or -1)
qs_filter |= Q(in_charge=user.djangobmf.employee)
if hasattr(qs.model, "goal"): # pragma: no branch
goal = qs.model._meta.get_field_by_name("goal")[0].model
if user.has_perm('%s.can_manage' % goal._meta.app_label, goal):
qs_filter |= Q(goal__isnull=False)
else:
qs_filter |= Q(goal__isnull=False, goal__referee=user.djangobmf.employee or -1)
qs_filter |= Q(goal__isnull=False, goal__employees=user.djangobmf.employee or -1)
qs_filter |= Q(goal__isnull=False, goal__team__in=user.djangobmf.team)
if hasattr(qs.model, "project"): # pragma: no branch
project = qs.model._meta.get_field_by_name("project")[0].model
if user.has_perm('%s.can_manage' % project._meta.app_label, project):
qs_filter |= Q(project__isnull=False)
else:
qs_filter |= Q(project__isnull=False, project__employees=user.djangobmf.employee or -1)
qs_filter |= Q(project__isnull=False, project__team__in=user.djangobmf.team)
return qs.filter(qs_filter)
| django-bmf/django-bmf | djangobmf/contrib/task/permissions.py | Python | bsd-3-clause | 2,444 |
#!/usr/bin/env python
"""Command-line tool for starting a local Vitess database for testing.
USAGE:
$ run_local_database --port 12345 \
--topology test_keyspace/-80:test_keyspace_0,test_keyspace/80-:test_keyspace_1 \
--schema_dir /path/to/schema/dir
It will run the tool, logging to stderr. On stdout, a small json structure
can be waited on and then parsed by the caller to figure out how to reach
the vtgate process.
Once done with the test, send an empty line to this process for it to clean-up,
and then just wait for it to exit.
"""
import json
import logging
import optparse
import os
import re
import sys
from vttest import environment
from vttest import local_database
from vttest import mysql_flavor
from vttest import vt_processes
shard_exp = re.compile(r'(.+)/(.+):(.+)')
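# e.g. 'test_keyspace/-80:test_keyspace_0' parses into the keyspace
# 'test_keyspace', the shard range '-80' and the db name 'test_keyspace_0'.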
def main(port, topology, schema_dir, vschema, mysql_only):
shards = []
for shard in topology.split(','):
m = shard_exp.match(shard)
if m:
shards.append(
vt_processes.ShardInfo(m.group(1), m.group(2), m.group(3)))
else:
      sys.stderr.write('invalid --topology flag format: %s\n' % shard)
sys.exit(1)
environment.base_port = port
with local_database.LocalDatabase(shards, schema_dir, vschema, mysql_only) as local_db:
print json.dumps(local_db.config())
sys.stdout.flush()
try:
raw_input()
except EOFError:
sys.stderr.write(
'WARNING: %s: No empty line was received on stdin.'
' Instead, stdin was closed and the cluster will be shut down now.'
' Make sure to send the empty line instead to proactively shutdown'
' the local cluster. For example, did you forget the shutdown in'
' your test\'s tearDown()?\n' % os.path.basename(__file__))
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option(
'-p', '--port', type='int',
help='Port to use for vtcombo. If this is 0, a random port '
'will be chosen.')
parser.add_option(
'-t', '--topology',
help='Define which shards exist in the test topology in the'
' form <keyspace>/<shardrange>:<dbname>,... The dbname'
' must be unique among all shards, since they share'
' a MySQL instance in the test environment.')
parser.add_option(
'-s', '--schema_dir',
help='Directory for initial schema files. Within this dir,'
' there should be a subdir for each keyspace. Within'
' each keyspace dir, each file is executed as SQL'
' after the database is created on each shard.'
' If the directory contains a vschema.json file, it'
' will be used as the vschema for the V3 API.')
parser.add_option(
'-e', '--vschema',
help='If this file is specified, it will be used'
' as the vschema for the V3 API.')
parser.add_option(
'-m', '--mysql_only', action='store_true',
help='If this flag is set only mysql is initialized.'
' The rest of the vitess components are not started.'
' Also, the output specifies the mysql unix socket'
' instead of the vtgate port.')
parser.add_option(
'-v', '--verbose', action='store_true',
help='Display extra error messages.')
(options, args) = parser.parse_args()
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
# This will set the flavor based on the MYSQL_FLAVOR env var,
# or default to MariaDB.
mysql_flavor.set_mysql_flavor(None)
main(options.port, options.topology, options.schema_dir, options.vschema, options.mysql_only)
| tjyang/vitess | py/vttest/run_local_database.py | Python | bsd-3-clause | 3,537 |
"""
TutorialWorld - basic objects - Griatch 2011
This module holds all "dead" object definitions for
the tutorial world. Object-commands and -cmdsets
are also defined here, together with the object.
Objects:
TutorialObject
Readable
Climbable
Obelisk
LightSource
CrumblingWall
Weapon
WeaponRack
"""
from future.utils import listvalues
import random
from evennia import DefaultObject, DefaultExit, Command, CmdSet
from evennia import utils
from evennia.utils import search
from evennia.utils.spawner import spawn
#------------------------------------------------------------
#
# TutorialObject
#
# The TutorialObject is the base class for all items
# in the tutorial. They have an attribute "tutorial_info"
# on them that the global tutorial command can use to extract
# interesting behind-the scenes information about the object.
#
# TutorialObjects may also be "reset". What the reset means
# is up to the object. It can be the resetting of the world
# itself, or the removal of an inventory item from a
# character's inventory when leaving the tutorial, for example.
#
#------------------------------------------------------------
class TutorialObject(DefaultObject):
"""
This is the baseclass for all objects in the tutorial.
"""
def at_object_creation(self):
"Called when the object is first created."
super(TutorialObject, self).at_object_creation()
self.db.tutorial_info = "No tutorial info is available for this object."
def reset(self):
"Resets the object, whatever that may mean."
self.location = self.home
#------------------------------------------------------------
#
# Readable - an object that can be "read"
#
#------------------------------------------------------------
#
# Read command
#
class CmdRead(Command):
"""
Usage:
read [obj]
Read some text of a readable object.
"""
key = "read"
locks = "cmd:all()"
help_category = "TutorialWorld"
def func(self):
"""
Implements the read command. This simply looks for an
Attribute "readable_text" on the object and displays that.
"""
if self.args:
obj = self.caller.search(self.args.strip())
else:
obj = self.obj
if not obj:
return
# we want an attribute read_text to be defined.
readtext = obj.db.readable_text
if readtext:
string = "You read {C%s{n:\n %s" % (obj.key, readtext)
else:
string = "There is nothing to read on %s." % obj.key
self.caller.msg(string)
class CmdSetReadable(CmdSet):
"""
A CmdSet for readables.
"""
def at_cmdset_creation(self):
"""
Called when the cmdset is created.
"""
self.add(CmdRead())
class Readable(TutorialObject):
"""
    This simple object defines some attributes and attaches the readable cmdset defined above.
"""
def at_object_creation(self):
"""
Called when object is created. We make sure to set the needed
Attribute and add the readable cmdset.
"""
super(Readable, self).at_object_creation()
self.db.tutorial_info = "This is an object with a 'read' command defined in a command set on itself."
self.db.readable_text = "There is no text written on %s." % self.key
# define a command on the object.
self.cmdset.add_default(CmdSetReadable, permanent=True)
#------------------------------------------------------------
#
# Climbable object
#
# The climbable object works so that once climbed, it sets
# a flag on the climber to show that it was climbed. A simple
# command 'climb' handles the actual climbing. The memory
# of what was last climbed is used in a simple puzzle in the
# tutorial.
#
#------------------------------------------------------------
class CmdClimb(Command):
"""
Climb an object
Usage:
climb <object>
This allows you to climb.
"""
key = "climb"
locks = "cmd:all()"
help_category = "TutorialWorld"
def func(self):
"Implements function"
if not self.args:
self.caller.msg("What do you want to climb?")
return
obj = self.caller.search(self.args.strip())
if not obj:
return
if obj != self.obj:
self.caller.msg("Try as you might, you cannot climb that.")
return
ostring = self.obj.db.climb_text
if not ostring:
ostring = "You climb %s. Having looked around, you climb down again." % self.obj.name
self.caller.msg(ostring)
# set a tag on the caller to remember that we climbed.
self.caller.tags.add("tutorial_climbed_tree", category="tutorial_world")
class CmdSetClimbable(CmdSet):
"Climbing cmdset"
def at_cmdset_creation(self):
"populate set"
self.add(CmdClimb())
class Climbable(TutorialObject):
"""
A climbable object. All that is special about it is that it has
the "climb" command available on it.
"""
def at_object_creation(self):
"Called at initial creation only"
self.cmdset.add_default(CmdSetClimbable, permanent=True)
#------------------------------------------------------------
#
# Obelisk - a unique item
#
# The Obelisk is an object with a modified return_appearance method
# that causes it to look slightly different every time one looks at it.
# Since what you actually see is a part of a game puzzle, the act of
# looking also stores a key attribute on the looking object (different
# depending on which text you saw) for later reference.
#
#------------------------------------------------------------
class Obelisk(TutorialObject):
"""
    This object changes its description randomly, and the description
    shown determines which "clue id" is stored on the Character for
    future puzzles.
Important Attribute:
puzzle_descs (list): list of descriptions. One of these is
picked randomly when this object is looked at and its index
        in the list is used as a key to solve the puzzle.
"""
def at_object_creation(self):
"Called when object is created."
super(Obelisk, self).at_object_creation()
self.db.tutorial_info = "This object changes its desc randomly, and makes sure to remember which one you saw."
self.db.puzzle_descs = ["You see a normal stone slab"]
# make sure this can never be picked up
self.locks.add("get:false()")
def return_appearance(self, caller):
"""
This hook is called by the look command to get the description
of the object. We overload it with our own version.
"""
# randomly get the index for one of the descriptions
descs = self.db.puzzle_descs
clueindex = random.randint(0, len(descs) - 1)
# set this description, with the random extra
string = "The surface of the obelisk seem to waver, shift and writhe under your gaze, with " \
"different scenes and structures appearing whenever you look at it. "
self.db.desc = string + descs[clueindex]
# remember that this was the clue we got. The Puzzle room will
# look for this later to determine if you should be teleported
# or not.
caller.db.puzzle_clue = clueindex
# call the parent function as normal (this will use
# the new desc Attribute we just set)
return super(Obelisk, self).return_appearance(caller)
#------------------------------------------------------------
#
# LightSource
#
# This object emits light. Once it has been turned on it
# cannot be turned off. When it burns out it will delete
# itself.
#
# This could be implemented using a single-repeat Script or by
# registering with the TickerHandler. We do it simpler by
# using the delay() utility function. This is very simple
# to use but does not survive a server @reload. Because of
# where the light matters (in the Dark Room where you can
# find new light sources easily), this is okay here.
#
#------------------------------------------------------------
class CmdLight(Command):
"""
Creates light where there was none. Something to burn.
"""
key = "on"
aliases = ["light", "burn"]
# only allow this command if command.obj is carried by caller.
locks = "cmd:holds()"
help_category = "TutorialWorld"
def func(self):
"""
Implements the light command. Since this command is designed
to sit on a "lightable" object, we operate only on self.obj.
"""
if self.obj.light():
self.caller.msg("You light %s." % self.obj.key)
self.caller.location.msg_contents("%s lights %s!" % (self.caller, self.obj.key), exclude=[self.caller])
else:
self.caller.msg("%s is already burning." % self.obj.key)
class CmdSetLight(CmdSet):
"CmdSet for the lightsource commands"
key = "lightsource_cmdset"
# this is higher than the dark cmdset - important!
priority = 3
def at_cmdset_creation(self):
"called at cmdset creation"
self.add(CmdLight())
class LightSource(TutorialObject):
"""
This implements a light source object.
When burned out, the object will be deleted.
"""
def at_init(self):
"""
If this is called with the Attribute is_giving_light already
set, we know that the timer got killed by a server
reload/reboot before it had time to finish. So we kill it here
instead. This is the price we pay for the simplicity of the
non-persistent delay() method.
"""
if self.db.is_giving_light:
self.delete()
def at_object_creation(self):
"Called when object is first created."
super(LightSource, self).at_object_creation()
self.db.tutorial_info = "This object can be lit to create light. It has a timeout for how long it burns."
self.db.is_giving_light = False
self.db.burntime = 60 * 3 # 3 minutes
# this is the default desc, it can of course be customized
# when created.
self.db.desc = "A splinter of wood with remnants of resin on it, enough for burning."
# add the Light command
self.cmdset.add_default(CmdSetLight, permanent=True)
def _burnout(self):
"""
This is called when this light source burns out. We make no
use of the return value.
"""
# delete ourselves from the database
self.db.is_giving_light = False
try:
self.location.location.msg_contents("%s's %s flickers and dies." %
(self.location, self.key), exclude=self.location)
self.location.msg("Your %s flickers and dies." % self.key)
self.location.location.check_light_state()
except AttributeError:
try:
self.location.msg_contents("A %s on the floor flickers and dies." % self.key)
self.location.location.check_light_state()
except AttributeError:
pass
self.delete()
def light(self):
"""
Light this object - this is called by Light command.
"""
if self.db.is_giving_light:
return False
# burn for 3 minutes before calling _burnout
self.db.is_giving_light = True
# if we are in a dark room, trigger its light check
try:
self.location.location.check_light_state()
except AttributeError:
try:
# maybe we are directly in the room
self.location.check_light_state()
except AttributeError:
pass
finally:
            # start the burn timer, using the configured burntime.
            # When it runs out, self._burnout will be called.
            utils.delay(self.db.burntime, self._burnout)
return True
#------------------------------------------------------------
#
# Crumbling wall - unique exit
#
# This implements a simple puzzle exit that needs to be
# accessed with commands before one can get to traverse it.
#
# The puzzle-part is simply to move roots (that have
# presumably covered the wall) aside until a button for a
# secret door is revealed. The original position of the
# roots blocks the button, so they have to be moved to a certain
# position - when they have, the "press button" command
# is made available and the Exit is made traversable.
#
#------------------------------------------------------------
# There are four roots - two horizontal and two vertically
# running roots. Each can have three positions: top/middle/bottom
# and left/middle/right respectively. There can be any number of
# roots hanging through the middle position, but only one each
# along the sides. The goal is to make the center position clear.
# (yes, it's really as simple as it sounds, just move the roots
# to each side to "win". This is just a tutorial, remember?)
#
# The ShiftRoot command depends on the root object having an
# Attribute root_pos (a dictionary) to describe the current
# position of the roots.
class CmdShiftRoot(Command):
"""
Shifts roots around.
Usage:
shift blue root left/right
shift red root left/right
shift yellow root up/down
shift green root up/down
"""
key = "shift"
aliases = ["shiftroot", "push", "pull", "move"]
# we only allow to use this command while the
# room is properly lit, so we lock it to the
# setting of Attribute "is_lit" on our location.
locks = "cmd:locattr(is_lit)"
help_category = "TutorialWorld"
def parse(self):
"""
Custom parser; split input by spaces for simplicity.
"""
self.arglist = self.args.strip().split()
def func(self):
"""
Implement the command.
blue/red - vertical roots
yellow/green - horizontal roots
"""
if not self.arglist:
self.caller.msg("What do you want to move, and in what direction?")
return
if "root" in self.arglist:
# we clean out the use of the word "root"
self.arglist.remove("root")
# we accept arguments on the form <color> <direction>
if not len(self.arglist) > 1:
self.caller.msg("You must define which colour of root you want to move, and in which direction.")
return
color = self.arglist[0].lower()
direction = self.arglist[1].lower()
# get current root positions dict
root_pos = self.obj.db.root_pos
if not color in root_pos:
self.caller.msg("No such root to move.")
return
# first, vertical roots (red/blue) - can be moved left/right
if color == "red":
if direction == "left":
root_pos[color] = max(-1, root_pos[color] - 1)
self.caller.msg("You shift the reddish root to the left.")
if root_pos[color] != 0 and root_pos[color] == root_pos["blue"]:
root_pos["blue"] += 1
self.caller.msg("The root with blue flowers gets in the way and is pushed to the right.")
elif direction == "right":
root_pos[color] = min(1, root_pos[color] + 1)
self.caller.msg("You shove the reddish root to the right.")
if root_pos[color] != 0 and root_pos[color] == root_pos["blue"]:
root_pos["blue"] -= 1
self.caller.msg("The root with blue flowers gets in the way and is pushed to the left.")
else:
self.caller.msg("You cannot move the root in that direction.")
elif color == "blue":
if direction == "left":
root_pos[color] = max(-1, root_pos[color] - 1)
self.caller.msg("You shift the root with small blue flowers to the left.")
if root_pos[color] != 0 and root_pos[color] == root_pos["red"]:
root_pos["red"] += 1
self.caller.msg("The reddish root is to big to fit as well, so that one falls away to the left.")
elif direction == "right":
root_pos[color] = min(1, root_pos[color] + 1)
self.caller.msg("You shove the root adorned with small blue flowers to the right.")
if root_pos[color] != 0 and root_pos[color] == root_pos["red"]:
root_pos["red"] -= 1
self.caller.msg("The thick reddish root gets in the way and is pushed back to the left.")
else:
self.caller.msg("You cannot move the root in that direction.")
# now the horizontal roots (yellow/green). They can be moved up/down
elif color == "yellow":
if direction == "up":
root_pos[color] = max(-1, root_pos[color] - 1)
self.caller.msg("You shift the root with small yellow flowers upwards.")
if root_pos[color] != 0 and root_pos[color] == root_pos["green"]:
root_pos["green"] += 1
self.caller.msg("The green weedy root falls down.")
elif direction == "down":
root_pos[color] = min(1, root_pos[color] + 1)
self.caller.msg("You shove the root adorned with small yellow flowers downwards.")
if root_pos[color] != 0 and root_pos[color] == root_pos["green"]:
root_pos["green"] -= 1
self.caller.msg("The weedy green root is shifted upwards to make room.")
else:
self.caller.msg("You cannot move the root in that direction.")
elif color == "green":
if direction == "up":
root_pos[color] = max(-1, root_pos[color] - 1)
self.caller.msg("You shift the weedy green root upwards.")
if root_pos[color] != 0 and root_pos[color] == root_pos["yellow"]:
root_pos["yellow"] += 1
self.caller.msg("The root with yellow flowers falls down.")
elif direction == "down":
root_pos[color] = min(1, root_pos[color] + 1)
self.caller.msg("You shove the weedy green root downwards.")
if root_pos[color] != 0 and root_pos[color] == root_pos["yellow"]:
root_pos["yellow"] -= 1
self.caller.msg("The root with yellow flowers gets in the way and is pushed upwards.")
else:
self.caller.msg("You cannot move the root in that direction.")
# we have moved the root. Store new position
self.obj.db.root_pos = root_pos
# Check victory condition
if listvalues(root_pos).count(0) == 0: # no roots in middle position
# This will affect the cmd: lock of CmdPressButton
self.obj.db.button_exposed = True
self.caller.msg("Holding aside the root you think you notice something behind it ...")
class CmdPressButton(Command):
"""
Presses a button.
"""
key = "press"
aliases = ["press button", "button", "push button"]
# only accessible if the button was found and there is light. This checks
# the Attribute button_exposed on the Wall object so that
# you can only push the button when the puzzle is solved. It also
# checks the is_lit Attribute on the location.
locks = "cmd:objattr(button_exposed) and objlocattr(is_lit)"
help_category = "TutorialWorld"
def func(self):
"Implements the command"
if self.caller.db.crumbling_wall_found_exit:
# we already pushed the button
self.caller.msg("The button folded away when the secret passage opened. You cannot push it again.")
return
# pushing the button
string = "You move your fingers over the suspicious depression, then gives it a " \
"decisive push. First nothing happens, then there is a rumble and a hidden " \
"{wpassage{n opens, dust and pebbles rumbling as part of the wall moves aside."
self.caller.msg(string)
string = "%s moves their fingers over the suspicious depression, then gives it a " \
"decisive push. First nothing happens, then there is a rumble and a hidden " \
"{wpassage{n opens, dust and pebbles rumbling as part of the wall moves aside."
self.caller.location.msg_contents(string % self.caller.key, exclude=self.caller)
self.obj.open_wall()
class CmdSetCrumblingWall(CmdSet):
"Group the commands for crumblingWall"
key = "crumblingwall_cmdset"
priority = 2
def at_cmdset_creation(self):
"called when object is first created."
self.add(CmdShiftRoot())
self.add(CmdPressButton())
class CrumblingWall(TutorialObject, DefaultExit):
"""
This is a custom Exit.
The CrumblingWall can be examined in various ways, but only if a
lit light source is in the room. The traversal itself is blocked
by a traverse: lock on the exit that only allows passage if a
certain attribute is set on the trying player.
Important attribute
destination - this property must be set to make this a valid exit
whenever the button is pushed (this hides it as an exit
until it actually is)
"""
def at_init(self):
"""
Called when object is recalled from cache.
"""
self.reset()
def at_object_creation(self):
"called when the object is first created."
super(CrumblingWall, self).at_object_creation()
self.aliases.add(["secret passage", "passage",
"crack", "opening", "secret door"])
# starting root positions. H1/H2 are the horizontally hanging roots,
# V1/V2 the vertically hanging ones. Each can have three positions:
# (-1, 0, 1) where 0 means the middle position. yellow/green are
        # horizontal roots and red/blue vertical, all may have value 0, but
        # never any other identical value.
self.db.root_pos = {"yellow": 0, "green": 0, "red": 0, "blue": 0}
# flags controlling the puzzle victory conditions
self.db.button_exposed = False
self.db.exit_open = False
# this is not even an Exit until it has a proper destination, and we won't assign
# that until it is actually open. Until then we store the destination here. This
# should be given a reasonable value at creation!
self.db.destination = 2
# we lock this Exit so that one can only execute commands on it
# if its location is lit and only traverse it once the Attribute
# exit_open is set to True.
self.locks.add("cmd:locattr(is_lit);traverse:objattr(exit_open)")
# set cmdset
self.cmdset.add(CmdSetCrumblingWall, permanent=True)
def open_wall(self):
"""
This method is called by the push button command once the puzzle
is solved. It opens the wall and sets a timer for it to reset
itself.
"""
# this will make it into a proper exit (this returns a list)
eloc = search.search_object(self.db.destination)
        if not eloc:
            self.location.msg_contents("The exit leads nowhere, there's just more stone behind it ...")
        else:
            self.destination = eloc[0]
            # the traverse lock tests the exit_open Attribute, so it must
            # be set in db storage
            self.db.exit_open = True
# start a 45 second timer before closing again
utils.delay(45, self.reset)
def _translate_position(self, root, ipos):
"Translates the position into words"
rootnames = {"red": "The {rreddish{n vertical-hanging root ",
"blue": "The thick vertical root with {bblue{n flowers ",
"yellow": "The thin horizontal-hanging root with {yyellow{n flowers ",
"green": "The weedy {ggreen{n horizontal root "}
vpos = {-1: "hangs far to the {wleft{n on the wall.",
0: "hangs straight down the {wmiddle{n of the wall.",
1: "hangs far to the {wright{n of the wall."}
hpos = {-1: "covers the {wupper{n part of the wall.",
0: "passes right over the {wmiddle{n of the wall.",
1: "nearly touches the floor, near the {wbottom{n of the wall."}
if root in ("yellow", "green"):
string = rootnames[root] + hpos[ipos]
else:
string = rootnames[root] + vpos[ipos]
return string
def return_appearance(self, caller):
"""
This is called when someone looks at the wall. We need to echo the
current root positions.
"""
if self.db.button_exposed:
# we found the button by moving the roots
string = "Having moved all the roots aside, you find that the center of the wall, " \
"previously hidden by the vegetation, hid a curious square depression. It was maybe once " \
"concealed and made to look a part of the wall, but with the crumbling of stone around it," \
"it's now easily identifiable as some sort of button."
elif self.db.exit_open:
# we pressed the button; the exit is open
string = "With the button pressed, a crack has opened in the root-covered wall, just wide enough " \
"to squeeze through. A cold draft is coming from the hole and you get the feeling the " \
"opening may close again soon."
else:
# puzzle not solved yet.
string = "The wall is old and covered with roots that here and there have permeated the stone. " \
"The roots (or whatever they are - some of them are covered in small non-descript flowers) " \
"crisscross the wall, making it hard to clearly see its stony surface. Maybe you could " \
"try to {wshift{n or {wmove{n them.\n"
# display the root positions to help with the puzzle
for key, pos in self.db.root_pos.items():
string += "\n" + self._translate_position(key, pos)
self.db.desc = string
# call the parent to continue execution (will use the desc we just set)
return super(CrumblingWall, self).return_appearance(caller)
def at_after_traverse(self, traverser, source_location):
"""
This is called after we traversed this exit. Cleans up and resets
the puzzle.
"""
        del traverser.db.crumbling_wall_found_button
del traverser.db.crumbling_wall_found_exit
self.reset()
def at_failed_traverse(self, traverser):
"This is called if the player fails to pass the Exit."
traverser.msg("No matter how you try, you cannot force yourself through %s." % self.key)
def reset(self):
"""
Called by tutorial world runner, or whenever someone successfully
traversed the Exit.
"""
self.location.msg_contents("The secret door closes abruptly, roots falling back into place.")
# reset the flags and remove the exit destination
self.db.button_exposed = False
self.db.exit_open = False
self.destination = None
# Reset the roots with some random starting positions for the roots:
start_pos = [{"yellow":1, "green":0, "red":0, "blue":0},
{"yellow":0, "green":0, "red":0, "blue":0},
{"yellow":0, "green":1, "red":-1, "blue":0},
{"yellow":1, "green":0, "red":0, "blue":0},
{"yellow":0, "green":0, "red":0, "blue":1}]
self.db.root_pos = random.choice(start_pos)
#------------------------------------------------------------
#
# Weapon - object type
#
# A weapon is necessary in order to fight in the tutorial
# world. A weapon (which here is assumed to be a bladed
# melee weapon for close combat) has three commands,
# stab, slash and defend. Weapons also have a property "magic"
# to determine if they are usable against certain enemies.
#
# Since Characters don't have special skills in the tutorial,
# we let the weapon itself determine how easy/hard it is
# to hit with it, and how much damage it can do.
#
#------------------------------------------------------------
class CmdAttack(Command):
"""
Attack the enemy. Commands:
stab <enemy>
slash <enemy>
parry
stab - (thrust) makes a lot of damage but is harder to hit with.
slash - is easier to land, but does not make as much damage.
parry - forgoes your attack but will make you harder to hit on next
enemy attack.
"""
# this is an example of implementing many commands as a single
# command class, using the given command alias to separate between them.
key = "attack"
aliases = ["hit","kill", "fight", "thrust", "pierce", "stab",
"slash", "chop", "parry", "defend"]
locks = "cmd:all()"
help_category = "TutorialWorld"
def func(self):
"Implements the stab"
cmdstring = self.cmdstring
if cmdstring in ("attack", "fight"):
string = "How do you want to fight? Choose one of 'stab', 'slash' or 'defend'."
self.caller.msg(string)
return
# parry mode
if cmdstring in ("parry", "defend"):
string = "You raise your weapon in a defensive pose, ready to block the next enemy attack."
self.caller.msg(string)
self.caller.db.combat_parry_mode = True
self.caller.location.msg_contents("%s takes a defensive stance" % self.caller, exclude=[self.caller])
return
if not self.args:
self.caller.msg("Who do you attack?")
return
target = self.caller.search(self.args.strip())
if not target:
return
string = ""
tstring = ""
ostring = ""
if cmdstring in ("thrust", "pierce", "stab"):
hit = float(self.obj.db.hit) * 0.7 # modified due to stab
damage = self.obj.db.damage * 2 # modified due to stab
string = "You stab with %s. " % self.obj.key
tstring = "%s stabs at you with %s. " % (self.caller.key, self.obj.key)
ostring = "%s stabs at %s with %s. " % (self.caller.key, target.key, self.obj.key)
self.caller.db.combat_parry_mode = False
elif cmdstring in ("slash", "chop"):
            hit = float(self.obj.db.hit)  # unmodified for slash
            damage = self.obj.db.damage  # unmodified for slash
            string = "You slash with %s. " % self.obj.key
            tstring = "%s slashes at you with %s. " % (self.caller.key, self.obj.key)
            ostring = "%s slashes at %s with %s. " % (self.caller.key, target.key, self.obj.key)
self.caller.db.combat_parry_mode = False
else:
self.caller.msg("You fumble with your weapon, unsure of whether to stab, slash or parry ...")
self.caller.location.msg_contents("%s fumbles with their weapon." % self.caller, exclude=self.caller)
self.caller.db.combat_parry_mode = False
return
if target.db.combat_parry_mode:
# target is defensive; even harder to hit!
target.msg("{GYou defend, trying to avoid the attack.{n")
hit *= 0.5
if random.random() <= hit:
self.caller.msg(string + "{gIt's a hit!{n")
target.msg(tstring + "{rIt's a hit!{n")
self.caller.location.msg_contents(ostring + "It's a hit!", exclude=[target,self.caller])
# call enemy hook
if hasattr(target, "at_hit"):
# should return True if target is defeated, False otherwise.
return target.at_hit(self.obj, self.caller, damage)
elif target.db.health:
target.db.health -= damage
else:
# sorry, impossible to fight this enemy ...
self.caller.msg("The enemy seems unaffacted.")
return False
else:
self.caller.msg(string + "{rYou miss.{n")
target.msg(tstring + "{gThey miss you.{n")
self.caller.location.msg_contents(ostring + "They miss.", exclude=[target, self.caller])
class CmdSetWeapon(CmdSet):
"Holds the attack command."
def at_cmdset_creation(self):
"called at first object creation."
self.add(CmdAttack())
class Weapon(TutorialObject):
"""
This defines a bladed weapon.
Important attributes (set at creation):
hit - chance to hit (0-1)
parry - chance to parry (0-1)
damage - base damage given (modified by hit success and
type of attack) (0-10)
"""
def at_object_creation(self):
"Called at first creation of the object"
super(Weapon, self).at_object_creation()
self.db.hit = 0.4 # hit chance
self.db.parry = 0.8 # parry chance
self.db.damage = 1.0
self.db.magic = False
self.cmdset.add_default(CmdSetWeapon, permanent=True)
def reset(self):
"""
When reset, the weapon is simply deleted, unless it has a place
to return to.
"""
if self.location.has_player and self.home == self.location:
self.location.msg_contents("%s suddenly and magically fades into nothingness, as if it was never there ..." % self.key)
self.delete()
else:
self.location = self.home
#------------------------------------------------------------
#
# Weapon rack - spawns weapons
#
# This is a spawner mechanism that creates custom weapons from a
# spawner prototype dictionary. Note that we only create a single typeclass
# (Weapon) yet customize all these different weapons using the spawner.
# The spawner dictionaries could easily sit in separate modules and be
# used to create unique and interesting variations of typeclassed
# objects.
#
#------------------------------------------------------------
WEAPON_PROTOTYPES = {
"weapon": {
"typeclass": "evennia.contrib.tutorial_world.objects.Weapon",
"key": "Weapon",
"hit": 0.2,
"parry": 0.2,
"damage": 1.0,
"magic": False,
"desc": "A generic blade."},
"knife": {
"prototype": "weapon",
"aliases": "sword",
"key": "Kitchen knife",
"desc":"A rusty kitchen knife. Better than nothing.",
"damage": 3},
"dagger": {
"prototype": "knife",
"key": "Rusty dagger",
"aliases": ["knife", "dagger"],
"desc": "A double-edged dagger with a nicked edge and a wooden handle.",
"hit": 0.25},
"sword": {
"prototype": "weapon",
"key": "Rusty sword",
"aliases": ["sword"],
"desc": "A rusty shortsword. It has a leather-wrapped handle covered i food grease.",
"hit": 0.3,
"damage": 5,
"parry": 0.5},
"club": {
"prototype": "weapon",
"key":"Club",
"desc": "A heavy wooden club, little more than a heavy branch.",
"hit": 0.4,
"damage": 6,
"parry": 0.2},
"axe": {
"prototype": "weapon",
"key":"Axe",
"desc": "A woodcutter's axe with a keen edge.",
"hit": 0.4,
"damage": 6,
"parry": 0.2},
"ornate longsword": {
"prototype":"sword",
"key": "Ornate longsword",
"desc": "A fine longsword with some swirling patterns on the handle.",
"hit": 0.5,
"magic": True,
"damage": 5},
"warhammer": {
"prototype": "club",
"key": "Silver Warhammer",
"aliases": ["hammer", "warhammer", "war"],
"desc": "A heavy war hammer with silver ornaments. This huge weapon causes massive damage - if you can hit.",
"hit": 0.4,
"magic": True,
"damage": 8},
"rune axe": {
"prototype": "axe",
"key": "Runeaxe",
"aliases": ["axe"],
"hit": 0.4,
"magic": True,
"damage": 6},
"thruning": {
"prototype": "ornate longsword",
"key": "Broadsword named Thruning",
"desc": "This heavy bladed weapon is marked with the name 'Thruning'. It is very powerful in skilled hands.",
"hit": 0.6,
"parry": 0.6,
"damage": 7},
"slayer waraxe": {
"prototype": "rune axe",
"key": "Slayer waraxe",
"aliases": ["waraxe", "war", "slayer"],
"desc": "A huge double-bladed axe marked with the runes for 'Slayer'. It has more runic inscriptions on its head, which you cannot decipher.",
"hit": 0.7,
"damage": 8},
"ghostblade": {
"prototype": "ornate longsword",
"key": "The Ghostblade",
"aliases": ["blade", "ghost"],
"desc": "This massive sword is large as you are tall, yet seems to weigh almost nothing. It's almost like it's not really there.",
"hit": 0.9,
"parry": 0.8,
"damage": 10},
"hawkblade": {
"prototype": "ghostblade",
"key": "The Hawblade",
"aliases": ["hawk", "blade"],
"desc": "The weapon of a long-dead heroine and a more civilized age, the hawk-shaped hilt of this blade almost has a life of its own.",
"hit": 0.85,
"parry": 0.7,
"damage": 11}
}
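# Editor's usage sketch (see also WeaponRack.produce_weapon below): the
# spawner resolves the "prototype" parent chain, so "dagger" inherits from
# "knife", which in turn inherits from "weapon":
#
#     wpn = spawn(WEAPON_PROTOTYPES["dagger"],
#                 prototype_parents=WEAPON_PROTOTYPES)[0]
#     wpn.location = caller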
class CmdGetWeapon(Command):
"""
Usage:
get weapon
This will try to obtain a weapon from the container.
"""
key = "get weapon"
aliases = "get weapon"
locks = "cmd:all()"
    help_category = "TutorialWorld"
def func(self):
"""
Get a weapon from the container. It will
itself handle all messages.
"""
self.obj.produce_weapon(self.caller)
class CmdSetWeaponRack(CmdSet):
"""
The cmdset for the rack.
"""
key = "weaponrack_cmdset"
def at_cmdset_creation(self):
"Called at first creation of cmdset"
self.add(CmdGetWeapon())
class WeaponRack(TutorialObject):
"""
This object represents a weapon store. When people use the
"get weapon" command on this rack, it will produce one
random weapon from among those registered to exist
on it. This will also set a property on the character
to make sure they can't get more than one at a time.
Attributes to set on this object:
available_weapons: list of prototype-keys from
WEAPON_PROTOTYPES, the weapons available in this rack.
no_more_weapons_msg - error message to return to players
        who already got one weapon from the rack and try to
grab another one.
"""
def at_object_creation(self):
"""
called at creation
"""
self.cmdset.add_default(CmdSetWeaponRack, permanent=True)
self.db.rack_id = "weaponrack_1"
# these are prototype names from the prototype
# dictionary above.
self.db.get_weapon_msg = "You find {c%s{n."
        self.db.no_more_weapons_msg = "You find nothing else of use."
self.db.available_weapons = ["knife", "dagger",
"sword", "club"]
def produce_weapon(self, caller):
"""
This will produce a new weapon from the rack,
assuming the caller hasn't already gotten one. When
doing so, the caller will get Tagged with the id
of this rack, to make sure they cannot keep
pulling weapons from it indefinitely.
"""
rack_id = self.db.rack_id
if caller.tags.get(rack_id, category="tutorial_world"):
caller.msg(self.db.no_more_weapons_msg)
else:
prototype = random.choice(self.db.available_weapons)
# use the spawner to create a new Weapon from the
# spawner dictionary, tag the caller
wpn = spawn(WEAPON_PROTOTYPES[prototype], prototype_parents=WEAPON_PROTOTYPES)[0]
caller.tags.add(rack_id, category="tutorial_world")
wpn.location = caller
caller.msg(self.db.get_weapon_msg % wpn.key)
| ergodicbreak/evennia | evennia/contrib/tutorial_world/objects.py | Python | bsd-3-clause | 40,255 |
# -*- coding: utf-8 -*-
class ResourceOptions(object):
"""
A configuration class for ``Resource``.
Provides sane defaults and the logic needed to augment these settings with
the internal ``class Meta`` used on ``Resource`` subclasses.
"""
allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
list_allowed_methods = None
detail_allowed_methods = None
# limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
urlconf_namespace = None
default_format = 'application/json'
filtering = {}
ordering = []
object_class = None
queryset = None
fields = []
excludes = []
include_resource_uri = True
include_absolute_url = False
always_return_data = False
api_name = None
resource_name = None
resp_message = 'Good!'
resp_script = None
resp_success = True
resp_template = 'adminpanel/ap-test.html'
resp_type = 'tpl'
resp_render_data = None
make_function = None
def __new__(cls, meta=None):
overrides = {}
# Handle overrides.
if meta:
for override_name in dir(meta):
# No internals please.
if not override_name.startswith('_'):
overrides[override_name] = getattr(meta, override_name)
allowed_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch'])
if overrides.get('list_allowed_methods', None) is None:
overrides['list_allowed_methods'] = allowed_methods
if overrides.get('detail_allowed_methods', None) is None:
overrides['detail_allowed_methods'] = allowed_methods
return object.__new__(type('ResourceOptions', (cls,), overrides))
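if __name__ == '__main__':
    # Editor's usage sketch, not part of the original module: feed a
    # hypothetical inner ``class Meta`` through ResourceOptions and check
    # that the per-method defaults are derived from allowed_methods.
    class Meta:
        allowed_methods = ['get']
        resource_name = 'demo'
    opts = ResourceOptions(Meta)
    assert opts.list_allowed_methods == ['get']
    assert opts.detail_allowed_methods == ['get']
    assert opts.resource_name == 'demo'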
| codeboy/coddy-sitetools | sitetools/coddy_api/api_resource.py | Python | bsd-3-clause | 1,729 |
from rest_framework.exceptions import MethodNotAllowed
from rest_framework.permissions import SAFE_METHODS, BasePermission
from olympia.amo import permissions
from olympia.access import acl
# Most of these classes come from zamboni, check out
# https://github.com/mozilla/zamboni/blob/master/mkt/api/permissions.py for
# more.
class GroupPermission(BasePermission):
"""
Allow access depending on the result of action_allowed_user().
"""
def __init__(self, permission):
self.permission = permission
def has_permission(self, request, view):
if not request.user.is_authenticated:
return False
return acl.action_allowed_user(request.user, self.permission)
def has_object_permission(self, request, view, obj):
return self.has_permission(request, view)
def __call__(self, *a):
"""
ignore DRF's nonsensical need to call this object.
"""
return self
class AnyOf(BasePermission):
"""
Takes multiple permission objects and succeeds if any single one does.
"""
def __init__(self, *perms):
# DRF calls the items in permission_classes, might as well do
# it here too.
self.perms = [p() for p in perms]
def has_permission(self, request, view):
return any(perm.has_permission(request, view) for perm in self.perms)
def has_object_permission(self, request, view, obj):
# This method *must* call `has_permission` for each
# sub-permission since the default implementation of
# `has_object_permission` returns True unconditionally, and
# some permission objects might not override it.
return any((perm.has_permission(request, view) and
perm.has_object_permission(request, view, obj))
for perm in self.perms)
def __call__(self):
return self
class AllOf(BasePermission):
"""
Takes multiple permission objects and succeeds if all of them do.
"""
def __init__(self, *perms):
# DRF calls the items in permission_classes, might as well do
# it here too.
self.perms = [p() for p in perms]
def has_permission(self, request, view):
return all(perm.has_permission(request, view) for perm in self.perms)
def has_object_permission(self, request, view, obj):
# This method *must* call `has_permission` for each
# sub-permission since the default implementation of
# `has_object_permission` returns True unconditionally, and
# some permission objects might not override it.
return all((perm.has_permission(request, view) and
perm.has_object_permission(request, view, obj))
for perm in self.perms)
def __call__(self):
return self
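# Editor's usage sketch: the combinators above compose like any other DRF
# permission class, e.g.
#
#     permission_classes = [
#         AnyOf(AllowOwner,
#               GroupPermission(permissions.REVIEWER_TOOLS_VIEW)),
#     ]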
class AllowNone(BasePermission):
def has_permission(self, request, view):
return False
def has_object_permission(self, request, view, obj):
return False
class AllowAddonAuthor(BasePermission):
"""Allow access if the user is in the object authors."""
def has_permission(self, request, view):
return request.user.is_authenticated
def has_object_permission(self, request, view, obj):
return obj.authors.filter(pk=request.user.pk).exists()
class AllowOwner(BasePermission):
"""
Permission class to use when you are dealing with a model instance that has
a "user" FK pointing to an UserProfile, and you want only the corresponding
user to be able to access your instance.
"""
def has_permission(self, request, view):
return request.user.is_authenticated
def has_object_permission(self, request, view, obj):
return ((obj == request.user) or
(getattr(obj, 'user', None) == request.user))
class AllowNotOwner(AllowOwner):
"""
    Permission class to use when you are dealing with a model instance that has
    a "user" FK pointing to an UserProfile, and you want every user except the
    corresponding one to be able to access your instance.
"""
def has_object_permission(self, request, view, obj):
return not super().has_object_permission(request, view, obj)
class AllowReviewer(BasePermission):
"""Allow reviewers to access add-ons with listed versions.
The user logged in must either be making a read-only request and have the
'ReviewerTools:View' permission, or simply be a reviewer or admin.
The definition of an add-on reviewer depends on the object:
- For static themes, it's someone with 'Addons:ThemeReview'
- For personas, it's someone with 'Personas:Review'
- For the rest of the add-ons, is someone who has either
'Addons:Review', 'Addons:PostReview' or 'Addons:ContentReview'
permission.
"""
def has_permission(self, request, view):
return request.user.is_authenticated
def has_object_permission(self, request, view, obj):
can_access_because_viewer = (
request.method in SAFE_METHODS and
acl.action_allowed(request, permissions.REVIEWER_TOOLS_VIEW))
can_access_because_listed_reviewer = (
obj.has_listed_versions() and acl.is_reviewer(request, obj))
return can_access_because_viewer or can_access_because_listed_reviewer
class AllowReviewerUnlisted(AllowReviewer):
"""Allow unlisted reviewers to access add-ons with unlisted versions, or
add-ons with no listed versions at all.
Like reviewers.decorators.unlisted_addons_reviewer_required, but as a
permission class and not a decorator.
    The user logged in must be an unlisted add-on reviewer or admin.
An unlisted add-on reviewer is someone who is in the group with the
following permission: 'Addons:ReviewUnlisted'.
"""
def has_permission(self, request, view):
return acl.check_unlisted_addons_reviewer(request)
def has_object_permission(self, request, view, obj):
return (
(obj.has_unlisted_versions() or not obj.has_listed_versions()) and
self.has_permission(request, view))
class AllowAnyKindOfReviewer(BasePermission):
"""Allow access to any kind of reviewer. Use only for views that don't
alter add-on data.
Allows access to users with any of those permissions:
- ReviewerTools:View
- Addons:Review
- Addons:ReviewUnlisted
- Addons:ContentReview
- Addons:PostReview
- Personas:Review
Uses acl.is_user_any_kind_of_reviewer() behind the scenes.
See also any_reviewer_required() decorator.
"""
def has_permission(self, request, view):
return acl.is_user_any_kind_of_reviewer(request.user)
def has_object_permission(self, request, view, obj):
return self.has_permission(request, view)
class AllowIfPublic(BasePermission):
"""
Allow access when the object's is_public() method returns True.
"""
def has_permission(self, request, view):
return True
def has_object_permission(self, request, view, obj):
return (obj.is_public() and self.has_permission(request, view))
class AllowReadOnlyIfPublic(AllowIfPublic):
"""
Allow access when the object's is_public() method returns True and the
request HTTP method is GET/OPTIONS/HEAD.
"""
def has_permission(self, request, view):
return request.method in SAFE_METHODS
class ByHttpMethod(BasePermission):
"""
Permission class allowing you to define different permissions depending on
the HTTP method used.
    method_permissions is a dict with the lowercase http method names as keys,
permission classes (not instantiated, like DRF expects them) as values.
Warning: you probably want to define AllowAny for 'options' if you are
using a CORS-enabled endpoint.
If using this permission, any method that does not have a permission set
will raise MethodNotAllowed.
"""
def __init__(self, method_permissions):
# Initialize the permissions by calling them like DRF does.
self.method_permissions = {
method: perm() for method, perm in method_permissions.items()}
def has_permission(self, request, view):
try:
perm = self.method_permissions[request.method.lower()]
except KeyError:
raise MethodNotAllowed(request.method)
return perm.has_permission(request, view)
def has_object_permission(self, request, view, obj):
try:
perm = self.method_permissions[request.method.lower()]
except KeyError:
raise MethodNotAllowed(request.method)
return perm.has_object_permission(request, view, obj)
def __call__(self):
return self
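# Editor's usage sketch mirroring the ByHttpMethod docstring above; the
# permission choices (and AllowAny, from rest_framework.permissions) are
# illustrative only:
#
#     permission_classes = [ByHttpMethod({
#         'options': AllowAny,  # needed for CORS preflight
#         'get': AllowAny,
#         'post': AllowOwner,
#     })]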
class AllowRelatedObjectPermissions(BasePermission):
"""
Permission class that tests given permissions against a related object.
The first argument, `related_property`, is the property that will be used
to find the related object to test the permissions against.
The second argument, `related_permissions`, is the list of permissions
(behaving like DRF default implementation: all need to pass to be allowed).
"""
def __init__(self, related_property, related_permissions):
self.perms = [p() for p in related_permissions]
self.related_property = related_property
def has_permission(self, request, view):
return all(perm.has_permission(request, view) for perm in self.perms)
def has_object_permission(self, request, view, obj):
related_obj = getattr(obj, self.related_property)
return all(perm.has_object_permission(request, view, related_obj)
for perm in self.perms)
def __call__(self):
return self
class PreventActionPermission(BasePermission):
"""
Allow access except for a given action(s).
"""
def __init__(self, actions):
if not isinstance(actions, (list, tuple)):
actions = [actions]
self.actions = actions
def has_permission(self, request, view):
return getattr(view, 'action', '') not in self.actions
def has_object_permission(self, request, view, obj):
return True
def __call__(self, *a):
"""
ignore DRF's nonsensical need to call this object.
"""
return self
| kumar303/addons-server | src/olympia/api/permissions.py | Python | bsd-3-clause | 10,291 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_site(apps, schema_editor):
# We get the model from the versioned app registry;
# if we directly import it, it'll be the wrong version
Site = apps.get_model('sites', 'Site')
db_alias = schema_editor.connection.alias
Site.objects.using(db_alias).get_or_create(
pk=1,
        defaults={
"domain": "us.pycon.org",
"name": "PyCon 2017"
}
)
class Migration(migrations.Migration):
dependencies = [
('conference', '0001_initial'),
('sites', '0001_initial'),
]
operations = [
migrations.RunPython(create_site),
]
| SaptakS/pune.pycon.org | symposion/conference/migrations/0002_create_site.py | Python | bsd-3-clause | 753 |
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfbase/_fontdata.py
#$Header $
__version__=''' $Id: _fontdata.py 3052 2007-03-07 14:04:49Z rgbecker $ '''
__doc__="""
database of font related things
    standardFonts       tuple of the 14 standard string font names
    standardEncodings   tuple of the known standard encoding names
    encodings           a mapping object from standard encoding names (and
                        minor variants) to the encoding vectors, i.e. the
                        tuples of string glyph names
    widthsByFontGlyph   fontname x glyphname --> width of glyph
    widthVectorsByFont  fontName -> vector of widths
"""
import UserDict, os, sys
# mapping of name to width vector, starts empty until fonts are added
# e.g. widths['Courier'] = [...600,600,600,...]
widthVectorsByFont = {}
fontsByName = {}
fontsByBaseEnc = {}
# this is a list of the standard 14 font names in Acrobat Reader
standardFonts = (
'Courier', 'Courier-Bold', 'Courier-Oblique', 'Courier-BoldOblique',
'Helvetica', 'Helvetica-Bold', 'Helvetica-Oblique', 'Helvetica-BoldOblique',
'Times-Roman', 'Times-Bold', 'Times-Italic', 'Times-BoldItalic',
'Symbol','ZapfDingbats')
standardFontAttributes = {
#family, bold, italic defined for basic ones
'Courier':('Courier',0,0),
'Courier-Bold':('Courier',1,0),
'Courier-Oblique':('Courier',0,1),
'Courier-BoldOblique':('Courier',1,1),
'Helvetica':('Helvetica',0,0),
'Helvetica-Bold':('Helvetica',1,0),
'Helvetica-Oblique':('Helvetica',0,1),
'Helvetica-BoldOblique':('Helvetica',1,1),
'Times-Roman':('Times-Roman',0,0),
'Times-Bold':('Times-Roman',1,0),
'Times-Italic':('Times-Roman',0,1),
'Times-BoldItalic':('Times-Roman',1,1),
'Symbol':('Symbol',0,0),
'ZapfDingbats':('ZapfDingbats',0,0)
}
#this maps fontnames to the equivalent filename root.
_font2fnrMapWin32 = {
'symbol': 'Sy______',
'zapfdingbats': 'Zd______',
'helvetica': '_a______',
'helvetica-bold': '_ab_____',
'helvetica-boldoblique': '_abi____',
'helvetica-oblique': '_ai_____',
'times-bold': '_eb_____',
'times-bolditalic': '_ebi____',
'times-italic': '_ei_____',
'times-roman': '_er_____',
'courier-bold': 'cob_____',
'courier-boldoblique': 'cobo____',
'courier': 'com_____',
'courier-oblique': 'coo_____',
}
if sys.platform in ('linux2',):
_font2fnrMapLinux2 ={
'symbol': 'Symbol',
'zapfdingbats': 'ZapfDingbats',
'helvetica': 'Arial',
'helvetica-bold': 'Arial-Bold',
'helvetica-boldoblique': 'Arial-BoldItalic',
'helvetica-oblique': 'Arial-Italic',
'times-bold': 'TimesNewRoman-Bold',
'times-bolditalic':'TimesNewRoman-BoldItalic',
'times-italic': 'TimesNewRoman-Italic',
'times-roman': 'TimesNewRoman',
'courier-bold': 'Courier-Bold',
'courier-boldoblique': 'Courier-BoldOblique',
'courier': 'Courier',
'courier-oblique': 'Courier-Oblique',
}
_font2fnrMap = _font2fnrMapLinux2
for k, v in _font2fnrMap.items():
if k in _font2fnrMapWin32.keys():
_font2fnrMapWin32[v.lower()] = _font2fnrMapWin32[k]
del k, v
else:
_font2fnrMap = _font2fnrMapWin32
def _findFNR(fontName):
return _font2fnrMap[fontName.lower()]
from reportlab.rl_config import T1SearchPath
from reportlab.lib.utils import rl_isfile
def _searchT1Dirs(n,rl_isfile=rl_isfile,T1SearchPath=T1SearchPath):
assert T1SearchPath!=[], "No Type-1 font search path"
for d in T1SearchPath:
f = os.path.join(d,n)
if rl_isfile(f): return f
return None
del T1SearchPath, rl_isfile
def findT1File(fontName,ext='.pfb'):
if sys.platform in ('linux2',) and ext=='.pfb':
try:
f = _searchT1Dirs(_findFNR(fontName))
if f: return f
except:
pass
try:
f = _searchT1Dirs(_font2fnrMapWin32[fontName.lower()]+ext)
if f: return f
except:
pass
return _searchT1Dirs(_findFNR(fontName)+ext)
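# Usage sketch: resolve a Type-1 font file by searching the directories in
# reportlab.rl_config.T1SearchPath; returns None when nothing matches.
#   findT1File('Helvetica')          # e.g. a path ending in '_a______.pfb'
#   findT1File('Helvetica', '.afm')  # metrics file instead of the outlines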
# this lists the predefined font encodings - WinAnsi and MacRoman, plus the
# Symbol, ZapfDingbats, PDFDoc and MacExpert vectors defined further down.
# StandardEncoding means something special.
standardEncodings = ('WinAnsiEncoding','MacRomanEncoding','StandardEncoding','SymbolEncoding','ZapfDingbatsEncoding','PDFDocEncoding', 'MacExpertEncoding')
#this is the global mapping of standard encodings to name vectors
class _Name2StandardEncodingMap(UserDict.UserDict):
'''Trivial fake dictionary with some [] magic'''
_XMap = {'winansi':'WinAnsiEncoding','macroman': 'MacRomanEncoding','standard':'StandardEncoding','symbol':'SymbolEncoding', 'zapfdingbats':'ZapfDingbatsEncoding','pdfdoc':'PDFDocEncoding', 'macexpert':'MacExpertEncoding'}
def __setitem__(self,x,v):
y = x.lower()
if y[-8:]=='encoding': y = y[:-8]
y = self._XMap[y]
if y in self.keys(): raise IndexError, 'Encoding %s is already set' % y
self.data[y] = v
def __getitem__(self,x):
y = x.lower()
if y[-8:]=='encoding': y = y[:-8]
y = self._XMap[y]
return self.data[y]
encodings = _Name2StandardEncodingMap()
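# The [] magic above makes lookups case-insensitive and the 'Encoding' suffix
# optional: encodings['winansi'], encodings['WinAnsi'] and
# encodings['WinAnsiEncoding'] all return the same vector, and assigning to an
# already-populated encoding raises IndexError.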
encodings['WinAnsiEncoding'] = (
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, 'space', 'exclam',
'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand',
'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma',
'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four',
'five', 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less',
'equal', 'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F',
'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', 'bracketright',
'asciicircum', 'underscore', 'grave', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright',
'asciitilde', 'bullet', 'Euro', 'bullet', 'quotesinglbase', 'florin',
'quotedblbase', 'ellipsis', 'dagger', 'daggerdbl', 'circumflex',
'perthousand', 'Scaron', 'guilsinglleft', 'OE', 'bullet', 'Zcaron',
'bullet', 'bullet', 'quoteleft', 'quoteright', 'quotedblleft',
'quotedblright', 'bullet', 'endash', 'emdash', 'tilde', 'trademark',
'scaron', 'guilsinglright', 'oe', 'bullet', 'zcaron', 'Ydieresis',
'space', 'exclamdown', 'cent', 'sterling', 'currency', 'yen', 'brokenbar',
'section', 'dieresis', 'copyright', 'ordfeminine', 'guillemotleft',
'logicalnot', 'hyphen', 'registered', 'macron', 'degree', 'plusminus',
'twosuperior', 'threesuperior', 'acute', 'mu', 'paragraph', 'periodcentered',
'cedilla', 'onesuperior', 'ordmasculine', 'guillemotright', 'onequarter',
'onehalf', 'threequarters', 'questiondown', 'Agrave', 'Aacute',
'Acircumflex', 'Atilde', 'Adieresis', 'Aring', 'AE', 'Ccedilla',
'Egrave', 'Eacute', 'Ecircumflex', 'Edieresis', 'Igrave', 'Iacute',
'Icircumflex', 'Idieresis', 'Eth', 'Ntilde', 'Ograve', 'Oacute',
'Ocircumflex', 'Otilde', 'Odieresis', 'multiply', 'Oslash', 'Ugrave',
'Uacute', 'Ucircumflex', 'Udieresis', 'Yacute', 'Thorn', 'germandbls',
'agrave', 'aacute', 'acircumflex', 'atilde', 'adieresis', 'aring', 'ae',
'ccedilla', 'egrave', 'eacute', 'ecircumflex', 'edieresis', 'igrave',
'iacute', 'icircumflex', 'idieresis', 'eth', 'ntilde', 'ograve', 'oacute',
'ocircumflex', 'otilde', 'odieresis', 'divide', 'oslash', 'ugrave', 'uacute',
'ucircumflex', 'udieresis', 'yacute', 'thorn', 'ydieresis')
encodings['MacRomanEncoding'] = (
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, 'space', 'exclam',
'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand',
'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma',
'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four',
'five', 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less',
'equal', 'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F',
'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', 'bracketright',
'asciicircum', 'underscore', 'grave', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright',
'asciitilde', None, 'Adieresis', 'Aring', 'Ccedilla', 'Eacute',
'Ntilde', 'Odieresis', 'Udieresis', 'aacute', 'agrave', 'acircumflex',
'adieresis', 'atilde', 'aring', 'ccedilla', 'eacute', 'egrave',
'ecircumflex', 'edieresis', 'iacute', 'igrave', 'icircumflex',
'idieresis', 'ntilde', 'oacute', 'ograve', 'ocircumflex', 'odieresis',
'otilde', 'uacute', 'ugrave', 'ucircumflex', 'udieresis', 'dagger',
'degree', 'cent', 'sterling', 'section', 'bullet', 'paragraph',
'germandbls', 'registered', 'copyright', 'trademark', 'acute',
'dieresis', None, 'AE', 'Oslash', None, 'plusminus', None, None, 'yen',
'mu', None, None, None, None, None, 'ordfeminine', 'ordmasculine', None,
'ae', 'oslash', 'questiondown', 'exclamdown', 'logicalnot', None, 'florin',
None, None, 'guillemotleft', 'guillemotright', 'ellipsis', 'space', 'Agrave',
'Atilde', 'Otilde', 'OE', 'oe', 'endash', 'emdash', 'quotedblleft',
'quotedblright', 'quoteleft', 'quoteright', 'divide', None, 'ydieresis',
'Ydieresis', 'fraction', 'currency', 'guilsinglleft', 'guilsinglright',
'fi', 'fl', 'daggerdbl', 'periodcentered', 'quotesinglbase',
'quotedblbase', 'perthousand', 'Acircumflex', 'Ecircumflex', 'Aacute',
'Edieresis', 'Egrave', 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave',
'Oacute', 'Ocircumflex', None, 'Ograve', 'Uacute', 'Ucircumflex',
'Ugrave', 'dotlessi', 'circumflex', 'tilde', 'macron', 'breve',
'dotaccent', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron')
encodings['SymbolEncoding']=(None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 'space',
'exclam', 'universal', 'numbersign', 'existential', 'percent', 'ampersand', 'suchthat',
'parenleft', 'parenright', 'asteriskmath', 'plus', 'comma', 'minus', 'period', 'slash', 'zero',
'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon',
'less', 'equal', 'greater', 'question', 'congruent', 'Alpha', 'Beta', 'Chi', 'Delta', 'Epsilon',
'Phi', 'Gamma', 'Eta', 'Iota', 'theta1', 'Kappa', 'Lambda', 'Mu', 'Nu', 'Omicron', 'Pi', 'Theta',
'Rho', 'Sigma', 'Tau', 'Upsilon', 'sigma1', 'Omega', 'Xi', 'Psi', 'Zeta', 'bracketleft',
'therefore', 'bracketright', 'perpendicular', 'underscore', 'radicalex', 'alpha', 'beta', 'chi',
'delta', 'epsilon', 'phi', 'gamma', 'eta', 'iota', 'phi1', 'kappa', 'lambda', 'mu', 'nu',
'omicron', 'pi', 'theta', 'rho', 'sigma', 'tau', 'upsilon', 'omega1', 'omega', 'xi', 'psi', 'zeta',
'braceleft', 'bar', 'braceright', 'similar', None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, 'Euro', 'Upsilon1', 'minute', 'lessequal',
'fraction', 'infinity', 'florin', 'club', 'diamond', 'heart', 'spade', 'arrowboth', 'arrowleft',
'arrowup', 'arrowright', 'arrowdown', 'degree', 'plusminus', 'second', 'greaterequal', 'multiply',
'proportional', 'partialdiff', 'bullet', 'divide', 'notequal', 'equivalence', 'approxequal',
'ellipsis', 'arrowvertex', 'arrowhorizex', 'carriagereturn', 'aleph', 'Ifraktur', 'Rfraktur',
'weierstrass', 'circlemultiply', 'circleplus', 'emptyset', 'intersection', 'union',
'propersuperset', 'reflexsuperset', 'notsubset', 'propersubset', 'reflexsubset', 'element',
'notelement', 'angle', 'gradient', 'registerserif', 'copyrightserif', 'trademarkserif', 'product',
'radical', 'dotmath', 'logicalnot', 'logicaland', 'logicalor', 'arrowdblboth', 'arrowdblleft',
'arrowdblup', 'arrowdblright', 'arrowdbldown', 'lozenge', 'angleleft', 'registersans',
'copyrightsans', 'trademarksans', 'summation', 'parenlefttp', 'parenleftex', 'parenleftbt',
'bracketlefttp', 'bracketleftex', 'bracketleftbt', 'bracelefttp', 'braceleftmid', 'braceleftbt',
'braceex', None, 'angleright', 'integral', 'integraltp', 'integralex', 'integralbt',
'parenrighttp', 'parenrightex', 'parenrightbt', 'bracketrighttp', 'bracketrightex',
'bracketrightbt', 'bracerighttp', 'bracerightmid', 'bracerightbt', None)
encodings['ZapfDingbatsEncoding'] = ( None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
'space', 'a1', 'a2', 'a202', 'a3', 'a4', 'a5', 'a119', 'a118', 'a117', 'a11', 'a12', 'a13', 'a14',
'a15', 'a16', 'a105', 'a17', 'a18', 'a19', 'a20', 'a21', 'a22', 'a23', 'a24', 'a25', 'a26', 'a27',
'a28', 'a6', 'a7', 'a8', 'a9', 'a10', 'a29', 'a30', 'a31', 'a32', 'a33', 'a34', 'a35', 'a36',
'a37', 'a38', 'a39', 'a40', 'a41', 'a42', 'a43', 'a44', 'a45', 'a46', 'a47', 'a48', 'a49', 'a50',
'a51', 'a52', 'a53', 'a54', 'a55', 'a56', 'a57', 'a58', 'a59', 'a60', 'a61', 'a62', 'a63', 'a64',
'a65', 'a66', 'a67', 'a68', 'a69', 'a70', 'a71', 'a72', 'a73', 'a74', 'a203', 'a75', 'a204', 'a76',
'a77', 'a78', 'a79', 'a81', 'a82', 'a83', 'a84', 'a97', 'a98', 'a99', 'a100', None, 'a89', 'a90',
'a93', 'a94', 'a91', 'a92', 'a205', 'a85', 'a206', 'a86', 'a87', 'a88', 'a95', 'a96', None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, 'a101', 'a102', 'a103', 'a104', 'a106', 'a107', 'a108', 'a112', 'a111', 'a110', 'a109',
'a120', 'a121', 'a122', 'a123', 'a124', 'a125', 'a126', 'a127', 'a128', 'a129', 'a130', 'a131',
'a132', 'a133', 'a134', 'a135', 'a136', 'a137', 'a138', 'a139', 'a140', 'a141', 'a142', 'a143',
'a144', 'a145', 'a146', 'a147', 'a148', 'a149', 'a150', 'a151', 'a152', 'a153', 'a154', 'a155',
'a156', 'a157', 'a158', 'a159', 'a160', 'a161', 'a163', 'a164', 'a196', 'a165', 'a192', 'a166',
'a167', 'a168', 'a169', 'a170', 'a171', 'a172', 'a173', 'a162', 'a174', 'a175', 'a176', 'a177',
'a178', 'a179', 'a193', 'a180', 'a199', 'a181', 'a200', 'a182', None, 'a201', 'a183', 'a184',
'a197', 'a185', 'a194', 'a198', 'a186', 'a195', 'a187', 'a188', 'a189', 'a190', 'a191', None)
encodings['StandardEncoding']=(None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,"space","exclam",
"quotedbl","numbersign","dollar","percent","ampersand","quoteright","parenleft","parenright","asterisk","plus",
"comma","hyphen","period","slash","zero","one","two","three","four","five","six","seven","eight","nine","colon",
"semicolon","less","equal","greater","question","at","A","B","C","D","E","F","G","H","I","J","K","L","M","N","O",
"P","Q","R","S","T","U","V","W","X","Y","Z","bracketleft","backslash","bracketright","asciicircum","underscore",
"quoteleft","a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y",
"z","braceleft","bar","braceright","asciitilde",None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,
None,None,None,"exclamdown","cent","sterling","fraction","yen","florin","section","currency","quotesingle","quotedblleft",
"guillemotleft","guilsinglleft","guilsinglright","fi","fl",None,"endash","dagger","daggerdbl","periodcentered",None,
"paragraph","bullet","quotesinglbase","quotedblbase","quotedblright","guillemotright","ellipsis","perthousand",
None,"questiondown",None,"grave","acute","circumflex","tilde","macron","breve","dotaccent","dieresis",None,"ring",
"cedilla",None,"hungarumlaut","ogonek","caron","emdash",None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,"AE",None,"ordfeminine",
None,None,None,None,"Lslash","Oslash","OE","ordmasculine",None,None,None,None,None,"ae",None,None,None,"dotlessi",None,None,"lslash","oslash",
"oe","germandbls",None,None,None,None)
encodings['PDFDocEncoding']=(None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,
None,None,None,None,None,"breve","caron","circumflex",
"dotaccent","hungarumlaut","ogonek","ring","tilde","space","exclam","quotedbl","numbersign","dollar","percent",
"ampersand","quotesingle","parenleft","parenright","asterisk","plus","comma","hyphen","period","slash","zero",
"one","two","three","four","five","six","seven","eight","nine","colon","semicolon","less","equal","greater",
"question","at","A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X",
"Y","Z","bracketleft","backslash","bracketright","asciicircum","underscore","grave","a","b","c","d","e","f","g",
"h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z","braceleft","bar","braceright",
"asciitilde",None,"bullet","dagger","daggerdbl","ellipsis","emdash","endash","florin","fraction","guilsinglleft",
"guilsinglright","minus","perthousand","quotedblbase","quotedblleft","quotedblright","quoteleft","quoteright",
"quotesinglbase","trademark","fi","fl","Lslash","OE","Scaron","Ydieresis","Zcaron","dotlessi","lslash","oe",
"scaron","zcaron",None,"Euro","exclamdown","cent","sterling","currency","yen","brokenbar","section","dieresis",
"copyright","ordfeminine","guillemotleft","logicalnot",None,"registered","macron","degree","plusminus","twosuperior",
"threesuperior","acute","mu","paragraph","periodcentered","cedilla","onesuperior","ordmasculine","guillemotright",
"onequarter","onehalf","threequarters","questiondown","Agrave","Aacute","Acircumflex","Atilde","Adieresis","Aring",
"AE","Ccedilla","Egrave","Eacute","Ecircumflex","Edieresis","Igrave","Iacute","Icircumflex","Idieresis","Eth",
"Ntilde","Ograve","Oacute","Ocircumflex","Otilde","Odieresis","multiply","Oslash","Ugrave","Uacute","Ucircumflex",
"Udieresis","Yacute","Thorn","germandbls","agrave","aacute","acircumflex","atilde","adieresis","aring","ae",
"ccedilla","egrave","eacute","ecircumflex","edieresis","igrave","iacute","icircumflex","idieresis","eth","ntilde",
"ograve","oacute","ocircumflex","otilde","odieresis","divide","oslash","ugrave","uacute","ucircumflex","udieresis",
"yacute","thorn","ydieresis")
encodings['MacExpertEncoding'] = (None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
'space', 'exclamsmall', 'Hungarumlautsmall', 'centoldstyle', 'dollaroldstyle', 'dollarsuperior', 'ampersandsmall',
'Acutesmall', 'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader', 'comma', 'hyphen',
'period', 'fraction', 'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle',
'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle', 'nineoldstyle', 'colon', 'semicolon', None,
'threequartersemdash', None, 'questionsmall', None, None, None, None, 'Ethsmall', None, None, 'onequarter',
'onehalf', 'threequarters', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths', 'onethird', 'twothirds',
None, None, None, None, None, None, 'ff', 'fi', 'fl', 'ffi', 'ffl', 'parenleftinferior', None,
'parenrightinferior', 'Circumflexsmall', 'hypheninferior', 'Gravesmall', 'Asmall', 'Bsmall', 'Csmall', 'Dsmall',
'Esmall', 'Fsmall', 'Gsmall', 'Hsmall', 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall',
'Psmall', 'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', 'Ysmall', 'Zsmall',
'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall', None, None, 'asuperior', 'centsuperior', None, None, None,
None, 'Aacutesmall', 'Agravesmall', 'Acircumflexsmall', 'Adieresissmall', 'Atildesmall', 'Aringsmall',
'Ccedillasmall', 'Eacutesmall', 'Egravesmall', 'Ecircumflexsmall', 'Edieresissmall', 'Iacutesmall', 'Igravesmall',
'Icircumflexsmall', 'Idieresissmall', 'Ntildesmall', 'Oacutesmall', 'Ogravesmall', 'Ocircumflexsmall',
'Odieresissmall', 'Otildesmall', 'Uacutesmall', 'Ugravesmall', 'Ucircumflexsmall', 'Udieresissmall', None,
'eightsuperior', 'fourinferior', 'threeinferior', 'sixinferior', 'eightinferior', 'seveninferior', 'Scaronsmall',
None, 'centinferior', 'twoinferior', None, 'Dieresissmall', None, 'Caronsmall', 'osuperior', 'fiveinferior', None,
'commainferior', 'periodinferior', 'Yacutesmall', None, 'dollarinferior', None, None, 'Thornsmall', None,
'nineinferior', 'zeroinferior', 'Zcaronsmall', 'AEsmall', 'Oslashsmall', 'questiondownsmall', 'oneinferior',
'Lslashsmall', None, None, None, None, None, None, 'Cedillasmall', None, None, None, None, None, 'OEsmall',
'figuredash', 'hyphensuperior', None, None, None, None, 'exclamdownsmall', None, 'Ydieresissmall', None,
'onesuperior', 'twosuperior', 'threesuperior', 'foursuperior', 'fivesuperior', 'sixsuperior', 'sevensuperior',
'ninesuperior', 'zerosuperior', None, 'esuperior', 'rsuperior', 'tsuperior', None, None, 'isuperior', 'ssuperior',
'dsuperior', None, None, None, None, None, 'lsuperior', 'Ogoneksmall', 'Brevesmall', 'Macronsmall', 'bsuperior',
'nsuperior', 'msuperior', 'commasuperior', 'periodsuperior', 'Dotaccentsmall', 'Ringsmall', None, None, None, None)
ascent_descent = {
'Courier': (629, -157),
'Courier-Bold': (626, -142),
'Courier-BoldOblique': (626, -142),
'Courier-Oblique': (629, -157),
'Helvetica': (718, -207),
'Helvetica-Bold': (718, -207),
'Helvetica-BoldOblique': (718, -207),
'Helvetica-Oblique': (718, -207),
'Times-Roman': (683, -217),
'Times-Bold': (676, -205),
'Times-BoldItalic': (699, -205),
'Times-Italic': (683, -205),
'Symbol': (0, 0),
'ZapfDingbats': (0, 0)
}
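# A sketch of how these are used (assuming the 1/1000-of-pointsize convention
# noted above): at 12pt, Helvetica reaches about 718 * 12 / 1000 = 8.6pt above
# the baseline and 207 * 12 / 1000 = 2.5pt below it. Symbol and ZapfDingbats
# simply have no ascent/descent recorded here.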
# build this up one entry at a time to stay under JPython's 64k limit.
widthsByFontGlyph = {}
widthsByFontGlyph['Helvetica'] = {'A': 667,
'AE': 1000,
'Aacute': 667,
'Acircumflex': 667,
'Adieresis': 667,
'Agrave': 667,
'Aring': 667,
'Atilde': 667,
'B': 667,
'C': 722,
'Ccedilla': 722,
'D': 722,
'E': 667,
'Eacute': 667,
'Ecircumflex': 667,
'Edieresis': 667,
'Egrave': 667,
'Eth': 722,
'Euro': 556,
'F': 611,
'G': 778,
'H': 722,
'I': 278,
'Iacute': 278,
'Icircumflex': 278,
'Idieresis': 278,
'Igrave': 278,
'J': 500,
'K': 667,
'L': 556,
'Lslash': 556,
'M': 833,
'N': 722,
'Ntilde': 722,
'O': 778,
'OE': 1000,
'Oacute': 778,
'Ocircumflex': 778,
'Odieresis': 778,
'Ograve': 778,
'Oslash': 778,
'Otilde': 778,
'P': 667,
'Q': 778,
'R': 722,
'S': 667,
'Scaron': 667,
'T': 611,
'Thorn': 667,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 667,
'W': 944,
'X': 667,
'Y': 667,
'Yacute': 667,
'Ydieresis': 667,
'Z': 611,
'Zcaron': 611,
'a': 556,
'aacute': 556,
'acircumflex': 556,
'acute': 333,
'adieresis': 556,
'ae': 889,
'agrave': 556,
'ampersand': 667,
'aring': 556,
'asciicircum': 469,
'asciitilde': 584,
'asterisk': 389,
'at': 1015,
'atilde': 556,
'b': 556,
'backslash': 278,
'bar': 260,
'braceleft': 334,
'braceright': 334,
'bracketleft': 278,
'bracketright': 278,
'breve': 333,
'brokenbar': 260,
'bullet': 350,
'c': 500,
'caron': 333,
'ccedilla': 500,
'cedilla': 333,
'cent': 556,
'circumflex': 333,
'colon': 278,
'comma': 278,
'copyright': 737,
'currency': 556,
'd': 556,
'dagger': 556,
'daggerdbl': 556,
'degree': 400,
'dieresis': 333,
'divide': 584,
'dollar': 556,
'dotaccent': 333,
'dotlessi': 278,
'e': 556,
'eacute': 556,
'ecircumflex': 556,
'edieresis': 556,
'egrave': 556,
'eight': 556,
'ellipsis': 1000,
'emdash': 1000,
'endash': 556,
'equal': 584,
'eth': 556,
'exclam': 278,
'exclamdown': 333,
'f': 278,
'fi': 500,
'five': 556,
'fl': 500,
'florin': 556,
'four': 556,
'fraction': 167,
'g': 556,
'germandbls': 611,
'grave': 333,
'greater': 584,
'guillemotleft': 556,
'guillemotright': 556,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 556,
'hungarumlaut': 333,
'hyphen': 333,
'i': 222,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 222,
'k': 500,
'l': 222,
'less': 584,
'logicalnot': 584,
'lslash': 222,
'm': 833,
'macron': 333,
'minus': 584,
'mu': 556,
'multiply': 584,
'n': 556,
'nine': 556,
'ntilde': 556,
'numbersign': 556,
'o': 556,
'oacute': 556,
'ocircumflex': 556,
'odieresis': 556,
'oe': 944,
'ogonek': 333,
'ograve': 556,
'one': 556,
'onehalf': 834,
'onequarter': 834,
'onesuperior': 333,
'ordfeminine': 370,
'ordmasculine': 365,
'oslash': 611,
'otilde': 556,
'p': 556,
'paragraph': 537,
'parenleft': 333,
'parenright': 333,
'percent': 889,
'period': 278,
'periodcentered': 278,
'perthousand': 1000,
'plus': 584,
'plusminus': 584,
'q': 556,
'question': 556,
'questiondown': 611,
'quotedbl': 355,
'quotedblbase': 333,
'quotedblleft': 333,
'quotedblright': 333,
'quoteleft': 222,
'quoteright': 222,
'quotesinglbase': 222,
'quotesingle': 191,
'r': 333,
'registered': 737,
'ring': 333,
's': 500,
'scaron': 500,
'section': 556,
'semicolon': 278,
'seven': 556,
'six': 556,
'slash': 278,
'space': 278,
'sterling': 556,
't': 278,
'thorn': 556,
'three': 556,
'threequarters': 834,
'threesuperior': 333,
'tilde': 333,
'trademark': 1000,
'two': 556,
'twosuperior': 333,
'u': 556,
'uacute': 556,
'ucircumflex': 556,
'udieresis': 556,
'ugrave': 556,
'underscore': 556,
'v': 500,
'w': 722,
'x': 500,
'y': 500,
'yacute': 500,
'ydieresis': 500,
'yen': 556,
'z': 500,
'zcaron': 500,
'zero': 556}
widthsByFontGlyph['Helvetica-Bold'] = {'A': 722,
'AE': 1000,
'Aacute': 722,
'Acircumflex': 722,
'Adieresis': 722,
'Agrave': 722,
'Aring': 722,
'Atilde': 722,
'B': 722,
'C': 722,
'Ccedilla': 722,
'D': 722,
'E': 667,
'Eacute': 667,
'Ecircumflex': 667,
'Edieresis': 667,
'Egrave': 667,
'Eth': 722,
'Euro': 556,
'F': 611,
'G': 778,
'H': 722,
'I': 278,
'Iacute': 278,
'Icircumflex': 278,
'Idieresis': 278,
'Igrave': 278,
'J': 556,
'K': 722,
'L': 611,
'Lslash': 611,
'M': 833,
'N': 722,
'Ntilde': 722,
'O': 778,
'OE': 1000,
'Oacute': 778,
'Ocircumflex': 778,
'Odieresis': 778,
'Ograve': 778,
'Oslash': 778,
'Otilde': 778,
'P': 667,
'Q': 778,
'R': 722,
'S': 667,
'Scaron': 667,
'T': 611,
'Thorn': 667,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 667,
'W': 944,
'X': 667,
'Y': 667,
'Yacute': 667,
'Ydieresis': 667,
'Z': 611,
'Zcaron': 611,
'a': 556,
'aacute': 556,
'acircumflex': 556,
'acute': 333,
'adieresis': 556,
'ae': 889,
'agrave': 556,
'ampersand': 722,
'aring': 556,
'asciicircum': 584,
'asciitilde': 584,
'asterisk': 389,
'at': 975,
'atilde': 556,
'b': 611,
'backslash': 278,
'bar': 280,
'braceleft': 389,
'braceright': 389,
'bracketleft': 333,
'bracketright': 333,
'breve': 333,
'brokenbar': 280,
'bullet': 350,
'c': 556,
'caron': 333,
'ccedilla': 556,
'cedilla': 333,
'cent': 556,
'circumflex': 333,
'colon': 333,
'comma': 278,
'copyright': 737,
'currency': 556,
'd': 611,
'dagger': 556,
'daggerdbl': 556,
'degree': 400,
'dieresis': 333,
'divide': 584,
'dollar': 556,
'dotaccent': 333,
'dotlessi': 278,
'e': 556,
'eacute': 556,
'ecircumflex': 556,
'edieresis': 556,
'egrave': 556,
'eight': 556,
'ellipsis': 1000,
'emdash': 1000,
'endash': 556,
'equal': 584,
'eth': 611,
'exclam': 333,
'exclamdown': 333,
'f': 333,
'fi': 611,
'five': 556,
'fl': 611,
'florin': 556,
'four': 556,
'fraction': 167,
'g': 611,
'germandbls': 611,
'grave': 333,
'greater': 584,
'guillemotleft': 556,
'guillemotright': 556,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 611,
'hungarumlaut': 333,
'hyphen': 333,
'i': 278,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 278,
'k': 556,
'l': 278,
'less': 584,
'logicalnot': 584,
'lslash': 278,
'm': 889,
'macron': 333,
'minus': 584,
'mu': 611,
'multiply': 584,
'n': 611,
'nine': 556,
'ntilde': 611,
'numbersign': 556,
'o': 611,
'oacute': 611,
'ocircumflex': 611,
'odieresis': 611,
'oe': 944,
'ogonek': 333,
'ograve': 611,
'one': 556,
'onehalf': 834,
'onequarter': 834,
'onesuperior': 333,
'ordfeminine': 370,
'ordmasculine': 365,
'oslash': 611,
'otilde': 611,
'p': 611,
'paragraph': 556,
'parenleft': 333,
'parenright': 333,
'percent': 889,
'period': 278,
'periodcentered': 278,
'perthousand': 1000,
'plus': 584,
'plusminus': 584,
'q': 611,
'question': 611,
'questiondown': 611,
'quotedbl': 474,
'quotedblbase': 500,
'quotedblleft': 500,
'quotedblright': 500,
'quoteleft': 278,
'quoteright': 278,
'quotesinglbase': 278,
'quotesingle': 238,
'r': 389,
'registered': 737,
'ring': 333,
's': 556,
'scaron': 556,
'section': 556,
'semicolon': 333,
'seven': 556,
'six': 556,
'slash': 278,
'space': 278,
'sterling': 556,
't': 333,
'thorn': 611,
'three': 556,
'threequarters': 834,
'threesuperior': 333,
'tilde': 333,
'trademark': 1000,
'two': 556,
'twosuperior': 333,
'u': 611,
'uacute': 611,
'ucircumflex': 611,
'udieresis': 611,
'ugrave': 611,
'underscore': 556,
'v': 556,
'w': 778,
'x': 556,
'y': 556,
'yacute': 556,
'ydieresis': 556,
'yen': 556,
'z': 500,
'zcaron': 500,
'zero': 556}
widthsByFontGlyph['Helvetica-Oblique'] = {'A': 667,
'AE': 1000,
'Aacute': 667,
'Acircumflex': 667,
'Adieresis': 667,
'Agrave': 667,
'Aring': 667,
'Atilde': 667,
'B': 667,
'C': 722,
'Ccedilla': 722,
'D': 722,
'E': 667,
'Eacute': 667,
'Ecircumflex': 667,
'Edieresis': 667,
'Egrave': 667,
'Eth': 722,
'Euro': 556,
'F': 611,
'G': 778,
'H': 722,
'I': 278,
'Iacute': 278,
'Icircumflex': 278,
'Idieresis': 278,
'Igrave': 278,
'J': 500,
'K': 667,
'L': 556,
'Lslash': 556,
'M': 833,
'N': 722,
'Ntilde': 722,
'O': 778,
'OE': 1000,
'Oacute': 778,
'Ocircumflex': 778,
'Odieresis': 778,
'Ograve': 778,
'Oslash': 778,
'Otilde': 778,
'P': 667,
'Q': 778,
'R': 722,
'S': 667,
'Scaron': 667,
'T': 611,
'Thorn': 667,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 667,
'W': 944,
'X': 667,
'Y': 667,
'Yacute': 667,
'Ydieresis': 667,
'Z': 611,
'Zcaron': 611,
'a': 556,
'aacute': 556,
'acircumflex': 556,
'acute': 333,
'adieresis': 556,
'ae': 889,
'agrave': 556,
'ampersand': 667,
'aring': 556,
'asciicircum': 469,
'asciitilde': 584,
'asterisk': 389,
'at': 1015,
'atilde': 556,
'b': 556,
'backslash': 278,
'bar': 260,
'braceleft': 334,
'braceright': 334,
'bracketleft': 278,
'bracketright': 278,
'breve': 333,
'brokenbar': 260,
'bullet': 350,
'c': 500,
'caron': 333,
'ccedilla': 500,
'cedilla': 333,
'cent': 556,
'circumflex': 333,
'colon': 278,
'comma': 278,
'copyright': 737,
'currency': 556,
'd': 556,
'dagger': 556,
'daggerdbl': 556,
'degree': 400,
'dieresis': 333,
'divide': 584,
'dollar': 556,
'dotaccent': 333,
'dotlessi': 278,
'e': 556,
'eacute': 556,
'ecircumflex': 556,
'edieresis': 556,
'egrave': 556,
'eight': 556,
'ellipsis': 1000,
'emdash': 1000,
'endash': 556,
'equal': 584,
'eth': 556,
'exclam': 278,
'exclamdown': 333,
'f': 278,
'fi': 500,
'five': 556,
'fl': 500,
'florin': 556,
'four': 556,
'fraction': 167,
'g': 556,
'germandbls': 611,
'grave': 333,
'greater': 584,
'guillemotleft': 556,
'guillemotright': 556,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 556,
'hungarumlaut': 333,
'hyphen': 333,
'i': 222,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 222,
'k': 500,
'l': 222,
'less': 584,
'logicalnot': 584,
'lslash': 222,
'm': 833,
'macron': 333,
'minus': 584,
'mu': 556,
'multiply': 584,
'n': 556,
'nine': 556,
'ntilde': 556,
'numbersign': 556,
'o': 556,
'oacute': 556,
'ocircumflex': 556,
'odieresis': 556,
'oe': 944,
'ogonek': 333,
'ograve': 556,
'one': 556,
'onehalf': 834,
'onequarter': 834,
'onesuperior': 333,
'ordfeminine': 370,
'ordmasculine': 365,
'oslash': 611,
'otilde': 556,
'p': 556,
'paragraph': 537,
'parenleft': 333,
'parenright': 333,
'percent': 889,
'period': 278,
'periodcentered': 278,
'perthousand': 1000,
'plus': 584,
'plusminus': 584,
'q': 556,
'question': 556,
'questiondown': 611,
'quotedbl': 355,
'quotedblbase': 333,
'quotedblleft': 333,
'quotedblright': 333,
'quoteleft': 222,
'quoteright': 222,
'quotesinglbase': 222,
'quotesingle': 191,
'r': 333,
'registered': 737,
'ring': 333,
's': 500,
'scaron': 500,
'section': 556,
'semicolon': 278,
'seven': 556,
'six': 556,
'slash': 278,
'space': 278,
'sterling': 556,
't': 278,
'thorn': 556,
'three': 556,
'threequarters': 834,
'threesuperior': 333,
'tilde': 333,
'trademark': 1000,
'two': 556,
'twosuperior': 333,
'u': 556,
'uacute': 556,
'ucircumflex': 556,
'udieresis': 556,
'ugrave': 556,
'underscore': 556,
'v': 500,
'w': 722,
'x': 500,
'y': 500,
'yacute': 500,
'ydieresis': 500,
'yen': 556,
'z': 500,
'zcaron': 500,
'zero': 556}
widthsByFontGlyph['Helvetica-BoldOblique'] = {'A': 722,
'AE': 1000,
'Aacute': 722,
'Acircumflex': 722,
'Adieresis': 722,
'Agrave': 722,
'Aring': 722,
'Atilde': 722,
'B': 722,
'C': 722,
'Ccedilla': 722,
'D': 722,
'E': 667,
'Eacute': 667,
'Ecircumflex': 667,
'Edieresis': 667,
'Egrave': 667,
'Eth': 722,
'Euro': 556,
'F': 611,
'G': 778,
'H': 722,
'I': 278,
'Iacute': 278,
'Icircumflex': 278,
'Idieresis': 278,
'Igrave': 278,
'J': 556,
'K': 722,
'L': 611,
'Lslash': 611,
'M': 833,
'N': 722,
'Ntilde': 722,
'O': 778,
'OE': 1000,
'Oacute': 778,
'Ocircumflex': 778,
'Odieresis': 778,
'Ograve': 778,
'Oslash': 778,
'Otilde': 778,
'P': 667,
'Q': 778,
'R': 722,
'S': 667,
'Scaron': 667,
'T': 611,
'Thorn': 667,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 667,
'W': 944,
'X': 667,
'Y': 667,
'Yacute': 667,
'Ydieresis': 667,
'Z': 611,
'Zcaron': 611,
'a': 556,
'aacute': 556,
'acircumflex': 556,
'acute': 333,
'adieresis': 556,
'ae': 889,
'agrave': 556,
'ampersand': 722,
'aring': 556,
'asciicircum': 584,
'asciitilde': 584,
'asterisk': 389,
'at': 975,
'atilde': 556,
'b': 611,
'backslash': 278,
'bar': 280,
'braceleft': 389,
'braceright': 389,
'bracketleft': 333,
'bracketright': 333,
'breve': 333,
'brokenbar': 280,
'bullet': 350,
'c': 556,
'caron': 333,
'ccedilla': 556,
'cedilla': 333,
'cent': 556,
'circumflex': 333,
'colon': 333,
'comma': 278,
'copyright': 737,
'currency': 556,
'd': 611,
'dagger': 556,
'daggerdbl': 556,
'degree': 400,
'dieresis': 333,
'divide': 584,
'dollar': 556,
'dotaccent': 333,
'dotlessi': 278,
'e': 556,
'eacute': 556,
'ecircumflex': 556,
'edieresis': 556,
'egrave': 556,
'eight': 556,
'ellipsis': 1000,
'emdash': 1000,
'endash': 556,
'equal': 584,
'eth': 611,
'exclam': 333,
'exclamdown': 333,
'f': 333,
'fi': 611,
'five': 556,
'fl': 611,
'florin': 556,
'four': 556,
'fraction': 167,
'g': 611,
'germandbls': 611,
'grave': 333,
'greater': 584,
'guillemotleft': 556,
'guillemotright': 556,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 611,
'hungarumlaut': 333,
'hyphen': 333,
'i': 278,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 278,
'k': 556,
'l': 278,
'less': 584,
'logicalnot': 584,
'lslash': 278,
'm': 889,
'macron': 333,
'minus': 584,
'mu': 611,
'multiply': 584,
'n': 611,
'nine': 556,
'ntilde': 611,
'numbersign': 556,
'o': 611,
'oacute': 611,
'ocircumflex': 611,
'odieresis': 611,
'oe': 944,
'ogonek': 333,
'ograve': 611,
'one': 556,
'onehalf': 834,
'onequarter': 834,
'onesuperior': 333,
'ordfeminine': 370,
'ordmasculine': 365,
'oslash': 611,
'otilde': 611,
'p': 611,
'paragraph': 556,
'parenleft': 333,
'parenright': 333,
'percent': 889,
'period': 278,
'periodcentered': 278,
'perthousand': 1000,
'plus': 584,
'plusminus': 584,
'q': 611,
'question': 611,
'questiondown': 611,
'quotedbl': 474,
'quotedblbase': 500,
'quotedblleft': 500,
'quotedblright': 500,
'quoteleft': 278,
'quoteright': 278,
'quotesinglbase': 278,
'quotesingle': 238,
'r': 389,
'registered': 737,
'ring': 333,
's': 556,
'scaron': 556,
'section': 556,
'semicolon': 333,
'seven': 556,
'six': 556,
'slash': 278,
'space': 278,
'sterling': 556,
't': 333,
'thorn': 611,
'three': 556,
'threequarters': 834,
'threesuperior': 333,
'tilde': 333,
'trademark': 1000,
'two': 556,
'twosuperior': 333,
'u': 611,
'uacute': 611,
'ucircumflex': 611,
'udieresis': 611,
'ugrave': 611,
'underscore': 556,
'v': 556,
'w': 778,
'x': 556,
'y': 556,
'yacute': 556,
'ydieresis': 556,
'yen': 556,
'z': 500,
'zcaron': 500,
'zero': 556}
# Courier can be expressed more compactly!
_w = {}
for charname in widthsByFontGlyph['Helvetica'].keys():
_w[charname] = 600
widthsByFontGlyph['Courier'] = _w
widthsByFontGlyph['Courier-Bold'] = _w
widthsByFontGlyph['Courier-Oblique'] = _w
widthsByFontGlyph['Courier-BoldOblique'] = _w
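# Note: the four Courier faces share one dict object; that is safe only
# because this table is never mutated per-face, and it encodes the fact that
# every glyph in the monospaced Courier family is 600 units wide.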
widthsByFontGlyph['Times-Roman'] = {'A': 722,
'AE': 889,
'Aacute': 722,
'Acircumflex': 722,
'Adieresis': 722,
'Agrave': 722,
'Aring': 722,
'Atilde': 722,
'B': 667,
'C': 667,
'Ccedilla': 667,
'D': 722,
'E': 611,
'Eacute': 611,
'Ecircumflex': 611,
'Edieresis': 611,
'Egrave': 611,
'Eth': 722,
'Euro': 500,
'F': 556,
'G': 722,
'H': 722,
'I': 333,
'Iacute': 333,
'Icircumflex': 333,
'Idieresis': 333,
'Igrave': 333,
'J': 389,
'K': 722,
'L': 611,
'Lslash': 611,
'M': 889,
'N': 722,
'Ntilde': 722,
'O': 722,
'OE': 889,
'Oacute': 722,
'Ocircumflex': 722,
'Odieresis': 722,
'Ograve': 722,
'Oslash': 722,
'Otilde': 722,
'P': 556,
'Q': 722,
'R': 667,
'S': 556,
'Scaron': 556,
'T': 611,
'Thorn': 556,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 722,
'W': 944,
'X': 722,
'Y': 722,
'Yacute': 722,
'Ydieresis': 722,
'Z': 611,
'Zcaron': 611,
'a': 444,
'aacute': 444,
'acircumflex': 444,
'acute': 333,
'adieresis': 444,
'ae': 667,
'agrave': 444,
'ampersand': 778,
'aring': 444,
'asciicircum': 469,
'asciitilde': 541,
'asterisk': 500,
'at': 921,
'atilde': 444,
'b': 500,
'backslash': 278,
'bar': 200,
'braceleft': 480,
'braceright': 480,
'bracketleft': 333,
'bracketright': 333,
'breve': 333,
'brokenbar': 200,
'bullet': 350,
'c': 444,
'caron': 333,
'ccedilla': 444,
'cedilla': 333,
'cent': 500,
'circumflex': 333,
'colon': 278,
'comma': 250,
'copyright': 760,
'currency': 500,
'd': 500,
'dagger': 500,
'daggerdbl': 500,
'degree': 400,
'dieresis': 333,
'divide': 564,
'dollar': 500,
'dotaccent': 333,
'dotlessi': 278,
'e': 444,
'eacute': 444,
'ecircumflex': 444,
'edieresis': 444,
'egrave': 444,
'eight': 500,
'ellipsis': 1000,
'emdash': 1000,
'endash': 500,
'equal': 564,
'eth': 500,
'exclam': 333,
'exclamdown': 333,
'f': 333,
'fi': 556,
'five': 500,
'fl': 556,
'florin': 500,
'four': 500,
'fraction': 167,
'g': 500,
'germandbls': 500,
'grave': 333,
'greater': 564,
'guillemotleft': 500,
'guillemotright': 500,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 500,
'hungarumlaut': 333,
'hyphen': 333,
'i': 278,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 278,
'k': 500,
'l': 278,
'less': 564,
'logicalnot': 564,
'lslash': 278,
'm': 778,
'macron': 333,
'minus': 564,
'mu': 500,
'multiply': 564,
'n': 500,
'nine': 500,
'ntilde': 500,
'numbersign': 500,
'o': 500,
'oacute': 500,
'ocircumflex': 500,
'odieresis': 500,
'oe': 722,
'ogonek': 333,
'ograve': 500,
'one': 500,
'onehalf': 750,
'onequarter': 750,
'onesuperior': 300,
'ordfeminine': 276,
'ordmasculine': 310,
'oslash': 500,
'otilde': 500,
'p': 500,
'paragraph': 453,
'parenleft': 333,
'parenright': 333,
'percent': 833,
'period': 250,
'periodcentered': 250,
'perthousand': 1000,
'plus': 564,
'plusminus': 564,
'q': 500,
'question': 444,
'questiondown': 444,
'quotedbl': 408,
'quotedblbase': 444,
'quotedblleft': 444,
'quotedblright': 444,
'quoteleft': 333,
'quoteright': 333,
'quotesinglbase': 333,
'quotesingle': 180,
'r': 333,
'registered': 760,
'ring': 333,
's': 389,
'scaron': 389,
'section': 500,
'semicolon': 278,
'seven': 500,
'six': 500,
'slash': 278,
'space': 250,
'sterling': 500,
't': 278,
'thorn': 500,
'three': 500,
'threequarters': 750,
'threesuperior': 300,
'tilde': 333,
'trademark': 980,
'two': 500,
'twosuperior': 300,
'u': 500,
'uacute': 500,
'ucircumflex': 500,
'udieresis': 500,
'ugrave': 500,
'underscore': 500,
'v': 500,
'w': 722,
'x': 500,
'y': 500,
'yacute': 500,
'ydieresis': 500,
'yen': 500,
'z': 444,
'zcaron': 444,
'zero': 500}
widthsByFontGlyph['Times-Bold'] = {'A': 722,
'AE': 1000,
'Aacute': 722,
'Acircumflex': 722,
'Adieresis': 722,
'Agrave': 722,
'Aring': 722,
'Atilde': 722,
'B': 667,
'C': 722,
'Ccedilla': 722,
'D': 722,
'E': 667,
'Eacute': 667,
'Ecircumflex': 667,
'Edieresis': 667,
'Egrave': 667,
'Eth': 722,
'Euro': 500,
'F': 611,
'G': 778,
'H': 778,
'I': 389,
'Iacute': 389,
'Icircumflex': 389,
'Idieresis': 389,
'Igrave': 389,
'J': 500,
'K': 778,
'L': 667,
'Lslash': 667,
'M': 944,
'N': 722,
'Ntilde': 722,
'O': 778,
'OE': 1000,
'Oacute': 778,
'Ocircumflex': 778,
'Odieresis': 778,
'Ograve': 778,
'Oslash': 778,
'Otilde': 778,
'P': 611,
'Q': 778,
'R': 722,
'S': 556,
'Scaron': 556,
'T': 667,
'Thorn': 611,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 722,
'W': 1000,
'X': 722,
'Y': 722,
'Yacute': 722,
'Ydieresis': 722,
'Z': 667,
'Zcaron': 667,
'a': 500,
'aacute': 500,
'acircumflex': 500,
'acute': 333,
'adieresis': 500,
'ae': 722,
'agrave': 500,
'ampersand': 833,
'aring': 500,
'asciicircum': 581,
'asciitilde': 520,
'asterisk': 500,
'at': 930,
'atilde': 500,
'b': 556,
'backslash': 278,
'bar': 220,
'braceleft': 394,
'braceright': 394,
'bracketleft': 333,
'bracketright': 333,
'breve': 333,
'brokenbar': 220,
'bullet': 350,
'c': 444,
'caron': 333,
'ccedilla': 444,
'cedilla': 333,
'cent': 500,
'circumflex': 333,
'colon': 333,
'comma': 250,
'copyright': 747,
'currency': 500,
'd': 556,
'dagger': 500,
'daggerdbl': 500,
'degree': 400,
'dieresis': 333,
'divide': 570,
'dollar': 500,
'dotaccent': 333,
'dotlessi': 278,
'e': 444,
'eacute': 444,
'ecircumflex': 444,
'edieresis': 444,
'egrave': 444,
'eight': 500,
'ellipsis': 1000,
'emdash': 1000,
'endash': 500,
'equal': 570,
'eth': 500,
'exclam': 333,
'exclamdown': 333,
'f': 333,
'fi': 556,
'five': 500,
'fl': 556,
'florin': 500,
'four': 500,
'fraction': 167,
'g': 500,
'germandbls': 556,
'grave': 333,
'greater': 570,
'guillemotleft': 500,
'guillemotright': 500,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 556,
'hungarumlaut': 333,
'hyphen': 333,
'i': 278,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 333,
'k': 556,
'l': 278,
'less': 570,
'logicalnot': 570,
'lslash': 278,
'm': 833,
'macron': 333,
'minus': 570,
'mu': 556,
'multiply': 570,
'n': 556,
'nine': 500,
'ntilde': 556,
'numbersign': 500,
'o': 500,
'oacute': 500,
'ocircumflex': 500,
'odieresis': 500,
'oe': 722,
'ogonek': 333,
'ograve': 500,
'one': 500,
'onehalf': 750,
'onequarter': 750,
'onesuperior': 300,
'ordfeminine': 300,
'ordmasculine': 330,
'oslash': 500,
'otilde': 500,
'p': 556,
'paragraph': 540,
'parenleft': 333,
'parenright': 333,
'percent': 1000,
'period': 250,
'periodcentered': 250,
'perthousand': 1000,
'plus': 570,
'plusminus': 570,
'q': 556,
'question': 500,
'questiondown': 500,
'quotedbl': 555,
'quotedblbase': 500,
'quotedblleft': 500,
'quotedblright': 500,
'quoteleft': 333,
'quoteright': 333,
'quotesinglbase': 333,
'quotesingle': 278,
'r': 444,
'registered': 747,
'ring': 333,
's': 389,
'scaron': 389,
'section': 500,
'semicolon': 333,
'seven': 500,
'six': 500,
'slash': 278,
'space': 250,
'sterling': 500,
't': 333,
'thorn': 556,
'three': 500,
'threequarters': 750,
'threesuperior': 300,
'tilde': 333,
'trademark': 1000,
'two': 500,
'twosuperior': 300,
'u': 556,
'uacute': 556,
'ucircumflex': 556,
'udieresis': 556,
'ugrave': 556,
'underscore': 500,
'v': 500,
'w': 722,
'x': 500,
'y': 500,
'yacute': 500,
'ydieresis': 500,
'yen': 500,
'z': 444,
'zcaron': 444,
'zero': 500}
widthsByFontGlyph['Times-Italic'] = {'A': 611,
'AE': 889,
'Aacute': 611,
'Acircumflex': 611,
'Adieresis': 611,
'Agrave': 611,
'Aring': 611,
'Atilde': 611,
'B': 611,
'C': 667,
'Ccedilla': 667,
'D': 722,
'E': 611,
'Eacute': 611,
'Ecircumflex': 611,
'Edieresis': 611,
'Egrave': 611,
'Eth': 722,
'Euro': 500,
'F': 611,
'G': 722,
'H': 722,
'I': 333,
'Iacute': 333,
'Icircumflex': 333,
'Idieresis': 333,
'Igrave': 333,
'J': 444,
'K': 667,
'L': 556,
'Lslash': 556,
'M': 833,
'N': 667,
'Ntilde': 667,
'O': 722,
'OE': 944,
'Oacute': 722,
'Ocircumflex': 722,
'Odieresis': 722,
'Ograve': 722,
'Oslash': 722,
'Otilde': 722,
'P': 611,
'Q': 722,
'R': 611,
'S': 500,
'Scaron': 500,
'T': 556,
'Thorn': 611,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 611,
'W': 833,
'X': 611,
'Y': 556,
'Yacute': 556,
'Ydieresis': 556,
'Z': 556,
'Zcaron': 556,
'a': 500,
'aacute': 500,
'acircumflex': 500,
'acute': 333,
'adieresis': 500,
'ae': 667,
'agrave': 500,
'ampersand': 778,
'aring': 500,
'asciicircum': 422,
'asciitilde': 541,
'asterisk': 500,
'at': 920,
'atilde': 500,
'b': 500,
'backslash': 278,
'bar': 275,
'braceleft': 400,
'braceright': 400,
'bracketleft': 389,
'bracketright': 389,
'breve': 333,
'brokenbar': 275,
'bullet': 350,
'c': 444,
'caron': 333,
'ccedilla': 444,
'cedilla': 333,
'cent': 500,
'circumflex': 333,
'colon': 333,
'comma': 250,
'copyright': 760,
'currency': 500,
'd': 500,
'dagger': 500,
'daggerdbl': 500,
'degree': 400,
'dieresis': 333,
'divide': 675,
'dollar': 500,
'dotaccent': 333,
'dotlessi': 278,
'e': 444,
'eacute': 444,
'ecircumflex': 444,
'edieresis': 444,
'egrave': 444,
'eight': 500,
'ellipsis': 889,
'emdash': 889,
'endash': 500,
'equal': 675,
'eth': 500,
'exclam': 333,
'exclamdown': 389,
'f': 278,
'fi': 500,
'five': 500,
'fl': 500,
'florin': 500,
'four': 500,
'fraction': 167,
'g': 500,
'germandbls': 500,
'grave': 333,
'greater': 675,
'guillemotleft': 500,
'guillemotright': 500,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 500,
'hungarumlaut': 333,
'hyphen': 333,
'i': 278,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 278,
'k': 444,
'l': 278,
'less': 675,
'logicalnot': 675,
'lslash': 278,
'm': 722,
'macron': 333,
'minus': 675,
'mu': 500,
'multiply': 675,
'n': 500,
'nine': 500,
'ntilde': 500,
'numbersign': 500,
'o': 500,
'oacute': 500,
'ocircumflex': 500,
'odieresis': 500,
'oe': 667,
'ogonek': 333,
'ograve': 500,
'one': 500,
'onehalf': 750,
'onequarter': 750,
'onesuperior': 300,
'ordfeminine': 276,
'ordmasculine': 310,
'oslash': 500,
'otilde': 500,
'p': 500,
'paragraph': 523,
'parenleft': 333,
'parenright': 333,
'percent': 833,
'period': 250,
'periodcentered': 250,
'perthousand': 1000,
'plus': 675,
'plusminus': 675,
'q': 500,
'question': 500,
'questiondown': 500,
'quotedbl': 420,
'quotedblbase': 556,
'quotedblleft': 556,
'quotedblright': 556,
'quoteleft': 333,
'quoteright': 333,
'quotesinglbase': 333,
'quotesingle': 214,
'r': 389,
'registered': 760,
'ring': 333,
's': 389,
'scaron': 389,
'section': 500,
'semicolon': 333,
'seven': 500,
'six': 500,
'slash': 278,
'space': 250,
'sterling': 500,
't': 278,
'thorn': 500,
'three': 500,
'threequarters': 750,
'threesuperior': 300,
'tilde': 333,
'trademark': 980,
'two': 500,
'twosuperior': 300,
'u': 500,
'uacute': 500,
'ucircumflex': 500,
'udieresis': 500,
'ugrave': 500,
'underscore': 500,
'v': 444,
'w': 667,
'x': 444,
'y': 444,
'yacute': 444,
'ydieresis': 444,
'yen': 500,
'z': 389,
'zcaron': 389,
'zero': 500}
widthsByFontGlyph['Times-BoldItalic'] = {'A': 667,
'AE': 944,
'Aacute': 667,
'Acircumflex': 667,
'Adieresis': 667,
'Agrave': 667,
'Aring': 667,
'Atilde': 667,
'B': 667,
'C': 667,
'Ccedilla': 667,
'D': 722,
'E': 667,
'Eacute': 667,
'Ecircumflex': 667,
'Edieresis': 667,
'Egrave': 667,
'Eth': 722,
'Euro': 500,
'F': 667,
'G': 722,
'H': 778,
'I': 389,
'Iacute': 389,
'Icircumflex': 389,
'Idieresis': 389,
'Igrave': 389,
'J': 500,
'K': 667,
'L': 611,
'Lslash': 611,
'M': 889,
'N': 722,
'Ntilde': 722,
'O': 722,
'OE': 944,
'Oacute': 722,
'Ocircumflex': 722,
'Odieresis': 722,
'Ograve': 722,
'Oslash': 722,
'Otilde': 722,
'P': 611,
'Q': 722,
'R': 667,
'S': 556,
'Scaron': 556,
'T': 611,
'Thorn': 611,
'U': 722,
'Uacute': 722,
'Ucircumflex': 722,
'Udieresis': 722,
'Ugrave': 722,
'V': 667,
'W': 889,
'X': 667,
'Y': 611,
'Yacute': 611,
'Ydieresis': 611,
'Z': 611,
'Zcaron': 611,
'a': 500,
'aacute': 500,
'acircumflex': 500,
'acute': 333,
'adieresis': 500,
'ae': 722,
'agrave': 500,
'ampersand': 778,
'aring': 500,
'asciicircum': 570,
'asciitilde': 570,
'asterisk': 500,
'at': 832,
'atilde': 500,
'b': 500,
'backslash': 278,
'bar': 220,
'braceleft': 348,
'braceright': 348,
'bracketleft': 333,
'bracketright': 333,
'breve': 333,
'brokenbar': 220,
'bullet': 350,
'c': 444,
'caron': 333,
'ccedilla': 444,
'cedilla': 333,
'cent': 500,
'circumflex': 333,
'colon': 333,
'comma': 250,
'copyright': 747,
'currency': 500,
'd': 500,
'dagger': 500,
'daggerdbl': 500,
'degree': 400,
'dieresis': 333,
'divide': 570,
'dollar': 500,
'dotaccent': 333,
'dotlessi': 278,
'e': 444,
'eacute': 444,
'ecircumflex': 444,
'edieresis': 444,
'egrave': 444,
'eight': 500,
'ellipsis': 1000,
'emdash': 1000,
'endash': 500,
'equal': 570,
'eth': 500,
'exclam': 389,
'exclamdown': 389,
'f': 333,
'fi': 556,
'five': 500,
'fl': 556,
'florin': 500,
'four': 500,
'fraction': 167,
'g': 500,
'germandbls': 500,
'grave': 333,
'greater': 570,
'guillemotleft': 500,
'guillemotright': 500,
'guilsinglleft': 333,
'guilsinglright': 333,
'h': 556,
'hungarumlaut': 333,
'hyphen': 333,
'i': 278,
'iacute': 278,
'icircumflex': 278,
'idieresis': 278,
'igrave': 278,
'j': 278,
'k': 500,
'l': 278,
'less': 570,
'logicalnot': 606,
'lslash': 278,
'm': 778,
'macron': 333,
'minus': 606,
'mu': 576,
'multiply': 570,
'n': 556,
'nine': 500,
'ntilde': 556,
'numbersign': 500,
'o': 500,
'oacute': 500,
'ocircumflex': 500,
'odieresis': 500,
'oe': 722,
'ogonek': 333,
'ograve': 500,
'one': 500,
'onehalf': 750,
'onequarter': 750,
'onesuperior': 300,
'ordfeminine': 266,
'ordmasculine': 300,
'oslash': 500,
'otilde': 500,
'p': 500,
'paragraph': 500,
'parenleft': 333,
'parenright': 333,
'percent': 833,
'period': 250,
'periodcentered': 250,
'perthousand': 1000,
'plus': 570,
'plusminus': 570,
'q': 500,
'question': 500,
'questiondown': 500,
'quotedbl': 555,
'quotedblbase': 500,
'quotedblleft': 500,
'quotedblright': 500,
'quoteleft': 333,
'quoteright': 333,
'quotesinglbase': 333,
'quotesingle': 278,
'r': 389,
'registered': 747,
'ring': 333,
's': 389,
'scaron': 389,
'section': 500,
'semicolon': 333,
'seven': 500,
'six': 500,
'slash': 278,
'space': 250,
'sterling': 500,
't': 278,
'thorn': 500,
'three': 500,
'threequarters': 750,
'threesuperior': 300,
'tilde': 333,
'trademark': 1000,
'two': 500,
'twosuperior': 300,
'u': 556,
'uacute': 556,
'ucircumflex': 556,
'udieresis': 556,
'ugrave': 556,
'underscore': 500,
'v': 444,
'w': 667,
'x': 500,
'y': 444,
'yacute': 444,
'ydieresis': 444,
'yen': 500,
'z': 389,
'zcaron': 389,
'zero': 500}
widthsByFontGlyph['Symbol'] = {'Alpha': 722,
'Beta': 667,
'Chi': 722,
'Delta': 612,
'Epsilon': 611,
'Eta': 722,
'Euro': 750,
'Gamma': 603,
'Ifraktur': 686,
'Iota': 333,
'Kappa': 722,
'Lambda': 686,
'Mu': 889,
'Nu': 722,
'Omega': 768,
'Omicron': 722,
'Phi': 763,
'Pi': 768,
'Psi': 795,
'Rfraktur': 795,
'Rho': 556,
'Sigma': 592,
'Tau': 611,
'Theta': 741,
'Upsilon': 690,
'Upsilon1': 620,
'Xi': 645,
'Zeta': 611,
'aleph': 823,
'alpha': 631,
'ampersand': 778,
'angle': 768,
'angleleft': 329,
'angleright': 329,
'apple': 790,
'approxequal': 549,
'arrowboth': 1042,
'arrowdblboth': 1042,
'arrowdbldown': 603,
'arrowdblleft': 987,
'arrowdblright': 987,
'arrowdblup': 603,
'arrowdown': 603,
'arrowhorizex': 1000,
'arrowleft': 987,
'arrowright': 987,
'arrowup': 603,
'arrowvertex': 603,
'asteriskmath': 500,
'bar': 200,
'beta': 549,
'braceex': 494,
'braceleft': 480,
'braceleftbt': 494,
'braceleftmid': 494,
'bracelefttp': 494,
'braceright': 480,
'bracerightbt': 494,
'bracerightmid': 494,
'bracerighttp': 494,
'bracketleft': 333,
'bracketleftbt': 384,
'bracketleftex': 384,
'bracketlefttp': 384,
'bracketright': 333,
'bracketrightbt': 384,
'bracketrightex': 384,
'bracketrighttp': 384,
'bullet': 460,
'carriagereturn': 658,
'chi': 549,
'circlemultiply': 768,
'circleplus': 768,
'club': 753,
'colon': 278,
'comma': 250,
'congruent': 549,
'copyrightsans': 790,
'copyrightserif': 790,
'degree': 400,
'delta': 494,
'diamond': 753,
'divide': 549,
'dotmath': 250,
'eight': 500,
'element': 713,
'ellipsis': 1000,
'emptyset': 823,
'epsilon': 439,
'equal': 549,
'equivalence': 549,
'eta': 603,
'exclam': 333,
'existential': 549,
'five': 500,
'florin': 500,
'four': 500,
'fraction': 167,
'gamma': 411,
'gradient': 713,
'greater': 549,
'greaterequal': 549,
'heart': 753,
'infinity': 713,
'integral': 274,
'integralbt': 686,
'integralex': 686,
'integraltp': 686,
'intersection': 768,
'iota': 329,
'kappa': 549,
'lambda': 549,
'less': 549,
'lessequal': 549,
'logicaland': 603,
'logicalnot': 713,
'logicalor': 603,
'lozenge': 494,
'minus': 549,
'minute': 247,
'mu': 576,
'multiply': 549,
'nine': 500,
'notelement': 713,
'notequal': 549,
'notsubset': 713,
'nu': 521,
'numbersign': 500,
'omega': 686,
'omega1': 713,
'omicron': 549,
'one': 500,
'parenleft': 333,
'parenleftbt': 384,
'parenleftex': 384,
'parenlefttp': 384,
'parenright': 333,
'parenrightbt': 384,
'parenrightex': 384,
'parenrighttp': 384,
'partialdiff': 494,
'percent': 833,
'period': 250,
'perpendicular': 658,
'phi': 521,
'phi1': 603,
'pi': 549,
'plus': 549,
'plusminus': 549,
'product': 823,
'propersubset': 713,
'propersuperset': 713,
'proportional': 713,
'psi': 686,
'question': 444,
'radical': 549,
'radicalex': 500,
'reflexsubset': 713,
'reflexsuperset': 713,
'registersans': 790,
'registerserif': 790,
'rho': 549,
'second': 411,
'semicolon': 278,
'seven': 500,
'sigma': 603,
'sigma1': 439,
'similar': 549,
'six': 500,
'slash': 278,
'space': 250,
'spade': 753,
'suchthat': 439,
'summation': 713,
'tau': 439,
'therefore': 863,
'theta': 521,
'theta1': 631,
'three': 500,
'trademarksans': 786,
'trademarkserif': 890,
'two': 500,
'underscore': 500,
'union': 768,
'universal': 713,
'upsilon': 576,
'weierstrass': 987,
'xi': 493,
'zero': 500,
'zeta': 494}
widthsByFontGlyph['ZapfDingbats'] = {'a1': 974,
'a10': 692,
'a100': 668,
'a101': 732,
'a102': 544,
'a103': 544,
'a104': 910,
'a105': 911,
'a106': 667,
'a107': 760,
'a108': 760,
'a109': 626,
'a11': 960,
'a110': 694,
'a111': 595,
'a112': 776,
'a117': 690,
'a118': 791,
'a119': 790,
'a12': 939,
'a120': 788,
'a121': 788,
'a122': 788,
'a123': 788,
'a124': 788,
'a125': 788,
'a126': 788,
'a127': 788,
'a128': 788,
'a129': 788,
'a13': 549,
'a130': 788,
'a131': 788,
'a132': 788,
'a133': 788,
'a134': 788,
'a135': 788,
'a136': 788,
'a137': 788,
'a138': 788,
'a139': 788,
'a14': 855,
'a140': 788,
'a141': 788,
'a142': 788,
'a143': 788,
'a144': 788,
'a145': 788,
'a146': 788,
'a147': 788,
'a148': 788,
'a149': 788,
'a15': 911,
'a150': 788,
'a151': 788,
'a152': 788,
'a153': 788,
'a154': 788,
'a155': 788,
'a156': 788,
'a157': 788,
'a158': 788,
'a159': 788,
'a16': 933,
'a160': 894,
'a161': 838,
'a162': 924,
'a163': 1016,
'a164': 458,
'a165': 924,
'a166': 918,
'a167': 927,
'a168': 928,
'a169': 928,
'a17': 945,
'a170': 834,
'a171': 873,
'a172': 828,
'a173': 924,
'a174': 917,
'a175': 930,
'a176': 931,
'a177': 463,
'a178': 883,
'a179': 836,
'a18': 974,
'a180': 867,
'a181': 696,
'a182': 874,
'a183': 760,
'a184': 946,
'a185': 865,
'a186': 967,
'a187': 831,
'a188': 873,
'a189': 927,
'a19': 755,
'a190': 970,
'a191': 918,
'a192': 748,
'a193': 836,
'a194': 771,
'a195': 888,
'a196': 748,
'a197': 771,
'a198': 888,
'a199': 867,
'a2': 961,
'a20': 846,
'a200': 696,
'a201': 874,
'a202': 974,
'a203': 762,
'a204': 759,
'a205': 509,
'a206': 410,
'a21': 762,
'a22': 761,
'a23': 571,
'a24': 677,
'a25': 763,
'a26': 760,
'a27': 759,
'a28': 754,
'a29': 786,
'a3': 980,
'a30': 788,
'a31': 788,
'a32': 790,
'a33': 793,
'a34': 794,
'a35': 816,
'a36': 823,
'a37': 789,
'a38': 841,
'a39': 823,
'a4': 719,
'a40': 833,
'a41': 816,
'a42': 831,
'a43': 923,
'a44': 744,
'a45': 723,
'a46': 749,
'a47': 790,
'a48': 792,
'a49': 695,
'a5': 789,
'a50': 776,
'a51': 768,
'a52': 792,
'a53': 759,
'a54': 707,
'a55': 708,
'a56': 682,
'a57': 701,
'a58': 826,
'a59': 815,
'a6': 494,
'a60': 789,
'a61': 789,
'a62': 707,
'a63': 687,
'a64': 696,
'a65': 689,
'a66': 786,
'a67': 787,
'a68': 713,
'a69': 791,
'a7': 552,
'a70': 785,
'a71': 791,
'a72': 873,
'a73': 761,
'a74': 762,
'a75': 759,
'a76': 892,
'a77': 892,
'a78': 788,
'a79': 784,
'a8': 537,
'a81': 438,
'a82': 138,
'a83': 277,
'a84': 415,
'a85': 509,
'a86': 410,
'a87': 234,
'a88': 234,
'a89': 390,
'a9': 577,
'a90': 390,
'a91': 276,
'a92': 276,
'a93': 317,
'a94': 317,
'a95': 334,
'a96': 334,
'a97': 392,
'a98': 392,
'a99': 668,
'space': 278}
# preserve the initial values here: the default argument is evaluated once at
# import time, so initial_dicts snapshots the pristine tables for _reset() to
# restore later.
def _reset(
initial_dicts=dict(
ascent_descent=ascent_descent.copy(),
fontsByBaseEnc=fontsByBaseEnc.copy(),
fontsByName=fontsByName.copy(),
standardFontAttributes=standardFontAttributes.copy(),
widthVectorsByFont=widthVectorsByFont.copy(),
widthsByFontGlyph=widthsByFontGlyph.copy(),
)
):
for k,v in initial_dicts.iteritems():
d=globals()[k]
d.clear()
d.update(v)
from reportlab.rl_config import register_reset
register_reset(_reset)
del register_reset
| alexissmirnov/donomo | donomo_archive/lib/reportlab/pdfbase/_fontdata.py | Python | bsd-3-clause | 61,719 |
import logging
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from corehq.apps.adm.dispatcher import ADMSectionDispatcher
from corehq.apps.adm.models import REPORT_SECTION_OPTIONS, ADMReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DTSortType
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
from corehq.apps.reports.standard import DatespanMixin, ProjectReportParametersMixin
from dimagi.utils.decorators.memoized import memoized
from django.utils.translation import ugettext as _, ugettext_noop
class ADMSectionView(GenericReportView):
section_name = ugettext_noop("Active Data Management")
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
hide_filters = True
emailable = True
# adm-specific stuff
adm_slug = None
def __init__(self, request, base_context=None, domain=None, **kwargs):
self.adm_sections = dict(REPORT_SECTION_OPTIONS)
if self.adm_slug not in self.adm_sections:
raise ValueError("The adm_slug provided, %s, is not in the list of valid ADM report section slugs: %s." %
(self.adm_slug, ", ".join([key for key, val in self.adm_sections.items()]))
)
self.subreport_slug = kwargs.get("subreport_slug")
super(ADMSectionView, self).__init__(request, base_context, domain=domain, **kwargs)
self.context['report'].update(sub_slug=self.subreport_slug)
if self.subreport_data:
self.name = mark_safe("""%s <small>%s</small>""" %\
(self.subreport_data.get('value', {}).get('name'),
self.adm_sections.get(self.adm_slug, _("ADM Report"))))
@property
def subreport_data(self):
raise NotImplementedError
@property
def default_report_url(self):
return reverse('default_adm_report', args=[self.request.project])
@classmethod
def get_url(cls, domain=None, render_as=None, **kwargs):
subreport = kwargs.get('subreport')
url = super(ADMSectionView, cls).get_url(domain=domain, render_as=render_as, **kwargs)
return "%s%s" % (url, "%s/" % subreport if subreport else "")
class DefaultReportADMSectionView(GenericTabularReport, ADMSectionView, ProjectReportParametersMixin, DatespanMixin):
section_name = ugettext_noop("Active Data Management")
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
fix_left_col = True
fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.select.GroupFilter',
'corehq.apps.reports.filters.dates.DatespanFilter']
hide_filters = False
# adm-specific stuff
adm_slug = None
@property
@memoized
def subreport_data(self):
default_subreport = ADMReport.get_default(self.subreport_slug, domain=self.domain,
section=self.adm_slug, wrap=False)
if default_subreport is None:
return dict()
return default_subreport
@property
@memoized
def adm_report(self):
if self.subreport_data:
try:
adm_report = ADMReport.get_correct_wrap(self.subreport_data.get('key')[-1])
adm_report.set_domain_specific_values(self.domain)
return adm_report
except Exception as e:
logging.error("Could not fetch ADM Report: %s" % e)
return None
@property
@memoized
def adm_columns(self):
if self.adm_report:
column_config = self.report_column_config
            if not isinstance(column_config, dict):
                raise ValueError('report_column_config should return a dict')
for col in self.adm_report.columns:
col.set_report_values(**column_config)
return self.adm_report.columns
return []
@property
def headers(self):
if self.subreport_slug is None:
raise ValueError("Cannot render this report. A subreport_slug is required.")
header = DataTablesHeader(DataTablesColumn(_("FLW Name")))
for col in self.adm_report.columns:
sort_type = DTSortType.NUMERIC if hasattr(col, 'returns_numerical') and col.returns_numerical else None
help_text = _(col.description) if col.description else None
header.add_column(DataTablesColumn(_(col.name), sort_type=sort_type, help_text=help_text))
header.custom_sort = self.adm_report.default_sort_params
return header
@property
def rows(self):
rows = []
for user in self.users:
row = [self.table_cell(user.raw_username,
user.username_in_report)]
for col in self.adm_columns:
val = col.raw_value(**user._asdict())
row.append(self.table_cell(col.clean_value(val),
col.html_value(val)))
rows.append(row)
self.statistics_rows = [["Total"], ["Average"]]
for ind, col in enumerate(self.adm_columns):
column_data = [row[1+ind] for row in rows]
self.statistics_rows[0].append(col.calculate_totals(column_data))
self.statistics_rows[1].append(col.calculate_averages(column_data))
return rows
@property
def report_column_config(self):
"""
Should return a dict of values important for rendering the ADMColumns in this report.
"""
return dict(
domain=self.domain,
datespan=self.datespan
)
@classmethod
def override_navigation_list(cls, context):
current_slug = context.get('report', {}).get('sub_slug')
domain = context.get('domain')
subreport_context = []
subreports = ADMReport.get_default_subreports(domain, cls.adm_slug)
if not subreports:
subreport_context.append({
'url': '#',
'warning_label': 'No ADM Reports Configured',
})
return subreport_context
for report in subreports:
key = report.get("key", [])
entry = report.get("value", {})
report_slug = key[-2]
if cls.show_subreport_in_navigation(report_slug):
subreport_context.append({
'is_active': current_slug == report_slug,
'url': cls.get_url(domain=domain, subreport=report_slug),
'description': entry.get('description', ''),
'title': entry.get('name', 'Untitled Report'),
})
return subreport_context
@classmethod
def show_subreport_in_navigation(cls, subreport_slug):
return True
| SEL-Columbia/commcare-hq | corehq/apps/adm/reports/__init__.py | Python | bsd-3-clause | 6,857 |
"""
This test will use the default permissions found in
flaskbb.utils.populate
"""
from flaskbb.utils.permissions import *
def test_moderator_permissions_in_forum(
forum, moderator_user, topic, topic_moderator):
"""Test the moderator permissions in a forum where the user is a
moderator.
"""
assert moderator_user in forum.moderators
assert can_post_reply(moderator_user, forum)
assert can_post_topic(moderator_user, forum)
assert can_edit_post(moderator_user, topic.user_id, forum)
assert can_moderate(moderator_user, forum)
assert can_delete_post(moderator_user, topic.user_id, forum)
assert can_delete_topic(moderator_user, topic.user_id, forum)
def test_moderator_permissions_without_forum(
forum, moderator_user, topic, topic_moderator):
"""Test the moderator permissions in a forum where the user is not a
moderator.
"""
forum.moderators.remove(moderator_user)
    assert moderator_user not in forum.moderators
assert not can_moderate(moderator_user, forum)
assert can_post_reply(moderator_user, forum)
assert can_post_topic(moderator_user, forum)
assert not can_edit_post(moderator_user, topic.user_id, forum)
assert not can_delete_post(moderator_user, topic.user_id, forum)
assert not can_delete_topic(moderator_user, topic.user_id, forum)
# Test with own topic
assert can_delete_post(moderator_user, topic_moderator.user_id, forum)
assert can_delete_topic(moderator_user, topic_moderator.user_id, forum)
assert can_edit_post(moderator_user, topic_moderator.user_id, forum)
# Test moderator permissions
assert can_edit_user(moderator_user)
assert can_ban_user(moderator_user)
def test_normal_permissions(forum, user, topic):
"""Test the permissions for a normal user."""
assert not can_moderate(user, forum)
assert can_post_reply(user, forum)
assert can_post_topic(user, forum)
assert can_edit_post(user, topic.user_id, forum)
assert not can_delete_post(user, topic.user_id, forum)
assert not can_delete_topic(user, topic.user_id, forum)
assert not can_edit_user(user)
assert not can_ban_user(user)
def test_admin_permissions(forum, admin_user, topic):
"""Test the permissions for a admin user."""
assert can_moderate(admin_user, forum)
assert can_post_reply(admin_user, forum)
assert can_post_topic(admin_user, forum)
assert can_edit_post(admin_user, topic.user_id, forum)
assert can_delete_post(admin_user, topic.user_id, forum)
assert can_delete_topic(admin_user, topic.user_id, forum)
assert can_edit_user(admin_user)
assert can_ban_user(admin_user)
def test_super_moderator_permissions(forum, super_moderator_user, topic):
"""Test the permissions for a super moderator user."""
assert can_moderate(super_moderator_user, forum)
assert can_post_reply(super_moderator_user, forum)
assert can_post_topic(super_moderator_user, forum)
assert can_edit_post(super_moderator_user, topic.user_id, forum)
assert can_delete_post(super_moderator_user, topic.user_id, forum)
assert can_delete_topic(super_moderator_user, topic.user_id, forum)
assert can_edit_user(super_moderator_user)
assert can_ban_user(super_moderator_user)
def test_can_moderate_without_permission(moderator_user):
"""Test can moderate for a moderator_user without a permission."""
    assert not can_moderate(moderator_user)
| joyhuang-web/flaskbb | tests/unit/utils/test_permissions.py | Python | bsd-3-clause | 3,474 |
#!/usr/bin/env python
""" md5s3stash
content addressable storage in AWS S3
"""
from __future__ import unicode_literals
import sys
import os
import argparse
import tempfile
import urllib2
import urllib
import urlparse
import base64
import logging
import hashlib
import basin
import boto
import magic
from PIL import Image
from collections import namedtuple
import re
regex_s3 = re.compile(r's3.*amazonaws.com')
def main(argv=None):
parser = argparse.ArgumentParser(
description='content addressable storage in AWS S3')
parser.add_argument('url', nargs='+',
help='URL or path of source file to stash')
parser.add_argument('-b', '--bucket_base', nargs="?",
help='this must be a unique name in all of AWS S3')
parser.add_argument('-s', '--bucket_scheme', nargs="?",
default="simple", choices=['simple', 'multivalue'],
help='this must be a unique name in all of AWS S3')
parser.add_argument(
'-t', '--tempdir',
required=False,
help="if your files might be large, make sure this is on a big disk"
)
parser.add_argument(
'-w', '--warnings',
default=False,
        help='show python `DeprecationWarning`s suppressed by default',
required=False,
action='store_true',
)
parser.add_argument('--loglevel', default='ERROR', required=False)
parser.add_argument('-u', '--username', required=False,
help='username for downloads requiring BasicAuth')
parser.add_argument('-p', '--password', required=False,
help='password for downloads requiring BasicAuth')
if argv is None:
argv = parser.parse_args()
if argv.bucket_base:
bucket_base = argv.bucket_base
else:
assert 'BUCKET_BASE' in os.environ, "`-b` or `BUCKET_BASE` must be set"
bucket_base = os.environ['BUCKET_BASE']
if not argv.warnings:
# supress warnings
# http://stackoverflow.com/a/2047600/1763984
import warnings
warnings.simplefilter("ignore", DeprecationWarning)
if argv.tempdir:
tempfile.tempdir = argv.tempdir
auth = None
if argv.username:
auth = (argv.username, argv.password)
# set debugging level
numeric_level = getattr(logging, argv.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % argv.loglevel)
logging.basicConfig(level=numeric_level, )
# if being used in a library, probably want to be able to recycle
# connection?
conn = boto.connect_s3()
for url in argv.url:
print("{0}\t{1}\t{2}\t{3}".format(
*md5s3stash(url, bucket_base, conn, url_auth=auth, bucket_scheme=argv.bucket_scheme)
))
def md5s3stash(
url,
bucket_base,
conn=None,
url_auth=None,
url_cache={},
hash_cache={},
bucket_scheme='simple'
):
""" stash a file at `url` in the named `bucket_base` ,
`conn` is an optional boto.connect_s3()
`url_auth` is optional Basic auth ('<username>', '<password'>) tuple
to use if the url to download requires authentication.
`url_cache` is an object with a dict interface, keyed on url
url_cache[url] = { md5: ..., If-None-Match: etag, If-Modified-Since: date }
        `hash_cache` is an object with dict interface, keyed on md5
hash_cache[md5] = ( s3_url, mime_type, dimensions )
`bucket_scheme` is text string 'simple' or 'multibucket'
"""
StashReport = namedtuple('StashReport', 'url, md5, s3_url, mime_type, dimensions')
(file_path, md5, mime_type) = checkChunks(url, url_auth, url_cache)
try:
return StashReport(url, md5, *hash_cache[md5])
except KeyError:
pass
s3_url = md5_to_s3_url(md5, bucket_base, bucket_scheme=bucket_scheme)
if conn is None:
conn = boto.connect_s3()
s3move(file_path, s3_url, mime_type, conn)
(mime, dimensions) = image_info(file_path)
os.remove(file_path) # safer than rmtree
hash_cache[md5] = (s3_url, mime, dimensions)
report = StashReport(url, md5, *hash_cache[md5])
logging.getLogger('MD5S3:stash').info(report)
return report
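# Illustrative usage sketch (URL and bucket name are made up; a real call
# needs AWS credentials configured for boto):
#     report = md5s3stash('http://example.edu/pic.jpg', 'my-bucket-base',
#                         bucket_scheme='simple')
#     report.s3_url  # -> 's3://my-bucket-base/<md5 of the downloaded file>'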
# think about refactoring the next two functions
def md5_to_s3_url(md5, bucket_base, bucket_scheme='multibucket'):
""" calculate the s3 URL given an md5 and an bucket_base """
if bucket_scheme == 'simple':
url = "s3://{0}/{1}".format(
bucket_base,
md5
)
elif bucket_scheme == 'multibucket':
url = "s3://{0}.{1}/{2}".format(
md5_to_bucket_shard(md5),
bucket_base,
md5
)
return url
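# Example (illustrative), using the md5 of the empty string:
#   md5_to_s3_url('d41d8cd98f00b204e9800998ecf8427e', 'b', 'simple')
#       -> s3://b/d41d8cd98f00b204e9800998ecf8427e
#   md5_to_s3_url('d41d8cd98f00b204e9800998ecf8427e', 'b')  # multibucket
#       -> s3://h.b/d41d8cd98f00b204e9800998ecf8427e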
def md5_to_http_url(md5, bucket_base, bucket_scheme='multibucket', s3_endpoint='s3.amazonaws.com'):
""" calculate the http URL given an md5 and an bucket_base """
if bucket_scheme == 'simple':
url = "http://{0}/{1}/{2}".format(
s3_endpoint,
bucket_base,
md5
)
elif bucket_scheme == 'multibucket':
url = "http://{1}.{2}.{0}/{3}".format(
s3_endpoint,
md5_to_bucket_shard(md5),
bucket_base,
md5
)
return url
def md5_to_bucket_shard(md5):
""" calculate the shard label of the bucket name from md5 """
# "Consider utilizing multiple buckets that start with different
# alphanumeric characters. This will ensure a degree of partitioning
# from the start. The higher your volume of concurrent PUT and
# GET requests, the more impact this will likely have."
# -- http://aws.amazon.com/articles/1904
# "Bucket names must be a series of one or more labels. Adjacent
# labels are separated by a single period (.). [...] Each label must
# start and end with a lowercase letter or a number. "
# -- http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
# see also: http://en.wikipedia.org/wiki/Base_36
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"
# http://stats.stackexchange.com/a/70884/14900
    # take the first two hex digits of the hash and turn them into an integer;
# this should be evenly distributed
int_value = int(md5[0], 16)+10*int(md5[1], 16)
# divide by the length of the alphabet and take the remainder
bucket = int_value % len(ALPHABET)
return basin.encode(ALPHABET, bucket)
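# Worked example (illustrative): for an md5 starting with "d4",
# int_value = int('d', 16) + 10 * int('4', 16) = 13 + 40 = 53, and
# 53 % 36 = 17, which basin.encode() maps to 'h' in the alphabet above.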
def is_s3_url(url):
'''For s3 urls, if you send http authentication headers, S3 will
send a "400 Bad Request" in response.
Now look for s3*.amazonaws.com
'''
# moving to OR this will be s3-us-west-2.amazonaws.com
match = regex_s3.search(url)
return True if match else False
def urlopen_with_auth(url, auth=None, cache={}):
'''Use urllib2 to open url if the auth is specified.
auth is tuple of (username, password)
'''
opener = urllib2.build_opener(DefaultErrorHandler())
req = urllib2.Request(url)
p = urlparse.urlparse(url)
# try to set headers for conditional get request
try:
here = cache[url]
if 'If-None-Match' in here:
req.add_header('If-None-Match', cache[url]['If-None-Match'],)
if 'If-Modified-Since' in here:
req.add_header('If-Modified-Since', cache[url]['If-Modified-Since'],)
except KeyError:
pass
if not auth or is_s3_url(url):
if p.scheme not in ['http', 'https']:
return urllib.urlopen(url) # urllib works with normal file paths
else:
# make sure https
if p.scheme != 'https':
            raise urllib2.URLError('Basic auth without https is a bad idea! '
                                   'scheme:{0}'.format(p.scheme))
# Need to add header so it gets sent with first request,
# else redirected to shib
b64authstr = base64.b64encode('{0}:{1}'.format(*auth))
req.add_header('Authorization', 'Basic {0}'.format(b64authstr))
# return urllib2.urlopen(req)
return opener.open(req)
def checkChunks(url, auth=None, cache={}):
"""
Helper to download large files the only arg is a url this file
will go to a temp directory the file will also be downloaded in
chunks and md5 checksum is returned
based on downloadChunks@https://gist.github.com/gourneau/1430932
and http://www.pythoncentral.io/hashing-files-with-python/
"""
temp_file = tempfile.NamedTemporaryFile(delete=False, prefix='md5s3_')
logging.getLogger('MD5S3').info("temp file path %s" % temp_file.name)
hasher = hashlib.new('md5')
BLOCKSIZE = 1024 * hasher.block_size
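    # md5's block_size is 64 bytes, so the file is read and hashed in
    # 1024 * 64 = 65536-byte (64 KiB) chunks.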
try:
req = urlopen_with_auth(url, auth=auth, cache=cache)
thisurl = cache.get(url, dict())
if req.getcode() == 304:
return None, thisurl['md5'], None
mime_type = req.info()['Content-type']
# record these headers, they will let us pretend like we are a cacheing
# proxy server, and send conditional GETs next time we see this file
        etag = req.info().get('ETag', None)
if etag:
thisurl['If-None-Match'] = etag
        lmod = req.info().get('Last-Modified', None)
if lmod:
thisurl['If-Modified-Since'] = lmod
downloaded = 0
with temp_file:
while True:
chunk = req.read(BLOCKSIZE)
hasher.update(chunk)
downloaded += len(chunk)
if not chunk:
break
temp_file.write(chunk)
except urllib2.HTTPError, e:
print "HTTP Error:", e.code, url
return False
except urllib2.URLError, e:
print "URL Error:", e.reason, url
return False
md5 = hasher.hexdigest()
thisurl['md5'] = md5
cache[url] = thisurl
return temp_file.name, md5, mime_type
def s3move(place1, place2, mime, s3):
l = logging.getLogger('MD5S3:s3move')
l.debug({
'place1': place1,
'place2': place2,
'mime': mime,
's3': s3,
})
parts = urlparse.urlsplit(place2)
# SplitResult(scheme='s3', netloc='test.pdf', path='/dkd', query=''
# , fragment='')
try:
bucket = s3.get_bucket(parts.netloc, validate=False)
l.debug('bucket exists')
except boto.exception.S3ResponseError:
bucket = s3.create_bucket(parts.netloc)
l.debug('bucket created')
if not(bucket.get_key(parts.path, validate=False)):
key = bucket.new_key(parts.path)
# metadata has to be set before setting contents/creating object.
# See https://gist.github.com/garnaat/1791086
key.set_metadata("Content-Type", mime)
key.set_contents_from_filename(place1)
# key.set_acl('public-read')
l.debug('file sent to s3')
else:
l.info('key existed already')
def image_info(filepath):
''' get image info
`filepath` path to a file
returns
a tuple of two values
1. mime/type if an image; otherwise None
2. a tuple of (height, width) if an image; otherwise (0,0)
'''
try:
return (
magic.Magic(mime=True).from_file(filepath),
Image.open(filepath).size
)
except IOError as e:
if not e.message.startswith('cannot identify image file'):
raise e
else:
return (None, (0,0))
# example 11.7 Defining URL handlers
# http://www.diveintopython.net/http_web_services/etags.html
class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
def http_error_304(self, req, fp, code, msg, headers):
result = urllib2.HTTPError(
req.get_full_url(), code, msg, headers, fp)
result.status = code
return result
# main() idiom for importing into REPL for debugging
if __name__ == "__main__":
sys.exit(main())
"""
Copyright (c) 2015, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
| tingletech/md5s3stash | md5s3stash.py | Python | bsd-3-clause | 13,467 |
"""Jinja2's i18n functionality is not exactly the same as Django's.
In particular, the tags names and their syntax are different:
1. The Django ``trans`` tag is replaced by a _() global.
2. The Django ``blocktrans`` tag is called ``trans``.
(1) isn't an issue, since the whole ``makemessages`` process is based on
converting the template tags to ``_()`` calls. However, (2) means that
those Jinja2 ``trans`` tags will not be picked up by Django's
``makemessages`` command.
There aren't any nice solutions here. While Jinja2's i18n extension does
come with extraction capabilities built in, the code behind ``makemessages``
unfortunately isn't extensible, so we can:
* Duplicate the command + code behind it.
* Offer a separate command for Jinja2 extraction.
* Try to get Django to offer hooks into makemessages().
* Monkey-patch.
We are currently doing that last thing. It turns out we are lucky
for once: it's simply a matter of extending a few regular expressions.
Credit for the approach goes to:
http://stackoverflow.com/questions/2090717/getting-translation-strings-for-jinja2-templates-integrated-with-django-1-x
"""
import re
from django.core.management.commands import makemessages
from django.utils.translation import trans_real
class Command(makemessages.Command):
def handle(self, *args, **options):
old_endblock_re = trans_real.endblock_re
old_block_re = trans_real.block_re
# Extend the regular expressions that are used to detect
# translation blocks with an "OR jinja-syntax" clause.
trans_real.endblock_re = re.compile(
trans_real.endblock_re.pattern + '|' + r"""^\s*endtrans$""")
trans_real.block_re = re.compile(
trans_real.block_re.pattern + '|' + r"""^\s*trans(?:\s+(?!'|")(?=.*?=.*?)|$)""")
trans_real.plural_re = re.compile(
trans_real.plural_re.pattern + '|' + r"""^\s*pluralize(?:\s+.+|$)""")
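        # With the patched patterns, Jinja2-style blocks such as
        #     {% trans user=user.username %}Hello {{ user }}!{% endtrans %}
        # are now recognized alongside Django's {% blocktrans %} syntax.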
try:
super(Command, self).handle(*args, **options)
finally:
trans_real.endblock_re = old_endblock_re
trans_real.block_re = old_block_re
| akx/coffin | coffin/management/commands/makemessages.py | Python | bsd-3-clause | 2,126 |
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from mne.parallel import parallel_func
from mne.utils import ProgressBar, array_split_idx, use_log_level
def test_progressbar():
"""Test progressbar class."""
a = np.arange(10)
pbar = ProgressBar(a)
assert a is pbar.iterable
assert pbar.max_value == 10
pbar = ProgressBar(10)
assert pbar.max_value == 10
assert pbar.iterable is None
# Make sure that non-iterable input raises an error
def iter_func(a):
for ii in a:
pass
pytest.raises(Exception, iter_func, ProgressBar(20))
def _identity(x):
return x
def test_progressbar_parallel_basic(capsys):
"""Test ProgressBar with parallel computing, basic version."""
assert capsys.readouterr().out == ''
parallel, p_fun, _ = parallel_func(_identity, total=10, n_jobs=1,
verbose=True)
with use_log_level(True):
out = parallel(p_fun(x) for x in range(10))
assert out == list(range(10))
cap = capsys.readouterr()
out = cap.err
assert '100%' in out
def _identity_block(x, pb):
for ii in range(len(x)):
pb.update(ii + 1)
return x
def test_progressbar_parallel_advanced(capsys):
"""Test ProgressBar with parallel computing, advanced version."""
assert capsys.readouterr().out == ''
# This must be "1" because "capsys" won't get stdout properly otherwise
parallel, p_fun, _ = parallel_func(_identity_block, n_jobs=1,
verbose=False)
arr = np.arange(10)
with use_log_level(True):
with ProgressBar(len(arr)) as pb:
out = parallel(p_fun(x, pb.subset(pb_idx))
for pb_idx, x in array_split_idx(arr, 2))
assert op.isfile(pb._mmap_fname)
sum_ = np.memmap(pb._mmap_fname, dtype='bool', mode='r',
shape=10).sum()
assert sum_ == len(arr)
assert not op.isfile(pb._mmap_fname), '__exit__ not called?'
out = np.concatenate(out)
assert_array_equal(out, arr)
cap = capsys.readouterr()
out = cap.err
assert '100%' in out
def _identity_block_wide(x, pb):
for ii in range(len(x)):
for jj in range(2):
pb.update(ii * 2 + jj + 1)
return x, pb.idx
def test_progressbar_parallel_more(capsys):
"""Test ProgressBar with parallel computing, advanced version."""
assert capsys.readouterr().out == ''
# This must be "1" because "capsys" won't get stdout properly otherwise
parallel, p_fun, _ = parallel_func(_identity_block_wide, n_jobs=1,
verbose=False)
arr = np.arange(10)
with use_log_level(True):
with ProgressBar(len(arr) * 2) as pb:
out = parallel(p_fun(x, pb.subset(pb_idx))
for pb_idx, x in array_split_idx(
arr, 2, n_per_split=2))
idxs = np.concatenate([o[1] for o in out])
assert_array_equal(idxs, np.arange(len(arr) * 2))
out = np.concatenate([o[0] for o in out])
assert op.isfile(pb._mmap_fname)
sum_ = np.memmap(pb._mmap_fname, dtype='bool', mode='r',
shape=len(arr) * 2).sum()
assert sum_ == len(arr) * 2
assert not op.isfile(pb._mmap_fname), '__exit__ not called?'
cap = capsys.readouterr()
out = cap.err
assert '100%' in out
| Teekuningas/mne-python | mne/utils/tests/test_progressbar.py | Python | bsd-3-clause | 3,513 |
import inspect
from functools import partial
try:
from joblib.externals.cloudpickle import dumps, loads
cloudpickle = True
except ImportError:
cloudpickle = False
WRAP_CACHE = dict()
class CloudpickledObjectWrapper(object):
def __init__(self, obj, keep_wrapper=False):
self._obj = obj
self._keep_wrapper = keep_wrapper
def __reduce__(self):
_pickled_object = dumps(self._obj)
if not self._keep_wrapper:
return loads, (_pickled_object,)
return _reconstruct_wrapper, (_pickled_object, self._keep_wrapper)
def __getattr__(self, attr):
        # Ensure that the wrapped object can be used seamlessly as the
# previous object.
if attr not in ['_obj', '_keep_wrapper']:
return getattr(self._obj, attr)
return getattr(self, attr)
# Make sure the wrapped object conserves the callable property
class CallableObjectWrapper(CloudpickledObjectWrapper):
def __call__(self, *args, **kwargs):
return self._obj(*args, **kwargs)
def _wrap_non_picklable_objects(obj, keep_wrapper):
if callable(obj):
return CallableObjectWrapper(obj, keep_wrapper=keep_wrapper)
return CloudpickledObjectWrapper(obj, keep_wrapper=keep_wrapper)
def _reconstruct_wrapper(_pickled_object, keep_wrapper):
obj = loads(_pickled_object)
return _wrap_non_picklable_objects(obj, keep_wrapper)
def _wrap_objects_when_needed(obj):
# Function to introspect an object and decide if it should be wrapped or
# not.
if not cloudpickle:
return obj
need_wrap = "__main__" in getattr(obj, "__module__", "")
if isinstance(obj, partial):
return partial(
_wrap_objects_when_needed(obj.func),
*[_wrap_objects_when_needed(a) for a in obj.args],
**{k: _wrap_objects_when_needed(v)
for k, v in obj.keywords.items()}
)
if callable(obj):
# Need wrap if the object is a function defined in a local scope of
# another function.
func_code = getattr(obj, "__code__", "")
need_wrap |= getattr(func_code, "co_flags", 0) & inspect.CO_NESTED
# Need wrap if the obj is a lambda expression
func_name = getattr(obj, "__name__", "")
need_wrap |= "<lambda>" in func_name
if not need_wrap:
return obj
wrapped_obj = WRAP_CACHE.get(obj)
if wrapped_obj is None:
wrapped_obj = _wrap_non_picklable_objects(obj, keep_wrapper=False)
WRAP_CACHE[obj] = wrapped_obj
return wrapped_obj
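# Illustrative sketch (not part of the public API): a callable defined in
# "__main__" (e.g. at a REPL) is not picklable by module path, so it gets
# wrapped and serialized through cloudpickle instead:
#     f = lambda x: x + 1                  # lives in __main__
#     g = _wrap_objects_when_needed(f)     # returns a CallableObjectWrapper
#     pickle.loads(pickle.dumps(g))(1)     # -> 2, via cloudpickle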
def wrap_non_picklable_objects(obj, keep_wrapper=True):
"""Wrapper for non-picklable object to use cloudpickle to serialize them.
Note that this wrapper tends to slow down the serialization process as it
is done with cloudpickle which is typically slower compared to pickle. The
proper way to solve serialization issues is to avoid defining functions and
objects in the main scripts and to implement __reduce__ functions for
complex classes.
"""
if not cloudpickle:
raise ImportError("could not from joblib.externals import cloudpickle. Please install "
"cloudpickle to allow extended serialization. "
"(`pip install cloudpickle`).")
# If obj is a class, create a CloudpickledClassWrapper which instantiates
# the object internally and wrap it directly in a CloudpickledObjectWrapper
if inspect.isclass(obj):
class CloudpickledClassWrapper(CloudpickledObjectWrapper):
def __init__(self, *args, **kwargs):
self._obj = obj(*args, **kwargs)
self._keep_wrapper = keep_wrapper
CloudpickledClassWrapper.__name__ = obj.__name__
return CloudpickledClassWrapper
# If obj is an instance of a class, just wrap it in a regular
# CloudpickledObjectWrapper
return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper)
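# Typical usage (sketch): apply as a decorator to objects defined in scripts
# or notebooks so they survive the pickling step of a parallel backend:
#     @wrap_non_picklable_objects
#     def interactive_square(x):
#         return x ** 2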
| lesteve/joblib | joblib/externals/loky/cloudpickle_wrapper.py | Python | bsd-3-clause | 3,964 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Simple script which takes a file with one packet latency (expressed as a
# signed integer) per line and plots a trivial histogram.
# Copyright (c) 2015, Malte Schwarzkopf
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the project, the name of copyright holder nor the names
# of its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import sys, re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from utils import *
from matplotlib import pylab
from scipy.stats import scoreatpercentile
pkt_size = 256
train_length = 6
# @author: Aaron Blankstein, with modifications by Malte Schwarzkopf
class boxplotter(object):
def __init__(self, median, top, bottom, whisk_top=None,
whisk_bottom=None, extreme_top=None):
self.median = median
self.top = top
self.bott = bottom
self.whisk_top = whisk_top
self.whisk_bott = whisk_bottom
self.extreme_top = extreme_top
def draw_on(self, ax, index, box_color = "blue",
median_color = "red", whisker_color = "black"):
width = .7
w2 = width / 2
ax.broken_barh([(index - w2, width)],
(self.bott,self.top - self.bott),
facecolor="white",edgecolor=box_color, lw=0.5)
ax.broken_barh([(index - w2, width)],
(self.median,0),
facecolor="white", edgecolor=median_color, lw=0.5)
if self.whisk_top is not None:
ax.broken_barh([(index - w2, width)],
(self.whisk_top,0),
facecolor="white", edgecolor=whisker_color, lw=0.5)
ax.broken_barh([(index , 0)],
(self.whisk_top, self.top-self.whisk_top),
edgecolor=box_color,linestyle="solid", lw=0.5)
if self.whisk_bott is not None:
ax.broken_barh([(index - w2, width)],
(self.whisk_bott,0),
facecolor="white", edgecolor=whisker_color, lw=0.5)
ax.broken_barh([(index , 0)],
(self.whisk_bott,self.bott-self.whisk_bott),
edgecolor=box_color,linestyle="solid", lw=0.5)
if self.extreme_top is not None:
ax.scatter([index], [self.extreme_top], marker='*',
lw=0.5)
def percentile_box_plot(ax, data, indexer=None, box_top=75,
box_bottom=25,whisker_top=99,whisker_bottom=1):
if indexer is None:
indexed_data = zip(range(1,len(data)+1), data)
else:
indexed_data = [(indexer(datum), datum) for datum in data]
def get_whisk(vector, w):
if w is None:
return None
return scoreatpercentile(vector, w)
for index, x in indexed_data:
bp = boxplotter(scoreatpercentile(x, 50),
scoreatpercentile(x, box_top),
scoreatpercentile(x, box_bottom),
get_whisk(x, whisker_top),
get_whisk(x, whisker_bottom),
scoreatpercentile(x, 100))
bp.draw_on(ax, index)
def worst_case_approx(setups, trainlength, plength):
base_worst = 4.0 * 3
#base_worst = 0.566
#packet_time = (plength + 18.0) * 8.0 / 10.0 / 1000.0
packet_time = plength * 8.0 / 10.0 / 1000.0
tmp = [x * (packet_time * trainlength) for x in setups]
worst = [x + base_worst for x in tmp]
for i in range(len(worst)):
print "WORST CASE %d: %f" % (setups[i], worst[i])
return worst
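# Worked example (illustrative): with pkt_size = 256 and train_length = 6,
# packet_time = 256 * 8.0 / 10.0 / 1000.0 = 0.2048 us (at 10 Gb/s), so a
# setup value of 10 gives 10 * (0.2048 * 6) + 12.0 = 24.288 us.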
######################################
if len(sys.argv) < 2:
print "usage: plot_switch_experiment.py <input dir1> <input1 label> " \
"<input dir2> <input2 label> ... <output file>"
sys.exit(1)
paper_mode = True
if paper_mode:
set_paper_rcs()
# arg processing
if (len(sys.argv) - 1) % 2 == 1:
# odd number of args, have output name
outname = sys.argv[-1]
print "Output name specified: %s" % (outname)
else:
print "Please specify an output name!"
sys.exit(1)
inputdirs = []
labels = []
for i in range(1, len(sys.argv)-1, 2):
inputdirs.append(sys.argv[i])
labels.append(sys.argv[i+1])
# parsing
data = []
negs_ignored = 0
for indir in inputdirs:
ds = []
for line in open(indir).readlines():
if line.strip() == "":
continue
val = float(line.strip()) / 1000.0
if val > 0:
ds.append(val)
else:
negs_ignored += 1
data.append(ds)
print "Ignored %d negative latency values!" % (negs_ignored)
# plotting
fig = plt.figure(figsize=(3.33,2.22))
#plt.rc("font", size=7.0)
fig, ax = plt.subplots(figsize=(3.33,2.22))
pos = np.array(range(len(data)))+1
#bp = percentile_box_plot(ax, data)
plt.plot(pos, [np.mean(x) for x in data], marker='+', label='average',
lw=1.0, color='g')
plt.plot(pos, [np.percentile(x, 99) for x in data], marker='v',
label='99\\textsuperscript{th}\%ile',
lw=1.0, color='y', mfc='none', mec='y', mew=1.0)
plt.scatter(pos, [max(x) for x in data], marker='x',
label='100\\textsuperscript{th}\%ile',
lw=1.0, color='r')
# worst-case analytical approximation
#plt.plot(range(1, len(data)+1),
# worst_case_approx(range(0, len(data)), train_length, pkt_size),
# ':', color='r', label="modelled worst case", lw=1.0)
worst_case_approximation = worst_case_approx([10], train_length, pkt_size)[0]
wc_line = plt.axhline(worst_case_approximation, ls=':', color='r', lw=1.0)
#plt.axvline(worst_case_approx([10], train_length, pkt_size)[0] - 8, ls='--',
# color='k', lw=1.0, label="optimal network epoch")
first_legend = plt.legend(loc='upper left', frameon=False, handletextpad=0.1,
borderaxespad=0.05)
plt.gca().add_artist(first_legend)
plt.legend([wc_line], ["latency bound"], frameon=False, loc='upper center',
borderaxespad=0.05, handletextpad=0.1)
ax.set_xlabel('Throughput factor $f$')
ax.set_ylabel('End-to-end latency [$\mu$s]')
plt.ylim(0, 30.0)
plt.yticks(range(0, 31, 5), [str(x) for x in range(0, 31, 5)])
plt.xlim(0, len(inputdirs) + 1)
plt.xticks(range(pos[0], pos[-1] + 1, len(pos) / 5),
[round(worst_case_approximation / float(labels[i-1]), 1)
for i in range(pos[0], pos[-1] + 1, len(pos) / 5)])
plt.axvspan(0, 5, facecolor='0.8', alpha=0.5, zorder=0, lw=0.0)
plt.axvspan(20.5, 23, facecolor='0.8', alpha=0.5, zorder=0, lw=0.0)
plt.text(2, 31, "\\textbf{A}", fontsize=12)
plt.text(13, 31, "\\textbf{B}", fontsize=12)
plt.text(21.3, 31, "\\textbf{C}", fontsize=12)
#plt.setp(bp['whiskers'], color='k', linestyle='-' )
#plt.setp(bp['fliers'], markersize=3.0)
plt.savefig(outname, format="pdf", bbox_inches='tight', pad_inches=0.01)
| camsas/qjump-nsdi15-plotting | figure11/plot_throughput_factor_experiment.py | Python | bsd-3-clause | 8,217 |
from __future__ import unicode_literals, absolute_import
from mock import MagicMock
from ....unittest import TestCase
from oauthlib.oauth1 import RequestValidator
from oauthlib.oauth1.rfc5849 import errors
from oauthlib.oauth1.rfc5849.endpoints import AuthorizationEndpoint
class AuthorizationEndpointTest(TestCase):
def setUp(self):
self.validator = MagicMock(wraps=RequestValidator())
self.validator.verify_request_token.return_value = True
self.validator.verify_realms.return_value = True
self.validator.get_realms.return_value = ['test']
self.validator.save_verifier = MagicMock()
self.endpoint = AuthorizationEndpoint(self.validator)
self.uri = 'https://i.b/authorize?oauth_token=foo'
def test_get_realms_and_credentials(self):
realms, credentials = self.endpoint.get_realms_and_credentials(self.uri)
self.assertEqual(realms, ['test'])
def test_verify_token(self):
self.validator.verify_request_token.return_value = False
self.assertRaises(errors.InvalidClientError,
self.endpoint.get_realms_and_credentials, self.uri)
self.assertRaises(errors.InvalidClientError,
self.endpoint.create_authorization_response, self.uri)
def test_verify_realms(self):
self.validator.verify_realms.return_value = False
self.assertRaises(errors.InvalidRequestError,
self.endpoint.create_authorization_response,
self.uri,
realms=['bar'])
def test_create_authorization_response(self):
self.validator.get_redirect_uri.return_value = 'https://c.b/cb'
h, b, s = self.endpoint.create_authorization_response(self.uri)
self.assertEqual(s, 302)
        self.assertIn('Location', h)
        location = h['Location']
        self.assertTrue(location.startswith('https://c.b/cb'))
        self.assertIn('oauth_verifier', location)
    def test_create_authorization_response_oob(self):
self.validator.get_redirect_uri.return_value = 'oob'
h, b, s = self.endpoint.create_authorization_response(self.uri)
self.assertEqual(s, 200)
self.assertNotIn('Location', h)
self.assertIn('oauth_verifier', b)
self.assertIn('oauth_token', b)
| masci/oauthlib | tests/oauth1/rfc5849/endpoints/test_authorization.py | Python | bsd-3-clause | 2,250 |
import Sea
from Connection import Connection
class ConnectionPoint(Connection):
"""
Class for point connections.
"""
def __init__(self, obj, system, components):
Connection.__init__(self, obj, system, components)
#obj.Sort = 'Point'
def updateComponents(self, obj):
connections = Sea.actions.connection.ShapeConnection([item.Shape for item in obj.Components])
commons = connections.commons()
if any([item.Vertexes for item in commons]):
"""
There is indeed a point connection.
"""
obj.Proxy.model.components = obj.Components
obj.updateCouplings()
| python-acoustics/Sea | Sea/adapter/connections/ConnectionPoint.py | Python | bsd-3-clause | 714 |
import doctest
import pytest
from datascience import predicates
from datascience import *
def test_both():
"""Both f and g."""
p = are.above(2) & are.below(4)
ps = [p(x) for x in range(1, 6)]
assert ps == [False, False, True, False, False]
def test_either():
"""Either f or g."""
p = are.above(3) | are.below(2)
ps = [p(x) for x in range(1, 6)]
assert ps == [True, False, False, True, True]
def test_equal_to():
"""Equal to y."""
p = are.equal_to(1)
ps = [p(x) for x in range(1, 6)]
assert ps == [True, False, False, False, False]
def test_above():
"""Greater than y."""
p = are.above(3)
ps = [p(x) for x in range(1, 6)]
assert ps == [False, False, False, True, True]
def test_below():
"""Less than y."""
p = are.not_below(4)
ps = [p(x) for x in range(1, 6)]
assert ps == [False, False, False, True, True]
def test_above_or_equal_to():
"""Greater than or equal to y."""
p = are.above_or_equal_to(4)
ps = [p(x) for x in range(1, 6)]
assert ps == [False, False, False, True, True]
def test_below_or_equal_to():
"""Less than or equal to y."""
p = are.not_below_or_equal_to(3)
ps = [p(x) for x in range(1, 6)]
assert ps == [False, False, False, True, True]
def test_strictly_between():
"""Greater than y and less than z."""
p = are.strictly_between(2, 4)
ps = [p(x) for x in range(1, 6)]
assert ps == [False, False, True, False, False]
def test_between():
"""Greater than or equal to y and less than z."""
p = are.between(3, 4)
ps = [p(x) for x in range(1, 6)]
assert ps == [False, False, True, False, False]
def test_between_or_equal_to():
"""Greater than or equal to y and less than or equal to z."""
p = are.between_or_equal_to(3, 3)
ps = [p(x) for x in range(1, 6)]
assert ps == [False, False, True, False, False]
############
# Doctests #
############
def test_doctests():
results = doctest.testmod(predicates,
optionflags=doctest.NORMALIZE_WHITESPACE)
assert results.failed == 0
| dsten/datascience | tests/test_predicates.py | Python | bsd-3-clause | 2,093 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import sys
from telemetry.core import util
from telemetry.results import buildbot_output_formatter
from telemetry.results import chart_json_output_formatter
from telemetry.results import csv_output_formatter
from telemetry.results import csv_pivot_table_output_formatter
from telemetry.results import gtest_progress_reporter
from telemetry.results import html_output_formatter
from telemetry.results import json_output_formatter
from telemetry.results import page_test_results
from telemetry.results import progress_reporter
# Allowed output formats. The default is the first item in the list.
_OUTPUT_FORMAT_CHOICES = ('html', 'buildbot', 'csv', 'gtest', 'json',
'chartjson', 'csv-pivot-table', 'none')
# Filenames to use for given output formats.
_OUTPUT_FILENAME_LOOKUP = {
'html': 'results.html',
'csv': 'results.csv',
'json': 'results.json',
'chartjson': 'results-chart.json',
'csv-pivot-table': 'results-pivot-table.csv'
}
def AddResultsOptions(parser):
group = optparse.OptionGroup(parser, 'Results options')
group.add_option('--chartjson', action='store_true',
help='Output Chart JSON. Ignores --output-format.')
group.add_option('--output-format', action='append', dest='output_formats',
choices=_OUTPUT_FORMAT_CHOICES, default=[],
help='Output format. Defaults to "%%default". '
'Can be %s.' % ', '.join(_OUTPUT_FORMAT_CHOICES))
group.add_option('-o', '--output',
dest='output_file',
default=None,
help='Redirects output to a file. Defaults to stdout.')
group.add_option('--output-dir', default=util.GetBaseDir(),
help='Where to save output data after the run.')
group.add_option('--output-trace-tag',
default='',
help='Append a tag to the key of each result trace. Use '
'with html, buildbot, csv-pivot-table output formats.')
group.add_option('--reset-results', action='store_true',
help='Delete all stored results.')
group.add_option('--upload-results', action='store_true',
help='Upload the results to cloud storage.')
group.add_option('--upload-bucket', default='internal',
choices=['public', 'partner', 'internal'],
help='Storage bucket to use for the uploaded results. '
'Defaults to internal. Supported values are: '
'public, partner, internal')
group.add_option('--results-label',
default=None,
help='Optional label to use for the results of a run .')
group.add_option('--suppress_gtest_report',
default=False,
help='Whether to suppress GTest progress report.')
parser.add_option_group(group)
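# Example invocation (illustrative):
#   --output-format=html --output-format=json --output-dir=/tmp/run
# writes /tmp/run/results.html and /tmp/run/results.json, per the
# _OUTPUT_FILENAME_LOOKUP table above.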
def ProcessCommandLineArgs(parser, args):
  # TODO(ariblue): Delete this flag entirely at some future date, when the
# existence of such a flag has been long forgotten.
if args.output_file:
parser.error('This flag is deprecated. Please use --output-dir instead.')
try:
os.makedirs(args.output_dir)
except OSError:
# Do nothing if the output directory already exists. Existing files will
# get overwritten.
pass
args.output_dir = os.path.expanduser(args.output_dir)
def _GetOutputStream(output_format, output_dir):
assert output_format in _OUTPUT_FORMAT_CHOICES, 'Must specify a valid format.'
assert output_format not in ('gtest', 'none'), (
'Cannot set stream for \'gtest\' or \'none\' output formats.')
if output_format == 'buildbot':
return sys.stdout
assert output_format in _OUTPUT_FILENAME_LOOKUP, (
'No known filename for the \'%s\' output format' % output_format)
output_file = os.path.join(output_dir, _OUTPUT_FILENAME_LOOKUP[output_format])
open(output_file, 'a').close() # Create file if it doesn't exist.
return open(output_file, 'r+')
def _GetProgressReporter(output_skipped_tests_summary, suppress_gtest_report):
if suppress_gtest_report:
return progress_reporter.ProgressReporter()
return gtest_progress_reporter.GTestProgressReporter(
sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)
def CreateResults(benchmark_metadata, options,
value_can_be_added_predicate=lambda v: True):
"""
Args:
options: Contains the options specified in AddResultsOptions.
"""
if not options.output_formats:
options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]
output_formatters = []
for output_format in options.output_formats:
    if output_format in ('none', 'gtest') or options.chartjson:
continue
output_stream = _GetOutputStream(output_format, options.output_dir)
if output_format == 'csv':
output_formatters.append(csv_output_formatter.CsvOutputFormatter(
output_stream))
elif output_format == 'csv-pivot-table':
output_formatters.append(
csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
output_stream, trace_tag=options.output_trace_tag))
elif output_format == 'buildbot':
output_formatters.append(
buildbot_output_formatter.BuildbotOutputFormatter(
output_stream, trace_tag=options.output_trace_tag))
elif output_format == 'html':
# TODO(chrishenry): We show buildbot output so that users can grep
# through the results easily without needing to open the html
# file. Another option for this is to output the results directly
# in gtest-style results (via some sort of progress reporter),
# as we plan to enable gtest-style output for all output formatters.
output_formatters.append(
buildbot_output_formatter.BuildbotOutputFormatter(
sys.stdout, trace_tag=options.output_trace_tag))
output_formatters.append(html_output_formatter.HtmlOutputFormatter(
output_stream, benchmark_metadata, options.reset_results,
options.upload_results, options.browser_type,
options.results_label, trace_tag=options.output_trace_tag))
elif output_format == 'json':
output_formatters.append(json_output_formatter.JsonOutputFormatter(
output_stream, benchmark_metadata))
elif output_format == 'chartjson':
output_formatters.append(
chart_json_output_formatter.ChartJsonOutputFormatter(
output_stream, benchmark_metadata))
else:
# Should never be reached. The parser enforces the choices.
raise Exception('Invalid --output-format "%s". Valid choices are: %s'
% (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))
# TODO(chrishenry): This is here to not change the output of
# gtest. Let's try enabling skipped tests summary for gtest test
# results too (in a separate patch), and see if we break anything.
output_skipped_tests_summary = 'gtest' in options.output_formats
reporter = _GetProgressReporter(output_skipped_tests_summary,
options.suppress_gtest_report)
return page_test_results.PageTestResults(
output_formatters=output_formatters, progress_reporter=reporter,
output_dir=options.output_dir,
value_can_be_added_predicate=value_can_be_added_predicate)
| hefen1/chromium | tools/telemetry/telemetry/results/results_options.py | Python | bsd-3-clause | 7,550 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide the standard 147 CSS (X11) named colors.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from .util import NamedColor
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
aliceblue = NamedColor("aliceblue", 240, 248, 255)
antiquewhite = NamedColor("antiquewhite", 250, 235, 215)
aqua = NamedColor("aqua", 0, 255, 255)
aquamarine = NamedColor("aquamarine", 127, 255, 212)
azure = NamedColor("azure", 240, 255, 255)
beige = NamedColor("beige", 245, 245, 220)
bisque = NamedColor("bisque", 255, 228, 196)
black = NamedColor("black", 0, 0, 0 )
blanchedalmond = NamedColor("blanchedalmond", 255, 235, 205)
blue = NamedColor("blue", 0, 0, 255)
blueviolet = NamedColor("blueviolet", 138, 43, 226)
brown = NamedColor("brown", 165, 42, 42 )
burlywood = NamedColor("burlywood", 222, 184, 135)
cadetblue = NamedColor("cadetblue", 95, 158, 160)
chartreuse = NamedColor("chartreuse", 127, 255, 0 )
chocolate = NamedColor("chocolate", 210, 105, 30 )
coral = NamedColor("coral", 255, 127, 80 )
cornflowerblue = NamedColor("cornflowerblue", 100, 149, 237)
cornsilk = NamedColor("cornsilk", 255, 248, 220)
crimson = NamedColor("crimson", 220, 20, 60 )
cyan = NamedColor("cyan", 0, 255, 255)
darkblue = NamedColor("darkblue", 0, 0, 139)
darkcyan = NamedColor("darkcyan", 0, 139, 139)
darkgoldenrod = NamedColor("darkgoldenrod", 184, 134, 11 )
darkgray = NamedColor("darkgray", 169, 169, 169)
darkgreen = NamedColor("darkgreen", 0, 100, 0 )
darkgrey = NamedColor("darkgrey", 169, 169, 169)
darkkhaki = NamedColor("darkkhaki", 189, 183, 107)
darkmagenta = NamedColor("darkmagenta", 139, 0, 139)
darkolivegreen = NamedColor("darkolivegreen", 85, 107, 47 )
darkorange = NamedColor("darkorange", 255, 140, 0 )
darkorchid = NamedColor("darkorchid", 153, 50, 204)
darkred = NamedColor("darkred", 139, 0, 0 )
darksalmon = NamedColor("darksalmon", 233, 150, 122)
darkseagreen = NamedColor("darkseagreen", 143, 188, 143)
darkslateblue = NamedColor("darkslateblue", 72, 61, 139)
darkslategray = NamedColor("darkslategray", 47, 79, 79 )
darkslategrey = NamedColor("darkslategrey", 47, 79, 79 )
darkturquoise = NamedColor("darkturquoise", 0, 206, 209)
darkviolet = NamedColor("darkviolet", 148, 0, 211)
deeppink = NamedColor("deeppink", 255, 20, 147)
deepskyblue = NamedColor("deepskyblue", 0, 191, 255)
dimgray = NamedColor("dimgray", 105, 105, 105)
dimgrey = NamedColor("dimgrey", 105, 105, 105)
dodgerblue = NamedColor("dodgerblue", 30, 144, 255)
firebrick = NamedColor("firebrick", 178, 34, 34 )
floralwhite = NamedColor("floralwhite", 255, 250, 240)
forestgreen = NamedColor("forestgreen", 34, 139, 34 )
fuchsia = NamedColor("fuchsia", 255, 0, 255)
gainsboro = NamedColor("gainsboro", 220, 220, 220)
ghostwhite = NamedColor("ghostwhite", 248, 248, 255)
gold = NamedColor("gold", 255, 215, 0 )
goldenrod = NamedColor("goldenrod", 218, 165, 32 )
gray = NamedColor("gray", 128, 128, 128)
green = NamedColor("green", 0, 128, 0 )
greenyellow = NamedColor("greenyellow", 173, 255, 47 )
grey = NamedColor("grey", 128, 128, 128)
honeydew = NamedColor("honeydew", 240, 255, 240)
hotpink = NamedColor("hotpink", 255, 105, 180)
indianred = NamedColor("indianred", 205, 92, 92 )
indigo = NamedColor("indigo", 75, 0, 130)
ivory = NamedColor("ivory", 255, 255, 240)
khaki = NamedColor("khaki", 240, 230, 140)
lavender = NamedColor("lavender", 230, 230, 250)
lavenderblush = NamedColor("lavenderblush", 255, 240, 245)
lawngreen = NamedColor("lawngreen", 124, 252, 0 )
lemonchiffon = NamedColor("lemonchiffon", 255, 250, 205)
lightblue = NamedColor("lightblue", 173, 216, 230)
lightcoral = NamedColor("lightcoral", 240, 128, 128)
lightcyan = NamedColor("lightcyan", 224, 255, 255)
lightgoldenrodyellow = NamedColor("lightgoldenrodyellow", 250, 250, 210)
lightgray = NamedColor("lightgray", 211, 211, 211)
lightgreen = NamedColor("lightgreen", 144, 238, 144)
lightgrey = NamedColor("lightgrey", 211, 211, 211)
lightpink = NamedColor("lightpink", 255, 182, 193)
lightsalmon = NamedColor("lightsalmon", 255, 160, 122)
lightseagreen = NamedColor("lightseagreen", 32, 178, 170)
lightskyblue = NamedColor("lightskyblue", 135, 206, 250)
lightslategray = NamedColor("lightslategray", 119, 136, 153)
lightslategrey = NamedColor("lightslategrey", 119, 136, 153)
lightsteelblue = NamedColor("lightsteelblue", 176, 196, 222)
lightyellow = NamedColor("lightyellow", 255, 255, 224)
lime = NamedColor("lime", 0, 255, 0 )
limegreen = NamedColor("limegreen", 50, 205, 50 )
linen = NamedColor("linen", 250, 240, 230)
magenta = NamedColor("magenta", 255, 0, 255)
maroon = NamedColor("maroon", 128, 0, 0 )
mediumaquamarine = NamedColor("mediumaquamarine", 102, 205, 170)
mediumblue = NamedColor("mediumblue", 0, 0, 205)
mediumorchid = NamedColor("mediumorchid", 186, 85, 211)
mediumpurple = NamedColor("mediumpurple", 147, 112, 219)
mediumseagreen = NamedColor("mediumseagreen", 60, 179, 113)
mediumslateblue = NamedColor("mediumslateblue", 123, 104, 238)
mediumspringgreen = NamedColor("mediumspringgreen", 0, 250, 154)
mediumturquoise = NamedColor("mediumturquoise", 72, 209, 204)
mediumvioletred = NamedColor("mediumvioletred", 199, 21, 133)
midnightblue = NamedColor("midnightblue", 25, 25, 112)
mintcream = NamedColor("mintcream", 245, 255, 250)
mistyrose = NamedColor("mistyrose", 255, 228, 225)
moccasin = NamedColor("moccasin", 255, 228, 181)
navajowhite = NamedColor("navajowhite", 255, 222, 173)
navy = NamedColor("navy", 0, 0, 128)
oldlace = NamedColor("oldlace", 253, 245, 230)
olive = NamedColor("olive", 128, 128, 0 )
olivedrab = NamedColor("olivedrab", 107, 142, 35 )
orange = NamedColor("orange", 255, 165, 0 )
orangered = NamedColor("orangered", 255, 69, 0 )
orchid = NamedColor("orchid", 218, 112, 214)
palegoldenrod = NamedColor("palegoldenrod", 238, 232, 170)
palegreen = NamedColor("palegreen", 152, 251, 152)
paleturquoise = NamedColor("paleturquoise", 175, 238, 238)
palevioletred = NamedColor("palevioletred", 219, 112, 147)
papayawhip = NamedColor("papayawhip", 255, 239, 213)
peachpuff = NamedColor("peachpuff", 255, 218, 185)
peru = NamedColor("peru", 205, 133, 63 )
pink = NamedColor("pink", 255, 192, 203)
plum = NamedColor("plum", 221, 160, 221)
powderblue = NamedColor("powderblue", 176, 224, 230)
purple = NamedColor("purple", 128, 0, 128)
red = NamedColor("red", 255, 0, 0 )
rosybrown = NamedColor("rosybrown", 188, 143, 143)
royalblue = NamedColor("royalblue", 65, 105, 225)
saddlebrown = NamedColor("saddlebrown", 139, 69, 19 )
salmon = NamedColor("salmon", 250, 128, 114)
sandybrown = NamedColor("sandybrown", 244, 164, 96 )
seagreen = NamedColor("seagreen", 46, 139, 87 )
seashell = NamedColor("seashell", 255, 245, 238)
sienna = NamedColor("sienna", 160, 82, 45 )
silver = NamedColor("silver", 192, 192, 192)
skyblue = NamedColor("skyblue", 135, 206, 235)
slateblue = NamedColor("slateblue", 106, 90, 205)
slategray = NamedColor("slategray", 112, 128, 144)
slategrey = NamedColor("slategrey", 112, 128, 144)
snow = NamedColor("snow", 255, 250, 250)
springgreen = NamedColor("springgreen", 0, 255, 127)
steelblue = NamedColor("steelblue", 70, 130, 180)
tan = NamedColor("tan", 210, 180, 140)
teal = NamedColor("teal", 0, 128, 128)
thistle = NamedColor("thistle", 216, 191, 216)
tomato = NamedColor("tomato", 255, 99, 71 )
turquoise = NamedColor("turquoise", 64, 224, 208)
violet = NamedColor("violet", 238, 130, 238)
wheat = NamedColor("wheat", 245, 222, 179)
white = NamedColor("white", 255, 255, 255)
whitesmoke = NamedColor("whitesmoke", 245, 245, 245)
yellow = NamedColor("yellow", 255, 255, 0 )
yellowgreen = NamedColor("yellowgreen", 154, 205, 50 )
__all__ = NamedColor.__all__
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| stonebig/bokeh | bokeh/colors/named.py | Python | bsd-3-clause | 13,025 |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Text Trees
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# Peter Ljunglöf <[email protected]>
# Nathan Bodenstab <[email protected]> (tree transforms)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Class for representing hierarchical language structures, such as
syntax trees and morphological trees.
"""
from __future__ import print_function, unicode_literals
# TODO: add LabelledTree (can be used for dependency trees)
import re
from nltk.grammar import Production, Nonterminal
from nltk.probability import ProbabilisticMixIn
from nltk.util import slice_bounds
from nltk.compat import string_types, python_2_unicode_compatible, unicode_repr
from nltk.internals import raise_unorderable_types
######################################################################
## Trees
######################################################################
@python_2_unicode_compatible
class Tree(list):
"""
A Tree represents a hierarchical grouping of leaves and subtrees.
For example, each constituent in a syntax tree is represented by a single Tree.
A tree's children are encoded as a list of leaves and subtrees,
where a leaf is a basic (non-tree) value; and a subtree is a
nested Tree.
>>> from nltk.tree import Tree
>>> print(Tree(1, [2, Tree(3, [4]), 5]))
(1 2 (3 4) 5)
>>> vp = Tree('VP', [Tree('V', ['saw']),
... Tree('NP', ['him'])])
>>> s = Tree('S', [Tree('NP', ['I']), vp])
>>> print(s)
(S (NP I) (VP (V saw) (NP him)))
>>> print(s[1])
(VP (V saw) (NP him))
>>> print(s[1,1])
(NP him)
>>> t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))")
>>> s == t
True
>>> t[1][1].set_label('X')
>>> t[1][1].label()
'X'
>>> print(t)
(S (NP I) (VP (V saw) (X him)))
>>> t[0], t[1,1] = t[1,1], t[0]
>>> print(t)
(S (X him) (VP (V saw) (NP I)))
The length of a tree is the number of children it has.
>>> len(t)
2
The set_label() and label() methods allow individual constituents
to be labeled. For example, syntax trees use this label to specify
phrase tags, such as "NP" and "VP".
Several Tree methods use "tree positions" to specify
children or descendants of a tree. Tree positions are defined as
follows:
- The tree position *i* specifies a Tree's *i*\ th child.
- The tree position ``()`` specifies the Tree itself.
- If *p* is the tree position of descendant *d*, then
*p+i* specifies the *i*\ th child of *d*.
I.e., every tree position is either a single index *i*,
specifying ``tree[i]``; or a sequence *i1, i2, ..., iN*,
specifying ``tree[i1][i2]...[iN]``.
Construct a new tree. This constructor can be called in one
of two ways:
- ``Tree(label, children)`` constructs a new tree with the
specified label and list of children.
- ``Tree.fromstring(s)`` constructs a new tree by parsing the string ``s``.
"""
def __init__(self, node, children=None):
if children is None:
raise TypeError("%s: Expected a node value and child list "
% type(self).__name__)
elif isinstance(children, string_types):
raise TypeError("%s() argument 2 should be a list, not a "
"string" % type(self).__name__)
else:
list.__init__(self, children)
self._label = node
#////////////////////////////////////////////////////////////
# Comparison operators
#////////////////////////////////////////////////////////////
def __eq__(self, other):
return (self.__class__ is other.__class__ and
(self._label, list(self)) == (other._label, list(other)))
def __lt__(self, other):
if not isinstance(other, Tree):
# raise_unorderable_types("<", self, other)
# Sometimes children can be pure strings,
# so we need to be able to compare with non-trees:
return self.__class__.__name__ < other.__class__.__name__
elif self.__class__ is other.__class__:
return (self._label, list(self)) < (other._label, list(other))
else:
return self.__class__.__name__ < other.__class__.__name__
# @total_ordering doesn't work here, since the class inherits from a builtin class
__ne__ = lambda self, other: not self == other
__gt__ = lambda self, other: not (self < other or self == other)
__le__ = lambda self, other: self < other or self == other
__ge__ = lambda self, other: not self < other
#////////////////////////////////////////////////////////////
# Disabled list operations
#////////////////////////////////////////////////////////////
def __mul__(self, v):
raise TypeError('Tree does not support multiplication')
def __rmul__(self, v):
raise TypeError('Tree does not support multiplication')
def __add__(self, v):
raise TypeError('Tree does not support addition')
def __radd__(self, v):
raise TypeError('Tree does not support addition')
#////////////////////////////////////////////////////////////
# Indexing (with support for tree positions)
#////////////////////////////////////////////////////////////
def __getitem__(self, index):
if isinstance(index, (int, slice)):
return list.__getitem__(self, index)
elif isinstance(index, (list, tuple)):
if len(index) == 0:
return self
elif len(index) == 1:
return self[index[0]]
else:
return self[index[0]][index[1:]]
else:
raise TypeError("%s indices must be integers, not %s" %
(type(self).__name__, type(index).__name__))
def __setitem__(self, index, value):
if isinstance(index, (int, slice)):
return list.__setitem__(self, index, value)
elif isinstance(index, (list, tuple)):
if len(index) == 0:
raise IndexError('The tree position () may not be '
'assigned to.')
elif len(index) == 1:
self[index[0]] = value
else:
self[index[0]][index[1:]] = value
else:
raise TypeError("%s indices must be integers, not %s" %
(type(self).__name__, type(index).__name__))
def __delitem__(self, index):
if isinstance(index, (int, slice)):
return list.__delitem__(self, index)
elif isinstance(index, (list, tuple)):
if len(index) == 0:
raise IndexError('The tree position () may not be deleted.')
elif len(index) == 1:
del self[index[0]]
else:
del self[index[0]][index[1:]]
else:
raise TypeError("%s indices must be integers, not %s" %
(type(self).__name__, type(index).__name__))
#////////////////////////////////////////////////////////////
# Basic tree operations
#////////////////////////////////////////////////////////////
def _get_node(self):
"""Outdated method to access the node value; use the label() method instead."""
raise NotImplementedError("Use label() to access a node label.")
def _set_node(self, value):
"""Outdated method to set the node value; use the set_label() method instead."""
raise NotImplementedError("Use set_label() method to set a node label.")
node = property(_get_node, _set_node)
def label(self):
"""
Return the node label of the tree.
>>> t = Tree.fromstring('(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))')
>>> t.label()
'S'
:return: the node label (typically a string)
:rtype: any
"""
return self._label
def set_label(self, label):
"""
Set the node label of the tree.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.set_label("T")
>>> print(t)
(T (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))
:param label: the node label (typically a string)
:type label: any
"""
self._label = label
def leaves(self):
"""
Return the leaves of the tree.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.leaves()
['the', 'dog', 'chased', 'the', 'cat']
:return: a list containing this tree's leaves.
The order reflects the order of the
leaves in the tree's hierarchical structure.
:rtype: list
"""
leaves = []
for child in self:
if isinstance(child, Tree):
leaves.extend(child.leaves())
else:
leaves.append(child)
return leaves
def flatten(self):
"""
Return a flat version of the tree, with all non-root non-terminals removed.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> print(t.flatten())
(S the dog chased the cat)
:return: a tree consisting of this tree's root connected directly to
its leaves, omitting all intervening non-terminal nodes.
:rtype: Tree
"""
return Tree(self.label(), self.leaves())
def height(self):
"""
Return the height of the tree.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.height()
5
>>> print(t[0,0])
(D the)
>>> t[0,0].height()
2
:return: The height of this tree. The height of a tree
containing no children is 1; the height of a tree
containing only leaves is 2; and the height of any other
tree is one plus the maximum of its children's
heights.
:rtype: int
"""
max_child_height = 0
for child in self:
if isinstance(child, Tree):
max_child_height = max(max_child_height, child.height())
else:
max_child_height = max(max_child_height, 1)
return 1 + max_child_height
def treepositions(self, order='preorder'):
"""
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.treepositions() # doctest: +ELLIPSIS
[(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0), (1, 0, 0), ...]
>>> for pos in t.treepositions('leaves'):
... t[pos] = t[pos][::-1].upper()
>>> print(t)
(S (NP (D EHT) (N GOD)) (VP (V DESAHC) (NP (D EHT) (N TAC))))
:param order: One of: ``preorder``, ``postorder``, ``bothorder``,
``leaves``.
"""
positions = []
if order in ('preorder', 'bothorder'): positions.append( () )
for i, child in enumerate(self):
if isinstance(child, Tree):
childpos = child.treepositions(order)
positions.extend((i,)+p for p in childpos)
else:
positions.append( (i,) )
if order in ('postorder', 'bothorder'): positions.append( () )
return positions
def subtrees(self, filter=None):
"""
Generate all the subtrees of this tree, optionally restricted
to trees matching the filter function.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> for s in t.subtrees(lambda t: t.height() == 2):
... print(s)
(D the)
(N dog)
(V chased)
(D the)
(N cat)
:type filter: function
:param filter: the function to filter all local trees
"""
if not filter or filter(self):
yield self
for child in self:
if isinstance(child, Tree):
for subtree in child.subtrees(filter):
yield subtree
def productions(self):
"""
Generate the productions that correspond to the non-terminal nodes of the tree.
For each subtree of the form (P: C1 C2 ... Cn) this produces a production of the
form P -> C1 C2 ... Cn.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.productions()
[S -> NP VP, NP -> D N, D -> 'the', N -> 'dog', VP -> V NP, V -> 'chased',
NP -> D N, D -> 'the', N -> 'cat']
:rtype: list(Production)
"""
if not isinstance(self._label, string_types):
raise TypeError('Productions can only be generated from trees having node labels that are strings')
prods = [Production(Nonterminal(self._label), _child_names(self))]
for child in self:
if isinstance(child, Tree):
prods += child.productions()
return prods
def pos(self):
"""
Return a sequence of pos-tagged words extracted from the tree.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.pos()
[('the', 'D'), ('dog', 'N'), ('chased', 'V'), ('the', 'D'), ('cat', 'N')]
:return: a list of tuples containing leaves and pre-terminals (part-of-speech tags).
The order reflects the order of the leaves in the tree's hierarchical structure.
:rtype: list(tuple)
"""
pos = []
for child in self:
if isinstance(child, Tree):
pos.extend(child.pos())
else:
pos.append((child, self._label))
return pos
def leaf_treeposition(self, index):
"""
:return: The tree position of the ``index``-th leaf in this
tree. I.e., if ``tp=self.leaf_treeposition(i)``, then
``self[tp]==self.leaves()[i]``.
:raise IndexError: If this tree contains fewer than ``index+1``
leaves, or if ``index<0``.
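        For example (an illustrative doctest; the tree literal is our
        own, not from the original documentation):
        >>> t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))")
        >>> t.leaf_treeposition(2)
        (1, 1, 0)
        >>> t[t.leaf_treeposition(2)]
        'him'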
"""
if index < 0: raise IndexError('index must be non-negative')
stack = [(self, ())]
while stack:
value, treepos = stack.pop()
if not isinstance(value, Tree):
if index == 0: return treepos
else: index -= 1
else:
for i in range(len(value)-1, -1, -1):
stack.append( (value[i], treepos+(i,)) )
raise IndexError('index must be less than or equal to len(self)')
def treeposition_spanning_leaves(self, start, end):
"""
:return: The tree position of the lowest descendant of this
tree that dominates ``self.leaves()[start:end]``.
:raise ValueError: if ``end <= start``
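        For example (an illustrative doctest; the tree literal is our own):
        >>> t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))")
        >>> t.treeposition_spanning_leaves(1, 3)
        (1,)
        >>> print(t[t.treeposition_spanning_leaves(1, 3)])
        (VP (V saw) (NP him))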
"""
if end <= start:
raise ValueError('end must be greater than start')
# Find the tree positions of the start & end leaves, and
# take the longest common subsequence.
start_treepos = self.leaf_treeposition(start)
end_treepos = self.leaf_treeposition(end-1)
# Find the first index where they mismatch:
for i in range(len(start_treepos)):
if i == len(end_treepos) or start_treepos[i] != end_treepos[i]:
return start_treepos[:i]
return start_treepos
#////////////////////////////////////////////////////////////
# Transforms
#////////////////////////////////////////////////////////////
def chomsky_normal_form(self, factor="right", horzMarkov=None, vertMarkov=0, childChar="|", parentChar="^"):
"""
        This method can modify a tree in three ways:
          1. Convert a tree into its Chomsky Normal Form (CNF)
            equivalent -- Every subtree has either two non-terminals
            or one terminal as its children.  This process requires
            the creation of more "artificial" non-terminal nodes.
          2. Markov (horizontal) smoothing of sibling children in new
            artificial nodes
          3. Vertical (parent) annotation of nodes
:param factor: Right or left factoring method (default = "right")
:type factor: str = [left|right]
:param horzMarkov: Markov order for sibling smoothing in artificial nodes (None (default) = include all siblings)
:type horzMarkov: int | None
:param vertMarkov: Markov order for parent smoothing (0 (default) = no vertical annotation)
:type vertMarkov: int | None
:param childChar: A string used in construction of the artificial nodes, separating the head of the
original subtree from the child nodes that have yet to be expanded (default = "|")
:type childChar: str
:param parentChar: A string used to separate the node representation from its vertical annotation
:type parentChar: str
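        A minimal right-factoring sketch (the tree and the expected
        output below are our own illustration, not from the original
        documentation):
        >>> t = Tree.fromstring('(S (NP I) (VP (V saw) (NP him) (PP (P with) (NP it))))')
        >>> t.chomsky_normal_form()
        >>> print(t)
        (S (NP I) (VP (V saw) (VP|<NP-PP> (NP him) (PP (P with) (NP it)))))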
"""
from nltk.treetransforms import chomsky_normal_form
chomsky_normal_form(self, factor, horzMarkov, vertMarkov, childChar, parentChar)
def un_chomsky_normal_form(self, expandUnary = True, childChar = "|", parentChar = "^", unaryChar = "+"):
"""
This method modifies the tree in three ways:
1. Transforms a tree in Chomsky Normal Form back to its
original structure (branching greater than two)
2. Removes any parent annotation (if it exists)
3. (optional) expands unary subtrees (if previously
collapsed with collapseUnary(...) )
:param expandUnary: Flag to expand unary or not (default = True)
:type expandUnary: bool
:param childChar: A string separating the head node from its children in an artificial node (default = "|")
:type childChar: str
        :param parentChar: A string separating the node label from its parent annotation (default = "^")
:type parentChar: str
:param unaryChar: A string joining two non-terminals in a unary production (default = "+")
:type unaryChar: str
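        A round-trip sketch (our own illustration): converting a tree to
        CNF and back should restore the original structure:
        >>> t = Tree.fromstring('(S (NP I) (VP (V saw) (NP him) (PP (P with) (NP it))))')
        >>> t2 = t.copy(deep=True)
        >>> t2.chomsky_normal_form()
        >>> t2.un_chomsky_normal_form()
        >>> t == t2
        True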
"""
from nltk.treetransforms import un_chomsky_normal_form
un_chomsky_normal_form(self, expandUnary, childChar, parentChar, unaryChar)
def collapse_unary(self, collapsePOS = False, collapseRoot = False, joinChar = "+"):
"""
        Collapse subtrees with a single child (i.e. unary productions)
into a new non-terminal (Tree node) joined by 'joinChar'.
This is useful when working with algorithms that do not allow
unary productions, and completely removing the unary productions
would require loss of useful information. The Tree is modified
directly (since it is passed by reference) and no value is returned.
        :param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (i.e.
Part-of-Speech tags) since they are always unary productions
:type collapsePOS: bool
:param collapseRoot: 'False' (default) will not modify the root production
if it is unary. For the Penn WSJ treebank corpus, this corresponds
to the TOP -> productions.
:type collapseRoot: bool
:param joinChar: A string used to connect collapsed node values (default = "+")
:type joinChar: str
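        For example (an illustrative doctest; the tree literal is our own):
        >>> t = Tree.fromstring('(S (NP (NNP John)) (VP (V runs)))')
        >>> t.collapse_unary(collapsePOS=True)
        >>> print(t)
        (S (NP+NNP John) (VP+V runs))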
"""
from nltk.treetransforms import collapse_unary
collapse_unary(self, collapsePOS, collapseRoot, joinChar)
#////////////////////////////////////////////////////////////
# Convert, copy
#////////////////////////////////////////////////////////////
@classmethod
def convert(cls, tree):
"""
Convert a tree between different subtypes of Tree. ``cls`` determines
which class will be used to encode the new tree.
:type tree: Tree
:param tree: The tree that should be converted.
:return: The new Tree.
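        For example, converting a plain ``Tree`` into a ``ParentedTree``
        (an illustrative doctest; the tree literal is our own):
        >>> t = Tree.fromstring('(S (NP I) (VP saw))')
        >>> pt = ParentedTree.convert(t)
        >>> type(pt).__name__
        'ParentedTree'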
"""
if isinstance(tree, Tree):
children = [cls.convert(child) for child in tree]
return cls(tree._label, children)
else:
return tree
def copy(self, deep=False):
if not deep: return type(self)(self._label, self)
else: return type(self).convert(self)
def _frozen_class(self): return ImmutableTree
def freeze(self, leaf_freezer=None):
frozen_class = self._frozen_class()
if leaf_freezer is None:
newcopy = frozen_class.convert(self)
else:
newcopy = self.copy(deep=True)
for pos in newcopy.treepositions('leaves'):
newcopy[pos] = leaf_freezer(newcopy[pos])
newcopy = frozen_class.convert(newcopy)
hash(newcopy) # Make sure the leaves are hashable.
return newcopy
#////////////////////////////////////////////////////////////
# Parsing
#////////////////////////////////////////////////////////////
@classmethod
def fromstring(cls, s, brackets='()', read_node=None, read_leaf=None,
node_pattern=None, leaf_pattern=None,
remove_empty_top_bracketing=False):
"""
Read a bracketed tree string and return the resulting tree.
        Trees are represented as nested bracketings, such as::
(S (NP (NNP John)) (VP (V runs)))
:type s: str
:param s: The string to read
:type brackets: str (length=2)
:param brackets: The bracket characters used to mark the
beginning and end of trees and subtrees.
:type read_node: function
:type read_leaf: function
:param read_node, read_leaf: If specified, these functions
are applied to the substrings of ``s`` corresponding to
nodes and leaves (respectively) to obtain the values for
those nodes and leaves. They should have the following
signature:
read_node(str) -> value
For example, these functions could be used to process nodes
and leaves whose values should be some type other than
string (such as ``FeatStruct``).
Note that by default, node strings and leaf strings are
delimited by whitespace and brackets; to override this
default, use the ``node_pattern`` and ``leaf_pattern``
arguments.
:type node_pattern: str
:type leaf_pattern: str
:param node_pattern, leaf_pattern: Regular expression patterns
used to find node and leaf substrings in ``s``. By
default, both nodes patterns are defined to match any
sequence of non-whitespace non-bracket characters.
:type remove_empty_top_bracketing: bool
:param remove_empty_top_bracketing: If the resulting tree has
an empty node label, and is length one, then return its
single child instead. This is useful for treebank trees,
which sometimes contain an extra level of bracketing.
:return: A tree corresponding to the string representation ``s``.
If this class method is called using a subclass of Tree,
then it will return a tree of that type.
:rtype: Tree
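        For example (an illustrative doctest):
        >>> print(Tree.fromstring('(S (NP (NNP John)) (VP (V runs)))'))
        (S (NP (NNP John)) (VP (V runs)))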
"""
if not isinstance(brackets, string_types) or len(brackets) != 2:
raise TypeError('brackets must be a length-2 string')
        if re.search(r'\s', brackets):
            raise TypeError('whitespace brackets not allowed')
        # Construct a regexp that will tokenize the string.
        open_b, close_b = brackets
        open_pattern, close_pattern = (re.escape(open_b), re.escape(close_b))
        if node_pattern is None:
            node_pattern = r'[^\s%s%s]+' % (open_pattern, close_pattern)
        if leaf_pattern is None:
            leaf_pattern = r'[^\s%s%s]+' % (open_pattern, close_pattern)
        token_re = re.compile(r'%s\s*(%s)?|%s|(%s)' % (
            open_pattern, node_pattern, close_pattern, leaf_pattern))
# Walk through each token, updating a stack of trees.
stack = [(None, [])] # list of (node, children) tuples
for match in token_re.finditer(s):
token = match.group()
# Beginning of a tree/subtree
if token[0] == open_b:
if len(stack) == 1 and len(stack[0][1]) > 0:
cls._parse_error(s, match, 'end-of-string')
label = token[1:].lstrip()
if read_node is not None: label = read_node(label)
stack.append((label, []))
# End of a tree/subtree
elif token == close_b:
if len(stack) == 1:
if len(stack[0][1]) == 0:
cls._parse_error(s, match, open_b)
else:
cls._parse_error(s, match, 'end-of-string')
label, children = stack.pop()
stack[-1][1].append(cls(label, children))
# Leaf node
else:
if len(stack) == 1:
cls._parse_error(s, match, open_b)
if read_leaf is not None: token = read_leaf(token)
stack[-1][1].append(token)
# check that we got exactly one complete tree.
if len(stack) > 1:
cls._parse_error(s, 'end-of-string', close_b)
elif len(stack[0][1]) == 0:
cls._parse_error(s, 'end-of-string', open_b)
else:
assert stack[0][0] is None
assert len(stack[0][1]) == 1
tree = stack[0][1][0]
# If the tree has an extra level with node='', then get rid of
# it. E.g.: "((S (NP ...) (VP ...)))"
if remove_empty_top_bracketing and tree._label == '' and len(tree) == 1:
tree = tree[0]
# return the tree.
return tree
@classmethod
def _parse_error(cls, s, match, expecting):
"""
Display a friendly error message when parsing a tree string fails.
:param s: The string we're parsing.
:param match: regexp match of the problem token.
:param expecting: what we expected to see instead.
"""
# Construct a basic error message
if match == 'end-of-string':
pos, token = len(s), 'end-of-string'
else:
pos, token = match.start(), match.group()
msg = '%s.read(): expected %r but got %r\n%sat index %d.' % (
cls.__name__, expecting, token, ' '*12, pos)
        # Add a display showing the error token itself:
s = s.replace('\n', ' ').replace('\t', ' ')
offset = pos
if len(s) > pos+10:
s = s[:pos+10]+'...'
if pos > 10:
s = '...'+s[pos-10:]
offset = 13
msg += '\n%s"%s"\n%s^' % (' '*16, s, ' '*(17+offset))
raise ValueError(msg)
#////////////////////////////////////////////////////////////
# Visualization & String Representation
#////////////////////////////////////////////////////////////
def draw(self):
"""
Open a new window containing a graphical diagram of this tree.
"""
from nltk.draw.tree import draw_trees
draw_trees(self)
def pretty_print(self, sentence=None, highlight=(), stream=None, **kwargs):
"""
Pretty-print this tree as ASCII or Unicode art.
For explanation of the arguments, see the documentation for
`nltk.treeprettyprinter.TreePrettyPrinter`.
"""
from nltk.treeprettyprinter import TreePrettyPrinter
print(TreePrettyPrinter(self, sentence, highlight).text(**kwargs),
file=stream)
def __repr__(self):
childstr = ", ".join(unicode_repr(c) for c in self)
return '%s(%s, [%s])' % (type(self).__name__, unicode_repr(self._label), childstr)
def _repr_png_(self):
"""
Draws and outputs in PNG for ipython.
PNG is used instead of PDF, since it can be displayed in the qt console and
has wider browser support.
"""
import os
import base64
import subprocess
import tempfile
from nltk.draw.tree import tree_to_treesegment
from nltk.draw.util import CanvasFrame
from nltk.internals import find_binary
_canvas_frame = CanvasFrame()
widget = tree_to_treesegment(_canvas_frame.canvas(), self)
_canvas_frame.add_widget(widget)
x, y, w, h = widget.bbox()
# print_to_file uses scrollregion to set the width and height of the pdf.
_canvas_frame.canvas()['scrollregion'] = (0, 0, w, h)
with tempfile.NamedTemporaryFile() as file:
in_path = '{0:}.ps'.format(file.name)
out_path = '{0:}.png'.format(file.name)
_canvas_frame.print_to_file(in_path)
_canvas_frame.destroy_widget(widget)
subprocess.call([find_binary('gs', binary_names=['gswin32c.exe', 'gswin64c.exe'], env_vars=['PATH'], verbose=False)] +
'-q -dEPSCrop -sDEVICE=png16m -r90 -dTextAlphaBits=4 -dGraphicsAlphaBits=4 -dSAFER -dBATCH -dNOPAUSE -sOutputFile={0:} {1:}'
.format(out_path, in_path).split())
with open(out_path, 'rb') as sr:
res = sr.read()
os.remove(in_path)
os.remove(out_path)
return base64.b64encode(res).decode()
def __str__(self):
return self.pformat()
def pprint(self, **kwargs):
"""
Print a string representation of this Tree to 'stream'
"""
if "stream" in kwargs:
stream = kwargs["stream"]
del kwargs["stream"]
else:
stream = None
print(self.pformat(**kwargs), file=stream)
def pformat(self, margin=70, indent=0, nodesep='', parens='()', quotes=False):
"""
:return: A pretty-printed string representation of this tree.
:rtype: str
:param margin: The right margin at which to do line-wrapping.
:type margin: int
:param indent: The indentation level at which printing
begins. This number is used to decide how far to indent
subsequent lines.
:type indent: int
:param nodesep: A string that is used to separate the node
            from the children.  E.g., a value of ``':'`` gives
            trees like ``(S: (NP: I) (VP: (V: saw) (NP: it)))``;
            the default is the empty string.
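        For example (an illustrative doctest; the tree and the margin
        are our own choices):
        >>> t = Tree.fromstring('(S (NP I) (VP (V saw) (NP him)))')
        >>> print(t.pformat(margin=30))
        (S
          (NP I)
          (VP (V saw) (NP him)))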
"""
# Try writing it on one line.
s = self._pformat_flat(nodesep, parens, quotes)
if len(s) + indent < margin:
return s
# If it doesn't fit on one line, then write it on multi-lines.
if isinstance(self._label, string_types):
s = '%s%s%s' % (parens[0], self._label, nodesep)
else:
s = '%s%s%s' % (parens[0], unicode_repr(self._label), nodesep)
for child in self:
if isinstance(child, Tree):
s += '\n'+' '*(indent+2)+child.pformat(margin, indent+2,
nodesep, parens, quotes)
elif isinstance(child, tuple):
s += '\n'+' '*(indent+2)+ "/".join(child)
elif isinstance(child, string_types) and not quotes:
s += '\n'+' '*(indent+2)+ '%s' % child
else:
s += '\n'+' '*(indent+2)+ unicode_repr(child)
return s+parens[1]
def pformat_latex_qtree(self):
r"""
Returns a representation of the tree compatible with the
LaTeX qtree package. This consists of the string ``\Tree``
followed by the tree represented in bracketed notation.
For example, the following result was generated from a parse tree of
the sentence ``The announcement astounded us``::
\Tree [.I'' [.N'' [.D The ] [.N' [.N announcement ] ] ]
[.I' [.V'' [.V' [.V astounded ] [.N'' [.N' [.N us ] ] ] ] ] ] ]
See http://www.ling.upenn.edu/advice/latex.html for the LaTeX
style file for the qtree package.
:return: A latex qtree representation of this tree.
:rtype: str
"""
        reserved_chars = re.compile(r'([#\$%&~_\{\}])')
pformat = self.pformat(indent=6, nodesep='', parens=('[.', ' ]'))
return r'\Tree ' + re.sub(reserved_chars, r'\\\1', pformat)
def _pformat_flat(self, nodesep, parens, quotes):
childstrs = []
for child in self:
if isinstance(child, Tree):
childstrs.append(child._pformat_flat(nodesep, parens, quotes))
elif isinstance(child, tuple):
childstrs.append("/".join(child))
elif isinstance(child, string_types) and not quotes:
childstrs.append('%s' % child)
else:
childstrs.append(unicode_repr(child))
if isinstance(self._label, string_types):
return '%s%s%s %s%s' % (parens[0], self._label, nodesep,
" ".join(childstrs), parens[1])
else:
return '%s%s%s %s%s' % (parens[0], unicode_repr(self._label), nodesep,
" ".join(childstrs), parens[1])
class ImmutableTree(Tree):
def __init__(self, node, children=None):
super(ImmutableTree, self).__init__(node, children)
# Precompute our hash value. This ensures that we're really
# immutable. It also means we only have to calculate it once.
try:
self._hash = hash((self._label, tuple(self)))
except (TypeError, ValueError):
raise ValueError("%s: node value and children "
"must be immutable" % type(self).__name__)
def __setitem__(self, index, value):
raise ValueError('%s may not be modified' % type(self).__name__)
def __setslice__(self, i, j, value):
raise ValueError('%s may not be modified' % type(self).__name__)
def __delitem__(self, index):
raise ValueError('%s may not be modified' % type(self).__name__)
def __delslice__(self, i, j):
raise ValueError('%s may not be modified' % type(self).__name__)
def __iadd__(self, other):
raise ValueError('%s may not be modified' % type(self).__name__)
def __imul__(self, other):
raise ValueError('%s may not be modified' % type(self).__name__)
def append(self, v):
raise ValueError('%s may not be modified' % type(self).__name__)
def extend(self, v):
raise ValueError('%s may not be modified' % type(self).__name__)
def pop(self, v=None):
raise ValueError('%s may not be modified' % type(self).__name__)
def remove(self, v):
raise ValueError('%s may not be modified' % type(self).__name__)
def reverse(self):
raise ValueError('%s may not be modified' % type(self).__name__)
def sort(self):
raise ValueError('%s may not be modified' % type(self).__name__)
def __hash__(self):
return self._hash
def set_label(self, value):
"""
Set the node label. This will only succeed the first time the
node label is set, which should occur in ImmutableTree.__init__().
"""
if hasattr(self, '_label'):
raise ValueError('%s may not be modified' % type(self).__name__)
self._label = value
######################################################################
## Parented trees
######################################################################
class AbstractParentedTree(Tree):
"""
An abstract base class for a ``Tree`` that automatically maintains
pointers to parent nodes. These parent pointers are updated
whenever any change is made to a tree's structure. Two subclasses
are currently defined:
- ``ParentedTree`` is used for tree structures where each subtree
has at most one parent. This class should be used in cases
        where there is no "sharing" of subtrees.
- ``MultiParentedTree`` is used for tree structures where a
subtree may have zero or more parents. This class should be
used in cases where subtrees may be shared.
Subclassing
===========
The ``AbstractParentedTree`` class redefines all operations that
modify a tree's structure to call two methods, which are used by
subclasses to update parent information:
- ``_setparent()`` is called whenever a new child is added.
- ``_delparent()`` is called whenever a child is removed.
"""
def __init__(self, node, children=None):
super(AbstractParentedTree, self).__init__(node, children)
# If children is None, the tree is read from node, and
# all parents will be set during parsing.
if children is not None:
# Otherwise we have to set the parent of the children.
# Iterate over self, and *not* children, because children
# might be an iterator.
for i, child in enumerate(self):
if isinstance(child, Tree):
self._setparent(child, i, dry_run=True)
for i, child in enumerate(self):
if isinstance(child, Tree):
self._setparent(child, i)
#////////////////////////////////////////////////////////////
# Parent management
#////////////////////////////////////////////////////////////
def _setparent(self, child, index, dry_run=False):
"""
Update the parent pointer of ``child`` to point to ``self``. This
method is only called if the type of ``child`` is ``Tree``;
i.e., it is not called when adding a leaf to a tree. This method
is always called before the child is actually added to the
child list of ``self``.
:type child: Tree
:type index: int
:param index: The index of ``child`` in ``self``.
        :raise TypeError: If ``child`` is a tree with an inappropriate
type. Typically, if ``child`` is a tree, then its type needs
to match the type of ``self``. This prevents mixing of
different tree types (single-parented, multi-parented, and
non-parented).
        :param dry_run: If true, don't actually set the child's
parent pointer; just check for any error conditions, and
raise an exception if one is found.
"""
raise NotImplementedError()
def _delparent(self, child, index):
"""
Update the parent pointer of ``child`` to not point to self. This
method is only called if the type of ``child`` is ``Tree``; i.e., it
is not called when removing a leaf from a tree. This method
is always called before the child is actually removed from the
child list of ``self``.
:type child: Tree
:type index: int
:param index: The index of ``child`` in ``self``.
"""
raise NotImplementedError()
#////////////////////////////////////////////////////////////
# Methods that add/remove children
#////////////////////////////////////////////////////////////
# Every method that adds or removes a child must make
# appropriate calls to _setparent() and _delparent().
def __delitem__(self, index):
# del ptree[start:stop]
if isinstance(index, slice):
start, stop, step = slice_bounds(self, index, allow_step=True)
# Clear all the children pointers.
for i in range(start, stop, step):
if isinstance(self[i], Tree):
self._delparent(self[i], i)
# Delete the children from our child list.
super(AbstractParentedTree, self).__delitem__(index)
# del ptree[i]
elif isinstance(index, int):
if index < 0: index += len(self)
if index < 0: raise IndexError('index out of range')
# Clear the child's parent pointer.
if isinstance(self[index], Tree):
self._delparent(self[index], index)
# Remove the child from our child list.
super(AbstractParentedTree, self).__delitem__(index)
elif isinstance(index, (list, tuple)):
# del ptree[()]
if len(index) == 0:
raise IndexError('The tree position () may not be deleted.')
# del ptree[(i,)]
elif len(index) == 1:
del self[index[0]]
# del ptree[i1, i2, i3]
else:
del self[index[0]][index[1:]]
else:
raise TypeError("%s indices must be integers, not %s" %
(type(self).__name__, type(index).__name__))
def __setitem__(self, index, value):
# ptree[start:stop] = value
if isinstance(index, slice):
start, stop, step = slice_bounds(self, index, allow_step=True)
# make a copy of value, in case it's an iterator
if not isinstance(value, (list, tuple)):
value = list(value)
# Check for any error conditions, so we can avoid ending
# up in an inconsistent state if an error does occur.
for i, child in enumerate(value):
if isinstance(child, Tree):
self._setparent(child, start + i*step, dry_run=True)
# clear the child pointers of all parents we're removing
for i in range(start, stop, step):
if isinstance(self[i], Tree):
self._delparent(self[i], i)
# set the child pointers of the new children. We do this
# after clearing *all* child pointers, in case we're e.g.
# reversing the elements in a tree.
for i, child in enumerate(value):
if isinstance(child, Tree):
self._setparent(child, start + i*step)
# finally, update the content of the child list itself.
super(AbstractParentedTree, self).__setitem__(index, value)
# ptree[i] = value
elif isinstance(index, int):
if index < 0: index += len(self)
if index < 0: raise IndexError('index out of range')
# if the value is not changing, do nothing.
if value is self[index]:
return
# Set the new child's parent pointer.
if isinstance(value, Tree):
self._setparent(value, index)
# Remove the old child's parent pointer
if isinstance(self[index], Tree):
self._delparent(self[index], index)
# Update our child list.
super(AbstractParentedTree, self).__setitem__(index, value)
elif isinstance(index, (list, tuple)):
# ptree[()] = value
if len(index) == 0:
raise IndexError('The tree position () may not be assigned to.')
# ptree[(i,)] = value
elif len(index) == 1:
self[index[0]] = value
# ptree[i1, i2, i3] = value
else:
self[index[0]][index[1:]] = value
else:
raise TypeError("%s indices must be integers, not %s" %
(type(self).__name__, type(index).__name__))
def append(self, child):
if isinstance(child, Tree):
self._setparent(child, len(self))
super(AbstractParentedTree, self).append(child)
def extend(self, children):
for child in children:
if isinstance(child, Tree):
self._setparent(child, len(self))
super(AbstractParentedTree, self).append(child)
def insert(self, index, child):
# Handle negative indexes. Note that if index < -len(self),
# we do *not* raise an IndexError, unlike __getitem__. This
# is done for consistency with list.__getitem__ and list.index.
if index < 0: index += len(self)
if index < 0: index = 0
# Set the child's parent, and update our child list.
if isinstance(child, Tree):
self._setparent(child, index)
super(AbstractParentedTree, self).insert(index, child)
def pop(self, index=-1):
if index < 0: index += len(self)
if index < 0: raise IndexError('index out of range')
if isinstance(self[index], Tree):
self._delparent(self[index], index)
return super(AbstractParentedTree, self).pop(index)
# n.b.: like `list`, this is done by equality, not identity!
# To remove a specific child, use del ptree[i].
def remove(self, child):
index = self.index(child)
if isinstance(self[index], Tree):
self._delparent(self[index], index)
super(AbstractParentedTree, self).remove(child)
# We need to implement __getslice__ and friends, even though
# they're deprecated, because otherwise list.__getslice__ will get
# called (since we're subclassing from list). Just delegate to
    # __getitem__ etc., but use max(0, start) and max(0, stop) because
    # negative indices are already handled *before*
# __getslice__ is called; and we don't want to double-count them.
if hasattr(list, '__getslice__'):
def __getslice__(self, start, stop):
return self.__getitem__(slice(max(0, start), max(0, stop)))
def __delslice__(self, start, stop):
return self.__delitem__(slice(max(0, start), max(0, stop)))
def __setslice__(self, start, stop, value):
return self.__setitem__(slice(max(0, start), max(0, stop)), value)
class ParentedTree(AbstractParentedTree):
"""
A ``Tree`` that automatically maintains parent pointers for
single-parented trees. The following are methods for querying
the structure of a parented tree: ``parent``, ``parent_index``,
``left_sibling``, ``right_sibling``, ``root``, ``treeposition``.
Each ``ParentedTree`` may have at most one parent. In
particular, subtrees may not be shared. Any attempt to reuse a
single ``ParentedTree`` as a child of more than one parent (or
as multiple children of the same parent) will cause a
``ValueError`` exception to be raised.
``ParentedTrees`` should never be used in the same tree as ``Trees``
or ``MultiParentedTrees``. Mixing tree implementations may result
in incorrect parent pointers and in ``TypeError`` exceptions.
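    For example (an illustrative doctest; the tree literal is our own):
    >>> t = ParentedTree.fromstring('(S (NP I) (VP (V saw) (NP him)))')
    >>> t[1].parent() is t
    True
    >>> t[1][1].treeposition()
    (1, 1)
    >>> print(t[1][1].left_sibling())
    (V saw)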
"""
def __init__(self, node, children=None):
self._parent = None
"""The parent of this Tree, or None if it has no parent."""
super(ParentedTree, self).__init__(node, children)
if children is None:
# If children is None, the tree is read from node.
# After parsing, the parent of the immediate children
# will point to an intermediate tree, not self.
# We fix this by brute force:
for i, child in enumerate(self):
if isinstance(child, Tree):
child._parent = None
self._setparent(child, i)
def _frozen_class(self): return ImmutableParentedTree
#/////////////////////////////////////////////////////////////////
# Methods
#/////////////////////////////////////////////////////////////////
def parent(self):
"""The parent of this tree, or None if it has no parent."""
return self._parent
def parent_index(self):
"""
The index of this tree in its parent. I.e.,
``ptree.parent()[ptree.parent_index()] is ptree``. Note that
``ptree.parent_index()`` is not necessarily equal to
        ``ptree.parent().index(ptree)``, since the ``index()`` method
returns the first child that is equal to its argument.
"""
if self._parent is None: return None
for i, child in enumerate(self._parent):
if child is self: return i
assert False, 'expected to find self in self._parent!'
def left_sibling(self):
"""The left sibling of this tree, or None if it has none."""
parent_index = self.parent_index()
if self._parent and parent_index > 0:
return self._parent[parent_index-1]
return None # no left sibling
def right_sibling(self):
"""The right sibling of this tree, or None if it has none."""
parent_index = self.parent_index()
if self._parent and parent_index < (len(self._parent)-1):
return self._parent[parent_index+1]
return None # no right sibling
def root(self):
"""
The root of this tree. I.e., the unique ancestor of this tree
whose parent is None. If ``ptree.parent()`` is None, then
``ptree`` is its own root.
"""
root = self
while root.parent() is not None:
root = root.parent()
return root
def treeposition(self):
"""
The tree position of this tree, relative to the root of the
        tree.  I.e., ``ptree.root()[ptree.treeposition()] is ptree``.
"""
if self.parent() is None:
return ()
else:
return self.parent().treeposition() + (self.parent_index(),)
#/////////////////////////////////////////////////////////////////
# Parent Management
#/////////////////////////////////////////////////////////////////
def _delparent(self, child, index):
# Sanity checks
assert isinstance(child, ParentedTree)
assert self[index] is child
assert child._parent is self
# Delete child's parent pointer.
child._parent = None
def _setparent(self, child, index, dry_run=False):
# If the child's type is incorrect, then complain.
if not isinstance(child, ParentedTree):
raise TypeError('Can not insert a non-ParentedTree '+
'into a ParentedTree')
# If child already has a parent, then complain.
if child._parent is not None:
raise ValueError('Can not insert a subtree that already '
'has a parent.')
# Set child's parent pointer & index.
if not dry_run:
child._parent = self
class MultiParentedTree(AbstractParentedTree):
"""
A ``Tree`` that automatically maintains parent pointers for
multi-parented trees. The following are methods for querying the
structure of a multi-parented tree: ``parents()``, ``parent_indices()``,
``left_siblings()``, ``right_siblings()``, ``roots``, ``treepositions``.
Each ``MultiParentedTree`` may have zero or more parents. In
particular, subtrees may be shared. If a single
``MultiParentedTree`` is used as multiple children of the same
parent, then that parent will appear multiple times in its
``parents()`` method.
``MultiParentedTrees`` should never be used in the same tree as
``Trees`` or ``ParentedTrees``. Mixing tree implementations may
result in incorrect parent pointers and in ``TypeError`` exceptions.
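    For example, sharing one subtree between two parents (an
    illustrative doctest; the trees are our own):
    >>> shared = MultiParentedTree('NP', ['him'])
    >>> t1 = MultiParentedTree('VP', [MultiParentedTree('V', ['saw']), shared])
    >>> t2 = MultiParentedTree('VP', [MultiParentedTree('V', ['helped']), shared])
    >>> len(shared.parents())
    2
    >>> shared.parent_indices(t1)
    [1]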
"""
def __init__(self, node, children=None):
self._parents = []
"""A list of this tree's parents. This list should not
contain duplicates, even if a parent contains this tree
multiple times."""
super(MultiParentedTree, self).__init__(node, children)
if children is None:
# If children is None, the tree is read from node.
# After parsing, the parent(s) of the immediate children
# will point to an intermediate tree, not self.
# We fix this by brute force:
for i, child in enumerate(self):
if isinstance(child, Tree):
child._parents = []
self._setparent(child, i)
def _frozen_class(self): return ImmutableMultiParentedTree
#/////////////////////////////////////////////////////////////////
# Methods
#/////////////////////////////////////////////////////////////////
def parents(self):
"""
        The parents of this tree.  If this tree has no parents,
        then ``parents`` is the empty list.  To check if a tree is used
as multiple children of the same parent, use the
``parent_indices()`` method.
:type: list(MultiParentedTree)
"""
return list(self._parents)
def left_siblings(self):
"""
A list of all left siblings of this tree, in any of its parent
trees. A tree may be its own left sibling if it is used as
multiple contiguous children of the same parent. A tree may
appear multiple times in this list if it is the left sibling
of this tree with respect to multiple parents.
:type: list(MultiParentedTree)
"""
return [parent[index-1]
for (parent, index) in self._get_parent_indices()
if index > 0]
def right_siblings(self):
"""
A list of all right siblings of this tree, in any of its parent
trees. A tree may be its own right sibling if it is used as
multiple contiguous children of the same parent. A tree may
appear multiple times in this list if it is the right sibling
of this tree with respect to multiple parents.
:type: list(MultiParentedTree)
"""
return [parent[index+1]
for (parent, index) in self._get_parent_indices()
if index < (len(parent)-1)]
def _get_parent_indices(self):
return [(parent, index)
for parent in self._parents
for index, child in enumerate(parent)
if child is self]
def roots(self):
"""
The set of all roots of this tree. This set is formed by
tracing all possible parent paths until trees with no parents
are found.
:type: list(MultiParentedTree)
"""
return list(self._get_roots_helper({}).values())
def _get_roots_helper(self, result):
if self._parents:
for parent in self._parents:
parent._get_roots_helper(result)
else:
result[id(self)] = self
return result
def parent_indices(self, parent):
"""
Return a list of the indices where this tree occurs as a child
of ``parent``. If this child does not occur as a child of
``parent``, then the empty list is returned. The following is
always true::
for parent_index in ptree.parent_indices(parent):
parent[parent_index] is ptree
"""
if parent not in self._parents: return []
else: return [index for (index, child) in enumerate(parent)
if child is self]
def treepositions(self, root):
"""
Return a list of all tree positions that can be used to reach
this multi-parented tree starting from ``root``. I.e., the
following is always true::
for treepos in ptree.treepositions(root):
root[treepos] is ptree
"""
if self is root:
return [()]
else:
return [treepos+(index,)
for parent in self._parents
for treepos in parent.treepositions(root)
for (index, child) in enumerate(parent) if child is self]
#/////////////////////////////////////////////////////////////////
# Parent Management
#/////////////////////////////////////////////////////////////////
def _delparent(self, child, index):
# Sanity checks
assert isinstance(child, MultiParentedTree)
assert self[index] is child
assert len([p for p in child._parents if p is self]) == 1
# If the only copy of child in self is at index, then delete
# self from child's parent list.
for i, c in enumerate(self):
if c is child and i != index: break
else:
child._parents.remove(self)
def _setparent(self, child, index, dry_run=False):
# If the child's type is incorrect, then complain.
if not isinstance(child, MultiParentedTree):
raise TypeError('Can not insert a non-MultiParentedTree '+
'into a MultiParentedTree')
# Add self as a parent pointer if it's not already listed.
if not dry_run:
for parent in child._parents:
if parent is self: break
else:
child._parents.append(self)
class ImmutableParentedTree(ImmutableTree, ParentedTree):
pass
class ImmutableMultiParentedTree(ImmutableTree, MultiParentedTree):
pass
######################################################################
## Probabilistic trees
######################################################################
@python_2_unicode_compatible
class ProbabilisticTree(Tree, ProbabilisticMixIn):
def __init__(self, node, children=None, **prob_kwargs):
Tree.__init__(self, node, children)
ProbabilisticMixIn.__init__(self, **prob_kwargs)
# We have to patch up these methods to make them work right:
def _frozen_class(self): return ImmutableProbabilisticTree
def __repr__(self):
return '%s (p=%r)' % (Tree.unicode_repr(self), self.prob())
def __str__(self):
return '%s (p=%.6g)' % (self.pformat(margin=60), self.prob())
def copy(self, deep=False):
if not deep: return type(self)(self._label, self, prob=self.prob())
else: return type(self).convert(self)
@classmethod
def convert(cls, val):
if isinstance(val, Tree):
children = [cls.convert(child) for child in val]
if isinstance(val, ProbabilisticMixIn):
return cls(val._label, children, prob=val.prob())
else:
return cls(val._label, children, prob=1.0)
else:
return val
def __eq__(self, other):
return (self.__class__ is other.__class__ and
(self._label, list(self), self.prob()) ==
(other._label, list(other), other.prob()))
def __lt__(self, other):
if not isinstance(other, Tree):
raise_unorderable_types("<", self, other)
if self.__class__ is other.__class__:
return ((self._label, list(self), self.prob()) <
(other._label, list(other), other.prob()))
else:
return self.__class__.__name__ < other.__class__.__name__
@python_2_unicode_compatible
class ImmutableProbabilisticTree(ImmutableTree, ProbabilisticMixIn):
def __init__(self, node, children=None, **prob_kwargs):
ImmutableTree.__init__(self, node, children)
ProbabilisticMixIn.__init__(self, **prob_kwargs)
self._hash = hash((self._label, tuple(self), self.prob()))
# We have to patch up these methods to make them work right:
def _frozen_class(self): return ImmutableProbabilisticTree
def __repr__(self):
return '%s [%s]' % (Tree.unicode_repr(self), self.prob())
def __str__(self):
return '%s [%s]' % (self.pformat(margin=60), self.prob())
def copy(self, deep=False):
if not deep: return type(self)(self._label, self, prob=self.prob())
else: return type(self).convert(self)
@classmethod
def convert(cls, val):
if isinstance(val, Tree):
children = [cls.convert(child) for child in val]
if isinstance(val, ProbabilisticMixIn):
return cls(val._label, children, prob=val.prob())
else:
return cls(val._label, children, prob=1.0)
else:
return val
def _child_names(tree):
names = []
for child in tree:
if isinstance(child, Tree):
names.append(Nonterminal(child._label))
else:
names.append(child)
return names
######################################################################
## Parsing
######################################################################
def bracket_parse(s):
"""
    Use Tree.fromstring(s, remove_empty_top_bracketing=True) instead.
    """
    raise NameError("Use Tree.fromstring(s, remove_empty_top_bracketing=True) instead.")
def sinica_parse(s):
"""
    Parse a Sinica Treebank string and return a tree.  Trees are represented as nested bracketings,
as shown in the following example (X represents a Chinese character):
S(goal:NP(Head:Nep:XX)|theme:NP(Head:Nhaa:X)|quantity:Dab:X|Head:VL2:X)#0(PERIODCATEGORY)
:return: A tree corresponding to the string representation.
:rtype: Tree
:param s: The string to be converted
:type s: str
"""
tokens = re.split(r'([()| ])', s)
for i in range(len(tokens)):
if tokens[i] == '(':
tokens[i-1], tokens[i] = tokens[i], tokens[i-1] # pull nonterminal inside parens
elif ':' in tokens[i]:
fields = tokens[i].split(':')
if len(fields) == 2: # non-terminal
tokens[i] = fields[1]
else:
tokens[i] = "(" + fields[-2] + " " + fields[-1] + ")"
elif tokens[i] == '|':
tokens[i] = ''
treebank_string = " ".join(tokens)
return Tree.fromstring(treebank_string, remove_empty_top_bracketing=True)
# s = re.sub(r'^#[^\s]*\s', '', s) # remove leading identifier
# s = re.sub(r'\w+:', '', s) # remove role tags
# return s
######################################################################
## Demonstration
######################################################################
def demo():
"""
    A demonstration showing how Tree and ProbabilisticTree can be
    used.  This demonstration creates a Tree from a bracketed string,
    modifies and transforms it,
    and shows the results of calling several of its methods.
"""
from nltk import Tree, ProbabilisticTree
# Demonstrate tree parsing.
s = '(S (NP (DT the) (NN cat)) (VP (VBD ate) (NP (DT a) (NN cookie))))'
t = Tree.fromstring(s)
print("Convert bracketed string into tree:")
print(t)
    print(repr(t))
print("Display tree properties:")
print(t.label()) # tree's constituent type
print(t[0]) # tree's first child
print(t[1]) # tree's second child
print(t.height())
print(t.leaves())
print(t[1])
print(t[1,1])
print(t[1,1,0])
# Demonstrate tree modification.
the_cat = t[0]
the_cat.insert(1, Tree.fromstring('(JJ big)'))
print("Tree modification:")
print(t)
t[1,1,1] = Tree.fromstring('(NN cake)')
print(t)
print()
# Tree transforms
print("Collapse unary:")
t.collapse_unary()
print(t)
print("Chomsky normal form:")
t.chomsky_normal_form()
print(t)
print()
# Demonstrate probabilistic trees.
pt = ProbabilisticTree('x', ['y', 'z'], prob=0.5)
print("Probabilistic Tree:")
print(pt)
print()
# Demonstrate parsing of treebank output format.
t = Tree.fromstring(t.pformat())
print("Convert tree to bracketed string and back again:")
print(t)
print()
# Demonstrate LaTeX output
print("LaTeX output:")
print(t.pformat_latex_qtree())
print()
# Demonstrate Productions
print("Production output:")
print(t.productions())
print()
# Demonstrate tree nodes containing objects other than strings
t.set_label(('test', 3))
print(t)
__all__ = ['ImmutableProbabilisticTree', 'ImmutableTree', 'ProbabilisticMixIn',
'ProbabilisticTree', 'Tree', 'bracket_parse',
'sinica_parse', 'ParentedTree', 'MultiParentedTree',
'ImmutableParentedTree', 'ImmutableMultiParentedTree']
| nelango/ViralityAnalysis | model/lib/nltk/tree.py | Python | mit | 64,375 |
from collections import defaultdict
import copy
import datetime
import json
from appengine_fixture_loader.loader import load_fixture
from google.appengine.ext import ndb
from helpers.event_details_manipulator import EventDetailsManipulator
from helpers.match_helper import MatchHelper
from helpers.match_manipulator import MatchManipulator
from models.event import Event
from models.event_details import EventDetails
from models.match import Match
class EventSimulator(object):
"""
Steps through an event in time. At step = 0, only the Event exists:
(step 0) Add all unplayed qual matches
(step 1, substep n) Add results of each of the n qual matches +
rankings being updated (if has_event_details)
(step 2) Add alliance selections (if has_event_details)
(step 3) Add unplayed QF matches
(step 4, substep n) Add results of each of the n QF matches +
update SF matches with advancing alliances (if not batch_advance) +
update alliance selection backups (if has_event_details)
(step 5) Add unplayed SF matches (if batch_advance)
(step 6, substep n) Add results of each of the n SF matches +
update F matches with advancing alliances (if not batch_advance) +
update alliance selection backups (if has_event_details)
(step 7) Add unplayed F matches (if batch_advance)
(step 8, substep n) Add results of each of the n F matches +
update alliance selection backups (if has_event_details)
"""
def __init__(self, has_event_details=True, batch_advance=False):
self._step = 0
self._substep = 0
# whether to update rankings and alliance selections
self._has_event_details = has_event_details
# whether to update next playoff level all at once, or as winners are determined
self._batch_advance = batch_advance
# Load and save complete data
load_fixture('test_data/fixtures/2016nytr_event_team_status.json',
kind={'EventDetails': EventDetails, 'Event': Event, 'Match': Match},
post_processor=self._event_key_adder)
event = Event.get_by_id('2016nytr')
# Add 3rd matches that never got played
unplayed_matches = [
Match(
id='2016nytr_qf1m3',
year=2016,
event=event.key,
comp_level='qf',
set_number=1,
match_number=3,
alliances_json=json.dumps({
'red': {
'teams': ['frc3990', 'frc359', 'frc4508'],
'score': -1,
},
'blue': {
'teams': ['frc3044', 'frc4930', 'frc4481'],
'score': -1,
}
}),
time=datetime.datetime(2016, 3, 19, 18, 34),
),
Match(
id='2016nytr_qf3m3',
year=2016,
event=event.key,
comp_level='qf',
set_number=3,
match_number=3,
alliances_json=json.dumps({
'red': {
'teams': ['frc20', 'frc5254', 'frc229'],
'score': -1,
},
'blue': {
'teams': ['frc3003', 'frc358', 'frc527'],
'score': -1,
}
}),
time=datetime.datetime(2016, 3, 19, 18, 48),
),
Match(
id='2016nytr_sf1m3',
year=2016,
event=event.key,
comp_level='sf',
set_number=1,
match_number=3,
alliances_json=json.dumps({
'red': {
'teams': ['frc3990', 'frc359', 'frc4508'],
'score': -1,
},
'blue': {
'teams': ['frc5240', 'frc3419', 'frc663'],
'score': -1,
}
}),
time=datetime.datetime(2016, 3, 19, 19, 42),
)
]
self._event_details = event.details
self._alliance_selections_without_backup = copy.deepcopy(event.details.alliance_selections)
self._alliance_selections_without_backup[1]['backup'] = None
self._played_matches = MatchHelper.organizeMatches(event.matches)
self._all_matches = MatchHelper.organizeMatches(event.matches + unplayed_matches)
# Delete data
event.details.key.delete()
ndb.delete_multi([match.key for match in event.matches])
ndb.get_context().clear_cache()
# Used to keep track of non-batch advancement
self._advancement_alliances = defaultdict(dict)
def _event_key_adder(self, obj):
obj.event = ndb.Key(Event, '2016nytr')
def _update_rankings(self):
"""
        Generates and saves fake rankings from qual match win/loss/tie
        records, ordering teams by 2 * wins + ties
"""
event = Event.get_by_id('2016nytr')
team_wins = defaultdict(int)
team_losses = defaultdict(int)
team_ties = defaultdict(int)
teams = set()
for match in event.matches:
if match.comp_level == 'qm':
for alliance in ['red', 'blue']:
for team in match.alliances[alliance]['teams']:
teams.add(team)
if match.has_been_played:
if alliance == match.winning_alliance:
team_wins[team] += 1
elif match.winning_alliance == '':
team_ties[team] += 1
else:
team_losses[team] += 1
rankings = []
for team in sorted(teams):
wins = team_wins[team]
losses = team_losses[team]
ties = team_ties[team]
rankings.append({
'team_key': team,
'record': {
'wins': wins,
'losses': losses,
'ties': ties,
},
'matches_played': wins + losses + ties,
'dq': 0,
'sort_orders': [2 * wins + ties, 0, 0, 0, 0],
'qual_average': None,
})
rankings = sorted(rankings, key=lambda r: -r['sort_orders'][0])
for i, ranking in enumerate(rankings):
ranking['rank'] = i + 1
EventDetailsManipulator.createOrUpdate(EventDetails(
id='2016nytr',
rankings2=rankings,
))
def step(self):
event = Event.get_by_id('2016nytr')
if self._step == 0: # Qual match schedule added
for match in copy.deepcopy(self._all_matches['qm']):
for alliance in ['red', 'blue']:
match.alliances[alliance]['score'] = -1
match.alliances_json = json.dumps(match.alliances)
match.score_breakdown_json = None
match.actual_time = None
MatchManipulator.createOrUpdate(match)
self._step += 1
elif self._step == 1: # After each qual match
MatchManipulator.createOrUpdate(self._played_matches['qm'][self._substep])
if self._substep < len(self._played_matches['qm']) - 1:
self._substep += 1
else:
self._step += 1
self._substep = 0
EventDetailsManipulator.createOrUpdate(EventDetails(id='2016nytr'))
elif self._step == 2: # After alliance selections
EventDetailsManipulator.createOrUpdate(EventDetails(
id='2016nytr',
alliance_selections=self._alliance_selections_without_backup
))
self._step += 1
elif self._step == 3: # QF schedule added
for match in copy.deepcopy(self._all_matches['qf']):
for alliance in ['red', 'blue']:
match.alliances[alliance]['score'] = -1
match.alliances_json = json.dumps(match.alliances)
match.score_breakdown_json = None
match.actual_time = None
MatchManipulator.createOrUpdate(match)
self._step += 1
elif self._step == 4: # After each QF match
new_match = MatchHelper.play_order_sort_matches(self._played_matches['qf'])[self._substep]
MatchManipulator.createOrUpdate(new_match)
if not self._batch_advance:
win_counts = {
'red': 0,
'blue': 0,
}
for i in xrange(new_match.match_number):
win_counts[Match.get_by_id(
Match.renderKeyName(
new_match.event.id(),
new_match.comp_level,
new_match.set_number,
i+1)).winning_alliance] += 1
for alliance, wins in win_counts.items():
if wins == 2:
s = new_match.set_number
if s in {1, 2}:
self._advancement_alliances['sf1']['red' if s == 1 else 'blue'] = new_match.alliances[alliance]['teams']
elif s in {3, 4}:
self._advancement_alliances['sf2']['red' if s == 3 else 'blue'] = new_match.alliances[alliance]['teams']
else:
raise Exception("Invalid set number: {}".format(s))
for match_set, alliances in self._advancement_alliances.items():
if match_set.startswith('sf'):
for i in xrange(3):
for match in copy.deepcopy(self._all_matches['sf']):
key = '2016nytr_{}m{}'.format(match_set, i+1)
if match.key.id() == key:
for color in ['red', 'blue']:
match.alliances[color]['score'] = -1
match.alliances[color]['teams'] = alliances.get(color, [])
match.alliances_json = json.dumps(match.alliances)
match.score_breakdown_json = None
match.actual_time = None
MatchManipulator.createOrUpdate(match)
if self._substep < len(self._played_matches['qf']) - 1:
self._substep += 1
else:
self._step += 1 if self._batch_advance else 2
self._substep = 0
elif self._step == 5: # SF schedule added
if self._batch_advance:
for match in copy.deepcopy(self._all_matches['sf']):
for alliance in ['red', 'blue']:
match.alliances[alliance]['score'] = -1
match.alliances_json = json.dumps(match.alliances)
match.score_breakdown_json = None
match.actual_time = None
MatchManipulator.createOrUpdate(match)
self._step += 1
elif self._step == 6: # After each SF match
new_match = MatchHelper.play_order_sort_matches(self._played_matches['sf'])[self._substep]
MatchManipulator.createOrUpdate(new_match)
if not self._batch_advance:
win_counts = {
'red': 0,
'blue': 0,
}
for i in xrange(new_match.match_number):
win_counts[Match.get_by_id(
Match.renderKeyName(
new_match.event.id(),
new_match.comp_level,
new_match.set_number,
i+1)).winning_alliance] += 1
for alliance, wins in win_counts.items():
if wins == 2:
self._advancement_alliances['f1']['red' if new_match.set_number == 1 else 'blue'] = new_match.alliances[alliance]['teams']
for match_set, alliances in self._advancement_alliances.items():
if match_set.startswith('f'):
for i in xrange(3):
for match in copy.deepcopy(self._all_matches['f']):
key = '2016nytr_{}m{}'.format(match_set, i+1)
if match.key.id() == key:
for color in ['red', 'blue']:
match.alliances[color]['score'] = -1
match.alliances[color]['teams'] = alliances.get(color, [])
match.alliances_json = json.dumps(match.alliances)
match.score_breakdown_json = None
match.actual_time = None
MatchManipulator.createOrUpdate(match)
# Backup robot introduced
if self._substep == 3:
EventDetailsManipulator.createOrUpdate(EventDetails(
id='2016nytr',
alliance_selections=self._event_details.alliance_selections
))
if self._substep < len(self._played_matches['sf']) - 1:
self._substep += 1
else:
self._step += 1 if self._batch_advance else 2
self._substep = 0
elif self._step == 7: # F schedule added
if self._batch_advance:
for match in copy.deepcopy(self._all_matches['f']):
for alliance in ['red', 'blue']:
match.alliances[alliance]['score'] = -1
match.alliances_json = json.dumps(match.alliances)
match.score_breakdown_json = None
match.actual_time = None
MatchManipulator.createOrUpdate(match)
self._step += 1
elif self._step == 8: # After each F match
MatchManipulator.createOrUpdate(
MatchHelper.play_order_sort_matches(
self._played_matches['f'])[self._substep])
if self._substep < len(self._played_matches['f']) - 1:
self._substep += 1
else:
self._step += 1
self._substep = 0
ndb.get_context().clear_cache()
        # Re-fetch event matches
event = Event.get_by_id('2016nytr')
MatchHelper.deleteInvalidMatches(event.matches)
ndb.get_context().clear_cache()
self._update_rankings()
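
# Minimal driver sketch (illustrative; the simulator class wrapping these
# methods is defined earlier in this file, and the name below is assumed):
#
#   sim = EventSimulator(batch_advance=True)
#   for _ in range(200):
#       sim.step()  # replays quals, alliance selection, and playoffs in order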
| nwalters512/the-blue-alliance | helpers/event_simulator.py | Python | mit | 15,162 |
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import test
import os
from os.path import join, exists
TEST_262_HARNESS = ['sta.js']
class Test262TestCase(test.TestCase):
def __init__(self, filename, path, context, root, mode, framework):
super(Test262TestCase, self).__init__(context, path, mode)
self.filename = filename
self.framework = framework
self.root = root
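  # test262 marks expected-failure tests with an "@negative" annotation in the
  # test source; the runner treats an error from such a test as a pass.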
def IsNegative(self):
return '@negative' in self.GetSource()
def GetLabel(self):
return "%s test262 %s" % (self.mode, self.GetName())
def IsFailureOutput(self, output):
if output.exit_code != 0:
return True
return 'FAILED!' in output.stdout
def GetCommand(self):
result = self.context.GetVmCommand(self, self.mode)
result += self.framework
result.append(self.filename)
return result
def GetName(self):
return self.path[-1]
def GetSource(self):
return open(self.filename).read()
class Test262TestConfiguration(test.TestConfiguration):
def __init__(self, context, root):
super(Test262TestConfiguration, self).__init__(context, root)
def ListTests(self, current_path, path, mode, variant_flags):
testroot = join(self.root, 'data', 'test', 'suite')
harness = [join(self.root, 'data', 'test', 'harness', f)
for f in TEST_262_HARNESS]
harness += [join(self.root, 'harness-adapt.js')]
tests = []
for root, dirs, files in os.walk(testroot):
for dotted in [x for x in dirs if x.startswith('.')]:
dirs.remove(dotted)
dirs.sort()
root_path = root[len(self.root):].split(os.path.sep)
root_path = current_path + [x for x in root_path if x]
files.sort()
for file in files:
if file.endswith('.js'):
test_path = ['test262', file[:-3]]
if self.Contains(path, test_path):
test = Test262TestCase(join(root, file), test_path, self.context,
self.root, mode, harness)
tests.append(test)
return tests
def GetBuildRequirements(self):
return ['d8']
def GetTestStatus(self, sections, defs):
status_file = join(self.root, 'test262.status')
if exists(status_file):
test.ReadConfigurationInto(status_file, sections, defs)
def GetConfiguration(context, root):
return Test262TestConfiguration(context, root)
| sirchristian/v8DOM.NET | v8-read-only/test/test262/testcfg.py | Python | mit | 3,870 |
# -*- coding: utf-8 -*-
"""
IAMService
"""
import time
import xml.sax.saxutils as saxutils
# post xml soap message
import sys, httplib
from lxml import etree
from cStringIO import StringIO
#import static
import toml
class IAMClient(object):
def __init__(self):
conf_fn = "config.toml"
with open(conf_fn) as conf_fh:
self.conf = toml.loads(conf_fh.read())
print(self.conf)
def searchAll(self, startPage, pageSize ):
#config = static.ERP_CONFIG #'SL 8.0'
query = {"username":self.conf["Admin"],"password":self.conf["Admin_Password"], "nonce":self.conf["Nonce"], "startPage":startPage, "pageSize": pageSize}
SM_TEMPLATE = r"""<?xml version="1.0" encoding="UTF-8"?>
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:sear="http://search.service.iam.foton.com/">
<soapenv:Header>
<wsse:Security soapenv:mustUnderstand="1" xmlns:wsse="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd">
<wsse:UsernameToken wsu:Id="UsernameToken-1" xmlns:wsu="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
<wsse:Username>%(username)s</wsse:Username>
<wsse:Password Type="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0#PasswordText">%(password)s</wsse:Password>
<wsse:Nonce EncodingType="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary">%(nonce)s</wsse:Nonce>
<wsu:Created>2012-07-06T01:49:02.953Z</wsu:Created>
</wsse:UsernameToken>
</wsse:Security>
</soapenv:Header>
<soapenv:Body>
<sear:searchAll>
<arg0>%(startPage)s</arg0>
<arg1>%(pageSize)s</arg1>
<!--Optional:-->
<arg2>ou</arg2>
<arg3>true</arg3>
</sear:searchAll>
</soapenv:Body>
</soapenv:Envelope>""" % query
SoapMessage = SM_TEMPLATE
#print SoapMessage
#construct and send the header
host =self.conf["HOST"]
print(host)
webservice = httplib.HTTP(host)
service = self.conf["Service2"]
url = "/IAMService/services/soap/%s" %(service)
webservice.putrequest("POST", url)
webservice.putheader("Host", host)
webservice.putheader("User-Agent", "Mozilla/4.0+(compatible;+MSIE+6.0;+Windows+NT+5.2;+SV1;+.NET+CLR+1.1.4322)")
webservice.putheader("Content-type", "text/xml; charset=\"UTF-8\"")
webservice.putheader("Accept-Language", "en-us")
webservice.putheader("Content-length", "%d" % len(SoapMessage))
#webservice.putheader("SOAPAction", "authenticate")
webservice.endheaders()
webservice.send(SoapMessage)
# get the response
statuscode, statusmessage, header = webservice.getreply()
print "Response: ", statuscode, statusmessage, startPage
#print "headers: ", header
#print dir(webservice)
res = webservice.getfile().read()
fn = "%d.xml" %(time.time())
#print res
#with open(fn, 'w') as fh:
# fh.write(res)
return res #self.parseSessionToken(res)
def getResponse(self, xmlstr):
string_file = StringIO(xmlstr.replace('soap:',''))
#root = etree.fromstring(xml)
tree = etree.parse(string_file)
resp = None
for element in tree.xpath('/Envelope/Body'):
resp = element[0][1].text
return resp
def getResult(self, xmlstr):
resp = self.getResponse(xmlstr)
string_file = StringIO(resp)
#root = etree.fromstring(xml)
tree = etree.parse(string_file)
result = None
v = tree.xpath('/Parameters')[0]
l = len(v)
result = v[l-1].text
if result.count('successful') >0:
return "S"
else:
return "F"
def get_element_text(element, node):
v = element.xpath(node)
if len(v)>0:
#print v[0].text.encode("utf8")
return v[0].text.encode("utf8")
else:
return ""
def main():
cm = IAMClient()
fh = open("id3.csv","w")
for i in range(1, 20):
xmlstr = cm.searchAll(i,10)
string_file = StringIO(xmlstr.replace('soap:','').replace("ns2:",""))
#root = etree.fromstring(xml)
tree = etree.parse(string_file)
resp = None
for element in tree.xpath('/Envelope/Body/searchAllResponse/return/userData'):
#resp = element[0][1].text
#print "\n"
v1 = get_element_text(element, "cn")
v2 = get_element_text(element, "mail")
v3 = get_element_text(element, "fotonAppAtt37")
v4 = get_element_text(element, "mobile")
v5 = get_element_text(element, "telephoneNumber")
v6 = get_element_text(element, "uid")
v7 = get_element_text(element, "ou")
#print userPassword[0].text,
x = "%s,%s,%s,%s,%s,%s,%s\n" % (v1, v2, v3, v4, v5, v6, v7)
fh.write(x)
time.sleep(0.5)
fh.close()
"""
token = cm.parseSessionToken(xmlstr)
rtn = cm.callMethod(token, "")
print cm.getResult(rtn)
"""
if __name__ == '__main__':
main()
| mabotech/mabo.ai | py/webservices/orgService.py | Python | mit | 5,747 |
# -*- coding: utf-8 -*-
from __future__ import print_function
# Form implementation generated from reading ui file 'InputChannelTemplate.ui'
#
# Created: Sun Feb 22 13:29:16 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(427, 220)
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.groupBox = GroupBox(Form)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.groupBox.setFont(font)
self.groupBox.setCheckable(False)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout = QtGui.QGridLayout(self.groupBox)
self.gridLayout.setSpacing(0)
self.gridLayout.setContentsMargins(5, 0, 0, 0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.recordCheck = QtGui.QCheckBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.recordCheck.setFont(font)
self.recordCheck.setChecked(True)
self.recordCheck.setObjectName(_fromUtf8("recordCheck"))
self.gridLayout.addWidget(self.recordCheck, 0, 0, 1, 1)
self.displayCheck = QtGui.QCheckBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.displayCheck.setFont(font)
self.displayCheck.setChecked(True)
self.displayCheck.setObjectName(_fromUtf8("displayCheck"))
self.gridLayout.addWidget(self.displayCheck, 0, 1, 1, 1)
self.recordInitCheck = QtGui.QCheckBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.recordInitCheck.setFont(font)
self.recordInitCheck.setObjectName(_fromUtf8("recordInitCheck"))
self.gridLayout.addWidget(self.recordInitCheck, 1, 0, 1, 2)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 2, 0, 1, 1)
self.verticalLayout.addWidget(self.groupBox)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.groupBox.setTitle(_translate("Form", "GroupBox", None))
self.recordCheck.setText(_translate("Form", "Record Trace", None))
self.displayCheck.setText(_translate("Form", "Display", None))
self.recordInitCheck.setText(_translate("Form", "Record Initial State", None))
from acq4.pyqtgraph import GroupBox
| meganbkratz/acq4 | acq4/devices/DAQGeneric/InputChannelTemplate.py | Python | mit | 3,321 |
# Natural Language Toolkit: Confusion Matrices
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals
from nltk.probability import FreqDist
from nltk.compat import python_2_unicode_compatible
@python_2_unicode_compatible
class ConfusionMatrix(object):
"""
The confusion matrix between a list of reference values and a
corresponding list of test values. Entry *[r,t]* of this
matrix is a count of the number of times that the reference value
*r* corresponds to the test value *t*. E.g.:
>>> from nltk.metrics import ConfusionMatrix
>>> ref = 'DET NN VB DET JJ NN NN IN DET NN'.split()
>>> test = 'DET VB VB DET NN NN NN IN DET NN'.split()
>>> cm = ConfusionMatrix(ref, test)
>>> print(cm['NN', 'NN'])
3
    Note that the diagonal entries *Ri=Tj* of this
    matrix correspond to correct values, while the off-diagonal
    entries correspond to incorrect values.
"""
def __init__(self, reference, test, sort_by_count=False):
"""
Construct a new confusion matrix from a list of reference
values and a corresponding list of test values.
:type reference: list
:param reference: An ordered list of reference values.
:type test: list
:param test: A list of values to compare against the
corresponding reference values.
        :raise ValueError: If ``reference`` and ``test`` do not have
            the same length.
"""
if len(reference) != len(test):
raise ValueError('Lists must have the same length.')
# Get a list of all values.
if sort_by_count:
ref_fdist = FreqDist(reference)
test_fdist = FreqDist(test)
def key(v): return -(ref_fdist[v]+test_fdist[v])
values = sorted(set(reference+test), key=key)
else:
values = sorted(set(reference+test))
# Construct a value->index dictionary
indices = dict((val,i) for (i,val) in enumerate(values))
# Make a confusion matrix table.
confusion = [[0 for val in values] for val in values]
max_conf = 0 # Maximum confusion
for w,g in zip(reference, test):
confusion[indices[w]][indices[g]] += 1
max_conf = max(max_conf, confusion[indices[w]][indices[g]])
#: A list of all values in ``reference`` or ``test``.
self._values = values
#: A dictionary mapping values in ``self._values`` to their indices.
self._indices = indices
#: The confusion matrix itself (as a list of lists of counts).
self._confusion = confusion
#: The greatest count in ``self._confusion`` (used for printing).
self._max_conf = max_conf
#: The total number of values in the confusion matrix.
self._total = len(reference)
#: The number of correct (on-diagonal) values in the matrix.
self._correct = sum(confusion[i][i] for i in range(len(values)))
def __getitem__(self, li_lj_tuple):
"""
:return: The number of times that value ``li`` was expected and
value ``lj`` was given.
:rtype: int
"""
(li, lj) = li_lj_tuple
i = self._indices[li]
j = self._indices[lj]
return self._confusion[i][j]
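    # For the docstring example above: cm['NN', 'VB'] == 1, since exactly one
    # reference 'NN' was tagged 'VB' in the test sequence.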
def __repr__(self):
return '<ConfusionMatrix: %s/%s correct>' % (self._correct,
self._total)
def __str__(self):
return self.pretty_format()
def pretty_format(self, show_percents=False, values_in_chart=True,
truncate=None, sort_by_count=False):
"""
:return: A multi-line string representation of this confusion matrix.
:type truncate: int
:param truncate: If specified, then only show the specified
number of values. Any sorting (e.g., sort_by_count)
will be performed before truncation.
:param sort_by_count: If true, then sort by the count of each
label in the reference data. I.e., labels that occur more
frequently in the reference label will be towards the left
edge of the matrix, and labels that occur less frequently
will be towards the right edge.
@todo: add marginals?
"""
confusion = self._confusion
values = self._values
if sort_by_count:
values = sorted(values, key=lambda v:
-sum(self._confusion[self._indices[v]]))
if truncate:
values = values[:truncate]
if values_in_chart:
value_strings = ["%s" % val for val in values]
else:
value_strings = [str(n+1) for n in range(len(values))]
# Construct a format string for row values
valuelen = max(len(val) for val in value_strings)
value_format = '%' + repr(valuelen) + 's | '
# Construct a format string for matrix entries
if show_percents:
entrylen = 6
entry_format = '%5.1f%%'
zerostr = ' .'
else:
entrylen = len(repr(self._max_conf))
entry_format = '%' + repr(entrylen) + 'd'
zerostr = ' '*(entrylen-1) + '.'
# Write the column values.
s = ''
for i in range(valuelen):
s += (' '*valuelen)+' |'
for val in value_strings:
if i >= valuelen-len(val):
s += val[i-valuelen+len(val)].rjust(entrylen+1)
else:
s += ' '*(entrylen+1)
s += ' |\n'
# Write a dividing line
s += '%s-+-%s+\n' % ('-'*valuelen, '-'*((entrylen+1)*len(values)))
# Write the entries.
for val, li in zip(value_strings, values):
i = self._indices[li]
s += value_format % val
for lj in values:
j = self._indices[lj]
if confusion[i][j] == 0:
s += zerostr
elif show_percents:
s += entry_format % (100.0*confusion[i][j]/self._total)
else:
s += entry_format % confusion[i][j]
if i == j:
prevspace = s.rfind(' ')
s = s[:prevspace] + '<' + s[prevspace+1:] + '>'
else: s += ' '
s += '|\n'
# Write a dividing line
s += '%s-+-%s+\n' % ('-'*valuelen, '-'*((entrylen+1)*len(values)))
# Write a key
s += '(row = reference; col = test)\n'
if not values_in_chart:
s += 'Value key:\n'
for i, value in enumerate(values):
s += '%6d: %s\n' % (i+1, value)
return s
    def key(self):
        values = self._values
        key_str = 'Value key:\n'
        indexlen = len(repr(len(values)-1))
        key_format = '  %'+repr(indexlen)+'d: %s\n'
        for i in range(len(values)):
            key_str += key_format % (i, values[i])
        return key_str
def demo():
reference = 'DET NN VB DET JJ NN NN IN DET NN'.split()
test = 'DET VB VB DET NN NN NN IN DET NN'.split()
print('Reference =', reference)
print('Test =', test)
print('Confusion matrix:')
print(ConfusionMatrix(reference, test))
print(ConfusionMatrix(reference, test).pretty_format(sort_by_count=True))
if __name__ == '__main__':
demo()
| MyRookie/SentimentAnalyse | venv/lib/python2.7/site-packages/nltk/metrics/confusionmatrix.py | Python | mit | 7,825 |
__author__ = "Jon Dawson"
__copyright__ = "Copyright (C) 2012, Jonathan P Dawson"
__version__ = "0.1"
class Allocator:
"""Maintain a pool of registers, variables and arrays. Keep track of what they are used for."""
def __init__(self, reuse):
self.registers = []
self.all_registers = {}
self.memory_size_2 = 0
self.memory_size_4 = 0
self.reuse = reuse
self.memory_content_2 = {}
self.memory_content_4 = {}
def new_array(self, size, contents, element_size):
if element_size == 2:
reg = self.memory_size_2
self.memory_size_2 += int(size)
if contents is not None:
for location, value in enumerate(contents, reg):
self.memory_content_2[location] = value
return reg
elif element_size == 4:
reg = self.memory_size_4
self.memory_size_4 += int(size)
if contents is not None:
for location, value in enumerate(contents, reg):
self.memory_content_4[location] = value
return reg
def regsize(self, reg):
return self.all_registers[reg][1]
def new(self, size, name="temporary_register"):
assert type(size) == int
reg = 0
while reg in self.registers or (reg in self.all_registers and self.regsize(reg) != size):
reg += 1
self.registers.append(reg)
self.all_registers[reg] = (name, size)
return reg
def free(self, register):
if register in self.registers and self.reuse:
self.registers.remove(register)
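
if __name__ == "__main__":
    # Quick self-check of the register pool: with reuse enabled, a freed
    # register number is handed out again on the next same-size request.
    alloc = Allocator(reuse=True)
    a = alloc.new(2, "a")   # -> 0
    b = alloc.new(2, "b")   # -> 1
    alloc.free(a)
    c = alloc.new(2, "c")   # reuses 0
    assert (a, b, c) == (0, 1, 0)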
| amerc/phimii | chips2/chips/compiler/allocator.py | Python | mit | 1,644 |
from tkinter.scrolledtext import ScrolledText
import tkinter as tk
from trace_json import traceparse
from parsley_json import jsonGrammar
jsonData = open('337141-steamcube.json').read()
class Tracer(object):
def __init__(self, grammarWin, inputWin, logWin, trace):
self.grammarWin = grammarWin
self.inputWin = inputWin
self.logWin = logWin
self.trace = trace
self.position = 0
    def advance(self):
        # Don't step past the last trace entry.
        if self.position < len(self.trace) - 1:
            self.position += 1
            self.display()
def rewind(self):
if self.position > 0:
self.position -= 1
self.display()
def display(self):
def updateHighlight(w, start, end=None):
w.tag_remove("highlight", "1.0", tk.END)
start = "1.0+%sc" % (start,)
if end is not None:
end = "1.0+%sc" % (end,)
w.tag_add("highlight", start, end)
w.tag_configure("highlight", background="yellow")
_, (grammarStart, grammarEnd), inputPos = self.trace[self.position]
updateHighlight(self.grammarWin, grammarStart, grammarEnd)
updateHighlight(self.inputWin, inputPos)
def display(grammar, src, trace):
r = tk.Tk()
f = tk.Frame(master=r)
lt = ScrolledText(master=f)
rt = ScrolledText(master=f)
lt.pack(side="left", expand=True, fill="both")
rt.pack(side="right", expand=True, fill="both")
bot = ScrolledText(master=r, height=5)
tracer = Tracer(lt, rt, bot, trace)
toolbar = tk.Frame(master=r)
tk.Button(toolbar, text="Next", width=5, command=tracer.advance).pack(
side="left")
tk.Button(toolbar, text="Prev", width=5, command=tracer.rewind).pack(
side="left")
f.pack(expand=1, fill="both")
toolbar.pack(fill=tk.X)
bot.pack(fill=tk.X)
lt.insert(tk.END, grammar)
rt.insert(tk.END, src)
tracer.display()
return r
_, trace = traceparse(jsonData)
root = display(jsonGrammar, jsonData, trace)
root.mainloop()
| JaDogg/__py_playground | reference/parsley/examples/trace_visualiser.py | Python | mit | 2,033 |
#!/usr/bin/env python2.7
#The MIT License (MIT)
#Copyright (c) 2015-2016 mh4x0f P0cL4bs Team
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from sys import argv,exit
from os import getuid
from PyQt4.QtGui import QApplication,QIcon
from Core.Privilege import frm_privelege
from Core.Main import Initialize
from Core.check import check_dependencies
from Modules.utils import Refactor
def ExecRootApp():
check_dependencies()
root = QApplication(argv)
app = Initialize()
app.setWindowIcon(QIcon('rsc/icon.ico'))
app.center(),app.show()
exit(root.exec_())
if __name__ == '__main__':
if not getuid() == 0:
app2 = QApplication(argv)
priv = frm_privelege()
priv.setWindowIcon(QIcon('rsc/icon.ico'))
priv.show(),app2.exec_()
exit(Refactor.threadRoot(priv.Editpassword.text()))
ExecRootApp() | samyoyo/3vilTwinAttacker | 3vilTwin-Attacker.py | Python | mit | 1,843 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to interface with the Crystallography Open
Database. If you use data from the COD, please cite the following works (as
stipulated by the COD developers)::
Merkys, A., Vaitkus, A., Butkus, J., Okulič-Kazarinas, M., Kairys, V. &
Gražulis, S. (2016) "COD::CIF::Parser: an error-correcting CIF parser for
the Perl language". Journal of Applied Crystallography 49.
Gražulis, S., Merkys, A., Vaitkus, A. & Okulič-Kazarinas, M. (2015)
"Computing stoichiometric molecular composition from crystal structures".
Journal of Applied Crystallography 48, 85-91.
Gražulis, S., Daškevič, A., Merkys, A., Chateigner, D., Lutterotti, L.,
Quirós, M., Serebryanaya, N. R., Moeck, P., Downs, R. T. & LeBail, A.
(2012) "Crystallography Open Database (COD): an open-access collection of
crystal structures and platform for world-wide collaboration". Nucleic
Acids Research 40, D420-D427.
Grazulis, S., Chateigner, D., Downs, R. T., Yokochi, A. T., Quiros, M.,
Lutterotti, L., Manakova, E., Butkus, J., Moeck, P. & Le Bail, A. (2009)
"Crystallography Open Database – an open-access collection of crystal
structures". J. Appl. Cryst. 42, 726-729.
Downs, R. T. & Hall-Wallace, M. (2003) "The American Mineralogist Crystal
Structure Database". American Mineralogist 88, 247-250.
"""
import requests
import subprocess
from monty.dev import requires
from monty.os.path import which
import re
from pymatgen.core.composition import Composition
from pymatgen.core.structure import Structure
from pymatgen.util.string import formula_double_format
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
class COD(object):
"""
An interface to the Crystallography Open Database.
"""
def __init__(self):
pass
def query(self, sql):
r = subprocess.check_output(["mysql", "-u", "cod_reader", "-h",
"www.crystallography.net", "-e",
sql, "cod"])
return r.decode("utf-8")
@requires(which("mysql"), "mysql must be installed to use this query.")
def get_cod_ids(self, formula):
"""
Queries the COD for all cod ids associated with a formula. Requires
mysql executable to be in the path.
Args:
formula (str): Formula.
Returns:
List of cod ids.
"""
# TODO: Remove dependency on external mysql call. MySQL-python package does not support Py3!
# Standardize formula to the version used by COD.
sql = 'select file from data where formula="- %s -"' % \
Composition(formula).hill_formula
text = self.query(sql).split("\n")
cod_ids = []
for l in text:
m = re.search(r"(\d+)", l)
if m:
cod_ids.append(int(m.group(1)))
return cod_ids
def get_structure_by_id(self, cod_id, **kwargs):
"""
Queries the COD for a structure by id.
Args:
cod_id (int): COD id.
kwargs: All kwargs supported by
:func:`pymatgen.core.structure.Structure.from_str`.
Returns:
A Structure.
"""
r = requests.get("http://www.crystallography.net/cod/%s.cif" % cod_id)
return Structure.from_str(r.text, fmt="cif", **kwargs)
@requires(which("mysql"), "mysql must be installed to use this query.")
def get_structure_by_formula(self, formula, **kwargs):
"""
Queries the COD for structures by formula. Requires mysql executable to
be in the path.
Args:
            formula (str): Chemical formula.
kwargs: All kwargs supported by
:func:`pymatgen.core.structure.Structure.from_str`.
Returns:
A list of dict of the format
[{"structure": Structure, "cod_id": cod_id, "sg": "P n m a"}]
"""
structures = []
sql = 'select file, sg from data where formula="- %s -"' % \
Composition(formula).hill_formula
text = self.query(sql).split("\n")
text.pop(0)
for l in text:
if l.strip():
cod_id, sg = l.split("\t")
r = requests.get("http://www.crystallography.net/cod/%s.cif"
% cod_id.strip())
try:
s = Structure.from_str(r.text, fmt="cif", **kwargs)
structures.append({"structure": s, "cod_id": int(cod_id),
"sg": sg})
except Exception:
import warnings
warnings.warn("\nStructure.from_str failed while parsing CIF file:\n%s" % r.text)
raise
return structures
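
if __name__ == "__main__":
    # Minimal usage sketch: fetching structures needs network access, and the
    # formula-based queries additionally need the mysql client on the PATH.
    # The COD id below is illustrative only.
    cod = COD()
    print(cod.get_cod_ids("Fe2O3"))
    print(cod.get_structure_by_id(1010064))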
| gpetretto/pymatgen | pymatgen/ext/cod.py | Python | mit | 5,037 |
"""foo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.urls import include
from django.urls import path
urlpatterns = [
path("polls/", include("polls.urls")),
path("admin/", admin.site.urls),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
path("__debug__/", include(debug_toolbar.urls)),
] + urlpatterns
| rochacbruno/dynaconf | example/issues/449_django_lazy_path/foo/urls.py | Python | mit | 996 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Create a new document with defaults set
"""
import webnotes
from webnotes.utils import nowdate, nowtime, cint, flt
import webnotes.defaults
def get_new_doc(doctype, parent_doc = None, parentfield = None):
doc = webnotes.doc({
"doctype": doctype,
"__islocal": 1,
"owner": webnotes.session.user,
"docstatus": 0
})
meta = webnotes.get_doctype(doctype)
if parent_doc:
doc.parent = parent_doc.name
doc.parenttype = parent_doc.doctype
if parentfield:
doc.parentfield = parentfield
for d in meta.get({"doctype":"DocField", "parent": doctype}):
default = webnotes.defaults.get_user_default(d.fieldname)
if default:
doc.fields[d.fieldname] = default
elif d.fields.get("default"):
if d.default == "__user":
doc.fields[d.fieldname] = webnotes.session.user
elif d.default == "Today":
doc.fields[d.fieldname] = nowdate()
elif d.default.startswith(":"):
ref_fieldname = d.default[1:].lower().replace(" ", "_")
if parent_doc:
ref_docname = parent_doc.fields[ref_fieldname]
else:
ref_docname = webnotes.conn.get_default(ref_fieldname)
doc.fields[d.fieldname] = webnotes.conn.get_value(d.default[1:],
ref_docname, d.fieldname)
else:
doc.fields[d.fieldname] = d.default
# convert type of default
if d.fieldtype in ("Int", "Check"):
doc.fields[d.fieldname] = cint(doc.fields[d.fieldname])
elif d.fieldtype in ("Float", "Currency"):
doc.fields[d.fieldname] = flt(doc.fields[d.fieldname])
elif d.fieldtype == "Time":
doc.fields[d.fieldname] = nowtime()
return doc
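
# Illustrative call (commented out because it needs an initialized webnotes
# session and existing DocTypes; the names below are hypothetical):
#
#   invoice = get_new_doc("Sales Invoice")
#   item = get_new_doc("Sales Invoice Item", parent_doc=invoice,
#                      parentfield="entries")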
| rohitw1991/latestadbwnf | webnotes/model/create_new.py | Python | mit | 1,707 |
"""LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
class vortex_sensor_t(object):
__slots__ = ["sensor1", "sensor2", "velocity"]
def __init__(self):
self.sensor1 = 0.0
self.sensor2 = 0.0
self.velocity = 0.0
def encode(self):
buf = BytesIO()
buf.write(vortex_sensor_t._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
buf.write(struct.pack(">ddd", self.sensor1, self.sensor2, self.velocity))
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = BytesIO(data)
if buf.read(8) != vortex_sensor_t._get_packed_fingerprint():
raise ValueError("Decode error")
return vortex_sensor_t._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = vortex_sensor_t()
self.sensor1, self.sensor2, self.velocity = struct.unpack(">ddd", buf.read(24))
return self
_decode_one = staticmethod(_decode_one)
_hash = None
def _get_hash_recursive(parents):
if vortex_sensor_t in parents: return 0
tmphash = (0x3525d46ae32101c3) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if vortex_sensor_t._packed_fingerprint is None:
vortex_sensor_t._packed_fingerprint = struct.pack(">Q", vortex_sensor_t._get_hash_recursive([]))
return vortex_sensor_t._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
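
if __name__ == "__main__":
    # Round-trip sanity check (illustrative; regenerating this file from the
    # LCM type definition would drop this block).
    msg = vortex_sensor_t()
    msg.sensor1, msg.sensor2, msg.velocity = 1.0, 2.0, 3.0
    decoded = vortex_sensor_t.decode(msg.encode())
    assert (decoded.sensor1, decoded.sensor2, decoded.velocity) == (1.0, 2.0, 3.0)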
| blandry/crazyflie-tools | lcm/crazyflie_t/vortex_sensor_t.py | Python | mit | 1,903 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import warnings
import unittest
import os
from pymatgen.alchemy.transmuters import CifTransmuter, PoscarTransmuter
from pymatgen.alchemy.filters import ContainsSpecieFilter
from pymatgen.transformations.standard_transformations import \
SubstitutionTransformation, RemoveSpeciesTransformation, \
OrderDisorderedStructureTransformation
from pymatgen.transformations.advanced_transformations import \
SuperTransformation
'''
Created on Mar 5, 2012
'''
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 5, 2012"
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class CifTransmuterTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_init(self):
trans = []
trans.append(SubstitutionTransformation({"Fe": "Mn", "Fe2+": "Mn2+"}))
tsc = CifTransmuter.from_filenames([os.path.join(test_dir,
"MultiStructure.cif")],
trans)
self.assertEqual(len(tsc), 2)
expected_ans = set(["Mn", "O", "Li", "P"])
for s in tsc:
els = set([el.symbol
for el in s.final_structure.composition.elements])
self.assertEqual(expected_ans, els)
class PoscarTransmuterTest(unittest.TestCase):
def test_init(self):
trans = []
trans.append(SubstitutionTransformation({"Fe": "Mn"}))
tsc = PoscarTransmuter.from_filenames([os.path.join(test_dir,
"POSCAR"),
os.path.join(test_dir,
"POSCAR")],
trans)
self.assertEqual(len(tsc), 2)
expected_ans = set(["Mn", "O", "P"])
for s in tsc:
els = set([el.symbol
for el in s.final_structure.composition.elements])
self.assertEqual(expected_ans, els)
def test_transmuter(self):
tsc = PoscarTransmuter.from_filenames(
[os.path.join(test_dir, "POSCAR")])
tsc.append_transformation(RemoveSpeciesTransformation('O'))
self.assertEqual(len(tsc[0].final_structure), 8)
tsc.append_transformation(SubstitutionTransformation({"Fe":
{"Fe2+": 0.25,
"Mn3+": .75},
"P": "P5+"}))
tsc.append_transformation(OrderDisorderedStructureTransformation(),
extend_collection=50)
self.assertEqual(len(tsc), 4)
t = SuperTransformation([SubstitutionTransformation({"Fe2+": "Mg2+"}),
SubstitutionTransformation({"Fe2+": "Zn2+"}),
SubstitutionTransformation({"Fe2+": "Be2+"})])
tsc.append_transformation(t, extend_collection=True)
self.assertEqual(len(tsc), 12)
for x in tsc:
self.assertEqual(len(x), 5, 'something might be wrong with the number of transformations in the history') #should be 4 trans + starting structure
#test the filter
tsc.apply_filter(ContainsSpecieFilter(['Zn2+', 'Be2+', 'Mn4+'],
strict_compare=True, AND=False))
self.assertEqual(len(tsc), 8)
self.assertEqual(tsc.transformed_structures[0].as_dict()[
'history'][-1]['@class'], 'ContainsSpecieFilter')
tsc.apply_filter(ContainsSpecieFilter(['Be2+']))
self.assertEqual(len(tsc), 4)
#Test set_parameter and add_tag.
tsc.set_parameter("para1", "hello")
self.assertEqual(tsc.transformed_structures[0]
.as_dict()['other_parameters']['para1'], 'hello')
tsc.add_tags(["world", "universe"])
self.assertEqual(tsc.transformed_structures[0]
.as_dict()['other_parameters']['tags'],
["world", "universe"])
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| dongsenfo/pymatgen | pymatgen/alchemy/tests/test_transmuters.py | Python | mit | 4,616 |
import unittest
import numpy as np
from chainermn.datasets import create_empty_dataset
import chainerx as chx
class TestEmptyDataset(unittest.TestCase):
def setUp(self):
pass
def check_create_empty_dataset(self, original_dataset):
empty_dataset = create_empty_dataset(original_dataset)
self.assertEqual(len(original_dataset), len(empty_dataset))
for i in range(len(original_dataset)):
self.assertEqual((), empty_dataset[i])
def test_empty_dataset_numpy(self):
self.check_empty_dataset(np)
def test_empty_dataset_chx(self):
self.check_empty_dataset(chx)
def check_empty_dataset(self, xp):
n = 10
self.check_create_empty_dataset([])
self.check_create_empty_dataset([0])
self.check_create_empty_dataset(list(range(n)))
self.check_create_empty_dataset(list(range(n * 5 - 1)))
self.check_create_empty_dataset(xp.array([]))
self.check_create_empty_dataset(xp.array([0]))
self.check_create_empty_dataset(xp.arange(n))
self.check_create_empty_dataset(xp.arange(n * 5 - 1))
| hvy/chainer | tests/chainermn_tests/datasets_tests/test_empty_dataset.py | Python | mit | 1,128 |
# -*- coding: utf-8 -*-
#
# Natural Language Toolkit: Snowball Stemmer
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Peter Michael Stahl <[email protected]>
# Peter Ljunglof <[email protected]> (revisions)
# Algorithms: Dr Martin Porter <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Snowball stemmers
This module provides a port of the Snowball stemmers
developed by Martin Porter.
There is also a demo function: `snowball.demo()`.
"""
from __future__ import unicode_literals, print_function
from nltk import compat
from nltk.corpus import stopwords
from nltk.stem import porter
from nltk.stem.util import suffix_replace
from nltk.stem.api import StemmerI
class SnowballStemmer(StemmerI):
"""
Snowball Stemmer
The following languages are supported:
Danish, Dutch, English, Finnish, French, German,
Hungarian, Italian, Norwegian, Portuguese, Romanian, Russian,
Spanish and Swedish.
The algorithm for English is documented here:
Porter, M. \"An algorithm for suffix stripping.\"
Program 14.3 (1980): 130-137.
The algorithms have been developed by Martin Porter.
These stemmers are called Snowball, because Porter created
a programming language with this name for creating
new stemming algorithms. There is more information available
at http://snowball.tartarus.org/
The stemmer is invoked as shown below:
>>> from nltk.stem import SnowballStemmer
>>> print(" ".join(SnowballStemmer.languages)) # See which languages are supported
danish dutch english finnish french german hungarian
italian norwegian porter portuguese romanian russian
spanish swedish
>>> stemmer = SnowballStemmer("german") # Choose a language
>>> stemmer.stem("Autobahnen") # Stem a word
'autobahn'
Invoking the stemmers that way is useful if you do not know the
language to be stemmed at runtime. Alternatively, if you already know
the language, then you can invoke the language specific stemmer directly:
>>> from nltk.stem.snowball import GermanStemmer
>>> stemmer = GermanStemmer()
>>> stemmer.stem("Autobahnen")
'autobahn'
:param language: The language whose subclass is instantiated.
:type language: str or unicode
:param ignore_stopwords: If set to True, stopwords are
not stemmed and returned unchanged.
Set to False by default.
:type ignore_stopwords: bool
:raise ValueError: If there is no stemmer for the specified
language, a ValueError is raised.
"""
languages = ("danish", "dutch", "english", "finnish", "french", "german",
"hungarian", "italian", "norwegian", "porter", "portuguese",
"romanian", "russian", "spanish", "swedish")
def __init__(self, language, ignore_stopwords=False):
if language not in self.languages:
raise ValueError("The language '%s' is not supported." % language)
stemmerclass = globals()[language.capitalize() + "Stemmer"]
self.stemmer = stemmerclass(ignore_stopwords)
self.stem = self.stemmer.stem
self.stopwords = self.stemmer.stopwords
@compat.python_2_unicode_compatible
class _LanguageSpecificStemmer(StemmerI):
"""
This helper subclass offers the possibility
to invoke a specific stemmer directly.
This is useful if you already know the language to be stemmed at runtime.
Create an instance of the Snowball stemmer.
:param ignore_stopwords: If set to True, stopwords are
not stemmed and returned unchanged.
Set to False by default.
:type ignore_stopwords: bool
"""
def __init__(self, ignore_stopwords=False):
# The language is the name of the class, minus the final "Stemmer".
language = type(self).__name__.lower()
if language.endswith("stemmer"):
language = language[:-7]
self.stopwords = set()
if ignore_stopwords:
try:
for word in stopwords.words(language):
self.stopwords.add(word)
except IOError:
raise ValueError("%r has no list of stopwords. Please set"
" 'ignore_stopwords' to 'False'." % self)
def __repr__(self):
"""
Print out the string representation of the respective class.
"""
return "<%s>" % type(self).__name__
class PorterStemmer(_LanguageSpecificStemmer, porter.PorterStemmer):
"""
A word stemmer based on the original Porter stemming algorithm.
Porter, M. \"An algorithm for suffix stripping.\"
Program 14.3 (1980): 130-137.
A few minor modifications have been made to Porter's basic
algorithm. See the source code of the module
nltk.stem.porter for more information.
"""
def __init__(self, ignore_stopwords=False):
_LanguageSpecificStemmer.__init__(self, ignore_stopwords)
porter.PorterStemmer.__init__(self)
class _ScandinavianStemmer(_LanguageSpecificStemmer):
"""
This subclass encapsulates a method for defining the string region R1.
It is used by the Danish, Norwegian, and Swedish stemmer.
"""
def _r1_scandinavian(self, word, vowels):
"""
Return the region R1 that is used by the Scandinavian stemmers.
R1 is the region after the first non-vowel following a vowel,
or is the null region at the end of the word if there is no
such non-vowel. But then R1 is adjusted so that the region
before it contains at least three letters.
:param word: The word whose region R1 is determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the region R1.
:type vowels: unicode
:return: the region R1 for the respective word.
:rtype: unicode
:note: This helper method is invoked by the respective stem method of
the subclasses DanishStemmer, NorwegianStemmer, and
SwedishStemmer. It is not to be invoked directly!
"""
r1 = ""
for i in range(1, len(word)):
if word[i] not in vowels and word[i-1] in vowels:
if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
r1 = word[3:]
elif len(word[:i+1]) >= 3:
r1 = word[i+1:]
else:
return word
break
return r1
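    # Worked example: for the Danish word "fisk" the first non-vowel after a
    # vowel is "s"; the region before it ("fis") already has three letters,
    # so R1 is everything after that consonant: "k".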
class _StandardStemmer(_LanguageSpecificStemmer):
"""
This subclass encapsulates two methods for defining the standard versions
of the string regions R1, R2, and RV.
"""
def _r1r2_standard(self, word, vowels):
"""
Return the standard interpretations of the string regions R1 and R2.
R1 is the region after the first non-vowel following a vowel,
or is the null region at the end of the word if there is no
such non-vowel.
R2 is the region after the first non-vowel following a vowel
in R1, or is the null region at the end of the word if there
is no such non-vowel.
:param word: The word whose regions R1 and R2 are determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the regions R1 and R2.
:type vowels: unicode
:return: (r1,r2), the regions R1 and R2 for the respective word.
:rtype: tuple
:note: This helper method is invoked by the respective stem method of
the subclasses DutchStemmer, FinnishStemmer,
FrenchStemmer, GermanStemmer, ItalianStemmer,
PortugueseStemmer, RomanianStemmer, and SpanishStemmer.
It is not to be invoked directly!
:note: A detailed description of how to define R1 and R2
can be found at http://snowball.tartarus.org/texts/r1r2.html
"""
r1 = ""
r2 = ""
for i in range(1, len(word)):
if word[i] not in vowels and word[i-1] in vowels:
r1 = word[i+1:]
break
for i in range(1, len(r1)):
if r1[i] not in vowels and r1[i-1] in vowels:
r2 = r1[i+1:]
break
return (r1, r2)
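    # Worked example (from the Snowball R1/R2 description linked in the
    # docstring): for "beautiful" with vowels "aeiouy", R1 = "iful" and
    # R2 = "ul".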
def _rv_standard(self, word, vowels):
"""
Return the standard interpretation of the string region RV.
If the second letter is a consonant, RV is the region after the
next following vowel. If the first two letters are vowels, RV is
the region after the next following consonant. Otherwise, RV is
the region after the third letter.
:param word: The word whose region RV is determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the region RV.
:type vowels: unicode
:return: the region RV for the respective word.
:rtype: unicode
:note: This helper method is invoked by the respective stem method of
the subclasses ItalianStemmer, PortugueseStemmer,
RomanianStemmer, and SpanishStemmer. It is not to be
invoked directly!
"""
rv = ""
if len(word) >= 2:
if word[1] not in vowels:
for i in range(2, len(word)):
if word[i] in vowels:
rv = word[i+1:]
break
elif word[0] in vowels and word[1] in vowels:
for i in range(2, len(word)):
if word[i] not in vowels:
rv = word[i+1:]
break
else:
rv = word[3:]
return rv
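    # Worked examples from the Snowball descriptions: with the Spanish vowels,
    # "macho" matches neither special case, so RV falls through to
    # word[3:] == "ho"; "oliva" has a consonant second letter, so RV is the
    # region after the next vowel: "va".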
class DanishStemmer(_ScandinavianStemmer):
"""
The Danish Snowball stemmer.
:cvar __vowels: The Danish vowels.
:type __vowels: unicode
:cvar __consonants: The Danish consonants.
:type __consonants: unicode
:cvar __double_consonants: The Danish double consonants.
:type __double_consonants: tuple
:cvar __s_ending: Letters that may directly appear before a word final 's'.
:type __s_ending: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:note: A detailed description of the Danish
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/danish/stemmer.html
"""
# The language's vowels and other important characters are defined.
__vowels = "aeiouy\xE6\xE5\xF8"
__consonants = "bcdfghjklmnpqrstvwxz"
__double_consonants = ("bb", "cc", "dd", "ff", "gg", "hh", "jj",
"kk", "ll", "mm", "nn", "pp", "qq", "rr",
"ss", "tt", "vv", "ww", "xx", "zz")
__s_ending = "abcdfghjklmnoprtvyz\xE5"
# The different suffixes, divided into the algorithm's steps
# and organized by length, are listed in tuples.
__step1_suffixes = ("erendes", "erende", "hedens", "ethed",
"erede", "heden", "heder", "endes",
"ernes", "erens", "erets", "ered",
"ende", "erne", "eren", "erer", "heds",
"enes", "eres", "eret", "hed", "ene", "ere",
"ens", "ers", "ets", "en", "er", "es", "et",
"e", "s")
__step2_suffixes = ("gd", "dt", "gt", "kt")
__step3_suffixes = ("elig", "l\xF8st", "lig", "els", "ig")
def stem(self, word):
"""
Stem a Danish word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
# Every word is put into lower case for normalization.
word = word.lower()
if word in self.stopwords:
return word
# After this, the required regions are generated
# by the respective helper method.
r1 = self._r1_scandinavian(word, self.__vowels)
# Then the actual stemming process starts.
# Every new step is explicitly indicated
# according to the descriptions on the Snowball website.
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if suffix == "s":
if word[-2] in self.__s_ending:
word = word[:-1]
r1 = r1[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 2
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
word = word[:-1]
r1 = r1[:-1]
break
# STEP 3
if r1.endswith("igst"):
word = word[:-2]
r1 = r1[:-2]
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
if suffix == "l\xF8st":
word = word[:-1]
r1 = r1[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
if r1.endswith(self.__step2_suffixes):
word = word[:-1]
r1 = r1[:-1]
break
# STEP 4: Undouble
for double_cons in self.__double_consonants:
if word.endswith(double_cons) and len(word) > 3:
word = word[:-1]
break
return word
class DutchStemmer(_StandardStemmer):
"""
The Dutch Snowball stemmer.
:cvar __vowels: The Dutch vowels.
:type __vowels: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step3b_suffixes: Suffixes to be deleted in step 3b of the algorithm.
:type __step3b_suffixes: tuple
:note: A detailed description of the Dutch
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/dutch/stemmer.html
"""
__vowels = "aeiouy\xE8"
__step1_suffixes = ("heden", "ene", "en", "se", "s")
__step3b_suffixes = ("baar", "lijk", "bar", "end", "ing", "ig")
def stem(self, word):
"""
Stem a Dutch word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
step2_success = False
# Vowel accents are removed.
word = (word.replace("\xE4", "a").replace("\xE1", "a")
.replace("\xEB", "e").replace("\xE9", "e")
.replace("\xED", "i").replace("\xEF", "i")
.replace("\xF6", "o").replace("\xF3", "o")
.replace("\xFC", "u").replace("\xFA", "u"))
# An initial 'y', a 'y' after a vowel,
# and an 'i' between self.__vowels is put into upper case.
# As from now these are treated as consonants.
if word.startswith("y"):
word = "".join(("Y", word[1:]))
for i in range(1, len(word)):
if word[i-1] in self.__vowels and word[i] == "y":
word = "".join((word[:i], "Y", word[i+1:]))
for i in range(1, len(word)-1):
if (word[i-1] in self.__vowels and word[i] == "i" and
word[i+1] in self.__vowels):
word = "".join((word[:i], "I", word[i+1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
# R1 is adjusted so that the region before it
# contains at least 3 letters.
for i in range(1, len(word)):
if word[i] not in self.__vowels and word[i-1] in self.__vowels:
if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
r1 = word[3:]
elif len(word[:i+1]) == 0:
return word
break
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if suffix == "heden":
word = suffix_replace(word, suffix, "heid")
r1 = suffix_replace(r1, suffix, "heid")
if r2.endswith("heden"):
r2 = suffix_replace(r2, suffix, "heid")
elif (suffix in ("ene", "en") and
not word.endswith("heden") and
word[-len(suffix)-1] not in self.__vowels and
word[-len(suffix)-3:-len(suffix)] != "gem"):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
elif (suffix in ("se", "s") and
word[-len(suffix)-1] not in self.__vowels and
word[-len(suffix)-1] != "j"):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 2
if r1.endswith("e") and word[-2] not in self.__vowels:
step2_success = True
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
# STEP 3a
if r2.endswith("heid") and word[-5] != "c":
word = word[:-4]
r1 = r1[:-4]
r2 = r2[:-4]
if (r1.endswith("en") and word[-3] not in self.__vowels and
word[-5:-2] != "gem"):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
# STEP 3b: Derivational suffixes
for suffix in self.__step3b_suffixes:
if r2.endswith(suffix):
if suffix in ("end", "ing"):
word = word[:-3]
r2 = r2[:-3]
if r2.endswith("ig") and word[-3] != "e":
word = word[:-2]
else:
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
elif suffix == "ig" and word[-3] != "e":
word = word[:-2]
elif suffix == "lijk":
word = word[:-4]
r1 = r1[:-4]
if r1.endswith("e") and word[-2] not in self.__vowels:
word = word[:-1]
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
elif suffix == "baar":
word = word[:-4]
elif suffix == "bar" and step2_success:
word = word[:-3]
break
# STEP 4: Undouble vowel
if len(word) >= 4:
if word[-1] not in self.__vowels and word[-1] != "I":
if word[-3:-1] in ("aa", "ee", "oo", "uu"):
if word[-4] not in self.__vowels:
word = "".join((word[:-3], word[-3], word[-1]))
# All occurrences of 'I' and 'Y' are put back into lower case.
word = word.replace("I", "i").replace("Y", "y")
return word
class EnglishStemmer(_StandardStemmer):
"""
The English Snowball stemmer.
:cvar __vowels: The English vowels.
:type __vowels: unicode
:cvar __double_consonants: The English double consonants.
:type __double_consonants: tuple
:cvar __li_ending: Letters that may directly appear before a word final 'li'.
:type __li_ending: unicode
:cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
:type __step0_suffixes: tuple
:cvar __step1a_suffixes: Suffixes to be deleted in step 1a of the algorithm.
:type __step1a_suffixes: tuple
:cvar __step1b_suffixes: Suffixes to be deleted in step 1b of the algorithm.
:type __step1b_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm.
:type __step5_suffixes: tuple
:cvar __special_words: A dictionary containing words
which have to be stemmed specially.
:type __special_words: dict
:note: A detailed description of the English
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/english/stemmer.html
"""
__vowels = "aeiouy"
__double_consonants = ("bb", "dd", "ff", "gg", "mm", "nn",
"pp", "rr", "tt")
__li_ending = "cdeghkmnrt"
__step0_suffixes = ("'s'", "'s", "'")
__step1a_suffixes = ("sses", "ied", "ies", "us", "ss", "s")
__step1b_suffixes = ("eedly", "ingly", "edly", "eed", "ing", "ed")
__step2_suffixes = ('ization', 'ational', 'fulness', 'ousness',
'iveness', 'tional', 'biliti', 'lessli',
'entli', 'ation', 'alism', 'aliti', 'ousli',
'iviti', 'fulli', 'enci', 'anci', 'abli',
'izer', 'ator', 'alli', 'bli', 'ogi', 'li')
__step3_suffixes = ('ational', 'tional', 'alize', 'icate', 'iciti',
'ative', 'ical', 'ness', 'ful')
__step4_suffixes = ('ement', 'ance', 'ence', 'able', 'ible', 'ment',
'ant', 'ent', 'ism', 'ate', 'iti', 'ous',
'ive', 'ize', 'ion', 'al', 'er', 'ic')
__step5_suffixes = ("e", "l")
__special_words = {"skis" : "ski",
"skies" : "sky",
"dying" : "die",
"lying" : "lie",
"tying" : "tie",
"idly" : "idl",
"gently" : "gentl",
"ugly" : "ugli",
"early" : "earli",
"only" : "onli",
"singly" : "singl",
"sky" : "sky",
"news" : "news",
"howe" : "howe",
"atlas" : "atlas",
"cosmos" : "cosmos",
"bias" : "bias",
"andes" : "andes",
"inning" : "inning",
"innings" : "inning",
"outing" : "outing",
"outings" : "outing",
"canning" : "canning",
"cannings" : "canning",
"herring" : "herring",
"herrings" : "herring",
"earring" : "earring",
"earrings" : "earring",
"proceed" : "proceed",
"proceeds" : "proceed",
"proceeded" : "proceed",
"proceeding" : "proceed",
"exceed" : "exceed",
"exceeds" : "exceed",
"exceeded" : "exceed",
"exceeding" : "exceed",
"succeed" : "succeed",
"succeeds" : "succeed",
"succeeded" : "succeed",
"succeeding" : "succeed"}
def stem(self, word):
"""
Stem an English word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords or len(word) <= 2:
return word
elif word in self.__special_words:
return self.__special_words[word]
# Map the different apostrophe characters to a single consistent one
word = (word.replace("\u2019", "\x27")
.replace("\u2018", "\x27")
.replace("\u201B", "\x27"))
if word.startswith("\x27"):
word = word[1:]
if word.startswith("y"):
word = "".join(("Y", word[1:]))
for i in range(1, len(word)):
if word[i-1] in self.__vowels and word[i] == "y":
word = "".join((word[:i], "Y", word[i+1:]))
step1a_vowel_found = False
step1b_vowel_found = False
r1 = ""
r2 = ""
if word.startswith(("gener", "commun", "arsen")):
if word.startswith(("gener", "arsen")):
r1 = word[5:]
else:
r1 = word[6:]
for i in range(1, len(r1)):
if r1[i] not in self.__vowels and r1[i-1] in self.__vowels:
r2 = r1[i+1:]
break
else:
r1, r2 = self._r1r2_standard(word, self.__vowels)
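        # Illustrative example: "generate" takes the special-case branch
        # above, giving r1 = "ate" and r2 = "e".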
# STEP 0
for suffix in self.__step0_suffixes:
if word.endswith(suffix):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 1a
for suffix in self.__step1a_suffixes:
if word.endswith(suffix):
if suffix == "sses":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix in ("ied", "ies"):
if len(word[:-len(suffix)]) > 1:
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
else:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
elif suffix == "s":
for letter in word[:-2]:
if letter in self.__vowels:
step1a_vowel_found = True
break
if step1a_vowel_found:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
break
# STEP 1b
for suffix in self.__step1b_suffixes:
if word.endswith(suffix):
if suffix in ("eed", "eedly"):
if r1.endswith(suffix):
word = suffix_replace(word, suffix, "ee")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ee")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ee")
else:
r2 = ""
else:
for letter in word[:-len(suffix)]:
if letter in self.__vowels:
step1b_vowel_found = True
break
if step1b_vowel_found:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
if word.endswith(("at", "bl", "iz")):
word = "".join((word, "e"))
r1 = "".join((r1, "e"))
                            if len(word) > 5 or len(r1) >= 3:
r2 = "".join((r2, "e"))
elif word.endswith(self.__double_consonants):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
elif ((r1 == "" and len(word) >= 3 and
word[-1] not in self.__vowels and
word[-1] not in "wxY" and
word[-2] in self.__vowels and
word[-3] not in self.__vowels)
or
(r1 == "" and len(word) == 2 and
word[0] in self.__vowels and
word[1] not in self.__vowels)):
word = "".join((word, "e"))
if len(r1) > 0:
r1 = "".join((r1, "e"))
if len(r2) > 0:
r2 = "".join((r2, "e"))
break
# STEP 1c
if len(word) > 2 and word[-1] in "yY" and word[-2] not in self.__vowels:
word = "".join((word[:-1], "i"))
if len(r1) >= 1:
r1 = "".join((r1[:-1], "i"))
else:
r1 = ""
if len(r2) >= 1:
r2 = "".join((r2[:-1], "i"))
else:
r2 = ""
# STEP 2
for suffix in self.__step2_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix == "tional":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix in ("enci", "anci", "abli"):
word = "".join((word[:-1], "e"))
if len(r1) >= 1:
r1 = "".join((r1[:-1], "e"))
else:
r1 = ""
if len(r2) >= 1:
r2 = "".join((r2[:-1], "e"))
else:
r2 = ""
elif suffix == "entli":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix in ("izer", "ization"):
word = suffix_replace(word, suffix, "ize")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ize")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ize")
else:
r2 = ""
elif suffix in ("ational", "ation", "ator"):
word = suffix_replace(word, suffix, "ate")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ate")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ate")
else:
r2 = "e"
elif suffix in ("alism", "aliti", "alli"):
word = suffix_replace(word, suffix, "al")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "al")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "al")
else:
r2 = ""
elif suffix == "fulness":
word = word[:-4]
r1 = r1[:-4]
r2 = r2[:-4]
elif suffix in ("ousli", "ousness"):
word = suffix_replace(word, suffix, "ous")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ous")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ous")
else:
r2 = ""
elif suffix in ("iveness", "iviti"):
word = suffix_replace(word, suffix, "ive")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ive")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ive")
else:
r2 = "e"
elif suffix in ("biliti", "bli"):
word = suffix_replace(word, suffix, "ble")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ble")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ble")
else:
r2 = ""
elif suffix == "ogi" and word[-4] == "l":
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
elif suffix in ("fulli", "lessli"):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == "li" and word[-3] in self.__li_ending:
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
break
# STEP 3
for suffix in self.__step3_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix == "tional":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == "ational":
word = suffix_replace(word, suffix, "ate")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ate")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ate")
else:
r2 = ""
elif suffix == "alize":
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
elif suffix in ("icate", "iciti", "ical"):
word = suffix_replace(word, suffix, "ic")
if len(r1) >= len(suffix):
r1 = suffix_replace(r1, suffix, "ic")
else:
r1 = ""
if len(r2) >= len(suffix):
r2 = suffix_replace(r2, suffix, "ic")
else:
r2 = ""
elif suffix in ("ful", "ness"):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
elif suffix == "ative" and r2.endswith(suffix):
word = word[:-5]
r1 = r1[:-5]
r2 = r2[:-5]
break
# STEP 4
for suffix in self.__step4_suffixes:
if word.endswith(suffix):
if r2.endswith(suffix):
if suffix == "ion":
if word[-4] in "st":
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 5
if r2.endswith("l") and word[-2] == "l":
word = word[:-1]
elif r2.endswith("e"):
word = word[:-1]
elif r1.endswith("e"):
if len(word) >= 4 and (word[-2] in self.__vowels or
word[-2] in "wxY" or
word[-3] not in self.__vowels or
word[-4] in self.__vowels):
word = word[:-1]
word = word.replace("Y", "y")
return word
class FinnishStemmer(_StandardStemmer):
"""
The Finnish Snowball stemmer.
:cvar __vowels: The Finnish vowels.
:type __vowels: unicode
:cvar __restricted_vowels: A subset of the Finnish vowels.
:type __restricted_vowels: unicode
:cvar __long_vowels: The Finnish vowels in their long forms.
:type __long_vowels: tuple
:cvar __consonants: The Finnish consonants.
:type __consonants: unicode
:cvar __double_consonants: The Finnish double consonants.
:type __double_consonants: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:note: A detailed description of the Finnish
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/finnish/stemmer.html
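
    A minimal usage sketch (illustrative; assumes the default
    constructor):

        >>> FinnishStemmer().stem("taloissa")    # doctest: +SKIP
        'talo'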
"""
__vowels = "aeiouy\xE4\xF6"
__restricted_vowels = "aeiou\xE4\xF6"
__long_vowels = ("aa", "ee", "ii", "oo", "uu", "\xE4\xE4",
"\xF6\xF6")
__consonants = "bcdfghjklmnpqrstvwxz"
__double_consonants = ("bb", "cc", "dd", "ff", "gg", "hh", "jj",
"kk", "ll", "mm", "nn", "pp", "qq", "rr",
"ss", "tt", "vv", "ww", "xx", "zz")
__step1_suffixes = ('kaan', 'k\xE4\xE4n', 'sti', 'kin', 'han',
'h\xE4n', 'ko', 'k\xF6', 'pa', 'p\xE4')
__step2_suffixes = ('nsa', 'ns\xE4', 'mme', 'nne', 'si', 'ni',
'an', '\xE4n', 'en')
__step3_suffixes = ('siin', 'tten', 'seen', 'han', 'hen', 'hin',
'hon', 'h\xE4n', 'h\xF6n', 'den', 'tta',
'tt\xE4', 'ssa', 'ss\xE4', 'sta',
'st\xE4', 'lla', 'll\xE4', 'lta',
'lt\xE4', 'lle', 'ksi', 'ine', 'ta',
't\xE4', 'na', 'n\xE4', 'a', '\xE4',
'n')
__step4_suffixes = ('impi', 'impa', 'imp\xE4', 'immi', 'imma',
'imm\xE4', 'mpi', 'mpa', 'mp\xE4', 'mmi',
'mma', 'mm\xE4', 'eja', 'ej\xE4')
def stem(self, word):
"""
Stem a Finnish word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
step3_success = False
r1, r2 = self._r1r2_standard(word, self.__vowels)
# STEP 1: Particles etc.
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if suffix == "sti":
if suffix in r2:
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
else:
if word[-len(suffix)-1] in "ntaeiouy\xE4\xF6":
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 2: Possessives
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
if suffix == "si":
if word[-3] != "k":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == "ni":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
if word.endswith("kse"):
word = suffix_replace(word, "kse", "ksi")
if r1.endswith("kse"):
r1 = suffix_replace(r1, "kse", "ksi")
if r2.endswith("kse"):
r2 = suffix_replace(r2, "kse", "ksi")
elif suffix == "an":
if (word[-4:-2] in ("ta", "na") or
word[-5:-2] in ("ssa", "sta", "lla", "lta")):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == "\xE4n":
if (word[-4:-2] in ("t\xE4", "n\xE4") or
word[-5:-2] in ("ss\xE4", "st\xE4",
"ll\xE4", "lt\xE4")):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == "en":
if word[-5:-2] in ("lle", "ine"):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
else:
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
break
# STEP 3: Cases
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
if suffix in ("han", "hen", "hin", "hon", "h\xE4n",
"h\xF6n"):
if ((suffix == "han" and word[-4] == "a") or
(suffix == "hen" and word[-4] == "e") or
(suffix == "hin" and word[-4] == "i") or
(suffix == "hon" and word[-4] == "o") or
(suffix == "h\xE4n" and word[-4] == "\xE4") or
(suffix == "h\xF6n" and word[-4] == "\xF6")):
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
step3_success = True
elif suffix in ("siin", "den", "tten"):
if (word[-len(suffix)-1] == "i" and
word[-len(suffix)-2] in self.__restricted_vowels):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
step3_success = True
else:
continue
elif suffix == "seen":
if word[-6:-4] in self.__long_vowels:
word = word[:-4]
r1 = r1[:-4]
r2 = r2[:-4]
step3_success = True
else:
continue
elif suffix in ("a", "\xE4"):
if word[-2] in self.__vowels and word[-3] in self.__consonants:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
step3_success = True
elif suffix in ("tta", "tt\xE4"):
if word[-4] == "e":
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
step3_success = True
elif suffix == "n":
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
step3_success = True
if word[-2:] == "ie" or word[-2:] in self.__long_vowels:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
step3_success = True
break
# STEP 4: Other endings
for suffix in self.__step4_suffixes:
if r2.endswith(suffix):
if suffix in ("mpi", "mpa", "mp\xE4", "mmi", "mma",
"mm\xE4"):
if word[-5:-3] != "po":
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 5: Plurals
if step3_success and len(r1) >= 1 and r1[-1] in "ij":
word = word[:-1]
r1 = r1[:-1]
elif (not step3_success and len(r1) >= 2 and
r1[-1] == "t" and r1[-2] in self.__vowels):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
if r2.endswith("imma"):
word = word[:-4]
r1 = r1[:-4]
elif r2.endswith("mma") and r2[-5:-3] != "po":
word = word[:-3]
r1 = r1[:-3]
# STEP 6: Tidying up
if r1[-2:] in self.__long_vowels:
word = word[:-1]
r1 = r1[:-1]
if (len(r1) >= 2 and r1[-2] in self.__consonants and
r1[-1] in "a\xE4ei"):
word = word[:-1]
r1 = r1[:-1]
if r1.endswith(("oj", "uj")):
word = word[:-1]
r1 = r1[:-1]
if r1.endswith("jo"):
word = word[:-1]
r1 = r1[:-1]
# If the word ends with a double consonant
# followed by zero or more vowels, the last consonant is removed.
for i in range(1, len(word)):
if word[-i] in self.__vowels:
continue
else:
if i == 1:
if word[-i-1:] in self.__double_consonants:
word = word[:-1]
else:
if word[-i-1:-i+1] in self.__double_consonants:
word = "".join((word[:-i], word[-i+1:]))
break
return word
class FrenchStemmer(_StandardStemmer):
"""
The French Snowball stemmer.
:cvar __vowels: The French vowels.
:type __vowels: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
:type __step2a_suffixes: tuple
:cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
:type __step2b_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:note: A detailed description of the French
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/french/stemmer.html
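
    A minimal usage sketch (illustrative; assumes the default
    constructor):

        >>> FrenchStemmer().stem("continuer")    # doctest: +SKIP
        'continu'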
"""
__vowels = "aeiouy\xE2\xE0\xEB\xE9\xEA\xE8\xEF\xEE\xF4\xFB\xF9"
__step1_suffixes = ('issements', 'issement', 'atrices', 'atrice',
'ateurs', 'ations', 'logies', 'usions',
'utions', 'ements', 'amment', 'emment',
'ances', 'iqUes', 'ismes', 'ables', 'istes',
'ateur', 'ation', 'logie', 'usion', 'ution',
'ences', 'ement', 'euses', 'ments', 'ance',
'iqUe', 'isme', 'able', 'iste', 'ence',
'it\xE9s', 'ives', 'eaux', 'euse', 'ment',
'eux', 'it\xE9', 'ive', 'ifs', 'aux', 'if')
__step2a_suffixes = ('issaIent', 'issantes', 'iraIent', 'issante',
'issants', 'issions', 'irions', 'issais',
'issait', 'issant', 'issent', 'issiez', 'issons',
'irais', 'irait', 'irent', 'iriez', 'irons',
'iront', 'isses', 'issez', '\xEEmes',
'\xEEtes', 'irai', 'iras', 'irez', 'isse',
'ies', 'ira', '\xEEt', 'ie', 'ir', 'is',
'it', 'i')
__step2b_suffixes = ('eraIent', 'assions', 'erions', 'assent',
'assiez', '\xE8rent', 'erais', 'erait',
'eriez', 'erons', 'eront', 'aIent', 'antes',
'asses', 'ions', 'erai', 'eras', 'erez',
'\xE2mes', '\xE2tes', 'ante', 'ants',
'asse', '\xE9es', 'era', 'iez', 'ais',
'ait', 'ant', '\xE9e', '\xE9s', 'er',
'ez', '\xE2t', 'ai', 'as', '\xE9', 'a')
__step4_suffixes = ('i\xE8re', 'I\xE8re', 'ion', 'ier', 'Ier',
'e', '\xEB')
def stem(self, word):
"""
Stem a French word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
step1_success = False
rv_ending_found = False
step2a_success = False
step2b_success = False
# Every occurrence of 'u' after 'q' is put into upper case.
for i in range(1, len(word)):
if word[i-1] == "q" and word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
# Every occurrence of 'u' and 'i'
# between vowels is put into upper case.
# Every occurrence of 'y' preceded or
# followed by a vowel is also put into upper case.
for i in range(1, len(word)-1):
if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
if word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
elif word[i] == "i":
word = "".join((word[:i], "I", word[i+1:]))
if word[i-1] in self.__vowels or word[i+1] in self.__vowels:
if word[i] == "y":
word = "".join((word[:i], "Y", word[i+1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
rv = self.__rv_french(word, self.__vowels)
# STEP 1: Standard suffix removal
for suffix in self.__step1_suffixes:
if word.endswith(suffix):
if suffix == "eaux":
word = word[:-1]
step1_success = True
elif suffix in ("euse", "euses"):
if suffix in r2:
word = word[:-len(suffix)]
step1_success = True
elif suffix in r1:
word = suffix_replace(word, suffix, "eux")
step1_success = True
elif suffix in ("ement", "ements") and suffix in rv:
word = word[:-len(suffix)]
step1_success = True
if word[-2:] == "iv" and "iv" in r2:
word = word[:-2]
if word[-2:] == "at" and "at" in r2:
word = word[:-2]
elif word[-3:] == "eus":
if "eus" in r2:
word = word[:-3]
elif "eus" in r1:
word = "".join((word[:-1], "x"))
elif word[-3:] in ("abl", "iqU"):
if "abl" in r2 or "iqU" in r2:
word = word[:-3]
elif word[-3:] in ("i\xE8r", "I\xE8r"):
if "i\xE8r" in rv or "I\xE8r" in rv:
word = "".join((word[:-3], "i"))
elif suffix == "amment" and suffix in rv:
word = suffix_replace(word, "amment", "ant")
rv = suffix_replace(rv, "amment", "ant")
rv_ending_found = True
elif suffix == "emment" and suffix in rv:
word = suffix_replace(word, "emment", "ent")
rv_ending_found = True
elif (suffix in ("ment", "ments") and suffix in rv and
not rv.startswith(suffix) and
rv[rv.rindex(suffix)-1] in self.__vowels):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
rv_ending_found = True
elif suffix == "aux" and suffix in r1:
word = "".join((word[:-2], "l"))
step1_success = True
elif (suffix in ("issement", "issements") and suffix in r1
and word[-len(suffix)-1] not in self.__vowels):
word = word[:-len(suffix)]
step1_success = True
elif suffix in ("ance", "iqUe", "isme", "able", "iste",
"eux", "ances", "iqUes", "ismes",
"ables", "istes") and suffix in r2:
word = word[:-len(suffix)]
step1_success = True
elif suffix in ("atrice", "ateur", "ation", "atrices",
"ateurs", "ations") and suffix in r2:
word = word[:-len(suffix)]
step1_success = True
if word[-2:] == "ic":
if "ic" in r2:
word = word[:-2]
else:
word = "".join((word[:-2], "iqU"))
elif suffix in ("logie", "logies") and suffix in r2:
word = suffix_replace(word, suffix, "log")
step1_success = True
elif (suffix in ("usion", "ution", "usions", "utions") and
suffix in r2):
word = suffix_replace(word, suffix, "u")
step1_success = True
elif suffix in ("ence", "ences") and suffix in r2:
word = suffix_replace(word, suffix, "ent")
step1_success = True
elif suffix in ("it\xE9", "it\xE9s") and suffix in r2:
word = word[:-len(suffix)]
step1_success = True
if word[-4:] == "abil":
if "abil" in r2:
word = word[:-4]
else:
word = "".join((word[:-2], "l"))
elif word[-2:] == "ic":
if "ic" in r2:
word = word[:-2]
else:
word = "".join((word[:-2], "iqU"))
elif word[-2:] == "iv":
if "iv" in r2:
word = word[:-2]
elif (suffix in ("if", "ive", "ifs", "ives") and
suffix in r2):
word = word[:-len(suffix)]
step1_success = True
if word[-2:] == "at" and "at" in r2:
word = word[:-2]
if word[-2:] == "ic":
if "ic" in r2:
word = word[:-2]
else:
word = "".join((word[:-2], "iqU"))
break
# STEP 2a: Verb suffixes beginning 'i'
if not step1_success or rv_ending_found:
for suffix in self.__step2a_suffixes:
if word.endswith(suffix):
if (suffix in rv and len(rv) > len(suffix) and
rv[rv.rindex(suffix)-1] not in self.__vowels):
word = word[:-len(suffix)]
step2a_success = True
break
# STEP 2b: Other verb suffixes
if not step2a_success:
for suffix in self.__step2b_suffixes:
if rv.endswith(suffix):
if suffix == "ions" and "ions" in r2:
word = word[:-4]
step2b_success = True
elif suffix in ('eraIent', 'erions', '\xE8rent',
'erais', 'erait', 'eriez',
'erons', 'eront', 'erai', 'eras',
'erez', '\xE9es', 'era', 'iez',
'\xE9e', '\xE9s', 'er', 'ez',
'\xE9'):
word = word[:-len(suffix)]
step2b_success = True
elif suffix in ('assions', 'assent', 'assiez',
'aIent', 'antes', 'asses',
'\xE2mes', '\xE2tes', 'ante',
'ants', 'asse', 'ais', 'ait',
'ant', '\xE2t', 'ai', 'as',
'a'):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
step2b_success = True
if rv.endswith("e"):
word = word[:-1]
break
# STEP 3
if step1_success or step2a_success or step2b_success:
if word[-1] == "Y":
word = "".join((word[:-1], "i"))
elif word[-1] == "\xE7":
word = "".join((word[:-1], "c"))
# STEP 4: Residual suffixes
else:
if (len(word) >= 2 and word[-1] == "s" and
word[-2] not in "aiou\xE8s"):
word = word[:-1]
for suffix in self.__step4_suffixes:
if word.endswith(suffix):
if suffix in rv:
if (suffix == "ion" and suffix in r2 and
rv[-4] in "st"):
word = word[:-3]
elif suffix in ("ier", "i\xE8re", "Ier",
"I\xE8re"):
word = suffix_replace(word, suffix, "i")
elif suffix == "e":
word = word[:-1]
elif suffix == "\xEB" and word[-3:-1] == "gu":
word = word[:-1]
break
# STEP 5: Undouble
if word.endswith(("enn", "onn", "ett", "ell", "eill")):
word = word[:-1]
# STEP 6: Un-accent
for i in range(1, len(word)):
if word[-i] not in self.__vowels:
                continue
else:
if i != 1 and word[-i] in ("\xE9", "\xE8"):
word = "".join((word[:-i], "e", word[-i+1:]))
break
word = (word.replace("I", "i")
.replace("U", "u")
.replace("Y", "y"))
return word
def __rv_french(self, word, vowels):
"""
Return the region RV that is used by the French stemmer.
If the word begins with two vowels, RV is the region after
the third letter. Otherwise, it is the region after the first
vowel not at the beginning of the word, or the end of the word
        if these positions cannot be found. (Exceptionally, 'par',
        'col' or 'tap' at the beginning of a word is also taken to
        define RV as the region to their right.)
:param word: The French word whose region RV is determined.
:type word: str or unicode
:param vowels: The French vowels that are used to determine
the region RV.
:type vowels: unicode
:return: the region RV for the respective French word.
:rtype: unicode
:note: This helper method is invoked by the stem method of
the subclass FrenchStemmer. It is not to be invoked directly!
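
        Illustrative examples: "aimer" begins with two vowels, so
        RV = "er"; "parler" begins with "par", so RV = "ler".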
"""
rv = ""
if len(word) >= 2:
if (word.startswith(("par", "col", "tap")) or
(word[0] in vowels and word[1] in vowels)):
rv = word[3:]
else:
for i in range(1, len(word)):
if word[i] in vowels:
rv = word[i+1:]
break
return rv
class GermanStemmer(_StandardStemmer):
"""
The German Snowball stemmer.
:cvar __vowels: The German vowels.
:type __vowels: unicode
:cvar __s_ending: Letters that may directly appear before a word final 's'.
:type __s_ending: unicode
:cvar __st_ending: Letter that may directly appear before a word final 'st'.
:type __st_ending: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:note: A detailed description of the German
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/german/stemmer.html
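
    A minimal usage sketch (illustrative; assumes the default
    constructor):

        >>> GermanStemmer().stem("laufen")    # doctest: +SKIP
        'lauf'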
"""
__vowels = "aeiouy\xE4\xF6\xFC"
__s_ending = "bdfghklmnrt"
__st_ending = "bdfghklmnt"
__step1_suffixes = ("ern", "em", "er", "en", "es", "e", "s")
__step2_suffixes = ("est", "en", "er", "st")
__step3_suffixes = ("isch", "lich", "heit", "keit",
"end", "ung", "ig", "ik")
def stem(self, word):
"""
Stem a German word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
word = word.replace("\xDF", "ss")
# Every occurrence of 'u' and 'y'
# between vowels is put into upper case.
for i in range(1, len(word)-1):
if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
if word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
elif word[i] == "y":
word = "".join((word[:i], "Y", word[i+1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
# R1 is adjusted so that the region before it
# contains at least 3 letters.
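        # Illustrative example: for "oben" the standard regions give
        # r1 = "en", but the three-letter minimum shifts it to r1 = "n".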
        for i in range(1, len(word)):
            if word[i] not in self.__vowels and word[i-1] in self.__vowels:
                if len(word[:i+1]) < 3:
                    r1 = word[3:]
                break
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if (suffix in ("en", "es", "e") and
word[-len(suffix)-4:-len(suffix)] == "niss"):
word = word[:-len(suffix)-1]
r1 = r1[:-len(suffix)-1]
r2 = r2[:-len(suffix)-1]
elif suffix == "s":
if word[-2] in self.__s_ending:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 2
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
if suffix == "st":
if word[-3] in self.__st_ending and len(word[:-3]) >= 3:
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 3: Derivational suffixes
for suffix in self.__step3_suffixes:
if r2.endswith(suffix):
if suffix in ("end", "ung"):
if ("ig" in r2[-len(suffix)-2:-len(suffix)] and
"e" not in r2[-len(suffix)-3:-len(suffix)-2]):
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
elif (suffix in ("ig", "ik", "isch") and
"e" not in r2[-len(suffix)-1:-len(suffix)]):
word = word[:-len(suffix)]
elif suffix in ("lich", "heit"):
if ("er" in r1[-len(suffix)-2:-len(suffix)] or
"en" in r1[-len(suffix)-2:-len(suffix)]):
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
elif suffix == "keit":
if "lich" in r2[-len(suffix)-4:-len(suffix)]:
word = word[:-len(suffix)-4]
elif "ig" in r2[-len(suffix)-2:-len(suffix)]:
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
break
# Umlaut accents are removed and
# 'u' and 'y' are put back into lower case.
word = (word.replace("\xE4", "a").replace("\xF6", "o")
.replace("\xFC", "u").replace("U", "u")
.replace("Y", "y"))
return word
class HungarianStemmer(_LanguageSpecificStemmer):
"""
The Hungarian Snowball stemmer.
:cvar __vowels: The Hungarian vowels.
:type __vowels: unicode
:cvar __digraphs: The Hungarian digraphs.
:type __digraphs: tuple
:cvar __double_consonants: The Hungarian double consonants.
:type __double_consonants: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm.
:type __step5_suffixes: tuple
:cvar __step6_suffixes: Suffixes to be deleted in step 6 of the algorithm.
:type __step6_suffixes: tuple
:cvar __step7_suffixes: Suffixes to be deleted in step 7 of the algorithm.
:type __step7_suffixes: tuple
:cvar __step8_suffixes: Suffixes to be deleted in step 8 of the algorithm.
:type __step8_suffixes: tuple
:cvar __step9_suffixes: Suffixes to be deleted in step 9 of the algorithm.
:type __step9_suffixes: tuple
:note: A detailed description of the Hungarian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/hungarian/stemmer.html
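
    A minimal usage sketch (illustrative; assumes the default
    constructor):

        >>> HungarianStemmer().stem("hallal")    # doctest: +SKIP
        'hal'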
"""
__vowels = "aeiou\xF6\xFC\xE1\xE9\xED\xF3\xF5\xFA\xFB"
__digraphs = ("cs", "dz", "dzs", "gy", "ly", "ny", "ty", "zs")
__double_consonants = ("bb", "cc", "ccs", "dd", "ff", "gg",
"ggy", "jj", "kk", "ll", "lly", "mm",
"nn", "nny", "pp", "rr", "ss", "ssz",
"tt", "tty", "vv", "zz", "zzs")
__step1_suffixes = ("al", "el")
__step2_suffixes = ('k\xE9ppen', 'onk\xE9nt', 'enk\xE9nt',
'ank\xE9nt', 'k\xE9pp', 'k\xE9nt', 'ban',
'ben', 'nak', 'nek', 'val', 'vel', 't\xF3l',
't\xF5l', 'r\xF3l', 'r\xF5l', 'b\xF3l',
'b\xF5l', 'hoz', 'hez', 'h\xF6z',
'n\xE1l', 'n\xE9l', '\xE9rt', 'kor',
'ba', 'be', 'ra', 're', 'ig', 'at', 'et',
'ot', '\xF6t', 'ul', '\xFCl', 'v\xE1',
'v\xE9', 'en', 'on', 'an', '\xF6n',
'n', 't')
__step3_suffixes = ("\xE1nk\xE9nt", "\xE1n", "\xE9n")
__step4_suffixes = ('astul', 'est\xFCl', '\xE1stul',
'\xE9st\xFCl', 'stul', 'st\xFCl')
__step5_suffixes = ("\xE1", "\xE9")
__step6_suffixes = ('ok\xE9', '\xF6k\xE9', 'ak\xE9',
'ek\xE9', '\xE1k\xE9', '\xE1\xE9i',
'\xE9k\xE9', '\xE9\xE9i', 'k\xE9',
'\xE9i', '\xE9\xE9', '\xE9')
__step7_suffixes = ('\xE1juk', '\xE9j\xFCk', '\xFCnk',
'unk', 'juk', 'j\xFCk', '\xE1nk',
'\xE9nk', 'nk', 'uk', '\xFCk', 'em',
'om', 'am', 'od', 'ed', 'ad', '\xF6d',
'ja', 'je', '\xE1m', '\xE1d', '\xE9m',
'\xE9d', 'm', 'd', 'a', 'e', 'o',
'\xE1', '\xE9')
__step8_suffixes = ('jaitok', 'jeitek', 'jaink', 'jeink', 'aitok',
'eitek', '\xE1itok', '\xE9itek', 'jaim',
'jeim', 'jaid', 'jeid', 'eink', 'aink',
'itek', 'jeik', 'jaik', '\xE1ink',
'\xE9ink', 'aim', 'eim', 'aid', 'eid',
'jai', 'jei', 'ink', 'aik', 'eik',
'\xE1im', '\xE1id', '\xE1ik', '\xE9im',
'\xE9id', '\xE9ik', 'im', 'id', 'ai',
'ei', 'ik', '\xE1i', '\xE9i', 'i')
__step9_suffixes = ("\xE1k", "\xE9k", "\xF6k", "ok",
"ek", "ak", "k")
def stem(self, word):
"""
        Stem a Hungarian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
r1 = self.__r1_hungarian(word, self.__vowels, self.__digraphs)
# STEP 1: Remove instrumental case
if r1.endswith(self.__step1_suffixes):
for double_cons in self.__double_consonants:
if word[-2-len(double_cons):-2] == double_cons:
word = "".join((word[:-4], word[-3]))
if r1[-2-len(double_cons):-2] == double_cons:
r1 = "".join((r1[:-4], r1[-3]))
break
# STEP 2: Remove frequent cases
for suffix in self.__step2_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
if r1.endswith("\xE1"):
word = "".join((word[:-1], "a"))
r1 = suffix_replace(r1, "\xE1", "a")
elif r1.endswith("\xE9"):
word = "".join((word[:-1], "e"))
r1 = suffix_replace(r1, "\xE9", "e")
break
# STEP 3: Remove special cases
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
if suffix == "\xE9n":
word = suffix_replace(word, suffix, "e")
r1 = suffix_replace(r1, suffix, "e")
else:
word = suffix_replace(word, suffix, "a")
r1 = suffix_replace(r1, suffix, "a")
break
# STEP 4: Remove other cases
for suffix in self.__step4_suffixes:
if r1.endswith(suffix):
if suffix == "\xE1stul":
word = suffix_replace(word, suffix, "a")
r1 = suffix_replace(r1, suffix, "a")
elif suffix == "\xE9st\xFCl":
word = suffix_replace(word, suffix, "e")
r1 = suffix_replace(r1, suffix, "e")
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 5: Remove factive case
for suffix in self.__step5_suffixes:
if r1.endswith(suffix):
for double_cons in self.__double_consonants:
if word[-1-len(double_cons):-1] == double_cons:
word = "".join((word[:-3], word[-2]))
if r1[-1-len(double_cons):-1] == double_cons:
r1 = "".join((r1[:-3], r1[-2]))
break
# STEP 6: Remove owned
for suffix in self.__step6_suffixes:
if r1.endswith(suffix):
if suffix in ("\xE1k\xE9", "\xE1\xE9i"):
word = suffix_replace(word, suffix, "a")
r1 = suffix_replace(r1, suffix, "a")
elif suffix in ("\xE9k\xE9", "\xE9\xE9i",
"\xE9\xE9"):
word = suffix_replace(word, suffix, "e")
r1 = suffix_replace(r1, suffix, "e")
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 7: Remove singular owner suffixes
for suffix in self.__step7_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix in ("\xE1nk", "\xE1juk", "\xE1m",
"\xE1d", "\xE1"):
word = suffix_replace(word, suffix, "a")
r1 = suffix_replace(r1, suffix, "a")
elif suffix in ("\xE9nk", "\xE9j\xFCk",
"\xE9m", "\xE9d", "\xE9"):
word = suffix_replace(word, suffix, "e")
r1 = suffix_replace(r1, suffix, "e")
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 8: Remove plural owner suffixes
for suffix in self.__step8_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix in ("\xE1im", "\xE1id", "\xE1i",
"\xE1ink", "\xE1itok", "\xE1ik"):
word = suffix_replace(word, suffix, "a")
r1 = suffix_replace(r1, suffix, "a")
elif suffix in ("\xE9im", "\xE9id", "\xE9i",
"\xE9ink", "\xE9itek", "\xE9ik"):
word = suffix_replace(word, suffix, "e")
r1 = suffix_replace(r1, suffix, "e")
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 9: Remove plural suffixes
for suffix in self.__step9_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix == "\xE1k":
word = suffix_replace(word, suffix, "a")
elif suffix == "\xE9k":
word = suffix_replace(word, suffix, "e")
else:
word = word[:-len(suffix)]
break
return word
def __r1_hungarian(self, word, vowels, digraphs):
"""
Return the region R1 that is used by the Hungarian stemmer.
If the word begins with a vowel, R1 is defined as the region
        after the first consonant or digraph (i.e. two letters standing
        for one phoneme) in the word. If the word begins with a consonant,
it is defined as the region after the first vowel in the word.
If the word does not contain both a vowel and consonant, R1
is the null region at the end of the word.
:param word: The Hungarian word whose region R1 is determined.
:type word: str or unicode
:param vowels: The Hungarian vowels that are used to determine
the region R1.
:type vowels: unicode
:param digraphs: The digraphs that are used to determine the
region R1.
:type digraphs: tuple
:return: the region R1 for the respective word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
HungarianStemmer. It is not to be invoked directly!
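
        Illustrative examples: the vowel-initial "olyan" (digraph "ly")
        gives R1 = "an"; the consonant-initial "hallal" gives R1 = "llal".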
"""
r1 = ""
if word[0] in vowels:
for digraph in digraphs:
if digraph in word[1:]:
r1 = word[word.index(digraph[-1])+1:]
return r1
for i in range(1, len(word)):
if word[i] not in vowels:
r1 = word[i+1:]
break
else:
for i in range(1, len(word)):
if word[i] in vowels:
r1 = word[i+1:]
break
return r1
class ItalianStemmer(_StandardStemmer):
"""
The Italian Snowball stemmer.
:cvar __vowels: The Italian vowels.
:type __vowels: unicode
:cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
:type __step0_suffixes: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:note: A detailed description of the Italian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/italian/stemmer.html
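
    A minimal usage sketch (illustrative; assumes the default
    constructor):

        >>> ItalianStemmer().stem("cani")    # doctest: +SKIP
        'can'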
"""
__vowels = "aeiou\xE0\xE8\xEC\xF2\xF9"
__step0_suffixes = ('gliela', 'gliele', 'glieli', 'glielo',
'gliene', 'sene', 'mela', 'mele', 'meli',
'melo', 'mene', 'tela', 'tele', 'teli',
'telo', 'tene', 'cela', 'cele', 'celi',
'celo', 'cene', 'vela', 'vele', 'veli',
'velo', 'vene', 'gli', 'ci', 'la', 'le',
'li', 'lo', 'mi', 'ne', 'si', 'ti', 'vi')
__step1_suffixes = ('atrice', 'atrici', 'azione', 'azioni',
'uzione', 'uzioni', 'usione', 'usioni',
'amento', 'amenti', 'imento', 'imenti',
'amente', 'abile', 'abili', 'ibile', 'ibili',
'mente', 'atore', 'atori', 'logia', 'logie',
'anza', 'anze', 'iche', 'ichi', 'ismo',
'ismi', 'ista', 'iste', 'isti', 'ist\xE0',
'ist\xE8', 'ist\xEC', 'ante', 'anti',
'enza', 'enze', 'ico', 'ici', 'ica', 'ice',
'oso', 'osi', 'osa', 'ose', 'it\xE0',
'ivo', 'ivi', 'iva', 'ive')
__step2_suffixes = ('erebbero', 'irebbero', 'assero', 'assimo',
'eranno', 'erebbe', 'eremmo', 'ereste',
'eresti', 'essero', 'iranno', 'irebbe',
'iremmo', 'ireste', 'iresti', 'iscano',
'iscono', 'issero', 'arono', 'avamo', 'avano',
'avate', 'eremo', 'erete', 'erono', 'evamo',
'evano', 'evate', 'iremo', 'irete', 'irono',
'ivamo', 'ivano', 'ivate', 'ammo', 'ando',
'asse', 'assi', 'emmo', 'enda', 'ende',
'endi', 'endo', 'erai', 'erei', 'Yamo',
'iamo', 'immo', 'irai', 'irei', 'isca',
'isce', 'isci', 'isco', 'ano', 'are', 'ata',
'ate', 'ati', 'ato', 'ava', 'avi', 'avo',
'er\xE0', 'ere', 'er\xF2', 'ete', 'eva',
'evi', 'evo', 'ir\xE0', 'ire', 'ir\xF2',
'ita', 'ite', 'iti', 'ito', 'iva', 'ivi',
'ivo', 'ono', 'uta', 'ute', 'uti', 'uto',
'ar', 'ir')
def stem(self, word):
"""
Stem an Italian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
step1_success = False
# All acute accents are replaced by grave accents.
word = (word.replace("\xE1", "\xE0")
.replace("\xE9", "\xE8")
.replace("\xED", "\xEC")
.replace("\xF3", "\xF2")
.replace("\xFA", "\xF9"))
# Every occurrence of 'u' after 'q'
# is put into upper case.
for i in range(1, len(word)):
if word[i-1] == "q" and word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
# Every occurrence of 'u' and 'i'
# between vowels is put into upper case.
for i in range(1, len(word)-1):
if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
if word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
                elif word[i] == "i":
word = "".join((word[:i], "I", word[i+1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
rv = self._rv_standard(word, self.__vowels)
# STEP 0: Attached pronoun
for suffix in self.__step0_suffixes:
if rv.endswith(suffix):
if rv[-len(suffix)-4:-len(suffix)] in ("ando", "endo"):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
elif (rv[-len(suffix)-2:-len(suffix)] in
("ar", "er", "ir")):
word = suffix_replace(word, suffix, "e")
r1 = suffix_replace(r1, suffix, "e")
r2 = suffix_replace(r2, suffix, "e")
rv = suffix_replace(rv, suffix, "e")
break
# STEP 1: Standard suffix removal
for suffix in self.__step1_suffixes:
if word.endswith(suffix):
if suffix == "amente" and r1.endswith(suffix):
step1_success = True
word = word[:-6]
r2 = r2[:-6]
rv = rv[:-6]
if r2.endswith("iv"):
word = word[:-2]
r2 = r2[:-2]
rv = rv[:-2]
if r2.endswith("at"):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith(("os", "ic")):
word = word[:-2]
rv = rv[:-2]
                    elif r2.endswith("abil"):
word = word[:-4]
rv = rv[:-4]
elif (suffix in ("amento", "amenti",
"imento", "imenti") and
rv.endswith(suffix)):
step1_success = True
word = word[:-6]
rv = rv[:-6]
elif r2.endswith(suffix):
step1_success = True
if suffix in ("azione", "azioni", "atore", "atori"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith("ic"):
word = word[:-2]
rv = rv[:-2]
elif suffix in ("logia", "logie"):
word = word[:-2]
                            rv = rv[:-2]
elif suffix in ("uzione", "uzioni",
"usione", "usioni"):
word = word[:-5]
rv = rv[:-5]
elif suffix in ("enza", "enze"):
word = suffix_replace(word, suffix, "te")
rv = suffix_replace(rv, suffix, "te")
elif suffix == "it\xE0":
word = word[:-3]
r2 = r2[:-3]
rv = rv[:-3]
if r2.endswith(("ic", "iv")):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith("abil"):
word = word[:-4]
rv = rv[:-4]
elif suffix in ("ivo", "ivi", "iva", "ive"):
word = word[:-3]
r2 = r2[:-3]
rv = rv[:-3]
if r2.endswith("at"):
word = word[:-2]
r2 = r2[:-2]
rv = rv[:-2]
if r2.endswith("ic"):
word = word[:-2]
rv = rv[:-2]
else:
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 2: Verb suffixes
if not step1_success:
for suffix in self.__step2_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 3a
if rv.endswith(("a", "e", "i", "o", "\xE0", "\xE8",
"\xEC", "\xF2")):
word = word[:-1]
rv = rv[:-1]
if rv.endswith("i"):
word = word[:-1]
rv = rv[:-1]
# STEP 3b
if rv.endswith(("ch", "gh")):
word = word[:-1]
word = word.replace("I", "i").replace("U", "u")
return word
class NorwegianStemmer(_ScandinavianStemmer):
"""
The Norwegian Snowball stemmer.
:cvar __vowels: The Norwegian vowels.
:type __vowels: unicode
:cvar __s_ending: Letters that may directly appear before a word final 's'.
:type __s_ending: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:note: A detailed description of the Norwegian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/norwegian/stemmer.html
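
    A minimal usage sketch (illustrative; assumes the default
    constructor):

        >>> NorwegianStemmer().stem("huset")    # doctest: +SKIP
        'hus'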
"""
__vowels = "aeiouy\xE6\xE5\xF8"
__s_ending = "bcdfghjlmnoprtvyz"
__step1_suffixes = ("hetenes", "hetene", "hetens", "heter",
"heten", "endes", "ande", "ende", "edes",
"enes", "erte", "ede", "ane", "ene", "ens",
"ers", "ets", "het", "ast", "ert", "en",
"ar", "er", "as", "es", "et", "a", "e", "s")
__step2_suffixes = ("dt", "vt")
__step3_suffixes = ("hetslov", "eleg", "elig", "elov", "slov",
"leg", "eig", "lig", "els", "lov", "ig")
def stem(self, word):
"""
Stem a Norwegian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
r1 = self._r1_scandinavian(word, self.__vowels)
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if suffix in ("erte", "ert"):
word = suffix_replace(word, suffix, "er")
r1 = suffix_replace(r1, suffix, "er")
elif suffix == "s":
if (word[-2] in self.__s_ending or
(word[-2] == "k" and word[-3] not in self.__vowels)):
word = word[:-1]
r1 = r1[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 2
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
word = word[:-1]
r1 = r1[:-1]
break
# STEP 3
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
word = word[:-len(suffix)]
break
return word
class PortugueseStemmer(_StandardStemmer):
"""
The Portuguese Snowball stemmer.
:cvar __vowels: The Portuguese vowels.
:type __vowels: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:note: A detailed description of the Portuguese
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/portuguese/stemmer.html
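
    A minimal usage sketch (illustrative; assumes the default
    constructor):

        >>> PortugueseStemmer().stem("rapidamente")    # doctest: +SKIP
        'rapid'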
"""
__vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xE2\xEA\xF4"
__step1_suffixes = ('amentos', 'imentos', 'uço~es', 'amento',
'imento', 'adoras', 'adores', 'a\xE7o~es',
'logias', '\xEAncias', 'amente',
'idades', 'an\xE7as', 'ismos', 'istas', 'adora',
'a\xE7a~o', 'antes', '\xE2ncia',
'logia', 'uça~o', '\xEAncia',
'mente', 'idade', 'an\xE7a', 'ezas', 'icos', 'icas',
'ismo', '\xE1vel', '\xEDvel', 'ista',
'osos', 'osas', 'ador', 'ante', 'ivas',
'ivos', 'iras', 'eza', 'ico', 'ica',
'oso', 'osa', 'iva', 'ivo', 'ira')
__step2_suffixes = ('ar\xEDamos', 'er\xEDamos', 'ir\xEDamos',
'\xE1ssemos', '\xEAssemos', '\xEDssemos',
'ar\xEDeis', 'er\xEDeis', 'ir\xEDeis',
'\xE1sseis', '\xE9sseis', '\xEDsseis',
'\xE1ramos', '\xE9ramos', '\xEDramos',
'\xE1vamos', 'aremos', 'eremos', 'iremos',
'ariam', 'eriam', 'iriam', 'assem', 'essem',
'issem', 'ara~o', 'era~o', 'ira~o', 'arias',
'erias', 'irias', 'ardes', 'erdes', 'irdes',
'asses', 'esses', 'isses', 'astes', 'estes',
'istes', '\xE1reis', 'areis', '\xE9reis',
'ereis', '\xEDreis', 'ireis', '\xE1veis',
'\xEDamos', 'armos', 'ermos', 'irmos',
'aria', 'eria', 'iria', 'asse', 'esse',
'isse', 'aste', 'este', 'iste', 'arei',
'erei', 'irei', 'aram', 'eram', 'iram',
'avam', 'arem', 'erem', 'irem',
'ando', 'endo', 'indo', 'adas', 'idas',
'ar\xE1s', 'aras', 'er\xE1s', 'eras',
'ir\xE1s', 'avas', 'ares', 'eres', 'ires',
'\xEDeis', 'ados', 'idos', '\xE1mos',
'amos', 'emos', 'imos', 'iras', 'ada', 'ida',
'ar\xE1', 'ara', 'er\xE1', 'era',
'ir\xE1', 'ava', 'iam', 'ado', 'ido',
'ias', 'ais', 'eis', 'ira', 'ia', 'ei', 'am',
'em', 'ar', 'er', 'ir', 'as',
'es', 'is', 'eu', 'iu', 'ou')
__step4_suffixes = ("os", "a", "i", "o", "\xE1",
"\xED", "\xF3")
def stem(self, word):
"""
Stem a Portuguese word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
step1_success = False
step2_success = False
word = (word.replace("\xE3", "a~")
.replace("\xF5", "o~")
.replace("q\xFC", "qu")
.replace("g\xFC", "gu"))
r1, r2 = self._r1r2_standard(word, self.__vowels)
rv = self._rv_standard(word, self.__vowels)
# STEP 1: Standard suffix removal
for suffix in self.__step1_suffixes:
if word.endswith(suffix):
if suffix == "amente" and r1.endswith(suffix):
step1_success = True
word = word[:-6]
r2 = r2[:-6]
rv = rv[:-6]
if r2.endswith("iv"):
word = word[:-2]
r2 = r2[:-2]
rv = rv[:-2]
if r2.endswith("at"):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith(("os", "ic", "ad")):
word = word[:-2]
rv = rv[:-2]
elif (suffix in ("ira", "iras") and rv.endswith(suffix) and
word[-len(suffix)-1:-len(suffix)] == "e"):
step1_success = True
word = suffix_replace(word, suffix, "ir")
rv = suffix_replace(rv, suffix, "ir")
elif r2.endswith(suffix):
step1_success = True
if suffix in ("logia", "logias"):
word = suffix_replace(word, suffix, "log")
rv = suffix_replace(rv, suffix, "log")
elif suffix in ("uça~o", "uço~es"):
word = suffix_replace(word, suffix, "u")
rv = suffix_replace(rv, suffix, "u")
elif suffix in ("\xEAncia", "\xEAncias"):
word = suffix_replace(word, suffix, "ente")
rv = suffix_replace(rv, suffix, "ente")
elif suffix == "mente":
word = word[:-5]
r2 = r2[:-5]
rv = rv[:-5]
if r2.endswith(("ante", "avel", "ivel")):
word = word[:-4]
rv = rv[:-4]
elif suffix in ("idade", "idades"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith(("ic", "iv")):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith("abil"):
word = word[:-4]
rv = rv[:-4]
elif suffix in ("iva", "ivo", "ivas", "ivos"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith("at"):
word = word[:-2]
rv = rv[:-2]
else:
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 2: Verb suffixes
if not step1_success:
for suffix in self.__step2_suffixes:
if rv.endswith(suffix):
step2_success = True
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 3
if step1_success or step2_success:
if rv.endswith("i") and word[-2] == "c":
word = word[:-1]
rv = rv[:-1]
        # STEP 4: Residual suffix
if not step1_success and not step2_success:
for suffix in self.__step4_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 5
if rv.endswith(("e", "\xE9", "\xEA")):
word = word[:-1]
rv = rv[:-1]
if ((word.endswith("gu") and rv.endswith("u")) or
(word.endswith("ci") and rv.endswith("i"))):
word = word[:-1]
elif word.endswith("\xE7"):
word = suffix_replace(word, "\xE7", "c")
word = word.replace("a~", "\xE3").replace("o~", "\xF5")
return word
class RomanianStemmer(_StandardStemmer):
"""
The Romanian Snowball stemmer.
:cvar __vowels: The Romanian vowels.
:type __vowels: unicode
:cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
:type __step0_suffixes: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:note: A detailed description of the Romanian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/romanian/stemmer.html
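
    A minimal usage sketch (illustrative; assumes the default
    constructor):

        >>> RomanianStemmer().stem("copiilor")    # doctest: +SKIP
        'cop'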
"""
__vowels = "aeiou\u0103\xE2\xEE"
__step0_suffixes = ('iilor', 'ului', 'elor', 'iile', 'ilor',
'atei', 'a\u0163ie', 'a\u0163ia', 'aua',
'ele', 'iua', 'iei', 'ile', 'ul', 'ea',
'ii')
__step1_suffixes = ('abilitate', 'abilitati', 'abilit\u0103\u0163i',
'ibilitate', 'abilit\u0103i', 'ivitate',
'ivitati', 'ivit\u0103\u0163i', 'icitate',
'icitati', 'icit\u0103\u0163i', 'icatori',
'ivit\u0103i', 'icit\u0103i', 'icator',
'a\u0163iune', 'atoare', '\u0103toare',
'i\u0163iune', 'itoare', 'iciva', 'icive',
'icivi', 'iciv\u0103', 'icala', 'icale',
'icali', 'ical\u0103', 'ativa', 'ative',
'ativi', 'ativ\u0103', 'atori', '\u0103tori',
'itiva', 'itive', 'itivi', 'itiv\u0103',
'itori', 'iciv', 'ical', 'ativ', 'ator',
'\u0103tor', 'itiv', 'itor')
__step2_suffixes = ('abila', 'abile', 'abili', 'abil\u0103',
'ibila', 'ibile', 'ibili', 'ibil\u0103',
'atori', 'itate', 'itati', 'it\u0103\u0163i',
'abil', 'ibil', 'oasa', 'oas\u0103', 'oase',
'anta', 'ante', 'anti', 'ant\u0103', 'ator',
'it\u0103i', 'iune', 'iuni', 'isme', 'ista',
'iste', 'isti', 'ist\u0103', 'i\u015Fti',
'ata', 'at\u0103', 'ati', 'ate', 'uta',
'ut\u0103', 'uti', 'ute', 'ita', 'it\u0103',
'iti', 'ite', 'ica', 'ice', 'ici', 'ic\u0103',
'osi', 'o\u015Fi', 'ant', 'iva', 'ive', 'ivi',
'iv\u0103', 'ism', 'ist', 'at', 'ut', 'it',
'ic', 'os', 'iv')
__step3_suffixes = ('seser\u0103\u0163i', 'aser\u0103\u0163i',
'iser\u0103\u0163i', '\xE2ser\u0103\u0163i',
'user\u0103\u0163i', 'seser\u0103m',
'aser\u0103m', 'iser\u0103m', '\xE2ser\u0103m',
'user\u0103m', 'ser\u0103\u0163i', 'sese\u015Fi',
'seser\u0103', 'easc\u0103', 'ar\u0103\u0163i',
'ur\u0103\u0163i', 'ir\u0103\u0163i',
'\xE2r\u0103\u0163i', 'ase\u015Fi',
'aser\u0103', 'ise\u015Fi', 'iser\u0103',
'\xe2se\u015Fi', '\xE2ser\u0103',
'use\u015Fi', 'user\u0103', 'ser\u0103m',
'sesem', 'indu', '\xE2ndu', 'eaz\u0103',
'e\u015Fti', 'e\u015Fte', '\u0103\u015Fti',
'\u0103\u015Fte', 'ea\u0163i', 'ia\u0163i',
'ar\u0103m', 'ur\u0103m', 'ir\u0103m',
'\xE2r\u0103m', 'asem', 'isem',
'\xE2sem', 'usem', 'se\u015Fi', 'ser\u0103',
'sese', 'are', 'ere', 'ire', '\xE2re',
'ind', '\xE2nd', 'eze', 'ezi', 'esc',
'\u0103sc', 'eam', 'eai', 'eau', 'iam',
'iai', 'iau', 'a\u015Fi', 'ar\u0103',
'u\u015Fi', 'ur\u0103', 'i\u015Fi', 'ir\u0103',
'\xE2\u015Fi', '\xe2r\u0103', 'ase',
'ise', '\xE2se', 'use', 'a\u0163i',
'e\u0163i', 'i\u0163i', '\xe2\u0163i', 'sei',
'ez', 'am', 'ai', 'au', 'ea', 'ia', 'ui',
'\xE2i', '\u0103m', 'em', 'im', '\xE2m',
'se')
def stem(self, word):
"""
Stem a Romanian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
step1_success = False
step2_success = False
for i in range(1, len(word)-1):
if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
if word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
elif word[i] == "i":
word = "".join((word[:i], "I", word[i+1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
rv = self._rv_standard(word, self.__vowels)
# STEP 0: Removal of plurals and other simplifications
for suffix in self.__step0_suffixes:
if word.endswith(suffix):
if suffix in r1:
if suffix in ("ul", "ului"):
word = word[:-len(suffix)]
if suffix in rv:
rv = rv[:-len(suffix)]
else:
rv = ""
elif (suffix == "aua" or suffix == "atei" or
(suffix == "ile" and word[-5:-3] != "ab")):
word = word[:-2]
elif suffix in ("ea", "ele", "elor"):
word = suffix_replace(word, suffix, "e")
if suffix in rv:
rv = suffix_replace(rv, suffix, "e")
else:
rv = ""
elif suffix in ("ii", "iua", "iei",
"iile", "iilor", "ilor"):
word = suffix_replace(word, suffix, "i")
if suffix in rv:
rv = suffix_replace(rv, suffix, "i")
else:
rv = ""
elif suffix in ("a\u0163ie", "a\u0163ia"):
word = word[:-1]
break
# STEP 1: Reduction of combining suffixes
while True:
replacement_done = False
for suffix in self.__step1_suffixes:
if word.endswith(suffix):
if suffix in r1:
step1_success = True
replacement_done = True
if suffix in ("abilitate", "abilitati",
"abilit\u0103i",
"abilit\u0103\u0163i"):
word = suffix_replace(word, suffix, "abil")
elif suffix == "ibilitate":
word = word[:-5]
elif suffix in ("ivitate", "ivitati",
"ivit\u0103i",
"ivit\u0103\u0163i"):
word = suffix_replace(word, suffix, "iv")
elif suffix in ("icitate", "icitati", "icit\u0103i",
"icit\u0103\u0163i", "icator",
"icatori", "iciv", "iciva",
"icive", "icivi", "iciv\u0103",
"ical", "icala", "icale", "icali",
"ical\u0103"):
word = suffix_replace(word, suffix, "ic")
elif suffix in ("ativ", "ativa", "ative", "ativi",
"ativ\u0103", "a\u0163iune",
"atoare", "ator", "atori",
"\u0103toare",
"\u0103tor", "\u0103tori"):
word = suffix_replace(word, suffix, "at")
if suffix in r2:
r2 = suffix_replace(r2, suffix, "at")
elif suffix in ("itiv", "itiva", "itive", "itivi",
"itiv\u0103", "i\u0163iune",
"itoare", "itor", "itori"):
word = suffix_replace(word, suffix, "it")
if suffix in r2:
r2 = suffix_replace(r2, suffix, "it")
else:
step1_success = False
break
if not replacement_done:
break
# STEP 2: Removal of standard suffixes
for suffix in self.__step2_suffixes:
if word.endswith(suffix):
if suffix in r2:
step2_success = True
if suffix in ("iune", "iuni"):
if word[-5] == "\u0163":
word = "".join((word[:-5], "t"))
elif suffix in ("ism", "isme", "ist", "ista", "iste",
"isti", "ist\u0103", "i\u015Fti"):
word = suffix_replace(word, suffix, "ist")
else:
word = word[:-len(suffix)]
break
# STEP 3: Removal of verb suffixes
if not step1_success and not step2_success:
for suffix in self.__step3_suffixes:
if word.endswith(suffix):
if suffix in rv:
if suffix in ('seser\u0103\u0163i', 'seser\u0103m',
'ser\u0103\u0163i', 'sese\u015Fi',
'seser\u0103', 'ser\u0103m', 'sesem',
'se\u015Fi', 'ser\u0103', 'sese',
'a\u0163i', 'e\u0163i', 'i\u0163i',
'\xE2\u0163i', 'sei', '\u0103m',
'em', 'im', '\xE2m', 'se'):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
else:
if (not rv.startswith(suffix) and
rv[rv.index(suffix)-1] not in
"aeio\u0103\xE2\xEE"):
word = word[:-len(suffix)]
break
# STEP 4: Removal of final vowel
for suffix in ("ie", "a", "e", "i", "\u0103"):
if word.endswith(suffix):
if suffix in rv:
word = word[:-len(suffix)]
break
word = word.replace("I", "i").replace("U", "u")
return word
class RussianStemmer(_LanguageSpecificStemmer):
"""
The Russian Snowball stemmer.
:cvar __perfective_gerund_suffixes: Suffixes to be deleted.
:type __perfective_gerund_suffixes: tuple
:cvar __adjectival_suffixes: Suffixes to be deleted.
:type __adjectival_suffixes: tuple
:cvar __reflexive_suffixes: Suffixes to be deleted.
:type __reflexive_suffixes: tuple
:cvar __verb_suffixes: Suffixes to be deleted.
:type __verb_suffixes: tuple
:cvar __noun_suffixes: Suffixes to be deleted.
:type __noun_suffixes: tuple
:cvar __superlative_suffixes: Suffixes to be deleted.
:type __superlative_suffixes: tuple
:cvar __derivational_suffixes: Suffixes to be deleted.
:type __derivational_suffixes: tuple
:note: A detailed description of the Russian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/russian/stemmer.html
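
    A minimal usage sketch (illustrative; assumes the default constructor
    and transliterated Latin-script input; Cyrillic input is transliterated
    internally):

        >>> RussianStemmer().stem("knigami")    # doctest: +SKIP
        'knig'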
"""
__perfective_gerund_suffixes = ("ivshis'", "yvshis'", "vshis'",
"ivshi", "yvshi", "vshi", "iv",
"yv", "v")
__adjectival_suffixes = ('ui^ushchi^ui^u', 'ui^ushchi^ai^a',
'ui^ushchimi', 'ui^ushchymi', 'ui^ushchego',
'ui^ushchogo', 'ui^ushchemu', 'ui^ushchomu',
'ui^ushchikh', 'ui^ushchykh',
'ui^ushchui^u', 'ui^ushchaia',
'ui^ushchoi^u', 'ui^ushchei^u',
'i^ushchi^ui^u', 'i^ushchi^ai^a',
'ui^ushchee', 'ui^ushchie',
'ui^ushchye', 'ui^ushchoe', 'ui^ushchei`',
'ui^ushchii`', 'ui^ushchyi`',
'ui^ushchoi`', 'ui^ushchem', 'ui^ushchim',
'ui^ushchym', 'ui^ushchom', 'i^ushchimi',
'i^ushchymi', 'i^ushchego', 'i^ushchogo',
'i^ushchemu', 'i^ushchomu', 'i^ushchikh',
'i^ushchykh', 'i^ushchui^u', 'i^ushchai^a',
'i^ushchoi^u', 'i^ushchei^u', 'i^ushchee',
'i^ushchie', 'i^ushchye', 'i^ushchoe',
'i^ushchei`', 'i^ushchii`',
'i^ushchyi`', 'i^ushchoi`', 'i^ushchem',
'i^ushchim', 'i^ushchym', 'i^ushchom',
'shchi^ui^u', 'shchi^ai^a', 'ivshi^ui^u',
'ivshi^ai^a', 'yvshi^ui^u', 'yvshi^ai^a',
'shchimi', 'shchymi', 'shchego', 'shchogo',
'shchemu', 'shchomu', 'shchikh', 'shchykh',
'shchui^u', 'shchai^a', 'shchoi^u',
'shchei^u', 'ivshimi', 'ivshymi',
'ivshego', 'ivshogo', 'ivshemu', 'ivshomu',
'ivshikh', 'ivshykh', 'ivshui^u',
'ivshai^a', 'ivshoi^u', 'ivshei^u',
'yvshimi', 'yvshymi', 'yvshego', 'yvshogo',
'yvshemu', 'yvshomu', 'yvshikh', 'yvshykh',
'yvshui^u', 'yvshai^a', 'yvshoi^u',
'yvshei^u', 'vshi^ui^u', 'vshi^ai^a',
'shchee', 'shchie', 'shchye', 'shchoe',
'shchei`', 'shchii`', 'shchyi`', 'shchoi`',
'shchem', 'shchim', 'shchym', 'shchom',
'ivshee', 'ivshie', 'ivshye', 'ivshoe',
'ivshei`', 'ivshii`', 'ivshyi`',
'ivshoi`', 'ivshem', 'ivshim', 'ivshym',
'ivshom', 'yvshee', 'yvshie', 'yvshye',
'yvshoe', 'yvshei`', 'yvshii`',
'yvshyi`', 'yvshoi`', 'yvshem',
'yvshim', 'yvshym', 'yvshom', 'vshimi',
'vshymi', 'vshego', 'vshogo', 'vshemu',
'vshomu', 'vshikh', 'vshykh', 'vshui^u',
'vshai^a', 'vshoi^u', 'vshei^u',
'emi^ui^u', 'emi^ai^a', 'nni^ui^u',
'nni^ai^a', 'vshee',
'vshie', 'vshye', 'vshoe', 'vshei`',
'vshii`', 'vshyi`', 'vshoi`',
'vshem', 'vshim', 'vshym', 'vshom',
'emimi', 'emymi', 'emego', 'emogo',
'ememu', 'emomu', 'emikh', 'emykh',
'emui^u', 'emai^a', 'emoi^u', 'emei^u',
'nnimi', 'nnymi', 'nnego', 'nnogo',
'nnemu', 'nnomu', 'nnikh', 'nnykh',
'nnui^u', 'nnai^a', 'nnoi^u', 'nnei^u',
'emee', 'emie', 'emye', 'emoe',
'emei`', 'emii`', 'emyi`',
'emoi`', 'emem', 'emim', 'emym',
'emom', 'nnee', 'nnie', 'nnye', 'nnoe',
'nnei`', 'nnii`', 'nnyi`',
'nnoi`', 'nnem', 'nnim', 'nnym',
'nnom', 'i^ui^u', 'i^ai^a', 'imi', 'ymi',
'ego', 'ogo', 'emu', 'omu', 'ikh',
'ykh', 'ui^u', 'ai^a', 'oi^u', 'ei^u',
'ee', 'ie', 'ye', 'oe', 'ei`',
'ii`', 'yi`', 'oi`', 'em',
'im', 'ym', 'om')
__reflexive_suffixes = ("si^a", "s'")
__verb_suffixes = ("esh'", 'ei`te', 'ui`te', 'ui^ut',
"ish'", 'ete', 'i`te', 'i^ut', 'nno',
'ila', 'yla', 'ena', 'ite', 'ili', 'yli',
'ilo', 'ylo', 'eno', 'i^at', 'uet', 'eny',
"it'", "yt'", 'ui^u', 'la', 'na', 'li',
'em', 'lo', 'no', 'et', 'ny', "t'",
'ei`', 'ui`', 'il', 'yl', 'im',
'ym', 'en', 'it', 'yt', 'i^u', 'i`',
'l', 'n')
__noun_suffixes = ('ii^ami', 'ii^akh', 'i^ami', 'ii^am', 'i^akh',
'ami', 'iei`', 'i^am', 'iem', 'akh',
'ii^u', "'i^u", 'ii^a', "'i^a", 'ev', 'ov',
'ie', "'e", 'ei', 'ii', 'ei`',
'oi`', 'ii`', 'em', 'am', 'om',
'i^u', 'i^a', 'a', 'e', 'i', 'i`',
'o', 'u', 'y', "'")
__superlative_suffixes = ("ei`she", "ei`sh")
__derivational_suffixes = ("ost'", "ost")
def stem(self, word):
"""
Stem a Russian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
if word in self.stopwords:
return word
        chr_exceeded = any(ord(ch) > 255 for ch in word)
if chr_exceeded:
word = self.__cyrillic_to_roman(word)
step1_success = False
adjectival_removed = False
verb_removed = False
undouble_success = False
superlative_removed = False
rv, r2 = self.__regions_russian(word)
# Step 1
for suffix in self.__perfective_gerund_suffixes:
if rv.endswith(suffix):
if suffix in ("v", "vshi", "vshis'"):
if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
rv[-len(suffix)-1:-len(suffix)] == "a"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
step1_success = True
break
else:
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
step1_success = True
break
if not step1_success:
for suffix in self.__reflexive_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
break
for suffix in self.__adjectival_suffixes:
if rv.endswith(suffix):
if suffix in ('i^ushchi^ui^u', 'i^ushchi^ai^a',
'i^ushchui^u', 'i^ushchai^a', 'i^ushchoi^u',
'i^ushchei^u', 'i^ushchimi', 'i^ushchymi',
'i^ushchego', 'i^ushchogo', 'i^ushchemu',
'i^ushchomu', 'i^ushchikh', 'i^ushchykh',
'shchi^ui^u', 'shchi^ai^a', 'i^ushchee',
'i^ushchie', 'i^ushchye', 'i^ushchoe',
'i^ushchei`', 'i^ushchii`', 'i^ushchyi`',
'i^ushchoi`', 'i^ushchem', 'i^ushchim',
'i^ushchym', 'i^ushchom', 'vshi^ui^u',
'vshi^ai^a', 'shchui^u', 'shchai^a',
'shchoi^u', 'shchei^u', 'emi^ui^u',
'emi^ai^a', 'nni^ui^u', 'nni^ai^a',
'shchimi', 'shchymi', 'shchego', 'shchogo',
'shchemu', 'shchomu', 'shchikh', 'shchykh',
'vshui^u', 'vshai^a', 'vshoi^u', 'vshei^u',
'shchee', 'shchie', 'shchye', 'shchoe',
'shchei`', 'shchii`', 'shchyi`', 'shchoi`',
'shchem', 'shchim', 'shchym', 'shchom',
'vshimi', 'vshymi', 'vshego', 'vshogo',
'vshemu', 'vshomu', 'vshikh', 'vshykh',
'emui^u', 'emai^a', 'emoi^u', 'emei^u',
'nnui^u', 'nnai^a', 'nnoi^u', 'nnei^u',
'vshee', 'vshie', 'vshye', 'vshoe',
'vshei`', 'vshii`', 'vshyi`', 'vshoi`',
'vshem', 'vshim', 'vshym', 'vshom',
'emimi', 'emymi', 'emego', 'emogo',
'ememu', 'emomu', 'emikh', 'emykh',
'nnimi', 'nnymi', 'nnego', 'nnogo',
'nnemu', 'nnomu', 'nnikh', 'nnykh',
'emee', 'emie', 'emye', 'emoe', 'emei`',
'emii`', 'emyi`', 'emoi`', 'emem', 'emim',
'emym', 'emom', 'nnee', 'nnie', 'nnye',
'nnoe', 'nnei`', 'nnii`', 'nnyi`', 'nnoi`',
'nnem', 'nnim', 'nnym', 'nnom'):
if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
rv[-len(suffix)-1:-len(suffix)] == "a"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
adjectival_removed = True
break
else:
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
adjectival_removed = True
break
if not adjectival_removed:
for suffix in self.__verb_suffixes:
if rv.endswith(suffix):
if suffix in ("la", "na", "ete", "i`te", "li",
"i`", "l", "em", "n", "lo", "no",
"et", "i^ut", "ny", "t'", "esh'",
"nno"):
if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
rv[-len(suffix)-1:-len(suffix)] == "a"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
verb_removed = True
break
else:
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
verb_removed = True
break
if not adjectival_removed and not verb_removed:
for suffix in self.__noun_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# Step 2
if rv.endswith("i"):
word = word[:-1]
r2 = r2[:-1]
# Step 3
for suffix in self.__derivational_suffixes:
if r2.endswith(suffix):
word = word[:-len(suffix)]
break
# Step 4
if word.endswith("nn"):
word = word[:-1]
undouble_success = True
if not undouble_success:
for suffix in self.__superlative_suffixes:
if word.endswith(suffix):
word = word[:-len(suffix)]
superlative_removed = True
break
if word.endswith("nn"):
word = word[:-1]
if not undouble_success and not superlative_removed:
if word.endswith("'"):
word = word[:-1]
if chr_exceeded:
word = self.__roman_to_cyrillic(word)
return word
def __regions_russian(self, word):
"""
Return the regions RV and R2 which are used by the Russian stemmer.
In any word, RV is the region after the first vowel,
or the end of the word if it contains no vowel.
R2 is the region after the first non-vowel following
a vowel in R1, or the end of the word if there is no such non-vowel.
R1 is the region after the first non-vowel following a vowel,
or the end of the word if there is no such non-vowel.
:param word: The Russian word whose regions RV and R2 are determined.
:type word: str or unicode
:return: the regions RV and R2 for the respective Russian word.
:rtype: tuple
:note: This helper method is invoked by the stem method of the subclass
RussianStemmer. It is not to be invoked directly!
"""
r1 = ""
r2 = ""
rv = ""
vowels = ("A", "U", "E", "a", "e", "i", "o", "u", "y")
word = (word.replace("i^a", "A")
.replace("i^u", "U")
.replace("e`", "E"))
for i in range(1, len(word)):
if word[i] not in vowels and word[i-1] in vowels:
r1 = word[i+1:]
break
for i in range(1, len(r1)):
if r1[i] not in vowels and r1[i-1] in vowels:
r2 = r1[i+1:]
break
for i in range(len(word)):
if word[i] in vowels:
rv = word[i+1:]
break
r2 = (r2.replace("A", "i^a")
.replace("U", "i^u")
.replace("E", "e`"))
rv = (rv.replace("A", "i^a")
.replace("U", "i^u")
.replace("E", "e`"))
return (rv, r2)
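    # Worked example (hand-traced, for illustration only): for the
    # transliterated word "molodost", the first vowel is the "o" at index 1,
    # so RV = "lodost"; R1 = "odost" (the region after the first non-vowel
    # that follows a vowel) and R2 = "ost" (the same rule applied inside R1).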
def __cyrillic_to_roman(self, word):
"""
Transliterate a Russian word into the Roman alphabet.
        A Russian word written in the Cyrillic alphabet is
        transliterated into the Roman alphabet in order to ease
        the forthcoming stemming process.
:param word: The word that is transliterated.
:type word: unicode
:return: the transliterated word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
RussianStemmer. It is not to be invoked directly!
"""
word = (word.replace("\u0410", "a").replace("\u0430", "a")
.replace("\u0411", "b").replace("\u0431", "b")
.replace("\u0412", "v").replace("\u0432", "v")
.replace("\u0413", "g").replace("\u0433", "g")
.replace("\u0414", "d").replace("\u0434", "d")
.replace("\u0415", "e").replace("\u0435", "e")
.replace("\u0401", "e").replace("\u0451", "e")
.replace("\u0416", "zh").replace("\u0436", "zh")
.replace("\u0417", "z").replace("\u0437", "z")
.replace("\u0418", "i").replace("\u0438", "i")
.replace("\u0419", "i`").replace("\u0439", "i`")
.replace("\u041A", "k").replace("\u043A", "k")
.replace("\u041B", "l").replace("\u043B", "l")
.replace("\u041C", "m").replace("\u043C", "m")
.replace("\u041D", "n").replace("\u043D", "n")
.replace("\u041E", "o").replace("\u043E", "o")
.replace("\u041F", "p").replace("\u043F", "p")
.replace("\u0420", "r").replace("\u0440", "r")
.replace("\u0421", "s").replace("\u0441", "s")
.replace("\u0422", "t").replace("\u0442", "t")
.replace("\u0423", "u").replace("\u0443", "u")
.replace("\u0424", "f").replace("\u0444", "f")
.replace("\u0425", "kh").replace("\u0445", "kh")
.replace("\u0426", "t^s").replace("\u0446", "t^s")
.replace("\u0427", "ch").replace("\u0447", "ch")
.replace("\u0428", "sh").replace("\u0448", "sh")
.replace("\u0429", "shch").replace("\u0449", "shch")
.replace("\u042A", "''").replace("\u044A", "''")
.replace("\u042B", "y").replace("\u044B", "y")
.replace("\u042C", "'").replace("\u044C", "'")
.replace("\u042D", "e`").replace("\u044D", "e`")
.replace("\u042E", "i^u").replace("\u044E", "i^u")
.replace("\u042F", "i^a").replace("\u044F", "i^a"))
return word
def __roman_to_cyrillic(self, word):
"""
Transliterate a Russian word back into the Cyrillic alphabet.
        A Russian word that was transliterated into the Roman alphabet
        to ease the stemming process is transliterated back into the
        Cyrillic alphabet, its original form.
:param word: The word that is transliterated.
:type word: str or unicode
        :return: the transliterated word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
RussianStemmer. It is not to be invoked directly!
"""
word = (word.replace("i^u", "\u044E").replace("i^a", "\u044F")
.replace("shch", "\u0449").replace("kh", "\u0445")
.replace("t^s", "\u0446").replace("ch", "\u0447")
.replace("e`", "\u044D").replace("i`", "\u0439")
.replace("sh", "\u0448").replace("k", "\u043A")
.replace("e", "\u0435").replace("zh", "\u0436")
.replace("a", "\u0430").replace("b", "\u0431")
.replace("v", "\u0432").replace("g", "\u0433")
.replace("d", "\u0434").replace("e", "\u0435")
.replace("z", "\u0437").replace("i", "\u0438")
.replace("l", "\u043B").replace("m", "\u043C")
.replace("n", "\u043D").replace("o", "\u043E")
.replace("p", "\u043F").replace("r", "\u0440")
.replace("s", "\u0441").replace("t", "\u0442")
.replace("u", "\u0443").replace("f", "\u0444")
.replace("''", "\u044A").replace("y", "\u044B")
.replace("'", "\u044C"))
return word
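# Illustrative usage (a sketch, not part of the NLTK API; RussianStemmer and
# its no-argument constructor are defined above, and stopword handling comes
# from the base class):
def _russian_stemmer_example():
    stemmer = RussianStemmer()
    # Cyrillic input is transliterated to Roman, stemmed, and transliterated
    # back: u"\u043a\u043d\u0438\u0433\u0438" ("knigi", books) loses the
    # noun suffix "i" and is returned as u"\u043a\u043d\u0438\u0433" ("knig").
    return stemmer.stem(u"\u043a\u043d\u0438\u0433\u0438")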
class SpanishStemmer(_StandardStemmer):
"""
The Spanish Snowball stemmer.
:cvar __vowels: The Spanish vowels.
:type __vowels: unicode
:cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
:type __step0_suffixes: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
:type __step2a_suffixes: tuple
:cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
:type __step2b_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:note: A detailed description of the Spanish
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/spanish/stemmer.html
"""
__vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xFC"
__step0_suffixes = ("selas", "selos", "sela", "selo", "las",
"les", "los", "nos", "me", "se", "la", "le",
"lo")
__step1_suffixes = ('amientos', 'imientos', 'amiento', 'imiento',
'aciones', 'uciones', 'adoras', 'adores',
'ancias', 'log\xEDas', 'encias', 'amente',
'idades', 'anzas', 'ismos', 'ables', 'ibles',
'istas', 'adora', 'aci\xF3n', 'antes',
'ancia', 'log\xEDa', 'uci\xf3n', 'encia',
'mente', 'anza', 'icos', 'icas', 'ismo',
'able', 'ible', 'ista', 'osos', 'osas',
'ador', 'ante', 'idad', 'ivas', 'ivos',
'ico',
'ica', 'oso', 'osa', 'iva', 'ivo')
__step2a_suffixes = ('yeron', 'yendo', 'yamos', 'yais', 'yan',
'yen', 'yas', 'yes', 'ya', 'ye', 'yo',
'y\xF3')
__step2b_suffixes = ('ar\xEDamos', 'er\xEDamos', 'ir\xEDamos',
'i\xE9ramos', 'i\xE9semos', 'ar\xEDais',
'aremos', 'er\xEDais', 'eremos',
'ir\xEDais', 'iremos', 'ierais', 'ieseis',
'asteis', 'isteis', '\xE1bamos',
'\xE1ramos', '\xE1semos', 'ar\xEDan',
'ar\xEDas', 'ar\xE9is', 'er\xEDan',
'er\xEDas', 'er\xE9is', 'ir\xEDan',
'ir\xEDas', 'ir\xE9is',
'ieran', 'iesen', 'ieron', 'iendo', 'ieras',
'ieses', 'abais', 'arais', 'aseis',
'\xE9amos', 'ar\xE1n', 'ar\xE1s',
'ar\xEDa', 'er\xE1n', 'er\xE1s',
'er\xEDa', 'ir\xE1n', 'ir\xE1s',
'ir\xEDa', 'iera', 'iese', 'aste', 'iste',
'aban', 'aran', 'asen', 'aron', 'ando',
'abas', 'adas', 'idas', 'aras', 'ases',
'\xEDais', 'ados', 'idos', 'amos', 'imos',
'emos', 'ar\xE1', 'ar\xE9', 'er\xE1',
'er\xE9', 'ir\xE1', 'ir\xE9', 'aba',
'ada', 'ida', 'ara', 'ase', '\xEDan',
'ado', 'ido', '\xEDas', '\xE1is',
'\xE9is', '\xEDa', 'ad', 'ed', 'id',
'an', 'i\xF3', 'ar', 'er', 'ir', 'as',
'\xEDs', 'en', 'es')
__step3_suffixes = ("os", "a", "e", "o", "\xE1",
"\xE9", "\xED", "\xF3")
def stem(self, word):
"""
Stem a Spanish word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
step1_success = False
r1, r2 = self._r1r2_standard(word, self.__vowels)
rv = self._rv_standard(word, self.__vowels)
# STEP 0: Attached pronoun
for suffix in self.__step0_suffixes:
if not (word.endswith(suffix) and rv.endswith(suffix)):
continue
if ((rv[:-len(suffix)].endswith(("ando", "\xE1ndo",
"ar", "\xE1r",
"er", "\xE9r",
"iendo", "i\xE9ndo",
"ir", "\xEDr"))) or
(rv[:-len(suffix)].endswith("yendo") and
word[:-len(suffix)].endswith("uyendo"))):
word = self.__replace_accented(word[:-len(suffix)])
r1 = self.__replace_accented(r1[:-len(suffix)])
r2 = self.__replace_accented(r2[:-len(suffix)])
rv = self.__replace_accented(rv[:-len(suffix)])
break
# STEP 1: Standard suffix removal
for suffix in self.__step1_suffixes:
if not word.endswith(suffix):
continue
if suffix == "amente" and r1.endswith(suffix):
step1_success = True
word = word[:-6]
r2 = r2[:-6]
rv = rv[:-6]
if r2.endswith("iv"):
word = word[:-2]
r2 = r2[:-2]
rv = rv[:-2]
if r2.endswith("at"):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith(("os", "ic", "ad")):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith(suffix):
step1_success = True
if suffix in ("adora", "ador", "aci\xF3n", "adoras",
"adores", "aciones", "ante", "antes",
"ancia", "ancias"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith("ic"):
word = word[:-2]
rv = rv[:-2]
elif suffix in ("log\xEDa", "log\xEDas"):
word = suffix_replace(word, suffix, "log")
rv = suffix_replace(rv, suffix, "log")
elif suffix in ("uci\xF3n", "uciones"):
word = suffix_replace(word, suffix, "u")
rv = suffix_replace(rv, suffix, "u")
elif suffix in ("encia", "encias"):
word = suffix_replace(word, suffix, "ente")
rv = suffix_replace(rv, suffix, "ente")
elif suffix == "mente":
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith(("ante", "able", "ible")):
word = word[:-4]
rv = rv[:-4]
elif suffix in ("idad", "idades"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
for pre_suff in ("abil", "ic", "iv"):
if r2.endswith(pre_suff):
word = word[:-len(pre_suff)]
rv = rv[:-len(pre_suff)]
elif suffix in ("ivo", "iva", "ivos", "ivas"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith("at"):
word = word[:-2]
rv = rv[:-2]
else:
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 2a: Verb suffixes beginning 'y'
if not step1_success:
for suffix in self.__step2a_suffixes:
if (rv.endswith(suffix) and
word[-len(suffix)-1:-len(suffix)] == "u"):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 2b: Other verb suffixes
for suffix in self.__step2b_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
if suffix in ("en", "es", "\xE9is", "emos"):
if word.endswith("gu"):
word = word[:-1]
if rv.endswith("gu"):
rv = rv[:-1]
break
# STEP 3: Residual suffix
for suffix in self.__step3_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
if suffix in ("e", "\xE9"):
rv = rv[:-len(suffix)]
if word[-2:] == "gu" and rv.endswith("u"):
word = word[:-1]
break
word = self.__replace_accented(word)
return word
def __replace_accented(self, word):
"""
        Replaces all accented letters in a word with their non-accented
        counterparts.
        :param word: A Spanish word, with or without accents
:type word: str or unicode
:return: a word with the accented letters (á, é, í, ó, ú) replaced with
their non-accented counterparts (a, e, i, o, u)
:rtype: str or unicode
"""
return (word.replace("\xE1", "a")
.replace("\xE9", "e")
.replace("\xED", "i")
.replace("\xF3", "o")
.replace("\xFA", "u"))
class SwedishStemmer(_ScandinavianStemmer):
"""
The Swedish Snowball stemmer.
:cvar __vowels: The Swedish vowels.
:type __vowels: unicode
:cvar __s_ending: Letters that may directly appear before a word final 's'.
:type __s_ending: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:note: A detailed description of the Swedish
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/swedish/stemmer.html
"""
__vowels = "aeiouy\xE4\xE5\xF6"
__s_ending = "bcdfghjklmnoprtvy"
__step1_suffixes = ("heterna", "hetens", "heter", "heten",
"anden", "arnas", "ernas", "ornas", "andes",
"andet", "arens", "arna", "erna", "orna",
"ande", "arne", "aste", "aren", "ades",
"erns", "ade", "are", "ern", "ens", "het",
"ast", "ad", "en", "ar", "er", "or", "as",
"es", "at", "a", "e", "s")
__step2_suffixes = ("dd", "gd", "nn", "dt", "gt", "kt", "tt")
__step3_suffixes = ("fullt", "l\xF6st", "els", "lig", "ig")
def stem(self, word):
"""
Stem a Swedish word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
if word in self.stopwords:
return word
r1 = self._r1_scandinavian(word, self.__vowels)
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if suffix == "s":
if word[-2] in self.__s_ending:
word = word[:-1]
r1 = r1[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 2
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
word = word[:-1]
r1 = r1[:-1]
break
# STEP 3
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
if suffix in ("els", "lig", "ig"):
word = word[:-len(suffix)]
elif suffix in ("fullt", "l\xF6st"):
word = word[:-1]
break
return word
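# Illustrative usage (a sketch, traced by hand through the steps above):
#
#     stemmer = SwedishStemmer()
#     stemmer.stem(u"flickorna")   # step 1 removes "orna" -> u"flick"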
def demo():
"""
This function provides a demonstration of the Snowball stemmers.
After invoking this function and specifying a language,
it stems an excerpt of the Universal Declaration of Human Rights
(which is a part of the NLTK corpus collection) and then prints
out the original and the stemmed text.
"""
import re
from nltk.corpus import udhr
udhr_corpus = {"danish": "Danish_Dansk-Latin1",
"dutch": "Dutch_Nederlands-Latin1",
"english": "English-Latin1",
"finnish": "Finnish_Suomi-Latin1",
"french": "French_Francais-Latin1",
"german": "German_Deutsch-Latin1",
"hungarian": "Hungarian_Magyar-UTF8",
"italian": "Italian_Italiano-Latin1",
"norwegian": "Norwegian-Latin1",
"porter": "English-Latin1",
"portuguese": "Portuguese_Portugues-Latin1",
"romanian": "Romanian_Romana-Latin2",
"russian": "Russian-UTF8",
"spanish": "Spanish-Latin1",
"swedish": "Swedish_Svenska-Latin1",
}
print("\n")
print("******************************")
print("Demo for the Snowball stemmers")
print("******************************")
while True:
language = compat.raw_input("Please enter the name of the language " +
"to be demonstrated\n" +
"/".join(SnowballStemmer.languages) +
"\n" +
"(enter 'exit' in order to leave): ")
if language == "exit":
break
if language not in SnowballStemmer.languages:
print(("\nOops, there is no stemmer for this language. " +
"Please try again.\n"))
continue
stemmer = SnowballStemmer(language)
        excerpt = udhr.words(udhr_corpus[language])[:300]
stemmed = " ".join(stemmer.stem(word) for word in excerpt)
stemmed = re.sub(r"(.{,70})\s", r'\1\n', stemmed+' ').rstrip()
excerpt = " ".join(excerpt)
excerpt = re.sub(r"(.{,70})\s", r'\1\n', excerpt+' ').rstrip()
print("\n")
print('-' * 70)
print('ORIGINAL'.center(70))
print(excerpt)
print("\n\n")
print('STEMMED RESULTS'.center(70))
print(stemmed)
print('-' * 70)
print("\n")
| nelango/ViralityAnalysis | model/lib/nltk/stem/snowball.py | Python | mit | 145,854 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from acq4.util import Qt
import acq4.pyqtgraph as pg
from .CanvasItem import CanvasItem
from .itemtypes import registerItemType
class GridCanvasItem(CanvasItem):
_typeName = "Grid"
def __init__(self, **kwds):
kwds.pop('viewRect', None)
item = pg.GridItem()
CanvasItem.__init__(self, item, **kwds)
registerItemType(GridCanvasItem)
class RulerCanvasItem(CanvasItem):
_typeName = "Ruler"
def __init__(self, points=None, **kwds):
vr = kwds.pop('viewRect', None)
if points is None:
if vr is None:
points = ((0, 0), (1, 1))
else:
p1 = vr.center()
p2 = p1 + 0.2 * (vr.topRight()-p1)
points = ((p1.x(), p1.y()), (p2.x(), p2.y()))
item = pg.graphicsItems.ROI.RulerROI(points)
CanvasItem.__init__(self, item, **kwds)
registerItemType(RulerCanvasItem)
class SvgCanvasItem(CanvasItem):
_typeName = "SVG"
def __init__(self, handle, **opts):
opts['handle'] = handle
item = Qt.QGraphicsSvgItem(handle.name())
CanvasItem.__init__(self, item, **opts)
@classmethod
def checkFile(cls, fh):
if fh.isFile() and fh.ext() == '.svg':
return 100
else:
return 0
registerItemType(SvgCanvasItem)
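# A further item type would follow the same pattern (hypothetical sketch,
# shown only to illustrate the registration mechanism; pg.InfiniteLine is a
# stock pyqtgraph item):
#
# class LineCanvasItem(CanvasItem):
#     _typeName = "Line"
#
#     def __init__(self, **kwds):
#         kwds.pop('viewRect', None)
#         item = pg.InfiniteLine(angle=90, movable=True)
#         CanvasItem.__init__(self, item, **kwds)
#
# registerItemType(LineCanvasItem)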
| pbmanis/acq4 | acq4/util/Canvas/items/simpleitems.py | Python | mit | 1,421 |
"""
Django settings for kboard project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fbk#a_$7&@566onvmd1xfxyszz)npb+d5gq#y9q(n0wg_k)v0x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'accounts.Account'
# Application definition
INSTALLED_APPS = [
'accounts',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
'board',
'django_summernote',
'djangobower',
'pipeline',
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kboard.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'core.context_processors.navbar'
],
},
},
]
WSGI_APPLICATION = 'kboard.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
if DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, '../database/db.sqlite3'),
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'kboard',
'USER': 'root',
'PASSWORD': 'root'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Bower settings
BOWER_INSTALLED_APPS = [
'jquery#3.1.1',
'bootstrap#3.3.7'
]
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, '../')
# Summernote settings
SUMMERNOTE_CONFIG = {}
# pipeline settings
PIPELINE = {
    'COMPILERS': (
        'libsasscompiler.LibSassCompiler',
    ),
'JAVASCRIPT': {
'main': {
'source_filenames': [
'js/*.js'
],
'output_filename': 'js/vendor.js'
},
},
'STYLESHEETS': {
'main': {
'source_filenames': [
'style/*.scss'
],
'output_filename': 'style/main.css'
},
},
}
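# Template usage sketch (assumption: django-pipeline's template tags):
#   {% load pipeline %}
#   {% stylesheet 'main' %}   -> emits the compiled style/main.css bundle
#   {% javascript 'main' %}   -> emits the compiled js/vendor.js bundle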
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, '../static')
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
os.path.join(BOWER_COMPONENTS_ROOT, 'bower_components'),
]
MEDIA_URL = '/file/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'file')
# Registration
# https://django-registration.readthedocs.io/en/2.1.2/index.html
ACCOUNT_ACTIVATION_DAYS = 7
# Email Activation
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = os.environ.get('KBOARD_EMAIL')
EMAIL_HOST_PASSWORD = os.environ.get('KBOARD_PASSWORD')
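# Shell sketch (variable names taken from the os.environ lookups above):
#   export KBOARD_EMAIL='account@gmail.com'
#   export KBOARD_PASSWORD='app-specific-password'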
# When Login success, go to main page.
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
| kboard/kboard | kboard/kboard/settings.py | Python | mit | 5,166 |
"""Test for the once bitten strategy."""
import random
import axelrod
from .test_player import TestPlayer
C, D = 'C', 'D'
class TestOnceBitten(TestPlayer):
name = "Once Bitten"
player = axelrod.OnceBitten
expected_classifier = {
'memory_depth': 12,
'stochastic': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_initial_strategy(self):
"""Starts by cooperating."""
P1 = axelrod.OnceBitten()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), C)
def test_strategy(self):
"""If opponent defects at any point then the player will defect
forever."""
P1 = axelrod.OnceBitten()
P2 = axelrod.Player()
# Starts by playing C
self.assertEqual(P1.strategy(P2), C)
self.assertEqual(P1.grudged, False)
P2.history.append(C)
self.assertEqual(P1.strategy(P2), C)
self.assertEqual(P1.grudged, False)
P2.history.append(C)
self.assertEqual(P1.strategy(P2), C)
self.assertEqual(P1.grudged, False)
P2.history.append(D)
self.assertEqual(P1.strategy(P2), C)
self.assertEqual(P1.grudged, False)
P2.history.append(D)
self.assertEqual(P2.history, [C, C, D, D])
self.assertEqual(P1.strategy(P2), D)
self.assertEqual(P1.grudged, True)
for turn in range(P1.mem_length-1):
self.assertEqual(P1.strategy(P2), D)
# Doesn't matter what opponent plays now
P2.history.append(C)
self.assertEqual(P1.grudged, True)
P2.history.append(D)
self.assertEqual(P1.grudged, True)
self.assertEqual(P1.strategy(P2), D)
self.assertEqual(P1.grudge_memory, 10)
self.assertEqual(P1.grudged, True)
P2.history.append(C)
def test_reset(self):
"""Check that grudged gets reset properly"""
P1 = self.player()
P1.history = [C, D]
P2 = axelrod.Player()
P2.history = [D, D]
self.assertEqual(P1.strategy(P2), D)
self.assertTrue(P1.grudged)
P1.reset()
self.assertFalse(P1.grudged)
self.assertEqual(P1.history, [])
class TestFoolMeOnce(TestPlayer):
name = "Fool Me Once"
player = axelrod.FoolMeOnce
expected_classifier = {
'memory_depth': float('inf'), # Long memory
'stochastic': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_initial(self):
self.first_play_test(C)
def test_strategy(self):
"""
If opponent defects more than once, defect forever
"""
self.responses_test([C], [D], [C])
self.responses_test([C, C], [D, D], [D])
self.responses_test([C, C], [D, C], [C])
self.responses_test([C, C, C], [D, D, D], [D])
class TestForgetfulFoolMeOnce(TestPlayer):
name = 'Forgetful Fool Me Once'
player = axelrod.ForgetfulFoolMeOnce
expected_classifier = {
'memory_depth': float('inf'), # Long memory
'stochastic': True,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_initial(self):
self.first_play_test(C)
def test_strategy(self):
"""Test that will forgive one D but will grudge after 2 Ds, randomly
forgets count"""
random.seed(2)
self.responses_test([C], [D], [C])
self.responses_test([C, C], [D, D], [D])
        # The defection count is eventually forgotten at random:
self.responses_test([C, C], [D, D], [D] * 13 + [C])
def test_reset(self):
"""Check that count gets reset properly"""
P1 = self.player()
P1.history = [C, D]
P2 = axelrod.Player()
P2.history = [D]
random.seed(1)
self.assertEqual(P1.strategy(P2), C)
self.assertEqual(P1.D_count, 1)
P1.reset()
self.assertEqual(P1.D_count, 0)
self.assertEqual(P1.history, [])
class TestFoolMeForever(TestPlayer):
name = "Fool Me Forever"
player = axelrod.FoolMeForever
expected_classifier = {
'memory_depth': float('inf'), # Long memory
'stochastic': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_strategy(self):
"""
If opponent defects more than once, defect forever
"""
self.responses_test([], [], [D])
self.responses_test([D], [D], [C])
self.responses_test([D], [C], [D])
self.responses_test([D, C], [D, C], [C])
self.responses_test([D, C, C], [D, C, C], [C])
| emmagordon/Axelrod | axelrod/tests/unit/test_oncebitten.py | Python | mit | 4,785 |
#!./env/bin/python
""" Windows Registry Network Query
Lists the network names and MAC addresses of the networks that
this computer has connected to. If the location command is given,
prints the coordinates of each network that appears in the Wigle
database.
Don't be a moron, please don't use this for something illegal.
Usage:
windows_registry.py
windows_registry.py location <username> <password>
windows_registry.py -h | --help
windows_registry.py --version
Options:
-h, --help Display this message
--version Display the version of this program
"""
import mechanize
import urllib
import re
from _winreg import OpenKey, EnumKey, EnumValue, HKEY_LOCAL_MACHINE, CloseKey
from docopt import docopt
def binary2mac(binary):
address = ""
for char in binary:
address += ("%02x " % ord(char))
address = address.strip(" ").replace(" ", ":")[0:17]
return address
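# For example (hand-traced): binary2mac('\x00\x1a\x2b\x3c\x4d\x5e')
# returns '00:1a:2b:3c:4d:5e'.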
def wigle_print(username, password, netid):
browser = mechanize.Browser()
browser.open('http://wigle.net')
reqData = urllib.urlencode({'credential_0': username,
'credential_1': password})
browser.open('https://wigle.net//gps/gps/main/login', reqData)
params = {}
params['netid'] = netid
reqParams = urllib.urlencode(params)
respURL = 'http://wigle.net/gps/gps/main/confirmquery/'
resp = browser.open(respURL, reqParams).read()
mapLat = 'N/A'
mapLon = 'N/A'
rLat = re.findall(r'maplat=.*\&', resp)
if rLat:
mapLat = rLat[0].split('&')[0].split('=')[1]
rLon = re.findall(r'maplon=.*\&', resp)
if rLon:
        mapLon = rLon[0].split('&')[0].split('=')[1]
print '[-] Lat: ' + mapLat + ', Lon: ' + mapLon
def print_networks(username=None, password=None):
net = "SOFTWARE\Microsoft\Windows NT\CurrentVersion\NetworkList\Signatures\Unmanaged"
key = OpenKey(HKEY_LOCAL_MACHINE, net)
print '\n[*] Networks You have Joined.'
for i in range(100):
try:
guid = EnumKey(key, i)
netKey = OpenKey(key, str(guid))
(n, addr, t) = EnumValue(netKey, 5)
(n, name, t) = EnumValue(netKey, 4)
mac = binary2mac(addr)
net_name = str(name)
print '[+] ' + net_name + ' ' + mac
wigle_print(username, password, mac)
CloseKey(netKey)
except:
break
def main():
arguments = docopt(__doc__, version=0.1)
if arguments['location']:
print_networks(username=arguments['username'], password=arguments['password'])
else:
print_networks()
if __name__ == '__main__':
main()
| JackStouffer/Violent-Python | forensic/windows_registry.py | Python | mit | 2,693 |
# Usage: mitmdump -s "iframe_injector.py url"
# (this script works best with --anticache)
import sys
from bs4 import BeautifulSoup
class Injector:
def __init__(self, iframe_url):
self.iframe_url = iframe_url
def response(self, flow):
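        # Skip responses served from the iframe's own host so that the
        # injected page is not itself re-injected.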
if flow.request.host in self.iframe_url:
return
html = BeautifulSoup(flow.response.content, "html.parser")
if html.body:
iframe = html.new_tag(
"iframe",
src=self.iframe_url,
frameborder=0,
height=0,
width=0)
html.body.insert(0, iframe)
flow.response.content = str(html).encode("utf8")
def start():
if len(sys.argv) != 2:
raise ValueError('Usage: -s "iframe_injector.py url"')
return Injector(sys.argv[1])
| mosajjal/mitmproxy | examples/simple/modify_body_inject_iframe.py | Python | mit | 829 |
#!/usr/bin/env python
import re
class Templates:
TOKENS = re.compile('([A-Za-z]+|[^ ])')
SIMPLE = {
'l': '_n.l.ptb()',
'r': '_n.r.ptb()',
'<': 'addr(_n)',
'>': 'addl(_n)',
}
def compile(self, template):
python = self.parse(self.TOKENS.findall(template))
return eval("lambda _n: %s" % python)
def parse(self, tokens):
t = tokens.pop(0)
if t in '([':
if t == '(':
label = "'%s'" % tokens.pop(0)
args = self.parse_args(tokens, ')')
            elif t == '[':
label = 'None'
args = self.parse_args(tokens, ']')
return 'PTB(_n, %s, %s)' % (label, ', '.join(args))
elif t in self.SIMPLE:
return self.SIMPLE[t]
else:
raise SyntaxError, "unknown token '%s'" % t
def parse_args(self, tokens, delimiter):
args = []
while tokens:
if tokens[0] == delimiter:
tokens.pop(0)
return args
args.append(self.parse(tokens))
raise SyntaxError, "missing closing '%s'" % delimiter
templates = Templates()
t = templates.compile("<")
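# For example, templates.compile("(S l r)") builds and evaluates
#     lambda _n: PTB(_n, 'S', _n.l.ptb(), _n.r.ptb())
# PTB, addl and addr are not defined in this module; they must be present in
# its namespace when the compiled lambda is called.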
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_nlu/ext/candc/src/scripts/ccg/template.py | Python | mit | 1,061 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_backend_service
description:
- A Backend Service defines a group of virtual machines that will serve traffic for
load balancing. This resource is a global backend service, appropriate for external
load balancing or self-managed internal load balancing.
- For managed internal load balancing, use a regional backend service instead.
- Currently self-managed internal load balancing is only available in beta.
short_description: Creates a GCP BackendService
version_added: '2.6'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
affinity_cookie_ttl_sec:
description:
- Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set
to 0, the cookie is non-persistent and lasts only until the end of the browser
session (or equivalent). The maximum allowed value for TTL is one day.
- When the load balancing scheme is INTERNAL, this field is not used.
required: false
type: int
backends:
description:
- The set of backends that serve this BackendService.
required: false
type: list
suboptions:
balancing_mode:
description:
- Specifies the balancing mode for this backend.
- For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION.
Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL).
- 'Some valid choices include: "UTILIZATION", "RATE", "CONNECTION"'
required: false
default: UTILIZATION
type: str
capacity_scaler:
description:
- A multiplier applied to the group's maximum servicing capacity (based on
UTILIZATION, RATE or CONNECTION).
- Default value is 1, which means the group will serve up to 100% of its configured
capacity (depending on balancingMode). A setting of 0 means the group is
completely drained, offering 0% of its available Capacity. Valid range is
[0.0,1.0].
required: false
default: '1.0'
type: str
description:
description:
- An optional description of this resource.
- Provide this property when you create the resource.
required: false
type: str
group:
description:
- The fully-qualified URL of an Instance Group or Network Endpoint Group resource.
In case of instance group this defines the list of instances that serve
traffic. Member virtual machine instances from each instance group must
live in the same zone as the instance group itself. No two backends in a
backend service are allowed to use same Instance Group resource.
- For Network Endpoint Groups this defines list of endpoints. All endpoints
of Network Endpoint Group must be hosted on instances located in the same
zone as the Network Endpoint Group.
- Backend service can not contain mix of Instance Group and Network Endpoint
Group backends.
- Note that you must specify an Instance Group or Network Endpoint Group resource
using the fully-qualified URL, rather than a partial URL.
required: false
type: str
max_connections:
description:
- The max number of simultaneous connections for the group. Can be used with
either CONNECTION or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or one of maxConnectionsPerInstance
or maxConnectionsPerEndpoint, as appropriate for group type, must be set.
required: false
type: int
max_connections_per_instance:
description:
- The max number of simultaneous connections that a single backend instance
can handle. This is used to calculate the capacity of the group. Can be
used in either CONNECTION or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or maxConnectionsPerInstance
must be set.
required: false
type: int
max_connections_per_endpoint:
description:
- The max number of simultaneous connections that a single backend network
endpoint can handle. This is used to calculate the capacity of the group.
Can be used in either CONNECTION or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or maxConnectionsPerEndpoint
must be set.
required: false
type: int
version_added: '2.9'
max_rate:
description:
- The max requests per second (RPS) of the group.
- Can be used with either RATE or UTILIZATION balancing modes, but required
if RATE mode. For RATE mode, either maxRate or one of maxRatePerInstance
or maxRatePerEndpoint, as appropriate for group type, must be set.
required: false
type: int
max_rate_per_instance:
description:
- The max requests per second (RPS) that a single backend instance can handle.
This is used to calculate the capacity of the group. Can be used in either
balancing mode. For RATE mode, either maxRate or maxRatePerInstance must
be set.
required: false
type: str
max_rate_per_endpoint:
description:
- The max requests per second (RPS) that a single backend network endpoint
can handle. This is used to calculate the capacity of the group. Can be
used in either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint
must be set.
required: false
type: str
version_added: '2.9'
max_utilization:
description:
- Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization
target for the group. The default is 0.8. Valid range is [0.0, 1.0].
required: false
default: '0.8'
type: str
cdn_policy:
description:
- Cloud CDN configuration for this BackendService.
required: false
type: dict
suboptions:
cache_key_policy:
description:
- The CacheKeyPolicy for this CdnPolicy.
required: false
type: dict
suboptions:
include_host:
description:
- If true requests to different hosts will be cached separately.
required: false
type: bool
include_protocol:
description:
- If true, http and https requests will be cached separately.
required: false
type: bool
include_query_string:
description:
- If true, include query string parameters in the cache key according
to query_string_whitelist and query_string_blacklist. If neither is
set, the entire query string will be included.
- If false, the query string will be excluded from the cache key entirely.
required: false
type: bool
query_string_blacklist:
description:
- Names of query string parameters to exclude in cache keys.
- All other parameters will be included. Either specify query_string_whitelist
or query_string_blacklist, not both.
- "'&' and '=' will be percent encoded and not treated as delimiters."
required: false
type: list
query_string_whitelist:
description:
- Names of query string parameters to include in cache keys.
- All other parameters will be excluded. Either specify query_string_whitelist
or query_string_blacklist, not both.
- "'&' and '=' will be percent encoded and not treated as delimiters."
required: false
type: list
signed_url_cache_max_age_sec:
description:
- Maximum number of seconds the response to a signed URL request will be considered
fresh, defaults to 1hr (3600s). After this time period, the response will
be revalidated before being served.
- 'When serving responses to signed URL requests, Cloud CDN will internally
behave as though all responses from this backend had a "Cache-Control: public,
max-age=[TTL]" header, regardless of any existing Cache-Control header.
The actual headers served in responses will not be altered.'
required: false
default: '3600'
type: int
version_added: '2.8'
connection_draining:
description:
- Settings for connection draining .
required: false
type: dict
suboptions:
draining_timeout_sec:
description:
- Time for which instance will be drained (not accept new connections, but
still work to finish started).
required: false
default: '300'
type: int
description:
description:
- An optional description of this resource.
required: false
type: str
enable_cdn:
description:
- If true, enable Cloud CDN for this BackendService.
required: false
type: bool
health_checks:
description:
- The set of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health
checking this BackendService. Currently at most one health check can be specified,
and a health check is required.
- For internal load balancing, a URL to a HealthCheck resource must be specified
instead.
required: true
type: list
iap:
description:
- Settings for enabling Cloud Identity Aware Proxy.
required: false
type: dict
version_added: '2.7'
suboptions:
enabled:
description:
- Enables IAP.
required: false
type: bool
oauth2_client_id:
description:
- OAuth2 Client ID for IAP .
required: true
type: str
oauth2_client_secret:
description:
- OAuth2 Client Secret for IAP .
required: true
type: str
load_balancing_scheme:
description:
- Indicates whether the backend service will be used with internal or external
load balancing. A backend service created for one type of load balancing cannot
be used with the other. Must be `EXTERNAL` or `INTERNAL_SELF_MANAGED` for a
global backend service. Defaults to `EXTERNAL`.
- 'Some valid choices include: "EXTERNAL", "INTERNAL_SELF_MANAGED"'
required: false
default: EXTERNAL
type: str
version_added: '2.7'
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
type: str
port_name:
description:
- Name of backend port. The same name should appear in the instance groups referenced
by this service. Required when the load balancing scheme is EXTERNAL.
required: false
type: str
protocol:
description:
- The protocol this BackendService uses to communicate with backends.
- 'Possible values are HTTP, HTTPS, HTTP2, TCP, and SSL. The default is HTTP.
**NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer types and may result
in errors if used with the GA API.'
- 'Some valid choices include: "HTTP", "HTTPS", "HTTP2", "TCP", "SSL"'
required: false
type: str
security_policy:
description:
- The security policy associated with this backend service.
required: false
type: str
version_added: '2.8'
session_affinity:
description:
- Type of session affinity to use. The default is NONE. Session affinity is not
applicable if the protocol is UDP.
- 'Some valid choices include: "NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO",
"GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE"'
required: false
type: str
timeout_sec:
description:
- How many seconds to wait for the backend before considering it a failed request.
Default is 30 seconds. Valid range is [1, 86400].
required: false
type: int
aliases:
- timeout_seconds
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/backendServices)'
- 'Official Documentation: U(https://cloud.google.com/compute/docs/load-balancing/http/backend-service)'
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
  env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a instance group
gcp_compute_instance_group:
name: instancegroup-backendservice
zone: us-central1-a
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: instancegroup
- name: create a HTTP health check
gcp_compute_http_health_check:
name: httphealthcheck-backendservice
healthy_threshold: 10
port: 8080
timeout_sec: 2
unhealthy_threshold: 5
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: healthcheck
- name: create a backend service
gcp_compute_backend_service:
name: test_object
backends:
- group: "{{ instancegroup.selfLink }}"
health_checks:
- "{{ healthcheck.selfLink }}"
enable_cdn: 'true'
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
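# A matching teardown would set state: absent (sketch; parameters mirror the
# EXAMPLES entry above):
#
#   - name: delete the backend service
#     gcp_compute_backend_service:
#       name: test_object
#       project: test_project
#       auth_kind: serviceaccount
#       service_account_file: "/tmp/auth.pem"
#       state: absent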
RETURN = '''
affinityCookieTtlSec:
description:
- Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set
to 0, the cookie is non-persistent and lasts only until the end of the browser
session (or equivalent). The maximum allowed value for TTL is one day.
- When the load balancing scheme is INTERNAL, this field is not used.
returned: success
type: int
backends:
description:
- The set of backends that serve this BackendService.
returned: success
type: complex
contains:
balancingMode:
description:
- Specifies the balancing mode for this backend.
- For global HTTP(S) or TCP/SSL load balancing, the default is UTILIZATION.
Valid values are UTILIZATION, RATE (for HTTP(S)) and CONNECTION (for TCP/SSL).
returned: success
type: str
capacityScaler:
description:
- A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION,
RATE or CONNECTION).
- Default value is 1, which means the group will serve up to 100% of its configured
capacity (depending on balancingMode). A setting of 0 means the group is completely
drained, offering 0% of its available Capacity. Valid range is [0.0,1.0].
returned: success
type: str
description:
description:
- An optional description of this resource.
- Provide this property when you create the resource.
returned: success
type: str
group:
description:
- The fully-qualified URL of an Instance Group or Network Endpoint Group resource.
In case of instance group this defines the list of instances that serve traffic.
Member virtual machine instances from each instance group must live in the
same zone as the instance group itself. No two backends in a backend service
are allowed to use same Instance Group resource.
- For Network Endpoint Groups this defines list of endpoints. All endpoints
of Network Endpoint Group must be hosted on instances located in the same
zone as the Network Endpoint Group.
- Backend service can not contain mix of Instance Group and Network Endpoint
Group backends.
- Note that you must specify an Instance Group or Network Endpoint Group resource
using the fully-qualified URL, rather than a partial URL.
returned: success
type: str
maxConnections:
description:
- The max number of simultaneous connections for the group. Can be used with
either CONNECTION or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or one of maxConnectionsPerInstance
or maxConnectionsPerEndpoint, as appropriate for group type, must be set.
returned: success
type: int
maxConnectionsPerInstance:
description:
- The max number of simultaneous connections that a single backend instance
can handle. This is used to calculate the capacity of the group. Can be used
in either CONNECTION or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must
be set.
returned: success
type: int
maxConnectionsPerEndpoint:
description:
- The max number of simultaneous connections that a single backend network endpoint
can handle. This is used to calculate the capacity of the group. Can be used
in either CONNECTION or UTILIZATION balancing modes.
- For CONNECTION mode, either maxConnections or maxConnectionsPerEndpoint must
be set.
returned: success
type: int
maxRate:
description:
- The max requests per second (RPS) of the group.
- Can be used with either RATE or UTILIZATION balancing modes, but required
if RATE mode. For RATE mode, either maxRate or one of maxRatePerInstance or
maxRatePerEndpoint, as appropriate for group type, must be set.
returned: success
type: int
maxRatePerInstance:
description:
- The max requests per second (RPS) that a single backend instance can handle.
This is used to calculate the capacity of the group. Can be used in either
balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be
set.
returned: success
type: str
maxRatePerEndpoint:
description:
- The max requests per second (RPS) that a single backend network endpoint can
handle. This is used to calculate the capacity of the group. Can be used in
either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint
must be set.
returned: success
type: str
maxUtilization:
description:
- Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization
target for the group. The default is 0.8. Valid range is [0.0, 1.0].
returned: success
type: str
cdnPolicy:
description:
- Cloud CDN configuration for this BackendService.
returned: success
type: complex
contains:
cacheKeyPolicy:
description:
- The CacheKeyPolicy for this CdnPolicy.
returned: success
type: complex
contains:
includeHost:
description:
- If true requests to different hosts will be cached separately.
returned: success
type: bool
includeProtocol:
description:
- If true, http and https requests will be cached separately.
returned: success
type: bool
includeQueryString:
description:
- If true, include query string parameters in the cache key according to
query_string_whitelist and query_string_blacklist. If neither is set,
the entire query string will be included.
- If false, the query string will be excluded from the cache key entirely.
returned: success
type: bool
queryStringBlacklist:
description:
- Names of query string parameters to exclude in cache keys.
- All other parameters will be included. Either specify query_string_whitelist
or query_string_blacklist, not both.
- "'&' and '=' will be percent encoded and not treated as delimiters."
returned: success
type: list
queryStringWhitelist:
description:
- Names of query string parameters to include in cache keys.
- All other parameters will be excluded. Either specify query_string_whitelist
or query_string_blacklist, not both.
- "'&' and '=' will be percent encoded and not treated as delimiters."
returned: success
type: list
signedUrlCacheMaxAgeSec:
description:
- Maximum number of seconds the response to a signed URL request will be considered
fresh, defaults to 1hr (3600s). After this time period, the response will
be revalidated before being served.
- 'When serving responses to signed URL requests, Cloud CDN will internally
behave as though all responses from this backend had a "Cache-Control: public,
max-age=[TTL]" header, regardless of any existing Cache-Control header. The
actual headers served in responses will not be altered.'
returned: success
type: int
connectionDraining:
description:
- Settings for connection draining .
returned: success
type: complex
contains:
drainingTimeoutSec:
description:
- Time for which instance will be drained (not accept new connections, but still
work to finish started).
returned: success
type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
fingerprint:
description:
- Fingerprint of this resource. A hash of the contents stored in this object. This
field is used in optimistic locking.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
enableCDN:
description:
- If true, enable Cloud CDN for this BackendService.
returned: success
type: bool
healthChecks:
description:
- The set of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health
checking this BackendService. Currently at most one health check can be specified,
and a health check is required.
- For internal load balancing, a URL to a HealthCheck resource must be specified
instead.
returned: success
type: list
id:
description:
- The unique identifier for the resource.
returned: success
type: int
iap:
description:
- Settings for enabling Cloud Identity Aware Proxy.
returned: success
type: complex
contains:
enabled:
description:
- Enables IAP.
returned: success
type: bool
oauth2ClientId:
description:
      - OAuth2 Client ID for IAP.
returned: success
type: str
oauth2ClientSecret:
description:
      - OAuth2 Client Secret for IAP.
returned: success
type: str
oauth2ClientSecretSha256:
description:
      - OAuth2 Client Secret SHA-256 for IAP.
returned: success
type: str
loadBalancingScheme:
description:
- Indicates whether the backend service will be used with internal or external load
balancing. A backend service created for one type of load balancing cannot be
used with the other. Must be `EXTERNAL` or `INTERNAL_SELF_MANAGED` for a global
backend service. Defaults to `EXTERNAL`.
returned: success
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
portName:
description:
- Name of backend port. The same name should appear in the instance groups referenced
by this service. Required when the load balancing scheme is EXTERNAL.
returned: success
type: str
protocol:
description:
- The protocol this BackendService uses to communicate with backends.
- 'Possible values are HTTP, HTTPS, HTTP2, TCP, and SSL. The default is HTTP. **NOTE**:
HTTP2 is only valid for beta HTTP/2 load balancer types and may result in errors
if used with the GA API.'
returned: success
type: str
securityPolicy:
description:
- The security policy associated with this backend service.
returned: success
type: str
sessionAffinity:
description:
- Type of session affinity to use. The default is NONE. Session affinity is not
applicable if the protocol is UDP.
returned: success
type: str
timeoutSec:
description:
- How many seconds to wait for the backend before considering it a failed request.
Default is 30 seconds. Valid range is [1, 86400].
returned: success
type: int
'''
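# An illustrative task using this module (values such as the project name and
# credentials file below are placeholders, not part of the module itself):
#
#   - name: create a backend service
#     gcp_compute_backend_service:
#       name: my-backend-service
#       health_checks:
#         - "{{ healthcheck.selfLink }}"
#       enable_cdn: true
#       project: test_project
#       auth_kind: serviceaccount
#       service_account_file: "/tmp/auth.pem"
#       state: present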
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
affinity_cookie_ttl_sec=dict(type='int'),
backends=dict(
type='list',
elements='dict',
options=dict(
balancing_mode=dict(default='UTILIZATION', type='str'),
capacity_scaler=dict(default=1.0, type='str'),
description=dict(type='str'),
group=dict(type='str'),
max_connections=dict(type='int'),
max_connections_per_instance=dict(type='int'),
max_connections_per_endpoint=dict(type='int'),
max_rate=dict(type='int'),
max_rate_per_instance=dict(type='str'),
max_rate_per_endpoint=dict(type='str'),
max_utilization=dict(default=0.8, type='str'),
),
),
cdn_policy=dict(
type='dict',
options=dict(
cache_key_policy=dict(
type='dict',
options=dict(
include_host=dict(type='bool'),
include_protocol=dict(type='bool'),
include_query_string=dict(type='bool'),
query_string_blacklist=dict(type='list', elements='str'),
query_string_whitelist=dict(type='list', elements='str'),
),
),
signed_url_cache_max_age_sec=dict(default=3600, type='int'),
),
),
connection_draining=dict(type='dict', options=dict(draining_timeout_sec=dict(default=300, type='int'))),
description=dict(type='str'),
enable_cdn=dict(type='bool'),
health_checks=dict(required=True, type='list', elements='str'),
iap=dict(
type='dict',
options=dict(enabled=dict(type='bool'), oauth2_client_id=dict(required=True, type='str'), oauth2_client_secret=dict(required=True, type='str')),
),
load_balancing_scheme=dict(default='EXTERNAL', type='str'),
name=dict(required=True, type='str'),
port_name=dict(type='str'),
protocol=dict(type='str'),
security_policy=dict(type='str'),
session_affinity=dict(type='str'),
timeout_sec=dict(type='int', aliases=['timeout_seconds']),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#backendService'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind, fetch)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind, fetch):
update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.put(link, resource_to_request(module)))
def update_fields(module, request, response):
if response.get('securityPolicy') != request.get('securityPolicy'):
security_policy_update(module, request, response)
def security_policy_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/global/backendServices/{name}/setSecurityPolicy"]).format(**module.params),
{u'securityPolicy': module.params.get('security_policy')},
)
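# Note: securityPolicy is the only field changed through a dedicated
# setSecurityPolicy sub-call (above); every other difference is applied by
# the full PUT issued in update().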
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#backendService',
u'affinityCookieTtlSec': module.params.get('affinity_cookie_ttl_sec'),
u'backends': BackendServiceBackendsArray(module.params.get('backends', []), module).to_request(),
u'cdnPolicy': BackendServiceCdnpolicy(module.params.get('cdn_policy', {}), module).to_request(),
u'connectionDraining': BackendServiceConnectiondraining(module.params.get('connection_draining', {}), module).to_request(),
u'description': module.params.get('description'),
u'enableCDN': module.params.get('enable_cdn'),
u'healthChecks': module.params.get('health_checks'),
u'iap': BackendServiceIap(module.params.get('iap', {}), module).to_request(),
u'loadBalancingScheme': module.params.get('load_balancing_scheme'),
u'name': module.params.get('name'),
u'portName': module.params.get('port_name'),
u'protocol': module.params.get('protocol'),
u'securityPolicy': module.params.get('security_policy'),
u'sessionAffinity': module.params.get('session_affinity'),
u'timeoutSec': module.params.get('timeout_sec'),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'affinityCookieTtlSec': response.get(u'affinityCookieTtlSec'),
u'backends': BackendServiceBackendsArray(response.get(u'backends', []), module).from_response(),
u'cdnPolicy': BackendServiceCdnpolicy(response.get(u'cdnPolicy', {}), module).from_response(),
u'connectionDraining': BackendServiceConnectiondraining(response.get(u'connectionDraining', {}), module).from_response(),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'fingerprint': response.get(u'fingerprint'),
u'description': response.get(u'description'),
u'enableCDN': response.get(u'enableCDN'),
u'healthChecks': response.get(u'healthChecks'),
u'id': response.get(u'id'),
u'iap': BackendServiceIap(response.get(u'iap', {}), module).from_response(),
u'loadBalancingScheme': module.params.get('load_balancing_scheme'),
u'name': module.params.get('name'),
u'portName': response.get(u'portName'),
u'protocol': response.get(u'protocol'),
u'securityPolicy': response.get(u'securityPolicy'),
u'sessionAffinity': response.get(u'sessionAffinity'),
u'timeoutSec': response.get(u'timeoutSec'),
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#backendService')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
class BackendServiceBackendsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'balancingMode': item.get('balancing_mode'),
u'capacityScaler': item.get('capacity_scaler'),
u'description': item.get('description'),
u'group': item.get('group'),
u'maxConnections': item.get('max_connections'),
u'maxConnectionsPerInstance': item.get('max_connections_per_instance'),
u'maxConnectionsPerEndpoint': item.get('max_connections_per_endpoint'),
u'maxRate': item.get('max_rate'),
u'maxRatePerInstance': item.get('max_rate_per_instance'),
u'maxRatePerEndpoint': item.get('max_rate_per_endpoint'),
u'maxUtilization': item.get('max_utilization'),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{
u'balancingMode': item.get(u'balancingMode'),
u'capacityScaler': item.get(u'capacityScaler'),
u'description': item.get(u'description'),
u'group': item.get(u'group'),
u'maxConnections': item.get(u'maxConnections'),
u'maxConnectionsPerInstance': item.get(u'maxConnectionsPerInstance'),
u'maxConnectionsPerEndpoint': item.get(u'maxConnectionsPerEndpoint'),
u'maxRate': item.get(u'maxRate'),
u'maxRatePerInstance': item.get(u'maxRatePerInstance'),
u'maxRatePerEndpoint': item.get(u'maxRatePerEndpoint'),
u'maxUtilization': item.get(u'maxUtilization'),
}
)
class BackendServiceCdnpolicy(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'cacheKeyPolicy': BackendServiceCachekeypolicy(self.request.get('cache_key_policy', {}), self.module).to_request(),
u'signedUrlCacheMaxAgeSec': self.request.get('signed_url_cache_max_age_sec'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'cacheKeyPolicy': BackendServiceCachekeypolicy(self.request.get(u'cacheKeyPolicy', {}), self.module).from_response(),
u'signedUrlCacheMaxAgeSec': self.request.get(u'signedUrlCacheMaxAgeSec'),
}
)
class BackendServiceCachekeypolicy(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'includeHost': self.request.get('include_host'),
u'includeProtocol': self.request.get('include_protocol'),
u'includeQueryString': self.request.get('include_query_string'),
u'queryStringBlacklist': self.request.get('query_string_blacklist'),
u'queryStringWhitelist': self.request.get('query_string_whitelist'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'includeHost': self.request.get(u'includeHost'),
u'includeProtocol': self.request.get(u'includeProtocol'),
u'includeQueryString': self.request.get(u'includeQueryString'),
u'queryStringBlacklist': self.request.get(u'queryStringBlacklist'),
u'queryStringWhitelist': self.request.get(u'queryStringWhitelist'),
}
)
class BackendServiceConnectiondraining(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'drainingTimeoutSec': self.request.get('draining_timeout_sec')})
def from_response(self):
return remove_nones_from_dict({u'drainingTimeoutSec': self.request.get(u'drainingTimeoutSec')})
class BackendServiceIap(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'enabled': self.request.get('enabled'),
u'oauth2ClientId': self.request.get('oauth2_client_id'),
u'oauth2ClientSecret': self.request.get('oauth2_client_secret'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'enabled': self.request.get(u'enabled'),
u'oauth2ClientId': self.request.get(u'oauth2ClientId'),
u'oauth2ClientSecret': self.request.get(u'oauth2ClientSecret'),
}
)
if __name__ == '__main__':
main()
| thaim/ansible | lib/ansible/modules/cloud/google/gcp_compute_backend_service.py | Python | mit | 44,362 |
#!/usr/bin/env python
import os
from watermark.config import config as conf
from watermark import connect
config_name = os.getenv('WM_CONFIG_ENV') or 'default'
config = conf[config_name]()
conn = connect.get_connection(config)
conn.message.create_queue(name=config.NAME)
print("{name} queue created".format(name=config.NAME))
| danabauer/app-on-openstack | code/worker/deploy.py | Python | mit | 331 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP68 implementation."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import *
import random
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
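# Worked example of the nSequence encoding (illustrative comment only): a
# relative lock of ~512 seconds is SEQUENCE_LOCKTIME_TYPE_FLAG | (512 >> 9),
# i.e. (1 << 22) | 1, while a 10-block lock is simply nSequence = 10 with
# bit 22 clear. Setting SEQUENCE_LOCKTIME_DISABLE_FLAG (bit 31) turns
# relative lock-time enforcement off for that input.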
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "non-BIP68-final (code 64)"
class BIP68Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [[], ["-acceptnonstdtxn=0"]]
def run_test(self):
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
# Generate some coins
self.nodes[0].generate(110)
self.log.info("Running test disable flag")
self.test_disable_flag()
self.log.info("Running test sequence-lock-confirmed-inputs")
self.test_sequence_lock_confirmed_inputs()
self.log.info("Running test sequence-lock-unconfirmed-inputs")
self.test_sequence_lock_unconfirmed_inputs()
self.log.info("Running test BIP68 not consensus before versionbits activation")
self.test_bip68_not_consensus()
self.log.info("Activating BIP68 (and 112/113)")
self.activateCSV()
self.log.info("Verifying nVersion=2 transactions are standard.")
self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).")
self.test_version2_relay()
self.log.info("Passed")
# Test that BIP68 is not in effect if tx version is 1, or if
    # the most significant sequence bit (the disable flag, bit 31) is set.
def test_disable_flag(self):
# Create some unconfirmed inputs
new_addr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC
utxos = self.nodes[0].listunspent(0, 0)
assert(len(utxos) > 0)
utxo = utxos[0]
tx1 = CTransaction()
value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
# Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the
# input to mature.
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
tx1.vout = [CTxOut(value, CScript([b'a']))]
tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))["hex"]
tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
tx1_id = int(tx1_id, 16)
# This transaction will enable sequence-locks, so this transaction should
# fail
tx2 = CTransaction()
tx2.nVersion = 2
sequence_value = sequence_value & 0x7fffffff
tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))]
tx2.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2))
# Setting the version back down to 1 should disable the sequence lock,
# so this should be accepted.
tx2.nVersion = 1
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
return self.nodes[0].getblockheader(block_hash)["mediantime"]
# Test that sequence locks are respected for transactions spending confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
# Create lots of confirmed utxos, and use them to generate lots of random
# transactions.
max_outputs = 50
addresses = []
while len(addresses) < max_outputs:
addresses.append(self.nodes[0].getnewaddress())
        while len(self.nodes[0].listunspent()) < 200:
            random.shuffle(addresses)
num_outputs = random.randint(1, max_outputs)
outputs = {}
for i in range(num_outputs):
outputs[addresses[i]] = random.randint(1, 20)*0.01
self.nodes[0].sendmany("", outputs)
self.nodes[0].generate(1)
utxos = self.nodes[0].listunspent()
# Try creating a lot of random transactions.
# Each time, choose a random number of inputs, and randomly set
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
for i in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
# Track whether any sequence locks used should fail
should_pass = True
# Track whether this transaction was built with sequence locks
using_sequence_locks = False
tx = CTransaction()
tx.nVersion = 2
value = 0
for j in range(num_inputs):
sequence_value = 0xfffffffe # this disables sequence locks
# 50% chance we enable sequence locks
if random.randint(0,1):
using_sequence_locks = True
# 10% of the time, make the input sequence value pass
input_will_pass = (random.randint(1,10) == 1)
sequence_value = utxos[j]["confirmations"]
if not input_will_pass:
sequence_value += 1
should_pass = False
# Figure out what the median-time-past was for the confirmed input
# Note that if an input has N confirmations, we're going back N blocks
# from the tip so that we're looking up MTP of the block
# PRIOR to the one the input appears in, as per the BIP68 spec.
orig_time = self.get_median_time_past(utxos[j]["confirmations"])
cur_time = self.get_median_time_past(0) # MTP of the tip
# can only timelock this input if it's not too old -- otherwise use height
can_time_lock = True
if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time lock
if random.randint(0,1) and can_time_lock:
# Find first time-lock value that fails, or latest one that succeeds
time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
if input_will_pass and time_delta > cur_time - orig_time:
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
elif (not input_will_pass and time_delta <= cur_time - orig_time):
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
value += utxos[j]["amount"]*COIN
# Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a'])))
rawtx = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))["hex"]
if (using_sequence_locks and not should_pass):
# This transaction should be rejected
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx)
else:
# This raw transaction should be accepted
self.nodes[0].sendrawtransaction(rawtx)
utxos = self.nodes[0].listunspent()
# Test that sequence locks on unconfirmed inputs must have nSequence
# height or time of 0 to be accepted.
# Then test that BIP68-invalid transactions are removed from the mempool
# after a reorg.
def test_sequence_lock_unconfirmed_inputs(self):
# Store height so we can easily reset the chain at the end of the test
cur_height = self.nodes[0].getblockcount()
# Create a mempool tx.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Anyone-can-spend mempool tx.
# Sequence lock of 0 should pass.
tx2 = CTransaction()
tx2.nVersion = 2
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(tx2_raw)
# Create a spend of the 0th output of orig_tx with a sequence lock
# of 1, and test what happens when submitting.
# orig_tx.vout[0] must be an anyone-can-spend output
def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
sequence_value = 1
if not use_height_lock:
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx = CTransaction()
tx.nVersion = 2
tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))]
tx.rehash()
if (orig_tx.hash in node.getrawmempool()):
# sendrawtransaction should fail if the tx is in the mempool
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx))
else:
# sendrawtransaction should succeed if the tx is not in the mempool
node.sendrawtransaction(ToHex(tx))
return tx
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Now mine some blocks, but make sure tx2 doesn't get mined.
# Use prioritisetransaction to lower the effective feerate to 0
self.nodes[0].prioritisetransaction(tx2.hash, int(-self.relayfee*COIN))
cur_time = self.mocktime
for i in range(10):
self.nodes[0].setmocktime(cur_time + 600)
self.nodes[0].generate(1)
cur_time += 600
assert(tx2.hash in self.nodes[0].getrawmempool())
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Mine tx2, and then try again
self.nodes[0].prioritisetransaction(tx2.hash, int(self.relayfee*COIN))
# Advance the time on the node so that we can test timelocks
self.nodes[0].setmocktime(cur_time+600)
self.nodes[0].generate(1)
assert(tx2.hash not in self.nodes[0].getrawmempool())
# Now that tx2 is not in the mempool, a sequence locked spend should
# succeed
tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
assert(tx3.hash in self.nodes[0].getrawmempool())
self.nodes[0].generate(1)
assert(tx3.hash not in self.nodes[0].getrawmempool())
# One more test, this time using height locks
tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx4.hash in self.nodes[0].getrawmempool())
# Now try combining confirmed and unconfirmed inputs
tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx5.hash not in self.nodes[0].getrawmempool())
utxos = self.nodes[0].listunspent()
tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
raw_tx5 = self.nodes[0].signrawtransactionwithwallet(ToHex(tx5))["hex"]
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5)
# Test mempool-BIP68 consistency after reorg
#
# State of the transactions in the last blocks:
# ... -> [ tx2 ] -> [ tx3 ]
# tip-1 tip
# And currently tx4 is in the mempool.
#
# If we invalidate the tip, tx3 should get added to the mempool, causing
# tx4 to be removed (fails sequence-lock).
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
assert(tx4.hash not in self.nodes[0].getrawmempool())
assert(tx3.hash in self.nodes[0].getrawmempool())
# Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
# diagram above).
# This would cause tx2 to be added back to the mempool, which in turn causes
# tx3 to be removed.
tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16)
height = self.nodes[0].getblockcount()
for i in range(2):
block = create_block(tip, create_coinbase(height), cur_time)
block.nVersion = 3
block.rehash()
block.solve()
tip = block.sha256
height += 1
self.nodes[0].submitblock(ToHex(block))
cur_time += 1
mempool = self.nodes[0].getrawmempool()
assert(tx3.hash not in mempool)
assert(tx2.hash in mempool)
# Reset the chain and get rid of the mocktimed-blocks
self.nodes[0].setmocktime(self.mocktime)
self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
self.nodes[0].generate(10)
# Make sure that BIP68 isn't being used to validate blocks, prior to
# versionbits activation. If more blocks are mined prior to this test
# being run, then it's possible the test has activated the soft fork, and
# this test should be moved to run earlier, or deleted.
def test_bip68_not_consensus(self):
assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active')
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Make an anyone-can-spend transaction
tx2 = CTransaction()
tx2.nVersion = 1
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
# sign tx2
tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Now make an invalid spend of tx2 according to BIP68
sequence_value = 100 # 100 block relative locktime
tx3 = CTransaction()
tx3.nVersion = 2
tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx3.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3))
# make a block that violates bip68; ensure that the tip updates
tip = int(self.nodes[0].getbestblockhash(), 16)
block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1), self.mocktime + 600)
block.nVersion = 3
block.vtx.extend([tx1, tx2, tx3])
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].submitblock(ToHex(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def activateCSV(self):
# activation should happen at block height 432 (3 periods)
# getblockchaininfo will show CSV as active at block 431 (144 * 3 -1) since it's returning whether CSV is active for the next block.
min_activation_height = 432
height = self.nodes[0].getblockcount()
assert_greater_than(min_activation_height - height, 2)
self.nodes[0].generate(min_activation_height - height - 2)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "locked_in")
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "active")
self.sync_blocks()
# Use self.nodes[1] to test that version 2 transactions are standard.
def test_version2_relay(self):
inputs = [ ]
outputs = { self.nodes[1].getnewaddress() : 1.0 }
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
tx = FromHex(CTransaction(), rawtxfund)
tx.nVersion = 2
tx_signed = self.nodes[1].signrawtransactionwithwallet(ToHex(tx))["hex"]
self.nodes[1].sendrawtransaction(tx_signed)
if __name__ == '__main__':
BIP68Test().main()
| dashpay/dash | test/functional/feature_bip68_sequence.py | Python | mit | 17,835 |
import unittest
import testRObject
import testVector
import testArray
import testDataFrame
import testFormula
import testFunction
import testEnvironment
import testRobjects
import testMethods
import testPackages
import testHelp
import testLanguage
# wrap this nicely so a warning is issued if no numpy present
import testNumpyConversions
def suite():
suite_RObject = testRObject.suite()
suite_Vector = testVector.suite()
suite_Array = testArray.suite()
suite_DataFrame = testDataFrame.suite()
suite_Function = testFunction.suite()
suite_Environment = testEnvironment.suite()
suite_Formula = testFormula.suite()
suite_Robjects = testRobjects.suite()
suite_NumpyConversions = testNumpyConversions.suite()
suite_Methods = testMethods.suite()
suite_Packages = testPackages.suite()
suite_Help = testHelp.suite()
suite_Language = testLanguage.suite()
alltests = unittest.TestSuite([suite_RObject,
suite_Vector,
suite_Array,
suite_DataFrame,
suite_Function,
suite_Environment,
suite_Formula,
suite_Robjects,
suite_Methods,
suite_NumpyConversions,
suite_Packages,
suite_Help,
suite_Language
])
return alltests
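# To run a single sub-suite instead of the aggregate, e.g. only the vector
# tests: unittest.TextTestRunner(verbosity=2).run(testVector.suite())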
def main():
r = unittest.TestResult()
suite().run(r)
return r
if __name__ == '__main__':
tr = unittest.TextTestRunner(verbosity = 2)
suite = suite()
tr.run(suite)
| welltempered/rpy2-heroku | rpy/robjects/tests/__init__.py | Python | gpl-2.0 | 1,832 |
# Copyright 2020 Red Hat, Inc. Jake Hunsaker <[email protected]>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.cleaner.archives import SoSObfuscationArchive
import os
import tarfile
class DataDirArchive(SoSObfuscationArchive):
"""A plain directory on the filesystem that is not directly associated with
any known or supported collection utility
"""
type_name = 'data_dir'
description = 'unassociated directory'
@classmethod
def check_is_type(cls, arc_path):
return os.path.isdir(arc_path)
def set_archive_root(self):
return os.path.abspath(self.archive_path)
class TarballArchive(SoSObfuscationArchive):
"""A generic tar archive that is not associated with any known or supported
collection utility
"""
type_name = 'tarball'
description = 'unassociated tarball'
@classmethod
def check_is_type(cls, arc_path):
try:
return tarfile.is_tarfile(arc_path)
except Exception:
return False
def set_archive_root(self):
if self.tarobj.firstmember.isdir():
return self.tarobj.firstmember.name
return ''
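# Illustrative type-detection flow (a sketch; the real dispatch lives in the
# sos.cleaner code that consumes these classes):
#   for klass in (DataDirArchive, TarballArchive):
#       if klass.check_is_type('/var/tmp/sosreport.tar.xz'):
#           ...  # instantiate via the SoSObfuscationArchive machinery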
| slashdd/sos | sos/cleaner/archives/generic.py | Python | gpl-2.0 | 1,463 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2010 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
"""Payment Flow History Report Dialog"""
from storm.expr import And, Eq, Or
from stoqlib.database.expr import Date
from stoqlib.gui.dialogs.daterangedialog import DateRangeDialog
from stoqlib.gui.utils.printing import print_report
from stoqlib.lib.message import info
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.reporting.payment import PaymentFlowHistoryReport
_ = stoqlib_gettext
# A few comments for the payment_flow_query:
# - The first table in the FROM clause is the list of all possible dates
# (due_date and paid_date) in the results. This is done so that the subsequent
# subselect can be joined properly
# - In that same subselect, we use IS NOT NULL to avoid an empty row for
# payments that were not received yet.
# - We filter out statuses (0, 5) to not include PREVIEW and CANCELED payments
# - payment_type = 1 are OUT_PAYMENTS and 0 are IN_PAYMENTS
payment_flow_query = """
SELECT all_payment_dates.date,
COALESCE(payments_to_pay.count, 0) as to_pay_payments,
COALESCE(payments_to_pay.to_pay, 0) as to_pay,
COALESCE(payments_paid.count, 0) as paid_payments,
COALESCE(payments_paid.paid, 0) as paid,
COALESCE(payments_to_receive.count, 0) as to_receive_payments,
COALESCE(payments_to_receive.to_receive, 0) as to_receive,
COALESCE(payments_received.count, 0) as received_payments,
COALESCE(payments_received.received, 0) as received
FROM (SELECT date(due_date) as date FROM payment
UNION SELECT date(paid_date) as date FROM payment WHERE
paid_date IS NOT NULL) as all_payment_dates
-- To pay (out payments)
LEFT JOIN (SELECT DATE(due_date) as date, count(1) as count, sum(value) as to_pay
FROM payment WHERE payment_type = 'out' AND status not in ('preview', 'cancelled')
GROUP BY DATE(due_date))
AS payments_to_pay ON (all_payment_dates.date = payments_to_pay.date)
-- Paid (out payments)
LEFT JOIN (SELECT DATE(paid_date) as date, count(1) as count, sum(value) as paid
FROM payment WHERE payment_type = 'out'
AND payment.status not in ('preview', 'cancelled')
GROUP BY DATE(paid_date))
AS payments_paid ON (all_payment_dates.date = payments_paid.date)
-- To receive (in payments)
LEFT JOIN (SELECT DATE(due_date) as date, count(1) as count, sum(value) as to_receive
FROM payment WHERE payment_type = 'in'
AND payment.status not in ('preview', 'cancelled')
GROUP BY DATE(due_date))
AS payments_to_receive ON (all_payment_dates.date = payments_to_receive.date)
-- Received (in payments)
LEFT JOIN (SELECT DATE(paid_date) as date, count(1) as count, sum(value) as received
FROM payment WHERE payment_type = 'in'
AND payment.status not in ('preview', 'cancelled')
GROUP BY DATE(paid_date))
AS payments_received ON (all_payment_dates.date = payments_received.date)
ORDER BY all_payment_dates.date;
"""
class PaymentFlowDay(object):
def __init__(self, store, row, previous_day=None):
"""Payment Flow History for a given date
:param row: A list of values from the payment_flow_query above
:param previous_day: The `previous_day <PaymentFlowDay>`. This is used
to calculate the expected and real balances for each day (based on the
previous dates).
"""
(date, to_pay_count, to_pay, paid_count, paid, to_receive_count,
to_receive, received_count, received) = row
self.history_date = date
# values
self.to_pay = to_pay
self.to_receive = to_receive
self.paid = paid
self.received = received
# counts
self.to_pay_payments = to_pay_count
self.to_receive_payments = to_receive_count
self.paid_payments = paid_count
self.received_payments = received_count
if previous_day:
self.previous_balance = previous_day.balance_real
else:
self.previous_balance = 0
        # Today's real balance is the previous day's balance, plus the
        # payments we received, minus what we paid. The expected balance
        # uses the payments we were due to receive/pay instead.
self.balance_expected = self.previous_balance + to_receive - to_pay
self.balance_real = self.previous_balance + received - paid
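        # For example: with previous_balance == 100, to_receive == 50 and
        # to_pay == 20, balance_expected is 130; if only 30 was actually
        # received and 20 paid, balance_real is 110.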
self.store = store
def get_divergent_payments(self):
"""Returns a :class:`Payment` sequence that meet the following requirements:
* The payment due date, paid date or cancel date is the current
PaymentFlowHistory date.
* The payment was paid/received with different values (eg with
discount or surcharge).
* The payment was scheduled to be paid/received on the current,
but it was not.
* The payment was not expected to be paid/received on the current date.
"""
from stoqlib.domain.payment.payment import Payment
date = self.history_date
query = And(Or(Date(Payment.due_date) == date,
Date(Payment.paid_date) == date,
Date(Payment.cancel_date) == date),
Or(Eq(Payment.paid_value, None),
Payment.value != Payment.paid_value,
Eq(Payment.paid_date, None),
Date(Payment.due_date) != Date(Payment.paid_date)))
return self.store.find(Payment, query)
@classmethod
def get_flow_history(cls, store, start, end):
"""Get the payment flow history for a given date interval
This will return a list of PaymentFlowDay, one for each date that has
payments registered and are in the interval specified.
"""
history = []
previous_entry = None
for row in store.execute(payment_flow_query).get_all():
entry = cls(store, row, previous_entry)
if entry.history_date > end:
break
            # We only store entries for dates on or after the requested start,
            # but we still build entries from the very beginning so that the
            # running balances are correct
if entry.history_date >= start:
history.append(entry)
previous_entry = entry
return history
class PaymentFlowHistoryDialog(DateRangeDialog):
title = _(u'Payment Flow History Dialog')
desc = _("Select a date or a range to be visualised in the report:")
size = (-1, -1)
def __init__(self, store):
"""A dialog to print the PaymentFlowHistoryReport report.
:param store: a store
"""
self.store = store
DateRangeDialog.__init__(self, title=self.title, header_text=self.desc)
#
# BasicDialog
#
def confirm(self):
DateRangeDialog.confirm(self)
start = self.retval.start
end = self.retval.end
results = PaymentFlowDay.get_flow_history(self.store, start, end)
if not results:
info(_('No payment history found.'))
return False
print_report(PaymentFlowHistoryReport, payment_histories=results)
return True
| andrebellafronte/stoq | stoqlib/gui/dialogs/paymentflowhistorydialog.py | Python | gpl-2.0 | 8,070 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""DocExtract REST and Web API
Exposes document extraction facilities to the world
"""
from tempfile import NamedTemporaryFile
from invenio.webinterface_handler import WebInterfaceDirectory
from invenio.webuser import collect_user_info
from invenio.webpage import page
from invenio.config import CFG_TMPSHAREDDIR, CFG_ETCDIR
from invenio.refextract_api import extract_references_from_file_xml, \
extract_references_from_url_xml, \
extract_references_from_string_xml
from invenio.bibformat_engine import format_record
def check_login(req):
"""Check that the user is logged in"""
user_info = collect_user_info(req)
if user_info['email'] == 'guest':
# 1. User is guest: must login prior to upload
# return 'Please login before uploading file.'
pass
def check_url(url):
"""Check that the url we received is not gibberish"""
return url.startswith('http://') or \
url.startswith('https://') or \
url.startswith('ftp://')
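# For example: check_url('http://arxiv.org/pdf/1234.5678') is True, while
# check_url('file:///etc/passwd') is False.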
def extract_from_pdf_string(pdf):
"""Extract references from a pdf stored in a string
Given a string representing a pdf, this function writes the string to
disk and passes it to refextract.
    We need to create a temporary file because we need to run pdf2text on it"""
# Save new record to file
tf = NamedTemporaryFile(prefix='docextract-pdf',
dir=CFG_TMPSHAREDDIR)
try:
tf.write(pdf)
tf.flush()
refs = extract_references_from_file_xml(tf.name)
finally:
# Also deletes the file
tf.close()
return refs
def make_arxiv_url(arxiv_id):
"""Make a url we can use to download a pdf from arxiv
Arguments:
arxiv_id -- the arxiv id of the record to link to
"""
return "http://arxiv.org/pdf/%s.pdf" % arxiv_id
class WebInterfaceAPIDocExtract(WebInterfaceDirectory):
"""DocExtract REST API"""
_exports = [
('extract-references-pdf', 'extract_references_pdf'),
('extract-references-pdf-url', 'extract_references_pdf_url'),
('extract-references-txt', 'extract_references_txt'),
]
def extract_references_pdf(self, req, form):
"""Extract references from uploaded pdf"""
check_login(req)
if 'pdf' not in form:
return 'No PDF file uploaded'
return extract_from_pdf_string(form['pdf'].stream.read())
def extract_references_pdf_url(self, req, form):
"""Extract references from the pdf pointed by the passed url"""
check_login(req)
if 'url' not in form:
return 'No URL specified'
url = form['url']
if not check_url(url):
return 'Invalid URL specified'
return extract_references_from_url_xml(url)
def extract_references_txt(self, req, form):
"""Extract references from plain text"""
check_login(req)
if 'txt' not in form:
return 'No text specified'
txt = form['txt'].stream.read()
return extract_references_from_string_xml(txt)
class WebInterfaceDocExtract(WebInterfaceDirectory):
"""DocExtract API"""
_exports = ['api',
('extract-references', 'extract_references'),
('example.pdf', 'example_pdf'),
]
api = WebInterfaceAPIDocExtract()
def example_pdf(self, req, _form):
"""Serve a test pdf for tests"""
f = open("%s/docextract/example.pdf" % CFG_ETCDIR, 'rb')
try:
req.write(f.read())
finally:
f.close()
def extract_references_template(self):
"""Template for reference extraction page"""
return """Please specify a pdf or a url or some references to parse
<form action="extract-references" method="post"
enctype="multipart/form-data">
<p>PDF: <input type="file" name="pdf" /></p>
<p>arXiv: <input type="text" name="arxiv" /></p>
<p>URL: <input type="text" name="url" style="width: 600px;"/></p>
<textarea name="txt" style="width: 500px; height: 500px;"></textarea>
<p><input type="submit" /></p>
</form>
"""
def extract_references(self, req, form):
"""Refrences extraction page
This page can be used for authors to test their pdfs against our
refrences extraction process"""
user_info = collect_user_info(req)
# Handle the 3 POST parameters
if 'pdf' in form and form['pdf']:
pdf = form['pdf']
references_xml = extract_from_pdf_string(pdf)
elif 'arxiv' in form and form['arxiv']:
url = make_arxiv_url(arxiv_id=form['arxiv'])
references_xml = extract_references_from_url_xml(url)
elif 'url' in form and form['url']:
url = form['url']
references_xml = extract_references_from_url_xml(url)
elif 'txt' in form and form['txt']:
txt = form['txt']
references_xml = extract_references_from_string_xml(txt)
else:
references_xml = None
# If we have not uploaded anything yet
# Display the form that allows us to do so
if not references_xml:
out = self.extract_references_template()
else:
out = """
<style type="text/css">
#referenceinp_link { display: none; }
</style>
"""
out += format_record(0,
'hdref',
xml_record=references_xml.encode('utf-8'),
user_info=user_info)
# Render the page (including header, footer)
return page(title='References Extractor',
body=out,
uid=user_info['uid'],
req=req)
| labordoc/labordoc-next | modules/docextract/lib/docextract_webinterface.py | Python | gpl-2.0 | 6,745 |
# -*- coding: utf-8 -*-
#
# Copyright © 2013-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
'''
pkgdb tests for group-related functionality (group ACLs and group POC).
'''
__requires__ = ['SQLAlchemy >= 0.7']
import pkg_resources
import json
import unittest
import sys
import os
from mock import patch
sys.path.insert(0, os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..'))
import pkgdb2
import pkgdb2.lib.model as model
from tests import (Modeltests, FakeFasUser,
FakeFasGroupValid, create_package_acl,
create_package_acl2, user_set)
class PkgdbGrouptests(Modeltests):
""" PkgdbGroup tests. """
def setUp(self):
""" Set up the environnment, ran before every tests. """
super(PkgdbGrouptests, self).setUp()
pkgdb2.APP.config['TESTING'] = True
pkgdb2.SESSION = self.session
pkgdb2.api.extras.SESSION = self.session
pkgdb2.ui.SESSION = self.session
pkgdb2.ui.acls.SESSION = self.session
pkgdb2.ui.admin.SESSION = self.session
pkgdb2.ui.collections.SESSION = self.session
pkgdb2.ui.packagers.SESSION = self.session
pkgdb2.ui.packages.SESSION = self.session
self.app = pkgdb2.APP.test_client()
# Let's make sure the cache is empty for the tests
pkgdb2.CACHE.invalidate()
def set_group_acls(self):
''' Create some Group ACLs. '''
fedocal_pkg = model.Package.by_name(self.session, 'rpms', 'fedocal')
devel_collec = model.Collection.by_name(self.session, 'master')
f18_collec = model.Collection.by_name(self.session, 'f18')
pklist_fedocal_f18 = model.PackageListing.by_pkgid_collectionid(
self.session, fedocal_pkg.id, f18_collec.id)
pklist_fedocal_devel = model.PackageListing.by_pkgid_collectionid(
self.session, fedocal_pkg.id, devel_collec.id)
packager = model.PackageListingAcl(
fas_name='group::infra-sig',
packagelisting_id=pklist_fedocal_f18.id,
acl='commit',
status='Approved',
)
self.session.add(packager)
packager = model.PackageListingAcl(
fas_name='group::infra-sig',
packagelisting_id=pklist_fedocal_devel.id,
acl='commit',
status='Approved',
)
self.session.add(packager)
packager = model.PackageListingAcl(
fas_name='group::infra-sig',
packagelisting_id=pklist_fedocal_f18.id,
acl='watchbugzilla',
status='Approved',
)
self.session.add(packager)
packager = model.PackageListingAcl(
fas_name='group::infra-sig',
packagelisting_id=pklist_fedocal_devel.id,
acl='watchbugzilla',
status='Approved',
)
self.session.add(packager)
self.session.commit()
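        # Note: the 'group::' prefix above is pkgdb's convention for FAS
        # groups; the bugzilla export renders it as '@infra-sig' (see the
        # expected JSON in test_api_bugzilla_group below).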
def test_api_bugzilla_group(self):
""" Test the api_bugzilla function. """
create_package_acl2(self.session)
self.set_group_acls()
output = self.app.get('/api/bugzilla/')
self.assertEqual(output.status_code, 200)
expected = """# Package Database VCS Acls
# Text Format
# Collection|Package|Description|Owner|Initial QA|Initial CCList
# Backslashes (\) are escaped as \u005c Pipes (|) are escaped as \u007c
Fedora|fedocal|A web-based calendar for Fedora|pingou||group::infra-sig,pingou
Fedora|geany|A fast and lightweight IDE using GTK2|group::gtk-sig||
Fedora|guake|Top down terminal for GNOME|pingou||spot"""
self.assertEqual(output.data, expected)
output = self.app.get('/api/bugzilla/?format=json')
self.assertEqual(output.status_code, 200)
expected = {
u'bugzillaAcls': {
'Fedora': {
"fedocal": {
"owner": "pingou",
"cclist": {
"groups": ["@infra-sig"],
"people": ["pingou"]
},
"qacontact": None,
"summary": "A web-based calendar for Fedora"
},
'geany': {
'owner': '@gtk-sig',
'cclist': {
'groups': [],
'people': []
},
'qacontact': None,
'summary': 'A fast and lightweight IDE using '
'GTK2'
},
'guake': {
'owner': 'pingou',
'cclist': {
'groups': [],
'people': ['spot']
},
'qacontact': None,
'summary': 'Top down terminal for GNOME'
}
}
},
'title': 'Fedora Package Database -- Bugzilla ACLs'
}
data = json.loads(output.data)
self.assertEqual(data, expected)
@patch('pkgdb2.lib.utils')
@patch('pkgdb2.packager_login_required')
def test_package_give_group(self, login_func, mock_func):
""" Test the package_give function to a group. """
login_func.return_value = None
create_package_acl(self.session)
mock_func.get_packagers.return_value = ['spot']
group = FakeFasGroupValid()
group.name = 'gtk-sig'
mock_func.get_fas_group.return_value = group
mock_func.log.return_value = ''
user = FakeFasUser()
with user_set(pkgdb2.APP, user):
output = self.app.get('/package/rpms/guake/give')
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h1>Give Point of Contact of package: guake</h1>'
in output.data)
self.assertTrue(
'<input id="csrf_token" name="csrf_token"' in output.data)
csrf_token = output.data.split(
'name="csrf_token" type="hidden" value="')[1].split('">')[0]
data = {
'branches': 'master',
'poc': 'spot',
'csrf_token': csrf_token,
}
output = self.app.post('/package/rpms/guake/give', data=data,
follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'rpms/<span property="doap:name">guake</span>'
in output.data)
self.assertEqual(
output.data.count('<a href="/packager/spot/">'), 2)
user.username = 'spot'
user.groups.append('gtk-sig')
with user_set(pkgdb2.APP, user):
output = self.app.get('/package/rpms/guake/give')
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h1>Give Point of Contact of package: guake</h1>'
in output.data)
self.assertTrue(
'<input id="csrf_token" name="csrf_token"' in output.data)
csrf_token = output.data.split(
'name="csrf_token" type="hidden" value="')[1].split('">')[0]
data = {
'branches': 'master',
'poc': 'group::gtk-sig',
'csrf_token': csrf_token,
}
output = self.app.post('/package/rpms/guake/give', data=data,
follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertEqual(
output.data.count('<a href="/packager/spot/">'), 2)
self.assertEqual(
output.data.count('<a href="/packager/group::gtk-sig/">'),
1)
output = self.app.get('/package/rpms/guake/give')
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h1>Give Point of Contact of package: guake</h1>'
in output.data)
self.assertTrue(
'<td><select id="branches" multiple name="branches">'
'</select></td>'
in output.data)
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase(PkgdbGrouptests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| mizdebsk/pkgdb2 | tests/test_groups.py | Python | gpl-2.0 | 9,297 |
##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Geant4 support, implemented as an easyblock.
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import shutil
import re
from distutils.version import LooseVersion
import easybuild.tools.environment as env
from easybuild.framework.easyconfig import CUSTOM
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.tools.filetools import run_cmd, run_cmd_qa
from easybuild.tools.modules import get_software_root
from easybuild.tools.filetools import mkdir
class EB_Geant4(CMakeMake):
"""
Support for building Geant4.
    Note: Geant4 moved to a CMake-based build system as of version 9.5.
"""
@staticmethod
def extra_options():
"""
Define extra options needed by Geant4
"""
extra_vars = {
'G4ABLAVersion': [None, "G4ABLA version", CUSTOM],
'G4NDLVersion': [None, "G4NDL version", CUSTOM],
'G4EMLOWVersion': [None, "G4EMLOW version", CUSTOM],
'PhotonEvaporationVersion': [None, "PhotonEvaporation version", CUSTOM],
'G4RadioactiveDecayVersion': [None, "G4RadioactiveDecay version", CUSTOM],
}
return CMakeMake.extra_options(extra_vars)
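    # An easyconfig would set these custom parameters as plain strings, e.g.
    # (illustrative values, matching the 9.1 data sets referenced below):
    #   G4NDLVersion = '3.12'
    #   G4EMLOWVersion = '5.1'
    #   PhotonEvaporationVersion = '2.0'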
def configure_step(self):
"""
Configure Geant4 build, either via CMake for versions more recent than 9.5,
or using an interactive configuration procedure otherwise.
"""
# Geant4 switched to a cmake build system in 9.5
if LooseVersion(self.version) >= LooseVersion("9.5"):
mkdir('configdir')
os.chdir('configdir')
super(EB_Geant4, self).configure_step(srcdir="..")
else:
pwd = self.cfg['start_dir']
dst = self.installdir
clhepdir = get_software_root('CLHEP')
cmd = "%s/Configure -E -build" % pwd
            self.qanda = {  # questions and answers for version 9.1.p03
"There exists a config.sh file. Shall I use it to set the defaults? [y]": "n",
"Would you like to see the instructions? [n]": "",
"[Type carriage return to continue]": "",
"Definition of G4SYSTEM variable is Linux-g++. That stands for: 1) OS : Linux" \
"2) Compiler : g++ To modify default settings, select number above (e.g. 2) " \
"[Press [Enter] for default settings]": "2",
"Which C++ compiler? [g++]": "$(GPP)",
"Confirm your selection or set recommended 'g++'! [*]": "",
"Definition of G4SYSTEM variable is Linux-icc. That stands for: 1) OS : Linux 2)" \
"Compiler : icc To modify default settings, select number above (e.g. 2) " \
"[Press [Enter] for default settings]": "",
"Do you expect to run these scripts and binaries on multiple machines? [n]": "y",
"Where is Geant4 source installed? [%s]" % pwd: "",
"Specify the path where Geant4 libraries and source files should be installed." \
" [%s]" % pwd: dst,
"Do you want to copy all Geant4 headers in one directory? [n]": "y",
"Please, specify default directory where ALL the Geant4 data is installed:" \
"G4LEVELGAMMADATA: %(pwd)s/data/PhotonEvaporation2.0 G4RADIOACTIVEDATA: " \
"%(pwd)s/data/RadioactiveDecay3.2 G4LEDATA: %(pwd)s/data/G4EMLOW5.1 G4NEUTRONHPDATA: " \
"%(pwd)s/data/G4NDL3.12 G4ABLADATA: %(pwd)s/data/G4ABLA3.0 You will be asked about " \
"customizing these next. [%(pwd)s/data]" % {'pwd': pwd}: "%s/data" % dst,
"Directory %s/data doesn't exist. Use that name anyway? [n]" % dst: "y",
"Please, specify default directory where the Geant4 data is installed: " \
"1) G4LEVELGAMMADATA: %(dst)s/data/PhotonEvaporation2.0 2) G4RADIOACTIVEDATA: " \
"%(dst)s/data/RadioactiveDecay3.2 3) G4LEDATA: %(dst)s/data/G4EMLOW5.1 4) G4NEUTRONHPDATA: " \
"%(dst)s/data/G4NDL3.12 5) G4ABLADATA: %(dst)s/data/G4ABLA3.0 To modify default settings, " \
"select number above (e.g. 2) [Press [Enter] for default settings]" % {'dst': dst}: "",
"Please, specify where CLHEP is installed: CLHEP_BASE_DIR: ": clhepdir,
"Please, specify where CLHEP is installed: CLHEP_BASE_DIR: [%s]" % clhepdir: "",
"You can customize paths and library name of you CLHEP installation: 1) CLHEP_INCLUDE_DIR: " \
"%(clhepdir)s/include 2) CLHEP_LIB_DIR: %(clhepdir)s/lib 3) CLHEP_LIB: CLHEP To modify " \
"default settings, select number above (e.g. 2) [Press [Enter] for default settings]" %
{'clhepdir': clhepdir}: "",
"By default 'static' (.a) libraries are built. Do you want to build 'shared' (.so) " \
"libraries? [n]": "y",
"You selected to build 'shared' (.so) libraries. Do you want to build 'static' (.a) " \
"libraries too? [n]": "y",
"Do you want to build 'global' compound libraries? [n]": "",
"Do you want to compile libraries in DEBUG mode (-g)? [n]": "",
"G4UI_NONE If this variable is set, no UI sessions nor any UI libraries are built. " \
"This can be useful when running a pure batch job or in a user framework having its own " \
"UI system. Do you want to set this variable ? [n]": "",
"G4UI_BUILD_XAW_SESSION G4UI_USE_XAW Specifies to include and use the XAW interfaces in " \
"the application to be built. The XAW (X11 Athena Widget set) extensions are required to " \
"activate and build this driver. [n]": "",
"G4UI_BUILD_XM_SESSION G4UI_USE_XM Specifies to include and use the XM Motif based user " \
"interfaces. The XM Motif extensions are required to activate and build this driver. [n]": "",
"G4VIS_NONE If this variable is set, no visualization drivers will be built or used. Do " \
"you want to set this variable ? [n]": "n",
"G4VIS_BUILD_OPENGLX_DRIVER G4VIS_USE_OPENGLX It is an interface to the de facto standard " \
"3D graphics library, OpenGL. It is well suited for real-time fast visualization and " \
"prototyping. The X11 version of the OpenGL libraries is required. [n]": "",
"G4VIS_BUILD_OPENGLXM_DRIVER G4VIS_USE_OPENGLXM It is an interface to the de facto " \
"standard 3D graphics library, OpenGL. It is well suited for real-time fast visualization " \
"and prototyping. The X11 version of the OpenGL libraries and the Motif Xm extension is " \
"required. [n]": "",
"G4VIS_BUILD_DAWN_DRIVER G4VIS_USE_DAWN DAWN drivers are interfaces to the Fukui Renderer " \
"DAWN. DAWN is a vectorized 3D PostScript processor suited to prepare technical high " \
"quality outputs for presentation and/or documentation. [n]": "",
"G4VIS_BUILD_OIX_DRIVER G4VIS_USE_OIX The OpenInventor driver is based on OpenInventor tech" \
"nology for scientific visualization. The X11 version of OpenInventor is required. [n]": "",
"G4VIS_BUILD_RAYTRACERX_DRIVER G4VIS_USE_RAYTRACERX Allows for interactive ray-tracing " \
"graphics through X11. The X11 package is required. [n]": "",
"G4VIS_BUILD_VRML_DRIVER G4VIS_USE_VRML These driver generate VRML files, which describe " \
"3D scenes to be visualized with a proper VRML viewer. [n]": "",
"G4LIB_BUILD_GDML Setting this variable will enable building of the GDML plugin module " \
"embedded in Geant4 for detector description persistency. It requires your system to have " \
"the XercesC library and headers installed. Do you want to set this variable? [n]": "",
"G4LIB_BUILD_G3TOG4 The utility module 'g3tog4' will be built by setting this variable. " \
"NOTE: it requires a valid FORTRAN compiler to be installed on your system and the " \
"'cernlib' command in the path, in order to build the ancillary tools! Do you want to " \
"build 'g3tog4' ? [n]": "",
"G4LIB_BUILD_ZLIB Do you want to activate compression for output files generated by the " \
"HepRep visualization driver? [n]": "y",
"G4ANALYSIS_USE Activates the configuration setup for allowing plugins to analysis tools " \
"based on AIDA (Astract Interfaces for Data Analysis). In order to use AIDA features and " \
"compliant analysis tools, the proper environment for these tools will have to be set " \
"(see documentation for the specific analysis tools). [n]": "",
"Press [Enter] to start installation or use a shell escape to edit config.sh: ": "",
# extra questions and answers for version 9.2.p03
"Directory %s doesn't exist. Use that name anyway? [n]" % dst: "y",
"Specify the path where the Geant4 data libraries PhotonEvaporation%s " \
"RadioactiveDecay%s G4EMLOW%s G4NDL%s G4ABLA%s are " \
"installed. For now, a flat directory structure is assumed, and this can be customized " \
"at the next step if needed. [%s/data]" % (self.cfg['PhotonEvaporationVersion'],
self.cfg['G4RadioactiveDecayVersion'],
self.cfg['G4EMLOWVersion'],
self.cfg['G4NDLVersion'],
self.cfg['G4ABLAVersion'],
pwd
): "%s/data" % dst,
"Please enter 1) Another path to search in 2) 'f' to force the use of the path " \
"you entered previously (the data libraries are not needed to build Geant4, but " \
"are needed to run applications later). 3) 'c' to customize the data paths, e.g. " \
"if you have the data libraries installed in different locations. [f]": "",
"G4UI_BUILD_QT_SESSION G4UI_USE_QT Setting these variables will enable the building " \
"of the G4 Qt based user interface module and the use of this module in your " \
"applications respectively. The Qt3 or Qt4 headers, libraries and moc application are " \
"required to enable the building of this module. Do you want to enable build and use of " \
"this module? [n]": "",
                          # extra questions and answers for version 9.4.p01
"What is the path to the Geant4 source tree? [%s]" % pwd: "",
"Where should Geant4 be installed? [%s]" % pwd: dst,
"Do you want to install all Geant4 headers in one directory? [n]": "y",
"Do you want to build shared libraries? [y]": "",
"Do you want to build static libraries too? [n]": "",
"Do you want to build global libraries? [y]": "",
"Do you want to build granular libraries as well? [n]": "",
"Do you want to build libraries with debugging information? [n]": "",
"Specify the path where the Geant4 data libraries are installed: [%s/data]" % pwd: "%s/data" % dst,
"How many parallel jobs should make launch? [1]": "%s" % self.cfg['parallel'],
"Please enter 1) Another path to search in 2) 'f' to force the use of the path you entered " \
"previously (the data libraries are NOT needed to build Geant4, but are needed to run " \
"applications later). 3) 'c' to customize the data paths, e.g. if you have the data " \
"libraries installed in different locations. [f]": "",
"Enable building of User Interface (UI) modules? [y]": "",
"Enable building of the XAW (X11 Athena Widget set) UI module? [n]": "",
"Enable building of the X11-Motif (Xm) UI module? [n]": "",
"Enable building of the Qt UI module? [n]": "",
"Enable building of visualization drivers? [y]": "n",
"Enable the Geometry Description Markup Language (GDML) module? [n]": "",
"Enable build of the g3tog4 utility module? [n]": "",
"Enable internal zlib compression for HepRep visualization? [n] ": "",
}
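            # regular build output patterns that run_cmd_qa should not mistake for questions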
self.noqanda = [r"Compiling\s+.*?\s+\.\.\.",
r"Making\s+dependency\s+for\s+file\s+.*?\s+\.\.\.",
r"Making\s+libname\.map\s+starter\s+file\s+\.\.\.",
r"Making\s+libname\.map\s+\.\.\.",
r"Reading\s+library\s+get_name\s+map\s+file\s*\.\.\.",
r"Reading\s+dependency\s+files\s*\.\.\.",
r"Creating\s+shared\s+library\s+.*?\s+\.\.\."
]
run_cmd_qa(cmd, self.qanda, self.noqanda, log_all=True, simple=True)
# determining self.g4system
try:
scriptdirbase = os.path.join(pwd, '.config', 'bin')
filelist = os.listdir(scriptdirbase)
except OSError, err:
self.log.error("Failed to determine self.g4system: %s" % err)
if len(filelist) != 1:
self.log.error("Exactly one directory is expected in %s; found back: %s" % (scriptdirbase, filelist))
else:
self.g4system = filelist[0]
self.scriptdir = os.path.join(scriptdirbase, self.g4system)
if not os.path.isdir(self.scriptdir):
self.log.error("Something went wrong. Dir: %s doesn't exist." % self.scriptdir)
self.log.info("The directory containing several important scripts to be copied was found: %s" % self.scriptdir)
# copying config.sh to pwd
try:
self.log.info("copying config.sh to %s" % pwd)
shutil.copy2(os.path.join(self.scriptdir, 'config.sh'), pwd)
except IOError, err:
self.log.error("Failed to copy config.sh to %s" % pwd)
# creating several scripts containing environment variables
cmd = "%s/Configure -S -f config.sh -D g4conf=%s -D abssrc=%s" % (pwd, self.scriptdir, pwd)
run_cmd(cmd, log_all=True, simple=True)
def build_step(self):
"""Build Geant4."""
if LooseVersion(self.version) >= LooseVersion("9.5"):
super(EB_Geant4, self).build_step()
else:
pwd = self.cfg['start_dir']
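            # run the Configure script in build mode; it is fed the same Q&A pairs recorded for configure_step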
cmd = "%s/Configure -build" % pwd
run_cmd_qa(cmd, self.qanda, no_qa=self.noqanda, log_all=True, simple=True)
def install_step(self):
"""Install Geant4."""
if LooseVersion(self.version) >= LooseVersion("9.5"):
super(EB_Geant4, self).install_step()
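            # the CMake-based install puts the datasets under share/<name>-<version>/data,
            # with the patch-level marker stripped from the version (e.g. 9.5.p01 -> 9.5.1)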
self.datadst = os.path.join(self.installdir,
'share',
'%s-%s' % (self.name, self.version.replace("p0", "")),
'data',
)
else:
pwd = self.cfg['start_dir']
try:
datasrc = os.path.join(pwd, '..')
self.datadst = os.path.join(self.installdir, 'data')
os.mkdir(self.datadst)
except OSError, err:
self.log.error("Failed to create data destination file %s: %s" % (self.datadst, err))
datalist = ['G4ABLA%s' % self.cfg['G4ABLAVersion'],
'G4EMLOW%s' % self.cfg['G4EMLOWVersion'],
'G4NDL%s' % self.cfg['G4NDLVersion'],
'PhotonEvaporation%s' % self.cfg['PhotonEvaporationVersion'],
'RadioactiveDecay%s' % self.cfg['G4RadioactiveDecayVersion'],
]
try:
for dat in datalist:
self.log.info("Copying %s to %s" % (dat, self.datadst))
shutil.copytree(os.path.join(datasrc, dat), os.path.join(self.datadst, dat))
except IOError, err:
self.log.error("Something went wrong during data copying (%s) to %s: %s" % (dat, self.datadst, err))
try:
for fil in ['config', 'environments', 'examples']:
self.log.info("Copying %s to %s" % (fil, self.installdir))
if not os.path.exists(os.path.join(pwd, fil)):
self.log.error("No such file or directory: %s" % fil)
if os.path.isdir(os.path.join(pwd, fil)):
shutil.copytree(os.path.join(pwd, fil), os.path.join(self.installdir, fil))
elif os.path.isfile(os.path.join(pwd, fil)):
shutil.copy2(os.path.join(pwd, fil), os.path.join(self.installdir, fil))
except IOError, err:
self.log.error("Something went wrong during copying of %s to %s: %s" % (fil, self.installdir, err))
try:
for fil in ['config.sh', 'env.sh', 'env.csh']:
self.log.info("Copying %s to %s" % (fil, self.installdir))
if not os.path.exists(os.path.join(self.scriptdir, fil)):
self.log.error("No such file or directory: %s" % fil)
if os.path.isdir(os.path.join(self.scriptdir, fil)):
shutil.copytree(os.path.join(self.scriptdir, fil), os.path.join(self.installdir, fil))
elif os.path.isfile(os.path.join(self.scriptdir, fil)):
shutil.copy2(os.path.join(self.scriptdir, fil), os.path.join(self.installdir, fil))
except IOError, err:
self.log.error("Something went wrong during copying of (%s) to %s: %s" % (fil, self.installdir, err))
cmd = "%(pwd)s/Configure -f %(pwd)s/config.sh -d -install" % {'pwd': pwd}
run_cmd(cmd, log_all=True, simple=True)
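            # also build the MPI interface shipped with the extended examples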
mpiuidir = os.path.join(self.installdir, "examples/extended/parallel/MPI/mpi_interface")
os.chdir(mpiuidir)
# tweak config file as needed
f = open("G4MPI.gmk", "r")
G4MPItxt = f.read()
f.close()
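            # point G4MPIROOT at the Intel MPI installation, compile with mpicxx wrapping icpc,
            # and add the Geant4 and CLHEP include directories to CPPFLAGS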
root_re = re.compile("(.*G4MPIROOT\s+=\s+).*", re.MULTILINE)
cxx_re = re.compile("(.*CXX\s+:=\s+).*", re.MULTILINE)
cppflags_re = re.compile("(.*CPPFLAGS\s+\+=\s+.*)", re.MULTILINE)
G4MPItxt = root_re.sub(r"\1%s/intel64" % get_software_root('IMPI'), G4MPItxt)
G4MPItxt = cxx_re.sub(r"\1mpicxx -cxx=icpc", G4MPItxt)
            G4MPItxt = cppflags_re.sub(r"\1 -I$(G4INCLUDE) -I%s/include" % get_software_root('CLHEP'), G4MPItxt)
self.log.debug("contents of G4MPI.gmk: %s" % G4MPItxt)
shutil.copyfile("G4MPI.gmk", "G4MPI.gmk.ORIG")
f = open("G4MPI.gmk", "w")
f.write(G4MPItxt)
f.close()
# make sure the required environment variables are there
env.setvar("G4INSTALL", self.installdir)
env.setvar("G4SYSTEM", self.g4system)
env.setvar("G4LIB", "%s/lib/geant4/" % self.installdir)
env.setvar("G4INCLUDE", "%s/include/geant4/" % self.installdir)
run_cmd("make", log_all=True, simple=True)
run_cmd("make includes", log_all=True, simple=True)
def make_module_extra(self):
"""Define Geant4-specific environment variables in module file."""
g4version = '.'.join(self.version.split('.')[:2])
txt = super(EB_Geant4, self).make_module_extra()
txt += self.moduleGenerator.set_environment('G4INSTALL', "$root")
        # no longer needed for versions >= 9.5, but keep it for now
txt += self.moduleGenerator.set_environment('G4VERSION', g4version)
if LooseVersion(self.version) >= LooseVersion("9.5"):
txt += self.moduleGenerator.set_environment('G4INCLUDE', "$root/include/Geant4")
txt += self.moduleGenerator.set_environment('G4LIB', "$root/lib64/Geant4")
else:
txt += self.moduleGenerator.set_environment('G4INCLUDE', "$root/include/geant4")
txt += self.moduleGenerator.set_environment('G4LIB', "$root/lib/geant4")
txt += self.moduleGenerator.set_environment('G4SYSTEM', self.g4system)
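        # point the Geant4 dataset environment variables at the installed data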
txt += self.moduleGenerator.set_environment('G4ABLADATA',
"%s/G4ABLA%s" % (self.datadst, self.cfg['G4ABLAVersion']))
txt += self.moduleGenerator.set_environment('G4LEVELGAMMADATA',
"%s/PhotonEvaporation%s" % (self.datadst,
self.cfg['PhotonEvaporationVersion']))
txt += self.moduleGenerator.set_environment('G4RADIOACTIVEDATA',
"%s/RadioactiveDecay%s" % (self.datadst,
self.cfg['G4RadioactiveDecayVersion']))
txt += self.moduleGenerator.set_environment('G4LEDATA',
"%s/G4EMLOW%s" % (self.datadst, self.cfg['G4EMLOWVersion']))
txt += self.moduleGenerator.set_environment('G4NEUTRONHPDATA', "%s/G4NDL%s" % (self.datadst,
self.cfg['G4NDLVersion']))
return txt
def sanity_check_step(self):
"""
Custom sanity check for Geant4 >= 9.5
Not tested with previous versions
"""
custom_paths = {
'files': ["bin/geant4%s" % x for x in [".sh", ".csh", "-config"]] +
["lib64/libG4%s.so" % x for x in ['analysis', 'event', 'GMocren', 'materials',
'persistency', 'readout', 'Tree', 'VRML']],
'dirs': ['include/Geant4'],
}
super(EB_Geant4, self).sanity_check_step(custom_paths)
| geimer/easybuild-easyblocks | easybuild/easyblocks/g/geant4.py | Python | gpl-2.0 | 25,334 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import datetime
import os
from . import core
from .metadata import __version__, version_formatter
time_string = datetime.datetime.now().strftime('%A, %d %B %Y %I:%M%p')
pid = os.getpid()
def sizeof_fmt(num, suffix='B'):
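    """Format a byte count as a human-readable string using binary (1024-based) prefixes."""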
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'Yi', suffix)
def print_header():
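    """Assemble and print the Psi4 version/citation banner via core.print_out()."""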
driver_info = version_formatter("""{version} {release}""")
git_info = version_formatter("""{{{branch}}} {githash} {clean}""")
datadir = core.get_environment("PSIDATADIR")
memory = sizeof_fmt(core.get_memory())
threads = str(core.get_num_threads())
header = """
-----------------------------------------------------------------------
Psi4: An Open-Source Ab Initio Electronic Structure Package
Psi4 %s
Git: Rev %s
R. M. Parrish, L. A. Burns, D. G. A. Smith, A. C. Simmonett,
A. E. DePrince III, E. G. Hohenstein, U. Bozkaya, A. Yu. Sokolov,
R. Di Remigio, R. M. Richard, J. F. Gonthier, A. M. James,
H. R. McAlexander, A. Kumar, M. Saitow, X. Wang, B. P. Pritchard,
P. Verma, H. F. Schaefer III, K. Patkowski, R. A. King, E. F. Valeev,
F. A. Evangelista, J. M. Turney, T. D. Crawford, and C. D. Sherrill,
submitted.
-----------------------------------------------------------------------
Psi4 started on: %s
Process ID: %6d
PSIDATADIR: %s
Memory: %s
Threads: %s
""" % (driver_info, git_info, time_string, pid, datadir, memory, threads)
core.print_out(header)
| kratman/psi4public | psi4/header.py | Python | gpl-2.0 | 2,628 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.versioning.shell import main
if __name__ == '__main__':
main(debug='False', repository='.')
| ChinaMassClouds/copenstack-server | openstack/src/nova-2014.2/nova/db/sqlalchemy/migrate_repo/manage.py | Python | gpl-2.0 | 724 |