# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'EnergyGoal'
db.create_table(u'resource_goal_energygoal', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['team_mgr.Team'])),
('date', self.gf('django.db.models.fields.DateField')()),
('actual_usage', self.gf('django.db.models.fields.IntegerField')(default=0)),
('baseline_usage', self.gf('django.db.models.fields.IntegerField')(default=0)),
('goal_usage', self.gf('django.db.models.fields.IntegerField')(default=0)),
('percent_reduction', self.gf('django.db.models.fields.IntegerField')(default=0)),
('current_goal_percent_reduction', self.gf('django.db.models.fields.IntegerField')(default=0)),
('goal_status', self.gf('django.db.models.fields.CharField')(default='Not available', max_length=20)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 11, 21, 0, 0), auto_now=True, blank=True)),
))
db.send_create_signal(u'resource_goal', ['EnergyGoal'])
# Adding unique constraint on 'EnergyGoal', fields ['date', 'team']
db.create_unique(u'resource_goal_energygoal', ['date', 'team_id'])
# Adding model 'WaterGoal'
db.create_table(u'resource_goal_watergoal', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['team_mgr.Team'])),
('date', self.gf('django.db.models.fields.DateField')()),
('actual_usage', self.gf('django.db.models.fields.IntegerField')(default=0)),
('baseline_usage', self.gf('django.db.models.fields.IntegerField')(default=0)),
('goal_usage', self.gf('django.db.models.fields.IntegerField')(default=0)),
('percent_reduction', self.gf('django.db.models.fields.IntegerField')(default=0)),
('current_goal_percent_reduction', self.gf('django.db.models.fields.IntegerField')(default=0)),
('goal_status', self.gf('django.db.models.fields.CharField')(default='Not available', max_length=20)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 11, 21, 0, 0), auto_now=True, blank=True)),
))
db.send_create_signal(u'resource_goal', ['WaterGoal'])
# Adding unique constraint on 'WaterGoal', fields ['date', 'team']
db.create_unique(u'resource_goal_watergoal', ['date', 'team_id'])
# Adding model 'EnergyGoalSetting'
db.create_table(u'resource_goal_energygoalsetting', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['team_mgr.Team'])),
('goal_percent_reduction', self.gf('django.db.models.fields.IntegerField')(default=5)),
('baseline_method', self.gf('django.db.models.fields.CharField')(default='Dynamic', max_length=20)),
('data_storage', self.gf('django.db.models.fields.CharField')(default='Wattdepot', max_length=20, null=True, blank=True)),
('wattdepot_source_name', self.gf('django.db.models.fields.CharField')(default=None, max_length=100, null=True, blank=True)),
('goal_points', self.gf('django.db.models.fields.IntegerField')(default=20)),
('manual_entry', self.gf('django.db.models.fields.BooleanField')(default=False)),
('manual_entry_time', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
('realtime_meter_interval', self.gf('django.db.models.fields.IntegerField')(default=10)),
))
db.send_create_signal(u'resource_goal', ['EnergyGoalSetting'])
# Adding unique constraint on 'EnergyGoalSetting', fields ['team']
db.create_unique(u'resource_goal_energygoalsetting', ['team_id'])
# Adding model 'WaterGoalSetting'
db.create_table(u'resource_goal_watergoalsetting', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['team_mgr.Team'])),
('goal_percent_reduction', self.gf('django.db.models.fields.IntegerField')(default=5)),
('baseline_method', self.gf('django.db.models.fields.CharField')(default='Dynamic', max_length=20)),
('data_storage', self.gf('django.db.models.fields.CharField')(default='Wattdepot', max_length=20, null=True, blank=True)),
('wattdepot_source_name', self.gf('django.db.models.fields.CharField')(default=None, max_length=100, null=True, blank=True)),
('goal_points', self.gf('django.db.models.fields.IntegerField')(default=20)),
('manual_entry', self.gf('django.db.models.fields.BooleanField')(default=False)),
('manual_entry_time', self.gf('django.db.models.fields.TimeField')(null=True, blank=True)),
('realtime_meter_interval', self.gf('django.db.models.fields.IntegerField')(default=10)),
))
db.send_create_signal(u'resource_goal', ['WaterGoalSetting'])
# Adding unique constraint on 'WaterGoalSetting', fields ['team']
db.create_unique(u'resource_goal_watergoalsetting', ['team_id'])
# Adding model 'EnergyBaselineDaily'
db.create_table(u'resource_goal_energybaselinedaily', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['team_mgr.Team'])),
('day', self.gf('django.db.models.fields.IntegerField')()),
('usage', self.gf('django.db.models.fields.IntegerField')(default=0)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'resource_goal', ['EnergyBaselineDaily'])
# Adding model 'WaterBaselineDaily'
db.create_table(u'resource_goal_waterbaselinedaily', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['team_mgr.Team'])),
('day', self.gf('django.db.models.fields.IntegerField')()),
('usage', self.gf('django.db.models.fields.IntegerField')(default=0)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'resource_goal', ['WaterBaselineDaily'])
# Adding model 'EnergyBaselineHourly'
db.create_table(u'resource_goal_energybaselinehourly', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['team_mgr.Team'])),
('day', self.gf('django.db.models.fields.IntegerField')()),
('hour', self.gf('django.db.models.fields.IntegerField')()),
('usage', self.gf('django.db.models.fields.IntegerField')(default=0)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'resource_goal', ['EnergyBaselineHourly'])
# Adding model 'WaterBaselineHourly'
db.create_table(u'resource_goal_waterbaselinehourly', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['team_mgr.Team'])),
('day', self.gf('django.db.models.fields.IntegerField')()),
('hour', self.gf('django.db.models.fields.IntegerField')()),
('usage', self.gf('django.db.models.fields.IntegerField')(default=0)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'resource_goal', ['WaterBaselineHourly'])
def backwards(self, orm):
# Removing unique constraint on 'WaterGoalSetting', fields ['team']
db.delete_unique(u'resource_goal_watergoalsetting', ['team_id'])
# Removing unique constraint on 'EnergyGoalSetting', fields ['team']
db.delete_unique(u'resource_goal_energygoalsetting', ['team_id'])
# Removing unique constraint on 'WaterGoal', fields ['date', 'team']
db.delete_unique(u'resource_goal_watergoal', ['date', 'team_id'])
# Removing unique constraint on 'EnergyGoal', fields ['date', 'team']
db.delete_unique(u'resource_goal_energygoal', ['date', 'team_id'])
# Deleting model 'EnergyGoal'
db.delete_table(u'resource_goal_energygoal')
# Deleting model 'WaterGoal'
db.delete_table(u'resource_goal_watergoal')
# Deleting model 'EnergyGoalSetting'
db.delete_table(u'resource_goal_energygoalsetting')
# Deleting model 'WaterGoalSetting'
db.delete_table(u'resource_goal_watergoalsetting')
# Deleting model 'EnergyBaselineDaily'
db.delete_table(u'resource_goal_energybaselinedaily')
# Deleting model 'WaterBaselineDaily'
db.delete_table(u'resource_goal_waterbaselinedaily')
# Deleting model 'EnergyBaselineHourly'
db.delete_table(u'resource_goal_energybaselinehourly')
# Deleting model 'WaterBaselineHourly'
db.delete_table(u'resource_goal_waterbaselinehourly')
models = {
u'resource_goal.energybaselinedaily': {
'Meta': {'object_name': 'EnergyBaselineDaily'},
'day': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['team_mgr.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'usage': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'resource_goal.energybaselinehourly': {
'Meta': {'object_name': 'EnergyBaselineHourly'},
'day': ('django.db.models.fields.IntegerField', [], {}),
'hour': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['team_mgr.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'usage': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'resource_goal.energygoal': {
'Meta': {'ordering': "('-date', 'team')", 'unique_together': "(('date', 'team'),)", 'object_name': 'EnergyGoal'},
'actual_usage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'baseline_usage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'current_goal_percent_reduction': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date': ('django.db.models.fields.DateField', [], {}),
'goal_status': ('django.db.models.fields.CharField', [], {'default': "'Not available'", 'max_length': '20'}),
'goal_usage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent_reduction': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['team_mgr.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 11, 21, 0, 0)', 'auto_now': 'True', 'blank': 'True'})
},
u'resource_goal.energygoalsetting': {
'Meta': {'ordering': "('team',)", 'unique_together': "(('team',),)", 'object_name': 'EnergyGoalSetting'},
'baseline_method': ('django.db.models.fields.CharField', [], {'default': "'Dynamic'", 'max_length': '20'}),
'data_storage': ('django.db.models.fields.CharField', [], {'default': "'Wattdepot'", 'max_length': '20', 'null': 'True', 'blank': 'True'}),
'goal_percent_reduction': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'goal_points': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manual_entry': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'manual_entry_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'realtime_meter_interval': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['team_mgr.Team']"}),
'wattdepot_source_name': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'resource_goal.waterbaselinedaily': {
'Meta': {'object_name': 'WaterBaselineDaily'},
'day': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['team_mgr.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'usage': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'resource_goal.waterbaselinehourly': {
'Meta': {'object_name': 'WaterBaselineHourly'},
'day': ('django.db.models.fields.IntegerField', [], {}),
'hour': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['team_mgr.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'usage': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'resource_goal.watergoal': {
'Meta': {'ordering': "('-date', 'team')", 'unique_together': "(('date', 'team'),)", 'object_name': 'WaterGoal'},
'actual_usage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'baseline_usage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'current_goal_percent_reduction': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date': ('django.db.models.fields.DateField', [], {}),
'goal_status': ('django.db.models.fields.CharField', [], {'default': "'Not available'", 'max_length': '20'}),
'goal_usage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent_reduction': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['team_mgr.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 11, 21, 0, 0)', 'auto_now': 'True', 'blank': 'True'})
},
u'resource_goal.watergoalsetting': {
'Meta': {'ordering': "('team',)", 'unique_together': "(('team',),)", 'object_name': 'WaterGoalSetting'},
'baseline_method': ('django.db.models.fields.CharField', [], {'default': "'Dynamic'", 'max_length': '20'}),
'data_storage': ('django.db.models.fields.CharField', [], {'default': "'Wattdepot'", 'max_length': '20', 'null': 'True', 'blank': 'True'}),
'goal_percent_reduction': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'goal_points': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manual_entry': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'manual_entry_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'realtime_meter_interval': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['team_mgr.Team']"}),
'wattdepot_source_name': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'team_mgr.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'team_mgr.team': {
'Meta': {'ordering': "('group', 'name')", 'object_name': 'Team'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['team_mgr.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['resource_goal']
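# --- Illustrative sketch (not part of the migration above) ---
# The frozen ORM in this migration implies roughly the following Django model
# for EnergyGoal (WaterGoal mirrors it field for field). This is a
# reconstruction from the u'resource_goal.energygoal' entry above, not the
# project's actual models.py, and it assumes South-era Django 1.x, where
# ForeignKey does not require on_delete.
from django.db import models


class EnergyGoal(models.Model):
    """Assumed shape of the model this migration creates."""
    team = models.ForeignKey('team_mgr.Team')
    date = models.DateField()
    actual_usage = models.IntegerField(default=0)
    baseline_usage = models.IntegerField(default=0)
    goal_usage = models.IntegerField(default=0)
    percent_reduction = models.IntegerField(default=0)
    current_goal_percent_reduction = models.IntegerField(default=0)
    goal_status = models.CharField(default='Not available', max_length=20)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ('-date', 'team')
        unique_together = (('date', 'team'),)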
import math
from pandac.PandaModules import CollisionSphere, CollisionNode, Vec3, Point3, deg2Rad
from direct.interval.IntervalGlobal import Sequence, Func, Parallel, ActorInterval, Wait, LerpHprInterval, ProjectileInterval, LerpPosInterval
from direct.directnotify import DirectNotifyGlobal
from toontown.building import ElevatorConstants
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.safezone import DistributedGolfKart
from toontown.building import DistributedElevatorExt
from toontown.distributed import DelayDelete
from toontown.hood import ZoneUtil
from direct.showbase import PythonUtil
from toontown.building import BoardingGroupShow
class DistributedCogKart(DistributedElevatorExt.DistributedElevatorExt):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCogKart')
JumpOutOffsets = ((6.5, -2, -0.025),
(-6.5, -2, -0.025),
(3.75, 5, -0.025),
(-3.75, 5, -0.025))
def __init__(self, cr):
DistributedElevatorExt.DistributedElevatorExt.__init__(self, cr)
self.type = ElevatorConstants.ELEVATOR_COUNTRY_CLUB
self.kartModelPath = 'phase_12/models/bossbotHQ/Coggolf_cart3.bam'
self.leftDoor = None
self.rightDoor = None
self.fillSlotTrack = None
return
def generate(self):
DistributedElevatorExt.DistributedElevatorExt.generate(self)
self.loader = self.cr.playGame.hood.loader
if self.loader:
self.notify.debug('Loader has been loaded')
self.notify.debug(str(self.loader))
else:
self.notify.debug('Loader has not been loaded')
self.golfKart = render.attachNewNode('golfKartNode')
self.kart = loader.loadModel(self.kartModelPath)
self.kart.setPos(0, 0, 0)
self.kart.setScale(1)
self.kart.reparentTo(self.golfKart)
self.golfKart.reparentTo(self.loader.geom)
self.wheels = self.kart.findAllMatches('**/wheelNode*')
self.numWheels = self.wheels.getNumPaths()
def announceGenerate(self):
DistributedElevatorExt.DistributedElevatorExt.announceGenerate(self)
angle = self.startingHpr[0]
angle -= 90
radAngle = deg2Rad(angle)
unitVec = Vec3(math.cos(radAngle), math.sin(radAngle), 0)
unitVec *= 45.0
self.endPos = self.startingPos + unitVec
self.endPos.setZ(0.5)
dist = Vec3(self.endPos - self.enteringPos).length()
wheelAngle = dist / (4.8 * 1.4 * math.pi) * 360
self.kartEnterAnimateInterval = Parallel(LerpHprInterval(self.wheels[0], 5.0, Vec3(self.wheels[0].getH(), wheelAngle, self.wheels[0].getR())), LerpHprInterval(self.wheels[1], 5.0, Vec3(self.wheels[1].getH(), wheelAngle, self.wheels[1].getR())), LerpHprInterval(self.wheels[2], 5.0, Vec3(self.wheels[2].getH(), wheelAngle, self.wheels[2].getR())), LerpHprInterval(self.wheels[3], 5.0, Vec3(self.wheels[3].getH(), wheelAngle, self.wheels[3].getR())), name='CogKartAnimate')
trolleyExitTrack1 = Parallel(LerpPosInterval(self.golfKart, 5.0, self.endPos), self.kartEnterAnimateInterval, name='CogKartExitTrack')
self.trolleyExitTrack = Sequence(trolleyExitTrack1)
self.trolleyEnterTrack = Sequence(LerpPosInterval(self.golfKart, 5.0, self.startingPos, startPos=self.enteringPos))
self.closeDoors = Sequence(self.trolleyExitTrack, Func(self.onDoorCloseFinish))
self.openDoors = Sequence(self.trolleyEnterTrack)
def delete(self):
DistributedElevatorExt.DistributedElevatorExt.delete(self)
if hasattr(self, 'elevatorFSM'):
del self.elevatorFSM
def setBldgDoId(self, bldgDoId):
self.bldg = None
self.setupElevatorKart()
return
def setupElevatorKart(self):
collisionRadius = ElevatorConstants.ElevatorData[self.type]['collRadius']
self.elevatorSphere = CollisionSphere(0, 0, 0, collisionRadius)
self.elevatorSphere.setTangible(1)
self.elevatorSphereNode = CollisionNode(self.uniqueName('elevatorSphere'))
self.elevatorSphereNode.setIntoCollideMask(ToontownGlobals.WallBitmask)
self.elevatorSphereNode.addSolid(self.elevatorSphere)
self.elevatorSphereNodePath = self.getElevatorModel().attachNewNode(self.elevatorSphereNode)
self.elevatorSphereNodePath.hide()
self.elevatorSphereNodePath.reparentTo(self.getElevatorModel())
self.elevatorSphereNodePath.stash()
self.boardedAvIds = {}
self.finishSetup()
def setColor(self, r, g, b):
pass
def getElevatorModel(self):
return self.golfKart
def enterWaitEmpty(self, ts):
DistributedElevatorExt.DistributedElevatorExt.enterWaitEmpty(self, ts)
def exitWaitEmpty(self):
DistributedElevatorExt.DistributedElevatorExt.exitWaitEmpty(self)
def forceDoorsOpen(self):
pass
def forceDoorsClosed(self):
pass
def setPosHpr(self, x, y, z, h, p, r):
self.startingPos = Vec3(x, y, z)
self.enteringPos = Vec3(x, y, z - 10)
self.startingHpr = Vec3(h, 0, 0)
self.golfKart.setPosHpr(x, y, z, h, 0, 0)
def enterClosing(self, ts):
if self.localToonOnBoard:
elevator = self.getPlaceElevator()
if elevator:
elevator.fsm.request('elevatorClosing')
self.closeDoors.start(ts)
def enterClosed(self, ts):
self.forceDoorsClosed()
self.kartDoorsClosed(self.getZoneId())
def kartDoorsClosed(self, zoneId):
if self.localToonOnBoard:
hoodId = ZoneUtil.getHoodId(zoneId)
doneStatus = {'loader': 'suitInterior',
'where': 'suitInterior',
'hoodId': hoodId,
'zoneId': zoneId,
'shardId': None}
elevator = self.elevatorFSM
del self.elevatorFSM
elevator.signalDone(doneStatus)
return
def setCountryClubInteriorZone(self, zoneId):
if self.localToonOnBoard:
hoodId = self.cr.playGame.hood.hoodId
countryClubId = self.countryClubId
if bboard.has('countryClubIdOverride'):
countryClubId = bboard.get('countryClubIdOverride')
doneStatus = {'loader': 'cogHQLoader',
'where': 'countryClubInterior',
'how': 'teleportIn',
'zoneId': zoneId,
'countryClubId': countryClubId,
'hoodId': hoodId}
self.cr.playGame.getPlace().elevator.signalDone(doneStatus)
def setCountryClubInteriorZoneForce(self, zoneId):
place = self.cr.playGame.getPlace()
if place:
place.fsm.request('elevator', [self, 1])
hoodId = self.cr.playGame.hood.hoodId
countryClubId = self.countryClubId
if bboard.has('countryClubIdOverride'):
countryClubId = bboard.get('countryClubIdOverride')
doneStatus = {'loader': 'cogHQLoader',
'where': 'countryClubInterior',
'how': 'teleportIn',
'zoneId': zoneId,
'countryClubId': countryClubId,
'hoodId': hoodId}
if hasattr(place, 'elevator') and place.elevator:
place.elevator.signalDone(doneStatus)
else:
self.notify.warning("setMintInteriorZoneForce: Couldn't find playGame.getPlace().elevator, zoneId: %s" % zoneId)
else:
self.notify.warning("setCountryClubInteriorZoneForce: Couldn't find playGame.getPlace(), zoneId: %s" % zoneId)
def setCountryClubId(self, countryClubId):
self.countryClubId = countryClubId
def getZoneId(self):
return 0
def fillSlot(self, index, avId, wantBoardingShow = 0):
self.notify.debug('%s.fillSlot(%s, %s, ... %s)' % (self.doId,
index,
avId,
globalClock.getRealTime()))
request = self.toonRequests.get(index)
if request:
self.cr.relatedObjectMgr.abortRequest(request)
del self.toonRequests[index]
if avId == 0:
pass
elif not self.cr.doId2do.has_key(avId):
func = PythonUtil.Functor(self.gotToon, index, avId)
self.toonRequests[index] = self.cr.relatedObjectMgr.requestObjects([avId], allCallback=func)
elif not self.isSetup:
self.deferredSlots.append((index, avId, wantBoardingShow))
else:
if avId == base.localAvatar.getDoId():
place = base.cr.playGame.getPlace()
if not place:
return
elevator = self.getPlaceElevator()
if elevator is None:
place.fsm.request('elevator')
elevator = self.getPlaceElevator()
if not elevator:
return
self.localToonOnBoard = 1
if hasattr(localAvatar, 'boardingParty') and localAvatar.boardingParty:
localAvatar.boardingParty.forceCleanupInviteePanel()
localAvatar.boardingParty.forceCleanupInviterPanels()
if hasattr(base.localAvatar, 'elevatorNotifier'):
base.localAvatar.elevatorNotifier.cleanup()
cameraTrack = Sequence()
cameraTrack.append(Func(elevator.fsm.request, 'boarding', [self.getElevatorModel()]))
cameraTrack.append(Func(elevator.fsm.request, 'boarded'))
toon = self.cr.doId2do[avId]
toon.stopSmooth()
toon.wrtReparentTo(self.golfKart)
sitStartDuration = toon.getDuration('sit-start')
jumpTrack = self.generateToonJumpTrack(toon, index)
track = Sequence(jumpTrack, Func(toon.setAnimState, 'Sit', 1.0), Func(self.clearToonTrack, avId), name=toon.uniqueName('fillElevator'), autoPause=1)
if wantBoardingShow:
boardingTrack, boardingTrackType = self.getBoardingTrack(toon, index, True)
track = Sequence(boardingTrack, track)
if avId == base.localAvatar.getDoId():
cameraWaitTime = 2.5
if boardingTrackType == BoardingGroupShow.TRACK_TYPE_RUN:
cameraWaitTime = 0.5
cameraTrack = Sequence(Wait(cameraWaitTime), cameraTrack)
if self.canHideBoardingQuitBtn(avId):
track = Sequence(Func(localAvatar.boardingParty.groupPanel.disableQuitButton), track)
if avId == base.localAvatar.getDoId():
track = Parallel(cameraTrack, track)
track.delayDelete = DelayDelete.DelayDelete(toon, 'CogKart.fillSlot')
self.storeToonTrack(avId, track)
track.start()
self.fillSlotTrack = track
self.boardedAvIds[avId] = None
return
def generateToonJumpTrack(self, av, seatIndex):
av.pose('sit', 47)
hipOffset = av.getHipsParts()[2].getPos(av)
def getToonJumpTrack(av, seatIndex):
def getJumpDest(av = av, node = self.golfKart):
dest = Point3(0, 0, 0)
if hasattr(self, 'golfKart') and self.golfKart:
dest = Vec3(self.golfKart.getPos(av.getParent()))
seatNode = self.golfKart.find('**/seat' + str(seatIndex + 1))
dest += seatNode.getPos(self.golfKart)
dna = av.getStyle()
dest -= hipOffset
if seatIndex < 2:
dest.setY(dest.getY() + 2 * hipOffset.getY())
dest.setZ(dest.getZ() + 0.1)
else:
self.notify.warning('getJumpDest: invalid golfKart, returning (0,0,0)')
return dest
def getJumpHpr(av = av, node = self.golfKart):
hpr = Point3(0, 0, 0)
if hasattr(self, 'golfKart') and self.golfKart:
hpr = self.golfKart.getHpr(av.getParent())
if seatIndex < 2:
hpr.setX(hpr.getX() + 180)
else:
hpr.setX(hpr.getX())
angle = PythonUtil.fitDestAngle2Src(av.getH(), hpr.getX())
hpr.setX(angle)
else:
self.notify.warning('getJumpHpr invalid golfKart, returning (0,0,0)')
return hpr
toonJumpTrack = Parallel(ActorInterval(av, 'jump'), Sequence(Wait(0.43), Parallel(LerpHprInterval(av, hpr=getJumpHpr, duration=0.9), ProjectileInterval(av, endPos=getJumpDest, duration=0.9))))
return toonJumpTrack
def getToonSitTrack(av):
toonSitTrack = Sequence(ActorInterval(av, 'sit-start'), Func(av.loop, 'sit'))
return toonSitTrack
toonJumpTrack = getToonJumpTrack(av, seatIndex)
toonSitTrack = getToonSitTrack(av)
jumpTrack = Sequence(Parallel(toonJumpTrack, Sequence(Wait(1), toonSitTrack)))
return jumpTrack
def emptySlot(self, index, avId, bailFlag, timestamp, timeSent = 0):
if self.fillSlotTrack:
self.fillSlotTrack.finish()
self.fillSlotTrack = None
if avId == 0:
pass
elif not self.isSetup:
newSlots = []
for slot in self.deferredSlots:
if slot[0] != index:
newSlots.append(slot)
self.deferredSlots = newSlots
elif self.cr.doId2do.has_key(avId):
if bailFlag == 1 and hasattr(self, 'clockNode'):
if timestamp < self.countdownTime and timestamp >= 0:
self.countdown(self.countdownTime - timestamp)
else:
self.countdown(self.countdownTime)
toon = self.cr.doId2do[avId]
toon.stopSmooth()
sitStartDuration = toon.getDuration('sit-start')
jumpOutTrack = self.generateToonReverseJumpTrack(toon, index)
track = Sequence(jumpOutTrack, Func(self.notifyToonOffElevator, toon), Func(self.clearToonTrack, avId), name=toon.uniqueName('emptyElevator'), autoPause=1)
if self.canHideBoardingQuitBtn(avId):
track.append(Func(localAvatar.boardingParty.groupPanel.enableQuitButton))
track.append(Func(localAvatar.boardingParty.enableGoButton))
track.delayDelete = DelayDelete.DelayDelete(toon, 'CogKart.emptySlot')
self.storeToonTrack(toon.doId, track)
track.start()
if avId == base.localAvatar.getDoId():
messenger.send('exitElevator')
if avId in self.boardedAvIds:
del self.boardedAvIds[avId]
else:
self.notify.warning('toon: ' + str(avId) + " doesn't exist, and" + ' cannot exit the elevator!')
return
def generateToonReverseJumpTrack(self, av, seatIndex):
self.notify.debug('av.getH() = %s' % av.getH())
def getToonJumpTrack(av, destNode):
def getJumpDest(av = av, node = destNode):
dest = node.getPos(av.getParent())
dest += Vec3(*self.JumpOutOffsets[seatIndex])
return dest
def getJumpHpr(av = av, node = destNode):
hpr = node.getHpr(av.getParent())
hpr.setX(hpr.getX() + 180)
angle = PythonUtil.fitDestAngle2Src(av.getH(), hpr.getX())
hpr.setX(angle)
return hpr
toonJumpTrack = Parallel(ActorInterval(av, 'jump'), Sequence(Wait(0.1), Parallel(ProjectileInterval(av, endPos=getJumpDest, duration=0.9))))
return toonJumpTrack
toonJumpTrack = getToonJumpTrack(av, self.golfKart)
jumpTrack = Sequence(toonJumpTrack, Func(av.loop, 'neutral'), Func(av.wrtReparentTo, render))
return jumpTrack
def startCountdownClock(self, countdownTime, ts):
DistributedElevatorExt.DistributedElevatorExt.startCountdownClock(self, countdownTime, ts)
self.clock.setH(self.clock.getH() + 180)
def rejectBoard(self, avId, reason = 0):
print 'rejectBoard %s' % reason
if hasattr(base.localAvatar, 'elevatorNotifier'):
if reason == ElevatorConstants.REJECT_SHUFFLE:
base.localAvatar.elevatorNotifier.showMe(TTLocalizer.ElevatorHoppedOff)
elif reason == ElevatorConstants.REJECT_MINLAFF:
base.localAvatar.elevatorNotifier.showMe(TTLocalizer.KartMinLaff % self.minLaff)
elif reason == ElevatorConstants.REJECT_PROMOTION:
base.localAvatar.elevatorNotifier.showMe(TTLocalizer.BossElevatorRejectMessage)
elif reason == ElevatorConstants.REJECT_NOT_YET_AVAILABLE:
base.localAvatar.elevatorNotifier.showMe(TTLocalizer.NotYetAvailable)
doneStatus = {'where': 'reject'}
elevator = self.getPlaceElevator()
if elevator:
elevator.signalDone(doneStatus)
def getDestName(self):
if self.countryClubId == ToontownGlobals.BossbotCountryClubIntA:
return TTLocalizer.ElevatorBossBotCourse0
elif self.countryClubId == ToontownGlobals.BossbotCountryClubIntB:
return TTLocalizer.ElevatorBossBotCourse1
elif self.countryClubId == ToontownGlobals.BossbotCountryClubIntC:
return TTLocalizer.ElevatorBossBotCourse2
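# --- Illustrative sketch (not part of the class above) ---
# announceGenerate() above spins the kart wheels by converting the travel
# distance into degrees of rotation: dist / (4.8 * 1.4 * math.pi) * 360.
# The 4.8 * 1.4 factor appears to be the effective wheel diameter in model
# units, so the expression is simply distance / circumference * 360. A
# standalone version of that arithmetic (the helper name is made up):
import math


def wheel_rotation_degrees(distance, wheel_diameter=4.8 * 1.4):
    """Degrees a wheel of the given diameter turns while rolling `distance`."""
    circumference = wheel_diameter * math.pi
    return distance / circumference * 360.0

# For example, wheel_rotation_degrees(45.0) is roughly 767 degrees, a bit over
# two full revolutions for the kart's 45-unit drive out of the scene.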
#!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Colorizer Code is borrowed from Twisted:
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Unittest runner for Nova.
To run all tests
python run_tests.py
To run a single test:
python run_tests.py test_compute:ComputeTestCase.test_run_terminate
To run a single test module:
python run_tests.py test_compute
or
python run_tests.py api.test_wsgi
"""
import gettext
import heapq
import logging
import os
import unittest
import sys
import time
gettext.install('nova', unicode=1)
from nose import config
from nose import core
from nose import result
from proboscis import case
from proboscis import SkipTest
class _AnsiColorizer(object):
"""
A colorizer is an object that loosely wraps around a stream, allowing
callers to write text to the stream in a particular color.
Colorizer classes must implement C{supported()} and C{write(text, color)}.
"""
_colors = dict(black=30, red=31, green=32, yellow=33,
blue=34, magenta=35, cyan=36, white=37)
def __init__(self, stream):
self.stream = stream
def supported(cls, stream=sys.stdout):
"""
A class method that returns True if the current platform supports
coloring terminal output using this method. Returns False otherwise.
"""
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
except ImportError:
return False
else:
try:
try:
return curses.tigetnum("colors") > 2
except curses.error:
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
raise
# guess false in case of error
return False
supported = classmethod(supported)
def write(self, text, color):
"""
Write the given text to the stream in the given color.
@param text: Text to be written to the stream.
@param color: A string label for a color. e.g. 'red', 'white'.
"""
color = self._colors[color]
self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
class _Win32Colorizer(object):
"""
See _AnsiColorizer docstring.
"""
def __init__(self, stream):
from win32console import GetStdHandle, STD_OUT_HANDLE, \
FOREGROUND_RED, FOREGROUND_BLUE, FOREGROUND_GREEN, \
FOREGROUND_INTENSITY
red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN,
FOREGROUND_BLUE, FOREGROUND_INTENSITY)
self.stream = stream
self.screenBuffer = GetStdHandle(STD_OUT_HANDLE)
self._colors = {
'normal': red | green | blue,
'red': red | bold,
'green': green | bold,
'blue': blue | bold,
'yellow': red | green | bold,
'magenta': red | blue | bold,
'cyan': green | blue | bold,
'white': red | green | blue | bold
}
def supported(cls, stream=sys.stdout):
try:
import win32console
screenBuffer = win32console.GetStdHandle(
win32console.STD_OUT_HANDLE)
except ImportError:
return False
import pywintypes
try:
screenBuffer.SetConsoleTextAttribute(
win32console.FOREGROUND_RED |
win32console.FOREGROUND_GREEN |
win32console.FOREGROUND_BLUE)
except pywintypes.error:
return False
else:
return True
supported = classmethod(supported)
def write(self, text, color):
color = self._colors[color]
self.screenBuffer.SetConsoleTextAttribute(color)
self.stream.write(text)
self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
class _NullColorizer(object):
"""
See _AnsiColorizer docstring.
"""
def __init__(self, stream):
self.stream = stream
def supported(cls, stream=sys.stdout):
return True
supported = classmethod(supported)
def write(self, text, color):
self.stream.write(text)
def get_elapsed_time_color(elapsed_time):
if elapsed_time > 1.0:
return 'yellow'
elif elapsed_time > 0.25:
return 'cyan'
else:
return 'green'
class NovaTestResult(case.TestResult):
def __init__(self, *args, **kw):
self.show_elapsed = kw.pop('show_elapsed')
self.known_bugs = kw.pop('known_bugs', {})
super(NovaTestResult, self).__init__(*args, **kw)
self.num_slow_tests = 5
self.slow_tests = [] # this is a fixed-sized heap
self._last_case = None
self.colorizer = None
# NOTE(vish): reset stdout for the terminal check
stdout = sys.stdout
sys.stdout = sys.__stdout__
for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
if colorizer.supported():
self.colorizer = colorizer(self.stream)
break
sys.stdout = stdout
# NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
# error results in it failing to be initialized later. Otherwise,
# _handleElapsedTime will fail, causing the wrong error message to
# be outputted.
self.start_time = time.time()
def _intercept_known_bugs(self, test, err):
name = str(test)
excuse = self.known_bugs.get(name, None)
if excuse:
tracker_id, error_string = excuse
if error_string in str(err[1]):
skip = SkipTest("KNOWN BUG: %s\n%s"
% (tracker_id, str(err[1])))
self.onError(test)
super(NovaTestResult, self).addSkip(test, skip)
else:
result = (RuntimeError, RuntimeError(
'Test "%s" contains known bug %s.\n'
'Expected the following error string:\n%s\n'
'What was seen was the following:\n%s\n'
'If the bug is no longer happening, please change '
'the test config.'
% (name, tracker_id, error_string, str(err))), None)
self.onError(test)
super(NovaTestResult, self).addError(test, result)
return True
return False
def getDescription(self, test):
return str(test)
def _handleElapsedTime(self, test):
self.elapsed_time = time.time() - self.start_time
item = (self.elapsed_time, test)
# Record only the n-slowest tests using heap
if len(self.slow_tests) >= self.num_slow_tests:
heapq.heappushpop(self.slow_tests, item)
else:
heapq.heappush(self.slow_tests, item)
def _writeElapsedTime(self, test):
color = get_elapsed_time_color(self.elapsed_time)
self.colorizer.write(" %.2f" % self.elapsed_time, color)
def _writeResult(self, test, long_result, color, short_result, success):
if self.showAll:
self.colorizer.write(long_result, color)
if self.show_elapsed and success:
self._writeElapsedTime(test)
self.stream.writeln()
elif self.dots:
self.stream.write(short_result)
self.stream.flush()
# NOTE(vish): copied from unittest with edit to add color
def addSuccess(self, test):
if self._intercept_known_bugs(test, None):
return
unittest.TestResult.addSuccess(self, test)
self._handleElapsedTime(test)
self._writeResult(test, 'OK', 'green', '.', True)
# NOTE(vish): copied from unittest with edit to add color
def addFailure(self, test, err):
if self._intercept_known_bugs(test, err):
return
self.onError(test)
unittest.TestResult.addFailure(self, test, err)
self._handleElapsedTime(test)
self._writeResult(test, 'FAIL', 'red', 'F', False)
# NOTE(vish): copied from nose with edit to add color
def addError(self, test, err):
"""Overrides normal addError to add support for
errorClasses. If the exception is a registered class, the
error will be added to the list for that class, not errors.
"""
if self._intercept_known_bugs(test, err):
return
self.onError(test)
self._handleElapsedTime(test)
stream = getattr(self, 'stream', None)
ec, ev, tb = err
try:
exc_info = self._exc_info_to_string(err, test)
except TypeError:
# 2.3 compat
exc_info = self._exc_info_to_string(err)
for cls, (storage, label, isfail) in self.errorClasses.items():
if result.isclass(ec) and issubclass(ec, cls):
if isfail:
test.passed = False
storage.append((test, exc_info))
# Might get patched into a streamless result
if stream is not None:
if self.showAll:
message = [label]
detail = result._exception_detail(err[1])
if detail:
message.append(detail)
stream.writeln(": ".join(message))
elif self.dots:
stream.write(label[:1])
return
self.errors.append((test, exc_info))
test.passed = False
if stream is not None:
self._writeResult(test, 'ERROR', 'red', 'E', False)
@staticmethod
def get_doc(cls_or_func):
"""Grabs the doc abbreviated doc string."""
try:
return cls_or_func.__doc__.split("\n")[0].strip()
except (AttributeError, IndexError):
return None
def startTest(self, test):
unittest.TestResult.startTest(self, test)
self.start_time = time.time()
test_name = None
try:
entry = test.test.__proboscis_case__.entry
if entry.method:
current_class = entry.method.im_class
test_name = self.get_doc(entry.home) or entry.home.__name__
else:
current_class = entry.home
except AttributeError:
current_class = test.test.__class__
if self.showAll:
if current_class.__name__ != self._last_case:
self.stream.writeln(current_class.__name__)
self._last_case = current_class.__name__
try:
doc = self.get_doc(current_class)
except (AttributeError, IndexError):
doc = None
if doc:
self.stream.writeln(' ' + doc)
if not test_name:
if hasattr(test.test, 'shortDescription'):
test_name = test.test.shortDescription()
if not test_name:
test_name = test.test._testMethodName
self.stream.write('\t%s' % str(test_name).ljust(60))
self.stream.flush()
class NovaTestRunner(core.TextTestRunner):
def __init__(self, *args, **kwargs):
self.show_elapsed = kwargs.pop('show_elapsed')
self.known_bugs = kwargs.pop('known_bugs', {})
self.__result = None
self.__finished = False
self.__start_time = None
super(NovaTestRunner, self).__init__(*args, **kwargs)
def _makeResult(self):
self.__result = NovaTestResult(
self.stream,
self.descriptions,
self.verbosity,
self.config,
show_elapsed=self.show_elapsed,
known_bugs=self.known_bugs)
self.__start_time = time.time()
return self.__result
def _writeSlowTests(self, result_):
# Pare out 'fast' tests
slow_tests = [item for item in result_.slow_tests
if get_elapsed_time_color(item[0]) != 'green']
if slow_tests:
slow_total_time = sum(item[0] for item in slow_tests)
self.stream.writeln("Slowest %i tests took %.2f secs:"
% (len(slow_tests), slow_total_time))
for elapsed_time, test in sorted(slow_tests, reverse=True):
time_str = "%.2f" % elapsed_time
self.stream.writeln(" %s %s" % (time_str.ljust(10), test))
def on_exit(self):
if self.__result is None:
print("Exiting before tests even started.")
else:
if not self.__finished:
msg = "Tests aborted, trying to print available results..."
print(msg)
stop_time = time.time()
self.__result.printErrors()
self.__result.printSummary(self.__start_time, stop_time)
self.config.plugins.finalize(self.__result)
if self.show_elapsed:
self._writeSlowTests(self.__result)
def run(self, test):
result_ = super(NovaTestRunner, self).run(test)
if self.show_elapsed:
self._writeSlowTests(result_)
self.__finished = True
return result_
if __name__ == '__main__':
logging.basicConfig()  # configure root logging before running the suite
# If any argument looks like a test name but doesn't have the test fixture
# package (e.g. "trove.tests") in front of it, automatically add that so we
# don't have to type as much
show_elapsed = True
argv = []
test_fixture = os.getenv("UNITTEST_FIXTURE", "trove")
for x in sys.argv:
if x.startswith('test_'):
argv.append('%s.tests.%s' % (test_fixture, x))
elif x.startswith('--hide-elapsed'):
show_elapsed = False
else:
argv.append(x)
testdir = os.path.abspath(os.path.join(test_fixture, "tests"))
c = config.Config(stream=sys.stdout,
env=os.environ,
verbosity=3,
workingDir=testdir,
plugins=core.DefaultPluginManager())
runner = NovaTestRunner(stream=c.stream,
verbosity=c.verbosity,
config=c,
show_elapsed=show_elapsed)
sys.exit(not core.run(config=c, testRunner=runner, argv=argv))
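# --- Illustrative sketch (not part of the runner above) ---
# NovaTestResult._handleElapsedTime() keeps only the num_slow_tests slowest
# tests by pushing (elapsed_time, test) pairs onto a fixed-size min-heap:
# once the heap is full, heappushpop() evicts the smallest entry on every
# push, so only the N largest elapsed times survive for _writeSlowTests().
# A tiny standalone version of that pattern (the helper name is made up):
import heapq


def n_slowest(timings, n=5):
    """Return the n largest (elapsed, name) pairs, slowest first.

    >>> n_slowest([(0.02, 'a'), (1.7, 'b'), (0.4, 'c'), (3.1, 'd')], n=2)
    [(3.1, 'd'), (1.7, 'b')]
    """
    heap = []
    for item in timings:
        if len(heap) >= n:
            heapq.heappushpop(heap, item)
        else:
            heapq.heappush(heap, item)
    return sorted(heap, reverse=True)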
# -*- coding: utf-8 -*-
from django.apps import apps
from django.contrib.postgres import fields
from django.contrib.auth.models import Group
from typedmodels.models import TypedModel
from api.taxonomies.utils import optimize_subject_query
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from dirtyfields import DirtyFieldsMixin
from api.providers.permissions import GroupHelper, PERMISSIONS, GROUP_FORMAT, GROUPS
from framework import sentry
from osf.models.base import BaseModel, ObjectIDMixin
from osf.models.licenses import NodeLicense
from osf.models.mixins import ReviewProviderMixin
from osf.models.subject import Subject
from osf.models.notifications import NotificationSubscription
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.utils.fields import EncryptedTextField
from website import settings
from website.util import api_v2_url
class AbstractProvider(TypedModel, ObjectIDMixin, ReviewProviderMixin, DirtyFieldsMixin, BaseModel):
name = models.CharField(null=False, max_length=128) # max length on prod: 22
advisory_board = models.TextField(default='', blank=True)
description = models.TextField(default='', blank=True)
domain = models.URLField(blank=True, default='', max_length=200)
domain_redirect_enabled = models.BooleanField(default=False)
external_url = models.URLField(null=True, blank=True, max_length=200) # max length on prod: 25
email_contact = models.CharField(null=True, blank=True, max_length=200) # max length on prod: 23
email_support = models.CharField(null=True, blank=True, max_length=200) # max length on prod: 23
social_twitter = models.CharField(null=True, blank=True, max_length=200) # max length on prod: 8
social_facebook = models.CharField(null=True, blank=True, max_length=200) # max length on prod: 8
social_instagram = models.CharField(null=True, blank=True, max_length=200) # max length on prod: 8
footer_links = models.TextField(default='', blank=True)
facebook_app_id = models.BigIntegerField(blank=True, null=True)
example = models.CharField(null=True, blank=True, max_length=20) # max length on prod: 5
licenses_acceptable = models.ManyToManyField(NodeLicense, blank=True, related_name='licenses_acceptable')
default_license = models.ForeignKey(NodeLicense, related_name='default_license',
null=True, blank=True, on_delete=models.CASCADE)
allow_submissions = models.BooleanField(default=True)
allow_commenting = models.BooleanField(default=False)
def __repr__(self):
return ('(name={self.name!r}, default_license={self.default_license!r}, '
'allow_submissions={self.allow_submissions!r}) with id {self.id!r}').format(self=self)
def __unicode__(self):
return '[{}] {} - {}'.format(self.readable_type, self.name, self.id)
@property
def all_subjects(self):
return self.subjects.all()
@property
def has_highlighted_subjects(self):
return self.subjects.filter(highlighted=True).exists()
@property
def highlighted_subjects(self):
if self.has_highlighted_subjects:
return self.subjects.filter(highlighted=True).order_by('text')[:10]
else:
return sorted(self.top_level_subjects, key=lambda s: s.text)[:10]
@property
def top_level_subjects(self):
if self.subjects.exists():
return optimize_subject_query(self.subjects.filter(parent__isnull=True))
return optimize_subject_query(Subject.objects.filter(parent__isnull=True, provider___id='osf'))
@property
def readable_type(self):
raise NotImplementedError
class CollectionProvider(AbstractProvider):
primary_collection = models.ForeignKey('Collection', related_name='+',
null=True, blank=True, on_delete=models.SET_NULL)
class Meta:
permissions = (
# custom permissions for use in the OSF Admin App
('view_collectionprovider', 'Can view collection provider details'),
)
@property
def readable_type(self):
return 'collection'
def get_absolute_url(self):
return self.absolute_api_v2_url
@property
def absolute_api_v2_url(self):
path = '/providers/collections/{}/'.format(self._id)
return api_v2_url(path)
class PreprintProvider(AbstractProvider):
PUSH_SHARE_TYPE_CHOICES = (('Preprint', 'Preprint'),
('Thesis', 'Thesis'),)
PUSH_SHARE_TYPE_HELP = 'This SHARE type will be used when pushing publications to SHARE'
REVIEWABLE_RELATION_NAME = 'preprint_services'
share_publish_type = models.CharField(choices=PUSH_SHARE_TYPE_CHOICES,
default='Preprint',
help_text=PUSH_SHARE_TYPE_HELP,
max_length=32)
share_source = models.CharField(blank=True, max_length=200)
share_title = models.TextField(default='', blank=True)
additional_providers = fields.ArrayField(models.CharField(max_length=200), default=list, blank=True)
access_token = EncryptedTextField(null=True, blank=True)
doi_prefix = models.CharField(blank=True, max_length=32)
PREPRINT_WORD_CHOICES = (
('preprint', 'Preprint'),
('paper', 'Paper'),
('thesis', 'Thesis'),
('work', 'Work'),
('none', 'None')
)
preprint_word = models.CharField(max_length=10, choices=PREPRINT_WORD_CHOICES, default='preprint')
subjects_acceptable = DateTimeAwareJSONField(blank=True, default=list)
class Meta:
permissions = tuple(PERMISSIONS.items()) + (
# custom permissions for use in the OSF Admin App
('view_preprintprovider', 'Can view preprint provider details'),
)
@property
def readable_type(self):
return 'preprint'
@property
def all_subjects(self):
if self.subjects.exists():
return self.subjects.all()
else:
# TODO: Delete this when all PreprintProviders have a mapping
return rules_to_subjects(self.subjects_acceptable)
@property
def has_highlighted_subjects(self):
return self.subjects.filter(highlighted=True).exists()
@property
def highlighted_subjects(self):
if self.has_highlighted_subjects:
return self.subjects.filter(highlighted=True).order_by('text')[:10]
else:
return sorted(self.top_level_subjects, key=lambda s: s.text)[:10]
@property
def top_level_subjects(self):
if self.subjects.exists():
return optimize_subject_query(self.subjects.filter(parent__isnull=True))
else:
# TODO: Delete this when all PreprintProviders have a mapping
if len(self.subjects_acceptable) == 0:
return optimize_subject_query(Subject.objects.filter(parent__isnull=True, provider___id='osf'))
tops = set([sub[0][0] for sub in self.subjects_acceptable])
return [Subject.load(sub) for sub in tops]
@property
def landing_url(self):
return self.domain if self.domain else '{}preprints/{}'.format(settings.DOMAIN, self._id)
def get_absolute_url(self):
return '{}preprint_providers/{}'.format(self.absolute_api_v2_url, self._id)
@property
def absolute_api_v2_url(self):
path = '/providers/preprints/{}/'.format(self._id)
return api_v2_url(path)
def save(self, *args, **kwargs):
dirty_fields = self.get_dirty_fields()
old_id = dirty_fields.get('_id', None)
if old_id:
for permission_type in GROUPS.keys():
Group.objects.filter(
name=GROUP_FORMAT.format(provider_id=old_id, group=permission_type)
).update(
name=GROUP_FORMAT.format(provider_id=self._id, group=permission_type)
)
return super(PreprintProvider, self).save(*args, **kwargs)
def rules_to_subjects(rules):
if not rules:
return Subject.objects.filter(provider___id='osf')
q = []
for rule in rules:
parent_from_rule = Subject.load(rule[0][-1])
if rule[1]:
q.append(models.Q(parent=parent_from_rule))
if len(rule[0]) == 1:
potential_parents = Subject.objects.filter(parent=parent_from_rule)
for parent in potential_parents:
q.append(models.Q(parent=parent))
for sub in rule[0]:
q.append(models.Q(_id=sub))
return Subject.objects.filter(reduce(lambda x, y: x | y, q)) if len(q) > 1 else (Subject.objects.filter(q[0]) if len(q) else Subject.objects.all())
@receiver(post_save, sender=PreprintProvider)
def create_provider_auth_groups(sender, instance, created, **kwargs):
if created:
GroupHelper(instance).update_provider_auth_groups()
@receiver(post_save, sender=PreprintProvider)
def create_provider_notification_subscriptions(sender, instance, created, **kwargs):
if created:
NotificationSubscription.objects.get_or_create(
_id='{provider_id}_new_pending_submissions'.format(provider_id=instance._id),
event_name='new_pending_submissions',
provider=instance
)
@receiver(post_save, sender=CollectionProvider)
def create_primary_collection_for_provider(sender, instance, created, **kwargs):
if created:
Collection = apps.get_model('osf.Collection')
user = getattr(instance, '_creator', None) # Temp attr set in admin view
if user:
c = Collection(
title='{}\'s Collection'.format(instance.name),
creator=user,
provider=instance,
is_promoted=True,
is_public=True
)
c.save()
instance.primary_collection = c
instance.save()
else:
# A user is required for Collections / Groups
sentry.log_message('Unable to create primary_collection for CollectionProvider {}'.format(instance.name))
class WhitelistedSHAREPreprintProvider(BaseModel):
id = models.AutoField(primary_key=True)
provider_name = models.CharField(unique=True, max_length=200)
def __unicode__(self):
return self.provider_name
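# --- Illustrative sketch (not part of the models above) ---
# rules_to_subjects() collects a list of Q filters and ORs them together with
# reduce(lambda x, y: x | y, q). Building and combining Q objects needs nothing
# beyond Django being installed; actually filtering requires a configured OSF
# environment. The helper name and the example _id values below are made up.
from functools import reduce
import operator

from django.db.models import Q


def combine_or(filters):
    """OR together a non-empty list of Q objects into a single Q."""
    return reduce(operator.or_, filters)

# e.g. combine_or([Q(parent___id='abc12'), Q(_id='def34'), Q(_id='ghi56')])
# could then be passed to Subject.objects.filter(...) in place of the reduce
# expression used above.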
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_software_update
short_description: Manage the software update settings of a BIG-IP.
description:
- Manage the software update settings of a BIG-IP.
version_added: "2.4"
options:
auto_check:
description:
- Specifies whether to automatically check for updates on the F5
Networks downloads server.
required: False
default: None
choices:
- yes
- no
frequency:
description:
- Specifies the schedule for the automatic update check.
required: False
default: None
choices:
- daily
- monthly
- weekly
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk >= 2.2.3
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
'''
RETURN = '''
'''
from ansible.module_utils.basic import BOOLEANS
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
api_map = {
'autoCheck': 'auto_check'
}
# REST attribute names consumed by api_params(); inferred from api_map and
# the updatables list below.
api_attributes = [
'autoCheck', 'frequency'
]
updatables = [
'auto_check', 'frequency'
]
returnables = [
'auto_check', 'frequency'
]
@property
def auto_check(self):
if self._values['auto_check'] is None:
return None
elif self._values['auto_check'] in [True, 'enabled']:
return 'enabled'
else:
return 'disabled'
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def exec_module(self):
result = dict()
try:
changed = self.update()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.sys.software.update.load()
result.modify(**params)
def read_current_from_device(self):
resource = self.client.api.tm.sys.software.update.load()
result = resource.attrs
return Parameters(result)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
auto_check=dict(
required=False,
default=None,
choices=BOOLEANS,
type='bool'
),
frequency=dict(
required=False,
default=None,
choices=['daily', 'monthly', 'weekly']
)
)
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
if __name__ == '__main__':
main()
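# --- Illustrative sketch (not part of the module above) ---
# ModuleManager._update_changed_options() above is a desired-vs-current diff:
# for each updatable key, a change is recorded only when the desired ("want")
# value is actually set and differs from the current ("have") value read from
# the device. The same idea with plain dicts (helper name and values made up):
def changed_options(want, have, updatables):
    """Return {key: desired} for keys whose desired value is set and differs.

    >>> changed_options({'auto_check': 'enabled', 'frequency': None},
    ...                 {'auto_check': 'disabled', 'frequency': 'weekly'},
    ...                 ['auto_check', 'frequency'])
    {'auto_check': 'enabled'}
    """
    changed = {}
    for key in updatables:
        desired = want.get(key)
        if desired is not None and desired != have.get(key):
            changed[key] = desired
    return changed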
from __future__ import absolute_import
import os.path
import random
import pytz
import six
from uuid import uuid4
from datetime import datetime, timedelta
from django.utils import timezone
from sentry.constants import DATA_ROOT, INTEGRATION_ID_TO_PLATFORM_DATA
from sentry.event_manager import EventManager
from sentry.interfaces.user import User as UserInterface
from sentry.utils import json
from sentry.utils.dates import to_timestamp
from sentry.utils.canonical import CanonicalKeyDict
epoch = datetime.utcfromtimestamp(0)
def milliseconds_ago(now, milliseconds):
ago = now - timedelta(milliseconds=milliseconds)
return (ago - epoch).total_seconds()
def random_ip():
not_valid = [10, 127, 169, 172, 192]
first = random.randrange(1, 256)
while first in not_valid:
first = random.randrange(1, 256)
return ".".join(
(
six.text_type(first),
six.text_type(random.randrange(1, 256)),
six.text_type(random.randrange(1, 256)),
six.text_type(random.randrange(1, 256)),
)
)
def random_geo():
return random.choice(
[
{"country_code": "US", "region": "CA", "city": "San Francisco"},
{"country_code": "AU", "region": "VIC", "city": "Melbourne"},
{"country_code": "GB", "region": "H9", "city": "London"},
]
)
def random_username():
return random.choice(
[
"jess",
"david",
"chris",
"eric",
"katie",
"ben",
"armin",
"saloni",
"max",
"meredith",
"matt",
"sentry",
]
)
def name_for_username(username):
return {
"ben": "Ben Vinegar",
"chris": "Chris Jennings",
"david": "David Cramer",
"matt": "Matt Robenolt",
"jess": "Jess MacQueen",
"katie": "Katie Lundsgaard",
"saloni": "Saloni Dudziak",
"max": "Max Bittker",
"meredith": "Meredith Heller",
"eric": "Eric Feng",
"armin": "Armin Ronacher",
}.get(username, username.replace("_", " ").title())
def generate_user(username=None, email=None, ip_address=None, id=None):
if username is None and email is None:
username = random_username()
email = u"{}@example.com".format(username)
return UserInterface.to_python(
{
"id": id,
"username": username,
"email": email,
"ip_address": ip_address or random_ip(),
"name": name_for_username(username),
"geo": random_geo(),
}
).to_json()
def load_data(
platform,
default=None,
sample_name=None,
timestamp=None,
start_timestamp=None,
trace=None,
span=None,
):
# NOTE: Before editing this data, make sure you understand the context
# in which it's being used. It is NOT only used for local development and
# has production consequences.
# * bin/load-mocks to generate fake data for local testing
# * When a new project is created, a fake event is generated as a "starter"
# event so it's not an empty project.
# * When a user clicks Test Configuration from notification plugin settings page,
# a fake event is generated to go through the pipeline.
data = None
language = None
platform_data = INTEGRATION_ID_TO_PLATFORM_DATA.get(platform)
if platform_data is not None and platform_data["type"] != "language":
language = platform_data["language"]
samples_root = os.path.join(DATA_ROOT, "samples")
all_samples = set(f for f in os.listdir(samples_root) if f.endswith(".json"))
for platform in (platform, language, default):
if not platform:
continue
# Verify the file is within our samples folder; this explicit check
# prevents a name from traversing into other directories.
json_path = u"{}.json".format(platform)
if json_path not in all_samples:
continue
if not sample_name:
try:
sample_name = INTEGRATION_ID_TO_PLATFORM_DATA[platform]["name"]
except KeyError:
pass
# XXX: At this point, it's assumed that `json_path` was safely found
# within `samples_root` due to the check above and cannot traverse
# into paths.
with open(os.path.join(samples_root, json_path)) as fp:
data = json.load(fp)
break
if data is None:
return
data = CanonicalKeyDict(data)
if platform in ("csp", "hkpk", "expectct", "expectstaple"):
return data
# Generate a timestamp in the present.
if timestamp is None:
timestamp = timezone.now()
else:
timestamp = timestamp.replace(tzinfo=pytz.utc)
data.setdefault("timestamp", to_timestamp(timestamp))
if data.get("type") == "transaction":
if start_timestamp is None:
start_timestamp = timestamp - timedelta(seconds=3)
else:
start_timestamp = start_timestamp.replace(tzinfo=pytz.utc)
data["start_timestamp"] = to_timestamp(start_timestamp)
if trace is None:
trace = uuid4().hex
if span is None:
span = uuid4().hex[:16]
for tag in data["tags"]:
if tag[0] == "trace":
tag[1] = trace
elif tag[0] == "trace.ctx":
tag[1] = trace + "-" + span
elif tag[0] == "trace.span":
tag[1] = span
data["contexts"]["trace"]["trace_id"] = trace
data["contexts"]["trace"]["span_id"] = span
for span in data.get("spans", []):
# Use span data to generate span timestamps consistently, based on
# the event timestamp
duration = span.get("data", {}).get("duration", 10.0)
offset = span.get("data", {}).get("offset", 0)
span_start = data["start_timestamp"] + offset
span["trace_id"] = trace
span.setdefault("start_timestamp", span_start)
span.setdefault("timestamp", span_start + duration)
measurements = data.get("measurements")
if measurements:
measurement_markers = {}
for key, entry in measurements.items():
if key in ["fp", "fcp", "lcp", "fid"]:
measurement_markers["mark.{}".format(key)] = {
"value": round(data["start_timestamp"] + entry["value"] / 1000, 3)
}
measurements.update(measurement_markers)
data["platform"] = platform
# XXX: Message is a legacy alias for logentry. Do not overwrite if set.
if "message" not in data:
data["message"] = "This is an example %s exception" % (sample_name or platform,)
data.setdefault(
"user",
generate_user(ip_address="127.0.0.1", username="sentry", id=1, email="sentry@example.com"),
)
data.setdefault(
"extra",
{
"session": {"foo": "bar"},
"results": [1, 2, 3, 4, 5],
"emptyList": [],
"emptyMap": {},
"length": 10837790,
"unauthorized": False,
"url": "http://example.org/foo/bar/",
},
)
data.setdefault("modules", {"my.package": "1.0.0"})
data.setdefault(
"request",
{
"cookies": "foo=bar;biz=baz",
"url": "http://example.com/foo",
"headers": {
"Referer": "http://example.com",
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36",
},
"env": {"ENV": "prod"},
"query_string": "foo=bar",
"data": '{"hello": "world"}',
"method": "GET",
},
)
return data
def create_sample_event(
project,
platform=None,
default=None,
raw=True,
sample_name=None,
timestamp=None,
start_timestamp=None,
trace=None,
**kwargs
):
if not platform and not default:
return
data = load_data(platform, default, sample_name, timestamp, start_timestamp, trace)
if not data:
return
data.update(kwargs)
manager = EventManager(data)
manager.normalize()
return manager.save(project.id, raw=raw)
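# --- Illustrative sketch (not part of the original module) ---
# A minimal sketch of how the helpers above are typically wired together,
# assuming a configured Django/Sentry environment and an existing `project`
# instance (both assumptions; neither is set up here).
def _example_create_python_sample(project):
    # load_data() reads samples/python.json (falling back to `default`),
    # stamps it with a fresh timestamp and a generated user, and
    # create_sample_event() normalizes and saves it through EventManager.
    return create_sample_event(
        project,
        platform="python",
        default="python",
        sample_name="Python",
    )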
|
|
import os
from tempfile import mkdtemp
from contextlib import contextmanager
from fabric.operations import put
from fabric.api import env, local, sudo, run, cd, prefix, task, settings
env.roledefs = {
'staging_prep': ['[email protected]']
}
STAGING_HOST = 'staging.centralfitestoque.com'
CHEF_VERSION = '10.20.0'
env.root_dir = '/opt/centralfitestoque/apps/centralfitestoque'
env.venvs = '/opt/centralfitestoque/venvs'
env.virtualenv = '%s/centralfitestoque' % env.venvs
env.activate = 'source %s/bin/activate ' % env.virtualenv
env.code_dir = '%s/src' % env.root_dir
env.media_dir = '%s/media' % env.root_dir
@contextmanager
def _virtualenv():
with prefix(env.activate):
yield
def _manage_py(command):
run('python manage.py %s --settings=centralfitestoque.settings_server'
% command)
@task
def install_chef(latest=True):
"""
Install chef-solo on the server
"""
sudo('apt-get update', pty=True)
sudo('apt-get install -y git-core rubygems ruby ruby-dev', pty=True)
if latest:
sudo('gem install chef --no-ri --no-rdoc', pty=True)
else:
sudo('gem install chef --no-ri --no-rdoc --version {0}'.format(CHEF_VERSION), pty=True)
sudo('gem uninstall --no-all --no-executables --no-ignore-dependencies json')
sudo('gem install json --version 1.7.6')
def parse_ssh_config(text):
"""
Parse an ssh-config output into a Python dict.
Because Windows doesn't have grep, lol.
"""
try:
lines = text.split('\n')
lists = [l.split(' ') for l in lines]
lists = [filter(None, l) for l in lists]
tuples = [(l[0], ''.join(l[1:]).strip().strip('\r')) for l in lists]
return dict(tuples)
except IndexError:
raise Exception("Malformed input")
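# --- Illustrative sketch (not part of the original fabfile) ---
# parse_ssh_config() expects the raw output of `vagrant ssh-config`; the
# sample text below is a hypothetical, trimmed example of that output.
def _example_parse_ssh_config():
    sample = (
        "Host default\n"
        "  HostName 127.0.0.1\n"
        "  User vagrant\n"
        "  Port 2222\n"
        '  IdentityFile "/home/user/.vagrant.d/insecure_private_key"'
    )
    data = parse_ssh_config(sample)
    # data['Port'] == '2222'; data['IdentityFile'].strip('"') gives the key path
    return data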
def set_env_for_user(user='centralfitestoque'):
if user == 'vagrant':
# set ssh key file for vagrant
result = local('vagrant ssh-config', capture=True)
data = parse_ssh_config(result)
try:
env.host_string = 'vagrant@127.0.0.1:%s' % data['Port']
env.key_filename = data['IdentityFile'].strip('"')
except KeyError:
raise Exception("Missing data from ssh-config")
@task
def up():
"""
Provision with Chef 11 instead of the default.
1. Bring up VM without provisioning
2. Remove all Chef and Moneta
3. Install latest Chef
4. Reload VM to recreate shared folders
5. Provision
"""
local('vagrant up --no-provision')
set_env_for_user('vagrant')
sudo('gem uninstall --no-all --no-executables --no-ignore-dependencies chef moneta')
install_chef(latest=False)
local('vagrant reload')
local('vagrant provision')
@task
def bootstrap():
set_env_for_user('vagrant')
# Bootstrap
run('test -e %s || ln -s /vagrant/src %s' % (env.code_dir, env.code_dir))
with cd(env.code_dir):
with _virtualenv():
run('pip install -r requirements.txt')
_manage_py('syncdb --noinput')
_manage_py('migrate')
_manage_py('createsuperuser')
@task
def push():
"""
Update application code on the server
"""
with settings(warn_only=True):
remote_result = local('git remote | grep %s' % env.remote)
if not remote_result.succeeded:
local('git remote add %s ssh://%s@%s:%s/opt/centralfitestoque/apps/centralfitestoque' %
(env.remote, env.user, env.host, env.port,))
result = local("git push %s %s" % (env.remote, env.branch))
# if push didn't work, the repository probably doesn't exist
# 1. create an empty repo
# 2. push to it with -u
# 3. retry
# 4. profit
if not result.succeeded:
# result2 = run("ls %s" % env.code_dir)
# if not result2.succeeded:
# run('mkdir %s' % env.code_dir)
with cd(env.root_dir):
run("git init")
run("git config --bool receive.denyCurrentBranch false")
local("git push %s -u %s" % (env.remote, env.branch))
with cd(env.root_dir):
# Really, git? Really?
run('git reset HEAD')
run('git checkout .')
run('git checkout %s' % env.branch)
sudo('chown -R www-data:deploy *')
sudo('chown -R www-data:deploy /opt/centralfitestoque/venvs')
sudo('chmod -R 0770 *')
@task
def deploy():
set_env_for_user(env.user)
push()
sudo('chmod -R 0770 %s' % env.virtualenv)
with cd(env.code_dir):
with _virtualenv():
run('pip install -r requirements.txt')
run('python manage.py collectstatic --clear --noinput --settings=centralfitestoque.settings_server')
run('python manage.py syncdb --noinput --settings=centralfitestoque.settings_server')
run('python manage.py migrate --settings=centralfitestoque.settings_server')
restart()
@task
def restart():
"""
Reload nginx/gunicorn
"""
with settings(warn_only=True):
sudo('supervisorctl restart app')
sudo('/etc/init.d/nginx reload')
@task
def vagrant(username):
# set ssh key file for vagrant
result = local('vagrant ssh-config', capture=True)
data = parse_ssh_config(result)
env.remote = 'vagrant'
env.branch = 'master'
env.host = '127.0.0.1'
env.port = data['Port']
try:
env.host_string = '%s@127.0.0.1:%s' % (username, data['Port'])
except KeyError:
raise Exception("Missing data from ssh-config")
@task
def staging(username):
env.remote = 'staging'
env.branch = 'master'
env.host = STAGING_HOST
env.port = 22
env.host_string = '%s@%s:%s' % (username, env.host, env.port)
def upload_project_sudo(local_dir=None, remote_dir=""):
"""
Copied from Fabric and updated to use sudo.
"""
local_dir = local_dir or os.getcwd()
# Remove final '/' in local_dir so that basename() works
local_dir = local_dir.rstrip(os.sep)
local_path, local_name = os.path.split(local_dir)
tar_file = "%s.tar.gz" % local_name
target_tar = os.path.join(remote_dir, tar_file)
tmp_folder = mkdtemp()
try:
tar_path = os.path.join(tmp_folder, tar_file)
local("tar -czf %s -C %s %s" % (tar_path, local_path, local_name))
put(tar_path, target_tar, use_sudo=True)
with cd(remote_dir):
try:
sudo("tar -xzf %s" % tar_file)
finally:
sudo("rm -f %s" % tar_file)
finally:
local("rm -rf %s" % tmp_folder)
@task
def sync_config():
sudo('mkdir -p /etc/chef')
upload_project_sudo(local_dir='./cookbooks', remote_dir='/etc/chef')
upload_project_sudo(local_dir='./roles/', remote_dir='/etc/chef')
@task
def provision():
"""
Run chef-solo
"""
sync_config()
node_name = "node_%s.json" % (env.roles[0].split('_')[0])
with cd('/etc/chef/cookbooks'):
sudo('chef-solo -c /etc/chef/cookbooks/solo.rb -j /etc/chef/cookbooks/%s' % node_name, pty=True)
@task
def prepare():
install_chef(latest=False)
provision()
|
|
"""Implements a Nvim host for python plugins."""
import imp
import inspect
import logging
import os
import os.path
import re
from functools import partial
from traceback import format_exc
from . import script_host
from ..api import decode_if_bytes, walk
from ..compat import IS_PYTHON3, find_module
from ..msgpack_rpc import ErrorResponse
from ..util import format_exc_skip
__all__ = ('Host',)
logger = logging.getLogger(__name__)
error, debug, info, warn = (logger.error, logger.debug, logger.info,
logger.warning,)
class Host(object):
"""Nvim host for python plugins.
Takes care of loading/unloading plugins and routing msgpack-rpc
requests/notifications to the appropriate handlers.
"""
def __init__(self, nvim):
"""Set handlers for plugin_load/plugin_unload."""
self.nvim = nvim
self._specs = {}
self._loaded = {}
self._load_errors = {}
self._notification_handlers = {}
self._request_handlers = {
'poll': lambda: 'ok',
'specs': self._on_specs_request,
'shutdown': self.shutdown
}
# Decode by default for Python 3
self._decode_default = IS_PYTHON3
def _on_async_err(self, msg):
self.nvim.err_write(msg, async_=True)
def start(self, plugins):
"""Start listening for msgpack-rpc requests and notifications."""
self.nvim.run_loop(self._on_request,
self._on_notification,
lambda: self._load(plugins),
err_cb=self._on_async_err)
def shutdown(self):
"""Shutdown the host."""
self._unload()
self.nvim.stop_loop()
def _wrap_function(self, fn, sync, decode, nvim_bind, name, *args):
if decode:
args = walk(decode_if_bytes, args, decode)
if nvim_bind is not None:
args.insert(0, nvim_bind)
try:
return fn(*args)
except Exception:
if sync:
msg = ("error caught in request handler '{} {}':\n{}"
.format(name, args, format_exc_skip(1)))
raise ErrorResponse(msg)
else:
msg = ("error caught in async handler '{} {}'\n{}\n"
.format(name, args, format_exc_skip(1)))
self._on_async_err(msg + "\n")
def _on_request(self, name, args):
"""Handle a msgpack-rpc request."""
if IS_PYTHON3:
name = decode_if_bytes(name)
handler = self._request_handlers.get(name, None)
if not handler:
msg = self._missing_handler_error(name, 'request')
error(msg)
raise ErrorResponse(msg)
debug('calling request handler for "%s", args: "%s"', name, args)
rv = handler(*args)
debug("request handler for '%s %s' returns: %s", name, args, rv)
return rv
def _on_notification(self, name, args):
"""Handle a msgpack-rpc notification."""
if IS_PYTHON3:
name = decode_if_bytes(name)
handler = self._notification_handlers.get(name, None)
if not handler:
msg = self._missing_handler_error(name, 'notification')
error(msg)
self._on_async_err(msg + "\n")
return
debug('calling notification handler for "%s", args: "%s"', name, args)
handler(*args)
def _missing_handler_error(self, name, kind):
msg = 'no {} handler registered for "{}"'.format(kind, name)
pathmatch = re.match(r'(.+):[^:]+:[^:]+', name)
if pathmatch:
loader_error = self._load_errors.get(pathmatch.group(1))
if loader_error is not None:
msg = msg + "\n" + loader_error
return msg
def _load(self, plugins):
for path in plugins:
err = None
if path in self._loaded:
error('{} is already loaded'.format(path))
continue
try:
if path == "script_host.py":
module = script_host
else:
directory, name = os.path.split(os.path.splitext(path)[0])
file, pathname, descr = find_module(name, [directory])
module = imp.load_module(name, file, pathname, descr)
handlers = []
self._discover_classes(module, handlers, path)
self._discover_functions(module, handlers, path)
if not handlers:
error('{} exports no handlers'.format(path))
continue
self._loaded[path] = {'handlers': handlers, 'module': module}
except Exception as e:
err = ('Encountered {} loading plugin at {}: {}\n{}'
.format(type(e).__name__, path, e, format_exc(5)))
error(err)
self._load_errors[path] = err
def _unload(self):
for path, plugin in self._loaded.items():
handlers = plugin['handlers']
for handler in handlers:
method_name = handler._nvim_rpc_method_name
if hasattr(handler, '_nvim_shutdown_hook'):
handler()
elif handler._nvim_rpc_sync:
del self._request_handlers[method_name]
else:
del self._notification_handlers[method_name]
self._specs = {}
self._loaded = {}
def _discover_classes(self, module, handlers, plugin_path):
for _, cls in inspect.getmembers(module, inspect.isclass):
if getattr(cls, '_nvim_plugin', False):
# create an instance of the plugin and pass the nvim object
plugin = cls(self._configure_nvim_for(cls))
# discover handlers in the plugin instance
self._discover_functions(plugin, handlers, plugin_path)
def _discover_functions(self, obj, handlers, plugin_path):
def predicate(o):
return hasattr(o, '_nvim_rpc_method_name')
specs = []
objdecode = getattr(obj, '_nvim_decode', self._decode_default)
for _, fn in inspect.getmembers(obj, predicate):
sync = fn._nvim_rpc_sync
decode = getattr(fn, '_nvim_decode', objdecode)
nvim_bind = None
if fn._nvim_bind:
nvim_bind = self._configure_nvim_for(fn)
method = fn._nvim_rpc_method_name
if fn._nvim_prefix_plugin_path:
method = '{}:{}'.format(plugin_path, method)
fn_wrapped = partial(self._wrap_function, fn,
sync, decode, nvim_bind, method)
self._copy_attributes(fn, fn_wrapped)
# register in the rpc handler dict
if sync:
if method in self._request_handlers:
raise Exception(('Request handler for "{}" is ' +
'already registered').format(method))
self._request_handlers[method] = fn_wrapped
else:
if method in self._notification_handlers:
raise Exception(('Notification handler for "{}" is ' +
'already registered').format(method))
self._notification_handlers[method] = fn_wrapped
if hasattr(fn, '_nvim_rpc_spec'):
specs.append(fn._nvim_rpc_spec)
handlers.append(fn_wrapped)
if specs:
self._specs[plugin_path] = specs
def _copy_attributes(self, fn, fn2):
# Copy _nvim_* attributes from the original function
for attr in dir(fn):
if attr.startswith('_nvim_'):
setattr(fn2, attr, getattr(fn, attr))
def _on_specs_request(self, path):
if IS_PYTHON3:
path = decode_if_bytes(path)
if path in self._load_errors:
self.nvim.out_write(self._load_errors[path] + '\n')
return self._specs.get(path, 0)
def _configure_nvim_for(self, obj):
# Configure a nvim instance for obj (checks encoding configuration)
nvim = self.nvim
decode = getattr(obj, '_nvim_decode', self._decode_default)
if decode:
nvim = nvim.with_decode(decode)
return nvim
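# --- Illustrative sketch (not part of the original module) ---
# A standalone illustration of how handler names are keyed in the RPC handler
# dicts above: handlers that set _nvim_prefix_plugin_path get the plugin path
# prepended to their method name, which is also what the regex in
# _missing_handler_error() strips back out. The names below are hypothetical.
def _example_rpc_method_name(plugin_path, method, prefix_plugin_path=True):
    if prefix_plugin_path:
        return '{}:{}'.format(plugin_path, method)
    return method
# _example_rpc_method_name('/rplugin/python3/hello.py', 'function:HelloWorld')
# -> '/rplugin/python3/hello.py:function:HelloWorld'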
|
|
from basic import Basic, S
from operations import AssocOp
from cache import cacheit
from logic import fuzzy_not
from numbers import Integer, Rational
from symbol import Symbol, Wild
from sympy.utilities.iterables import make_list
# internal marker to indicate:
# "there are still non-commutative objects -- don't forget to process them"
class NC_Marker:
is_Order = False
is_Mul = False
is_Number = False
is_Poly = False
is_commutative = False
class Mul(AssocOp):
__slots__ = []
is_Mul = True
@classmethod
def flatten(cls, seq):
# apply associativity, separate commutative part of seq
c_part = [] # out: commutative factors
nc_part = [] # out: non-commutative factors
nc_seq = []
coeff = S.One # standalone term
# e.g. 3 * ...
c_powers = []  # (base, exp) pairs, e.g. (x, n) for x**n
num_exp = []   # (numeric base, exp) pairs, e.g. (3, y) for ... * 3**y * ...
order_symbols = None
# --- PART 1 ---
#
# "collect powers and coeff":
#
# o coeff
# o c_powers
# o num_exp
#
# NOTE: this is optimized for all-objects-are-commutative case
for o in seq:
# O(x)
if o.is_Order:
o, order_symbols = o.as_expr_symbols(order_symbols)
# Mul([...])
if o.is_Mul:
if o.is_commutative:
seq.extend(o.args) # XXX zerocopy?
else:
# NCMul can have commutative parts as well
for q in o.args:
if q.is_commutative:
seq.append(q)
else:
nc_seq.append(q)
# append non-commutative marker, so we don't forget to
# process scheduled non-commutative objects
seq.append(NC_Marker)
continue
# 3
elif o.is_Number:
coeff *= o
continue
elif o.is_commutative:
# o = b**e
b, e = o.as_base_exp()
# e.g. 3**y
if o.is_Pow and b.is_Number:
# get all the factors with numeric base so they can be
# combined below
num_exp.append((b,e))
continue
# (-3 + y)**n -> (-1)**n * (3 - y)**n
if not Basic.keep_sign and b.is_Add and e.is_Number:
#found factor (x+y)**number; split off initial coefficient
c, t = b.as_coeff_terms()
#last time I checked, Add.as_coeff_terms returns One or NegativeOne
#but this might change
if c.is_negative and not e.is_integer:
# extracting root from negative number: ignore sign
if c is not S.NegativeOne:
# make c positive (probably never occurs)
coeff *= (-c) ** e
assert len(t)==1,`t`
b = -t[0]
#else: ignoring sign from NegativeOne: nothing to do!
elif c is not S.One:
coeff *= c ** e
assert len(t)==1,`t`
b = t[0]
#else: c is One, so pass
c_powers.append((b,e))
# NON-COMMUTATIVE
# TODO: Make non-commutative exponents not combine automatically
else:
if o is not NC_Marker:
nc_seq.append(o)
# process nc_seq (if any)
while nc_seq:
o = nc_seq.pop(0)
if not nc_part:
nc_part.append(o)
continue
# try to combine last terms: a**b * a**c -> a**(b+c)
o1 = nc_part.pop()
b1,e1 = o1.as_base_exp()
b2,e2 = o.as_base_exp()
if b1==b2:
o12 = b1 ** (e1 + e2)
# now o12 could be a commutative object
if o12.is_commutative:
seq.append(o12)
continue
else:
nc_seq.insert(0, o12)
else:
nc_part.append(o1)
nc_part.append(o)
# We do want a combined exponent if it would not be an Add, such as
# x**y * x**(2*y) -> x**(3*y).
# We determine this by checking whether two exponents have the same term
# in as_coeff_terms().
#
# Unfortunately, this isn't smart enough to consider combining into
# exponents that might already be Adds, so things like
# x**(z - y) * x**y will be left alone. This is because checking every
# possible combination can slow things down.
new_c_powers = []
common_b = {} # b:e
# First gather exponents of common bases
for b, e in c_powers:
co = e.as_coeff_terms()
if b in common_b:
if co[1] in common_b[b]:
common_b[b][co[1]] += co[0]
else:
common_b[b][co[1]] = co[0]
else:
common_b[b] = {co[1]:co[0]}
for b,e, in common_b.items():
for t, c in e.items():
new_c_powers.append((b,c*Mul(*t)))
c_powers = new_c_powers
# And the same for numeric bases
new_num_exp = []
common_b = {} # b:e
for b, e in num_exp:
co = e.as_coeff_terms()
if b in common_b:
if co[1] in common_b[b]:
common_b[b][co[1]] += co[0]
else:
common_b[b][co[1]] = co[0]
else:
common_b[b] = {co[1]:co[0]}
for b,e, in common_b.items():
for t, c in e.items():
new_num_exp.append((b,c*Mul(*t)))
num_exp = new_num_exp
# --- PART 2 ---
#
# o process collected powers (x**0 -> 1; x**1 -> x; otherwise Pow)
# o combine collected powers (2**x * 3**x -> 6**x)
# with numeric base
# ................................
# now we have:
# - coeff:
# - c_powers: (b, e)
# - num_exp: (2, e)
# x**0 -> 1,  x**1 -> x
for b, e in c_powers:
if e is S.Zero:
continue
if e is S.One:
if b.is_Number:
coeff *= b
else:
c_part.append(b)
elif e.is_Integer and b.is_Number:
coeff *= b ** e
else:
c_part.append(Pow(b, e))
# 2**x * 3**x -> 6**x
inv_exp_dict = {}  # exp : Mul of numeric bases, e.g. {x: 6} for ... * 2**x * 3**x * ...
for b,e in num_exp:
if e in inv_exp_dict:
inv_exp_dict[e] *= b
else:
inv_exp_dict[e] = b
for e,b in inv_exp_dict.items():
if e is S.Zero:
continue
if e is S.One:
if b.is_Number:
coeff *= b
else:
c_part.append(b)
elif e.is_Integer and b.is_Number:
coeff *= b ** e
else:
obj = b**e
if obj.is_Number:
coeff *= obj
else:
c_part.append(obj)
# oo, -oo
if (coeff is S.Infinity) or (coeff is S.NegativeInfinity):
new_c_part = []
for t in c_part:
if t.is_positive:
continue
if t.is_negative:
coeff = -coeff
continue
new_c_part.append(t)
c_part = new_c_part
new_nc_part = []
for t in nc_part:
if t.is_positive:
continue
if t.is_negative:
coeff = -coeff
continue
new_nc_part.append(t)
nc_part = new_nc_part
# 0, nan
elif (coeff is S.Zero) or (coeff is S.NaN):
# we know for sure the result will be the same as coeff (0 or nan)
return [coeff], [], order_symbols
elif coeff.is_Real:
if coeff == Real(0):
c_part, nc_part = [coeff], []
elif coeff == Real(1):
# change it to One, so it doesn't get inserted to slot0
coeff = S.One
# order commutative part canonically
c_part.sort(Basic.compare)
# current code expects coeff to be always in slot-0
if coeff is not S.One:
c_part.insert(0, coeff)
# we are done
if len(c_part)==2 and c_part[0].is_Number and c_part[1].is_Add:
# 2*(1+a) -> 2 + 2 * a
coeff = c_part[0]
c_part = [Add(*[coeff*f for f in c_part[1].args])]
return c_part, nc_part, order_symbols
def _eval_power(b, e):
if e.is_Number:
if b.is_commutative:
if e.is_Integer:
# (a*b)**2 -> a**2 * b**2
return Mul(*[s**e for s in b.args])
if e.is_rational:
coeff, rest = b.as_coeff_terms()
unk=[]
nonneg=[]
neg=[]
for bi in rest:
if not bi.is_negative is None: #then we know the sign
if bi.is_negative:
neg.append(bi)
else:
nonneg.append(bi)
else:
unk.append(bi)
if len(unk) == len(rest) or len(neg) == len(rest) == 1:
# if all terms were unknown there is nothing to pull
# out except maybe the coeff OR if there was only a
# single negative term then it shouldn't be pulled out
# either.
if coeff < 0:
coeff = -coeff
if coeff == S.One:
return None
b = b / coeff
return coeff ** e * b ** e
# otherwise return the new expression expanding out the
# known terms; those that are not known can be expanded
# out with separate() but this will introduce a lot of
# "garbage" that is needed to keep one on the same branch
# as the unexpanded expression. The negatives are brought
# out with a negative sign added and a negative left behind
# in the unexpanded terms.
if neg:
neg = [-w for w in neg]
if len(neg) % 2 and not coeff.is_negative:
unk.append(S.NegativeOne)
if coeff.is_negative:
coeff = -coeff
unk.append(S.NegativeOne)
return Mul(*[s**e for s in nonneg + neg + [coeff]])* \
Mul(*(unk)) ** e
coeff, rest = b.as_coeff_terms()
if coeff is not S.One:
# (2*a)**3 -> 2**3 * a**3
return coeff**e * Mul(*[s**e for s in rest])
elif e.is_Integer:
coeff, rest = b.as_coeff_terms()
l = [s**e for s in rest]
if e.is_negative:
l.reverse()
return coeff**e * Mul(*l)
c, t = b.as_coeff_terms()
if e.is_even and c.is_Number and c < 0:
return (-c * Mul(*t)) ** e
#if e.atoms(Wild):
# return Mul(*[t**e for t in b])
def _eval_evalf(self, prec):
return AssocOp._eval_evalf(self, prec).expand()
@cacheit
def as_two_terms(self):
args = self.args
if len(args) == 1:
return S.One, self
elif len(args) == 2:
return args
else:
return args[0], self._new_rawargs(*args[1:])
@cacheit
def as_coeff_terms(self, x=None):
if x is not None:
l1 = []
l2 = []
for f in self.args:
if f.has(x):
l2.append(f)
else:
l1.append(f)
return Mul(*l1), tuple(l2)
coeff = self.args[0]
if coeff.is_Number:
return coeff, self.args[1:]
return S.One, self.args
@staticmethod
def _expandsums(sums):
"""
Helper function for _eval_expand_mul.
sums must be a list of instances of Basic.
"""
L = len(sums)
if L == 1:
return sums[0].args
terms = []
left = Mul._expandsums(sums[:L//2])
right = Mul._expandsums(sums[L//2:])
terms = [Mul(a, b) for a in left for b in right]
added = Add(*terms)
return make_list(added, Add) #it may have collapsed down to one term
def _eval_expand_basic(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_basic'):
newterm = term._eval_expand_basic(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_power_exp(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_power_exp'):
newterm = term._eval_expand_power_exp(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_power_base(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_power_base'):
newterm = term._eval_expand_power_base(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_mul(self, deep=True, **hints):
plain, sums, rewrite = [], [], False
for factor in self.args:
if deep:
term = factor.expand(deep=deep, **hints)
if term != factor:
factor = term
rewrite = True
if factor.is_Add:
sums.append(factor)
rewrite = True
else:
if factor.is_commutative:
plain.append(factor)
else:
Wrapper = Basic
sums.append(Wrapper(factor))
if not rewrite:
return self
else:
if sums:
terms = Mul._expandsums(sums)
plain = Mul(*plain)
return Add(*[Mul(plain, term) for term in terms],
**self.assumptions0)
else:
return Mul(*plain, **self.assumptions0)
def _eval_expand_multinomial(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_multinomial'):
newterm = term._eval_expand_multinomial(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_log(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_log'):
newterm = term._eval_expand_log(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_complex(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_complex'):
newterm = term._eval_expand_complex(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_trig(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_trig'):
newterm = term._eval_expand_trig(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_func(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_func'):
newterm = term._eval_expand_func(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_derivative(self, s):
terms = list(self.args)
factors = []
for i in xrange(len(terms)):
t = terms[i].diff(s)
if t is S.Zero:
continue
factors.append(Mul(*(terms[:i]+[t]+terms[i+1:])))
return Add(*factors)
def _matches_simple(self, expr, repl_dict):
# handle (w*3).matches('x*5') -> {w: x*5/3}
coeff, terms = self.as_coeff_terms()
if len(terms)==1:
return terms[0].matches(expr / coeff, repl_dict)
return
def matches(self, expr, repl_dict={}, evaluate=False):
expr = sympify(expr)
if self.is_commutative and expr.is_commutative:
return AssocOp._matches_commutative(self, expr, repl_dict, evaluate)
# todo for commutative parts, until then use the default matches method for non-commutative products
return self._matches(expr, repl_dict, evaluate)
def _matches(self, expr, repl_dict={}, evaluate=False):
# weed out negative one prefixes
sign = 1
if self.args[0] == -1:
self = -self; sign = -sign
if expr.is_Mul and expr.args[0] == -1:
expr = -expr; sign = -sign
if evaluate:
return self.subs(repl_dict).matches(expr, repl_dict)
expr = sympify(expr)
if not isinstance(expr, self.__class__):
# if we can omit the first factor, we can match it to sign * one
if Mul(*self.args[1:]) == expr:
return self.args[0].matches(Rational(sign), repl_dict, evaluate)
# two-factor product: if the 2nd factor matches, the first part must be sign * one
if len(self.args[:]) == 2:
dd = self.args[1].matches(expr, repl_dict, evaluate)
if dd == None:
return None
dd = self.args[0].matches(Rational(sign), dd, evaluate)
return dd
return None
if len(self.args[:])==0:
if self == expr:
return repl_dict
return None
d = repl_dict.copy()
# weed out identical terms
pp = list(self.args)
ee = list(expr.args)
for p in self.args:
if p in expr.args:
ee.remove(p)
pp.remove(p)
# only one symbol left in pattern -> match the remaining expression
if len(pp) == 1 and isinstance(pp[0], Wild):
if len(ee) == 1:
d[pp[0]] = sign * ee[0]
else:
d[pp[0]] = sign * (type(expr)(*ee))
return d
if len(ee) != len(pp):
return None
i = 0
for p, e in zip(pp, ee):
if i == 0 and sign != 1:
try:
e = sign * e
except TypeError:
return None
d = p.matches(e, d, evaluate=not i)
i += 1
if d is None:
return None
return d
@staticmethod
def _combine_inverse(lhs, rhs):
"""
Returns lhs/rhs, but treats arguments like symbols, so things like
oo/oo return 1, instead of a nan.
"""
if lhs == rhs:
return S.One
if lhs.is_Mul and rhs.is_Mul:
a = list(lhs.args)
b = [1]
for x in rhs.args:
if x in a:
a.remove(x)
else:
b.append(x)
return Mul(*a)/Mul(*b)
return lhs / rhs
def as_powers_dict(self):
return dict([ term.as_base_exp() for term in self ])
def as_numer_denom(self):
numers, denoms = [],[]
for t in self.args:
n,d = t.as_numer_denom()
numers.append(n)
denoms.append(d)
return Mul(*numers), Mul(*denoms)
@cacheit
def count_ops(self, symbolic=True):
if symbolic:
return Add(*[t.count_ops(symbolic) for t in self.args]) + \
Symbol('MUL') * (len(self.args) - 1)
return Add(*[t.count_ops(symbolic) for t in self.args]) + \
(len(self.args) - 1)
def _eval_is_polynomial(self, syms):
for term in self.args:
if not term._eval_is_polynomial(syms):
return False
return True
_eval_is_bounded = lambda self: self._eval_template_is_attr('is_bounded')
_eval_is_commutative = lambda self: self._eval_template_is_attr('is_commutative')
_eval_is_integer = lambda self: self._eval_template_is_attr('is_integer')
_eval_is_comparable = lambda self: self._eval_template_is_attr('is_comparable')
# I*I -> R, I*I*I -> -I
def _eval_is_real(self):
im_count = 0
is_neither = False
for t in self.args:
if t.is_imaginary:
im_count += 1
continue
t_real = t.is_real
if t_real:
continue
elif t_real is False:
if is_neither:
return None
else:
is_neither = True
else:
return None
if is_neither:
return False
return (im_count % 2 == 0)
def _eval_is_imaginary(self):
im_count = 0
is_neither = False
for t in self.args:
if t.is_imaginary:
im_count += 1
continue
t_real = t.is_real
if t_real:
continue
elif t_real is False:
if is_neither:
return None
else:
is_neither = True
else:
return None
if is_neither:
return False
return (im_count % 2 == 1)
def _eval_is_irrational(self):
for t in self.args:
a = t.is_irrational
if a:
return True
if a is None:
return
return False
def _eval_is_positive(self):
terms = [t for t in self.args if not t.is_positive]
if not terms:
return True
c = terms[0]
if len(terms)==1:
if c.is_nonpositive:
return False
return
r = Mul(*terms[1:])
if c.is_negative and r.is_negative:
return True
if r.is_negative and c.is_negative:
return True
# check for nonpositivity, <=0
if c.is_negative and r.is_nonnegative:
return False
if r.is_negative and c.is_nonnegative:
return False
if c.is_nonnegative and r.is_nonpositive:
return False
if r.is_nonnegative and c.is_nonpositive:
return False
def _eval_is_negative(self):
terms = [t for t in self.args if not t.is_positive]
if not terms:
# all terms are either positive -- 2*Symbol('n', positive=T)
# or unknown -- 2*Symbol('x')
if self.is_positive:
return False
else:
return None
c = terms[0]
if len(terms)==1:
return c.is_negative
r = Mul(*terms[1:])
# check for nonnegativity, >=0
if c.is_negative and r.is_nonpositive:
return False
if r.is_negative and c.is_nonpositive:
return False
if c.is_nonpositive and r.is_nonpositive:
return False
if c.is_nonnegative and r.is_nonnegative:
return False
def _eval_is_odd(self):
is_integer = self.is_integer
if is_integer:
r = True
for t in self.args:
if t.is_even:
return False
if t.is_odd is None:
r = None
return r
# !integer -> !odd
elif is_integer == False:
return False
def _eval_is_even(self):
is_integer = self.is_integer
if is_integer:
return fuzzy_not(self._eval_is_odd())
elif is_integer == False:
return False
def _eval_subs(self, old, new):
# base cases
# simplest
if self == old:
return new
# pass it off to its own class
if isinstance(old, FunctionClass):
return self.__class__(*[s._eval_subs(old, new) for s in self.args ])
# break up self and old into terms
coeff_self,terms_self = self.as_coeff_terms()
coeff_old,terms_old = old.as_coeff_terms()
# NEW - implementation of strict substitution
# if the coefficients are not the same, do not substitute.
# the only exception is if old has a coefficient of 1; then always do the sub.
if coeff_self != coeff_old and coeff_old != 1:
return self.__class__(*[s._eval_subs(old, new) for s in self.args])
# break up powers, i.e., x**2 -> x*x
def breakup(terms):
temp = []
for t in terms:
if isinstance(t,Pow) and isinstance(t.exp, Integer):
if t.exp.is_positive:
temp.extend([t.base]*int(t.exp))
elif t.exp.is_negative:
temp.extend([1/t.base]*int(abs(t.exp)))
else:
temp.append(t)
return temp
terms_old = breakup(terms_old)
terms_self = breakup(terms_self)
# break up old and self terms into commutative and noncommutative lists
comm_old = []; noncomm_old = []
comm_self = []; noncomm_self = []
for o in terms_old:
if o.is_commutative:
comm_old.append(o)
else:
noncomm_old.append(o)
for s in terms_self:
if s.is_commutative:
comm_self.append(s)
else:
noncomm_self.append(s)
comm_old_len, noncomm_old_len = len(comm_old), len(noncomm_old)
comm_self_len, noncomm_self_len = len(comm_self), len(noncomm_self)
# if the noncommutative part of the 'to-be-replaced' expression is
# smaller than the noncommutative part of the whole expression, scan
# to see if the whole thing is there
if noncomm_old_len <= noncomm_self_len and noncomm_old_len > 0:
for i in range(noncomm_self_len):
if noncomm_self[i] == noncomm_old[0]:
for j in range(noncomm_old_len):
# make sure each noncommutative term matches in order
if (i+j) < noncomm_self_len and \
noncomm_self[i+j] == noncomm_old[j]:
# we only care once we've reached the end of old's
# noncommutative part.
if j == noncomm_old_len-1:
# get rid of noncommutative terms and
# substitute new expression into total
# expression
noncomms_final = noncomm_self[:i] + \
noncomm_self[i+j+1:]
noncomms_final.insert(i,new)
myFlag = True
comms_final = comm_self[:]
# check commutative terms
for ele in comm_old:
# flag to make sure all the commutative
# terms in old are in self
if ele not in comm_self:
myFlag = False
# collect commutative terms
else:
comms_final.remove(ele)
# continue only if all commutative terms in
# old are present
if myFlag == True:
expr = comms_final+noncomms_final
return Mul(coeff_self/coeff_old,
Mul(*expr)._eval_subs(old,new))
#*[e._eval_subs(old,new) for e in expr])
return self.__class__(*[s._eval_subs(old, new) for s in self.args])
# but what if the noncommutative lists subexpression and the whole
# expression are both empty
elif noncomm_old_len == noncomm_self_len == 0:
# just check commutative parts then.
if comm_old_len > 0 and comm_old_len<=comm_self_len:
if comm_self == comm_old:
return Mul(coeff_self/coeff_old*new)
myFlag = True
comms_final = comm_self[:]
# check commutative terms
for ele in comm_old:
# flag to make sure all the commutative terms in old are
# in self
if ele not in comm_self:
myFlag = False
# collect commutative terms
else:
# needed if old has an element to an integer power
if ele in comms_final:
comms_final.remove(ele)
else:
myFlag = False
# continue only if all commutative terms in old are present
if myFlag == True:
return Mul(coeff_self/coeff_old, new,
Mul(*comms_final)._eval_subs(old,new))#*[c._eval_subs(old,new) for c in comms_final])
else:
return self.__class__(*[s._eval_subs(old, new) for
s in self.args])
# else the subexpression isn't in the total expression
return self.__class__(*[s._eval_subs(old, new) for s in self.args])
def _eval_nseries(self, x, x0, n):
from sympy import powsimp
terms = [t.nseries(x, x0, n) for t in self.args]
return powsimp(Mul(*terms).expand(), combine='exp', deep=True)
def _eval_as_leading_term(self, x):
return Mul(*[t.as_leading_term(x) for t in self.args])
def _eval_conjugate(self):
return Mul(*[t.conjugate() for t in self.args])
def _sage_(self):
s = 1
for x in self.args:
s *= x._sage_()
return s
def as_Mul(self):
"""Returns `self` as it was `Mul` instance. """
return list(self.args)
from power import Pow
from numbers import Real
from function import FunctionClass
from sympify import sympify
from add import Add
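# --- Illustrative sketch (not part of the original module) ---
# A standalone, plain-Python illustration of the PART 2 step in Mul.flatten()
# that merges numeric bases sharing an exponent (2**x * 3**x -> 6**x): bases
# are multiplied together under their common exponent key. Plain ints and
# strings stand in for sympy objects here, purely for illustration.
def _combine_numeric_bases(num_exp):
    """num_exp is a list of (numeric_base, exponent) pairs."""
    inv_exp_dict = {}  # exponent -> product of the numeric bases
    for base, exp in num_exp:
        inv_exp_dict[exp] = inv_exp_dict.get(exp, 1) * base
    return inv_exp_dict
# _combine_numeric_bases([(2, 'x'), (3, 'x'), (5, 'y')]) -> {'x': 6, 'y': 5}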
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.benchmark.scenarios.nova import servers
from rally import exceptions as rally_exceptions
from rally import objects
from rally import osclients
from tests.unit import fakes
from tests.unit import test
NOVA_SERVERS_MODULE = "rally.benchmark.scenarios.nova.servers"
NOVA_SERVERS = NOVA_SERVERS_MODULE + ".NovaServers"
class NovaServersTestCase(test.TestCase):
def test_boot_rescue_unrescue(self):
actions = [{"rescue_unrescue": 5}]
fake_server = mock.MagicMock()
scenario = servers.NovaServers()
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._generate_random_name = mock.MagicMock(return_value="name")
scenario._rescue_server = mock.MagicMock()
scenario._unrescue_server = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.boot_and_bounce_server("img", 1, actions=actions)
scenario._boot_server.assert_called_once_with("img", 1)
server_calls = []
for i in range(5):
server_calls.append(mock.call(fake_server))
self.assertEqual(5, scenario._rescue_server.call_count,
"Rescue not called 5 times")
self.assertEqual(5, scenario._unrescue_server.call_count,
"Unrescue not called 5 times")
scenario._rescue_server.assert_has_calls(server_calls)
scenario._unrescue_server.assert_has_calls(server_calls)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_boot_stop_start(self):
actions = [{"stop_start": 5}]
fake_server = mock.MagicMock()
scenario = servers.NovaServers()
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._generate_random_name = mock.MagicMock(return_value="name")
scenario._start_server = mock.MagicMock()
scenario._stop_server = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.boot_and_bounce_server("img", 1, actions=actions)
scenario._boot_server.assert_called_once_with("img", 1)
server_calls = []
for i in range(5):
server_calls.append(mock.call(fake_server))
self.assertEqual(5, scenario._stop_server.call_count,
"Stop not called 5 times")
self.assertEqual(5, scenario._start_server.call_count,
"Start not called 5 times")
scenario._stop_server.assert_has_calls(server_calls)
scenario._start_server.assert_has_calls(server_calls)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_multiple_bounce_actions(self):
actions = [{"hard_reboot": 5}, {"stop_start": 8}]
fake_server = mock.MagicMock()
scenario = servers.NovaServers()
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario._reboot_server = mock.MagicMock()
scenario._stop_and_start_server = mock.MagicMock()
scenario._generate_random_name = mock.MagicMock(return_value="name")
scenario.boot_and_bounce_server("img", 1, actions=actions)
scenario._boot_server.assert_called_once_with("img", 1)
server_calls = []
for i in range(5):
server_calls.append(mock.call(fake_server))
self.assertEqual(5, scenario._reboot_server.call_count,
"Reboot not called 5 times")
scenario._reboot_server.assert_has_calls(server_calls)
server_calls = []
for i in range(8):
server_calls.append(mock.call(fake_server))
self.assertEqual(8, scenario._stop_and_start_server.call_count,
"Stop/Start not called 8 times")
scenario._stop_and_start_server.assert_has_calls(server_calls)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_validate_actions(self):
actions = [{"hardd_reboot": 6}]
scenario = servers.NovaServers()
self.assertRaises(rally_exceptions.InvalidConfigException,
scenario.boot_and_bounce_server,
1, 1, actions=actions)
actions = [{"hard_reboot": "no"}]
self.assertRaises(rally_exceptions.InvalidConfigException,
scenario.boot_and_bounce_server,
1, 1, actions=actions)
actions = {"hard_reboot": 6}
self.assertRaises(rally_exceptions.InvalidConfigException,
scenario.boot_and_bounce_server,
1, 1, actions=actions)
actions = {"hard_reboot": -1}
self.assertRaises(rally_exceptions.InvalidConfigException,
scenario.boot_and_bounce_server,
1, 1, actions=actions)
actions = {"hard_reboot": 0}
self.assertRaises(rally_exceptions.InvalidConfigException,
scenario.boot_and_bounce_server,
1, 1, actions=actions)
def _verify_reboot(self, soft=True):
actions = [{"soft_reboot" if soft else "hard_reboot": 5}]
fake_server = mock.MagicMock()
scenario = servers.NovaServers()
scenario._reboot_server = mock.MagicMock()
scenario._soft_reboot_server = mock.MagicMock()
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario._generate_random_name = mock.MagicMock(return_value="name")
scenario.boot_and_bounce_server("img", 1, actions=actions)
scenario._boot_server.assert_called_once_with("img", 1)
server_calls = []
for i in range(5):
server_calls.append(mock.call(fake_server))
if soft:
self.assertEqual(5, scenario._soft_reboot_server.call_count,
"Reboot not called 5 times")
scenario._soft_reboot_server.assert_has_calls(server_calls)
else:
self.assertEqual(5, scenario._reboot_server.call_count,
"Reboot not called 5 times")
scenario._reboot_server.assert_has_calls(server_calls)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_boot_soft_reboot(self):
self._verify_reboot(soft=True)
def test_boot_hard_reboot(self):
self._verify_reboot(soft=False)
def test_boot_and_delete_server(self):
fake_server = object()
scenario = servers.NovaServers()
scenario._generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario.sleep_between = mock.MagicMock()
scenario.boot_and_delete_server("img", 0, 10, 20, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_boot_and_list_server(self):
scenario = servers.NovaServers()
scenario._generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock()
scenario._list_servers = mock.MagicMock()
scenario.boot_and_list_server("img", 0, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario._list_servers.assert_called_once_with(True)
def test_suspend_and_resume_server(self):
fake_server = object()
scenario = servers.NovaServers()
scenario._generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._suspend_server = mock.MagicMock()
scenario._resume_server = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.suspend_and_resume_server("img", 0, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario._suspend_server.assert_called_once_with(fake_server)
scenario._resume_server.assert_called_once_with(fake_server)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_pause_and_unpause_server(self):
fake_server = object()
scenario = servers.NovaServers()
scenario._generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._pause_server = mock.MagicMock()
scenario._unpause_server = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.pause_and_unpause_server("img", 0, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario._pause_server.assert_called_once_with(fake_server)
scenario._unpause_server.assert_called_once_with(fake_server)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_list_servers(self):
scenario = servers.NovaServers()
scenario._list_servers = mock.MagicMock()
scenario.list_servers(True)
scenario._list_servers.assert_called_once_with(True)
def test_boot_server_from_volume_and_delete(self):
fake_server = object()
scenario = servers.NovaServers()
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._generate_random_name = mock.MagicMock(return_value="name")
scenario.sleep_between = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
fake_volume = fakes.FakeVolumeManager().create()
fake_volume.id = "volume_id"
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario.boot_server_from_volume_and_delete("img", 0, 5, 10, 20,
fakearg="f")
scenario._create_volume.assert_called_once_with(5, imageRef="img")
scenario._boot_server.assert_called_once_with(
"img", 0,
block_device_mapping={"vda": "volume_id:::1"},
fakearg="f")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def _prepare_boot(self, mock_osclients, nic=None, assert_nic=False):
fake_server = mock.MagicMock()
fc = fakes.FakeClients()
mock_osclients.Clients.return_value = fc
nova = fakes.FakeNovaClient()
fc.nova = lambda: nova
user_endpoint = objects.Endpoint("url", "user", "password", "tenant")
clients = osclients.Clients(user_endpoint)
scenario = servers.NovaServers(clients=clients)
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._generate_random_name = mock.MagicMock(return_value="name")
kwargs = {"fakearg": "f"}
expected_kwargs = {"fakearg": "f"}
assert_nic = nic or assert_nic
if nic:
kwargs["nics"] = nic
if assert_nic:
nova.networks.create("net-1")
expected_kwargs["nics"] = nic or [{"net-id": "net-2"}]
print(kwargs)
print(expected_kwargs)
return scenario, kwargs, expected_kwargs
def _verify_boot_server(self, mock_osclients, nic=None, assert_nic=False):
scenario, kwargs, expected_kwargs = self._prepare_boot(
mock_osclients=mock_osclients,
nic=nic, assert_nic=assert_nic)
scenario.boot_server("img", 0, **kwargs)
scenario._boot_server.assert_called_once_with(
"img", 0, auto_assign_nic=False, **expected_kwargs)
@mock.patch("rally.benchmark.scenarios.nova.servers.NovaServers.clients")
@mock.patch("rally.benchmark.runners.base.osclients")
def test_boot_server_no_nics(self, mock_osclients, mock_nova_clients):
mock_nova_clients.return_value = fakes.FakeNovaClient()
self._verify_boot_server(mock_osclients=mock_osclients,
nic=None, assert_nic=False)
@mock.patch("rally.benchmark.runners.base.osclients")
def test_boot_server_with_nic(self, mock_osclients):
self._verify_boot_server(mock_osclients=mock_osclients,
nic=[{"net-id": "net-1"}], assert_nic=True)
def test_snapshot_server(self):
fake_server = object()
fake_image = fakes.FakeImageManager()._create()
fake_image.id = "image_id"
scenario = servers.NovaServers()
scenario._generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._create_image = mock.MagicMock(return_value=fake_image)
scenario._delete_server = mock.MagicMock()
scenario._delete_image = mock.MagicMock()
scenario.snapshot_server("i", 0, fakearg=2)
scenario._boot_server.assert_has_calls([
mock.call("i", 0, fakearg=2),
mock.call("image_id", 0, fakearg=2)])
scenario._create_image.assert_called_once_with(fake_server)
scenario._delete_server.assert_has_calls([
mock.call(fake_server, force=False),
mock.call(fake_server, force=False)])
scenario._delete_image.assert_called_once_with(fake_image)
def _test_resize(self, confirm=False):
fake_server = object()
fake_image = fakes.FakeImageManager()._create()
fake_image.id = "image_id"
flavor = mock.MagicMock()
to_flavor = mock.MagicMock()
scenario = servers.NovaServers()
scenario._generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._resize_confirm = mock.MagicMock()
scenario._resize_revert = mock.MagicMock()
scenario._resize = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
kwargs = {"confirm": confirm}
scenario.resize_server(fake_image, flavor, to_flavor, **kwargs)
scenario._resize.assert_called_once_with(fake_server, to_flavor)
if confirm:
scenario._resize_confirm.assert_called_once_with(fake_server)
else:
scenario._resize_revert.assert_called_once_with(fake_server)
def test_resize_with_confirm(self):
self._test_resize(confirm=True)
def test_resize_with_revert(self):
self._test_resize(confirm=False)
def test_boot_and_live_migrate_server(self):
fake_server = mock.MagicMock()
scenario = servers.NovaServers()
scenario._generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario.sleep_between = mock.MagicMock()
scenario._find_host_to_migrate = mock.MagicMock(
return_value="host_name")
scenario._live_migrate = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.boot_and_live_migrate_server("img", 0, min_sleep=10,
max_sleep=20, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._find_host_to_migrate.assert_called_once_with(fake_server)
scenario._live_migrate.assert_called_once_with(fake_server,
"host_name",
False, False)
scenario._delete_server.assert_called_once_with(fake_server)
def test_boot_server_from_volume_and_live_migrate(self):
fake_server = mock.MagicMock()
scenario = servers.NovaServers()
scenario._generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario.sleep_between = mock.MagicMock()
scenario._find_host_to_migrate = mock.MagicMock(
return_value="host_name")
scenario._live_migrate = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
fake_volume = fakes.FakeVolumeManager().create()
fake_volume.id = "volume_id"
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario.boot_server_from_volume_and_live_migrate("img", 0, 5,
min_sleep=10,
max_sleep=20,
fakearg="f")
scenario._create_volume.assert_called_once_with(5, imageRef="img")
scenario._boot_server.assert_called_once_with(
"img", 0,
block_device_mapping={"vda": "volume_id:::1"},
fakearg="f")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._find_host_to_migrate.assert_called_once_with(fake_server)
scenario._live_migrate.assert_called_once_with(fake_server,
"host_name",
False, False)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_boot_server_attach_created_volume_and_live_migrate(self):
fake_volume = mock.MagicMock()
fake_server = mock.MagicMock()
scenario = servers.NovaServers()
scenario._attach_volume = mock.MagicMock()
scenario._detach_volume = mock.MagicMock()
scenario.sleep_between = mock.MagicMock()
scenario._find_host_to_migrate = mock.MagicMock(
return_value="host_name")
scenario._live_migrate = mock.MagicMock()
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
image = "img"
flavor = "flavor"
size = 5
boot_kwargs = {"some_var": "asd"}
scenario.boot_server_attach_created_volume_and_live_migrate(
image, flavor, size, min_sleep=10, max_sleep=20,
boot_server_kwargs=boot_kwargs)
scenario._boot_server.assert_called_once_with(image, flavor,
**boot_kwargs)
scenario._create_volume.assert_called_once_with(size)
scenario._attach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._detach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._live_migrate.assert_called_once_with(fake_server,
"host_name",
False, False)
scenario._delete_volume.assert_called_once_with(fake_volume)
scenario._delete_server.assert_called_once_with(fake_server)
def _test_boot_and_migrate_server(self, confirm=False):
fake_server = mock.MagicMock()
scenario = servers.NovaServers()
scenario._generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._stop_server = mock.MagicMock()
scenario._migrate = mock.MagicMock()
scenario._resize_confirm = mock.MagicMock()
scenario._resize_revert = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
kwargs = {"confirm": confirm}
scenario.boot_and_migrate_server("img", 0,
fakearg="fakearg", **kwargs)
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg",
confirm=confirm)
scenario._stop_server.assert_called_once_with(fake_server)
scenario._migrate.assert_called_once_with(fake_server)
if confirm:
scenario._resize_confirm.assert_called_once_with(fake_server,
status="SHUTOFF")
else:
scenario._resize_revert.assert_called_once_with(fake_server,
status="SHUTOFF")
scenario._delete_server.assert_called_once_with(fake_server)
def test_boot_and_migrate_server_with_confirm(self):
self._test_boot_and_migrate_server(confirm=True)
def test_boot_and_migrate_server_with_revert(self):
self._test_boot_and_migrate_server(confirm=False)
def test_boot_and_rebuild_server(self):
scenario = servers.NovaServers()
scenario._boot_server = mock.Mock()
scenario._rebuild_server = mock.Mock()
scenario._delete_server = mock.Mock()
from_image = "img1"
to_image = "img2"
flavor = "flavor"
scenario.boot_and_rebuild_server(from_image, to_image, flavor,
fakearg="fakearg")
scenario._boot_server.assert_called_once_with(from_image, flavor,
fakearg="fakearg")
server = scenario._boot_server.return_value
scenario._rebuild_server.assert_called_once_with(server, to_image)
scenario._delete_server.assert_called_once_with(server)
|
|
import os
import json
import hashlib
import logging
import smtplib
from collections import defaultdict
from config import (
BUSINESS_MEMBER_RECIPIENT,
DEFAULT_MAIL_SENDER,
ENABLE_SLACK,
MAIL_PASSWORD,
MAIL_PORT,
MAIL_SERVER,
MAIL_USERNAME,
MULTIPLE_ACCOUNT_WARNING_MAIL_RECIPIENT,
SLACK_API_KEY,
SLACK_CHANNEL,
)
import requests
from npsp import SalesforceConnection, SalesforceException, DEFAULT_RDO_TYPE
def construct_slack_message(contact=None, opportunity=None, rdo=None, account=None):
if rdo and opportunity:
raise SalesforceException("rdo and opportunity can't both be specified")
reason = (
getattr(rdo, "encouraged_by", False)
or getattr(opportunity, "encouraged_by", False)
or ""
)
period = f"[{rdo.installment_period}]" if rdo else "[one-time]"
amount = getattr(rdo, "amount", False) or getattr(opportunity, "amount", "")
amount = float(amount)
reason = f"({reason})" if reason else ""
entity = account.name if account else contact.name
message = f"{entity} pledged ${amount:.0f} {period} {reason}"
logging.info(message)
return message
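# Illustrative sketch of the message format produced above, using made-up
# stand-in objects (all attribute values here are hypothetical):
def _example_construct_slack_message():
    from types import SimpleNamespace
    contact = SimpleNamespace(name="Jane Doe")
    rdo = SimpleNamespace(installment_period="monthly", amount="10",
                          encouraged_by="a friend")
    # Returns: 'Jane Doe pledged $10 [monthly] (a friend)'
    return construct_slack_message(contact=contact, rdo=rdo)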
def notify_slack(contact=None, opportunity=None, rdo=None, account=None):
"""
Send a notification about a donation to Slack.
"""
text = construct_slack_message(
contact=contact, opportunity=opportunity, rdo=rdo, account=account
)
username = rdo.lead_source if rdo else opportunity.lead_source
message = {"text": text, "channel": SLACK_CHANNEL, "icon_emoji": ":moneybag:"}
send_slack_message(message, username=username)
def send_slack_message(message=None, username="moneybot"):
if not ENABLE_SLACK or not message:
return
message["token"] = SLACK_API_KEY
message["username"] = username
url = "https://slack.com/api/chat.postMessage"
try:
response = requests.post(url, params=message)
slack_response = json.loads(response.text)
if not slack_response["ok"]:
raise Exception(slack_response["error"])
except Exception as e:
logging.error(f"Failed to send Slack notification: {e}")
def construct_slack_attachment(
email=None,
donor=None,
source=None,
amount=None,
period=None,
donation_type=None,
reason=None,
):
"""
Not currently in use.
"""
grav_hash = hashlib.md5(email.lower().encode("utf-8")).hexdigest()
attachment = {
"fallback": "Donation",
"color": "good",
# "pretext": "optional and appears above attachment"
# # "text": "text",
"author_name": donor,
# author_link
"author_icon": f"https://www.gravatar.com/avatar/{grav_hash}?s=16&r=g&default=robohash",
"title": email,
# "title_link":
# "text": reason,
# title_link
"fields": [
{"title": "Source", "value": source, "short": True},
{"title": "Amount", "value": f"${amount}", "short": True},
{"title": "Period", "value": period, "short": True},
{"title": "Type", "value": donation_type, "short": True},
],
# "image_url":
# "thumb_url":
# "footer":
# "footer_icon":
# "ts":
}
if reason:
attachment["text"] = reason
return attachment
def send_multiple_account_warning(contact):
"""
Send the warnings about multiple accounts.
"""
body = f"""
Multiple accounts were found matching the email address <{contact.email}>
while inserting a transaction.
The transaction was attached to the first match found. You may want to
move the transaction to the proper account if the one chosen is not
correct. You may also want to delete or otherwise correct the duplicate
account(s).
"""
send_email(
recipient=MULTIPLE_ACCOUNT_WARNING_MAIL_RECIPIENT,
subject=f"Multiple accounts found for {contact.email}",
body=body,
)
def clean(form):
"""
    Clean up a form dict: convert the strings "None", "True" and "False" to
    their Python equivalents, convert numeric strings to ints or floats, and
    return a defaultdict so that looking up a missing key yields None.
"""
result = defaultdict(lambda: None)
for k, v in form.items():
if v is None or v == "None":
result[k] = None
continue
if v is True or v == "True":
result[k] = True
continue
if v is False or v == "False":
result[k] = False
continue
if isinstance(v, (int, float)):
result[k] = v
continue
try:
result[k] = int(v)
continue
except ValueError:
try:
result[k] = float(v)
continue
except ValueError:
result[k] = v
return result
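# Minimal sketch of clean() in action; the form keys and values below are
# made up for illustration:
def _example_clean():
    cleaned = clean({"amount": "25", "pay_fees": "True", "reason": "None",
                     "note": "thanks"})
    assert cleaned["amount"] == 25        # numeric strings become ints/floats
    assert cleaned["pay_fees"] is True    # "True"/"False" become booleans
    assert cleaned["reason"] is None      # "None" becomes None
    assert cleaned["note"] == "thanks"    # anything else passes through
    assert cleaned["missing"] is None     # absent keys read as None (defaultdict)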
def send_email(recipient, subject, body, sender=None):
if sender is None:
FROM = DEFAULT_MAIL_SENDER
else:
FROM = sender
    TO = recipient if isinstance(recipient, list) else [recipient]
SUBJECT = subject
TEXT = body
# Prepare actual message
message = """\From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (
FROM,
", ".join(TO),
SUBJECT,
TEXT,
)
try:
server = smtplib.SMTP(MAIL_SERVER, MAIL_PORT)
server.ehlo()
server.starttls()
server.login(MAIL_USERNAME, MAIL_PASSWORD)
server.sendmail(FROM, TO, message)
server.close()
logging.debug("successfully sent the mail")
except Exception as e:
logging.error(f"failed to send mail: {e}")
def send_email_new_business_membership(account, contact):
if not BUSINESS_MEMBER_RECIPIENT:
raise Exception("BUSINESS_MEMBER_RECIPIENT must be specified")
url = SalesforceConnection().instance_url
body = f"A new business membership has been received for {account.name}:\n\n"
body += f"{url}/{account.id}\n\n"
if account.created:
body += "A new account was created.\n\n"
body += f"It was created by {contact.name}:\n\n"
body += f"{url}/{contact.id}\n\n"
if contact.created:
body += "A new contact was created."
logging.info(body)
send_email(
recipient=BUSINESS_MEMBER_RECIPIENT,
subject="New business membership",
body=body,
)
def dir_last_updated(folder):
return str(max(os.path.getmtime(os.path.join(root_path, f))
for root_path, dirs, files in os.walk(folder)
for f in files))
def update_fees(query, log, donation_type):
sf = SalesforceConnection()
response = sf.query(query)
log.it('Found {} donations available to update fees.'.format(
len(response)))
for item in response:
# salesforce connect
path = item['attributes']['url']
url = '{}{}'.format(sf.instance_url, path)
if donation_type == 'recurring':
amount = float(item['npe03__Amount__c'])
else:
amount = float(item['Amount'])
opp_id = item.get('Id')
payment_type = item.get('payment_type')
        if (item.get('payment_type') == 'American Express'
                or item.get('Card_type__c') == 'American Express'
                or item.get('Stripe_Payment_Type__c') == 'amex'):
            payment_type = 'amex'
        elif (item.get('payment_type') == 'ach'
                or item.get('Stripe_Payment_Type__c') == 'bank_account'
                or item.get('Stripe_Bank_Account__c') is not None):
            payment_type = 'bank_account'
        # calculate_amount_fees is expected to be provided elsewhere in this
        # module/package; it is not imported above.
        fees = calculate_amount_fees(amount, payment_type,
                                     item.get('Stripe_Agreed_to_pay_fees__c', False))
log.it('---- Updating fee value for {} to ${}'.format(opp_id, fees))
update = {
'Stripe_Transaction_Fee__c': fees
}
resp = requests.patch(url, headers=sf.headers, data=json.dumps(update))
if resp.status_code == 204:
log.it('salesforce updated with fee value')
else:
            log.it('error updating salesforce with fee value')
            raise Exception('error updating Salesforce with fee value')
|
|
#!/usr/bin/env python3
# step through an executable under cosimulation and report errors
#import objgraph
import sys
import os
import json
import codecs
import traceback
import multiprocessing
import pickle
libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'lib')
sys.path.append(libdir)
import utils
import msp_fr5969_model as model
from mspdebug_driver import MSPdebug
from msp_emulator import Emulator
from msp_cosim import Cosim
from msp_isa import isa
import cosim as cosim_repl
import smt
def is_timer_read(fields, rdst):
return fields['words'] == [0x4210 | rdst, 0x0350]
def is_reg_store(fields, rsrc):
words = fields['words']
if len(words) != 2 or words[0] != 0x40c2 | rsrc << 8:
return -1
return words[1]
def is_reg_sub(fields, rsrc, rdst):
return fields['words'] == [0x8000 | rsrc << 8 | rdst]
run_max_steps = 20000
run_interval = 1
run_passes = 5
def check_elf(elfname, verbosity = 0):
mulator = Emulator(verbosity=verbosity, tracing=True)
mulator.prog(elfname)
fram_end = mulator.md(model.upper_start - 256, 256)
for byte in fram_end[:-2]:
if byte != 255:
print('Invalid prog write to reserved fram region:')
print(utils.triple_summarize(fram_end, model.upper_start - 256))
return False
resetvec = fram_end[-2:]
for i in range(run_passes):
success, steps = mulator.run(run_max_steps)
if not success:
return False
elif steps < run_max_steps:
fram_end = mulator.md(model.upper_start - 256, 256)
for byte in fram_end[:-2]:
if byte != 255:
print('Invalid run write to reserved fram region:')
print(utils.triple_summarize(fram_end, model.upper_start - 256))
return False
if fram_end[-2:] != resetvec:
print('Broke the reset vector?')
print('was', resetvec)
print('now', fram_end[-2:])
return False
upper = mulator.md(model.upper_start, model.upper_size)
for byte in upper:
if byte != 255:
print('Modified upper memory?')
print(utils.triple_summarize(upper, model.upper_start))
return False
touchdown = mulator.md(mulator.regs()[0], 16)
expected_touchdown = [0xff, 0x3f, 0xff, 0x3f, 0xff, 0x3f, 0xff, 0x3f, 0xff, 0x3f, 0xff, 0x3f, 0xff, 0x3f, 0xf8, 0x3f]
if touchdown != expected_touchdown:
print('Missed touchdown pad:')
print(' expecting', expected_touchdown)
print(' got', touchdown)
return False
if verbosity >= 0:
print(' checked {:s}, pass'.format(elfname))
return True
print('Did not complete?')
return False
def trace_elf(elfname, jname, tty = None, logname = None, verbosity = 0):
if logname is None:
with MSPdebug(tty=tty, logf=sys.stdout, verbosity=verbosity) as driver:
mulator = Emulator(verbosity=verbosity, tracing=True)
mmap = [(model.ram_start, model.ram_size), (model.fram_start, model.fram_size)]
cosim = Cosim([driver, mulator], [True, False], mmap)
master_idx = 0
cosim_repl.prog_and_sync(cosim, master_idx, elfname)
cosim.run(max_steps=run_max_steps, interval=run_interval, passes=run_passes)
diff = cosim.diff()
trace = mulator.trace
iotrace = mulator.iotrace2
else:
with open(logname, 'at') as f:
with MSPdebug(tty=tty, logf=f, verbosity=max(verbosity,1)) as driver:
mulator = Emulator(verbosity=verbosity, tracing=True)
mmap = [(model.ram_start, model.ram_size), (model.fram_start, model.fram_size)]
cosim = Cosim([driver, mulator], [True, False], mmap)
master_idx = 0
cosim_repl.prog_and_sync(cosim, master_idx, elfname)
cosim.run(max_steps=run_max_steps, interval=run_interval, passes=run_passes)
diff = cosim.diff()
trace = mulator.trace
iotrace = mulator.iotrace2
with utils.Write7z(jname) as f:
writer = codecs.getwriter('utf-8')
json.dump({'diff':diff, 'trace':trace, 'iotrace':iotrace}, writer(f))
if verbosity >= 0:
print(' traced {:s} to {:s}'.format(elfname, jname))
def retrace_elf(elfname, jname, tinfo, interesting_blocks, verbosity = 0):
if not os.path.isfile(jname):
print('skipping {:s}, no trace {:s}'.format(elfname, jname))
return True
timulator = Emulator(verbosity=verbosity, tracing=True, tinfo=tinfo)
mulator = Emulator(verbosity=verbosity, tracing=True)
mmap = [(model.ram_start, model.ram_size), (model.fram_start, model.fram_size)]
cosim = Cosim([timulator, mulator], [False, False], mmap)
master_idx = 0
cosim_repl.prog_and_sync(cosim, master_idx, elfname)
cosim.run(max_steps=run_max_steps, interval=run_interval, passes=run_passes)
tmp_jstr = json.dumps({'diff':cosim.diff(), 'trace':mulator.trace, 'iotrace':mulator.iotrace2})
tmp_jobj = json.loads(tmp_jstr)
diff = tmp_jobj['diff']
trace = tmp_jobj['trace']
iotrace = tmp_jobj['iotrace']
old_diff, old_trace, old_iotrace = load_trace(jname)
same = diff == old_diff
if verbosity >= 0:
print(' timed emulated {:s} against {:s}. Same? {:s}'
.format(elfname, jname, repr(same)))
# print('---ORIGINAL---')
# utils.explain_diff(old_diff)
# print('---EMULATOR---')
# utils.explain_diff(diff)
if not same:
old_blocks = []
old_mismatches = compute_mismatches(old_diff, verbosity=verbosity)
old_err = mismatches_to_blocks(old_trace, old_mismatches, old_blocks)
blocks = []
mismatches = compute_mismatches(diff, verbosity=verbosity)
err = mismatches_to_blocks(trace, mismatches, blocks)
if old_err and err:
print(' failures in both traces: {:s}'.format(elfname))
elif old_err:
print(' BAD: failures in hardware trace: {:s}'.format(elfname))
elif err:
print(' BAD: failures in emulator trace: {:s}'.format(elfname))
else:
print(' successful trace: {:s}'.format(elfname))
old_blocks_index = {addr: (x, y) for (addr, x, y) in old_blocks}
trace_errors = 0
uncovered = 0
for (addr, block, difference) in blocks:
if addr in old_blocks_index:
old_block, old_difference = old_blocks_index.pop(addr)
if block != old_block:
print(' BAD: trace difference at {:05x}'.format(addr))
trace_errors += 1
elif difference != old_difference:
interesting_blocks.append((addr, old_block, old_difference))
if verbosity >= 0:
print('timing difference for block at {:05x} of {:s}'.format(addr, elfname))
for fields in block:
ins = isa.decode(fields['words'][0])
fmt, name, smode, dmode = isa.instr_to_modes(ins)
if fmt == 'fmt1':
rsrc = fields['rsrc']
rdst = fields['rdst']
if 'isrc' in fields:
sval = ', {:#x}'.format(fields['isrc'])
else:
sval = ''
print('{:s}\t{:s} (R{:d}{:s}), {:s} (R{:d})'
.format(name, smode, rsrc, sval, dmode, rdst))
elif fmt == 'fmt2':
rsrc = fields['rsrc']
if 'isrc' in fields:
sval = ', {:#x}'.format(fields['isrc'])
else:
sval = ''
print('{:s}\t{:s} (R{:d}{:s})'
.format(name, smode, rsrc, sval))
elif fmt == 'jump':
print('{:s}\t{:d}, taken={:s}'
.format(name, fields['jump_offset'], str(fields['jump_taken'])))
else:
print('{:s}, {:s}, {:s}, {:s}'.format(fmt, name, smode, dmode))
utils.print_dict(fields)
print('hardware: {:s}, emulator: {:s}'
.format(repr(old_difference), repr(difference)))
print('')
else:
uncovered += 1
if trace_errors > 0:
print(' BAD: {:d} trace differences'.format(trace_errors))
if uncovered > 0 or len(old_blocks_index) > 0:
print(' BAD: {:d} blocks unique to hardware, {:d} to emulator'
.format(len(old_blocks_index), uncovered))
return same
def load_trace(jname):
with utils.Read7z(jname) as f:
reader = codecs.getreader('utf-8')
jobj = json.load(reader(f))
return jobj['diff'], jobj['trace'], jobj['iotrace']
trace_suffix = '.trace.json.7z'
def compute_mismatches(diff, verbosity):
mismatches = {}
for addr in diff:
if addr == 'regs':
if verbosity >= 1:
print('nonempty reg diff???')
utils.explain_diff(diff)
else:
mems = diff[addr]
assert len(mems) == 2
assert len(mems[0]) == len(mems[1])
for i in range(len(mems[0])):
if mems[0][i] != mems[1][i]:
mismatches[int(addr)+i] = (mems[0][i], mems[1][i])
return mismatches
# Consumes mismatches, attaching each one to the traced block that produced it.
# Returns an error flag if any blocks lack a corresponding mismatch or if any
# mismatches cannot be explained by looking at the trace.
def mismatches_to_blocks(trace, mismatches, blocks):
current = []
in_region = False
in_store = False
missing = 0
err = False
for fields in trace:
if is_timer_read(fields, 14):
assert current == [] and in_region == False
in_region = True
elif is_timer_read(fields, 15):
assert len(current) > 0 and in_region == True
in_region = False
elif is_reg_sub(fields, 14, 15):
assert in_region == False
in_store = True
if in_store:
assert in_region == False
addr = is_reg_store(fields, 15)
if addr > 0:
if addr in mismatches:
blocks.append((addr, current, mismatches.pop(addr)))
else:
# print('MISSING {:s} | {:s}'.format(repr(addr), repr(current)))
missing += 1
err = True
current = []
in_store = False
elif in_region:
current.append(fields)
if err:
print('Unexpected blocks! {:d} blocks have no corresponding mismatches, {:d} unexplained mismatches in diff.'
.format(missing, len(mismatches)))
elif len(mismatches) > 0:
err = True
print('State mismatch! {:d} unexplained mismatches in diff.'
.format(len(mismatches)))
print(mismatches)
return err
def arff_header():
s = "@relation 'cycle_count'\n"
for i, ins in enumerate(isa.ids_ins):
s += '@attribute {:s} numeric\n'.format('_'.join(isa.idx_to_modes(i)))
return s + '@attribute cycles numeric\n@data'
def arff_entry(indices, cycles):
bins = {}
for k in indices:
bins[k] = bins.setdefault(k, 0) + 1
return ', '.join([str(bins[i]) if i in bins else '0' for i in range(len(isa.ids_ins))] + [str(cycles)])
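# Illustrative example: with a hypothetical 4-entry isa.ids_ins,
# arff_entry([1, 1, 3], 27) yields '0, 2, 0, 1, 27' -- a histogram of
# instruction indices followed by the block's observed cycle count.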
def create_arff(blocks, arffname):
with open(arffname, 'wt') as f:
f.write(arff_header() + '\n')
for addr, block, difference in blocks:
assert(len(difference) == 2 and difference[1] == 0)
cycles = difference[0]
indices = []
description = []
for fields in block:
ins = isa.decode(fields['words'][0])
idx = isa.instr_to_idx(ins)
modes = isa.idx_to_modes(idx)
indices.append(idx)
description.append(' {:s} {:s}, {:s}'.format(*modes[1:]))
f.write(arff_entry(indices, cycles) + '\n')
def create_json(blocks, jname):
with open(jname, 'wt') as f:
json.dump({'blocks':blocks}, f)
def extract_json(jname):
if jname.endswith('.json.7z'):
with utils.Read7z(jname) as f:
reader = codecs.getreader('utf-8')
jobj = json.load(reader(f))
return jobj['blocks']
else:
with open(jname, 'rt') as f:
jobj = json.load(f)
return jobj['blocks']
def walk_par(fn, targetdir, cargs, n_procs = 1, verbosity = 0):
if os.path.isfile(targetdir):
if verbosity >= 0:
print('target is a single file, processing directly')
return [fn((0, [('', targetdir)], cargs))]
roots = set()
worklists = [(i, [], cargs) for i in range(n_procs)]
i = 0
for root, dirs, files in os.walk(targetdir, followlinks=True):
if root in roots:
del dirs[:]
continue
else:
roots.add(root)
for fname in files:
worklists[i%n_procs][1].append((root, fname))
i += 1
if verbosity >= 0:
if n_procs == 1:
print('found {:d} files under {:s}, running on main process'
.format(i, targetdir))
else:
print('found {:d} files under {:s}, splitting across {:d} processes'
.format(i, targetdir, n_procs))
if n_procs == 1:
return [fn(worklists[0])]
else:
pool = multiprocessing.Pool(processes=n_procs)
return pool.map(fn, worklists)
def process_micros(args):
(k, files, (check, execute, tinfo, suffix, abort_on_error, ttys, verbosity)) = args
i = 0
retrace_differences = []
blocks = []
if verbosity >= 1:
pid = os.getpid()
logname = 'pytrace.{:d}.log.txt'.format(pid)
else:
logname = None
for root, fname in files:
if fname.endswith(suffix):
i += 1
name = fname[:-len(suffix)]
elfpath = os.path.join(root, fname)
jpath = os.path.join(root, name + trace_suffix)
if check:
if not check_elf(elfpath, verbosity=verbosity):
print('Unexpected behavior! {:s}'.format(elfpath))
if abort_on_error:
break
else:
continue
if execute:
if ttys is None:
assert k == 0, 'specify multiple TTYs to run more than one process'
tty = None
else:
assert 0 <= k and k < len(ttys), 'must specify at least one TTY per process'
tty = ttys[k]
                # TODO: expose max_retries as a real command-line option
max_retries = 3
retries = 0
while retries < max_retries:
try:
trace_elf(elfpath, jpath, tty=tty, logname=logname, verbosity=verbosity)
break
except Exception:
traceback.print_exc()
retries += 1
if abort_on_error and retries >= max_retries:
break
if tinfo:
same = retrace_elf(elfpath, jpath, tinfo, blocks, verbosity=verbosity)
if not same:
retrace_differences.append(elfpath)
if verbosity >= 0:
print('processed {:d} microbenchmarks, done'.format(i))
return i, retrace_differences, blocks
def walk_micros(testdir, check, execute, tinfo, suffix = '.elf', abort_on_error = True,
ttys = None, verbosity = 0, n_procs = 1):
retrace_data = walk_par(process_micros, testdir,
(check, execute, tinfo, suffix, abort_on_error, ttys, verbosity),
n_procs=n_procs, verbosity=verbosity)
count = 0
count_differences = 0
printed_header = False
interesting_blocks = []
for i, differences, blocks in retrace_data:
count += i
interesting_blocks += blocks
for elfpath in differences:
if not printed_header:
print('Emulated timing model disagreed for traces:')
printed_header = True
print(' {:s}'.format(elfpath))
count_differences += 1
if n_procs > 1 and verbosity >= 0:
print('dispatched to {:d} cores, processed {:d} total microbenchmarks, {:d} timing differences'
.format(n_procs, count, count_differences))
if len(interesting_blocks) > 0 and verbosity >= 0:
print('recovered {:d} interesting blocks that differ in hardware and emulation'
.format(len(interesting_blocks)))
return interesting_blocks
def process_traces(args):
(k, files, (prefix, verbosity)) = args
i = 0
errs = 0
blocks = []
for root, fname in files:
if fname.startswith(prefix) and fname.endswith(trace_suffix):
i += 1
jname = os.path.join(root, fname)
diff, trace, iotrace = load_trace(jname)
mismatches = compute_mismatches(diff, verbosity=verbosity)
err = mismatches_to_blocks(trace, mismatches, blocks)
if err:
print(' failures in trace {:s}'.format(jname))
errs += 1
elif verbosity >= 1:
print(' successful trace {:s}'.format(jname))
# if verbosity >= 2:
# objgraph.show_growth()
# print('{:d} blocks total'.format(len(blocks)))
# print(utils.recursive_container_count(blocks))
if verbosity >= 0:
print('processed {:d} traces to {:d} observed blocks, {:d} failures, done'
.format(i, len(blocks), errs))
return i, blocks
def walk_traces(testdir, prefix = '', verbosity = 0, n_procs = 1):
results = walk_par(process_traces, testdir, (prefix, verbosity),
n_procs=n_procs, verbosity=verbosity)
count = 0
blocks = []
for i, subblocks in results:
count += i
blocks += subblocks
if n_procs > 1 and verbosity >= 0:
print('dispatched to {:d} cores, processed {:d} total traces to {:d} blocks'
.format(n_procs, count, len(blocks)))
return blocks
def main(args):
testdir = args.testdir
suffix = args.suffix
check = args.check
execute = args.execute
jin_list = args.jsonin
jout = args.jsonout
cjin_list = args.cjsonin
cjout = args.cjsonout
arffname = args.arff
smtround = args.smt
n_procs = args.ncores
abort_on_error = not args.noabort
trprefix = args.trprefix
tinfo_name = args.timing
ttys = args.tty
verbosity = args.verbose
did_work = False
if tinfo_name:
if tinfo_name == 'reference':
tinfo = 'reference'
else:
with open(tinfo_name, 'rb') as f:
tinfo = pickle.load(f)
else:
tinfo = None
if check or execute or tinfo:
did_work = True
interesting_blocks = walk_micros(testdir, check, execute, tinfo,
suffix=suffix, abort_on_error=abort_on_error,
n_procs=n_procs, ttys=ttys, verbosity=verbosity)
else:
interesting_blocks = None
if jout or cjout or arffname or smtround > 0:
did_work = True
if interesting_blocks is not None:
blocks = interesting_blocks
elif jin_list:
blocks = []
for jname in jin_list:
new_blocks = extract_json(jname)
if verbosity >= 0:
print('read {:d} blocks from {:s}'
.format(len(new_blocks), jname))
blocks += new_blocks
elif not cjin_list:
blocks = walk_traces(testdir, prefix=trprefix, verbosity=verbosity, n_procs=n_procs)
else:
blocks = []
if cjin_list:
smt_blocks = []
for jname in cjin_list:
new_smt_blocks = extract_json(jname)
if verbosity >= 0:
print('read {:d} smt blocks from {:s}'
.format(len(new_smt_blocks), jname))
smt_blocks += new_smt_blocks
else:
smt_blocks = []
if len(blocks) + len(smt_blocks) <= 0:
print('no blocks found, nothing else to do')
return
if jout:
create_json(blocks, jout)
if verbosity >= 0:
print('wrote {:d} blocks to {:s}'.format(len(blocks), jout))
if arffname:
create_arff(blocks, arffname)
if verbosity >= 0:
print('wrote {:d} blocks to {:s}'.format(len(blocks), arffname))
if cjout:
smt_blocks += smt.compress_blocks(blocks)
blocks = []
create_json(smt_blocks, cjout)
if verbosity >= 0:
print('wrote {:d} smt blocks to {:s}'.format(len(smt_blocks), cjout))
if smtround > 0:
# destructive
smt_blocks += smt.compress_blocks(blocks)
if smtround == 1:
smt.round_1(smt_blocks)
elif smtround == 2:
smt_blocks.reverse()
smt.round_2(smt_blocks)
elif smtround == 3:
smt.round_3(smt_blocks)
elif smtround == 4:
smt.round_4(smt_blocks)
elif smtround == 5:
#smt_blocks.reverse()
smt.round_5(smt_blocks)
elif smtround == 6:
smt.round_6(smt_blocks)
elif smtround == 7:
smt.round_7(smt_blocks)
elif smtround == 8:
smt.round_8(smt_blocks)
elif smtround == 9:
smt.round_9(smt_blocks)
elif smtround == 10:
smt.round_10(smt_blocks)
if not did_work:
print('Nothing to do.')
return
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('testdir', nargs='?', default='.',
help='directory to look for files in')
parser.add_argument('suffix', nargs='?', default='.elf',
help='suffix for executable micro files')
parser.add_argument('-c', '--check', action='store_true',
help='check micros for incorrect behavior under emulation')
parser.add_argument('-e', '--execute', action='store_true',
help='execute micros against real hardware')
parser.add_argument('-ji', '--jsonin', nargs='+',
help='read constraint blocks from json files')
parser.add_argument('-jo', '--jsonout',
help='accumulate constraint blocks into raw json file')
parser.add_argument('-cji', '--cjsonin', nargs='+',
help='read constraint blocks from smt-compressed json files')
parser.add_argument('-cjo', '--cjsonout',
help='smt compress blocks into json file')
parser.add_argument('-a', '--arff',
help='accumulate data into arff file')
parser.add_argument('-s', '--smt', type=int, default=0,
help='run analysis round with smt solver')
parser.add_argument('-t', '--timing',
help='use this pickled timing model to emulate Timer_A')
parser.add_argument('-v', '--verbose', type=int, default=0,
help='verbosity level')
parser.add_argument('-noabort', action='store_true',
help='do not abort after first failure')
parser.add_argument('-ncores', type=int, default=1,
help='run in parallel on this many cores')
parser.add_argument('-trprefix', default='',
help='only read traces with this prefix')
parser.add_argument('-tty', default=None, nargs='+',
help='connect to mspdebug on these TTYs')
args = parser.parse_args()
main(args)
exit(0)
|
|
#!/usr/bin/env python
### This Python script was created by Ferris Cheung, 2017-01-01.
### It builds the shared-cache Reuse Distance Distribution (L2R) by adding up the
### per-core RDDs and writes it to <name>-reuse-<thread>.txt, and it collects the
### shared-cache Stack Distance Distribution (L2E) from the L2 cache port and
### writes it to <name>-stack-<thread>.txt.
### All of the data is collected for a single thread.
import sys
import re
import os
###
### input file and output file define
###
inFilename = sys.argv[1]
inFile2name = sys.argv[2]
threadname = sys.argv[3]
if os.path.isfile(inFilename):
namelength = inFilename.rfind(".")
name = inFilename[0:namelength]
exten = inFilename[namelength:]
ReuseDH = name+"-reuse-"+threadname+exten
StackDH = name+"-stack-"+threadname+exten
print "inFilename:", inFilename,inFile2name
print "outFilename:", ReuseDH,StackDH
print "thread name:",threadname
#the input file stats.txt
fpRead = open(inFilename, "r")
#the input file system.tasks.txt
tkRead = open(inFile2name,"r")
#the output file Reuse Distance Distribution
RDHWrite = open(ReuseDH, "w+")
#the output file Stack Distance Distribution
SDHWrite = open(StackDH,"w+")
#thread name pattern
threadnamePattern = re.compile(r'.*(next_task=%s).*' % threadname)
#core0ReuseDis pattern
core0ReuseDisPattern = re.compile(r'.*(system.l2.core0ReuseDis::)([0-9]+)\s+([0-9]+)')
core0ReuseSamplePattern = re.compile(r'.*(system.l2.core0ReuseDis::sample).* ([0-9]+)')
#core1ReuseDis pattern
core1ReuseDisPattern = re.compile(r'.*(system.l2.core1ReuseDis::)([0-9]+)\s+([0-9]+)')
core1ReuseSamplePattern = re.compile(r'.*(system.l2.core1ReuseDis::sample).* ([0-9]+)')
#l2Stack historgram Pattern
l2StackHisPattern = re.compile(r'.*(system.l2.l2StackHis).*([0-9]+)\s+([0-9]+)')
#l2 cachehits Pattern
l2cachehitsPattern = re.compile(r'.*(system.l2.cachehits).*([0-9|\.]+)')
#core memread pattern
core0memreadPattern = re.compile(r'.*(system.switch_cpus0.commit.op_class_0::MemRead).* ([0-9]+)\s+.*')
core1memreadPattern = re.compile(r'.*(system.switch_cpus1.commit.op_class_0::MemRead).* ([0-9]+)\s+.*')
#core memwrite Pattern
core0memwritePattern = re.compile(r'.*(system.switch_cpus0.commit.op_class_0::MemWrite).* ([0-9]+)\s+.*')
core1memwritePattern = re.compile(r'.*(system.switch_cpus1.commit.op_class_0::MemWrite).* ([0-9]+)\s+.*')
#core commit total Pattern
core0commitPattern = re.compile(r'.*(system.switch_cpus0.commit.op_class_0::total).* ([0-9|\.]+)')
core1commitPattern = re.compile(r'.*(system.switch_cpus1.commit.op_class_0::total).* ([0-9|\.]+)')
#core cpi pattern
core0cpiPattern = re.compile(r'.*(system.switch_cpus0.cpi).* (([0-9|\.]+)|(nan))')
core1cpiPattern = re.compile(r'.*(system.switch_cpus1.cpi).* (([0-9|\.]+)|(nan))')
#thread pattern
threadbeginPattern = re.compile(r'.*Begin Simulation Statistics.*')
threadendPattern =re.compile(r'.*End Simulation Statistics.*')
lines = fpRead.readline()
# debug counts which thread is currently being processed (used in the progress output)
debug = 1
#thread num
threadnum = 1
#stats num
statsnum = 1
tasklines = tkRead.readline()
###
### check() returns 0 when two bin indices are adjacent, and their difference otherwise (used to zero-fill gaps)
###
def check(a,b):
if b-a==1:
return 0
else:
c = b-a
return c
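###
### illustrative examples: check(3, 4) == 0 because the bins are adjacent,
### while check(3, 7) == 4 signals a gap whose missing bins the caller
### zero-fills before appending the next value
###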
while tasklines:
threadnamematch = threadnamePattern.search(tasklines)
if threadnamematch:
while 1:
if threadnum == statsnum:
print threadnum
threadflag = False
                ## read the stats data
while lines:
threadbeginmatch = threadbeginPattern.search(lines)
if threadbeginmatch:
threadflag = True
                        # these three flags mark whether this is the first
                        # distribution entry collected for the thread; once the
                        # first entry has been collected the flag is set to False
core0flag = True
core1flag = True
l2flag = True
                        # these three lists hold the collected distributions
core0=[]
core1=[]
l2 = []
                        # these *pos variables point at the last distribution
                        # bin collected; they are used to check whether the
                        # distribution is continuous
core0pos=1
core1pos=1
l2pos = 1
l2R = []
l2ES=[]
                        # these pos variables point at the distribution bin currently being collected
pos0=0
pos1=0
pos2=0
threadlines = fpRead.readline()
threadendmatch = threadendPattern.match(threadlines)
while threadlines:
core0Dismatch = core0ReuseDisPattern.search(threadlines)
core1Dismatch = core1ReuseDisPattern.search(threadlines)
core0samplematch = core0ReuseSamplePattern.search(threadlines)
core1samplematch = core1ReuseSamplePattern.search(threadlines)
l2Hismatch = l2StackHisPattern.search(threadlines)
l2cachehitsmatch = l2cachehitsPattern.search(threadlines)
core0readmatch = core0memreadPattern.search(threadlines)
core0writematch = core0memwritePattern.search(threadlines)
core0commitmatch = core0commitPattern.search(threadlines)
core0cpimatch = core0cpiPattern.search(threadlines)
core1readmatch = core1memreadPattern.search(threadlines)
core1writematch = core1memwritePattern.search(threadlines)
core1commitmatch = core1commitPattern.search(threadlines)
core1cpimatch = core1cpiPattern.search(threadlines)
threadendmatch = threadendPattern.search(threadlines)
if core0samplematch:
core0sample = core0samplematch.group(2)
if core0Dismatch:
pos0 = int(core0Dismatch.group(2))
                                # pad the distribution with zeros when it
                                # begins at a bin number greater than 1
if core0flag:
core0flag = False
core0pos = pos0
dis0 = pos0
while(dis0-1) > 0:
core0.append(0)
dis0 = dis0-1
val0 = int(core0Dismatch.group(3))
                                # pad the distribution with zeros when the
                                # bins are not continuous
dis0 = check(core0pos,pos0)
if dis0!=0:
while (dis0-1) > 0:
core0.append(0)
dis0 = dis0-1
core0.append(val0)
core0pos = pos0
if core1samplematch:
core1sample = core1samplematch.group(2)
if core1Dismatch:
pos1 = int(core1Dismatch.group(2))
if core1flag:
core1flag = False
core1pos = pos1
dis1 = pos1
while(dis1-1) >0 :
core1.append(0)
dis1 = dis1-1
val1 = int(core1Dismatch.group(3))
dis1 = check(core1pos,pos1)
if dis1!=0:
while (dis1-1) > 0:
core1.append(0)
dis1 = dis1-1
core1.append(val1)
core1pos = pos1
if l2Hismatch:
pos2 = int(l2Hismatch.group(2))
if l2flag:
l2flag = False
l2pos = pos2
dis2 = pos2
while(dis2-1) > 0:
l2.append(0)
dis2 = dis2-1
val2 = int(l2Hismatch.group(3))
dis2 = check(l2pos,pos2)
if dis2!=0:
while (dis2-1) > 0:
l2.append(0)
dis2 = dis2-1
l2.append(val2)
l2pos = pos2
if l2cachehitsmatch:
cachehits = l2cachehitsmatch.group(2)
if core0readmatch:
read0 = int(core0readmatch.group(2))
if core0writematch:
write0 = int(core0writematch.group(2))
if core0commitmatch:
commit0 = float(core0commitmatch.group(2))
if core0cpimatch:
cpi0 = core0cpimatch.group(2)
if(cpi0!='nan'):
cpi0=float(cpi0)
else:
cpi0=0
if core1readmatch:
read1 = int(core1readmatch.group(2))
if core1writematch:
write1 = int(core1writematch.group(2))
if core1commitmatch:
commit1 = float(core1commitmatch.group(2))
if core1cpimatch:
cpi1 = core1cpimatch.group(2)
if(cpi1!='nan'):
cpi1=float(cpi1)
else:
cpi1=0
if threadendmatch:
statsnum = statsnum+1
                                # fill out the remainder of the histogram
dis0 = check(pos0,300)
dis1 = check(pos1,300)
dis2 = check(pos2,30)
if (dis0==0):
if pos0==299:
core0.append(0)
else:
while dis0 > 0 :
core0.append(0)
dis0 = dis0-1
if (dis1==0):
if pos1 == 299:
core1.append(0)
else:
while dis1 > 0:
core1.append(0)
dis1 = dis1-1
if (dis2==0):
if pos2 == 29:
l2.append(0)
else:
while dis2 > 0:
l2.append(0)
dis2 = dis2-1
assert len(core0)==300, "core0 len error"
assert len(core1)==300, "core1 len error"
assert len(l2)==30,"l2 len error"
##
                                ## this part computes the added-up reuse distance distribution:
                                ## when both cpi0 and cpi1 are available, a weighted combination is used;
                                ## otherwise the two per-core distributions are added directly
##
if ((cpi1!=0) and (cpi0!=0)):
fac0=(read0+write0)/commit0
fac1=(read1+write1)/commit1
cpic0=cpi0/cpi1
cpic1=cpi1/cpi0
temp0 = (fac1/fac0)*cpic0
temp1 = (fac0/fac1)*cpic1
core0coe = float('%.2f'%temp0)
core1coe = float('%.2f'%temp1)
for i in range(300):
core0[i] = core0[i]*(1+core0coe)
core1[i] = core1[i]*(1+core1coe)
l2R.append(int(core0[i]+core1[i]))
else:
for j in range(300):
l2R.append(int(core0[j]+core1[j]))
assert len(l2R)==300, "l2R error"
for k in range(300):
RDHWrite.write("%d" %l2R[k])
for m in range(30):
SDHWrite.write("%d " %l2[m])
RDHWrite.write('\n')
SDHWrite.write("\n")
print "thread ",debug,"done"
debug =debug +1
break
threadlines = fpRead.readline()
if threadflag:
break
else:
lines = fpRead.readline()
break
else:
lines = fpRead.readline()
while lines:
threadmatch = threadendPattern.search(lines)
samplematch = core0ReuseSamplePattern.search(lines)
if threadmatch:
statsnum = statsnum+1
break
lines = fpRead.readline()
threadnum = threadnum+1
else:
threadnum = threadnum+1
tasklines=tkRead.readline()
tkRead.close()
fpRead.close()
RDHWrite.close()
SDHWrite.close()
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Ole Krause-Sparmann
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import numpy
import scipy
import time
import sys
from scipy.spatial.distance import cdist
from nearpy.utils import numpy_array_from_list_or_numpy_array
class RecallPrecisionExperiment(object):
"""
Performs nearest neighbour recall experiments with custom vector data
for all engines in the specified list.
perform_experiment() returns list of (recall, precision, search_time)
tuple. These are the averaged values over all request vectors. search_time
is the average retrieval/search time compared to the average exact search
time.
    coverage_ratio determines how many of the vectors are used as query
    vectors for exact and approximated search. Because the search comparison
    overhead is quite large, it is best with large data sets (>10000) to
    use a low coverage_ratio (like 0.1) to make the experiment fast. A
    coverage_ratio of 0.1 makes the experiment use 10% of all the vectors
    for querying, that is, it looks for the nearest neighbours of 10% of
    all vectors.
"""
def __init__(self, N, vectors, coverage_ratio=0.2):
"""
Performs exact nearest neighbour search on the data set.
vectors can either be a numpy matrix with all the vectors
as columns OR a python array containing the individual
numpy vectors.
"""
# We need a dict from vector string representation to index
self.vector_dict = {}
self.N = N
self.coverage_ratio = coverage_ratio
# Get numpy array representation of input
self.vectors = numpy_array_from_list_or_numpy_array(vectors)
# Build map from vector string representation to vector
for index in range(self.vectors.shape[1]):
self.vector_dict[self.__vector_to_string(
self.vectors[:, index])] = index
# Get transposed version of vector matrix, so that the rows
# are the vectors (needed by cdist)
vectors_t = numpy.transpose(self.vectors)
        # Determine the indices of query vectors used for comparison
# with approximated search.
query_count = numpy.floor(self.coverage_ratio *
self.vectors.shape[1])
self.query_indices = []
for k in range(int(query_count)):
index = numpy.floor(k*(self.vectors.shape[1]/query_count))
index = min(index, self.vectors.shape[1]-1)
self.query_indices.append(int(index))
print('\nStarting exact search (query set size=%d)...\n' % query_count)
# For each query vector get the closest N neighbours
self.closest = {}
self.exact_search_time_per_vector = 0.0
for index in self.query_indices:
v = vectors_t[index, :].reshape(1, self.vectors.shape[0])
exact_search_start_time = time.time()
D = cdist(v, vectors_t, 'euclidean')
            self.closest[index] = numpy.argsort(D)[0, 1:N+1]  # numpy.argsort; scipy.argsort was removed from SciPy's top-level namespace
# Save time needed for exact search
exact_search_time = time.time() - exact_search_start_time
self.exact_search_time_per_vector += exact_search_time
        print('\nDone with exact search...\n')
# Normalize search time
self.exact_search_time_per_vector /= float(len(self.query_indices))
def perform_experiment(self, engine_list):
"""
Performs nearest neighbour recall experiments with custom vector data
for all engines in the specified list.
        Returns a list with one (recall, precision, search_time) tuple per
        engine. All values are averaged over all request vectors.
search_time is the average retrieval/search time compared to the
average exact search time.
"""
# We will fill this array with measures for all the engines.
result = []
# For each engine, first index vectors and then retrieve neighbours
for engine in engine_list:
print('Engine %d / %d' % (engine_list.index(engine),
len(engine_list)))
# Clean storage
engine.clean_all_buckets()
# Use this to compute average recall
avg_recall = 0.0
# Use this to compute average precision
avg_precision = 0.0
# Use this to compute average search time
avg_search_time = 0.0
# Index all vectors and store them
for index in range(self.vectors.shape[1]):
engine.store_vector(self.vectors[:, index],
'data_%d' % index)
# Look for N nearest neighbours for query vectors
for index in self.query_indices:
# Get indices of the real nearest as set
real_nearest = set(self.closest[index])
# We have to time the search
search_time_start = time.time()
# Get nearest N according to engine
nearest = engine.neighbours(self.vectors[:, index])
# Get search time
search_time = time.time() - search_time_start
                # For comparison we need their indices (as a set)
nearest = set([self.__index_of_vector(x[0]) for x in nearest])
# Remove query index from search result to make sure that
# recall and precision make sense in terms of "neighbours".
# If ONLY the query vector is retrieved, we want recall to be
# zero!
                nearest.discard(index)  # discard(): the engine may not have returned the query vector itself
# If the result list is empty, recall and precision are 0.0
if len(nearest) == 0:
recall = 0.0
precision = 0.0
else:
# Get intersection count
inter_count = float(len(real_nearest.intersection(
nearest)))
# Normalize recall for this vector
recall = inter_count/float(len(real_nearest))
# Normalize precision for this vector
precision = inter_count/float(len(nearest))
# Add to accumulator
avg_recall += recall
# Add to accumulator
avg_precision += precision
# Add to accumulator
avg_search_time += search_time
# Normalize recall over query set
avg_recall = avg_recall / float(len(self.query_indices))
# Normalize precision over query set
avg_precision = avg_precision / float(len(self.query_indices))
# Normalize search time over query set
avg_search_time = avg_search_time / float(len(self.query_indices))
# Normalize search time with respect to exact search
avg_search_time /= self.exact_search_time_per_vector
print(' recall=%f, precision=%f, time=%f' % (avg_recall,
avg_precision,
avg_search_time))
result.append((avg_recall, avg_precision, avg_search_time))
# Return (recall, precision, search_time) tuple
return result
def __vector_to_string(self, vector):
""" Returns string representation of vector. """
return numpy.array_str(vector)
def __index_of_vector(self, vector):
""" Returns index of specified vector from test data set. """
return self.vector_dict[self.__vector_to_string(vector)]
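# Usage sketch assuming nearpy's usual Engine / RandomBinaryProjections API;
# the dimension, vector count and hash configuration below are arbitrary.
def _example_recall_precision_experiment():
    from nearpy import Engine
    from nearpy.hashes import RandomBinaryProjections
    dim, count = 100, 1000
    vectors = numpy.random.randn(dim, count)   # one vector per column
    engine = Engine(dim, lshashes=[RandomBinaryProjections('rbp', 10)])
    experiment = RecallPrecisionExperiment(10, vectors, coverage_ratio=0.1)
    # One (recall, precision, relative_search_time) tuple per engine:
    return experiment.perform_experiment([engine])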
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2015-08-25 08:45:45
from __future__ import unicode_literals, division, absolute_import, print_function
from datetime import datetime
import codecs
import os
import sys
import re
import requests
import shutil
import string
import time
import random
from .compat import basestring, json, urlparse, unquote, unicode_str, to_text, to_binary, OrderedDict
RE_WORDS = re.compile(r'<.*?>|((?:\w[-\w]*|&.*?;)+)', re.S)
RE_CHARS = re.compile(r'<.*?>|(.)', re.S)
RE_TAG = re.compile(r'<(/)?([^ ]+?)(?:(\s*/)| .*?)?>', re.S)
RE_NEWLINES = re.compile(r'\r\n|\r') # Used in normalize_newlines
RE_CAMEL_CASE = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
FILENAME_UNSAFE_CHARS = r'[%&:;!<>\\\/\*\?\"\'\|\^\+]'
def import_src(name, fpath):
import os
import imp
p = fpath if os.path.isabs(fpath) \
else os.path.join(os.path.dirname(__file__), fpath)
return imp.load_source(name, p)
def now():
    # strftime() expects a text format string on Python 3, so avoid to_binary() here
    return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def load_json_preserve_order(s):
return json.loads(s, object_pairs_hook=OrderedDict)
############################################################
#
# OS and System Functions
#
############################################################
def check_port_open(port, addr='127.0.0.1'):
    import socket  # not imported at module level in this file
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        return sock.connect_ex((addr, port)) == 0
    finally:
        sock.close()
def get_user_home():
home = os.curdir
if 'HOME' in os.environ:
home = os.environ['HOME']
elif os.name == 'posix':
home = os.path.expanduser("~/")
elif os.name == 'nt':
if 'HOMEPATH' in os.environ and 'HOMEDRIVE' in os.environ:
home = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH']
else:
import pwd
home = pwd.getpwuid(os.getuid()).pw_dir
return home
def get_current_user():
for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
user = os.environ.get(name)
if user:
return user
# If not user from os.environ.get()
import pwd
return pwd.getpwuid(os.getuid())[0]
def sanitize_filename(s, restricted=False, is_id=False):
"""Sanitizes a string so it could be used as part of a filename.
If restricted is set, use a stricter subset of allowed characters.
Set is_id if this is not an arbitrary string, but an ID that should be kept
if possible.
"""
def replace_insane(char):
        # ACCENT_CHARS (an accented-char -> ASCII mapping) is assumed to be
        # defined elsewhere; it is only consulted when restricted=True
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
if char == '?' or ord(char) < 32 or ord(char) == 127:
return ''
elif char == '"':
return '' if restricted else '\''
elif char == ':':
return '_-' if restricted else ' -'
elif char in '\\/|*<>':
return '_'
if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
return '_'
if restricted and ord(char) > 127:
return '_'
return char
# Handle timestamps
s = re.sub(r'[0-9]+(?::[0-9]+)+',
lambda m: m.group(0).replace(':', '_'), s)
result = ''.join(map(replace_insane, s))
if not is_id:
while '__' in result:
result = result.replace('__', '_')
result = result.strip('_')
# Common case of "Foreign band name - English song title"
if restricted and result.startswith('-_'):
result = result[2:]
if result.startswith('-'):
result = '_' + result[len('-'):]
result = result.lstrip('.')
if not result:
result = '_'
return result
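# A few illustrative inputs/outputs in the default (restricted=False) mode,
# where the ACCENT_CHARS table is never consulted:
#   sanitize_filename('foo: bar?')  -> 'foo - bar'   (':' softened, '?' dropped)
#   sanitize_filename('12:34')      -> '12_34'       (timestamps keep their digits)
#   sanitize_filename('a\\b/c|d')   -> 'a_b_c_d'     ('\\', '/', '|' become '_')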
def sanitize_path(s):
"""Sanitizes and normalizes path on Windows"""
if sys.platform != 'win32':
return s
drive_or_unc, _ = os.path.splitdrive(s)
if sys.version_info < (2, 7) and not drive_or_unc:
drive_or_unc, _ = os.path.splitunc(s)
    # remove_start(s, prefix) (strip a leading prefix from s) is assumed to be
    # provided elsewhere in this module
    norm_path = os.path.normpath(remove_start(
        s, drive_or_unc)).split(os.path.sep)
if drive_or_unc:
norm_path.pop(0)
sanitized_path = [
path_part if path_part in ['.', '..'] else re.sub(
r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
for path_part in norm_path]
if drive_or_unc:
sanitized_path.insert(0, drive_or_unc + os.path.sep)
return os.path.join(*sanitized_path)
############################################################
#
# String/List/Dict Functions
#
############################################################
def pprint(obj):
print(json.dumps(obj, ensure_ascii=False,
indent=2, sort_keys=True))
def slice_list(l, n):
"""Yield successive n-sized chunks from l."""
# for i in xrange(0, len(l), n):
# yield l[i:i + n]
return [l[i:i + n] for i in range(0, len(l), n)]
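# Example (illustrative): slice_list([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]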
def distinct_list(source_list, sort=False, reverse=False):
result_list = OrderedDict(
(x, True) for x in source_list).keys()
return sorted(result_list, reverse=reverse) if sort else result_list
def flatten_list(source_list):
result_list = []
for item in source_list:
if isinstance(item, list):
result_list.extend(item)
else:
result_list.append(item)
return [r for r in result_list if r]
############################################################
#
# File Functions
#
############################################################
def write_list(name, ls):
if not ls:
return
with codecs.open(name, 'w', 'utf-8') as f:
for s in ls:
f.write(s + '\n')
def read_list(name):
if not os.path.isfile(name):
return []
with codecs.open(name, 'r', 'utf-8') as f:
return list(filter(bool, [line.strip() for line in f]))
def write_file(name, data):
if not data:
return
with codecs.open(name, 'w', 'utf-8') as f:
f.write(to_text(data))
def read_file(name):
if not os.path.isfile(name):
return None
with codecs.open(name, 'r', 'utf-8') as f:
return to_text(f.read())
def file_size(src):
total_size = 0
if os.path.isdir(src):
        for f in os.listdir(src):
            path = os.path.join(src, f)  # join with src so this works outside the CWD
            if os.path.isfile(path):
                total_size += os.path.getsize(path)
elif os.path.isfile(src):
total_size = os.path.getsize(src)
return total_size
def files_size(files):
return sum([os.path.getsize(f) for f in files])
def write_json(filename, data):
if not data:
return
with codecs.open(filename, 'w', 'utf-8') as f:
json.dump(data, f, ensure_ascii=False,
indent=4, sort_keys=True)
def read_json(filename):
if not os.path.isfile(filename):
return {}
with codecs.open(filename, 'r', 'utf-8') as f:
return json.load(f)
def write_dict(filename, data):
return write_json(filename, data)
def read_dict(filename):
return read_json(filename)
def humanize_bytes(n, precision=2):
# Author: Doug Latornell
# Licence: MIT
# URL: http://code.activestate.com/recipes/577081/
abbrevs = [
(1 << 50, 'PB'),
(1 << 40, 'TB'),
(1 << 30, 'GB'),
(1 << 20, 'MB'),
(1 << 10, 'kB'),
(1, 'B')
]
if n == 1:
return '1 B'
for factor, suffix in abbrevs:
if n >= factor:
break
# noinspection PyUnboundLocalVariable
return '%.*f %s' % (precision, n / factor, suffix)
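# Illustrative values:
#   humanize_bytes(1)                    -> '1 B'
#   humanize_bytes(1536)                 -> '1.50 kB'
#   humanize_bytes(1048576, precision=1) -> '1.0 MB'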
############################################################
#
# HTTP Functions
#
############################################################
def get_valid_filename(s):
"""
Return the given string converted to a string that can be used for a clean
filename. Remove leading and trailing spaces; convert other spaces to
underscores; and remove anything that is not an alphanumeric, dash,
underscore, or dot.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
"""
s = to_text(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w\(\)_.]', '', s)
def get_url_last_path(url):
if url.endswith('/'):
url = url[:-1]
return os.path.basename(urlparse(url).path)
def url_to_filename(url):
filename = get_url_last_path(url)
return get_valid_filename(filename)
def unquote_url(url):
return unquote(url)
def requests_to_curl(r):
req = r.request
method = req.method
uri = req.url
ct = req.headers.get('Content-Type')
data = '[multipart]' if ct and 'multipart/form-data' in ct else (
req.body or '')
headers = ["'{0}: {1}'".format(k, v) for k, v in req.headers.items()]
headers = " -H ".join(headers)
command = "curl -X {method} -H {headers} -d '{data}' '{uri}'"
return command.format(method=method, headers=headers, data=data, uri=uri)
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
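# Illustrative round-trips:
#   int_to_base36(35) -> 'z'        int_to_base36(36) -> '10'
#   int_to_base36(123456789) -> '21i3v9', and base36_to_int('21i3v9') -> 123456789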
############################################################
#
# Misc Functions
#
############################################################
def aes_encrypt(data, secret='P2wH6eFqd8x4abnf'):
# https://pypi.python.org/pypi/pycrypto
from Crypto.Cipher import AES
aes = AES.new(secret, AES.MODE_CBC, b'2017011720370117')
    if isinstance(data, unicode_str):  # unicode_str from .compat; bare `unicode` breaks on Python 3
        data = data.encode('utf-8')
    if len(data) % 16 != 0:
        data = data + b'\0' * (16 - len(data) % 16)
return aes.encrypt(data)
def aes_decrypt(data, secret='P2wH6eFqd8x4abnf'):
# https://pypi.python.org/pypi/pycrypto
from Crypto.Cipher import AES
aes = AES.new(secret, AES.MODE_CBC, b'2017011720370117')
    return aes.decrypt(data).rstrip(b'\0')
def salted_hmac(key_salt, value, secret=None):
"""
Return the HMAC-SHA1 of 'value', using a key generated from key_salt and a
secret (which defaults to settings.SECRET_KEY).
A different key_salt should be passed in for every application of HMAC.
"""
if secret is None:
secret = settings.SECRET_KEY
key_salt = force_bytes(key_salt)
secret = force_bytes(secret)
# We need to generate a derived key from our base key. We can do this by
# passing the key_salt and our base key through a pseudo-random function and
# SHA1 works nicely.
key = hashlib.sha1(key_salt + secret).digest()
# If len(key_salt + secret) > sha_constructor().block_size, the above
# line is redundant and could be replaced by key = key_salt + secret, since
# the hmac module does the same thing for keys longer than the block size.
# However, we need to ensure that we *always* do this.
return hmac.new(key, msg=force_bytes(value), digestmod=hashlib.sha1)
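# Usage sketch (hypothetical salt and secret); passing an explicit secret
# avoids the Django settings lookup, but force_bytes still requires Django:
def _example_salted_hmac():
    mac = salted_hmac('myapp.token', 'some value', secret='not-a-real-secret')
    return mac.hexdigest()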
|
|
#!/usr/bin/env python
# mammon - a useless ircd
#
# Copyright (c) 2015, William Pitcock <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import asyncio
import time
import socket
import copy
from ircreactor.envelope import RFC1459Message
from .channel import Channel
from .utility import CaseInsensitiveDict, uniq
from .property import user_property_items, user_mode_items
from .server import eventmgr_rfc1459, eventmgr_core, get_context
from . import __version__
class ClientHistoryEntry(object):
def __init__(self, cli):
self.nickname = cli.nickname
self.username = cli.username
self.hostname = cli.hostname
self.realname = cli.realname
self.account = cli.account
self.ctx = cli.ctx
def register(self):
self.ctx.client_history[self.nickname] = self
# XXX - handle ping timeout
# XXX - quit() could eventually be handled using self.eventmgr.dispatch()
class ClientProtocol(asyncio.Protocol):
def connection_made(self, transport):
self.ctx = get_context()
self.peername = transport.get_extra_info('peername')
self.transport = transport
self.recvq = list()
self.channels = list()
self.nickname = '*'
self.username = str()
self.hostname = self.peername[0] # XXX - handle rdns...
self.realaddr = self.peername[0]
self.realname = '<unregistered>'
self.props = CaseInsensitiveDict()
self.caps = CaseInsensitiveDict()
self.user_set_metadata = []
self.metadata = CaseInsensitiveDict()
self.servername = self.ctx.conf.name
self.away_message = str()
self._role_name = None
self.account = None # XXX - update if needed when account objects are implemented
self.connected = True
self.registered = False
self.registration_lock = set()
self.push_registration_lock('NICK', 'USER', 'DNS')
self.ctx.logger.debug('new inbound connection from {}'.format(self.peername))
self.eventmgr = eventmgr_rfc1459
self.tls = self.transport.get_extra_info('sslcontext', default=None) is not None
if self.tls:
self.props['special:tls'] = True
        asyncio.ensure_future(self.do_rdns_check())  # asyncio.async() no longer parses on Python 3.7+
def update_idle(self):
self.last_event_ts = self.ctx.current_ts
@property
def role(self):
return self.ctx.roles.get(self._role_name)
@role.setter
def role(self, value):
self._role_name = value
@property
def idle_time(self):
return int(self.ctx.current_ts - self.last_event_ts)
def able_to_edit_metadata(self, target):
"""True if we're able to edit metadata on the given target, False otherwise."""
if self == target:
return True
if isinstance(target, ClientProtocol):
if not self.role:
return False
if 'metadata:set_global' in self.role.capabilities:
return True
if self.servername == target.servername and 'metadata:set_local' in self.role.capabilities:
return True
if isinstance(target, Channel):
# XXX - hook up channel ACL when we have that
return False
def connection_lost(self, exc):
"""Handle loss of connection if it was already not handled.
Calling quit() can cause this function to be called recursively, so we use IClient.connected
as a property to determine whether or not the client is still connected. If we have already handled
this connection loss (most likely by inducing it in quit()), then IClient.connected will be
False.
Side effects: IProtocol.quit() is called by this function."""
if not self.connected:
return
if not exc:
self.quit('Connection closed')
return
self.quit('Connection error: ' + repr(exc))
def do_rdns_check(self):
"""Handle looking up the client's reverse DNS and validating it as a coroutine."""
self.dump_notice('Looking up your hostname...')
rdns = yield from self.ctx.eventloop.getnameinfo(self.peername)
if rdns[0] == self.realaddr:
self.dump_notice('Could not find your hostname...')
self.release_registration_lock('DNS')
return
try:
fdns = yield from self.ctx.eventloop.getaddrinfo(rdns[0], rdns[1], proto=socket.IPPROTO_TCP)
for fdns_e in fdns:
if fdns_e[4][0] == self.realaddr:
self.dump_notice('Found your hostname: ' + rdns[0])
self.hostname = rdns[0]
self.release_registration_lock('DNS')
return
        except Exception:
            pass
self.dump_notice('Could not find your hostname...')
self.release_registration_lock('DNS')
def data_received(self, data):
[self.message_received(m) for m in data.splitlines()]
def message_received(self, data):
m = RFC1459Message.from_message(data.decode('UTF-8', 'replace').strip('\r\n'))
m.client = self
# logging.debug('client {0} --> {1}'.format(repr(self.__dict__), repr(m.serialize())))
if len(self.recvq) > self.ctx.conf.recvq_len:
self.quit('Excess flood')
return
self.recvq.append(m)
# XXX - drain_queue should be called on all objects at once to enforce recvq limits
self.drain_queue()
def drain_queue(self):
while self.recvq:
m = self.recvq.pop(0)
self.eventmgr.dispatch(*m.to_event())
# handle a mandatory side effect resulting from rfc1459.
def handle_side_effect(self, msg, params=[]):
m = RFC1459Message.from_data(msg, source=self.hostmask, params=params)
m.client = self
self.eventmgr.dispatch(*m.to_event())
def dump_message(self, m):
"""Dumps an RFC1459 format message to the socket.
Side effect: we actually operate on a copy of the message, because the message may have different optional
mutations depending on capabilities and broadcast target."""
out_m = copy.deepcopy(m)
out_m.client = self
eventmgr_core.dispatch('outbound message postprocess', out_m)
self.transport.write(bytes(out_m.to_message() + '\r\n', 'UTF-8'))
def dump_numeric(self, numeric, params):
"""Dump a numeric to a connected client.
The `target` field that numerics carry for routing is added automatically; you do *not* need to include it in ``params``."""
msg = RFC1459Message.from_data(numeric, source=self.ctx.conf.name, params=[self.nickname] + params)
self.dump_message(msg)
def dump_notice(self, message):
"Dump a NOTICE to a connected client."
msg = RFC1459Message.from_data('NOTICE', source=self.ctx.conf.name, params=[self.nickname, '*** ' + message])
self.dump_message(msg)
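# Illustrative sketch (not part of the original source): assuming the server is named
# 'irc.example.org' and the client's nickname is 'alice', the two helpers above would
# emit RFC 1459 lines roughly like:
#   self.dump_notice('Looking up your hostname...')
#     -> :irc.example.org NOTICE alice :*** Looking up your hostname...
#   self.dump_numeric('422', ['MOTD File is missing'])
#     -> :irc.example.org 422 alice :MOTD File is missing
# The exact wire form depends on RFC1459Message.to_message().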
@property
def hostmask(self):
if not self.registered:
return None
hm = self.nickname
if self.username:
hm += '!' + self.username
if self.hostname:
hm += '@' + self.hostname
return hm
@property
def status(self):
st = str()
if self.away_message:
st += 'G'
else:
st += 'H'
if self.props.get('special:oper', False):
st += '*'
return st
def kill(self, source, reason):
m = RFC1459Message.from_data('KILL', source=source.hostmask, params=[self.nickname, reason])
self.dump_message(m)
self.quit('Killed ({source} ({reason}))'.format(source=source.nickname, reason=reason))
def quit(self, message):
m = RFC1459Message.from_data('QUIT', source=self.hostmask, params=[message])
self.sendto_common_peers(m)
self.exit()
def exit(self):
self.connected = False
self.transport.close()
if not self.registered:
return
while self.channels:
i = self.channels.pop(0)
i.channel.part(self)
self.ctx.clients.pop(self.nickname)
ClientHistoryEntry(self).register()
def push_registration_lock(self, *locks):
if self.registered:
return
self.registration_lock |= set(locks)
def release_registration_lock(self, *locks):
if self.registered:
return
self.registration_lock -= set(locks)
if not self.registration_lock:
self.register()
@property
def legacy_modes(self):
out = '+'
for i in self.props.keys():
if self.props[i] and i in user_property_items:
out += user_property_items[i]
return out
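# Illustrative sketch (not part of the original source): user_property_items is assumed
# to map property names to single mode letters. With a hypothetical mapping like
# {'special:oper': 'o', 'special:invisible': 'i'} and
#   self.props = {'special:oper': True, 'special:invisible': True}
# legacy_modes would yield '+oi', and set_legacy_modes('-i') would flush a
# 'MODE <nick> -i' message back to the client via flush_legacy_mode_change().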
def set_legacy_modes(self, in_str):
before = copy.deepcopy(self.props)
mod = False
for i in in_str:
if i == '+':
mod = True
elif i == '-':
mod = False
else:
if i == 'o' and mod:
continue
if i not in user_mode_items:
self.dump_numeric('501', [i, 'Unknown MODE flag'])
continue
prop = user_mode_items[i]
self.props[prop] = mod
self.flush_legacy_mode_change(before, self.props)
def flush_legacy_mode_change(self, before, after):
out = str()
mod = 0
for i in user_property_items.keys():
if before.get(i, False) and not after.get(i, False):
if mod == 1:
out += user_property_items[i]
else:
mod = 1
out += '-'
out += user_property_items[i]
elif not before.get(i, False) and after.get(i, False):
if mod == 2:
out += user_property_items[i]
else:
mod = 2
out += '+'
out += user_property_items[i]
msg = RFC1459Message.from_data('MODE', source=self.hostmask, params=[self.nickname, out])
self.dump_message(msg)
def sendto_common_peers(self, message, exclude=[], cap=None):
if cap:
base = [i.client for m in self.channels for i in m.channel.members if i.client not in exclude and cap in i.client.caps] + [self]
else:
base = [i.client for m in self.channels for i in m.channel.members if i.client not in exclude] + [self]
peerlist = uniq(base)
if self in exclude:
peerlist.remove(self)
[i.dump_message(message) for i in peerlist]
def dump_isupport(self):
isupport_tokens = {
'NETWORK': self.ctx.conf.network,
'CLIENTVER': '3.2',
'CASEMAPPING': 'ascii',
'CHARSET': 'utf-8',
'SAFELIST': True,
'METADATA': self.ctx.conf.metadata.get('limit', True),
'CHANTYPES': '#',
}
# XXX - split into multiple 005 lines if > 13 tokens
def format_token(k, v):
if isinstance(v, bool):
return k
return '{0}={1}'.format(k, v)
self.dump_numeric('005', [format_token(k, v) for k, v in isupport_tokens.items()] + ['are supported by this server'])
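# Illustrative sketch (not part of the original source): format_token() renders boolean
# tokens as bare keys and everything else as KEY=VALUE, so the 005 reply looks roughly like:
#   :irc.example.org 005 alice NETWORK=ExampleNet CLIENTVER=3.2 CASEMAPPING=ascii
#       CHARSET=utf-8 SAFELIST METADATA=... CHANTYPES=# :are supported by this server
# Token order follows dict iteration order; the XXX above notes the missing 13-token split.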
def register(self):
self.registered = True
self.ctx.clients[self.nickname] = self
self.registration_ts = self.ctx.current_ts
self.update_idle()
if self.tls:
cipher = self.transport.get_extra_info('cipher')
self.dump_notice('You are connected using {1}-{0}-{2}'.format(*cipher))
self.dump_numeric('001', ['Welcome to the ' + self.ctx.conf.network + ' IRC Network, ' + self.hostmask])
self.dump_numeric('002', ['Your host is ' + self.ctx.conf.name + ', running version mammon-' + str(__version__)])
self.dump_numeric('003', ['This server was started at ' + self.ctx.startstamp])
self.dump_numeric('004', [self.ctx.conf.name, 'mammon-' + str(__version__), ''.join(user_mode_items.keys())])
self.dump_isupport()
# XXX - LUSERS isn't implemented.
# self.handle_side_effect('LUSERS')
self.handle_side_effect('MOTD')
|
|
from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import HasProps
from ..properties import Any, Int, String, Instance, List, Dict, Either
class DataSource(PlotObject):
""" A base class for data source types. ``DataSource`` is
not generally useful to instantiate on its own.
"""
column_names = List(String, help="""
A list of names for all the columns in this DataSource.
""")
selected = List(Int, help="""
A list of selected indices on this DataSource.
""")
def columns(self, *columns):
""" Returns a ColumnsRef object for a column or set of columns
on this data source.
Args:
*columns
Returns:
ColumnsRef
"""
return ColumnsRef(source=self, columns=list(columns))
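# Illustrative sketch (not part of the original source): columns() is a convenience for
# building a ColumnsRef without constructing it by hand, e.g.
#   ref = source.columns('x', 'y')
#   ref.source is source   -> True
#   ref.columns            -> ['x', 'y']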
class ColumnsRef(HasProps):
""" A utility object to allow referring to a collection of columns
from a specified data source, all together.
"""
source = Instance(DataSource, help="""
A data source to reference.
""")
columns = List(String, help="""
A list of column names to reference from ``source``.
""")
class ColumnDataSource(DataSource):
""" Maps names of columns to sequences or arrays.
If the ColumnDataSource initializer is called with a single
argument that is a dict, that argument is used as the value for
the "data" attribute. For example::
ColumnDataSource(mydict) # same as ColumnDataSource(data=mydict)
.. note::
There is an implicit assumption that all the columns in
a given ColumnDataSource have the same length.
"""
data = Dict(String, Any, help="""
Mapping of column names to sequences of data. The data can be, e.g.,
Python lists or tuples, NumPy arrays, etc.
""")
def __init__(self, *args, **kw):
""" If called with a single argument that is a dict, treat
that implicitly as the "data" attribute.
"""
if len(args) == 1 and "data" not in kw:
kw["data"] = args[0]
# TODO (bev) invalid to pass args and "data", check and raise exception
raw_data = kw.pop("data", {})
if not isinstance(raw_data, dict):
import pandas as pd
if isinstance(raw_data, pd.DataFrame):
raw_data = self.from_df(raw_data)
else:
raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
for name, data in raw_data.items():
self.add(data, name)
super(ColumnDataSource, self).__init__(**kw)
# TODO: (bev) why not just return a ColumnDataSource?
@classmethod
def from_df(cls, data):
""" Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
data (DataFrame) : data to convert
Returns:
dict(str, list)
"""
index = data.index
new_data = {}
for colname in data:
new_data[colname] = data[colname].tolist()
if index.name:
new_data[index.name] = index.tolist()
elif index.names and not all([x is None for x in index.names]):
new_data["_".join(index.names)] = index.tolist()
else:
new_data["index"] = index.tolist()
return new_data
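# Illustrative sketch (not part of the original source): from_df() flattens a DataFrame
# into plain lists keyed by column name, with the index stored under its name (or 'index'):
#   df = pd.DataFrame({'x': [1, 2], 'y': [3, 4]})
#   ColumnDataSource.from_df(df)  -> {'x': [1, 2], 'y': [3, 4], 'index': [0, 1]}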
def to_df(self):
""" Convert this data source to pandas dataframe.
If ``column_names`` is set, use those. Otherwise let Pandas
infer the column names. The ``column_names`` property can be
used both to order and filter the columns.
Returns:
DataFrame
"""
import pandas as pd
if self.column_names:
return pd.DataFrame(self.data, columns=self.column_names)
else:
return pd.DataFrame(self.data)
def add(self, data, name=None):
""" Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, a name of the form "Series ####" is generated.
Returns:
str: the column name used
"""
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.column_names.append(name)
self.data[name] = data
return name
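# Illustrative sketch (not part of the original source): add() generates "Series N" names
# when no name is given, skipping names that already exist:
#   source = ColumnDataSource(data={})
#   source.add([1, 2, 3])            -> 'Series 0'
#   source.add([4, 5, 6])            -> 'Series 1'
#   source.add([7, 8, 9], name='z')  -> 'z'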
def remove(self, name):
""" Remove a column of data.
Args:
name (str) : name of the column to remove
Returns:
None
.. note::
If the column name does not exist, a warning is issued.
"""
try:
self.column_names.remove(name)
del self.data[name]
except (ValueError, KeyError):
import warnings
warnings.warn("Unable to find column '%s' in data source" % name)
def push_notebook(self):
""" Update date for a plot in the IPthon notebook in place.
This function can be be used to update data in plot data sources
in the IPython notebook, without having to use the Bokeh server.
Returns:
None
.. warning::
The current implementation leaks memory in the IPython notebook,
due to accumulating JS code. This function typically works well
with light UI interactions, but should not be used for continuously
updating data. See :bokeh-issue:`1732` for more details and to
track progress on potential fixes.
"""
from IPython.core import display
from bokeh.protocol import serialize_json
id = self.ref['id']
model = self.ref['type']
json = serialize_json(self.vm_serialize())
js = """
var ds = Bokeh.Collections('{model}').get('{id}');
var data = {json};
ds.set(data);
""".format(model=model, id=id, json=json)
display.display_javascript(js, raw=True)
class ServerDataSource(DataSource):
""" A data source that referes to data located on a Bokeh server.
The data from the server is loaded on-demand by the client.
"""
data_url = String(help="""
The URL to the Bokeh server endpoint for the data.
""")
owner_username = String(help="""
A username to use for authentication when Bokeh server is operating
in multi-user mode.
""")
data = Dict(String, Any, help="""
Additional data to include directly in this data source object. The
columns provided here are merged with those from the Bokeh server.
""")
# Parameters of data transformation operations.
# The 'Any' is used to pass primitives around.
# TODO: (jc) Find/create a property type for 'any primitive/atomic value'
transform = Dict(String, Either(Instance(PlotObject), Any), help="""
Parameters of the data transformation operations.
The associated value is minimally a tag that says which downsample routine
to use. For some downsamplers, parameters are passed this way too.
""")
|
|
from __future__ import unicode_literals
from collections import OrderedDict
from datetime import date
import graphviz
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.validators import ValidationError
from django.db import models
from django.db.models import Q
from django.http import HttpResponse
from django.template import Template, Context
from django.utils.encoding import python_2_unicode_compatible
from django.utils.safestring import mark_safe
from utilities.utils import foreground_color
from .constants import *
#
# Custom fields
#
class CustomFieldModel(object):
def cf(self):
"""
Name-based CustomFieldValue accessor for use in templates
"""
if not hasattr(self, 'get_custom_fields'):
return dict()
return {field.name: value for field, value in self.get_custom_fields().items()}
def get_custom_fields(self):
"""
Return a dictionary of custom fields for a single object in the form {<field>: value}.
"""
# Find all custom fields applicable to this type of object
content_type = ContentType.objects.get_for_model(self)
fields = CustomField.objects.filter(obj_type=content_type)
# If the object exists, populate its custom fields with values
if hasattr(self, 'pk'):
values = CustomFieldValue.objects.filter(obj_type=content_type, obj_id=self.pk).select_related('field')
values_dict = {cfv.field_id: cfv.value for cfv in values}
return OrderedDict([(field, values_dict.get(field.pk)) for field in fields])
else:
return OrderedDict([(field, None) for field in fields])
@python_2_unicode_compatible
class CustomField(models.Model):
obj_type = models.ManyToManyField(ContentType, related_name='custom_fields', verbose_name='Object(s)',
limit_choices_to={'model__in': CUSTOMFIELD_MODELS},
help_text="The object(s) to which this field applies.")
type = models.PositiveSmallIntegerField(choices=CUSTOMFIELD_TYPE_CHOICES, default=CF_TYPE_TEXT)
name = models.CharField(max_length=50, unique=True)
label = models.CharField(max_length=50, blank=True, help_text="Name of the field as displayed to users (if not "
"provided, the field's name will be used)")
description = models.CharField(max_length=100, blank=True)
required = models.BooleanField(default=False, help_text="Determines whether this field is required when creating "
"new objects or editing an existing object.")
is_filterable = models.BooleanField(default=True, help_text="This field can be used to filter objects.")
default = models.CharField(max_length=100, blank=True, help_text="Default value for the field. Use \"true\" or "
"\"false\" for booleans. N/A for selection "
"fields.")
weight = models.PositiveSmallIntegerField(default=100, help_text="Fields with higher weights appear lower in a "
"form")
class Meta:
ordering = ['weight', 'name']
def __str__(self):
return self.label or self.name.replace('_', ' ').capitalize()
def serialize_value(self, value):
"""
Serialize the given value to a string suitable for storage as a CustomFieldValue
"""
if value is None:
return ''
if self.type == CF_TYPE_BOOLEAN:
return str(int(bool(value)))
if self.type == CF_TYPE_DATE:
# Could be date/datetime object or string
try:
return value.strftime('%Y-%m-%d')
except AttributeError:
return value
if self.type == CF_TYPE_SELECT:
# Could be ModelChoiceField or TypedChoiceField
return str(value.id) if hasattr(value, 'id') else str(value)
return value
def deserialize_value(self, serialized_value):
"""
Convert a string into the object it represents depending on the type of field
"""
if serialized_value == '':
return None
if self.type == CF_TYPE_INTEGER:
return int(serialized_value)
if self.type == CF_TYPE_BOOLEAN:
return bool(int(serialized_value))
if self.type == CF_TYPE_DATE:
# Read date as YYYY-MM-DD
return date(*[int(n) for n in serialized_value.split('-')])
if self.type == CF_TYPE_SELECT:
return self.choices.get(pk=int(serialized_value))
return serialized_value
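# Illustrative sketch (not part of the original source): serialize_value()/deserialize_value()
# round-trip a value through the CharField used by CustomFieldValue. For a hypothetical
# date-type field:
#   cf = CustomField(type=CF_TYPE_DATE, name='commissioned')   # field name is made up
#   cf.serialize_value(date(2018, 5, 1))   -> '2018-05-01'
#   cf.deserialize_value('2018-05-01')     -> date(2018, 5, 1)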
@python_2_unicode_compatible
class CustomFieldValue(models.Model):
field = models.ForeignKey('CustomField', related_name='values', on_delete=models.CASCADE)
obj_type = models.ForeignKey(ContentType, related_name='+', on_delete=models.PROTECT)
obj_id = models.PositiveIntegerField()
obj = GenericForeignKey('obj_type', 'obj_id')
serialized_value = models.CharField(max_length=255)
class Meta:
ordering = ['obj_type', 'obj_id']
unique_together = ['field', 'obj_type', 'obj_id']
def __str__(self):
return '{} {}'.format(self.obj, self.field)
@property
def value(self):
return self.field.deserialize_value(self.serialized_value)
@value.setter
def value(self, value):
self.serialized_value = self.field.serialize_value(value)
def save(self, *args, **kwargs):
# Delete this object if it no longer has a value to store
if self.pk and self.value is None:
self.delete()
else:
super(CustomFieldValue, self).save(*args, **kwargs)
@python_2_unicode_compatible
class CustomFieldChoice(models.Model):
field = models.ForeignKey('CustomField', related_name='choices', limit_choices_to={'type': CF_TYPE_SELECT},
on_delete=models.CASCADE)
value = models.CharField(max_length=100)
weight = models.PositiveSmallIntegerField(default=100, help_text="Higher weights appear lower in the list")
class Meta:
ordering = ['field', 'weight', 'value']
unique_together = ['field', 'value']
def __str__(self):
return self.value
def clean(self):
if self.field.type != CF_TYPE_SELECT:
raise ValidationError("Custom field choices can only be assigned to selection fields.")
def delete(self, using=None, keep_parents=False):
# When deleting a CustomFieldChoice, delete all CustomFieldValues which point to it
pk = self.pk
super(CustomFieldChoice, self).delete(using, keep_parents)
CustomFieldValue.objects.filter(field__type=CF_TYPE_SELECT, serialized_value=str(pk)).delete()
#
# Graphs
#
@python_2_unicode_compatible
class Graph(models.Model):
type = models.PositiveSmallIntegerField(choices=GRAPH_TYPE_CHOICES)
weight = models.PositiveSmallIntegerField(default=1000)
name = models.CharField(max_length=100, verbose_name='Name')
source = models.CharField(max_length=500, verbose_name='Source URL')
link = models.URLField(verbose_name='Link URL', blank=True)
class Meta:
ordering = ['type', 'weight', 'name']
def __str__(self):
return self.name
def embed_url(self, obj):
template = Template(self.source)
return template.render(Context({'obj': obj}))
def embed_link(self, obj):
if not self.link:
return ''
template = Template(self.link)
return template.render(Context({'obj': obj}))
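# Illustrative sketch (not part of the original source): `source` and `link` are Django
# template strings rendered with the target object in context. With a hypothetical
# device object named 'switch1' and
#   source = 'https://graphite.example.com/render?target={{ obj.name }}'
# embed_url(device) would return 'https://graphite.example.com/render?target=switch1'.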
#
# Export templates
#
@python_2_unicode_compatible
class ExportTemplate(models.Model):
content_type = models.ForeignKey(
ContentType, limit_choices_to={'model__in': EXPORTTEMPLATE_MODELS}, on_delete=models.CASCADE
)
name = models.CharField(max_length=100)
description = models.CharField(max_length=200, blank=True)
template_code = models.TextField()
mime_type = models.CharField(max_length=15, blank=True)
file_extension = models.CharField(max_length=15, blank=True)
class Meta:
ordering = ['content_type', 'name']
unique_together = [
['content_type', 'name']
]
def __str__(self):
return '{}: {}'.format(self.content_type, self.name)
def to_response(self, context_dict, filename):
"""
Render the template to an HTTP response, delivered as a named file attachment
"""
template = Template(self.template_code)
mime_type = 'text/plain' if not self.mime_type else self.mime_type
output = template.render(Context(context_dict))
# Replace CRLF-style line terminators
output = output.replace('\r\n', '\n')
response = HttpResponse(output, content_type=mime_type)
if self.file_extension:
filename += '.{}'.format(self.file_extension)
response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
return response
#
# Topology maps
#
@python_2_unicode_compatible
class TopologyMap(models.Model):
name = models.CharField(max_length=50, unique=True)
slug = models.SlugField(unique=True)
site = models.ForeignKey('dcim.Site', related_name='topology_maps', blank=True, null=True, on_delete=models.CASCADE)
device_patterns = models.TextField(
help_text="Identify devices to include in the diagram using regular expressions, one per line. Each line will "
"result in a new tier of the drawing. Separate multiple regexes within a line using semicolons. "
"Devices will be rendered in the order they are defined."
)
description = models.CharField(max_length=100, blank=True)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@property
def device_sets(self):
if not self.device_patterns:
return None
return [line.strip() for line in self.device_patterns.split('\n')]
def render(self, img_format='png'):
from circuits.models import CircuitTermination
from dcim.models import CONNECTION_STATUS_CONNECTED, Device, InterfaceConnection
# Construct the graph
graph = graphviz.Graph()
graph.graph_attr['ranksep'] = '1'
for i, device_set in enumerate(self.device_sets):
subgraph = graphviz.Graph(name='sg{}'.format(i))
subgraph.graph_attr['rank'] = 'same'
# Add a pseudonode for each device_set to enforce hierarchical layout
subgraph.node('set{}'.format(i), label='', shape='none', width='0')
if i:
graph.edge('set{}'.format(i - 1), 'set{}'.format(i), style='invis')
# Add each device to the graph
devices = []
for query in device_set.split(';'): # Split regexes on semicolons
devices += Device.objects.filter(name__regex=query).select_related('device_role')
for d in devices:
bg_color = '#{}'.format(d.device_role.color)
fg_color = '#{}'.format(foreground_color(d.device_role.color))
subgraph.node(d.name, style='filled', fillcolor=bg_color, fontcolor=fg_color, fontname='sans')
# Add an invisible connection to each successive device in a set to enforce horizontal order
for j in range(0, len(devices) - 1):
subgraph.edge(devices[j].name, devices[j + 1].name, style='invis')
graph.subgraph(subgraph)
# Compile list of all devices
device_superset = Q()
for device_set in self.device_sets:
for query in device_set.split(';'): # Split regexes on semicolons
device_superset = device_superset | Q(name__regex=query)
# Add all interface connections to the graph
devices = Device.objects.filter(*(device_superset,))
connections = InterfaceConnection.objects.filter(
interface_a__device__in=devices, interface_b__device__in=devices
)
for c in connections:
style = 'solid' if c.connection_status == CONNECTION_STATUS_CONNECTED else 'dashed'
graph.edge(c.interface_a.device.name, c.interface_b.device.name, style=style)
# Add all circuits to the graph
for termination in CircuitTermination.objects.filter(term_side='A', interface__device__in=devices):
peer_termination = termination.get_peer_termination()
if (peer_termination is not None and peer_termination.interface is not None and
peer_termination.interface.device in devices):
graph.edge(termination.interface.device.name, peer_termination.interface.device.name, color='blue')
return graph.pipe(format=img_format)
#
# Image attachments
#
def image_upload(instance, filename):
path = 'image-attachments/'
# Rename the file to the provided name, if any. Attempt to preserve the file extension.
extension = filename.rsplit('.')[-1].lower()
if instance.name and extension in ['bmp', 'gif', 'jpeg', 'jpg', 'png']:
filename = '.'.join([instance.name, extension])
elif instance.name:
filename = instance.name
return '{}{}_{}_{}'.format(path, instance.content_type.name, instance.object_id, filename)
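# Illustrative sketch (not part of the original source): for an ImageAttachment named
# 'front-panel' on an object with content_type.name == 'device' and object_id == 42,
# uploading 'IMG_1234.JPG' would be stored as:
#   image-attachments/device_42_front-panel.jpg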
@python_2_unicode_compatible
class ImageAttachment(models.Model):
"""
An uploaded image which is associated with an object.
"""
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey('content_type', 'object_id')
image = models.ImageField(upload_to=image_upload, height_field='image_height', width_field='image_width')
image_height = models.PositiveSmallIntegerField()
image_width = models.PositiveSmallIntegerField()
name = models.CharField(max_length=50, blank=True)
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['name']
def __str__(self):
if self.name:
return self.name
filename = self.image.name.rsplit('/', 1)[-1]
return filename.split('_', 2)[2]
def delete(self, *args, **kwargs):
_name = self.image.name
super(ImageAttachment, self).delete(*args, **kwargs)
# Delete file from disk
self.image.delete(save=False)
# Deleting the file erases its name. We restore the image's filename here in case we still need to reference it
# before the request finishes. (For example, to display a message indicating the ImageAttachment was deleted.)
self.image.name = _name
@property
def size(self):
"""
Wrapper around `image.size` to suppress an OSError in case the file is inaccessible.
"""
try:
return self.image.size
except OSError:
return None
#
# User actions
#
class UserActionManager(models.Manager):
# Actions affecting a single object
def log_action(self, user, obj, action, message):
self.model.objects.create(
content_type=ContentType.objects.get_for_model(obj),
object_id=obj.pk,
user=user,
action=action,
message=message,
)
def log_create(self, user, obj, message=''):
self.log_action(user, obj, ACTION_CREATE, message)
def log_edit(self, user, obj, message=''):
self.log_action(user, obj, ACTION_EDIT, message)
def log_delete(self, user, obj, message=''):
self.log_action(user, obj, ACTION_DELETE, message)
# Actions affecting multiple objects
def log_bulk_action(self, user, content_type, action, message):
self.model.objects.create(
content_type=content_type,
user=user,
action=action,
message=message,
)
def log_import(self, user, content_type, message=''):
self.log_bulk_action(user, content_type, ACTION_IMPORT, message)
def log_bulk_create(self, user, content_type, message=''):
self.log_bulk_action(user, content_type, ACTION_BULK_CREATE, message)
def log_bulk_edit(self, user, content_type, message=''):
self.log_bulk_action(user, content_type, ACTION_BULK_EDIT, message)
def log_bulk_delete(self, user, content_type, message=''):
self.log_bulk_action(user, content_type, ACTION_BULK_DELETE, message)
@python_2_unicode_compatible
class UserAction(models.Model):
"""
A record of an action (add, edit, or delete) performed on an object by a User.
"""
time = models.DateTimeField(auto_now_add=True, editable=False)
user = models.ForeignKey(User, related_name='actions', on_delete=models.CASCADE)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField(blank=True, null=True)
action = models.PositiveSmallIntegerField(choices=ACTION_CHOICES)
message = models.TextField(blank=True)
objects = UserActionManager()
class Meta:
ordering = ['-time']
def __str__(self):
if self.message:
return '{} {}'.format(self.user, self.message)
return '{} {} {}'.format(self.user, self.get_action_display(), self.content_type)
def icon(self):
if self.action in [ACTION_CREATE, ACTION_BULK_CREATE, ACTION_IMPORT]:
return mark_safe('<i class="glyphicon glyphicon-plus text-success"></i>')
elif self.action in [ACTION_EDIT, ACTION_BULK_EDIT]:
return mark_safe('<i class="glyphicon glyphicon-pencil text-warning"></i>')
elif self.action in [ACTION_DELETE, ACTION_BULK_DELETE]:
return mark_safe('<i class="glyphicon glyphicon-remove text-danger"></i>')
else:
return ''
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.osconfig_v1.types import patch_deployments
from google.cloud.osconfig_v1.types import patch_jobs
from google.protobuf import empty_pb2 # type: ignore
from .base import OsConfigServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import OsConfigServiceGrpcTransport
class OsConfigServiceGrpcAsyncIOTransport(OsConfigServiceTransport):
"""gRPC AsyncIO backend transport for OsConfigService.
OS Config API
The OS Config service is a server-side component that you can
use to manage package installations and patch jobs for virtual
machine instances.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "osconfig.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "osconfig.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def execute_patch_job(
self,
) -> Callable[[patch_jobs.ExecutePatchJobRequest], Awaitable[patch_jobs.PatchJob]]:
r"""Return a callable for the execute patch job method over gRPC.
Patch VM instances by creating and running a patch
job.
Returns:
Callable[[~.ExecutePatchJobRequest],
Awaitable[~.PatchJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "execute_patch_job" not in self._stubs:
self._stubs["execute_patch_job"] = self.grpc_channel.unary_unary(
"/google.cloud.osconfig.v1.OsConfigService/ExecutePatchJob",
request_serializer=patch_jobs.ExecutePatchJobRequest.serialize,
response_deserializer=patch_jobs.PatchJob.deserialize,
)
return self._stubs["execute_patch_job"]
@property
def get_patch_job(
self,
) -> Callable[[patch_jobs.GetPatchJobRequest], Awaitable[patch_jobs.PatchJob]]:
r"""Return a callable for the get patch job method over gRPC.
Get the patch job. This can be used to track the
progress of an ongoing patch job or review the details
of completed jobs.
Returns:
Callable[[~.GetPatchJobRequest],
Awaitable[~.PatchJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_patch_job" not in self._stubs:
self._stubs["get_patch_job"] = self.grpc_channel.unary_unary(
"/google.cloud.osconfig.v1.OsConfigService/GetPatchJob",
request_serializer=patch_jobs.GetPatchJobRequest.serialize,
response_deserializer=patch_jobs.PatchJob.deserialize,
)
return self._stubs["get_patch_job"]
@property
def cancel_patch_job(
self,
) -> Callable[[patch_jobs.CancelPatchJobRequest], Awaitable[patch_jobs.PatchJob]]:
r"""Return a callable for the cancel patch job method over gRPC.
Cancel a patch job. The patch job must be active.
Canceled patch jobs cannot be restarted.
Returns:
Callable[[~.CancelPatchJobRequest],
Awaitable[~.PatchJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_patch_job" not in self._stubs:
self._stubs["cancel_patch_job"] = self.grpc_channel.unary_unary(
"/google.cloud.osconfig.v1.OsConfigService/CancelPatchJob",
request_serializer=patch_jobs.CancelPatchJobRequest.serialize,
response_deserializer=patch_jobs.PatchJob.deserialize,
)
return self._stubs["cancel_patch_job"]
@property
def list_patch_jobs(
self,
) -> Callable[
[patch_jobs.ListPatchJobsRequest], Awaitable[patch_jobs.ListPatchJobsResponse]
]:
r"""Return a callable for the list patch jobs method over gRPC.
Get a list of patch jobs.
Returns:
Callable[[~.ListPatchJobsRequest],
Awaitable[~.ListPatchJobsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_patch_jobs" not in self._stubs:
self._stubs["list_patch_jobs"] = self.grpc_channel.unary_unary(
"/google.cloud.osconfig.v1.OsConfigService/ListPatchJobs",
request_serializer=patch_jobs.ListPatchJobsRequest.serialize,
response_deserializer=patch_jobs.ListPatchJobsResponse.deserialize,
)
return self._stubs["list_patch_jobs"]
@property
def list_patch_job_instance_details(
self,
) -> Callable[
[patch_jobs.ListPatchJobInstanceDetailsRequest],
Awaitable[patch_jobs.ListPatchJobInstanceDetailsResponse],
]:
r"""Return a callable for the list patch job instance
details method over gRPC.
Get a list of instance details for a given patch job.
Returns:
Callable[[~.ListPatchJobInstanceDetailsRequest],
Awaitable[~.ListPatchJobInstanceDetailsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_patch_job_instance_details" not in self._stubs:
self._stubs[
"list_patch_job_instance_details"
] = self.grpc_channel.unary_unary(
"/google.cloud.osconfig.v1.OsConfigService/ListPatchJobInstanceDetails",
request_serializer=patch_jobs.ListPatchJobInstanceDetailsRequest.serialize,
response_deserializer=patch_jobs.ListPatchJobInstanceDetailsResponse.deserialize,
)
return self._stubs["list_patch_job_instance_details"]
@property
def create_patch_deployment(
self,
) -> Callable[
[patch_deployments.CreatePatchDeploymentRequest],
Awaitable[patch_deployments.PatchDeployment],
]:
r"""Return a callable for the create patch deployment method over gRPC.
Create an OS Config patch deployment.
Returns:
Callable[[~.CreatePatchDeploymentRequest],
Awaitable[~.PatchDeployment]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_patch_deployment" not in self._stubs:
self._stubs["create_patch_deployment"] = self.grpc_channel.unary_unary(
"/google.cloud.osconfig.v1.OsConfigService/CreatePatchDeployment",
request_serializer=patch_deployments.CreatePatchDeploymentRequest.serialize,
response_deserializer=patch_deployments.PatchDeployment.deserialize,
)
return self._stubs["create_patch_deployment"]
@property
def get_patch_deployment(
self,
) -> Callable[
[patch_deployments.GetPatchDeploymentRequest],
Awaitable[patch_deployments.PatchDeployment],
]:
r"""Return a callable for the get patch deployment method over gRPC.
Get an OS Config patch deployment.
Returns:
Callable[[~.GetPatchDeploymentRequest],
Awaitable[~.PatchDeployment]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_patch_deployment" not in self._stubs:
self._stubs["get_patch_deployment"] = self.grpc_channel.unary_unary(
"/google.cloud.osconfig.v1.OsConfigService/GetPatchDeployment",
request_serializer=patch_deployments.GetPatchDeploymentRequest.serialize,
response_deserializer=patch_deployments.PatchDeployment.deserialize,
)
return self._stubs["get_patch_deployment"]
@property
def list_patch_deployments(
self,
) -> Callable[
[patch_deployments.ListPatchDeploymentsRequest],
Awaitable[patch_deployments.ListPatchDeploymentsResponse],
]:
r"""Return a callable for the list patch deployments method over gRPC.
Get a page of OS Config patch deployments.
Returns:
Callable[[~.ListPatchDeploymentsRequest],
Awaitable[~.ListPatchDeploymentsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_patch_deployments" not in self._stubs:
self._stubs["list_patch_deployments"] = self.grpc_channel.unary_unary(
"/google.cloud.osconfig.v1.OsConfigService/ListPatchDeployments",
request_serializer=patch_deployments.ListPatchDeploymentsRequest.serialize,
response_deserializer=patch_deployments.ListPatchDeploymentsResponse.deserialize,
)
return self._stubs["list_patch_deployments"]
@property
def delete_patch_deployment(
self,
) -> Callable[
[patch_deployments.DeletePatchDeploymentRequest], Awaitable[empty_pb2.Empty]
]:
r"""Return a callable for the delete patch deployment method over gRPC.
Delete an OS Config patch deployment.
Returns:
Callable[[~.DeletePatchDeploymentRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_patch_deployment" not in self._stubs:
self._stubs["delete_patch_deployment"] = self.grpc_channel.unary_unary(
"/google.cloud.osconfig.v1.OsConfigService/DeletePatchDeployment",
request_serializer=patch_deployments.DeletePatchDeploymentRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_patch_deployment"]
@property
def update_patch_deployment(
self,
) -> Callable[
[patch_deployments.UpdatePatchDeploymentRequest],
Awaitable[patch_deployments.PatchDeployment],
]:
r"""Return a callable for the update patch deployment method over gRPC.
Update an OS Config patch deployment.
Returns:
Callable[[~.UpdatePatchDeploymentRequest],
Awaitable[~.PatchDeployment]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_patch_deployment" not in self._stubs:
self._stubs["update_patch_deployment"] = self.grpc_channel.unary_unary(
"/google.cloud.osconfig.v1.OsConfigService/UpdatePatchDeployment",
request_serializer=patch_deployments.UpdatePatchDeploymentRequest.serialize,
response_deserializer=patch_deployments.PatchDeployment.deserialize,
)
return self._stubs["update_patch_deployment"]
@property
def pause_patch_deployment(
self,
) -> Callable[
[patch_deployments.PausePatchDeploymentRequest],
Awaitable[patch_deployments.PatchDeployment],
]:
r"""Return a callable for the pause patch deployment method over gRPC.
Change state of patch deployment to "PAUSED".
Patch deployment in paused state doesn't generate patch
jobs.
Returns:
Callable[[~.PausePatchDeploymentRequest],
Awaitable[~.PatchDeployment]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "pause_patch_deployment" not in self._stubs:
self._stubs["pause_patch_deployment"] = self.grpc_channel.unary_unary(
"/google.cloud.osconfig.v1.OsConfigService/PausePatchDeployment",
request_serializer=patch_deployments.PausePatchDeploymentRequest.serialize,
response_deserializer=patch_deployments.PatchDeployment.deserialize,
)
return self._stubs["pause_patch_deployment"]
@property
def resume_patch_deployment(
self,
) -> Callable[
[patch_deployments.ResumePatchDeploymentRequest],
Awaitable[patch_deployments.PatchDeployment],
]:
r"""Return a callable for the resume patch deployment method over gRPC.
Change state of patch deployment back to "ACTIVE".
Patch deployment in active state continues to generate
patch jobs.
Returns:
Callable[[~.ResumePatchDeploymentRequest],
Awaitable[~.PatchDeployment]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "resume_patch_deployment" not in self._stubs:
self._stubs["resume_patch_deployment"] = self.grpc_channel.unary_unary(
"/google.cloud.osconfig.v1.OsConfigService/ResumePatchDeployment",
request_serializer=patch_deployments.ResumePatchDeploymentRequest.serialize,
response_deserializer=patch_deployments.PatchDeployment.deserialize,
)
return self._stubs["resume_patch_deployment"]
def close(self):
return self.grpc_channel.close()
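# Illustrative sketch (not part of the original source): this transport is normally
# created indirectly by the generated async client, but it can be exercised directly,
# e.g. from a coroutine:
#   transport = OsConfigServiceGrpcAsyncIOTransport(host="osconfig.googleapis.com")
#   response = await transport.get_patch_job(patch_jobs.GetPatchJobRequest(name=...))
# Each property above returns a cached unary-unary stub bound to self.grpc_channel.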
__all__ = ("OsConfigServiceGrpcAsyncIOTransport",)
|
|
#
# Copyright 2014 Rackspace, Inc
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import jinja2
from oslo.config import cfg
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import utils
from ironic.drivers import utils as driver_utils
from ironic.openstack.common import fileutils
from ironic.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
PXE_CFG_DIR_NAME = 'pxelinux.cfg'
def get_root_dir():
"""Returns the directory where the config files and images will live."""
if CONF.pxe.ipxe_enabled:
return CONF.pxe.http_root
else:
return CONF.pxe.tftp_root
def _ensure_config_dirs_exist(node_uuid):
"""Ensure that the node's and PXE configuration directories exist.
:param node_uuid: the UUID of the node.
"""
root_dir = get_root_dir()
fileutils.ensure_tree(os.path.join(root_dir, node_uuid))
fileutils.ensure_tree(os.path.join(root_dir, PXE_CFG_DIR_NAME))
def _build_pxe_config(pxe_options, template):
"""Build the PXE boot configuration file.
This method builds the PXE boot configuration file by rendering the
template with the given parameters.
:param pxe_options: A dict of values to set on the configuration file.
:param template: The PXE configuration template.
:returns: A formatted string with the file content.
"""
tmpl_path, tmpl_file = os.path.split(template)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
template = env.get_template(tmpl_file)
return template.render({'pxe_options': pxe_options,
'ROOT': '{{ ROOT }}'})
def _link_mac_pxe_configs(task):
"""Link each MAC address with the PXE configuration file.
:param task: A TaskManager instance.
"""
pxe_config_file_path = get_pxe_config_file_path(task.node.uuid)
for mac in driver_utils.get_node_mac_addresses(task):
mac_path = _get_pxe_mac_path(mac)
utils.unlink_without_raise(mac_path)
utils.create_link_without_raise(pxe_config_file_path, mac_path)
def _link_ip_address_pxe_configs(task):
"""Link each IP address with the PXE configuration file.
:param task: A TaskManager instance.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
pxe_config_file_path = get_pxe_config_file_path(task.node.uuid)
api = dhcp_factory.DHCPFactory().provider
ip_addrs = api.get_ip_addresses(task)
if not ip_addrs:
raise exception.FailedToGetIPAddressOnPort(_(
"Failed to get IP address for any port on node %s.") %
task.node.uuid)
for port_ip_address in ip_addrs:
ip_address_path = _get_pxe_ip_address_path(port_ip_address)
utils.unlink_without_raise(ip_address_path)
utils.create_link_without_raise(pxe_config_file_path,
ip_address_path)
def _get_pxe_mac_path(mac):
"""Convert a MAC address into a PXE config file name.
:param mac: A MAC address string in the format xx:xx:xx:xx:xx:xx.
:returns: the path to the config file.
"""
if CONF.pxe.ipxe_enabled:
mac_file_name = mac.replace(':', '').lower()
else:
mac_file_name = "01-" + mac.replace(":", "-").lower()
return os.path.join(get_root_dir(), PXE_CFG_DIR_NAME, mac_file_name)
def _get_pxe_ip_address_path(ip_address):
"""Convert an ipv4 address into a PXE config file name.
:param ip_address: A valid IPv4 address string in the format 'n.n.n.n'.
:returns: the path to the config file.
"""
ip = ip_address.split('.')
hex_ip = '{0:02X}{1:02X}{2:02X}{3:02X}'.format(*map(int, ip))
return os.path.join(
CONF.pxe.tftp_root, hex_ip + ".conf"
)
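# Illustrative sketch (not part of the original source): with ipxe_enabled=False and
# tftp_root='/tftpboot', the two helpers above would produce, for example:
#   _get_pxe_mac_path('AA:BB:CC:DD:EE:FF')   -> '/tftpboot/pxelinux.cfg/01-aa-bb-cc-dd-ee-ff'
#   _get_pxe_ip_address_path('10.0.0.15')    -> '/tftpboot/0A00000F.conf'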
def get_deploy_kr_info(node_uuid, driver_info):
"""Get href and tftp path for deploy kernel and ramdisk.
Note: driver_info should be validated outside of this method.
"""
root_dir = get_root_dir()
image_info = {}
for label in ('deploy_kernel', 'deploy_ramdisk'):
# the values for these keys will look like "glance://image-uuid"
image_info[label] = (
str(driver_info[label]),
os.path.join(root_dir, node_uuid, label)
)
return image_info
def get_pxe_config_file_path(node_uuid):
"""Generate the path for the node's PXE configuration file.
:param node_uuid: the UUID of the node.
:returns: The path to the node's PXE configuration file.
"""
return os.path.join(get_root_dir(), node_uuid, 'config')
def create_pxe_config(task, pxe_options, template=None):
"""Generate PXE configuration file and MAC address links for it.
This method will generate the PXE configuration file for the task's
node under a directory named with the UUID of that node. For each
MAC address (port) of that node, a symlink for the configuration file
will be created under the PXE configuration directory, so regardless
of which port boots first they'll get the same PXE configuration.
:param task: A TaskManager instance.
:param pxe_options: A dictionary with the PXE configuration
parameters.
:param template: The PXE configuration template. If no template is
given the CONF.pxe.pxe_config_template will be used.
"""
LOG.debug("Building PXE config for node %s", task.node.uuid)
if template is None:
template = CONF.pxe.pxe_config_template
_ensure_config_dirs_exist(task.node.uuid)
pxe_config_file_path = get_pxe_config_file_path(task.node.uuid)
pxe_config = _build_pxe_config(pxe_options, template)
utils.write_to_file(pxe_config_file_path, pxe_config)
if driver_utils.get_node_capability(task.node, 'boot_mode') == 'uefi':
_link_ip_address_pxe_configs(task)
else:
_link_mac_pxe_configs(task)
def clean_up_pxe_config(task):
"""Clean up the TFTP environment for the task's node.
:param task: A TaskManager instance.
"""
LOG.debug("Cleaning up PXE config for node %s", task.node.uuid)
if driver_utils.get_node_capability(task.node, 'boot_mode') == 'uefi':
api = dhcp_factory.DHCPFactory().provider
ip_addresses = api.get_ip_addresses(task)
if not ip_addresses:
return
for port_ip_address in ip_addresses:
try:
ip_address_path = _get_pxe_ip_address_path(port_ip_address)
except exception.InvalidIPv4Address:
continue
utils.unlink_without_raise(ip_address_path)
else:
for mac in driver_utils.get_node_mac_addresses(task):
utils.unlink_without_raise(_get_pxe_mac_path(mac))
utils.rmtree_without_raise(os.path.join(get_root_dir(),
task.node.uuid))
def dhcp_options_for_instance(task):
"""Retrieves the DHCP PXE boot options.
:param task: A TaskManager instance.
"""
dhcp_opts = []
if CONF.pxe.ipxe_enabled:
script_name = os.path.basename(CONF.pxe.ipxe_boot_script)
ipxe_script_url = '/'.join([CONF.pxe.http_url, script_name])
dhcp_provider_name = dhcp_factory.CONF.dhcp.dhcp_provider
# If the request comes from dumb (non-iPXE) firmware, send it the iPXE
# boot image.
if dhcp_provider_name == 'neutron':
# Neutron use dnsmasq as default DHCP agent, add extra config
# to neutron "dhcp-match=set:ipxe,175" and use below option
dhcp_opts.append({'opt_name': 'tag:!ipxe,bootfile-name',
'opt_value': CONF.pxe.pxe_bootfile_name})
else:
# !175 == non-iPXE.
# http://ipxe.org/howto/dhcpd#ipxe-specific_options
dhcp_opts.append({'opt_name': '!175,bootfile-name',
'opt_value': CONF.pxe.pxe_bootfile_name})
# If the request comes from iPXE, direct it to boot from the
# iPXE script
dhcp_opts.append({'opt_name': 'bootfile-name',
'opt_value': ipxe_script_url})
else:
if driver_utils.get_node_capability(task.node, 'boot_mode') == 'uefi':
boot_file = CONF.pxe.uefi_pxe_bootfile_name
else:
boot_file = CONF.pxe.pxe_bootfile_name
dhcp_opts.append({'opt_name': 'bootfile-name',
'opt_value': boot_file})
dhcp_opts.append({'opt_name': 'server-ip-address',
'opt_value': CONF.pxe.tftp_server})
dhcp_opts.append({'opt_name': 'tftp-server',
'opt_value': CONF.pxe.tftp_server})
return dhcp_opts
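# Illustrative sketch (not part of the original source): with ipxe_enabled=False, a
# BIOS boot_mode node and tftp_server='192.0.2.1', the function above returns roughly:
#   [{'opt_name': 'bootfile-name', 'opt_value': CONF.pxe.pxe_bootfile_name},
#    {'opt_name': 'server-ip-address', 'opt_value': '192.0.2.1'},
#    {'opt_name': 'tftp-server', 'opt_value': '192.0.2.1'}]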
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from parler.views import ViewUrlMixin
from rest_framework import status
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.response import Response
from rest_framework.settings import api_settings
from shop.views.catalog import AddToCartView as AddToCartViewBase
from shop.views.catalog import ProductListView as BaseProductListView
from shop.views.catalog import ProductRetrieveView
from shopit.conf import app_settings
from shopit.models.cart import Cart, CartItem
from shopit.models.product import Attribute, Product
from shopit.rest.renderers import ModifiedCMSPageRenderer
from shopit.serializers import (AddToCartSerializer, CartItemSerializer, ProductDetailSerializer,
ProductSummarySerializer, ReviewSerializer, WatchItemSerializer)
CATEGORIES_VAR = 'c'
BRANDS_VAR = 'b'
MANUFACTURERS_VAR = 'm'
FLAGS_VAR = 'f'
MODIFIERS_VAR = 'd'
PRICE_FROM_VAR = 'pf'
PRICE_TO_VAR = 'pt'
SORT_VAR = 's'
class ProductListView(BaseProductListView):
serializer_class = ProductSummarySerializer
renderer_classes = [ModifiedCMSPageRenderer] + api_settings.DEFAULT_RENDERER_CLASSES
def get(self, request, *args, **kwargs):
"""
If products are loaded asynchronously, as controlled by the
`ASYNC_PRODUCT_LIST` setting, render the template without any data.
"""
if app_settings.ASYNC_PRODUCT_LIST and request.accepted_renderer.format == 'html':
return Response({})
return super(ProductListView, self).get(request, *args, **kwargs)
def list(self, request, *args, **kwargs):
"""
Return the total product count when `get_count` exists in GET; applicable
only when the format is not html.
"""
if request.GET.get('get_count', None) and request.accepted_renderer.format != 'html':
count = self.filter_queryset(self.get_queryset()).count()
return Response({'count': count})
return super(ProductListView, self).list(request, *args, **kwargs)
def get_queryset(self):
return Product.objects.translated().active().top_level()
def filter_queryset(self, queryset):
queryset = super(ProductListView, self).filter_queryset(queryset)
categories = list(filter(None, self.request.GET.get(CATEGORIES_VAR, '').split(','))) or None
brands = list(filter(None, self.request.GET.get(BRANDS_VAR, '').split(','))) or None
manufacturers = list(filter(None, self.request.GET.get(MANUFACTURERS_VAR, '').split(','))) or None
queryset = queryset.filter_categorization(categories, brands, manufacturers)
flags = list(filter(None, self.request.GET.get(FLAGS_VAR, '').split(','))) or None
queryset = queryset.filter_flags(flags)
modifiers = list(filter(None, self.request.GET.get(MODIFIERS_VAR, '').split(','))) or None
queryset = queryset.filter_modifiers(modifiers)
attrs = Attribute.objects.active()
attr_codes = attrs.values_list('code', flat=True)
attr_filters = [(x[0], x[1]) for x in self.request.GET.items() if x[0] in attr_codes]
# Drop empty filter values for attributes that are not nullable.
for f in [x for x in attr_filters if not x[1]]:
if not attrs.get(code=f[0]).nullable:
attr_filters.remove(f)
queryset = queryset.filter_attributes(attr_filters)
price_from = self.request.GET.get(PRICE_FROM_VAR, None)
price_to = self.request.GET.get(PRICE_TO_VAR, None)
queryset = queryset.filter_price(price_from, price_to)
sort = self.request.GET.get(SORT_VAR, None)
if not sort and app_settings.DEFAULT_PRODUCT_ORDER:
sort = app_settings.DEFAULT_PRODUCT_ORDER
sort_map = {
'name': 'translations__name',
'-name': '-translations__name',
'price': '_unit_price',
'-price': '-_unit_price',
}
if sort in sort_map:
queryset = queryset.order_by(sort_map[sort])
return queryset
def get_template_names(self):
return ['shopit/catalog/product_list.html']
def get_renderer_context(self):
"""
Add `product_list` to the renderer context when the format is 'html'
and the `ADD_PRODUCT_LIST_TO_CONTEXT` setting allows it.
"""
context = super(ProductListView, self).get_renderer_context()
if app_settings.ADD_PRODUCT_LIST_TO_CONTEXT and context['request'].accepted_renderer.format == 'html':
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
context.update(self.paginator.get_html_context())
context['product_list'] = page or queryset
return context
class ProductDetailView(ViewUrlMixin, ProductRetrieveView):
serializer_class = ProductDetailSerializer
renderer_classes = [ModifiedCMSPageRenderer] + api_settings.DEFAULT_RENDERER_CLASSES
def get(self, request, *args, **kwargs):
response = super(ProductDetailView, self).get(request, *args, **kwargs)
if request.accepted_renderer.format == 'html':
product_id = self.get_object().pk
menu = request.toolbar.get_or_create_menu('shopit-menu', _('Shopit'))
menu.add_break()
menu.add_modal_item(_('Edit Product'), url=reverse('admin:shopit_product_change', args=[product_id]))
menu.add_sideframe_item(_('Delete Product'), url=reverse('admin:shopit_product_delete', args=[product_id]))
return response
def get_object(self):
if not hasattr(self, '_product'):
self._product = get_object_or_404(Product.objects.translated(slug=self.kwargs['slug']))
return self._product
def get_template_names(self):
return ['shopit/catalog/product_detail.html']
def get_view_url(self):
"""
Return object view url. Used in `get_translated_url` templatetag from parler.
"""
return self.get_object().get_absolute_url()
class ProductReviewMixin(object):
"""
Mixin used in product reviews.
"""
def get_queryset(self, include_inactive=False):
return self.get_product().get_reviews(language=self.request.LANGUAGE_CODE, include_inactive=include_inactive)
def get_product(self):
if not hasattr(self, '_product'):
self._product = get_object_or_404(Product.objects.translated(slug=self.kwargs.get('slug')))
return self._product
class ProductReviewListView(ProductReviewMixin, ListCreateAPIView):
"""
View that handles listing and creating reviews on a product.
"""
serializer_class = ReviewSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
@method_decorator(never_cache)
def get(self, request, *args, **kwargs):
return super(ProductReviewListView, self).get(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
"""
Check that the customer is registered and has not already written a
review for this product.
"""
if not request.customer.is_authenticated():
errors = {'not-registered': [_('Please register to submit a review.')]}
return Response(errors, status=status.HTTP_400_BAD_REQUEST)
if self.get_queryset(include_inactive=True).filter(customer=request.customer).exists():
errors = {'exists': [_('Review already written for this Product.')]}
return Response(errors, status=status.HTTP_400_BAD_REQUEST)
data = dict(list(request.data.items()), customer=request.customer, language=request.LANGUAGE_CODE, active=app_settings.REVIEW_ACTIVE_DEFAULT) # noqa
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
serializer.save(product=self.get_product())
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class ProductReviewDetailView(ProductReviewMixin, RetrieveUpdateDestroyAPIView):
"""
View that handles getting, updating and deleting the review.
"""
serializer_class = ReviewSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
@method_decorator(never_cache)
def get(self, request, *args, **kwargs):
return super(ProductReviewDetailView, self).get(request, *args, **kwargs)
def get_object(self):
if not hasattr(self, '_object'):
self._object = get_object_or_404(self.get_queryset(include_inactive=True).filter(id=self.kwargs.get('pk')))
return self._object
def update(self, request, *args, **kwargs):
"""
Only allow update for the review owner.
"""
if self.get_object().customer != self.request.customer:
errors = {'not-allowed': [_('You can only update your own reviews.')]}
return Response(errors, status=status.HTTP_403_FORBIDDEN)
data = dict(list(request.data.items()), customer=request.customer, language=request.LANGUAGE_CODE)
serializer = self.get_serializer(self.get_object(), data=data, partial=kwargs.pop('partial', False))
serializer.is_valid(raise_exception=True)
serializer.save(product=self.get_product())
return Response(serializer.data)
def delete(self, request, *args, **kwargs):
"""
Only allow delete for the review owner.
"""
if self.get_object().customer != self.request.customer:
errors = {'not-allowed': [_('You can only delete your own reviews.')]}
return Response(errors, status=status.HTTP_403_FORBIDDEN)
return super(ProductReviewDetailView, self).delete(self.request, *args, **kwargs)
class AddToCartView(AddToCartViewBase):
"""
View that handles adding product to the cart.
"""
serializer_class = AddToCartSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
def get_context(self, request, **kwargs):
product = get_object_or_404(Product.objects.translated(slug=self.kwargs['slug']))
return {'product': product, 'request': request}
def post(self, request, *args, **kwargs):
"""
Override to add the product to cart.
"""
errors = {}
cart = Cart.objects.get_or_create_from_request(request)
context = self.get_context(request, **kwargs)
product = context.pop('product')
quantity = int(request.data.get('quantity', 1))
if product.is_group:
errors['variant'] = [_("You can't add a group product to the cart.")]
else:
total_quantity = getattr(product.is_in_cart(cart), 'quantity', 0) + quantity
available, diff = product.is_available(total_quantity)
if available:
item, created = CartItem.objects.get_or_create(
cart=cart, product=product, quantity=quantity, product_code=product.product_code)
serializer_class = WatchItemSerializer if total_quantity == 0 else CartItemSerializer
serializer = serializer_class(item, context=context)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
errors['quantity'] = [_('Product not available for the given quantity, only %d left.') % (quantity + diff)]
return Response(errors, status=status.HTTP_400_BAD_REQUEST)
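# Hedged usage note (a sketch of the request/response contract implemented
# above, not an exhaustive API description): AddToCartView is POSTed against a
# product slug with an optional 'quantity' in the body, e.g. {'quantity': 2}.
# On success it returns 202 with the serialized cart (or watch) item; on
# failure it returns 400 with a dict keyed by 'variant' (group products) or
# 'quantity' (insufficient stock).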
|
|
import os
import re
import subprocess
import random
import string
from tempfile import mkstemp
class PdftkError(Exception):
pass
class DuplicateFormFieldError(Exception):
pass
class MissingFormFieldError(Exception):
pass
class InvalidOptionError(Exception):
pass
class TooManyPDFsError(Exception):
pass
class InvalidAnswersError(Exception):
pass
class UnsupportedFieldTypeError(Exception):
pass
class PDFTKWrapper:
supported_field_types = ['text', 'button', 'choice']
def __init__(self, encoding='latin-1', tmp_path=None, clean_up=True):
self.encoding = encoding
self.TEMP_FOLDER_PATH = tmp_path
self._tmp_files = []
self._cache_fdf_for_filling = False
self._fdf_cache = None
self.clean_up = clean_up
self.PDFTK_PATH = os.environ.get('PDFTK_PATH', 'pdftk')
def _coerce_to_file_path(self, path_or_file_or_bytes):
"""This converst file-like objects and `bytes` into
existing files and returns a filepath
if strings are passed in, it is assumed that they are existing
files
"""
if not isinstance(path_or_file_or_bytes, str):
if isinstance(path_or_file_or_bytes, bytes):
return self._write_tmp_file(
bytestring=path_or_file_or_bytes)
else:
return self._write_tmp_file(
file_obj=path_or_file_or_bytes)
return path_or_file_or_bytes
def _write_tmp_file(self, file_obj=None, bytestring=None):
"""Take a file-like object or a bytestring,
create a temporary file and return a file path.
File-like objects are read and their contents written to the tempfile;
bytes objects are written directly to the tempfile.
"""
tmp_path = self.TEMP_FOLDER_PATH
os_int, tmp_fp = mkstemp(dir=tmp_path)
with open(tmp_fp, 'wb') as tmp_file:
if file_obj:
tmp_file.write(file_obj.read())
elif bytestring:
tmp_file.write(bytestring)
self._tmp_files.append(tmp_fp)
return tmp_fp
def clean_up_tmp_files(self):
if not self._tmp_files:
return
for i in range(len(self._tmp_files)):
path = self._tmp_files.pop()
os.remove(path)
def _get_file_contents(self, path, decode=False, encoding=None):
"""given a file path, return the contents of the file
if decode is True, the contents will be decoded using the default
encoding
"""
bytestring = open(path, 'rb').read()
if decode:
return bytestring.decode(encoding or self.encoding)
return bytestring
def run_command(self, args):
"""Run a command to pdftk on the command line.
`args` is a list of command line arguments.
This method is responsible for handling errors that arise from
pdftk's CLI.
"""
if args[0] != self.PDFTK_PATH:
args.insert(0, self.PDFTK_PATH)
process = subprocess.Popen(args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
if err:
raise PdftkError(err.decode('utf-8'))
return out.decode('utf-8')
def parse_fdf_fields(self, fdf_str):
'''Yields a series of tuples, using the escaped name of the field
followed by a dict with useful meta information about the match
https://regex101.com/r/iL6hW3/5
'''
field_pattern = re.compile(
r'\/V\ (?P<value>.*)\n\/T\ \((?P<name>.*)\)')
for match in re.finditer(field_pattern, fdf_str):
# it's necessary to deal with escape slashes in the field name
# because it may otherwise fail to match the field name extracted
# from the data field dump
datum = {
'name': match.group('name'),
'escaped_name': match.group('name').replace('\\', ''),
'name_span': match.span('name'),
'value_template': match.group('value'),
'value_template_span': match.span('value')
}
yield (datum['escaped_name'], datum)
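# Illustrative note: the regex above matches FDF entries of the form
#   /V (current value)
#   /T (field name)
# and yields (escaped_name, datum); the recorded spans are later used by
# _build_answer_insertion/_patch_fdf_with_insertions to splice answers in.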
def parse_data_fields(self, data_str):
'''Pulls out field data from the resulting string of
`pdftk dump_data_fields_utf8`
'''
field_opts_key = 'FieldStateOption'
field_name_key = 'FieldName'
for field_text in data_str.split('---'):
datum = {}
for line in field_text.split('\n'):
if line.strip():
propName, value = line.split(':', 1)  # split on the first ':' only
if propName == field_opts_key:
if field_opts_key not in datum:
datum[field_opts_key] = [value.strip()]
else:
datum[field_opts_key].append(value.strip())
else:
datum[propName] = value.strip()
if datum:
yield (datum[field_name_key], datum)
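# Illustrative note: `pdftk dump_data_fields_utf8` emits '---'-separated blocks
# of 'Key: value' lines (FieldType, FieldName, FieldStateOption, ...); a
# hypothetical block such as
#   FieldType: Text
#   FieldName: applicant_name
# is yielded as ('applicant_name', {'FieldType': 'Text', ...}), with any
# repeated FieldStateOption lines collected into a list.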
def get_fdf(self, pdf_file_path):
"""Given a path to a pdf form, this returns the decoded
text of an output fdf file
"""
pdf_file_path = self._coerce_to_file_path(pdf_file_path)
tmp_outfile = self._write_tmp_file()
self.run_command([pdf_file_path, 'generate_fdf',
'output', tmp_outfile])
contents = self._get_file_contents(
tmp_outfile, decode=True)
if self._cache_fdf_for_filling:
self._fdf_cache = contents
return contents
def get_data_fields(self, pdf_file_path):
pdf_file_path = self._coerce_to_file_path(pdf_file_path)
tmp_outfile = self._write_tmp_file()
self.run_command([pdf_file_path, 'dump_data_fields_utf8',
'output', tmp_outfile])
contents = self._get_file_contents(
tmp_outfile, decode=True, encoding='utf-8')
return contents
def _get_full_form_field_data(self, pdf_file_path):
# fdf_data & field_data are generators
fdf_data = self.parse_fdf_fields(
self.get_fdf(pdf_file_path))
field_data = self.parse_data_fields(
self.get_data_fields(pdf_file_path))
fields = {}
for name, datum in field_data:
if name in fields:
raise DuplicateFormFieldError(
"Duplicate field data: '{}'".format(name))
fields[name] = datum
for name, datum in fdf_data:
if name not in fields:
raise MissingFormFieldError(
"No matching data for field: '{}'".format(name))
elif 'fdf' in fields[name]:
raise DuplicateFormFieldError(
"Duplicate fdf field: '{}'".format(name))
fields[name]['fdf'] = datum
return fields
def get_field_data(self, pdf_file_path):
full_data = self._get_full_form_field_data(
pdf_file_path)
data = []
for key in full_data:
full_datum = full_data[key]
datum = {
'name': key,
'type': full_datum['FieldType'].lower(),
}
if 'FieldValue' in full_datum:
datum['value'] = full_datum['FieldValue']
if 'FieldStateOption' in full_datum:
datum['options'] = full_datum['FieldStateOption']
if 'value' in datum:
if datum['value'] not in datum['options']:
datum['options'].append(datum['value'])
if datum['type'] not in self.supported_field_types:
raise UnsupportedFieldTypeError(
"Unsupported field type: '{}'".format(datum['type']))
data.append(datum)
return sorted(data, key=lambda d: d['name'])
def _build_answer_insertion(self, value, field):
value = str(value)
field_type = field['FieldType'].lower()
options = field.get('FieldStateOption', [])
if field_type == 'button':
span = field['fdf']['value_template_span']
start = span[0] + 1
end = span[1]
if value not in options:
raise InvalidOptionError(
"'{}' is not in options for '{}': {}".format(
value,
field['FieldName'], str(options)))
return (start, end, value)
else: # 'choice' and 'text' types
span = field['fdf']['value_template_span']
start = span[0] + 1
end = span[1] - 1
# we could check options here, but that would exclude
# custom other values
return (start, end, value)
def _generate_answer_insertions(self, pdf_path, answers):
fields = self._get_full_form_field_data(pdf_path)
insertions = []
for key in answers:
if key in fields:
insertion = self._build_answer_insertion(
answers[key], fields[key])
insertions.append(insertion)
if not insertions:
raise InvalidAnswersError("""No valid answers were found.
Answer Keys: {}
Available Fields: {}
""".format(
str(list(answers.keys())),
str(list(fields.keys()))
))
insertions.sort(key=lambda i: i[0])
return insertions
def _patch_fdf_with_insertions(self, insertions, fdf_str=None):
if not fdf_str:
fdf_str = self._fdf_cache
fdf = []
position = 0
for start, end, value in insertions:
fdf.append(fdf_str[position:start])
fdf.append(value)
position = end
fdf.append(fdf_str[position:])
return ''.join(fdf)
def _load_patched_fdf_into_pdf(self, pdf_file_path, fdf_str):
filled_fdf_path = self._write_tmp_file(
bytestring=fdf_str.encode(self.encoding))
tmp_pdf_path = self._write_tmp_file()
self.run_command([
pdf_file_path,
'fill_form', filled_fdf_path,
'output', tmp_pdf_path
])
return tmp_pdf_path
def join_pdfs(self, pdf_paths):
"""
pdftk A=in1.pdf B=in2.pdf cat A1 B2-20even output out.pdf
"""
if len(pdf_paths) > 99999:
raise TooManyPDFsError(
"I'm worred about combining more than 99,999 pdfs")
pdf_paths = [self._coerce_to_file_path(p) for p in pdf_paths]
combined_pdf_path = self._write_tmp_file()
handle_length = 4
pdftk_args = []
handles = []
for i, path in enumerate(pdf_paths):
idxs = [int(n) for n in "{num:05d}".format(num=i)]
handle = ''.join(
string.ascii_uppercase[idx]
for idx in idxs
)
handles.append(handle)
pdftk_args.append(
"{}={}".format(handle, path)
)
pdftk_args.append('cat')
pdftk_args.extend(handles)
pdftk_args.extend([
'output', combined_pdf_path
])
self.run_command(pdftk_args)
result = open(combined_pdf_path, 'rb').read()
if self.clean_up:
self.clean_up_tmp_files()
return result
def fill_pdf_many(self, pdf_path, multiple_answers):
pdfs = []
_clean_up_setting = self.clean_up
# don't clean up while filling multiple pdfs
self.clean_up = False
pdf_path = self._coerce_to_file_path(pdf_path)
for answer in multiple_answers:
filled_pdf = self.fill_pdf(pdf_path, answer)
pdf_path = self._write_tmp_file(bytestring=filled_pdf)
pdfs.append(pdf_path)
# restore the clean up setting
self.clean_up = _clean_up_setting
return self.join_pdfs(pdfs)
def fill_pdf(self, pdf_path, answers):
self._cache_fdf_for_filling = True
pdf_path = self._coerce_to_file_path(pdf_path)
insertions = self._generate_answer_insertions(pdf_path, answers)
patched_fdf_str = self._patch_fdf_with_insertions(insertions)
output_path = self._load_patched_fdf_into_pdf(
pdf_path, patched_fdf_str)
result = open(output_path, 'rb').read()
if self.clean_up:
self.clean_up_tmp_files()
return result
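# Hedged usage sketch: assumes the `pdftk` binary is installed and that a
# fillable form exists at the hypothetical path 'form.pdf'. It only exercises
# the public methods defined above and is never called by this module.
def _example_fill_pdf():
    wrapper = PDFTKWrapper()
    # each entry has 'name' and 'type', plus 'value'/'options' when present
    fields = wrapper.get_field_data('form.pdf')
    # fill every text field with a placeholder answer
    answers = {f['name']: 'example' for f in fields if f['type'] == 'text'}
    filled_bytes = wrapper.fill_pdf('form.pdf', answers)
    with open('filled.pdf', 'wb') as out:
        out.write(filled_bytes)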
|
|
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning rate schedule."""
import math
import jax
from jax import numpy as jnp
from lingvo.jax import py_utils
from lingvo.jax import pytypes
import optax
JTensor = pytypes.JTensor
InstantiableParams = py_utils.InstantiableParams
class BaseSchedule:
"""Base class for all schedules."""
@classmethod
def Params(cls) -> InstantiableParams: # pylint:disable=invalid-name
p = InstantiableParams(cls)
return p
def __init__(self, params: InstantiableParams) -> None:
self._params = params.Copy()
@property
def params(self) -> InstantiableParams:
return self._params
def value(self, count: JTensor) -> JTensor: # pylint:disable=invalid-name
"""Returns the value of schedule at step 'count'.
Args:
count: a scalar uint32 array.
Returns:
A float32 value of the schedule at step 'count' as a scalar array.
"""
raise NotImplementedError()
class Constant(BaseSchedule):
"""A schedule whose value is a constant."""
@classmethod
def Params(cls) -> InstantiableParams: # pylint:disable=invalid-name
p = super().Params()
p.Define('value', 1., 'The constant value.')
return p
def value(self, count: JTensor) -> JTensor:
del count
return jnp.array(self.params.value, dtype=jnp.float32)
class Polynomial(BaseSchedule):
"""Polynomial learning rate schedule.
If x < x0, returns y0. If x >= x1, returns y1. Otherwise, interpolate with
a polynomial between (x0, y0) and (x1, y1).
"""
@classmethod
def Params(cls) -> InstantiableParams: # pylint:disable=invalid-name
p = super().Params()
p.Define('power', 1, 'Polynomial power.')
p.Define('start', (0, 1.), '(x0, y0)')
p.Define('limit', (1, 1.), '(x1, y1)')
p.Define('origin', 'start',
'Origin of the polynomial. Can be "start" or "limit".')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
if len(p.start) != 2:
raise ValueError(f'{p.start} must be of length 2.')
if len(p.limit) != 2:
raise ValueError(f'{p.limit} must be of length 2.')
x0, _ = p.start
x1, _ = p.limit
if x0 >= x1:
raise ValueError(f'{x0} must be < {x1}')
if p.origin not in {'start', 'limit'}:
raise ValueError('Invalid parameter origin: %s' % p.origin)
def value(self, count: JTensor) -> JTensor:
p = self.params
x = jnp.array(count).astype(jnp.float32)
x0, y0 = p.start
x1, y1 = p.limit
ratio = (x - x0) / (x1 - x0)
if p.origin == 'start':
f_x = ratio**p.power
elif p.origin == 'limit':
f_x = 1 - (1 - ratio)**p.power
y = y0 + f_x * (y1 - y0)
return jnp.where(x < x0, y0, jnp.where(x >= x1, y1, y))
class Linear(Polynomial):
"""Linear learning rate schedule.
If x < x0, returns y0. If x >= x1, returns y1. Otherwise, interpolate
linearly between (x0, y0) and (x1, y1).
"""
@classmethod
def Params(cls) -> InstantiableParams: # pylint:disable=invalid-name
return super().Params().Set(power=1)
class Exponential(BaseSchedule):
"""Exponential learning rate schedule."""
@classmethod
def Params(cls) -> InstantiableParams: # pylint:disable=invalid-name
p = super().Params()
p.Define('start', (0, 1.), '(x0, y0)')
p.Define('limit', (1, 0.5), '(x1, y1)')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
x0, y0 = p.start
x1, y1 = p.limit
assert x0 < x1, '%s must be < %s' % (x0, x1)
assert y0 > 0, '%s must be > 0' % y0
assert y1 > 0, '%s must be > 0' % y1
self.linear = Linear.Params().Set(
start=(x0, math.log(y0)), limit=(x1, math.log(y1))).Instantiate()
def value(self, count: JTensor) -> JTensor:
return jnp.exp(self.linear.value(count))
class Cosine(BaseSchedule):
"""Cosine learning rate schedule."""
@classmethod
def Params(cls) -> InstantiableParams: # pylint:disable=invalid-name
p = super().Params()
p.Define('initial_value', 1.0, 'Initial decay value.')
p.Define('final_value', 0., 'Final decay value.')
p.Define('total_steps', 0, 'Number of steps to reach full decay.')
return p
def value(self, count: JTensor) -> JTensor:
p = self.params
decay_gap = p.initial_value - p.final_value
return p.final_value + 0.5 * decay_gap * (1 + jnp.cos(math.pi * jnp.minimum(
1.0,
jnp.array(count, dtype=jnp.float32) / p.total_steps)))
class PiecewiseConstant(BaseSchedule):
"""A schedule with piecewise constants rate decay."""
@classmethod
def Params(cls) -> InstantiableParams: # pylint:disable=invalid-name
p = super().Params()
p.Define('boundaries', None, 'Boundaries at which learning rate drops.')
p.Define(
'values', None,
'Values in each interval. The number of values must be equal to the '
'number of boundaries plus 1.')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
if p.boundaries is None or p.values is None:
raise ValueError(
'The parameters `boundaries` and `values` must not be None.')
if len(p.values) != len(p.boundaries) + 1:
raise ValueError(
f'The number of values ({len(p.values)}) is expected to be equal '
f'to the number of boundaries plus 1 ({len(p.boundaries) + 1}).')
if sorted(p.boundaries) != list(p.boundaries):
raise ValueError(f'The boundaries ({p.boundaries}) must be sorted.')
def value(self, count: JTensor) -> JTensor:
p = self.params
# Map the step/boundaries to jnp.float32.
boundaries = [jnp.array(v, dtype=jnp.float32) for v in p.boundaries]
values = [jnp.array(v, dtype=jnp.float32) for v in p.values]
count = count.astype(jnp.float32)
if not boundaries:
assert len(values) == 1
return values[0]
v = 0
for i, threshold in enumerate(boundaries):
indicator = jnp.maximum(0., jnp.sign(threshold - count))
v = jnp.where(v > 0, v, indicator * values[i])
# Check if the step is greater than or equal to the last boundary.
indicator = jnp.maximum(0., jnp.sign(1 + count - boundaries[-1]))
v = jnp.where(v > 0, v, indicator * values[-1])
return v
class Transformer(BaseSchedule):
"""Inverse-decay learning rate until warmup_steps, then decay."""
@classmethod
def Params(cls) -> InstantiableParams: # pylint:disable=invalid-name
p = super().Params()
p.Define(
'warmup_steps', 4000,
'Increase the learning rate linearly for the first warmup_steps '
'training steps.')
p.Define(
'model_dim', 512,
'Model dimension that applies to embedding layers and all Transformer '
'layers.')
p.Define('worker_replicas', 1, 'Number of worker replicas.')
p.Define('decay_end', None,
'Ends the learning rate decay at decay_end-th step.')
return p
def value(self, count: JTensor) -> JTensor:
"""Returns the current learning rate decay."""
p = self.params
current_step = count.astype(jnp.float32)
model_dim = jnp.array(p.model_dim, dtype=jnp.float32)
warmup_steps = jnp.array(
p.warmup_steps * p.worker_replicas, dtype=jnp.float32)
if p.decay_end is not None:
decay_end = jnp.array(p.decay_end, dtype=jnp.float32)
current_step = jnp.where(current_step < decay_end, current_step,
decay_end)
return model_dim**-0.5 * jnp.minimum(
(current_step + 1) * warmup_steps**-1.5, (current_step + 1)**-0.5)
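# Worked note (sketch): the two terms above are equal when
# current_step + 1 == warmup_steps * worker_replicas, so the schedule peaks at
# model_dim**-0.5 * (warmup_steps * worker_replicas)**-0.5 and then decays as
# (current_step + 1)**-0.5, the standard Transformer learning-rate shape.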
class SqrtDecay(BaseSchedule):
"""Square root decay learning rate after warmup_steps."""
@classmethod
def Params(cls) -> InstantiableParams: # pylint:disable=invalid-name
p = super().Params()
p.Define(
'warmup_steps', 10000, 'Increase the learning rate linearly for '
'the first warmup_steps training steps.')
p.Define('multiplier', 1., 'Multiplier.')
p.Define('offset', 0., 'Offset.')
return p
def value(self, count: JTensor) -> JTensor:
"""Returns the current learning rate decay."""
p = self.params
current_step = count.astype(jnp.float32)
offset = jnp.array(p.offset, dtype=jnp.float32)
warmup_steps = jnp.array(p.warmup_steps, dtype=jnp.float32)
multiplier = jnp.array(p.multiplier, dtype=jnp.float32)
return jax.lax.rsqrt(jnp.maximum(current_step - offset,
warmup_steps)) * multiplier
class LinearRampupExponentialDecay(BaseSchedule):
"""Learning rate that first linearly ramps up to max and exponentially decays."""
@classmethod
def Params(cls) -> InstantiableParams: # pylint:disable=invalid-name
p = super().Params()
p.Define(
'warmup', 0,
'Increases the learning rate linearly before warmup * num_splits '
'steps.')
p.Define('decay_start', 0,
'Starts the learning rate decay at decay_start-th step.')
p.Define('decay_end', 0,
'Ends the learning rate decay at decay_end-th step.')
p.Define('min_ratio', 0.01, 'After decay_end, the multiplier stays at min.')
p.Define('max', 0, 'The schedule is never larger than this value.')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
assert p.decay_start >= p.warmup, (
'decay_start must be greater than or equal to warmup.')
assert p.decay_end >= p.decay_start, (
'decay_end must be greater than or equal to decay_start.')
assert p.max > 0, 'Must set max.'
# Offset the boundaries, since each schedule passed to
# optax.join_schedules() will receive a step count indicating the number
# of steps since the previous boundary transition.
self._schedules = []
self._boundaries = []
if p.warmup > 0:
self._schedules.append(Linear.Params().Set(
start=(0, 0.0), limit=(p.warmup, p.max)).Instantiate())
self._boundaries.append(p.warmup)
if p.decay_start > p.warmup:
self._schedules.append(Linear.Params().Set(
start=(0, p.max),
limit=(p.decay_start - p.warmup, p.max)).Instantiate())
self._boundaries.append(p.decay_start)
self._schedules.append(Exponential.Params().Set(
start=(0, p.max),
limit=(p.decay_end - p.decay_start, p.max * p.min_ratio)).Instantiate())
def value(self, value: JTensor) -> JTensor:
return jnp.array(
optax.join_schedules([s.value for s in self._schedules],
self._boundaries)(value), jnp.float32)
class LinearRampupPiecewiseConstant(BaseSchedule):
"""A learning rate schedule that does the following.
1. The multiplier ramps up linearly from 0 to the peak value (values[0]) at
boundaries[0].
2. After the peak, the multiplier stays at values[i] while the step falls into
[boundaries[i], boundaries[i+1]).
3. When step is more than boundaries[-1], then the multiplier is values[-1].
"""
@classmethod
def Params(cls) -> InstantiableParams: # pylint:disable=invalid-name
p = super().Params()
p.Define('boundaries', [], 'Boundaries at which learning rate changes.')
p.Define(
'values', [],
'The learning rate values for the PiecewiseConstant schedule and if '
'the step is between boundaries[i] and boundaries[i + 1] then '
'values[i] is returned, except while it is linearly ramping up from 0 to '
'values[0].')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
assert len(p.boundaries) >= 1 and len(p.boundaries) == len(p.values)
self.p0 = Linear.Params().Set(
start=(0, 0.0), limit=(p.boundaries[0], p.values[0])).Instantiate()
# Offset the boundaries, since each schedule passed to
# optax.join_schedules() will receive a step count indicating the number
# of steps since the previous boundary transition.
boundaries_pc = [b - p.boundaries[0] for b in p.boundaries[1:]]
self.p1 = PiecewiseConstant.Params().Set(
boundaries=boundaries_pc, values=p.values).Instantiate()
def value(self, value: JTensor) -> JTensor:
p = self.params
return jnp.array(
optax.join_schedules([self.p0.value, self.p1.value],
p.boundaries[:1])(value), jnp.float32)
class PiecewiseSchedule(BaseSchedule):
"""Piecewise schedule composed of sub-schedules."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('boundaries', None, 'Boundaries between subschedules.')
p.Define(
'schedules', None, 'A list of sub-schedules. '
'The length must be len(boundaries) + 1. '
'schedules[i] starts at boundaries[i-1] (inclusive) and ends at '
'boundaries[i] (exclusive). '
'The *relative* step in each interval will be passed to the '
'sub-schedule for Value.')
return p
def __init__(self, params) -> None:
super().__init__(params)
p = self.params
prev_boundary = 0
for boundary in p.boundaries:
if boundary < prev_boundary:
raise ValueError('Invalid boundary %s < %s' % (boundary, prev_boundary))
prev_boundary = boundary
if len(p.schedules) != len(p.boundaries) + 1:
raise ValueError('len(schedules) != len(boundaries) + 1: %s vs %s' %
(len(p.schedules), len(p.boundaries)))
self._schedules = [s.Instantiate() for s in p.schedules]
def value(self, count: JTensor) -> JTensor:
p = self.params
return jnp.array(
optax.join_schedules([s.value for s in self._schedules],
p.boundaries)(count), jnp.float32)
class CycleSchedule(BaseSchedule):
"""Piecewise schedule composed of sub-schedules in a cycle."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define(
'schedules', None, 'A list of sub-schedules. Unlike PiecewiseSchedule, '
'the absolute step is passed to the sub-schedule.')
p.Define('steps', None, 'The number of steps to run each sub-schedule.')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
if len(p.schedules) != len(p.steps):
raise ValueError('len(schedules) != len(steps): %s vs %s' %
(len(p.schedules), len(p.steps)))
self._schedules = [s.Instantiate() for s in p.schedules]
boundaries = [0]
for step in p.steps:
boundaries.append(boundaries[-1] + step)
self._period = boundaries[-1]
self._boundaries = boundaries[1:-1]
def value(self, count: JTensor) -> JTensor:
relative_step = jnp.mod(count, self._period)
output = self._schedules[0].value(count)
for boundary, schedule in zip(self._boundaries, self._schedules[1:]):
output = jnp.where(relative_step < boundary, output,
schedule.value(count))
return output
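# Hedged usage sketch (illustrative only, never called in this module):
# instantiate the warmup + exponential-decay schedule defined above and query
# it at a single step, passing the scalar uint32 array that value() expects.
def _example_schedule_value():
    p = LinearRampupExponentialDecay.Params().Set(
        warmup=100, decay_start=100, decay_end=1000, min_ratio=0.01, max=1.0)
    sched = p.Instantiate()
    return sched.value(jnp.array(500, dtype=jnp.uint32))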
|
|
import unittest
from django.test import TransactionTestCase
from django.conf import settings
from django.utils import timezone
from datetime import timedelta, datetime
import sys
if sys.version_info >= (3, 0):
unicode = str
from background_task.tasks import tasks, TaskSchedule, TaskProxy
from background_task.models import Task, CompletedTask
from background_task import background
_recorded = []
def empty_task():
pass
def record_task(*arg, **kw):
_recorded.append((arg, kw))
class TestBackgroundDecorator(unittest.TestCase):
def test_get_proxy(self):
proxy = tasks.background()(empty_task)
self.assertNotEqual(proxy, empty_task)
self.assertTrue(isinstance(proxy, TaskProxy))
# and alternate form
proxy = tasks.background(empty_task)
self.assertNotEqual(proxy, empty_task)
self.assertTrue(isinstance(proxy, TaskProxy))
def test_default_name(self):
proxy = tasks.background()(empty_task)
self.assertEqual(proxy.name, 'background_task.tests.task_tests.empty_task')
proxy = tasks.background()(record_task)
self.assertEqual(proxy.name, 'background_task.tests.task_tests.record_task')
proxy = tasks.background(empty_task)
# print proxy
self.assertTrue(isinstance(proxy, TaskProxy))
self.assertEqual(proxy.name, 'background_task.tests.task_tests.empty_task')
def test_specified_name(self):
proxy = tasks.background(name='mytask')(empty_task)
self.assertEqual(proxy.name, 'mytask')
def test_task_function(self):
proxy = tasks.background()(empty_task)
self.assertEqual(proxy.task_function, empty_task)
proxy = tasks.background()(record_task)
self.assertEqual(proxy.task_function, record_task)
def test_default_schedule(self):
proxy = tasks.background()(empty_task)
self.assertEqual(TaskSchedule(), proxy.schedule)
def test_schedule(self):
proxy = tasks.background(schedule=10)(empty_task)
self.assertEqual(TaskSchedule(run_at=10), proxy.schedule)
def test__unicode__(self):
proxy = tasks.background()(empty_task)
self.assertEqual(u'TaskProxy(background_task.tests.task_tests.empty_task)',
unicode(proxy))
def test_shortcut(self):
'''check shortcut to decorator works'''
proxy = background()(empty_task)
self.failIfEqual(proxy, empty_task)
self.assertEqual(proxy.task_function, empty_task)
class TestTaskProxy(unittest.TestCase):
def setUp(self):
super(TestTaskProxy, self).setUp()
self.proxy = tasks.background()(record_task)
def test_run_task(self):
tasks.run_task(self.proxy.name, [], {})
self.assertEqual(((), {}), _recorded.pop())
tasks.run_task(self.proxy.name, ['hi'], {})
self.assertEqual((('hi',), {}), _recorded.pop())
tasks.run_task(self.proxy.name, [], {'kw': 1})
self.assertEqual(((), {'kw': 1}), _recorded.pop())
class TestTaskSchedule(unittest.TestCase):
def test_priority(self):
self.assertEqual(0, TaskSchedule().priority)
self.assertEqual(0, TaskSchedule(priority=0).priority)
self.assertEqual(1, TaskSchedule(priority=1).priority)
self.assertEqual(2, TaskSchedule(priority=2).priority)
def _within_one_second(self, d1, d2):
self.failUnless(isinstance(d1, datetime))
self.failUnless(isinstance(d2, datetime))
self.failUnless(abs(d1 - d2) <= timedelta(seconds=1))
def test_run_at(self):
for schedule in [None, 0, timedelta(seconds=0)]:
now = timezone.now()
run_at = TaskSchedule(run_at=schedule).run_at
self._within_one_second(run_at, now)
now = timezone.now()
run_at = TaskSchedule(run_at=now).run_at
self._within_one_second(run_at, now)
fixed_dt = timezone.now() + timedelta(seconds=60)
run_at = TaskSchedule(run_at=fixed_dt).run_at
self._within_one_second(run_at, fixed_dt)
run_at = TaskSchedule(run_at=90).run_at
self._within_one_second(run_at, timezone.now() + timedelta(seconds=90))
run_at = TaskSchedule(run_at=timedelta(seconds=35)).run_at
self._within_one_second(run_at, timezone.now() + timedelta(seconds=35))
def test_create(self):
fixed_dt = timezone.now() + timedelta(seconds=10)
schedule = TaskSchedule.create({'run_at': fixed_dt})
self.assertEqual(schedule.run_at, fixed_dt)
self.assertEqual(0, schedule.priority)
self.assertEqual(TaskSchedule.SCHEDULE, schedule.action)
schedule = {'run_at': fixed_dt, 'priority': 2,
'action': TaskSchedule.RESCHEDULE_EXISTING}
schedule = TaskSchedule.create(schedule)
self.assertEqual(schedule.run_at, fixed_dt)
self.assertEqual(2, schedule.priority)
self.assertEqual(TaskSchedule.RESCHEDULE_EXISTING, schedule.action)
schedule = TaskSchedule.create(0)
self._within_one_second(schedule.run_at, timezone.now())
schedule = TaskSchedule.create(10)
self._within_one_second(schedule.run_at,
timezone.now() + timedelta(seconds=10))
schedule = TaskSchedule.create(TaskSchedule(run_at=fixed_dt))
self.assertEqual(schedule.run_at, fixed_dt)
self.assertEqual(0, schedule.priority)
self.assertEqual(TaskSchedule.SCHEDULE, schedule.action)
def test_merge(self):
default = TaskSchedule(run_at=10, priority=2,
action=TaskSchedule.RESCHEDULE_EXISTING)
schedule = TaskSchedule.create(20).merge(default)
self._within_one_second(timezone.now() + timedelta(seconds=20),
schedule.run_at)
self.assertEqual(2, schedule.priority)
self.assertEqual(TaskSchedule.RESCHEDULE_EXISTING, schedule.action)
schedule = TaskSchedule.create({'priority': 0}).merge(default)
self._within_one_second(timezone.now() + timedelta(seconds=10),
schedule.run_at)
self.assertEqual(0, schedule.priority)
self.assertEqual(TaskSchedule.RESCHEDULE_EXISTING, schedule.action)
action = TaskSchedule.CHECK_EXISTING
schedule = TaskSchedule.create({'action': action}).merge(default)
self._within_one_second(timezone.now() + timedelta(seconds=10),
schedule.run_at)
self.assertEqual(2, schedule.priority)
self.assertEqual(action, schedule.action)
def test_repr(self):
self.assertEqual('TaskSchedule(run_at=10, priority=0)',
repr(TaskSchedule(run_at=10, priority=0)))
class TestSchedulingTasks(TransactionTestCase):
def test_background_gets_scheduled(self):
self.result = None
@tasks.background(name='test_background_gets_scheduled')
def set_result(result):
self.result = result
# calling set_result should now actually create a record in the db
set_result(1)
all_tasks = Task.objects.all()
self.assertEqual(1, all_tasks.count())
task = all_tasks[0]
self.assertEqual('test_background_gets_scheduled', task.task_name)
self.assertEqual('[[1], {}]', task.task_params)
def test_reschedule_existing(self):
reschedule_existing = TaskSchedule.RESCHEDULE_EXISTING
@tasks.background(name='test_reschedule_existing',
schedule=TaskSchedule(action=reschedule_existing))
def reschedule_fn():
pass
# this should only end up with one task
# and it should be scheduled for the later time
reschedule_fn()
reschedule_fn(schedule=90)
all_tasks = Task.objects.all()
self.assertEqual(1, all_tasks.count())
task = all_tasks[0]
self.assertEqual('test_reschedule_existing', task.task_name)
# check task is scheduled for later on
now = timezone.now()
self.failUnless(now + timedelta(seconds=89) < task.run_at)
self.failUnless(now + timedelta(seconds=91) > task.run_at)
def test_check_existing(self):
check_existing = TaskSchedule.CHECK_EXISTING
@tasks.background(name='test_check_existing',
schedule=TaskSchedule(action=check_existing))
def check_fn():
pass
# this should only end up with the first call
# scheduled
check_fn()
check_fn(schedule=90)
all_tasks = Task.objects.all()
self.assertEqual(1, all_tasks.count())
task = all_tasks[0]
self.assertEqual('test_check_existing', task.task_name)
# check new task is scheduled for the earlier time
now = timezone.now()
self.failUnless(now - timedelta(seconds=1) < task.run_at)
self.failUnless(now + timedelta(seconds=1) > task.run_at)
class TestTaskRunner(TransactionTestCase):
def setUp(self):
super(TestTaskRunner, self).setUp()
self.runner = tasks._runner
def test_get_task_to_run_no_tasks(self):
self.failIf(self.runner.get_task_to_run(tasks))
def test_get_task_to_run(self):
task = Task.objects.new_task('mytask', (1,), {})
task.save()
self.failUnless(task.locked_by is None)
self.failUnless(task.locked_at is None)
locked_task = self.runner.get_task_to_run(tasks)
self.failIf(locked_task is None)
self.failIf(locked_task.locked_by is None)
self.assertEqual(self.runner.worker_name, locked_task.locked_by)
self.failIf(locked_task.locked_at is None)
self.assertEqual('mytask', locked_task.task_name)
class TestTaskModel(TransactionTestCase):
def test_lock_uncontested(self):
task = Task.objects.new_task('mytask')
task.save()
self.failUnless(task.locked_by is None)
self.failUnless(task.locked_at is None)
locked_task = task.lock('mylock')
self.assertEqual('mylock', locked_task.locked_by)
self.failIf(locked_task.locked_at is None)
self.assertEqual(task.pk, locked_task.pk)
def test_lock_contested(self):
# locking should actually look at db, not object
# in memory
task = Task.objects.new_task('mytask')
task.save()
self.failIf(task.lock('mylock') is None)
self.failUnless(task.lock('otherlock') is None)
def test_lock_expired(self):
settings.MAX_RUN_TIME = 60
task = Task.objects.new_task('mytask')
task.save()
locked_task = task.lock('mylock')
# force expire the lock
expire_by = timedelta(seconds=(settings.MAX_RUN_TIME + 2))
locked_task.locked_at = locked_task.locked_at - expire_by
locked_task.save()
# now try to get the lock again
self.failIf(task.lock('otherlock') is None)
def test__unicode__(self):
task = Task.objects.new_task('mytask')
self.assertEqual(u'Task(mytask)', unicode(task))
class TestTasks(TransactionTestCase):
def setUp(self):
super(TestTasks, self).setUp()
settings.MAX_RUN_TIME = 60
settings.MAX_ATTEMPTS = 25
@tasks.background(name='set_fields')
def set_fields(**fields):
for key, value in fields.items():
setattr(self, key, value)
@tasks.background(name='throws_error')
def throws_error():
raise RuntimeError("an error")
self.set_fields = set_fields
self.throws_error = throws_error
def test_run_next_task_nothing_scheduled(self):
self.failIf(tasks.run_next_task())
def test_run_next_task_one_task_scheduled(self):
self.set_fields(worked=True)
self.failIf(hasattr(self, 'worked'))
self.failUnless(tasks.run_next_task())
self.failUnless(hasattr(self, 'worked'))
self.failUnless(self.worked)
def test_run_next_task_several_tasks_scheduled(self):
self.set_fields(one='1')
self.set_fields(two='2')
self.set_fields(three='3')
for i in range(3):
self.failUnless(tasks.run_next_task())
self.failIf(tasks.run_next_task()) # everything should have been run
for field, value in [('one', '1'), ('two', '2'), ('three', '3')]:
self.failUnless(hasattr(self, field))
self.assertEqual(value, getattr(self, field))
def test_run_next_task_error_handling(self):
self.throws_error()
all_tasks = Task.objects.all()
self.assertEqual(1, all_tasks.count())
original_task = all_tasks[0]
# should run, but trigger error
self.failUnless(tasks.run_next_task())
all_tasks = Task.objects.all()
self.assertEqual(1, all_tasks.count())
failed_task = all_tasks[0]
# should have an error recorded
self.failIfEqual('', failed_task.last_error)
self.failUnless(failed_task.failed_at is None)
self.assertEqual(1, failed_task.attempts)
# should have been rescheduled for the future
# and no longer locked
self.failUnless(failed_task.run_at > original_task.run_at)
self.failUnless(failed_task.locked_by is None)
self.failUnless(failed_task.locked_at is None)
def test_run_next_task_does_not_run_locked(self):
self.set_fields(locked=True)
self.failIf(hasattr(self, 'locked'))
all_tasks = Task.objects.all()
self.assertEqual(1, all_tasks.count())
original_task = all_tasks[0]
original_task.lock('lockname')
self.failIf(tasks.run_next_task())
self.failIf(hasattr(self, 'locked'))
all_tasks = Task.objects.all()
self.assertEqual(1, all_tasks.count())
def test_run_next_task_unlocks_after_MAX_RUN_TIME(self):
self.set_fields(lock_overridden=True)
all_tasks = Task.objects.all()
self.assertEqual(1, all_tasks.count())
original_task = all_tasks[0]
locked_task = original_task.lock('lockname')
self.failIf(tasks.run_next_task())
self.failIf(hasattr(self, 'lock_overridden'))
# push the lock time into the past
expire_by = timedelta(seconds=(settings.MAX_RUN_TIME + 2))
locked_task.locked_at = locked_task.locked_at - expire_by
locked_task.save()
# so now we should be able to override the lock
# and run the task
self.failUnless(tasks.run_next_task())
self.assertEqual(0, Task.objects.count())
self.failUnless(hasattr(self, 'lock_overridden'))
self.failUnless(self.lock_overridden)
def test_default_schedule_used_for_run_at(self):
@tasks.background(name='default_schedule_used_for_run_at', schedule=60)
def default_schedule_used_for_time():
pass
now = timezone.now()
default_schedule_used_for_time()
all_tasks = Task.objects.all()
self.assertEqual(1, all_tasks.count())
task = all_tasks[0]
self.failUnless(now < task.run_at)
self.failUnless((task.run_at - now) <= timedelta(seconds=61))
self.failUnless((task.run_at - now) >= timedelta(seconds=59))
def test_default_schedule_used_for_priority(self):
@tasks.background(name='default_schedule_used_for_priority',
schedule={'priority': 2})
def default_schedule_used_for_priority():
pass
now = timezone.now()
default_schedule_used_for_priority()
all_tasks = Task.objects.all()
self.assertEqual(1, all_tasks.count())
task = all_tasks[0]
self.assertEqual(2, task.priority)
def test_non_default_schedule_used(self):
default_run_at = timezone.now() + timedelta(seconds=90)
@tasks.background(name='non_default_schedule_used',
schedule={'run_at': default_run_at, 'priority': 2})
def default_schedule_used_for_priority():
pass
run_at = timezone.now().replace(microsecond=0) + timedelta(seconds=60)
default_schedule_used_for_priority(schedule=run_at)
all_tasks = Task.objects.all()
self.assertEqual(1, all_tasks.count())
task = all_tasks[0]
self.assertEqual(run_at, task.run_at)
def test_failed_at_set_after_MAX_ATTEMPTS(self):
@tasks.background(name='test_failed_at_set_after_MAX_ATTEMPTS')
def failed_at_set_after_MAX_ATTEMPTS():
raise RuntimeError('failed')
failed_at_set_after_MAX_ATTEMPTS()
available = Task.objects.find_available()
self.assertEqual(1, available.count())
task = available[0]
self.failUnless(task.failed_at is None)
task.attempts = settings.MAX_ATTEMPTS
task.save()
# task should be scheduled to run now
# but will be marked as failed straight away
self.failUnless(tasks.run_next_task())
available = Task.objects.find_available()
self.assertEqual(0, available.count())
all_tasks = Task.objects.all()
self.assertEqual(0, all_tasks.count())
self.assertEqual(1, CompletedTask.objects.count())
completed_task = CompletedTask.objects.all()[0]
self.failIf(completed_task.failed_at is None)
class MaxAttemptsTestCase(TransactionTestCase):
def setUp(self):
@tasks.background(name='failing task')
def failing_task():
return 0/0
self.failing_task = failing_task
self.task1 = self.failing_task()
self.task2 = self.failing_task()
self.task1_id = self.task1.id
self.task2_id = self.task2.id
def test_max_attempts_one(self):
with self.settings(MAX_ATTEMPTS=1):
self.assertEqual(settings.MAX_ATTEMPTS, 1)
self.assertEqual(Task.objects.count(), 2)
tasks.run_next_task()
self.assertEqual(Task.objects.count(), 1)
self.assertEqual(Task.objects.all()[0].id, self.task2_id)
self.assertEqual(CompletedTask.objects.count(), 1)
completed_task = CompletedTask.objects.all()[0]
self.assertEqual(completed_task.attempts, 1)
self.assertEqual(completed_task.task_name, self.task1.task_name)
self.assertEqual(completed_task.task_params, self.task1.task_params)
self.assertIsNotNone(completed_task.last_error)
self.assertIsNotNone(completed_task.failed_at)
tasks.run_next_task()
self.assertEqual(Task.objects.count(), 0)
self.assertEqual(CompletedTask.objects.count(), 2)
def test_max_attempts_two(self):
with self.settings(MAX_ATTEMPTS=2):
self.assertEqual(settings.MAX_ATTEMPTS, 2)
tasks.run_next_task()
self.assertEqual(Task.objects.count(), 2)
self.assertEqual(CompletedTask.objects.count(), 0)
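# Hedged usage note, mirroring the decorator forms exercised by the tests
# above (the task name and function are hypothetical):
#   @tasks.background(name='send_welcome_email', schedule=60)
#   def send_welcome_email(user_id):
#       ...
#   send_welcome_email(42)               # queues a Task row to run ~60s later
#   send_welcome_email(42, schedule=10)  # a per-call schedule overrides the default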
|
|
#!/usr/bin/python
# Copyright (c) Arni Mar Jonsson.
# See LICENSE for details.
import sys, string, unittest, itertools
class TestRocksDB(unittest.TestCase):
def setUp(self):
# import local rocksdb
import rocksdb as _rocksdb
self.rocksdb = _rocksdb
dir(self.rocksdb)
# Python2/3 compat
if hasattr(string, 'lowercase'):
self.lowercase = string.lowercase
self.uppercase = string.uppercase
else:
self.lowercase = string.ascii_lowercase
self.uppercase = string.ascii_uppercase
# comparator
if sys.version_info[0] < 3:
def my_comparison(a, b):
return cmp(a, b)
else:
def my_comparison(a, b):
if a < b:
return -1
elif a > b:
return 1
else:
return 0
self.comparator = 'bytewise'
if True:
self.comparator = ('bytewise', my_comparison)
# repair/destroy previous database, if any
self.name = 'db_a'
#self.rocksdb.RepairDB(self.name, comparator = self.comparator)
self.rocksdb.DestroyDB(self.name)
def _open_options(self, create_if_missing = True, error_if_exists = False):
v = {
'create_if_missing': create_if_missing,
'error_if_exists': error_if_exists,
'paranoid_checks': False,
'write_buffer_size': 2 * (2 << 20),
'max_open_files': 1000,
'comparator': self.comparator
}
return v
def _open(self, *args, **kwargs):
options = self._open_options(*args, **kwargs)
db = self.rocksdb.RocksDB(self.name, **options)
dir(db)
return db
def testIteratorNone(self):
options = self._open_options()
db = self.rocksdb.RocksDB(self.name, **options)
for s in 'abcdef':
db.Put(self._s(s), self._s(s))
kv_ = [(self._s('a'), self._s('a')), (self._s('b'), self._s('b')), (self._s('c'), self._s('c')), (self._s('d'), self._s('d')), (self._s('e'), self._s('e')), (self._s('f'), self._s('f'))]
kv = list(db.RangeIter(key_from = None, key_to = None))
self.assertEqual(kv, kv_)
kv = list(db.RangeIter(key_to = None))
self.assertEqual(kv, kv_)
kv = list(db.RangeIter(key_from = None))
self.assertEqual(kv, kv_)
kv = list(db.RangeIter())
self.assertEqual(kv, kv_)
def testIteratorCrash(self):
options = self._open_options()
db = self.rocksdb.RocksDB(self.name, **options)
db.Put(self._s('a'), self._s('b'))
i = db.RangeIter(include_value = False, reverse = True)
dir(i)
del self.rocksdb
def _s(self, s):
if sys.version_info[0] >= 3:
return bytearray(s, encoding = 'latin1')
else:
return s
def _join(self, i):
return self._s('').join(i)
# NOTE: modeled after test 'Snapshot'
def testSnapshotBasic(self):
db = self._open()
# destroy database, if any
db.Put(self._s('foo'), self._s('v1'))
s1 = db.CreateSnapshot()
dir(s1)
db.Put(self._s('foo'), self._s('v2'))
s2 = db.CreateSnapshot()
db.Put(self._s('foo'), self._s('v3'))
s3 = db.CreateSnapshot()
db.Put(self._s('foo'), self._s('v4'))
self.assertEqual(s1.Get(self._s('foo')), self._s('v1'))
self.assertEqual(s2.Get(self._s('foo')), self._s('v2'))
self.assertEqual(s3.Get(self._s('foo')), self._s('v3'))
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
# TBD: close properly
del s3
self.assertEqual(s1.Get(self._s('foo')), self._s('v1'))
self.assertEqual(s2.Get(self._s('foo')), self._s('v2'))
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
# TBD: close properly
del s1
self.assertEqual(s2.Get(self._s('foo')), self._s('v2'))
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
# TBD: close properly
del s2
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
# re-open
del db
db = self._open()
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
def ClearDB(self, db):
for k in list(db.RangeIter(include_value = False, reverse = True)):
db.Delete(k)
def ClearDB_batch(self, db):
b = self.rocksdb.WriteBatch()
dir(b)
for k in db.RangeIter(include_value = False, reverse = True):
b.Delete(k)
db.Write(b)
def CountDB(self, db):
return sum(1 for i in db.RangeIter(reverse = True))
def _insert_lowercase(self, db):
b = self.rocksdb.WriteBatch()
for c in self.lowercase:
b.Put(self._s(c), self._s('hello'))
db.Write(b)
def _insert_uppercase_batch(self, db):
b = self.rocksdb.WriteBatch()
for c in self.uppercase:
b.Put(self._s(c), self._s('hello'))
db.Write(b)
def _test_uppercase_get(self, db):
for k in self.uppercase:
v = db.Get(self._s(k))
self.assertEqual(v, self._s('hello'))
self.assertTrue(k in self.uppercase)
def _test_uppercase_iter(self, db):
s = self._join(k for k, v in db.RangeIter(self._s('J'), self._s('M')))
self.assertEqual(s, self._s('JKLM'))
s = self._join(k for k, v in db.RangeIter(self._s('S')))
self.assertEqual(s, self._s('STUVWXYZ'))
s = self._join(k for k, v in db.RangeIter(key_to = self._s('E')))
self.assertEqual(s, self._s('ABCDE'))
def _test_uppercase_iter_rev(self, db):
# inside range
s = self._join(k for k, v in db.RangeIter(self._s('J'), self._s('M'), reverse = True))
self.assertEqual(s, self._s('MLKJ'))
# partly outside range
s = self._join(k for k, v in db.RangeIter(self._s('Z'), self._s(chr(ord('Z') + 1)), reverse = True))
self.assertEqual(s, self._s('Z'))
s = self._join(k for k, v in db.RangeIter(self._s(chr(ord('A') - 1)), self._s('A'), reverse = True))
self.assertEqual(s, self._s('A'))
# wholly outside range
s = self._join(k for k, v in db.RangeIter(self._s(chr(ord('Z') + 1)), self._s(chr(ord('Z') + 2)), reverse = True))
self.assertEqual(s, self._s(''))
s = self._join(k for k, v in db.RangeIter(self._s(chr(ord('A') - 2)), self._s(chr(ord('A') - 1)), reverse = True))
self.assertEqual(s, self._s(''))
# lower limit
s = self._join(k for k, v in db.RangeIter(self._s('S'), reverse = True))
self.assertEqual(s, self._s('ZYXWVUTS'))
# upper limit
s = self._join(k for k, v in db.RangeIter(key_to = self._s('E'), reverse = True))
self.assertEqual(s, self._s('EDCBA'))
def _test_lowercase_iter(self, db):
s = self._join(k for k, v in db.RangeIter(self._s('j'), self._s('m')))
self.assertEqual(s, self._s('jklm'))
s = self._join(k for k, v in db.RangeIter(self._s('s')))
self.assertEqual(s, self._s('stuvwxyz'))
s = self._join(k for k, v in db.RangeIter(key_to = self._s('e')))
self.assertEqual(s, self._s('abcde'))
def _test_lowercase_iter_rev(self, db):
s = self._join(k for k, v in db.RangeIter(self._s('j'), self._s('m'), reverse = True))
self.assertEqual(s, self._s('mlkj'))
s = self._join(k for k, v in db.RangeIter(self._s('s'), reverse = True))
self.assertEqual(s, self._s('zyxwvuts'))
s = self._join(k for k, v in db.RangeIter(key_to = self._s('e'), reverse = True))
self.assertEqual(s, self._s('edcba'))
def _test_lowercase_get(self, db):
for k in self.lowercase:
v = db.Get(self._s(k))
self.assertEqual(v, self._s('hello'))
self.assertTrue(k in self.lowercase)
def testIterationBasic(self):
db = self._open()
self._insert_lowercase(db)
self.assertEqual(self.CountDB(db), 26)
self._test_lowercase_iter(db)
self._test_lowercase_iter_rev(db)
self._test_lowercase_get(db)
self.ClearDB_batch(db)
self._insert_uppercase_batch(db)
self._test_uppercase_iter(db)
self._test_uppercase_iter_rev(db)
self._test_uppercase_get(db)
self.assertEqual(self.CountDB(db), 26)
def testCompact(self):
db = self._open()
s = self._s('foo' * 10)
for i in itertools.count():
db.Put(self._s('%i' % i), s)
if i > 10000:
break
db.CompactRange(self._s('1000'), self._s('10000'))
db.CompactRange(start = self._s('1000'))
db.CompactRange(end = self._s('1000'))
db.CompactRange(start = self._s('1000'), end = None)
db.CompactRange(start = None, end = self._s('1000'))
db.CompactRange()
# tries to reproduce http://code.google.com/p/rocksdb/issues/detail?id=44
def testMe(self):
db = self._open()
db.Put(self._s('key1'), self._s('val1'))
del db
db = self._open()
db.Delete(self._s('key2'))
db.Delete(self._s('key1'))
del db
db = self._open()
db.Delete(self._s('key2'))
del db
db = self._open()
db.Put(self._s('key3'), self._s('val1'))
del db
db = self._open()
del db
db = self._open()
v = list(db.RangeIter())
self.assertEqual(v, [(self._s('key3'), self._s('val1'))])
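# Hedged usage sketch (assumes the same local `rocksdb` binding imported in
# setUp above); it is defined only for illustration and never run by the tests.
def _example_basic_usage():
    import rocksdb
    db = rocksdb.RocksDB('example_db', create_if_missing=True)
    key = bytearray('greeting', encoding='latin1')
    db.Put(key, bytearray('hello', encoding='latin1'))
    assert db.Get(key) == bytearray('hello', encoding='latin1')
    # iterate over every key/value pair, as in testIteratorNone above
    return list(db.RangeIter())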
if __name__ == '__main__':
unittest.main()
|
|
"""Algorithms for computing symbolic roots of polynomials. """
from __future__ import print_function, division
import math
from sympy.core.symbol import Dummy, Symbol, symbols
from sympy.core import S, I, pi
from sympy.core.mul import expand_2arg
from sympy.core.relational import Eq
from sympy.core.sympify import sympify
from sympy.core.numbers import Rational, igcd
from sympy.ntheory import divisors, isprime, nextprime
from sympy.functions import exp, sqrt, re, im, Abs, cos, acos, sin, Piecewise
from sympy.polys.polytools import Poly, cancel, factor, gcd_list, discriminant
from sympy.polys.specialpolys import cyclotomic_poly
from sympy.polys.polyerrors import PolynomialError, GeneratorsNeeded, DomainError
from sympy.polys.polyquinticconst import PolyQuintic
from sympy.polys.rationaltools import together
from sympy.simplify import simplify, powsimp
from sympy.utilities import default_sort_key, public
from sympy.core.compatibility import reduce, xrange
def roots_linear(f):
"""Returns a list of roots of a linear polynomial."""
r = -f.nth(0)/f.nth(1)
dom = f.get_domain()
if not dom.is_Numerical:
if dom.is_Composite:
r = factor(r)
else:
r = simplify(r)
return [r]
def roots_quadratic(f):
"""Returns a list of roots of a quadratic polynomial."""
a, b, c = f.all_coeffs()
dom = f.get_domain()
def _simplify(expr):
if dom.is_Composite:
return factor(expr)
else:
return simplify(expr)
if c is S.Zero:
r0, r1 = S.Zero, -b/a
if not dom.is_Numerical:
r1 = _simplify(r1)
elif b is S.Zero:
r = -c/a
if not dom.is_Numerical:
R = sqrt(_simplify(r))
else:
R = sqrt(r)
r0 = R
r1 = -R
else:
d = b**2 - 4*a*c
if dom.is_Numerical:
D = sqrt(d)
r0 = (-b + D) / (2*a)
r1 = (-b - D) / (2*a)
else:
D = sqrt(_simplify(d))
A = 2*a
E = _simplify(-b/A)
F = D/A
r0 = E + F
r1 = E - F
return sorted([expand_2arg(i) for i in (r0, r1)], key=default_sort_key)
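# Illustrative sketch (not part of this module's doctests): for the monic
# quadratic x**2 - 3*x + 2 the discriminant is 1, so the branches above give
# the sorted roots [1, 2]:
#   >>> roots_quadratic(Poly('x**2 - 3*x + 2'))
#   [1, 2]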
def roots_cubic(f, trig=False):
"""Returns a list of roots of a cubic polynomial."""
if trig:
a, b, c, d = f.all_coeffs()
p = (3*a*c - b**2)/3/a**2
q = (2*b**3 - 9*a*b*c + 27*a**2*d)/(27*a**3)
D = 18*a*b*c*d - 4*b**3*d+b**2*c**2 - 4*a*c**3 - 27*a**2*d**2
if (D > 0) == True:
rv = []
for k in range(3):
rv.append(2*sqrt(-p/3)*cos(acos(3*q/2/p*sqrt(-3/p))/3 - k*2*pi/3))
return list(sorted([i - b/3/a for i in rv]))
_, a, b, c = f.monic().all_coeffs()
if c is S.Zero:
x1, x2 = roots([1, a, b], multiple=True)
return [x1, S.Zero, x2]
p = b - a**2/3
q = c - a*b/3 + 2*a**3/27
pon3 = p/3
aon3 = a/3
if p is S.Zero:
if q is S.Zero:
return [-aon3]*3
else:
if q.is_real:
if (q > 0) == True:
u1 = -q**Rational(1, 3)
else:
u1 = (-q)**Rational(1, 3)
else:
u1 = (-q)**Rational(1, 3)
elif q is S.Zero:
y1, y2 = roots([1, 0, p], multiple=True)
return [tmp - aon3 for tmp in [y1, S.Zero, y2]]
elif q.is_real and q < 0:
u1 = -(-q/2 + sqrt(q**2/4 + pon3**3))**Rational(1, 3)
else:
u1 = (q/2 + sqrt(q**2/4 + pon3**3))**Rational(1, 3)
coeff = S.ImaginaryUnit*sqrt(3)/2
u2 = u1*(-S.Half + coeff)
u3 = u1*(-S.Half - coeff)
if p is S.Zero:
return [u1 - aon3, u2 - aon3, u3 - aon3]
soln = [
-u1 + pon3/u1 - aon3,
-u2 + pon3/u2 - aon3,
-u3 + pon3/u3 - aon3
]
return soln
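# Illustrative usage sketch (added for clarity, not part of the original
# module): the Cardano branch of roots_cubic returns the three cube roots of
# unity for x**3 - 1, e.g.
#
#     from sympy import Poly
#     from sympy.abc import x
#     roots_cubic(Poly(x**3 - 1, x))
#     # -> [1, -1/2 + sqrt(3)*I/2, -1/2 - sqrt(3)*I/2]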
def _roots_quartic_euler(p, q, r, a):
"""
Descartes-Euler solution of the quartic equation
Parameters
==========
p, q, r: coefficients of ``x**4 + p*x**2 + q*x + r``
a: shift of the roots
Notes
=====
This is a helper function for ``roots_quartic``.
Look for solutions of the form ::
``x1 = sqrt(R) - sqrt(A + B*sqrt(R))``
``x2 = -sqrt(R) - sqrt(A - B*sqrt(R))``
``x3 = -sqrt(R) + sqrt(A - B*sqrt(R))``
``x4 = sqrt(R) + sqrt(A + B*sqrt(R))``
To satisfy the quartic equation one must have
``p = -2*(R + A); q = -4*B*R; r = (R - A)**2 - B**2*R``
so that ``R`` must satisfy the Descartes-Euler resolvent equation
``64*R**3 + 32*p*R**2 + (4*p**2 - 16*r)*R - q**2 = 0``
If the resolvent does not have a rational solution, return None;
in that case it is likely that the Ferrari method gives a simpler
solution.
Examples
========
>>> from sympy import S
>>> from sympy.polys.polyroots import _roots_quartic_euler
>>> p, q, r = -S(64)/5, -S(512)/125, -S(1024)/3125
>>> _roots_quartic_euler(p, q, r, S(0))[0]
-sqrt(32*sqrt(5)/125 + 16/5) + 4*sqrt(5)/5
"""
from sympy.solvers import solve
# solve the resolvent equation
x = Symbol('x')
eq = 64*x**3 + 32*p*x**2 + (4*p**2 - 16*r)*x - q**2
xsols = list(roots(Poly(eq, x), cubics=False).keys())
xsols = [sol for sol in xsols if sol.is_rational]
if not xsols:
return None
R = max(xsols)
c1 = sqrt(R)
B = -q*c1/(4*R)
A = -R - p/2
c2 = sqrt(A + B)
c3 = sqrt(A - B)
return [c1 - c2 - a, -c1 - c3 - a, -c1 + c3 - a, c1 + c2 - a]
def roots_quartic(f):
r"""
Returns a list of roots of a quartic polynomial.
There are many references for solving quartic expressions available [1-5].
This reviewer has found that many of them require one to select from among
2 or more possible sets of solutions and that some solutions work when one
is searching for real roots but don't work when searching for complex roots
(though this is not always stated clearly). The following routine has been
tested and found to be correct for 0, 2 or 4 complex roots.
The quasisymmetric case solution [6] looks for quartics that have the form
`x**4 + A*x**3 + B*x**2 + C*x + D = 0` where `(C/A)**2 = D`.
Although no general solution that is always applicable for all
coefficients is known to this reviewer, certain conditions are tested
to determine the simplest 4 expressions that can be returned:
1) `f = c + a*(a**2/8 - b/2) == 0`
    2) `g = d - a*(a*(3*a**2/256 - b/16) + c/4) == 0`
3) if `f != 0` and `g != 0` and `p = -d + a*c/4 - b**2/12` then
a) `p == 0`
b) `p != 0`
Examples
========
>>> from sympy import Poly, symbols, I
>>> from sympy.polys.polyroots import roots_quartic
>>> r = roots_quartic(Poly('x**4-6*x**3+17*x**2-26*x+20'))
>>> # 4 complex roots: 1+-I*sqrt(3), 2+-I
>>> sorted(str(tmp.evalf(n=2)) for tmp in r)
['1.0 + 1.7*I', '1.0 - 1.7*I', '2.0 + 1.0*I', '2.0 - 1.0*I']
References
==========
1. http://mathforum.org/dr.math/faq/faq.cubic.equations.html
2. http://en.wikipedia.org/wiki/Quartic_function#Summary_of_Ferrari.27s_method
3. http://planetmath.org/encyclopedia/GaloisTheoreticDerivationOfTheQuarticFormula.html
4. http://staff.bath.ac.uk/masjhd/JHD-CA.pdf
5. http://www.albmath.org/files/Math_5713.pdf
6. http://www.statemaster.com/encyclopedia/Quartic-equation
7. eqworld.ipmnet.ru/en/solutions/ae/ae0108.pdf
"""
_, a, b, c, d = f.monic().all_coeffs()
if not d:
return [S.Zero] + roots([1, a, b, c], multiple=True)
elif (c/a)**2 == d:
x, m = f.gen, c/a
g = Poly(x**2 + a*x + b - 2*m, x)
z1, z2 = roots_quadratic(g)
h1 = Poly(x**2 - z1*x + m, x)
h2 = Poly(x**2 - z2*x + m, x)
r1 = roots_quadratic(h1)
r2 = roots_quadratic(h2)
return r1 + r2
else:
a2 = a**2
e = b - 3*a2/8
f = c + a*(a2/8 - b/2)
g = d - a*(a*(3*a2/256 - b/16) + c/4)
aon4 = a/4
if f is S.Zero:
y1, y2 = [sqrt(tmp) for tmp in
roots([1, e, g], multiple=True)]
return [tmp - aon4 for tmp in [-y1, -y2, y1, y2]]
if g is S.Zero:
y = [S.Zero] + roots([1, 0, e, f], multiple=True)
return [tmp - aon4 for tmp in y]
else:
# Descartes-Euler method, see [7]
sols = _roots_quartic_euler(e, f, g, aon4)
if sols:
return sols
# Ferrari method, see [1, 2]
a2 = a**2
e = b - 3*a2/8
f = c + a*(a2/8 - b/2)
g = d - a*(a*(3*a2/256 - b/16) + c/4)
p = -e**2/12 - g
q = -e**3/108 + e*g/3 - f**2/8
TH = Rational(1, 3)
def _ans(y):
w = sqrt(e + 2*y)
arg1 = 3*e + 2*y
arg2 = 2*f/w
ans = []
for s in [-1, 1]:
root = sqrt(-(arg1 + s*arg2))
for t in [-1, 1]:
ans.append((s*w - t*root)/2 - aon4)
return ans
# p == 0 case
y1 = -5*e/6 - q**TH
if p.is_zero:
return _ans(y1)
# if p != 0 then u below is not 0
root = sqrt(q**2/4 + p**3/27)
r = -q/2 + root # or -q/2 - root
u = r**TH # primary root of solve(x**3 - r, x)
y2 = -5*e/6 + u - p/u/3
if p.is_nonzero:
return _ans(y2)
# sort it out once they know the values of the coefficients
return [Piecewise((a1, Eq(p, 0)), (a2, True))
for a1, a2 in zip(_ans(y1), _ans(y2))]
def roots_binomial(f):
"""Returns a list of roots of a binomial polynomial."""
n = f.degree()
a, b = f.nth(n), f.nth(0)
alpha = (-cancel(b/a))**Rational(1, n)
if alpha.is_number:
alpha = alpha.expand(complex=True)
roots, I = [], S.ImaginaryUnit
for k in xrange(n):
zeta = exp(2*k*S.Pi*I/n).expand(complex=True)
roots.append((alpha*zeta).expand(power_base=False))
return sorted(roots, key=default_sort_key)
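# Illustrative usage sketch (added for clarity, not part of the original
# module): a binomial a*x**n + b is solved by scaling the n-th roots of unity
# by (-b/a)**(1/n), e.g.
#
#     from sympy import Poly
#     from sympy.abc import x
#     roots_binomial(Poly(x**4 - 16, x))
#     # -> the four fourth roots of 16: 2, -2, 2*I and -2*I
#     #    (returned sorted by default_sort_key)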
def _inv_totient_estimate(m):
"""
Find ``(L, U)`` such that ``L <= phi^-1(m) <= U``.
Examples
========
>>> from sympy.polys.polyroots import _inv_totient_estimate
>>> _inv_totient_estimate(192)
(192, 840)
>>> _inv_totient_estimate(400)
(400, 1750)
"""
primes = [ d + 1 for d in divisors(m) if isprime(d + 1) ]
a, b = 1, 1
for p in primes:
a *= p
b *= p - 1
L = m
U = int(math.ceil(m*(float(a)/b)))
P = p = 2
primes = []
while P <= U:
p = nextprime(p)
primes.append(p)
P *= p
P //= p
b = 1
for p in primes[:-1]:
b *= p - 1
U = int(math.ceil(m*(float(P)/b)))
return L, U
def roots_cyclotomic(f, factor=False):
"""Compute roots of cyclotomic polynomials. """
L, U = _inv_totient_estimate(f.degree())
for n in xrange(L, U + 1):
g = cyclotomic_poly(n, f.gen, polys=True)
if f == g:
break
else: # pragma: no cover
raise RuntimeError("failed to find index of a cyclotomic polynomial")
roots = []
if not factor:
for k in xrange(1, n + 1):
if igcd(k, n) == 1:
roots.append(exp(2*k*S.Pi*I/n).expand(complex=True))
else:
g = Poly(f, extension=(-1)**Rational(1, n))
for h, _ in g.factor_list()[1]:
roots.append(-h.TC())
return sorted(roots, key=default_sort_key)
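# Illustrative usage sketch (added for clarity, not part of the original
# module): x**2 + 1 is the 4th cyclotomic polynomial, so its roots are the
# primitive 4th roots of unity, e.g.
#
#     from sympy import Poly
#     from sympy.abc import x
#     roots_cyclotomic(Poly(x**2 + 1, x))
#     # -> the primitive 4th roots of unity, I and -I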
def roots_quintic(f):
"""
    Calculate exact roots of a solvable quintic
"""
result = []
coeff_5, coeff_4, p, q, r, s = f.all_coeffs()
# Eqn must be of the form x^5 + px^3 + qx^2 + rx + s
if coeff_4:
return result
if coeff_5 != 1:
l = [p/coeff_5, q/coeff_5, r/coeff_5, s/coeff_5]
if not all(coeff.is_Rational for coeff in l):
return result
f = Poly(f/coeff_5)
quintic = PolyQuintic(f)
# Eqn standardized. Algo for solving starts here
if not f.is_irreducible:
return result
f20 = quintic.f20
# Check if f20 has linear factors over domain Z
if f20.is_irreducible:
return result
# Now, we know that f is solvable
for _factor in f20.factor_list()[1]:
if _factor[0].is_linear:
theta = _factor[0].root(0)
break
d = discriminant(f)
delta = sqrt(d)
# zeta = a fifth root of unity
zeta1, zeta2, zeta3, zeta4 = quintic.zeta
T = quintic.T(theta, d)
tol = S(1e-10)
alpha = T[1] + T[2]*delta
alpha_bar = T[1] - T[2]*delta
beta = T[3] + T[4]*delta
beta_bar = T[3] - T[4]*delta
disc = alpha**2 - 4*beta
disc_bar = alpha_bar**2 - 4*beta_bar
l0 = quintic.l0(theta)
l1 = _quintic_simplify((-alpha + sqrt(disc)) / S(2))
l4 = _quintic_simplify((-alpha - sqrt(disc)) / S(2))
l2 = _quintic_simplify((-alpha_bar + sqrt(disc_bar)) / S(2))
l3 = _quintic_simplify((-alpha_bar - sqrt(disc_bar)) / S(2))
order = quintic.order(theta, d)
test = (order*delta.n()) - ( (l1.n() - l4.n())*(l2.n() - l3.n()) )
# Comparing floats
# Problems importing on top
from sympy.utilities.randtest import comp
if not comp(test, 0, tol):
l2, l3 = l3, l2
# Now we have correct order of l's
R1 = l0 + l1*zeta1 + l2*zeta2 + l3*zeta3 + l4*zeta4
R2 = l0 + l3*zeta1 + l1*zeta2 + l4*zeta3 + l2*zeta4
R3 = l0 + l2*zeta1 + l4*zeta2 + l1*zeta3 + l3*zeta4
R4 = l0 + l4*zeta1 + l3*zeta2 + l2*zeta3 + l1*zeta4
Res = [None, [None]*5, [None]*5, [None]*5, [None]*5]
Res_n = [None, [None]*5, [None]*5, [None]*5, [None]*5]
sol = Symbol('sol')
    # Simplifying improves performance a lot for exact expressions
R1 = _quintic_simplify(R1)
R2 = _quintic_simplify(R2)
R3 = _quintic_simplify(R3)
R4 = _quintic_simplify(R4)
# Solve imported here. Causing problems if imported as 'solve'
# and hence the changed name
from sympy.solvers.solvers import solve as _solve
a, b = symbols('a b', cls=Dummy)
_sol = _solve( sol**5 - a - I*b, sol)
for i in range(5):
_sol[i] = factor(_sol[i])
R1 = R1.as_real_imag()
R2 = R2.as_real_imag()
R3 = R3.as_real_imag()
R4 = R4.as_real_imag()
for i, root in enumerate(_sol):
Res[1][i] = _quintic_simplify(root.subs({ a: R1[0], b: R1[1] }))
Res[2][i] = _quintic_simplify(root.subs({ a: R2[0], b: R2[1] }))
Res[3][i] = _quintic_simplify(root.subs({ a: R3[0], b: R3[1] }))
Res[4][i] = _quintic_simplify(root.subs({ a: R4[0], b: R4[1] }))
for i in range(1, 5):
for j in range(5):
Res_n[i][j] = Res[i][j].n()
Res[i][j] = _quintic_simplify(Res[i][j])
r1 = Res[1][0]
r1_n = Res_n[1][0]
for i in range(5):
if comp(im(r1_n*Res_n[4][i]), 0, tol):
r4 = Res[4][i]
break
u, v = quintic.uv(theta, d)
sqrt5 = math.sqrt(5)
# Now we have various Res values. Each will be a list of five
# values. We have to pick one r value from those five for each Res
u, v = quintic.uv(theta, d)
testplus = (u + v*delta*sqrt(5)).n()
testminus = (u - v*delta*sqrt(5)).n()
# Evaluated numbers suffixed with _n
# We will use evaluated numbers for calculation. Much faster.
r4_n = r4.n()
r2 = r3 = None
for i in range(5):
r2temp_n = Res_n[2][i]
for j in range(5):
# Again storing away the exact number and using
# evaluated numbers in computations
r3temp_n = Res_n[3][j]
if( comp( r1_n*r2temp_n**2 + r4_n*r3temp_n**2 - testplus, 0, tol) and
comp( r3temp_n*r1_n**2 + r2temp_n*r4_n**2 - testminus, 0, tol ) ):
r2 = Res[2][i]
r3 = Res[3][j]
break
if r2:
break
# Now, we have r's so we can get roots
x1 = (r1 + r2 + r3 + r4)/5
x2 = (r1*zeta4 + r2*zeta3 + r3*zeta2 + r4*zeta1)/5
x3 = (r1*zeta3 + r2*zeta1 + r3*zeta4 + r4*zeta2)/5
x4 = (r1*zeta2 + r2*zeta4 + r3*zeta1 + r4*zeta3)/5
x5 = (r1*zeta1 + r2*zeta2 + r3*zeta3 + r4*zeta4)/5
result = [x1, x2, x3, x4, x5]
# Now check if solutions are distinct
result_n = []
for root in result:
result_n.append(root.n(5))
result_n = sorted(result_n, key=default_sort_key)
prev_entry = None
for r in result_n:
if r == prev_entry:
# Roots are identical. Abort. Return []
# and fall back to usual solve
return []
prev_entry = r
return result
def _quintic_simplify(expr):
expr = powsimp(expr)
expr = cancel(expr)
return together(expr)
def _integer_basis(poly):
"""Compute coefficient basis for a polynomial over integers.
    Returns the integer ``div`` such that substituting ``x = div*y``
    gives ``p(x) = m*q(y)``, where the coefficients of ``q`` are smaller
    than those of ``p``.
For example ``x**5 + 512*x + 1024 = 0``
with ``div = 4`` becomes ``y**5 + 2*y + 1 = 0``
Returns the integer ``div`` or ``None`` if there is no possible scaling.
Examples
========
>>> from sympy.polys import Poly
>>> from sympy.abc import x
>>> from sympy.polys.polyroots import _integer_basis
>>> p = Poly(x**5 + 512*x + 1024, x, domain='ZZ')
>>> _integer_basis(p)
4
"""
monoms, coeffs = list(zip(*poly.terms()))
monoms, = list(zip(*monoms))
coeffs = list(map(abs, coeffs))
if coeffs[0] < coeffs[-1]:
coeffs = list(reversed(coeffs))
n = monoms[0]
monoms = [n - i for i in reversed(monoms)]
else:
return None
monoms = monoms[:-1]
coeffs = coeffs[:-1]
divs = reversed(divisors(gcd_list(coeffs))[1:])
try:
div = next(divs)
except StopIteration:
return None
while True:
for monom, coeff in zip(monoms, coeffs):
if coeff % div**monom != 0:
try:
div = next(divs)
except StopIteration:
return None
else:
break
else:
return div
def preprocess_roots(poly):
"""Try to get rid of symbolic coefficients from ``poly``. """
coeff = S.One
try:
_, poly = poly.clear_denoms(convert=True)
except DomainError:
return coeff, poly
poly = poly.primitive()[1]
poly = poly.retract()
# TODO: This is fragile. Figure out how to make this independent of construct_domain().
if poly.get_domain().is_Poly and all(c.is_term for c in poly.rep.coeffs()):
poly = poly.inject()
strips = list(zip(*poly.monoms()))
gens = list(poly.gens[1:])
base, strips = strips[0], strips[1:]
for gen, strip in zip(list(gens), strips):
reverse = False
if strip[0] < strip[-1]:
strip = reversed(strip)
reverse = True
ratio = None
for a, b in zip(base, strip):
if not a and not b:
continue
elif not a or not b:
break
elif b % a != 0:
break
else:
_ratio = b // a
if ratio is None:
ratio = _ratio
elif ratio != _ratio:
break
else:
if reverse:
ratio = -ratio
poly = poly.eval(gen, 1)
coeff *= gen**(-ratio)
gens.remove(gen)
if gens:
poly = poly.eject(*gens)
if poly.is_univariate and poly.get_domain().is_ZZ:
basis = _integer_basis(poly)
if basis is not None:
n = poly.degree()
def func(k, coeff):
return coeff//basis**(n - k[0])
poly = poly.termwise(func)
coeff *= basis
return coeff, poly
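# Illustrative usage sketch (added for clarity, not part of the original
# module): preprocess_roots() returns a scale factor ``coeff`` and a simpler
# polynomial whose roots, multiplied by ``coeff``, are the roots of the
# original.  Using the _integer_basis example above:
#
#     from sympy import Poly
#     from sympy.abc import x
#     coeff, g = preprocess_roots(Poly(x**5 + 512*x + 1024, x))
#     # coeff == 4 and g is x**5 + 2*x + 1, so every root of the original
#     # polynomial is 4 times a root of g.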
@public
def roots(f, *gens, **flags):
"""
Computes symbolic roots of a univariate polynomial.
Given a univariate polynomial f with symbolic coefficients (or
a list of the polynomial's coefficients), returns a dictionary
with its roots and their multiplicities.
Only roots expressible via radicals will be returned. To get
a complete set of roots use RootOf class or numerical methods
instead. By default cubic and quartic formulas are used in
the algorithm. To disable them because of unreadable output
set ``cubics=False`` or ``quartics=False`` respectively. If cubic
roots are real but are expressed in terms of complex numbers
(casus irreducibilis [1]) the ``trig`` flag can be set to True to
have the solutions returned in terms of cosine and inverse cosine
functions.
To get roots from a specific domain set the ``filter`` flag with
one of the following specifiers: Z, Q, R, I, C. By default all
roots are returned (this is equivalent to setting ``filter='C'``).
By default a dictionary is returned giving a compact result in
case of multiple roots. However to get a tuple containing all
those roots set the ``multiple`` flag to True.
Examples
========
>>> from sympy import Poly, roots
>>> from sympy.abc import x, y
>>> roots(x**2 - 1, x)
{-1: 1, 1: 1}
>>> p = Poly(x**2-1, x)
>>> roots(p)
{-1: 1, 1: 1}
>>> p = Poly(x**2-y, x, y)
>>> roots(Poly(p, x))
{-sqrt(y): 1, sqrt(y): 1}
>>> roots(x**2 - y, x)
{-sqrt(y): 1, sqrt(y): 1}
>>> roots([1, 0, -1])
{-1: 1, 1: 1}
References
==========
1. http://en.wikipedia.org/wiki/Cubic_function#Trigonometric_.28and_hyperbolic.29_method
"""
from sympy.polys.polytools import to_rational_coeffs
flags = dict(flags)
auto = flags.pop('auto', True)
cubics = flags.pop('cubics', True)
trig = flags.pop('trig', False)
quartics = flags.pop('quartics', True)
quintics = flags.pop('quintics', False)
multiple = flags.pop('multiple', False)
filter = flags.pop('filter', None)
predicate = flags.pop('predicate', None)
if isinstance(f, list):
if gens:
raise ValueError('redundant generators given')
x = Dummy('x')
poly, i = {}, len(f) - 1
for coeff in f:
poly[i], i = sympify(coeff), i - 1
f = Poly(poly, x, field=True)
else:
try:
f = Poly(f, *gens, **flags)
except GeneratorsNeeded:
if multiple:
return []
else:
return {}
if f.is_multivariate:
raise PolynomialError('multivariate polynomials are not supported')
def _update_dict(result, root, k):
if root in result:
result[root] += k
else:
result[root] = k
def _try_decompose(f):
"""Find roots using functional decomposition. """
factors, roots = f.decompose(), []
for root in _try_heuristics(factors[0]):
roots.append(root)
for factor in factors[1:]:
previous, roots = list(roots), []
for root in previous:
g = factor - Poly(root, f.gen)
for root in _try_heuristics(g):
roots.append(root)
return roots
def _try_heuristics(f):
"""Find roots using formulas and some tricks. """
if f.is_ground:
return []
if f.is_monomial:
return [S(0)]*f.degree()
if f.length() == 2:
if f.degree() == 1:
return list(map(cancel, roots_linear(f)))
else:
return roots_binomial(f)
result = []
for i in [-1, 1]:
if not f.eval(i):
f = f.quo(Poly(f.gen - i, f.gen))
result.append(i)
break
n = f.degree()
if n == 1:
result += list(map(cancel, roots_linear(f)))
elif n == 2:
result += list(map(cancel, roots_quadratic(f)))
elif f.is_cyclotomic:
result += roots_cyclotomic(f)
elif n == 3 and cubics:
result += roots_cubic(f, trig=trig)
elif n == 4 and quartics:
result += roots_quartic(f)
elif n == 5 and quintics:
result += roots_quintic(f)
return result
(k,), f = f.terms_gcd()
if not k:
zeros = {}
else:
zeros = {S(0): k}
coeff, f = preprocess_roots(f)
if auto and f.get_domain().has_Ring:
f = f.to_field()
rescale_x = None
translate_x = None
result = {}
if not f.is_ground:
if not f.get_domain().is_Exact:
for r in f.nroots():
_update_dict(result, r, 1)
elif f.degree() == 1:
result[roots_linear(f)[0]] = 1
elif f.degree() == 2:
for r in roots_quadratic(f):
_update_dict(result, r, 1)
elif f.length() == 2:
for r in roots_binomial(f):
_update_dict(result, r, 1)
else:
_, factors = Poly(f.as_expr()).factor_list()
if len(factors) == 1 and factors[0][1] == 1:
if f.get_domain().is_EX:
res = to_rational_coeffs(f)
if res:
if res[0] is None:
translate_x, f = res[2:]
else:
rescale_x, f = res[1], res[-1]
result = roots(f)
if not result:
for root in _try_decompose(f):
_update_dict(result, root, 1)
else:
for root in _try_decompose(f):
_update_dict(result, root, 1)
else:
for factor, k in factors:
for r in _try_heuristics(Poly(factor, f.gen, field=True)):
_update_dict(result, r, k)
if coeff is not S.One:
_result, result, = result, {}
for root, k in _result.items():
result[coeff*root] = k
result.update(zeros)
if filter not in [None, 'C']:
handlers = {
'Z': lambda r: r.is_Integer,
'Q': lambda r: r.is_Rational,
'R': lambda r: r.is_real,
'I': lambda r: r.is_imaginary,
}
try:
query = handlers[filter]
except KeyError:
raise ValueError("Invalid filter: %s" % filter)
for zero in dict(result).keys():
if not query(zero):
del result[zero]
if predicate is not None:
for zero in dict(result).keys():
if not predicate(zero):
del result[zero]
if rescale_x:
result1 = {}
for k, v in result.items():
result1[k*rescale_x] = v
result = result1
if translate_x:
result1 = {}
for k, v in result.items():
result1[k + translate_x] = v
result = result1
if not multiple:
return result
else:
zeros = []
for zero, k in result.items():
zeros.extend([zero]*k)
return sorted(zeros, key=default_sort_key)
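# Illustrative usage sketch (added for clarity, not part of the original
# module): the ``filter`` and ``multiple`` flags described in the docstring
# behave as follows, e.g.
#
#     from sympy.abc import x
#     roots(x**4 - 1, x, filter='R')     # -> the two real roots, {-1: 1, 1: 1}
#     roots(x**4 - 1, x, multiple=True)  # -> all four roots as a flat list,
#                                        #    ordered by default_sort_key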
def root_factors(f, *gens, **args):
"""
Returns all factors of a univariate polynomial.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.polys.polyroots import root_factors
>>> root_factors(x**2 - y, x)
[x - sqrt(y), x + sqrt(y)]
"""
args = dict(args)
filter = args.pop('filter', None)
F = Poly(f, *gens, **args)
if not F.is_Poly:
return [f]
if F.is_multivariate:
raise ValueError('multivariate polynomials not supported')
x = F.gens[0]
zeros = roots(F, filter=filter)
if not zeros:
factors = [F]
else:
factors, N = [], 0
for r, n in zeros.items():
factors, N = factors + [Poly(x - r, x)]*n, N + n
if N < F.degree():
G = reduce(lambda p, q: p*q, factors)
factors.append(F.quo(G))
if not isinstance(f, Poly):
factors = [ f.as_expr() for f in factors ]
return sorted(factors, key=default_sort_key)
|
|
import json
import sys
from sharepa.search import ShareSearch
from sharepa.experimental_analysis_functions import convert_nested_to_dataframe
from elasticsearch_dsl.utils import AttrDict
from mock import Mock
def pretty_print(d):
print(json.dumps(d, indent=4))
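# Small usage sketch (an assumption added for clarity, not from the original
# tests): pretty_print() is just a debugging helper that dumps a dict as
# indented JSON, e.g.
#
#   pretty_print({'key': 'water', 'doc_count': 3})
#   # prints the dict over several lines with 4-space indentation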
def test_convert_nested_to_dataframe_crossed():
my_search = ShareSearch() # BASE_URL='https://staging.osf.io/api/v1/share/search/')
# first we test crossed data
my_search.aggs.bucket(
'tags', # Every aggregation needs a name
'terms',
field='tags',
        # We store the source of a document in its type, so this will aggregate by source  #BYNOTE so this looks at the type field and aggregates by that?
size=3, # These are just to make sure we get numbers for all the sources, to make it easier to combine graphs
min_doc_count=0,
).metric(
'source',
'terms',
field='source',
size=3,
min_doc_count=0
).metric(
'dates',
'date_histogram',
field='providerUpdatedDateTime',
interval='1y',
format='yyyy-MM-dd',
extended_bounds={
"min": "2014-01-01",
"max": "2015-01-01"},
min_doc_count=0
)
search_mock = AttrDict({u'aggregations':{u'tags': {u'buckets':
[{u'dates': {u'buckets': [{u'doc_count': 5,
u'key': 1388534400000,
u'key_as_string': u'2014-01-01'},
{u'doc_count': 15776,
u'key': 1420070400000,
u'key_as_string': u'2015-01-01'}]},
u'doc_count': 15781,
u'key': u'water',
u'source': {u'buckets': [{u'doc_count': 15760, u'key': u'dataone'},
{u'doc_count': 21, u'key': u'clinicaltrials'},
{u'doc_count': 0, u'key': u'arxiv_oai'}],
u'doc_count_error_upper_bound': 0,
u'sum_other_doc_count': 0}},
{u'dates': {u'buckets': [{u'doc_count': 0,
u'key': 1388534400000,
u'key_as_string': u'2014-01-01'},
{u'doc_count': 15505,
u'key': 1420070400000,
u'key_as_string': u'2015-01-01'}]},
u'doc_count': 15505,
u'key': u'california',
u'source': {u'buckets': [{u'doc_count': 15505, u'key': u'dataone'},
{u'doc_count': 0, u'key': u'arxiv_oai'},
{u'doc_count': 0, u'key': u'asu'}],
u'doc_count_error_upper_bound': 0,
u'sum_other_doc_count': 0}},
{u'dates': {u'buckets': [{u'doc_count': 1,
u'key': 1388534400000,
u'key_as_string': u'2014-01-01'},
{u'doc_count': 14825,
u'key': 1420070400000,
u'key_as_string': u'2015-01-01'}]},
u'doc_count': 14826,
u'key': u'county',
u'source': {u'buckets': [{u'doc_count': 14825, u'key': u'dataone'},
{u'doc_count': 1, u'key': u'clinicaltrials'},
{u'doc_count': 0, u'key': u'arxiv_oai'}],
u'doc_count_error_upper_bound': 0,
u'sum_other_doc_count': 0}}],
u'doc_count_error_upper_bound': 5860,
u'sum_other_doc_count': 706643}}})
my_search.execute = Mock(return_value=search_mock)
my_results = my_search.execute()
my_dataframe = convert_nested_to_dataframe(my_results.aggregations)
assert my_dataframe.shape == (9, 5)
for tag_buckets in my_results.aggregations.tags.buckets:
assert tag_buckets.key in my_dataframe['tags'].values.tolist()
for source_buckets in tag_buckets.source.buckets:
            assert source_buckets.key in my_dataframe['source'].values.tolist()
for dates_buckets in tag_buckets.dates.buckets:
            assert dates_buckets.key_as_string in my_dataframe['dates'].values.tolist()
def test_convert_nested_to_dataframe_nested():
my_search = ShareSearch()
my_search.aggs.bucket(
'tags', # Every aggregation needs a name
'terms',
field='tags',
        # We store the source of a document in its type, so this will aggregate by source  #BYNOTE so this looks at the type field and aggregates by that?
size=3, # These are just to make sure we get numbers for all the sources, to make it easier to combine graphs
min_doc_count=0,
).bucket(
'source',
'terms',
field='source',
size=3,
min_doc_count=0
).bucket(
'dates',
'date_histogram',
field='providerUpdatedDateTime',
interval='1y',
format='yyyy-MM-dd',
extended_bounds={
"min": "2014-11-01",
"max": "2015-01-01"},
min_doc_count=0
)
search_mock = AttrDict({u'aggregations':
{u'tags': {u'buckets': [{u'doc_count': 15781,
u'key': u'water',
u'source': {u'buckets': [
{u'dates': {u'buckets':
[{u'doc_count': 0,
u'key': 1388534400000,
u'key_as_string': u'2014-01-01'},
{u'doc_count': 15760,
u'key': 1420070400000,
u'key_as_string': u'2015-01-01'}
]},
u'doc_count': 15760,
u'key': u'dataone'},
{u'dates': {u'buckets':
[{u'doc_count': 5,
u'key': 1388534400000,
u'key_as_string': u'2014-01-01'},
{u'doc_count': 16,
u'key': 1420070400000,
u'key_as_string': u'2015-01-01'}
]},
u'doc_count': 21,
u'key': u'clinicaltrials'},
{u'dates': {u'buckets':
[{u'doc_count': 0,
u'key': 1388534400000,
u'key_as_string': u'2014-01-01'},
{u'doc_count': 0,
u'key': 1420070400000,
u'key_as_string': u'2015-01-01'}
]},
u'doc_count': 0,
u'key': u'arxiv_oai'}],
u'doc_count_error_upper_bound': 0,
u'sum_other_doc_count': 0}},
{u'doc_count': 15505,
u'key': u'california',
u'source': {u'buckets': [{u'dates': {u'buckets': [{u'doc_count': 0,
u'key': 1388534400000,
u'key_as_string': u'2014-01-01'},
{u'doc_count': 15505,
u'key': 1420070400000,
u'key_as_string': u'2015-01-01'}]},
u'doc_count': 15505,
u'key': u'dataone'},
{u'dates': {u'buckets': [{u'doc_count': 0,
u'key': 1388534400000,
u'key_as_string': u'2014-01-01'},
{u'doc_count': 0,
u'key': 1420070400000,
u'key_as_string': u'2015-01-01'}]},
u'doc_count': 0,
u'key': u'arxiv_oai'},
{u'dates': {u'buckets': [{u'doc_count': 0,
u'key': 1388534400000,
u'key_as_string': u'2014-01-01'},
{u'doc_count': 0,
u'key': 1420070400000,
u'key_as_string': u'2015-01-01'}]},
u'doc_count': 0,
u'key': u'asu'}],
u'doc_count_error_upper_bound': 0,
u'sum_other_doc_count': 0}},
{u'doc_count': 14826,
u'key': u'county',
u'source': {u'buckets': [{u'dates': {u'buckets': [{u'doc_count': 0,
u'key': 1388534400000,
u'key_as_string': u'2014-01-01'},
{u'doc_count': 14825,
u'key': 1420070400000,
u'key_as_string': u'2015-01-01'}]},
u'doc_count': 14825,
u'key': u'dataone'},
{u'dates': {u'buckets': [{u'doc_count': 1,
u'key': 1388534400000,
u'key_as_string': u'2014-01-01'},
{u'doc_count': 0,
u'key': 1420070400000,
u'key_as_string': u'2015-01-01'}]},
u'doc_count': 1,
u'key': u'clinicaltrials'},
{u'dates': {u'buckets': [{u'doc_count': 0,
u'key': 1388534400000,
u'key_as_string': u'2014-01-01'},
{u'doc_count': 0,
u'key': 1420070400000,
u'key_as_string': u'2015-01-01'}]},
u'doc_count': 0,
u'key': u'arxiv_oai'}],
u'doc_count_error_upper_bound': 0,
u'sum_other_doc_count': 0}}],
u'doc_count_error_upper_bound': 5860,
u'sum_other_doc_count': 706643}
}})
my_search.execute = Mock(return_value=search_mock)
my_results = my_search.execute()
my_dataframe = convert_nested_to_dataframe(my_results.aggregations)
assert my_dataframe.shape == (18, 4)
for tag_buckets in my_results.aggregations.tags.buckets:
assert tag_buckets.key in my_dataframe['tags'].values.tolist()
for source_buckets in tag_buckets.source.buckets:
assert source_buckets.key in my_dataframe['source'].values.tolist()
for dates_buckets in source_buckets.dates.buckets:
                assert dates_buckets.key_as_string in my_dataframe['dates'].values.tolist()
def test_convert_nested_to_dataframe_raise_ValueError():
return
    # FIXME: currently this search breaks sharepa, not sure why, but it is needed to raise the ValueError
my_search = ShareSearch() # BASE_URL='https://staging.osf.io/api/v1/share/search/')
# first we test crossed data
my_search.aggs.bucket(
'tags', # Every aggregation needs a name
'terms',
field='tags',
size=3,
min_doc_count=0,
).bucket(
'source',
'terms',
field='source',
size=3,
min_doc_count=0
).bucket(
'tags2',
'terms',
field='tags',
size=10,
min_doc_count=0
).bucket(
'dates',
'date_histogram',
field='providerUpdatedDateTime',
interval='1y',
format='yyyy-MM-dd',
extended_bounds={
"min": "2014-01-01",
"max": "2015-01-01"},
min_doc_count=0
)
#TODO create Mock return object for my_search.execute() here
my_results = my_search.execute()
my_dataframe = convert_nested_to_dataframe(my_results.aggregations)
print(my_dataframe)
|
|
'''
Selection tests
===============
'''
import unittest
from kivy.uix.widget import Widget
from kivy.uix.listview import ListView, ListItemButton
from kivy.properties import NumericProperty, StringProperty
from kivy.adapters.listadapter import ListAdapter
from kivy.adapters.dictadapter import DictAdapter
from kivy.adapters.models import SelectableDataItem
# The following integers_dict and fruit categories / fruit data dictionaries
# are from kivy/examples/widgets/lists/fixtures.py, and the classes are from
# examples there.
# ----------------------------------------------------------------------------
# A dictionary of dicts, with only the minimum required is_selected attribute,
# for use with examples using a simple list of integers in a list view.
integers_dict = \
{str(i): {'text': str(i), 'is_selected': False} for i in range(100)}
# ----------------------------------------------------------------------------
# A dataset of fruit category and fruit data for use in examples.
#
# Data from http://www.fda.gov/Food/LabelingNutrition/\
# FoodLabelingGuidanceRegulatoryInformation/\
# InformationforRestaurantsRetailEstablishments/\
# ucm063482.htm
#
fruit_categories = \
{'Melons': {'name': 'Melons',
'fruits': ['Cantaloupe', 'Honeydew', 'Watermelon'],
'is_selected': False},
'Tree Fruits': {'name': 'Tree Fruits',
'fruits': ['Apple', 'Avocado', 'Banana', 'Nectarine',
'Peach', 'Pear', 'Pineapple', 'Plum',
'Cherry'],
'is_selected': False},
'Citrus Fruits': {'name': 'Citrus Fruits',
'fruits': ['Grapefruit', 'Lemon', 'Lime', 'Orange',
'Tangerine'],
'is_selected': False},
'Other Fruits': {'name': 'Other Fruits',
'fruits': ['Grape', 'Kiwifruit',
'Strawberry'],
'is_selected': False}}
fruit_data_list_of_dicts = \
[{'name':'Apple',
'Serving Size': '1 large (242 g/8 oz)',
'data': [130, 0, 0, 0, 0, 0, 260, 7, 34, 11, 5, 20, 25, 1, 2, 8, 2, 2],
'is_selected': False},
{'name':'Avocado',
'Serving Size': '1/5 medium (30 g/1.1 oz)',
'data': [50, 35, 4.5, 7, 0, 0, 140, 4, 3, 1, 1, 4, 0, 1, 0, 4, 0, 2],
'is_selected': False},
{'name':'Banana',
'Serving Size': '1 medium (126 g/4.5 oz)',
'data': [110, 0, 0, 0, 0, 0, 450, 13, 30, 10, 3, 12, 19, 1, 2, 15, 0, 2],
'is_selected': False},
{'name':'Cantaloupe',
'Serving Size': '1/4 medium (134 g/4.8 oz)',
'data': [50, 0, 0, 0, 20, 1, 240, 7, 12, 4, 1, 4, 11, 1, 120, 80, 2, 2],
'is_selected': False},
{'name':'Grapefruit',
'Serving Size': '1/2 medium (154 g/5.5 oz)',
'data': [60, 0, 0, 0, 0, 0, 160, 5, 15, 5, 2, 8, 11, 1, 35, 100, 4, 0],
'is_selected': False},
{'name':'Grape',
'Serving Size': '3/4 cup (126 g/4.5 oz)',
'data': [90, 0, 0, 0, 15, 1, 240, 7, 23, 8, 1, 4, 20, 0, 0, 2, 2, 0],
'is_selected': False},
{'name':'Honeydew',
'Serving Size': '1/10 medium melon (134 g/4.8 oz)',
'data': [50, 0, 0, 0, 30, 1, 210, 6, 12, 4, 1, 4, 11, 1, 2, 45, 2, 2],
'is_selected': False},
{'name':'Kiwifruit',
'Serving Size': '2 medium (148 g/5.3 oz)',
'data': [90, 10, 1, 2, 0, 0, 450, 13, 20, 7, 4, 16, 13, 1, 2, 240, 4, 2],
'is_selected': False},
{'name':'Lemon',
'Serving Size': '1 medium (58 g/2.1 oz)',
'data': [15, 0, 0, 0, 0, 0, 75, 2, 5, 2, 2, 8, 2, 0, 0, 40, 2, 0],
'is_selected': False},
{'name':'Lime',
'Serving Size': '1 medium (67 g/2.4 oz)',
'data': [20, 0, 0, 0, 0, 0, 75, 2, 7, 2, 2, 8, 0, 0, 0, 35, 0, 0],
'is_selected': False},
{'name':'Nectarine',
'Serving Size': '1 medium (140 g/5.0 oz)',
'data': [60, 5, 0.5, 1, 0, 0, 250, 7, 15, 5, 2, 8, 11, 1, 8, 15, 0, 2],
'is_selected': False},
{'name':'Orange',
'Serving Size': '1 medium (154 g/5.5 oz)',
'data': [80, 0, 0, 0, 0, 0, 250, 7, 19, 6, 3, 12, 14, 1, 2, 130, 6, 0],
'is_selected': False},
{'name':'Peach',
'Serving Size': '1 medium (147 g/5.3 oz)',
'data': [60, 0, 0.5, 1, 0, 0, 230, 7, 15, 5, 2, 8, 13, 1, 6, 15, 0, 2],
'is_selected': False},
{'name':'Pear',
'Serving Size': '1 medium (166 g/5.9 oz)',
'data': [100, 0, 0, 0, 0, 0, 190, 5, 26, 9, 6, 24, 16, 1, 0, 10, 2, 0],
'is_selected': False},
{'name':'Pineapple',
'Serving Size': '2 slices, 3" diameter, 3/4" thick (112 g/4 oz)',
'data': [50, 0, 0, 0, 10, 0, 120, 3, 13, 4, 1, 4, 10, 1, 2, 50, 2, 2],
'is_selected': False},
{'name':'Plum',
'Serving Size': '2 medium (151 g/5.4 oz)',
'data': [70, 0, 0, 0, 0, 0, 230, 7, 19, 6, 2, 8, 16, 1, 8, 10, 0, 2],
'is_selected': False},
{'name':'Strawberry',
'Serving Size': '8 medium (147 g/5.3 oz)',
'data': [50, 0, 0, 0, 0, 0, 170, 5, 11, 4, 2, 8, 8, 1, 0, 160, 2, 2],
'is_selected': False},
{'name':'Cherry',
'Serving Size': '21 cherries; 1 cup (140 g/5.0 oz)',
'data': [100, 0, 0, 0, 0, 0, 350, 10, 26, 9, 1, 4, 16, 1, 2, 15, 2, 2],
'is_selected': False},
{'name':'Tangerine',
'Serving Size': '1 medium (109 g/3.9 oz)',
'data': [50, 0, 0, 0, 0, 0, 160, 5, 13, 4, 2, 8, 9, 1, 6, 45, 4, 0],
'is_selected': False},
{'name':'Watermelon',
'Serving Size': '1/18 medium melon; 2 cups diced pieces (280 g/10.0 oz)',
'data': [80, 0, 0, 0, 0, 0, 270, 8, 21, 7, 1, 4, 20, 1, 30, 25, 2, 4],
'is_selected': False}]
fruit_data_attributes = ['(gram weight/ ounce weight)',
'Calories',
'Calories from Fat',
'Total Fat',
'Sodium',
'Potassium',
'Total Carbo-hydrate',
'Dietary Fiber',
'Sugars',
'Protein',
'Vitamin A',
'Vitamin C',
'Calcium',
'Iron']
fruit_data_attribute_units = ['(g)',
'(%DV)',
'(mg)',
'(%DV)',
'(mg)',
'(%DV)',
'(g)',
'(%DV)',
'(g)(%DV)',
'(g)',
'(g)',
'(%DV)',
'(%DV)',
'(%DV)',
'(%DV)']
attributes_and_units = \
dict(list(zip(fruit_data_attributes, fruit_data_attribute_units)))
fruit_data = {}
for fruit_record in fruit_data_list_of_dicts:
fruit_data[fruit_record['name']] = {}
fruit_data[fruit_record['name']] = \
dict({'name': fruit_record['name'],
'Serving Size': fruit_record['Serving Size'],
'is_selected': fruit_record['is_selected']},
**dict(list(zip(list(attributes_and_units.keys()),
fruit_record['data']))))
class CategoryItem(SelectableDataItem):
def __init__(self, is_selected=False, fruits=None, name='', **kwargs):
super(CategoryItem, self).__init__(is_selected=is_selected, **kwargs)
self.name = name
self.fruits = fruits if fruits is not None else []
self.is_selected = is_selected
class FruitItem(SelectableDataItem):
def __init__(self, is_selected=False, data=None, name='', **kwargs):
self.serving_size = kwargs.pop('Serving Size', '')
super(FruitItem, self).__init__(is_selected=is_selected, **kwargs)
self.name = name
        self.data = data if data is not None else []
self.is_selected = is_selected
def reset_to_defaults(data):
    if isinstance(data, dict):
        for key in data:
            data[key]['is_selected'] = False
    elif isinstance(data, list):
        for obj in data:
            obj.is_selected = False
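# Usage sketch (added for clarity): the setUp() methods below reset the shared
# fixtures between tests, passing either a dict of records or a list of
# SelectableDataItem instances, e.g.
#
#   reset_to_defaults(fruit_categories)   # dict of dicts: clears 'is_selected'
#   reset_to_defaults(fruit_data_items)   # list of items: clears .is_selected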
category_data_items = \
[CategoryItem(**fruit_categories[c]) for c in sorted(fruit_categories)]
fruit_data_items = \
[FruitItem(**fruit_dict) for fruit_dict in fruit_data_list_of_dicts]
class FruitSelectionObserver(Widget):
fruit_name = StringProperty('')
call_count = NumericProperty(0)
def on_selection_change(self, list_adapter, *args):
if len(list_adapter.selection) > 0:
self.fruit_name = list_adapter.selection[0].text
self.call_count += 1
class FruitsDictAdapter(DictAdapter):
def fruit_category_changed(self, fruit_categories_adapter, *args):
if len(fruit_categories_adapter.selection) == 0:
self.data = {}
return
category = \
fruit_categories[str(fruit_categories_adapter.selection[0])]
self.sorted_keys = category['fruits']
class ListAdapterTestCase(unittest.TestCase):
def setUp(self):
self.args_converter = \
lambda row_index, selectable: {'text': selectable.name,
'size_hint_y': None,
'height': 25}
reset_to_defaults(category_data_items)
reset_to_defaults(fruit_data_items)
reset_to_defaults(fruit_categories)
reset_to_defaults(fruit_data)
def test_list_adapter_selection_mode_none(self):
list_adapter = ListAdapter(data=fruit_data_items,
args_converter=self.args_converter,
selection_mode='none',
allow_empty_selection=True,
cls=ListItemButton)
self.assertEqual(sorted([obj.name for obj in list_adapter.data]),
['Apple', 'Avocado', 'Banana', 'Cantaloupe', 'Cherry', 'Grape',
'Grapefruit', 'Honeydew', 'Kiwifruit', 'Lemon', 'Lime',
'Nectarine', 'Orange', 'Peach', 'Pear', 'Pineapple', 'Plum',
'Strawberry', 'Tangerine', 'Watermelon'])
# The reason why len(selection) == 0 here is because it is ListView,
# at the end of its __init__(), that calls check_for_empty_selection()
# and triggers the initial selection, and we didn't make a ListView.
self.assertEqual(len(list_adapter.selection), 0)
list_adapter.check_for_empty_selection()
self.assertEqual(len(list_adapter.selection), 0)
def test_list_adapter_selection_mode_single(self):
list_adapter = ListAdapter(data=fruit_data_items,
args_converter=self.args_converter,
selection_mode='single',
propagate_selection_to_data=True,
allow_empty_selection=True,
cls=ListItemButton)
list_view = ListView(adapter=list_adapter)
# The reason why len(selection) == 0 here is because ListView,
# at the end of its __init__(), calls check_for_empty_selection()
# and does NOT trigger the initial selection, because we set
# allow_empty_selection = True.
self.assertEqual(len(list_adapter.selection), 0)
list_adapter.check_for_empty_selection()
# Nothing should have changed by that call, because still we have
# allow_empty_selection = True, so no action in that check.
self.assertEqual(len(list_adapter.selection), 0)
# Still no selection, but triggering a selection should make len = 1.
# So, first we need to select the associated data item.
self.assertEqual(fruit_data_items[0].name, 'Apple')
fruit_data_items[0].is_selected = True
apple = list_view.adapter.get_view(0)
self.assertEqual(apple.text, 'Apple')
self.assertTrue(apple.is_selected)
self.assertEqual(len(list_adapter.selection), 1)
def test_list_adapter_selection_mode_single_auto_selection(self):
list_adapter = ListAdapter(data=fruit_data_items,
args_converter=self.args_converter,
selection_mode='single',
allow_empty_selection=False,
cls=ListItemButton)
list_view = ListView(adapter=list_adapter)
# The reason why len(selection) == 1 here is because ListView,
# at the end of its __init__(), calls check_for_empty_selection()
# and triggers the initial selection, because allow_empty_selection is
# False.
apple = list_view.adapter.cached_views[0]
self.assertEqual(list_adapter.selection[0], apple)
self.assertEqual(len(list_adapter.selection), 1)
list_adapter.check_for_empty_selection()
# Nothing should have changed for len, as we already have a selection.
self.assertEqual(len(list_adapter.selection), 1)
def test_list_adapter_selection_mode_multiple_auto_selection(self):
list_adapter = ListAdapter(data=fruit_data_items,
args_converter=self.args_converter,
selection_mode='multiple',
propagate_selection_to_data=True,
allow_empty_selection=False,
cls=ListItemButton)
list_view = ListView(adapter=list_adapter)
# The reason why len(selection) == 1 here is because ListView,
# at the end of its __init__(), calls check_for_empty_selection()
# and triggers the initial selection, because allow_empty_selection is
# False.
self.assertEqual(len(list_adapter.selection), 1)
apple = list_adapter.selection[0]
self.assertEqual(apple.text, 'Apple')
# Add Avocado to the selection, doing necessary steps on data first.
self.assertEqual(fruit_data_items[1].name, 'Avocado')
fruit_data_items[1].is_selected = True
avocado = list_view.adapter.get_view(1) # does selection
self.assertEqual(avocado.text, 'Avocado')
self.assertEqual(len(list_adapter.selection), 2)
# Re-selection of the same item should decrease the len by 1.
list_adapter.handle_selection(avocado)
self.assertEqual(len(list_adapter.selection), 1)
# And now only apple should be in selection.
self.assertEqual(list_adapter.selection, [apple])
# Selection of several different items should increment len,
# because we have selection_mode as multiple.
#
# avocado has been unselected. Select it again.
list_adapter.handle_selection(avocado)
self.assertEqual(len(list_adapter.selection), 2)
self.assertEqual(list_adapter.selection, [apple, avocado])
# And select some different ones.
self.assertEqual(fruit_data_items[2].name, 'Banana')
fruit_data_items[2].is_selected = True
banana = list_view.adapter.get_view(2) # does selection
self.assertEqual(list_adapter.selection, [apple, avocado, banana])
self.assertEqual(len(list_adapter.selection), 3)
def test_list_adapter_selection_mode_multiple_and_limited(self):
list_adapter = ListAdapter(data=fruit_data_items,
args_converter=self.args_converter,
selection_mode='multiple',
propagate_selection_to_data=True,
selection_limit=3,
allow_empty_selection=True,
cls=ListItemButton)
list_view = ListView(adapter=list_adapter)
# Selection should be limited to 3 items, because selection_limit = 3.
for i in range(5):
# Add item to the selection, doing necessary steps on data first.
fruit_data_items[i].is_selected = True
list_view.adapter.get_view(i) # does selection
self.assertEqual(len(list_adapter.selection),
i + 1 if i < 3 else 3)
def test_list_adapter_selection_handle_selection(self):
list_adapter = ListAdapter(data=fruit_data_items,
args_converter=self.args_converter,
selection_mode='single',
propagate_selection_to_data=True,
allow_empty_selection=False,
cls=ListItemButton)
selection_observer = FruitSelectionObserver()
list_adapter.bind(
on_selection_change=selection_observer.on_selection_change)
list_view = ListView(adapter=list_adapter)
self.assertEqual(selection_observer.call_count, 0)
# From the check for initial selection, we should have apple selected.
self.assertEqual(list_adapter.selection[0].text, 'Apple')
self.assertEqual(len(list_adapter.selection), 1)
# Go through the tests routine to trigger selection of banana.
# (See notes above about triggering selection in tests.)
self.assertEqual(fruit_data_items[2].name, 'Banana')
fruit_data_items[2].is_selected = True
banana = list_view.adapter.get_view(2) # does selection
self.assertTrue(banana.is_selected)
# Now unselect it with handle_selection().
list_adapter.handle_selection(banana)
self.assertFalse(banana.is_selected)
# But, since we have allow_empty_selection=False, Apple will be
# reselected.
self.assertEqual(selection_observer.fruit_name, 'Apple')
# Call count:
#
        # Apple was selected initially without incrementing the count (it was
        # still 0 above). Selecting Banana, which unselects Apple, is event 1;
        # unselecting Banana is event 2; the forced reselection of Apple is
        # event 3. len(selection) should be 1.
self.assertEqual(selection_observer.call_count, 3)
self.assertEqual(len(list_adapter.selection), 1)
class DictAdapterTestCase(unittest.TestCase):
def setUp(self):
self.args_converter = lambda row_index, rec: {'text': rec['name'],
'size_hint_y': None,
'height': 25}
self.fruits = sorted(fruit_data.keys())
reset_to_defaults(fruit_categories)
reset_to_defaults(fruit_data)
def test_dict_adapter_selection_cascade(self):
# Categories of fruits:
#
categories = sorted(fruit_categories.keys())
categories_dict_adapter = \
DictAdapter(sorted_keys=categories,
data=fruit_categories,
args_converter=self.args_converter,
selection_mode='single',
allow_empty_selection=False,
cls=ListItemButton)
fruit_categories_list_view = \
ListView(adapter=categories_dict_adapter,
size_hint=(.2, 1.0))
# Fruits, for a given category, are shown based on the fruit category
# selected in the first categories list above. The selected item in
# the first list is used as the key into a dict of lists of list
# items to reset the data in FruitsDictAdapter's
# fruit_category_changed() method.
#
# data is initially set to the first list of list items.
#
fruits_dict_adapter = \
FruitsDictAdapter(
sorted_keys=fruit_categories[categories[0]]['fruits'],
data=fruit_data,
args_converter=self.args_converter,
selection_mode='single',
allow_empty_selection=False,
cls=ListItemButton)
categories_dict_adapter.bind(
on_selection_change=fruits_dict_adapter.fruit_category_changed)
fruits_list_view = ListView(adapter=fruits_dict_adapter,
size_hint=(.2, 1.0))
# List views should have adapters set.
self.assertEqual(fruit_categories_list_view.adapter,
categories_dict_adapter)
self.assertEqual(fruits_list_view.adapter, fruits_dict_adapter)
# Each list adapter has allow_empty_selection=False, so each should
# have one selected item.
self.assertEqual(len(categories_dict_adapter.selection), 1)
self.assertEqual(len(fruits_dict_adapter.selection), 1)
# The selected list items should show is_selected True.
self.assertEqual(categories_dict_adapter.selection[0].is_selected,
True)
self.assertEqual(fruits_dict_adapter.selection[0].is_selected,
True)
# And they should be red, for background_color.
self.assertEqual(
categories_dict_adapter.selection[0].background_color,
[1.0, 0., 0., 1.0])
self.assertEqual(
fruits_dict_adapter.selection[0].background_color,
[1.0, 0., 0., 1.0])
|
|
"""
Load npy xy, plot and save
"""
import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib import cm
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rc('font', family = 'serif', serif = 'cmr10')
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 14}
matplotlib.rc('font', **font)
import numpy as np
from datetime import timedelta
import datetime
import imp
import re
from textwrap import wrap
model_name_convert_legend = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_legend.py')
#unrotate = imp.load_source('util', '/home/pwille/python_scripts/modules/unrotate_pole.py')
###############
# Things to change
top_dir='/nfs/a90/eepdw/Data/Rain_Land_Sea_Diurnal'
pp_file = 'avg.5216'
lon_max = 101.866
lon_min = 64.115
lat_max= 33.
lat_min=-6.79
trmm_dir = '/nfs/a90/eepdw/Data/Observations/Satellite/TRMM/Diurnal/'
trmm_file = "trmm_diurnal_average_lat_%s_%s_lon_%s_%s.npz" % (lat_min,lat_max, lon_min, lon_max)
#############
# Make own time x-axis
utc_to_local=datetime.timedelta(hours=5, minutes=30)
d = matplotlib.dates.drange(datetime.datetime(2011, 8, 21, 6,30)+utc_to_local, datetime.datetime(2011, 8, 22, 6, 30)+utc_to_local, timedelta(hours=1))
formatter = matplotlib.dates.DateFormatter('%H:%M')
def main():
#experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
# experiment_ids_p = ['djznw', 'djzny', 'djznq', 'dklzq', 'dkmbq', 'dkjxq' ] # Most of Params
# experiment_ids_e = ['dklwu', 'dklyu', 'djzns', 'dkbhu', 'djznu', 'dkhgu'] # Most of Explicit
experiment_ids_p = ['dkmbq', 'dklzq' ]
experiment_ids_e = ['dklwu', 'dklyu', 'djznu']
#experiment_ids = ['djzny', 'djznq', 'djzns', 'djznw', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
#plt.ion()
NUM_COLOURS = 15
cmap=cm.get_cmap(cm.Set1, NUM_COLOURS)
#cgen = (cmap(1.*i/NUM_COLORS) for i in range(NUM_COLORS))
# for ls in ['land', 'total']:
for ls in ['sea']:
fig = plt.figure(figsize=(16,8))
ax = fig.add_subplot(111)
legendEntries=[]
legendtext=[]
plot_trmm = np.load('%s%s_%s' % (trmm_dir, ls, trmm_file))
dates_trmm=[]
p=[]
for dp in plot_trmm['hour']:
print dp
if ((int(dp)<23) & (int(dp)>=6)):
dates_trmm.append(datetime.datetime(2011, 8, 21, int(dp), 0))
p.append(plot_trmm['mean'][plot_trmm['hour']==dp])
if ((int(dp)>=0) & (int(dp)<=6)):
dates_trmm.append(datetime.datetime(2011, 8, 22, int(dp), 0))
p.append(plot_trmm['mean'][plot_trmm['hour']==dp])
#print dates_trmm
a = np.argsort(dates_trmm,axis=0)
d_trmm = np.array(dates_trmm)[a]
pl = (np.array(p)[a])
#pl=np.sort(pl,axis=1)
l, = plt.plot_date(d_trmm+utc_to_local, pl, label='TRMM', linewidth=2*1.5, linestyle='-', marker='', markersize=2, fmt='', color='#262626')
legendEntries.append(l)
legendtext.append('TRMM')
#land
#l0=plt.legend(legendEntries, legendtext,title='', frameon=False, loc=9, bbox_to_anchor=(0.31, 0,1, 1))
#sea
l0=plt.legend(legendEntries, legendtext,title='', frameon=False, loc=9, bbox_to_anchor=(0, 0,1, 1))
# Change the legend label colors to almost black
texts = l0.texts
for t in texts:
t.set_color('#262626')
legendEntries=[]
legendtext=[]
for c, experiment_id in enumerate(experiment_ids_p):
expmin1 = experiment_id[:-1]
if (experiment_id=='djznw'):
print experiment_id
colour = cmap(1.*1/NUM_COLOURS)
linewidth=0.2
linestylez='--'
if (experiment_id=='djzny'):
print experiment_id
colour = cmap(1.*2/NUM_COLOURS)
linewidth=0.5
linestylez='--'
if ((experiment_id=='djznq') or (experiment_id=='dkjxq')):
print experiment_id
colour = cmap(1.*6/NUM_COLOURS)
linewidth=0.8
if (experiment_id=='djznq'):
linestylez='--'
if (experiment_id=='dkjxq'):
linestylez=':'
if ((experiment_id=='dklzq') or (experiment_id=='dklwu')):
print experiment_id
colour = cmap(1.*8/NUM_COLOURS)
linewidth=1
if (experiment_id=='dklzq'):
linestylez='--'
if (experiment_id=='dklwu'):
linestylez='-'
if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')):
print experiment_id
colour = cmap(1.*10/NUM_COLOURS)
linewidth=1.3
if (experiment_id=='dkmbq'):
linestylez='--'
if (experiment_id=='dklyu'):
linestylez='-'
try:
plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain.npy' % (top_dir, expmin1, experiment_id, pp_file, ls))
if (ls != 'total'):
l, = plt.plot_date(d, plotnp[0]*3600, label=model_name_convert_legend.main(experiment_id), linewidth=linewidth*2.5, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
else:
l, = plt.plot_date(d, plotnp*3600, label=model_name_convert_legend.main(experiment_id), linewidth=linewidth*2.5, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
legendEntries.append(l)
legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
except Exception, e:
print e
pass
#Land
l1=plt.legend(legendEntries, legendtext, title='Parametrised', loc=9, frameon=False, bbox_to_anchor=(0, 0,1, 1))
#Sea
l1=plt.legend(legendEntries, legendtext, title='Parametrised', loc=9, frameon=False, bbox_to_anchor=(-0.255, 0,1, 1))
# Change the legend label colors to almost black
texts = l1.texts
for t in texts:
t.set_color('#262626')
legendEntries=[]
legendtext=[]
c1=0
for c, experiment_id in enumerate(experiment_ids_e):
if (experiment_id=='djznw'):
print experiment_id
colour = cmap(1.*1/NUM_COLOURS)
linewidth=0.2
linestylez='--'
if (experiment_id=='djzny'):
print experiment_id
colour = cmap(1.*3/NUM_COLOURS)
linewidth=0.5
linestylez='--'
if ((experiment_id=='djznq') or (experiment_id=='dkjxq')):
print experiment_id
colour = cmap(1.*5/NUM_COLOURS)
linewidth=0.8
if (experiment_id=='djznq'):
linestylez='--'
if (experiment_id=='dkjxq'):
linestylez=':'
if ((experiment_id=='dklzq') or (experiment_id=='dklwu')):
print experiment_id
colour = cmap(1.*7/NUM_COLOURS)
linewidth=1
if (experiment_id=='dklzq'):
linestylez='--'
if (experiment_id=='dklwu'):
linestylez='-'
if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')):
print experiment_id
colour = cmap(1.*9/NUM_COLOURS)
linewidth=1.3
if (experiment_id=='dkmbq'):
linestylez='--'
if (experiment_id=='dklyu'):
linestylez='-'
if (experiment_id=='djzns'):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=1.6
linestylez='-'
if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')):
print experiment_id
colour = cmap(1.*13/NUM_COLOURS)
linewidth=1.9
if (experiment_id=='dkbhu'):
linestylez='-'
if (experiment_id=='dkhgu'):
linestylez=':'
if (experiment_id=='djznu'):
print experiment_id
colour = cmap(1.*15/NUM_COLOURS)
linewidth=2.
linestylez='-'
expmin1 = experiment_id[:-1]
try:
plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain.npy' % (top_dir, expmin1, experiment_id, pp_file, ls))
if (ls != 'total'):
l, = plt.plot_date(d, plotnp[0]*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth*1.5, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
else:
l, = plt.plot_date(d, plotnp*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth*1.5, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
legendEntries.append(l)
legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
except Exception, e:
print e
pass
#Land
#l2=plt.legend(legendEntries, legendtext, title='Explicit', loc=9, frameon=False, bbox_to_anchor=(0.155, 0,1, 1))
#Sea
l2=plt.legend(legendEntries, legendtext, title='Explicit', loc=9, frameon=False, bbox_to_anchor=(-0.1, 0,1, 1))
plt.gca().add_artist(l1)
plt.gca().add_artist(l0)
plt.gca().xaxis.set_major_formatter(formatter)
# Change the legend label colors to almost black
texts = l2.texts
for t in texts:
t.set_color('#262626')
plt.xlabel('Time (local)')
plt.ylabel('mm/h')
title="Domain Averaged Rainfall - %s" % ls
t=re.sub('(.{68} )', '\\1\n', str(title), 0, re.DOTALL)
t = re.sub(r'[(\']', ' ', t)
t = re.sub(r'[\',)]', ' ', t)
pp_filenodot= pp_file.replace(".", "")
# Bit of formatting
# Set colour of axis lines
spines_to_keep = ['bottom', 'left']
for spine in spines_to_keep:
ax.spines[spine].set_linewidth(0.5)
ax.spines[spine].set_color('#262626')
# Remove top and right axes lines ("spines")
spines_to_remove = ['top', 'right']
for spine in spines_to_remove:
ax.spines[spine].set_visible(False)
# Get rid of ticks. The position of the numbers is informative enough of
# the position of the value.
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
datemin = d.min()
datemax = d.max()
ax.set_xlim(datemin, datemax)
# Change the labels to the off-black
ax.xaxis.label.set_color('#262626')
ax.yaxis.label.set_color('#262626')
if not os.path.exists('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/'): os.makedirs('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/')
plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_dkbhu_notitle_big_font_8and12km.png' % (pp_filenodot, ls), format='png', bbox_inches='tight')
plt.title('\n'.join(wrap('%s' % (t.title()), 1000,replace_whitespace=False)), fontsize=16, color='#262626')
#plt.show()
plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_dkbhu_big_font_8and12km.png' % (pp_filenodot, ls), format='png', bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Categorical distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
class Categorical(distribution.Distribution):
"""Categorical distribution.
The categorical distribution is parameterized by the log-probabilities
of a set of classes.
#### Examples
  Creates a 3-class distribution, with the 2nd class being the most likely to
  be drawn from.
```python
p = [0.1, 0.5, 0.4]
dist = Categorical(probs=p)
```
  Creates a 3-class distribution, with the 2nd class being the most likely to
  be drawn from, using logits.
```python
logits = [-50, 400, 40]
dist = Categorical(logits=logits)
```
  Creates a 3-class distribution, with the 3rd class being the most likely to be drawn.
The distribution functions can be evaluated on counts.
```python
# counts is a scalar.
p = [0.1, 0.4, 0.5]
dist = Categorical(probs=p)
dist.prob(0) # Shape []
# p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match counts.
counts = [1, 0]
dist.prob(counts) # Shape [2]
# p will be broadcast to shape [3, 5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7, 3]
```
"""
def __init__(
self,
logits=None,
probs=None,
dtype=dtypes.int32,
validate_args=False,
allow_nan_stats=True,
name="Categorical"):
"""Initialize Categorical distributions using class log-probabilities.
Args:
logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
of a set of Categorical distributions. The first `N - 1` dimensions
index into a batch of independent distributions and the last dimension
represents a vector of logits for each class. Only one of `logits` or
`probs` should be passed in.
probs: An N-D `Tensor`, `N >= 1`, representing the probabilities
of a set of Categorical distributions. The first `N - 1` dimensions
index into a batch of independent distributions and the last dimension
represents a vector of probabilities for each class. Only one of
`logits` or `probs` should be passed in.
dtype: The type of the event samples (default: int32).
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
with ops.name_scope(name, values=[logits, probs]) as ns:
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits,
probs=probs,
validate_args=validate_args,
multidimensional=True,
name=name)
logits_shape_static = self._logits.get_shape().with_rank_at_least(1)
if logits_shape_static.ndims is not None:
self._batch_rank = ops.convert_to_tensor(
logits_shape_static.ndims - 1,
dtype=dtypes.int32,
name="batch_rank")
else:
with ops.name_scope(name="batch_rank"):
self._batch_rank = array_ops.rank(self._logits) - 1
logits_shape = array_ops.shape(self._logits, name="logits_shape")
if logits_shape_static[-1].value is not None:
self._event_size = ops.convert_to_tensor(
logits_shape_static[-1].value,
dtype=dtypes.int32,
name="event_size")
else:
with ops.name_scope(name="event_size"):
self._event_size = logits_shape[self._batch_rank]
if logits_shape_static[:-1].is_fully_defined():
self._batch_shape_val = constant_op.constant(
logits_shape_static[:-1].as_list(),
dtype=dtypes.int32,
name="batch_shape")
else:
with ops.name_scope(name="batch_shape"):
self._batch_shape_val = logits_shape[:-1]
super(Categorical, self).__init__(
dtype=dtype,
is_continuous=False,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._logits,
self._probs],
name=ns)
@property
def event_size(self):
"""Scalar `int32` tensor: the number of classes."""
return self._event_size
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def probs(self):
"""Vector of coordinatewise probabilities."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.identity(self._batch_shape_val)
def _batch_shape(self):
return self.logits.get_shape()[:-1]
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
if self.logits.get_shape().ndims == 2:
logits_2d = self.logits
else:
logits_2d = array_ops.reshape(self.logits, [-1, self.event_size])
samples = random_ops.multinomial(logits_2d, n, seed=seed)
samples = math_ops.cast(samples, self.dtype)
ret = array_ops.reshape(
array_ops.transpose(samples),
array_ops.concat([[n], self.batch_shape_tensor()], 0))
return ret
def _log_prob(self, k):
k = ops.convert_to_tensor(k, name="k")
if self.logits.get_shape()[:-1] == k.get_shape():
logits = self.logits
else:
logits = self.logits * array_ops.ones_like(
array_ops.expand_dims(k, -1), dtype=self.logits.dtype)
logits_shape = array_ops.shape(logits)[:-1]
k *= array_ops.ones(logits_shape, dtype=k.dtype)
k.set_shape(tensor_shape.TensorShape(logits.get_shape()[:-1]))
return -nn_ops.sparse_softmax_cross_entropy_with_logits(labels=k,
logits=logits)
def _prob(self, k):
return math_ops.exp(self._log_prob(k))
def _entropy(self):
return -math_ops.reduce_sum(
nn_ops.log_softmax(self.logits) * self.probs, axis=-1)
def _mode(self):
ret = math_ops.argmax(self.logits, dimension=self._batch_rank)
ret = math_ops.cast(ret, self.dtype)
ret.set_shape(self.batch_shape)
return ret
@kullback_leibler.RegisterKL(Categorical, Categorical)
def _kl_categorical_categorical(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Categorical.
Args:
a: instance of a Categorical distribution object.
b: instance of a Categorical distribution object.
name: (optional) Name to use for created operations.
default is "kl_categorical_categorical".
Returns:
Batchwise KL(a || b)
"""
with ops.name_scope(name, "kl_categorical_categorical",
values=[a.logits, b.logits]):
# sum over classes of p_a * log(p_a / p_b)
delta_log_probs1 = (nn_ops.log_softmax(a.logits) -
nn_ops.log_softmax(b.logits))
return math_ops.reduce_sum(nn_ops.softmax(a.logits) * delta_log_probs1,
axis=-1)
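# A minimal usage sketch (illustrative values only; evaluating the op requires
# a TF 1.x Session, and KL is normally requested through the kullback_leibler
# registry rather than by calling this function directly):
#
#   a = Categorical(logits=[[1.0, 2.0, 3.0]])      # batch shape [1], 3 classes
#   b = Categorical(probs=[[1/3., 1/3., 1/3.]])    # uniform reference
#   kl = _kl_categorical_categorical(a, b)         # Tensor of shape [1]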
|
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
import os
from chainer import serializers
from future.utils import with_metaclass
import numpy
import warnings
from chainerrl.misc.makedirs import makedirs
def load_npz_no_strict(filename, obj):
try:
serializers.load_npz(filename, obj)
except KeyError as e:
warnings.warn(repr(e))
with numpy.load(filename) as f:
d = serializers.NpzDeserializer(f, strict=False)
d.load(obj)
class Agent(with_metaclass(ABCMeta, object)):
"""Abstract agent class."""
@abstractmethod
def act_and_train(self, obs, reward):
"""Select an action for training.
Returns:
~object: action
"""
raise NotImplementedError()
@abstractmethod
def act(self, obs):
"""Select an action for evaluation.
Returns:
~object: action
"""
raise NotImplementedError()
@abstractmethod
def stop_episode_and_train(self, state, reward, done=False):
"""Observe consequences and prepare for a new episode.
Returns:
None
"""
raise NotImplementedError()
@abstractmethod
def stop_episode(self):
"""Prepare for a new episode.
Returns:
None
"""
raise NotImplementedError()
@abstractmethod
def save(self, dirname):
"""Save internal states.
Returns:
None
"""
pass
@abstractmethod
def load(self, dirname):
"""Load internal states.
Returns:
None
"""
pass
@abstractmethod
def get_statistics(self):
"""Get statistics of the agent.
Returns:
List of two-item tuples. The first item in a tuple is a str that
represents the name of the item, while the second item is a value to be
recorded.
Example: [('average_loss', 0), ('average_value', 1), ...]
"""
pass
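# A minimal interaction-loop sketch for the Agent interface above (env and
# agent are illustrative; chainerrl.experiments provides ready-made loops):
#
#   obs = env.reset()
#   reward = 0.0
#   done = False
#   while not done:
#       action = agent.act_and_train(obs, reward)
#       obs, reward, done, _ = env.step(action)
#   agent.stop_episode_and_train(obs, reward, done)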
class AttributeSavingMixin(object):
"""Mixin that provides save and load functionalities."""
@abstractproperty
def saved_attributes(self):
"""Specify attribute names to save or load as a tuple of str."""
pass
def save(self, dirname):
"""Save internal states."""
self.__save(dirname, [])
def __save(self, dirname, ancestors):
makedirs(dirname, exist_ok=True)
ancestors.append(self)
for attr in self.saved_attributes:
assert hasattr(self, attr)
attr_value = getattr(self, attr)
if attr_value is None:
continue
if isinstance(attr_value, AttributeSavingMixin):
assert not any(
attr_value is ancestor
for ancestor in ancestors
), "Avoid an infinite loop"
attr_value.__save(os.path.join(dirname, attr), ancestors)
else:
serializers.save_npz(
os.path.join(dirname, '{}.npz'.format(attr)),
getattr(self, attr))
ancestors.pop()
def load(self, dirname):
"""Load internal states."""
self.__load(dirname, [])
def __load(self, dirname, ancestors):
ancestors.append(self)
for attr in self.saved_attributes:
assert hasattr(self, attr)
attr_value = getattr(self, attr)
if attr_value is None:
continue
if isinstance(attr_value, AttributeSavingMixin):
assert not any(
attr_value is ancestor
for ancestor in ancestors
), "Avoid an infinite loop"
attr_value.load(os.path.join(dirname, attr))
else:
"""Fix Chainer Issue #2772
In Chainer v2, a (stateful) optimizer cannot be loaded from
an npz saved before the first update.
"""
load_npz_no_strict(
os.path.join(dirname, '{}.npz'.format(attr)),
getattr(self, attr))
ancestors.pop()
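# A minimal usage sketch (MyAgent and its attribute names are hypothetical):
# a subclass only declares which attributes persist; save()/load() then walk
# them, serializing each plain attribute to '<dirname>/<attr>.npz' and
# recursing into attributes that are themselves AttributeSavingMixin.
#
#   class MyAgent(AttributeSavingMixin, Agent):
#       saved_attributes = ('model', 'optimizer')
#       ...
#
#   agent.save('checkpoints/agent0')   # writes model.npz and optimizer.npz
#   agent.load('checkpoints/agent0')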
class AsyncAgent(with_metaclass(ABCMeta, Agent)):
"""Abstract asynchronous agent class."""
@abstractproperty
def process_idx(self):
"""Index of process as integer, 0 for the representative process."""
pass
@abstractproperty
def shared_attributes(self):
"""Tuple of names of shared attributes."""
pass
class BatchAgent(with_metaclass(ABCMeta, Agent)):
"""Abstract agent class that can interact with a batch of envs."""
@abstractmethod
def batch_act(self, batch_obs):
"""Select a batch of actions for evaluation.
Args:
batch_obs (Sequence of ~object): Observations.
Returns:
Sequence of ~object: Actions.
"""
raise NotImplementedError()
@abstractmethod
def batch_act_and_train(self, batch_obs):
"""Select a batch of actions for training.
Args:
batch_obs (Sequence of ~object): Observations.
Returns:
Sequence of ~object: Actions.
"""
raise NotImplementedError()
@abstractmethod
def batch_observe(self, batch_obs, batch_reward, batch_done, batch_reset):
"""Observe a batch of action consequences for evaluation.
Args:
batch_obs (Sequence of ~object): Observations.
batch_reward (Sequence of float): Rewards.
batch_done (Sequence of boolean): Boolean values where True
indicates the current state is terminal.
batch_reset (Sequence of boolean): Boolean values where True
indicates the current episode will be reset, even if the
current state is not terminal.
Returns:
None
"""
raise NotImplementedError()
@abstractmethod
def batch_observe_and_train(
self, batch_obs, batch_reward, batch_done, batch_reset):
"""Observe a batch of action consequences for training.
Args:
batch_obs (Sequence of ~object): Observations.
batch_reward (Sequence of float): Rewards.
batch_done (Sequence of boolean): Boolean values where True
indicates the current state is terminal.
batch_reset (Sequence of boolean): Boolean values where True
indicates the current episode will be reset, even if the
current state is not terminal.
Returns:
None
"""
raise NotImplementedError()
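# A minimal batched interaction sketch for the BatchAgent interface above
# (vec_env and num_steps are hypothetical; vec_env is a vectorized environment
# returning one entry per sub-environment):
#
#   obss = vec_env.reset()
#   for _ in range(num_steps):
#       actions = agent.batch_act_and_train(obss)
#       obss, rewards, dones, resets = vec_env.step(actions)
#       agent.batch_observe_and_train(obss, rewards, dones, resets)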
|
|
##########################################################################
#
# Copyright (c) 2010-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou
import IECore
import IECoreHoudini
import unittest
import os
class TestToHoudiniPointsConverter( IECoreHoudini.TestCase ) :
__testScene = "test/converterTest.hip"
def points( self ) :
pData = IECore.V3fVectorData( [
IECore.V3f( 0, 1, 2 ), IECore.V3f( 1 ), IECore.V3f( 2 ), IECore.V3f( 3 ),
IECore.V3f( 4 ), IECore.V3f( 5 ), IECore.V3f( 6 ), IECore.V3f( 7 ),
IECore.V3f( 8 ), IECore.V3f( 9 ), IECore.V3f( 10 ), IECore.V3f( 11 ),
] )
points = IECore.PointsPrimitive( pData )
floatData = IECore.FloatData( 1.5 )
v2fData = IECore.V2fData( IECore.V2f( 1.5, 2.5 ) )
v3fData = IECore.V3fData( IECore.V3f( 1.5, 2.5, 3.5 ) )
color3fData = IECore.Color3fData( IECore.Color3f( 1.5, 2.5, 3.5 ) )
intData = IECore.IntData( 1 )
v2iData = IECore.V2iData( IECore.V2i( 1, 2 ) )
v3iData = IECore.V3iData( IECore.V3i( 1, 2, 3 ) )
stringData = IECore.StringData( "this is a string" )
m33fData = IECore.M33fData( IECore.M33f(1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0) )
m44fData = IECore.M44fData( IECore.M44f(1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0) )
intRange = range( 1, 13 )
floatVectorData = IECore.FloatVectorData( [ x+0.5 for x in intRange ] )
v2fVectorData = IECore.V2fVectorData( [ IECore.V2f( x, x+0.5 ) for x in intRange ] )
v3fVectorData = IECore.V3fVectorData( [ IECore.V3f( x, x+0.5, x+0.75 ) for x in intRange ] )
color3fVectorData = IECore.Color3fVectorData( [ IECore.Color3f( x, x+0.5, x+0.75 ) for x in intRange ] )
quatVectorData = IECore.QuatfVectorData( [ IECore.Quatf( x, x+0.25, x+0.5, x+0.75 ) for x in intRange ] )
intVectorData = IECore.IntVectorData( intRange )
v2iVectorData = IECore.V2iVectorData( [ IECore.V2i( x, -x ) for x in intRange ] )
v3iVectorData = IECore.V3iVectorData( [ IECore.V3i( x, -x, x*2 ) for x in intRange ] )
stringVectorData = IECore.StringVectorData( [ "string number %06d!" % x for x in intRange ] )
m33fVectorData = IECore.M33fVectorData( [ IECore.M33f(1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0) for x in intRange ] )
m44fVectorData = IECore.M44fVectorData( [ IECore.M44f(1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0) for x in intRange ] )
detailInterpolation = IECore.PrimitiveVariable.Interpolation.Constant
uniformInterpolation = IECore.PrimitiveVariable.Interpolation.Uniform
pointInterpolation = IECore.PrimitiveVariable.Interpolation.Vertex
# add all valid detail attrib types
points["floatDetail"] = IECore.PrimitiveVariable( detailInterpolation, floatData )
points["v2fDetail"] = IECore.PrimitiveVariable( detailInterpolation, v2fData )
points["v3fDetail"] = IECore.PrimitiveVariable( detailInterpolation, v3fData )
points["color3fDetail"] = IECore.PrimitiveVariable( detailInterpolation, color3fData )
points["intDetail"] = IECore.PrimitiveVariable( detailInterpolation, intData )
points["v2iDetail"] = IECore.PrimitiveVariable( detailInterpolation, v2iData )
points["v3iDetail"] = IECore.PrimitiveVariable( detailInterpolation, v3iData )
points["stringDetail"] = IECore.PrimitiveVariable( detailInterpolation, stringData )
points["m33fDetail"] = IECore.PrimitiveVariable( detailInterpolation, m33fData )
points["m44fDetail"] = IECore.PrimitiveVariable( detailInterpolation, m44fData )
# add all valid prim attrib types
points["floatPrim"] = IECore.PrimitiveVariable( uniformInterpolation, floatVectorData[:1] )
points["v2fPrim"] = IECore.PrimitiveVariable( uniformInterpolation, v2fVectorData[:1] )
points["v3fPrim"] = IECore.PrimitiveVariable( uniformInterpolation, v3fVectorData[:1] )
points["color3fPrim"] = IECore.PrimitiveVariable( uniformInterpolation, color3fVectorData[:1] )
points["quatPrim"] = IECore.PrimitiveVariable( uniformInterpolation, quatVectorData[:1] )
points["intPrim"] = IECore.PrimitiveVariable( uniformInterpolation, intVectorData[:1] )
points["v2iPrim"] = IECore.PrimitiveVariable( uniformInterpolation, v2iVectorData[:1] )
points["v3iPrim"] = IECore.PrimitiveVariable( uniformInterpolation, v3iVectorData[:1] )
points["stringPrim"] = IECore.PrimitiveVariable( detailInterpolation, stringVectorData[:1] )
points["stringPrimIndices"] = IECore.PrimitiveVariable( uniformInterpolation, IECore.IntVectorData( [ 0 ] ) )
points["m33fPrim"] = IECore.PrimitiveVariable( uniformInterpolation, m33fVectorData[:1] )
points["m44fPrim"] = IECore.PrimitiveVariable( uniformInterpolation, m44fVectorData[:1] )
# add all valid point attrib types
points["floatPoint"] = IECore.PrimitiveVariable( pointInterpolation, floatVectorData )
points["v2fPoint"] = IECore.PrimitiveVariable( pointInterpolation, v2fVectorData )
points["v3fPoint"] = IECore.PrimitiveVariable( pointInterpolation, v3fVectorData )
points["color3fPoint"] = IECore.PrimitiveVariable( pointInterpolation, color3fVectorData )
points["quatPoint"] = IECore.PrimitiveVariable( pointInterpolation, quatVectorData )
points["intPoint"] = IECore.PrimitiveVariable( pointInterpolation, intVectorData )
points["v2iPoint"] = IECore.PrimitiveVariable( pointInterpolation, v2iVectorData )
points["v3iPoint"] = IECore.PrimitiveVariable( pointInterpolation, v3iVectorData )
points["stringPoint"] = IECore.PrimitiveVariable( detailInterpolation, stringVectorData )
points["stringPointIndices"] = IECore.PrimitiveVariable( pointInterpolation, IECore.IntVectorData( range( 0, 12 ) ) )
points["m33fPoint"] = IECore.PrimitiveVariable( pointInterpolation, m33fVectorData )
points["m44fPoint"] = IECore.PrimitiveVariable( pointInterpolation, m44fVectorData )
return points
def emptySop( self ) :
obj = hou.node( "/obj" )
geo = obj.createNode( "geo", run_init_scripts=False )
null = geo.createNode( "null" )
return null
def pointsSop( self ) :
obj = hou.node( "/obj" )
geo = obj.createNode( "geo", run_init_scripts=False )
box = geo.createNode( "box" )
facet = box.createOutputNode( "facet" )
facet.parm( "postnml" ).set(True)
points = facet.createOutputNode( "scatter" )
return points
def comparePrimAndSop( self, prim, sop ) :
geo = sop.geometry()
for key in [ "floatDetail", "intDetail", "stringDetail" ] :
self.assertEqual( prim[key].data.value, geo.attribValue( key ) )
def toTuple( v ):
if isinstance( v, IECore.M33f ):
return (
v[(0,0)], v[(0,1)], v[(0,2)],
v[(1,0)], v[(1,1)], v[(1,2)],
v[(2,0)], v[(2,1)], v[(2,2)]
)
elif isinstance( v, IECore.M44f ):
return (
v[(0,0)], v[(0,1)], v[(0,2)], v[(0,3)],
v[(1,0)], v[(1,1)], v[(1,2)], v[(1,3)],
v[(2,0)], v[(2,1)], v[(2,2)], v[(2,3)],
v[(3,0)], v[(3,1)], v[(3,2)], v[(3,3)]
)
else:
return tuple( v )
for key in [ "v2fDetail", "v3fDetail", "color3fDetail", "v2iDetail", "v3iDetail", "m33fDetail", "m44fDetail" ] :
self.assertEqual( toTuple(prim[key].data.value), geo.attribValue( key ) )
sopPrims = geo.prims()
for key in [ "floatPrim", "intPrim", "stringPrim" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopPrims[i].attribValue( key ) )
for key in [ "v2fPrim", "v3fPrim", "color3fPrim", "v2iPrim", "v3iPrim", "m33fPrim", "m44fPrim" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( toTuple(data[i]), sopPrims[i].attribValue( key ) )
self.assertEqual( geo.findPrimAttrib( "quatPrim" ).qualifier(), "Quaternion" )
data = prim["quatPrim"].data
for i in range( 0, data.size() ) :
components = ( data[i][1], data[i][2], data[i][3], data[i][0] )
self.assertEqual( components, sopPrims[i].attribValue( "quatPrim" ) )
data = prim["stringPrim"].data
dataIndices = prim["stringPrimIndices"].data
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopPrims[i].attribValue( "stringPrim" ) )
sopPoints = geo.points()
for key in [ "floatPoint", "intPoint" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopPoints[i].attribValue( key ) )
for key in [ "P", "v2fPoint", "v3fPoint", "color3fPoint", "v2iPoint", "v3iPoint", "m33fPoint", "m44fPoint" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
self.assertEqual( toTuple(data[i]), sopPoints[i].attribValue( key ) )
self.assertEqual( geo.findPointAttrib( "quatPoint" ).qualifier(), "Quaternion" )
data = prim["quatPoint"].data
for i in range( 0, data.size() ) :
components = ( data[i][1], data[i][2], data[i][3], data[i][0] )
self.assertEqual( components, sopPoints[i].attribValue( "quatPoint" ) )
data = prim["stringPoint"].data
dataIndices = prim["stringPointIndices"].data
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopPoints[i].attribValue( "stringPoint" ) )
result = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertEqual( result.keys(), prim.keys() )
for key in prim.keys() :
if result[key] != prim[key]:
print result[key].interpolation, result[key].data, prim[key].interpolation, prim[key].data
self.assertEqual( result[key], prim[key] )
self.assertEqual( result, prim )
def comparePrimAndAppendedSop( self, prim, sop, origSopPrim, multipleConversions=0 ) :
geo = sop.geometry()
# verify detail attribs
for key in [ "floatDetail", "intDetail", "stringDetail" ] :
self.assertEqual( prim[key].data.value, geo.attribValue( key ) )
def toTuple( v ):
if isinstance( v, IECore.M33f ):
return (
v[(0,0)], v[(0,1)], v[(0,2)],
v[(1,0)], v[(1,1)], v[(1,2)],
v[(2,0)], v[(2,1)], v[(2,2)]
)
elif isinstance( v, IECore.M44f ):
return (
v[(0,0)], v[(0,1)], v[(0,2)], v[(0,3)],
v[(1,0)], v[(1,1)], v[(1,2)], v[(1,3)],
v[(2,0)], v[(2,1)], v[(2,2)], v[(2,3)],
v[(3,0)], v[(3,1)], v[(3,2)], v[(3,3)]
)
else:
return tuple( v )
for key in [ "v2fDetail", "v3fDetail", "color3fDetail", "v2iDetail", "v3iDetail", "m33fDetail", "m44fDetail" ] :
self.assertEqual( toTuple(prim[key].data.value), geo.attribValue( key ) )
# verify prim attribs
sopPrims = geo.prims()
numPrims = multipleConversions + 1
self.assertEqual( len(sopPrims), numPrims )
for key in [ "floatPrim", "intPrim" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
for sopPrim in sopPrims :
self.assertEqual( data[i], sopPrim.attribValue( key ) )
for key in [ "v2fPrim", "v3fPrim", "color3fPrim", "v2iPrim", "v3iPrim", "m33fPrim", "m44fPrim" ] :
data = prim[key].data
for i in range( 0, data.size() ) :
for sopPrim in sopPrims :
self.assertEqual( toTuple(data[i]), sopPrim.attribValue( key ) )
data = prim["stringPrim"].data
dataIndices = prim["stringPrimIndices"].data
for i in range( 0, data.size() ) :
for sopPrim in sopPrims :
self.assertEqual( data[ dataIndices[i] ], sopPrim.attribValue( "stringPrim" ) )
# verify points attribs
sopPoints = geo.points()
self.assertEqual( len(sopPoints), origSopPrim.numPoints + prim.numPoints )
for key in [ "floatPoint", "intPoint" ] :
data = prim[key].data
if multipleConversions :
defaultValue = origSopPrim[key].data
else :
defaultValue = [ 0 ] * origSopPrim.numPoints
for i in range( 0, origSopPrim.numPoints ) :
self.assertEqual( defaultValue[i], sopPoints[ i ].attribValue( key ) )
for i in range( 0, data.size() ) :
self.assertEqual( data[i], sopPoints[ origSopPrim.numPoints + i ].attribValue( key ) )
for key in [ "P", "v2fPoint", "v3fPoint", "color3fPoint", "v2iPoint", "v3iPoint", "m33fPoint", "m44fPoint" ] :
data = prim[key].data
if multipleConversions or key == "P" :
defaultValue = origSopPrim[key].data
else :
d = data[0].dimensions()
if isinstance( d, tuple ):
defaultValue = [ [ 0 ] * d[0] * d[1] ] * origSopPrim.numPoints
else:
defaultValue = [ [ 0 ] * data[0].dimensions() ] * origSopPrim.numPoints
for i in range( 0, origSopPrim.numPoints ) :
self.assertEqual( toTuple(defaultValue[i]), sopPoints[ i ].attribValue( key ) )
for i in range( 0, data.size() ) :
self.assertEqual( toTuple(data[i]), sopPoints[ origSopPrim.numPoints + i ].attribValue( key ) )
data = prim["stringPoint"].data
dataIndices = prim["stringPointIndices"].data
if multipleConversions :
defaultData = origSopPrim["stringPoint"].data
defaultIndices = origSopPrim["stringPointIndices"].data
for i in range( 0, origSopPrim.numPoints ) :
val = "" if ( defaultIndices[i] >= defaultData.size() ) else defaultData[ defaultIndices[i] ]
self.assertEqual( val, sopPoints[ i ].attribValue( "stringPoint" ) )
else :
defaultValues = [ "" ] * origSopPrim.numPoints
for i in range( 0, origSopPrim.numPoints ) :
self.assertEqual( defaultValues[i], sopPoints[ i ].attribValue( "stringPoint" ) )
for i in range( 0, data.size() ) :
self.assertEqual( data[ dataIndices[i] ], sopPoints[ origSopPrim.numPoints + i ].attribValue( "stringPoint" ) )
result = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
for key in prim.keys() :
# prim attribs don't make it through on multiple conversions because the interpolation size is incorrect
if not( multipleConversions and "Prim" in key ) :
self.assert_( key in result.keys() )
def testCreateConverter( self ) :
converter = IECoreHoudini.ToHoudiniPointsConverter( self.points() )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.ToHoudiniPointsConverter ) ) )
def testFactory( self ) :
converter = IECoreHoudini.ToHoudiniGeometryConverter.create( self.points() )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.ToHoudiniPointsConverter ) ) )
self.failUnless( IECore.TypeId.PointsPrimitive in IECoreHoudini.ToHoudiniGeometryConverter.supportedTypes() )
def testConversionIntoEmptySop( self ) :
points = self.points()
sop = self.emptySop()
self.assertNotEqual( IECoreHoudini.FromHoudiniPointsConverter( sop ).convert(), points )
self.assert_( IECoreHoudini.ToHoudiniPointsConverter( points ).convert( sop ) )
self.comparePrimAndSop( points, sop )
def testConversionIntoExistingSop( self ) :
points = self.points()
sop = self.pointsSop()
orig = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertNotEqual( orig, points )
self.assert_( IECoreHoudini.ToHoudiniPointsConverter( points ).convert( sop, False ) )
self.comparePrimAndSop( points, sop )
def testAppendingIntoExistingSop( self ) :
points = self.points()
sop = self.pointsSop()
orig = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertNotEqual( orig, points )
self.assert_( not sop.isHardLocked() )
self.assert_( IECoreHoudini.ToHoudiniPointsConverter( points ).convert( sop, True ) )
self.assert_( sop.isHardLocked() )
self.comparePrimAndAppendedSop( points, sop, orig )
result = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertEqual( result.numPoints, orig.numPoints + points.numPoints )
for i in range( 0, points["P"].data.size() ) :
self.assertEqual( result["P"].data[ orig.numPoints + i ], points["P"].data[i] )
sop.setHardLocked( False )
result = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertEqual( result.numPoints, orig.numPoints )
self.assert_( "floatDetail" not in result.keys() )
self.assert_( "floatPoint" not in result.keys() )
def testAppendingIntoLockedSop( self ) :
points = self.points()
sop = self.pointsSop()
orig = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertNotEqual( orig, points )
sop.setHardLocked( True )
self.assert_( sop.isHardLocked() )
self.assert_( IECoreHoudini.ToHoudiniPointsConverter( points ).convert( sop, True ) )
self.assert_( sop.isHardLocked() )
self.comparePrimAndAppendedSop( points, sop, orig )
result = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertEqual( result.numPoints, orig.numPoints + points.numPoints )
for i in range( 0, points["P"].data.size() ) :
self.assertEqual( result["P"].data[ orig.numPoints + i ], points["P"].data[i] )
sop.setHardLocked( False )
result = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertEqual( result.numPoints, orig.numPoints )
self.assert_( "floatDetail" not in result.keys() )
self.assert_( "floatPoint" not in result.keys() )
def testSaveLoad( self ) :
hou.hipFile.clear( suppress_save_prompt=True )
points = self.points()
sop = self.pointsSop()
sopPath = sop.path()
orig = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertNotEqual( orig, points )
self.assert_( not sop.isHardLocked() )
self.assert_( IECoreHoudini.ToHoudiniPointsConverter( points ).convert( sop, True ) )
self.assert_( sop.isHardLocked() )
self.comparePrimAndAppendedSop( points, sop, orig )
result = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertEqual( result.numPoints, orig.numPoints + points.numPoints )
for i in range( 0, points["P"].data.size() ) :
self.assertEqual( result["P"].data[ orig.numPoints + i ], points["P"].data[i] )
hou.hipFile.save( TestToHoudiniPointsConverter.__testScene )
hou.hipFile.clear( suppress_save_prompt=True )
hou.hipFile.load( TestToHoudiniPointsConverter.__testScene )
newSop = hou.node( sopPath )
self.assert_( newSop.isHardLocked() )
self.comparePrimAndAppendedSop( points, newSop, orig )
result = IECoreHoudini.FromHoudiniPointsConverter( newSop ).convert()
self.assertEqual( result.numPoints, orig.numPoints + points.numPoints )
for i in range( 0, points["P"].data.size() ) :
self.assertEqual( result["P"].data[ orig.numPoints + i ], points["P"].data[i] )
def testSaveLoadWithLockedSop( self ) :
hou.hipFile.clear( suppress_save_prompt=True )
points = self.points()
sop = self.pointsSop()
sopPath = sop.path()
orig = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertNotEqual( orig, points )
sop.setHardLocked( True )
self.assert_( sop.isHardLocked() )
self.assert_( IECoreHoudini.ToHoudiniPointsConverter( points ).convert( sop, True ) )
self.assert_( sop.isHardLocked() )
self.comparePrimAndAppendedSop( points, sop, orig )
result = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertEqual( result.numPoints, orig.numPoints + points.numPoints )
for i in range( 0, points["P"].data.size() ) :
self.assertEqual( result["P"].data[ orig.numPoints + i ], points["P"].data[i] )
hou.hipFile.save( TestToHoudiniPointsConverter.__testScene )
hou.hipFile.clear( suppress_save_prompt=True )
hou.hipFile.load( TestToHoudiniPointsConverter.__testScene )
newSop = hou.node( sopPath )
self.assert_( newSop.isHardLocked() )
self.comparePrimAndAppendedSop( points, newSop, orig )
result = IECoreHoudini.FromHoudiniPointsConverter( newSop ).convert()
self.assertEqual( result.numPoints, orig.numPoints + points.numPoints )
for i in range( 0, points["P"].data.size() ) :
self.assertEqual( result["P"].data[ orig.numPoints + i ], points["P"].data[i] )
def testMultipleConversions( self ) :
points = self.points()
sop = self.pointsSop()
orig = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertNotEqual( orig, points )
self.assert_( not sop.isHardLocked() )
self.assert_( IECoreHoudini.ToHoudiniPointsConverter( points ).convert( sop, True ) )
self.assert_( sop.isHardLocked() )
self.comparePrimAndAppendedSop( points, sop, orig )
result = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertEqual( result.numPoints, orig.numPoints + points.numPoints )
for i in range( 0, points["P"].data.size() ) :
self.assertEqual( result["P"].data[ orig.numPoints + i ], points["P"].data[i] )
self.assert_( sop.isHardLocked() )
self.assert_( IECoreHoudini.ToHoudiniPointsConverter( points ).convert( sop, True ) )
self.assert_( sop.isHardLocked() )
self.comparePrimAndAppendedSop( points, sop, result, multipleConversions=1 )
result = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertEqual( result.numPoints, orig.numPoints + 2*points.numPoints )
for i in range( 0, points["P"].data.size() ) :
self.assertEqual( result["P"].data[ orig.numPoints + i ], points["P"].data[i] )
self.assertEqual( result["P"].data[ orig.numPoints + points.numPoints + i ], points["P"].data[i] )
self.assert_( sop.isHardLocked() )
self.assert_( IECoreHoudini.ToHoudiniPointsConverter( points ).convert( sop, True ) )
self.assert_( sop.isHardLocked() )
self.comparePrimAndAppendedSop( points, sop, result, multipleConversions=2 )
result = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertEqual( result.numPoints, orig.numPoints + 3*points.numPoints )
for i in range( 0, points["P"].data.size() ) :
self.assertEqual( result["P"].data[ orig.numPoints + i ], points["P"].data[i] )
self.assertEqual( result["P"].data[ orig.numPoints + points.numPoints + i ], points["P"].data[i] )
self.assertEqual( result["P"].data[ orig.numPoints + 2*points.numPoints + i ], points["P"].data[i] )
def testObjectWasDeleted( self ) :
points = self.points()
sop = self.pointsSop()
converter = IECoreHoudini.ToHoudiniPointsConverter( points )
self.assert_( converter.convert( sop, False ) )
self.comparePrimAndSop( points, sop )
result = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
del points
sop.setHardLocked( False )
self.assertNotEqual( IECoreHoudini.FromHoudiniPointsConverter( sop ).convert(), result )
self.assert_( converter.convert( sop, False ) )
self.assertEqual( IECoreHoudini.FromHoudiniPointsConverter( sop ).convert(), result )
def testWithUnacceptablePrimVars( self ) :
points = self.points()
points["badDetail"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.TransformationMatrixfData() )
points["badPoint"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, IECore.DoubleVectorData( [ 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5 ] ) )
sop = self.emptySop()
self.assertNotEqual( IECoreHoudini.FromHoudiniPointsConverter( sop ).convert(), points )
self.assert_( IECoreHoudini.ToHoudiniPointsConverter( points ).convert( sop ) )
self.assert_( "badDetail" not in [ x.name() for x in sop.geometry().globalAttribs() ] )
self.assert_( "badPoint" not in [ x.name() for x in sop.geometry().pointAttribs() ] )
result = IECoreHoudini.FromHoudiniPointsConverter( sop ).convert()
self.assertNotEqual( result, points )
self.assert_( "badDetail" not in result )
self.assert_( "badPoint" not in result )
del points["badDetail"]
del points["badPoint"]
self.comparePrimAndSop( points, sop )
def testConvertingOverExistingAttribs( self ) :
points = self.points()
sop = self.emptySop()
detailAttr = sop.createOutputNode( "attribcreate", exact_type_name=True )
detailAttr.parm( "name" ).set( "floatDetail" )
detailAttr.parm( "class" ).set( 0 ) # detail
detailAttr.parm( "type" ).set( 0 ) # float
detailAttr.parm( "size" ).set( 1 ) # 1 element
detailAttr.parm( "value1" ).set( 123.456 )
pointAttr = detailAttr.createOutputNode( "attribcreate", exact_type_name=True )
pointAttr.parm( "name" ).set( "floatPoint" )
pointAttr.parm( "class" ).set( 2 ) # point
pointAttr.parm( "type" ).set( 0 ) # float
pointAttr.parm( "size" ).set( 1 ) # 1 element
pointAttr.parm( "value1" ).set( 123.456 )
self.assertNotEqual( IECoreHoudini.FromHoudiniPointsConverter( pointAttr ).convert(), points )
self.assert_( IECoreHoudini.ToHoudiniPointsConverter( points ).convert( pointAttr ) )
self.comparePrimAndSop( points, pointAttr )
def testConvertingOverExistingAttribsWithDifferentTypes( self ) :
points = self.points()
sop = self.emptySop()
detailAttr = sop.createOutputNode( "attribcreate", exact_type_name=True )
detailAttr.parm( "name" ).set( "floatDetail" )
detailAttr.parm( "class" ).set( 0 ) # detail
detailAttr.parm( "type" ).set( 1 ) # int
detailAttr.parm( "size" ).set( 3 ) # 3 elements
detailAttr.parm( "value1" ).set( 10 )
detailAttr.parm( "value2" ).set( 11 )
detailAttr.parm( "value3" ).set( 12 )
pointAttr = detailAttr.createOutputNode( "attribcreate", exact_type_name=True )
pointAttr.parm( "name" ).set( "floatPoint" )
pointAttr.parm( "class" ).set( 2 ) # point
pointAttr.parm( "type" ).set( 1 ) # int
pointAttr.parm( "size" ).set( 3 ) # 3 elements
pointAttr.parm( "value1" ).set( 10 )
pointAttr.parm( "value2" ).set( 11 )
pointAttr.parm( "value3" ).set( 12 )
self.assertNotEqual( IECoreHoudini.FromHoudiniPointsConverter( pointAttr ).convert(), points )
self.assert_( IECoreHoudini.ToHoudiniPointsConverter( points ).convert( pointAttr ) )
self.comparePrimAndSop( points, pointAttr )
def testVertAttribsCantBeConverted( self ) :
points = self.points()
points["floatVert"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.FaceVarying, IECore.FloatVectorData( 1 ) )
sop = self.emptySop()
self.assertNotEqual( IECoreHoudini.FromHoudiniPointsConverter( sop ).convert(), points )
self.assert_( IECoreHoudini.ToHoudiniPointsConverter( points ).convert( sop ) )
allAttribs = [ x.name() for x in sop.geometry().globalAttribs() ]
allAttribs.extend( [ x.name() for x in sop.geometry().pointAttribs() ] )
allAttribs.extend( [ x.name() for x in sop.geometry().primAttribs() ] )
allAttribs.extend( [ x.name() for x in sop.geometry().vertexAttribs() ] )
self.assert_( "floatVert" not in allAttribs )
del points["floatVert"]
self.comparePrimAndSop( points, sop )
def testAttributeFilter( self ) :
points = self.points()
sop = self.emptySop()
converter = IECoreHoudini.ToHoudiniPointsConverter( points )
self.assertTrue( converter.convert( sop ) )
self.assertEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), ['P', 'Pw', 'color3fPoint', 'floatPoint', 'intPoint', 'm33fPoint', 'm44fPoint', 'quatPoint', 'stringPoint', 'v2fPoint', 'v2iPoint', 'v3fPoint', 'v3iPoint'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['color3fPrim', 'floatPrim', 'intPrim', 'm33fPrim', 'm44fPrim', 'quatPrim', 'stringPrim', 'v2fPrim', 'v2iPrim', 'v3fPrim', 'v3iPrim'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), [] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), ['color3fDetail', 'floatDetail', 'intDetail', 'm33fDetail', 'm44fDetail', 'stringDetail', 'v2fDetail', 'v2iDetail', 'v3fDetail', 'v3iDetail'] )
converter.parameters()["attributeFilter"].setTypedValue( "P *3f*" )
self.assertTrue( converter.convert( sop ) )
self.assertEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), ['P', 'Pw', 'color3fPoint', 'm33fPoint', 'v3fPoint'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), ['color3fPrim', 'm33fPrim', 'v3fPrim'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), [] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), ['color3fDetail', 'm33fDetail', 'v3fDetail'] )
converter.parameters()["attributeFilter"].setTypedValue( "* ^*Detail ^int* ^*Prim" )
self.assertTrue( converter.convert( sop ) )
self.assertEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), ['P', 'Pw', 'color3fPoint', 'floatPoint', 'm33fPoint', 'm44fPoint', 'quatPoint', 'stringPoint', 'v2fPoint', 'v2iPoint', 'v3fPoint', 'v3iPoint'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), [] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), [] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )
def testStandardAttributeConversion( self ) :
sop = self.emptySop()
points = IECore.PointsPrimitive( IECore.V3fVectorData( [ IECore.V3f( 1 ) ] * 10 ) )
points["Cs"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ IECore.V3f( 1, 0, 0 ) ] * 10, IECore.GeometricData.Interpretation.Color ) )
points["width"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1 ] * 10 ) )
points["Pref"] = points["P"]
self.assertTrue( points.arePrimitiveVariablesValid() )
converter = IECoreHoudini.ToHoudiniPointsConverter( points )
self.assertTrue( converter.convert( sop ) )
self.assertEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), ['Cd', 'P', 'Pw', 'pscale', 'rest'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), [] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), [] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )
converter["convertStandardAttributes"].setTypedValue( False )
self.assertTrue( converter.convert( sop ) )
self.assertEqual( sorted([ x.name() for x in sop.geometry().pointAttribs() ]), ['Cs', 'P', 'Pref', 'Pw', 'width'] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().primAttribs() ]), [] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().vertexAttribs() ]), [] )
self.assertEqual( sorted([ x.name() for x in sop.geometry().globalAttribs() ]), [] )
def testName( self ) :
sop = self.emptySop()
points = self.points()
converter = IECoreHoudini.ToHoudiniPointsConverter( points )
# unnamed unless we set the parameter
self.assert_( converter.convert( sop ) )
geo = sop.geometry()
self.assertEqual( sop.geometry().findPrimAttrib( "name" ), None )
converter["name"].setTypedValue( "testPoints" )
self.assert_( converter.convert( sop ) )
geo = sop.geometry()
nameAttr = sop.geometry().findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ "testPoints" ] ) )
self.assertEqual( len([ x for x in geo.prims() if x.attribValue( "name" ) == "testPoints" ]), points.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) )
# blindData still works for backwards compatibility
points.blindData()["name"] = IECore.StringData( "blindPoints" )
converter = IECoreHoudini.ToHoudiniPointsConverter( points )
self.assert_( converter.convert( sop ) )
geo = sop.geometry()
nameAttr = sop.geometry().findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ "blindPoints" ] ) )
self.assertEqual( len([ x for x in geo.prims() if x.attribValue( "name" ) == "blindPoints" ]), points.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) )
# name parameter takes preference over blindData
converter["name"].setTypedValue( "testPoints" )
self.assert_( converter.convert( sop ) )
geo = sop.geometry()
nameAttr = sop.geometry().findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ "testPoints" ] ) )
self.assertEqual( len([ x for x in geo.prims() if x.attribValue( "name" ) == "testPoints" ]), points.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) )
def tearDown( self ) :
if os.path.isfile( TestToHoudiniPointsConverter.__testScene ) :
os.remove( TestToHoudiniPointsConverter.__testScene )
if __name__ == "__main__":
unittest.main()
|
|
from pythran.tests import TestEnv
from pythran.typing import *
class TestCopperhead(TestEnv):
# from copperhead test suite
# https://github.com/copperhead
def test_saxpy(self):
self.run_test(
"def saxpy(a, x, y): return list(map(lambda xi, yi: a * xi + yi, x, y))",
1.5, [1,2,3], [0.,2.,4.],
saxpy=[float, List[int], List[float]])
def test_saxpy2(self):
self.run_test(
"def saxpy2(a, x, y): return [a*xi+yi for xi,yi in zip(x,y)]",
1.5, [1,2,3], [0.,2.,4.],
saxpy2=[float,List[int], List[float]])
def test_saxpy3(self):
code="""
def saxpy3(a, x, y):
def triad(xi, yi): return a * xi + yi
return list(map(triad, x, y))
"""
self.run_test(
code,
1.5, [1,2,3], [0.,2.,4.],
saxpy3=[float,List[int], List[float]])
def test_saxpy4(self):
code="""
def saxpy4(a, x, y):
return manual(y,x,a)
def manual(y,x,a):
__list=list()
for __tuple in zip(y,x):
__list.append(__tuple[0]*a+__tuple[1])
return __list
"""
self.run_test(
code,
1.5, [1,2,3], [0.,2.,4.],
saxpy4=[float,List[int], List[float]])
def test_sxpy(self):
code="""
def sxpy(x, y):
def duad(xi, yi): return xi + yi
return list(map(duad, x, y))
"""
self.run_test(
code,
[1,2,3], [0.,2.,4.],
sxpy=[List[int], List[float]])
def test_incr(self):
self.run_test(
"def incr(x): return list(map(lambda xi: xi + 1, x))",
[0., 0., 0.],
incr=[List[float]])
def test_as_ones(self):
self.run_test(
"def as_ones(x): return list(map(lambda xi: 1, x))",
[0., 0., 0.],
as_ones=[List[float]])
def test_idm(self):
self.run_test(
"def idm(x): return list(map(lambda b: b, x))",
[1, 2, 3],
idm=[List[int]])
def test_incr_list(self):
self.run_test(
"def incr_list(x): return [xi + 1 for xi in x]",
[1., 2., 3.],
incr_list=[List[float]])
def test_idx(self):
code="""
def idx(x):
def id(xi): return xi
return list(map(id, x))"""
self.run_test(code, [1,2,3], idx=[List[int]])
def test_rbf(self):
code="""
from math import exp
def norm2_diff(x, y):
def el(xi, yi):
diff = xi - yi
return diff * diff
return sum(map(el, x, y))
def rbf(ngamma, x, y):
return exp(ngamma * norm2_diff(x,y))"""
self.run_test(
code,
2.3, [1,2,3], [1.1,1.2,1.3],
rbf=[float, List[int], List[float]])
# from copperhead-new/copperhead/prelude.py
def test_indices(self):
self.run_test(
"def indices(A):return list(range(len(A)))",
[1,2],
indices=[List[int]])
def test_gather(self):
self.run_test(
"def gather(x, indices): return [x[i] for i in indices]",
[1,2,3,4,5], [0,2,4],
gather=[List[int], List[int]])
def test_scatter(self):
code="""
def indices(x): return list(range(len(x)))
def scatter(src, indices_, dst):
assert len(src)==len(indices_)
result = list(dst)
for i in range(len(src)):
result[indices_[i]] = src[i]
return result
"""
self.run_test(
code,
[0.0,1.0,2.,3.,4.,5.,6.,7.,8.,9.],[5,6,7,8,9,0,1,2,3,4],[0,0,0,0,0,0,0,0,0,0,18],
scatter=[List[float], List[int], List[int]])
def test_scan(self):
code="""
def prefix(A): return scan(lambda x,y:x+y, A)
def scan(f, A):
B = list(A)
for i in range(1, len(B)):
B[i] = f(B[i-1], B[i])
return B
"""
self.run_test(code, [1.,2.,3.], prefix=[List[float]])
# from Copperhead: Compiling an Embedded Data Parallel Language
# by Bryan Catanzaro, Michael Garland and Kurt Keutzer
# http://www.eecs.berkeley.edu/Pubs/TechRpts/2010/EECS-2010-124.html
def test_spvv_csr(self):
code="""
def spvv_csr(x, cols, y):
def gather(x, indices): return [x[i] for i in indices]
z = gather(y, cols)
return sum(map(lambda a, b: a * b, x, z))
"""
self.run_test(code, [1,2,3],[0,1,2],[5.5,6.6,7.7], spvv_csr=[List[int], List[int], List[float]])
def test_spmv_csr(self):
code="""
def spvv_csr(x, cols, y):
def gather(x, indices): return [x[i] for i in indices]
z = gather(y, cols)
return sum(map(lambda a, b: a * b, x, z))
def spmv_csr(Ax, Aj, x):
return list(map(lambda y, cols: spvv_csr(y, cols, x), Ax, Aj))
"""
self.run_test(code, [[0,1,2],[0,1,2],[0,1,2]],[[0,1,2],[0,1,2],[0,1,2]],[0,1,2], spmv_csr=[List[List[int]], List[List[int]], List[int]])
def test_spmv_ell(self):
code="""
def indices(x): return range(len(x))
def spmv_ell(data, idx, x):
def kernel(i):
return sum(map(lambda Aj, J: Aj[i] * x[J[i]], data, idx))
return list(map(kernel, indices(x)))
"""
self.run_test(code, [[0,1,2],[0,1,2],[0,1,2]],[[0,1,2],[0,1,2],[0,1,2]],[0,1,2], spmv_ell=[List[List[int]], List[List[int]], List[int]])
def test_vadd(self):
self.run_test("def vadd(x, y): return list(map(lambda a, b: a + b, x, y))", [0.,1.,2.],[5.,6.,7.], vadd=[List[float], List[float]])
def test_vmul(self):
self.run_test("def vmul(x, y): return list(map(lambda a, b: a * b, x, y))", [0.,1.,2.],[5.,6.,7.], vmul=[List[float], List[float]])
def test_form_preconditioner(self):
code="""
def vadd(x, y): return list(map(lambda a, b: a + b, x, y))
def vmul(x, y): return list(map(lambda a, b: a * b, x, y))
def form_preconditioner(a, b, c):
def det_inverse(ai, bi, ci):
return 1.0/(ai * ci - bi * bi)
indets = list(map(det_inverse, a, b, c))
p_a = vmul(indets, c)
p_b = list(map(lambda a, b: -a * b, indets, b))
p_c = vmul(indets, a)
return p_a, p_b, p_c
"""
self.run_test(code, [1,2,3],[0,1,2],[5.5,6.6,7.7],form_preconditioner=[List[int], List[int], List[float]])
def test_precondition(self):
code="""
def precondition(u, v, p_a, p_b, p_c):
def vadd(x, y): return map(lambda a, b: a + b, x, y)
def vmul(x, y): return map(lambda a, b: a * b, x, y)
e = vadd(vmul(p_a, u), vmul(p_b, v))
f = vadd(vmul(p_b, u), vmul(p_c, v))
return list(e), list(f)
"""
self.run_test(code, [1,2,3], [5.5,6.6,7.7],[1,2,3], [5.5,6.6,7.7],[8.8,9.9,10.10], precondition=[List[int], List[float], List[int], List[float], List[float]])
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkPeeringsOperations(object):
"""VirtualNetworkPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified virtual network peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param virtual_network_peering_name: The name of the virtual network peering.
:type virtual_network_peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
virtual_network_peering_name=virtual_network_peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
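# A minimal usage sketch (names are illustrative; assumes this operation group
# is reached as ``virtual_network_peerings`` on a configured
# NetworkManagementClient for api-version 2018-11-01):
#
#   poller = client.virtual_network_peerings.begin_delete(
#       "my-resource-group", "my-vnet", "my-peering")
#   poller.result()  # blocks until the long-running delete completes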
def get(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkPeering"
"""Gets the specified virtual network peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param virtual_network_peering_name: The name of the virtual network peering.
:type virtual_network_peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.VirtualNetworkPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
virtual_network_peering_parameters, # type: "_models.VirtualNetworkPeering"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkPeering"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_network_peering_parameters, 'VirtualNetworkPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
virtual_network_peering_parameters, # type: "_models.VirtualNetworkPeering"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetworkPeering"]
"""Creates or updates a peering in the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param virtual_network_peering_name: The name of the peering.
:type virtual_network_peering_name: str
:param virtual_network_peering_parameters: Parameters supplied to the create or update virtual
network peering operation.
:type virtual_network_peering_parameters: ~azure.mgmt.network.v2018_11_01.models.VirtualNetworkPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetworkPeering or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_11_01.models.VirtualNetworkPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
virtual_network_peering_name=virtual_network_peering_name,
virtual_network_peering_parameters=virtual_network_peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkPeeringListResult"]
"""Gets all virtual network peerings in a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkPeeringListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_11_01.models.VirtualNetworkPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings'} # type: ignore
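# Usage sketch, not part of the generated client: it assumes the multi-api
# NetworkManagementClient exposes this operations class as
# ``client.virtual_network_peerings`` and uses hypothetical resource names.
def _example_peering_calls(subscription_id):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    # Plain GET returns a deserialized VirtualNetworkPeering model.
    peering = client.virtual_network_peerings.get(
        "example-rg", "example-vnet", "example-peering")
    # list() returns an ItemPaged iterator; iterating it pages through the results.
    names = [p.name for p in client.virtual_network_peerings.list(
        "example-rg", "example-vnet")]
    # The begin_* methods return LROPollers; call .result() (or .wait()) to block
    # until the long-running operation completes.
    return peering, names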
|
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.adcpt_m_wvs
@file marine-integrations/mi/dataset/parser/adcpt_m_wvs.py
@author Ronald Ronquillo
@brief Parser for the adcpt_m_wvs dataset driver
This file contains code for the adcpt_m_wvs parser and code to produce data particles.
The wave record structure is an extensible, packed, binary, data format that contains the
processed results of a single burst of ADCP wave data. A burst is typically 20 minutes
of data sampled at 2 Hz. Wave Records are appended together into a file that represents a
deployment time. The wave record usually contains the wave height spectra, directional
spectra, wave parameters, and information about how the data was collected/processed. A
wave record can also contain raw time-series from pressure sensor, surface track and
orbital velocity measurements.
The waves record file (*.WVS) is in binary (HID:7F7A) format.
This format is similar to that of the binary PD0 recovered format.
The data is divided into files of ~20MB size.
Release notes:
Initial Release
"""
__author__ = 'Ronald Ronquillo'
__license__ = 'Apache 2.0'
import calendar
import numpy
import re
import os
import struct
from mi.core.exceptions import RecoverableSampleException, SampleEncodingException
from mi.dataset.dataset_parser import BufferLoadingParser
from mi.core.common import BaseEnum
from mi.core.instrument.dataset_data_particle import DataParticle, DataParticleKey
from mi.core.log import get_logger
log = get_logger()
from mi.dataset.parser.common_regexes import UNSIGNED_INT_REGEX
#Data Type IDs
HEADER = '\x7f\x7a' # Dec: 31359
FIXED_LEADER = 1
VARIABLE_LEADER = 2
VELOCITY_TIME_SERIES = 3
AMPLITUDE_TIME_SERIES = 4
SURFACE_TIME_SERIES = 5
PRESSURE_TIME_SERIES = 6
VELOCITY_SPECTRUM = 7
SURFACE_TRACK_SPECTRUM = 8
PRESSURE_SPECTRUM = 9
DIRECTIONAL_SPECTRUM = 10
WAVE_PARAMETERS = 11
WAVE_PARAMETERS2 = 12
SURFACE_DIR_SPECTRUM = 13
HEADING_PITCH_ROLL_TIME_SERIES = 14
BOTTOM_VELOCITY_TIME_SERIES = 15
ALTITUDE_TIME_SERIES = 16
UNKNOWN = 17
# The particle Data Type ID's that must be filled in a particle
EXPECTED_PARTICLE_IDS_SET = frozenset(
[FIXED_LEADER,
VARIABLE_LEADER,
VELOCITY_SPECTRUM,
SURFACE_TRACK_SPECTRUM,
PRESSURE_SPECTRUM,
DIRECTIONAL_SPECTRUM,
WAVE_PARAMETERS,
HEADING_PITCH_ROLL_TIME_SERIES])
class AdcptMWVSParticleKey(BaseEnum):
"""
Class that defines fields that need to be extracted from the data
"""
FILE_TIME = "file_time"
SEQUENCE_NUMBER = "sequence_number"
FILE_MODE = "file_mode"
REC_TIME_SERIES = "rec_time_series"
REC_SPECTRA = "rec_spectra"
REC_DIR_SPEC = "rec_dir_spec"
SAMPLES_PER_BURST = "samples_per_burst"
TIME_BETWEEN_SAMPLES = "time_between_samples"
TIME_BETWEEN_BURSTS_SEC = "time_between_bursts_sec"
BIN_SIZE = "bin_size"
BIN_1_MIDDLE = "bin_1_middle"
NUM_RANGE_BINS = "num_range_bins"
NUM_VEL_BINS = "num_vel_bins"
NUM_INT_BINS = "num_int_bins"
NUM_BEAMS = "num_beams"
BEAM_CONF = "beam_conf"
WAVE_PARAM_SOURCE = "wave_param_source"
NFFT_SAMPLES = "nfft_samples"
NUM_DIRECTIONAL_SLICES = "num_directional_slices"
NUM_FREQ_BINS = "num_freq_bins"
WINDOW_TYPE = "window_type"
USE_PRESS_4_DEPTH = "use_press_4_depth"
USE_STRACK_4_DEPTH = "use_strack_4_depth"
STRACK_SPEC = "strack_spec"
PRESS_SPEC = "press_spec"
VEL_MIN = "vel_min"
VEL_MAX = "vel_max"
VEL_STD = "vel_std"
VEL_MAX_CHANGE = "vel_max_change"
VEL_PCT_GD = "vel_pct_gd"
SURF_MIN = "surf_min"
SURF_MAX = "surf_max"
SURF_STD = "surf_std"
SURF_MAX_CHNG = "surf_max_chng"
SURF_PCT_GD = "surf_pct_gd"
TBE_MAX_DEV = "tbe_max_dev"
H_MAX_DEV = "h_max_dev"
PR_MAX_DEV = "pr_max_dev"
NOM_DEPTH = "nom_depth"
CAL_PRESS = "cal_press"
DEPTH_OFFSET = "depth_offset"
CURRENTS = "currents"
SMALL_WAVE_FREQ = "small_wave_freq"
SMALL_WAVE_THRESH = "small_wave_thresh"
TILTS = "tilts"
FIXED_PITCH = "fixed_pitch"
FIXED_ROLL = "fixed_roll"
BOTTOM_SLOPE_X = "bottom_slope_x"
BOTTOM_SLOPE_Y = "bottom_slope_y"
DOWN = "down"
TRANS_V2_SURF = "trans_v2_surf"
SCALE_SPEC = "scale_spec"
SAMPLE_RATE = "sample_rate"
FREQ_THRESH = "freq_thresh"
DUMMY_SURF = "dummy_surf"
REMOVE_BIAS = "remove_bias"
DIR_CUTOFF = "dir_cutoff"
HEADING_VARIATION = "heading_variation"
SOFT_REV = "soft_rev"
CLIP_PWR_SPEC = "clip_pwr_spec"
DIR_P2 = "dir_p2"
HORIZONTAL = "horizontal"
START_TIME = "start_time"
STOP_TIME = "stop_time"
FREQ_LO = "freq_lo"
AVERAGE_DEPTH = "average_depth"
ALTITUDE = "altitude"
BIN_MAP = "bin_map"
DISC_FLAG = "disc_flag"
PCT_GD_PRESS = "pct_gd_press"
AVG_SS = "avg_ss"
AVG_TEMP = "avg_temp"
PCT_GD_SURF = "pct_gd_surf"
PCT_GD_VEL = "pct_gd_vel"
HEADING_OFFSET = "heading_offset"
HS_STD = "hs_std"
VS_STD = "vs_std"
PS_STD = "ps_std"
DS_FREQ_HI = "ds_freq_hi"
VS_FREQ_HI = "vs_freq_hi"
PS_FREQ_HI = "ps_freq_hi"
SS_FREQ_HI = "ss_freq_hi"
X_VEL = "x_vel"
Y_VEL = "y_vel"
AVG_PITCH = "avg_pitch"
AVG_ROLL = "avg_roll"
AVG_HEADING = "avg_heading"
SAMPLES_COLLECTED = "samples_collected"
VSPEC_PCT_MEASURED = "vspec_pct_measured"
VSPEC_NUM_FREQ = "vspec_num_freq"
VSPEC_DAT = "vspec_dat"
SSPEC_NUM_FREQ = "sspec_num_freq"
SSPEC_DAT = "sspec_dat"
PSPEC_NUM_FREQ = "pspec_num_freq"
PSPEC_DAT = "pspec_dat"
DSPEC_NUM_FREQ = "dspec_num_freq"
DSPEC_NUM_DIR = "dspec_num_dir"
DSPEC_GOOD = "dspec_good"
DSPEC_DAT = "dspec_dat"
WAVE_HS1 = "wave_hs1"
WAVE_TP1 = "wave_tp1"
WAVE_DP1 = "wave_dp1"
WAVE_HS2 = "wave_hs2"
WAVE_TP2 = "wave_tp2"
WAVE_DP2 = "wave_dp2"
WAVE_DM = "wave_dm"
HPR_NUM_SAMPLES = "hpr_num_samples"
BEAM_ANGLE = "beam_angle"
HEADING_TIME_SERIES = "heading_time_series"
PITCH_TIME_SERIES = "pitch_time_series"
ROLL_TIME_SERIES = "roll_time_series"
SPARE = "spare"
# Basic patterns
common_matches = {
'UINT': UNSIGNED_INT_REGEX,
'HEADER': HEADER
}
common_matches.update(AdcptMWVSParticleKey.__dict__)
# Regex to extract just the timestamp from the WVS log file name
# (path/to/CE01ISSM-ADCPT_YYYYMMDD_###_TS.WVS)
# 'CE01ISSM-ADCPT_20140418_000_TS1404180021.WVS'
# 'CE01ISSM-ADCPT_20140418_000_TS1404180021 - excerpt.WVS'
FILE_NAME_MATCHER = re.compile(r"""(?x)
%(UINT)s_(?P<%(SEQUENCE_NUMBER)s> %(UINT)s)_TS(?P<%(FILE_TIME)s> %(UINT)s).*?\.WVS
""" % common_matches, re.VERBOSE | re.DOTALL)
# Regex used by the sieve_function
# Header data: ie. \x7f\x7a followed by 10 bytes of binary data
HEADER_MATCHER = re.compile(r"""(?x)
%(HEADER)s(?P<Spare1> (.){2}) (?P<Record_Size> (.{4})) (?P<Spare2_4> (.){3}) (?P<NumDataTypes> (.))
""" % common_matches, re.VERBOSE | re.DOTALL)
def make_null_parameters(rules):
"""
Get the parameter names from an encoding rules list and create a list with NULL parameters
"""
return_list = []
for key in rules:
if AdcptMWVSParticleKey.SPARE not in key:
if type(key[0]) == list:
return_list.extend([{DataParticleKey.VALUE_ID: keys, DataParticleKey.VALUE: None}
for keys in key[0]])
else:
return_list.append({DataParticleKey.VALUE_ID: key[0], DataParticleKey.VALUE: None})
return return_list
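# Example (sketch): spare entries are skipped and every remaining parameter name is
# paired with a None value, e.g.
#     make_null_parameters([(AdcptMWVSParticleKey.FILE_MODE, 'B'),
#                           (AdcptMWVSParticleKey.SPARE, '17x')])
# returns
#     [{DataParticleKey.VALUE_ID: AdcptMWVSParticleKey.FILE_MODE, DataParticleKey.VALUE: None}]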
# ENCODING RULES = [parameter name, unpack format]
FIXED_LEADER_UNPACKING_RULES = [
(AdcptMWVSParticleKey.FILE_MODE, 'B'),
(AdcptMWVSParticleKey.REC_TIME_SERIES, 'B'),
(AdcptMWVSParticleKey.REC_SPECTRA, 'B'),
(AdcptMWVSParticleKey.REC_DIR_SPEC, 'B'),
(AdcptMWVSParticleKey.SAMPLES_PER_BURST, 'H'),
(AdcptMWVSParticleKey.TIME_BETWEEN_SAMPLES, 'H'),
(AdcptMWVSParticleKey.TIME_BETWEEN_BURSTS_SEC, 'H'),
(AdcptMWVSParticleKey.BIN_SIZE, 'H'),
(AdcptMWVSParticleKey.BIN_1_MIDDLE, 'H'),
(AdcptMWVSParticleKey.NUM_RANGE_BINS, 'B'),
(AdcptMWVSParticleKey.NUM_VEL_BINS, 'B'),
(AdcptMWVSParticleKey.NUM_INT_BINS, 'B'),
(AdcptMWVSParticleKey.NUM_BEAMS, 'B'),
(AdcptMWVSParticleKey.BEAM_CONF, 'B'),
(AdcptMWVSParticleKey.WAVE_PARAM_SOURCE, 'B'),
(AdcptMWVSParticleKey.NFFT_SAMPLES, 'H'),
(AdcptMWVSParticleKey.NUM_DIRECTIONAL_SLICES, 'H'),
(AdcptMWVSParticleKey.NUM_FREQ_BINS, 'H'),
(AdcptMWVSParticleKey.WINDOW_TYPE, 'H'),
(AdcptMWVSParticleKey.USE_PRESS_4_DEPTH, 'B'),
(AdcptMWVSParticleKey.USE_STRACK_4_DEPTH, 'B'),
(AdcptMWVSParticleKey.STRACK_SPEC, 'B'),
(AdcptMWVSParticleKey.PRESS_SPEC, 'B'),
#SCREENING_TYPE_UNPACKING_RULES
(AdcptMWVSParticleKey.VEL_MIN, 'h'),
(AdcptMWVSParticleKey.VEL_MAX, 'h'),
(AdcptMWVSParticleKey.VEL_STD, 'B'),
(AdcptMWVSParticleKey.VEL_MAX_CHANGE, 'H'),
(AdcptMWVSParticleKey.VEL_PCT_GD, 'B'),
(AdcptMWVSParticleKey.SURF_MIN, 'i'),
(AdcptMWVSParticleKey.SURF_MAX, 'i'),
(AdcptMWVSParticleKey.SURF_STD, 'B'),
(AdcptMWVSParticleKey.SURF_MAX_CHNG, 'i'),
(AdcptMWVSParticleKey.SURF_PCT_GD, 'B'),
(AdcptMWVSParticleKey.TBE_MAX_DEV, 'H'),
(AdcptMWVSParticleKey.H_MAX_DEV, 'H'),
(AdcptMWVSParticleKey.PR_MAX_DEV, 'B'),
(AdcptMWVSParticleKey.NOM_DEPTH, 'I'),
(AdcptMWVSParticleKey.CAL_PRESS, 'B'),
(AdcptMWVSParticleKey.DEPTH_OFFSET, 'i'),
(AdcptMWVSParticleKey.CURRENTS, 'B'),
(AdcptMWVSParticleKey.SMALL_WAVE_FREQ, 'H'),
(AdcptMWVSParticleKey.SMALL_WAVE_THRESH, 'h'),
(AdcptMWVSParticleKey.TILTS, 'B'),
(AdcptMWVSParticleKey.FIXED_PITCH, 'h'),
(AdcptMWVSParticleKey.FIXED_ROLL, 'h'),
(AdcptMWVSParticleKey.BOTTOM_SLOPE_X, 'h'),
(AdcptMWVSParticleKey.BOTTOM_SLOPE_Y, 'h'),
(AdcptMWVSParticleKey.DOWN, 'B'),
(AdcptMWVSParticleKey.SPARE, '17x'),
#END_SCREENING_TYPE_UNPACKING_RULES
(AdcptMWVSParticleKey.TRANS_V2_SURF, 'B'),
(AdcptMWVSParticleKey.SCALE_SPEC, 'B'),
(AdcptMWVSParticleKey.SAMPLE_RATE, 'f'),
(AdcptMWVSParticleKey.FREQ_THRESH, 'f'),
(AdcptMWVSParticleKey.DUMMY_SURF, 'B'),
(AdcptMWVSParticleKey.REMOVE_BIAS, 'B'),
(AdcptMWVSParticleKey.DIR_CUTOFF, 'H'),
(AdcptMWVSParticleKey.HEADING_VARIATION, 'h'),
(AdcptMWVSParticleKey.SOFT_REV, 'B'),
(AdcptMWVSParticleKey.CLIP_PWR_SPEC, 'B'),
(AdcptMWVSParticleKey.DIR_P2, 'B'),
(AdcptMWVSParticleKey.HORIZONTAL, 'B')
]
NULL_FIXED_LEADER = make_null_parameters(FIXED_LEADER_UNPACKING_RULES)
VARIABLE_LEADER_UNPACKING_RULES = [
(AdcptMWVSParticleKey.START_TIME, '8B'),
(AdcptMWVSParticleKey.STOP_TIME, '8B'),
(AdcptMWVSParticleKey.FREQ_LO, 'H'),
(AdcptMWVSParticleKey.AVERAGE_DEPTH, 'I'),
(AdcptMWVSParticleKey.ALTITUDE, 'I'),
(AdcptMWVSParticleKey.BIN_MAP, '128b'),
(AdcptMWVSParticleKey.DISC_FLAG, 'B'),
(AdcptMWVSParticleKey.PCT_GD_PRESS, 'B'),
(AdcptMWVSParticleKey.AVG_SS, 'H'),
(AdcptMWVSParticleKey.AVG_TEMP, 'H'),
(AdcptMWVSParticleKey.PCT_GD_SURF, 'B'),
(AdcptMWVSParticleKey.PCT_GD_VEL, 'B'),
(AdcptMWVSParticleKey.HEADING_OFFSET, 'h'),
(AdcptMWVSParticleKey.HS_STD, 'I'),
(AdcptMWVSParticleKey.VS_STD, 'I'),
(AdcptMWVSParticleKey.PS_STD, 'I'),
(AdcptMWVSParticleKey.DS_FREQ_HI, 'I'),
(AdcptMWVSParticleKey.VS_FREQ_HI, 'I'),
(AdcptMWVSParticleKey.PS_FREQ_HI, 'I'),
(AdcptMWVSParticleKey.SS_FREQ_HI, 'I'),
(AdcptMWVSParticleKey.X_VEL, 'h'),
(AdcptMWVSParticleKey.Y_VEL, 'h'),
(AdcptMWVSParticleKey.AVG_PITCH, 'h'),
(AdcptMWVSParticleKey.AVG_ROLL, 'h'),
(AdcptMWVSParticleKey.AVG_HEADING, 'h'),
(AdcptMWVSParticleKey.SAMPLES_COLLECTED, 'h'),
(AdcptMWVSParticleKey.VSPEC_PCT_MEASURED, 'h')
]
NULL_VARIABLE_LEADER = make_null_parameters(VARIABLE_LEADER_UNPACKING_RULES)
VELOCITY_SPECTRUM_UNPACKING_RULES = [
(AdcptMWVSParticleKey.VSPEC_NUM_FREQ, 'H'),
(AdcptMWVSParticleKey.VSPEC_DAT, 'i')
]
NULL_VELOCITY_SPECTRUM = make_null_parameters(VELOCITY_SPECTRUM_UNPACKING_RULES)
SURFACE_TRACK_SPECTRUM_UNPACKING_RULES = [
(AdcptMWVSParticleKey.SSPEC_NUM_FREQ, 'H'),
(AdcptMWVSParticleKey.SSPEC_DAT, 'i')
]
NULL_SURFACE_TRACK_SPECTRUM = make_null_parameters(SURFACE_TRACK_SPECTRUM_UNPACKING_RULES)
PRESSURE_SPECTRUM_UNPACKING_RULES = [
(AdcptMWVSParticleKey.PSPEC_NUM_FREQ, 'H'),
(AdcptMWVSParticleKey.PSPEC_DAT, 'i')
]
NULL_PRESSURE_SPECTRUM = make_null_parameters(PRESSURE_SPECTRUM_UNPACKING_RULES)
DIRECTIONAL_SPECTRUM_UNPACKING_RULES = [
(AdcptMWVSParticleKey.DSPEC_NUM_FREQ, 'H'), # COUNT uint32[dspec_num_freq][dspec_num_dir]
(AdcptMWVSParticleKey.DSPEC_NUM_DIR, 'H'), # COUNT
(AdcptMWVSParticleKey.DSPEC_GOOD, 'H'),
(AdcptMWVSParticleKey.DSPEC_DAT, 'I')
]
NULL_DIRECTIONAL_SPECTRUM = make_null_parameters(DIRECTIONAL_SPECTRUM_UNPACKING_RULES)
WAVE_PARAMETER_UNPACKING_RULES = [
(AdcptMWVSParticleKey.WAVE_HS1, 'h'),
(AdcptMWVSParticleKey.WAVE_TP1, 'h'),
(AdcptMWVSParticleKey.WAVE_DP1, 'h'),
(AdcptMWVSParticleKey.SPARE, 'x'),
(AdcptMWVSParticleKey.WAVE_HS2, 'h'),
(AdcptMWVSParticleKey.WAVE_TP2, 'h'),
(AdcptMWVSParticleKey.WAVE_DP2, 'h'),
(AdcptMWVSParticleKey.WAVE_DM, 'h')
]
NULL_WAVE_PARAMETER = make_null_parameters(WAVE_PARAMETER_UNPACKING_RULES)
HPR_TIME_SERIES_UNPACKING_RULES = [
(AdcptMWVSParticleKey.HPR_NUM_SAMPLES, 'H'), # COUNT
(AdcptMWVSParticleKey.BEAM_ANGLE, 'H'),
(AdcptMWVSParticleKey.SPARE, 'H'),
([AdcptMWVSParticleKey.HEADING_TIME_SERIES,
AdcptMWVSParticleKey.PITCH_TIME_SERIES,
AdcptMWVSParticleKey.ROLL_TIME_SERIES]
, 'h')
]
NULL_HPR_TIME_SERIES = make_null_parameters(HPR_TIME_SERIES_UNPACKING_RULES)
# Offsets for reading a record and its header
HEADER_NUM_DATA_TYPES_OFFSET = 11
HEADER_OFFSETS_OFFSET = 12
ID_TYPE_SIZE = 2
# Size and Indices used for unpacking Heading, Pitch, Role data type
HPR_TIME_SERIES_ARRAY_SIZE = 3
HEADING_TIME_SERIES_IDX = 0
PITCH_TIME_SERIES_IDX = 1
ROLL_TIME_SERIES_IDX = 2
# Indices into an encoding rules list
UNPACK_RULES = 0
ENCODE_FUNC = 1
ENCODE_NULL = 2
class DataParticleType(BaseEnum):
"""
Class that defines the data particles generated from the adcpt_m WVS recovered data
"""
SAMPLE = 'adcpt_m_wvs_recovered' # instrument data particle
class AdcptMWVSInstrumentDataParticle(DataParticle):
"""
Class for generating the adcpt_m_instrument_wvs_recovered data particle.
"""
_data_particle_type = DataParticleType.SAMPLE
def __init__(self, raw_data, sequence_number, file_time, **kwargs):
self._sequence_number = sequence_number
self._file_time = file_time
# Data Type ID: [Unpacking Rules, Encoding Function, NULL Filler]
self.encoding_func_dict = {
FIXED_LEADER: [FIXED_LEADER_UNPACKING_RULES,
self._parse_values, NULL_FIXED_LEADER],
VARIABLE_LEADER: [VARIABLE_LEADER_UNPACKING_RULES,
self._parse_values, NULL_VARIABLE_LEADER],
VELOCITY_SPECTRUM: [VELOCITY_SPECTRUM_UNPACKING_RULES,
self._parse_values_with_array, NULL_VELOCITY_SPECTRUM],
SURFACE_TRACK_SPECTRUM: [SURFACE_TRACK_SPECTRUM_UNPACKING_RULES,
self._parse_values_with_array, NULL_SURFACE_TRACK_SPECTRUM],
PRESSURE_SPECTRUM: [PRESSURE_SPECTRUM_UNPACKING_RULES,
self._parse_values_with_array, NULL_PRESSURE_SPECTRUM],
DIRECTIONAL_SPECTRUM: [DIRECTIONAL_SPECTRUM_UNPACKING_RULES,
self._parse_directional_spectrum, NULL_DIRECTIONAL_SPECTRUM],
WAVE_PARAMETERS: [WAVE_PARAMETER_UNPACKING_RULES,
self._parse_values, NULL_WAVE_PARAMETER],
HEADING_PITCH_ROLL_TIME_SERIES: [HPR_TIME_SERIES_UNPACKING_RULES,
self._parse_hpr_time_series, NULL_HPR_TIME_SERIES]
}
super(AdcptMWVSInstrumentDataParticle, self).__init__(raw_data, **kwargs)
def _build_parsed_values(self):
"""
Build parsed values for Recovered Instrument Data Particle.
"""
self.final_result = []
retrieved_data_types = set() # keep track of data type ID's unpacked from record
# Get the file time from the file name
if self._file_time:
self.final_result.append(self._encode_value(
AdcptMWVSParticleKey.FILE_TIME, self._file_time, str))
else:
self.final_result.append({DataParticleKey.VALUE_ID: AdcptMWVSParticleKey.FILE_TIME,
DataParticleKey.VALUE: None})
# Get the sequence number from the file name
if self._sequence_number:
self.final_result.append(self._encode_value(
AdcptMWVSParticleKey.SEQUENCE_NUMBER, self._sequence_number, int))
else:
self.final_result.append({DataParticleKey.VALUE_ID: AdcptMWVSParticleKey.SEQUENCE_NUMBER,
DataParticleKey.VALUE: None})
# Get the number of data types from the Header
        num_data_types, = struct.unpack_from('<B', self.raw_data, HEADER_NUM_DATA_TYPES_OFFSET)
# Get the list of offsets from the Header
offsets = struct.unpack_from('<%sI' % num_data_types, self.raw_data, HEADER_OFFSETS_OFFSET)
# Unpack Type IDs from the offsets
for offset in offsets:
data_type_id, = struct.unpack_from('<h', self.raw_data, offset)
# keep track of retrieved data types
retrieved_data_types.add(data_type_id)
# Feed the data through the corresponding encoding function and unpacking rules
try:
self.encoding_func_dict[data_type_id][ENCODE_FUNC](
offset + ID_TYPE_SIZE, self.encoding_func_dict[data_type_id][UNPACK_RULES])
except KeyError:
log.debug("Skipping unsupported data type ID: %s at offset: %s",
data_type_id, offset)
# go through the list of expected data type ID's, fill in None for missing data type ID's
missing_data = EXPECTED_PARTICLE_IDS_SET.difference(retrieved_data_types)
for data_type_id in missing_data:
if data_type_id is VARIABLE_LEADER:
# timestamp is essential for a data particle - no timestamp, no particle
message = "Variable Leader Data Type is required for internal timestamp, " \
"particle ignored."
log.warn(message)
raise RecoverableSampleException(message)
self.final_result.extend(self.encoding_func_dict[data_type_id][ENCODE_NULL])
log.trace("FINAL RESULT: %s\n", self.final_result)
return self.final_result
def _parse_directional_spectrum(self, offset, rules):
"""
Convert the binary data into particle data for the Directional Spectrum Data Type
"""
# Unpack the unpacking rules
(num_freq_name, num_dir_name, good_name, dat_name),\
(num_freq_fmt, num_dir_fmt, good_fmt, dat_fmt) = zip(*rules)
# First unpack the array lengths and single length values
(num_freq_data, num_dir_data, dspec_good_data) = struct.unpack_from(
'<%s%s%s' % (num_freq_fmt, num_dir_fmt, good_fmt), self.raw_data, offset)
# Then unpack the array using the retrieved lengths values
next_offset = offset + struct.calcsize(num_freq_fmt) + struct.calcsize(num_dir_fmt) + \
struct.calcsize(good_fmt)
dspec_dat_list_data = struct.unpack_from(
'<%s%s' % (num_freq_data * num_dir_data, dat_fmt), self.raw_data, next_offset)
# convert to numpy array and reshape the data per IDD spec
transformed_dat_data = numpy.array(dspec_dat_list_data).reshape(
(num_freq_data, num_dir_data)).tolist()
# Add to the collected parameter data
self.final_result.extend(
({DataParticleKey.VALUE_ID: num_freq_name, DataParticleKey.VALUE: num_freq_data},
{DataParticleKey.VALUE_ID: num_dir_name, DataParticleKey.VALUE: num_dir_data},
{DataParticleKey.VALUE_ID: good_name, DataParticleKey.VALUE: dspec_good_data},
{DataParticleKey.VALUE_ID: dat_name, DataParticleKey.VALUE: transformed_dat_data}))
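    # Reshape sketch: dspec_dat is unpacked as dspec_num_freq * dspec_num_dir flat values
    # and reshaped so transformed_dat_data[freq][dir] indexes a single bin, e.g. with
    # 2 frequencies and 3 directions [1, 2, 3, 4, 5, 6] -> [[1, 2, 3], [4, 5, 6]].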
def _parse_hpr_time_series(self, offset, rules):
"""
Convert the binary data into particle data for the Heading, Pitch, Time Series Data Type
"""
# Unpack the unpacking rules
(hpr_num_name, beam_angle_name, spare_name, hpr_time_names),\
(hpr_num_fmt, beam_angle_fmt, spare_fmt, hpr_time_fmt) = zip(*rules)
# First unpack the array length and single length value, no need to unpack spare
(hpr_num_data, beam_angle_data) = struct.unpack_from(
'<%s%s' % (hpr_num_fmt, beam_angle_fmt), self.raw_data, offset)
# Then unpack the array using the retrieved lengths value
next_offset = offset + struct.calcsize(hpr_num_fmt) + struct.calcsize(beam_angle_fmt) + \
struct.calcsize(spare_fmt)
hpr_time_list_data = struct.unpack_from(
'<%s%s' % (hpr_num_data * HPR_TIME_SERIES_ARRAY_SIZE, hpr_time_fmt), self.raw_data, next_offset)
# convert to numpy array and reshape the data to a 2d array per IDD spec
transformed_hpr_time_data = numpy.array(hpr_time_list_data).reshape(
(hpr_num_data, HPR_TIME_SERIES_ARRAY_SIZE)).transpose().tolist()
# Add to the collected parameter data
self.final_result.extend(
({DataParticleKey.VALUE_ID: hpr_num_name, DataParticleKey.VALUE: hpr_num_data},
{DataParticleKey.VALUE_ID: beam_angle_name, DataParticleKey.VALUE: beam_angle_data},
{DataParticleKey.VALUE_ID: hpr_time_names[HEADING_TIME_SERIES_IDX],
DataParticleKey.VALUE: transformed_hpr_time_data[HEADING_TIME_SERIES_IDX]},
{DataParticleKey.VALUE_ID: hpr_time_names[PITCH_TIME_SERIES_IDX],
DataParticleKey.VALUE: transformed_hpr_time_data[PITCH_TIME_SERIES_IDX]},
{DataParticleKey.VALUE_ID: hpr_time_names[ROLL_TIME_SERIES_IDX],
DataParticleKey.VALUE: transformed_hpr_time_data[ROLL_TIME_SERIES_IDX]}))
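    # Transpose sketch: the flat data holds one heading/pitch/roll triplet per sample,
    # so with hpr_num_samples == 2 the values [h0, p0, r0, h1, p1, r1] become
    # [[h0, h1], [p0, p1], [r0, r1]] -- one time series per axis -- after
    # reshape((2, 3)).transpose().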
def _parse_values(self, offset, rules):
"""
Convert the binary data into particle data for the given rules
"""
position = offset
# Iterate through the unpacking rules and append the retrieved values with its corresponding
# particle name
for key, formatter in rules:
# Skip over spare values
if AdcptMWVSParticleKey.SPARE in key:
position += struct.calcsize(formatter)
continue
value = list(struct.unpack_from('<%s' % formatter, self.raw_data, position))
# Support unpacking single values and lists
if len(value) == 1:
value = value[0]
if AdcptMWVSParticleKey.START_TIME in key:
timestamp = ((value[0]*100 + value[1]), value[2], value[3], value[4],
value[5], value[6], value[7], 0, 0)
log.trace("TIMESTAMP: %s", timestamp)
elapsed_seconds = calendar.timegm(timestamp)
self.set_internal_timestamp(unix_time=elapsed_seconds)
log.trace("DATA: %s:%s @ %s", key, value, position)
position += struct.calcsize(formatter)
self.final_result.append({DataParticleKey.VALUE_ID: key,
DataParticleKey.VALUE: value})
def _parse_values_with_array(self, offset, rules):
"""
Convert the binary data into particle data for the given rules
Assumes first value to unpack contains the size of the array for the second value to unpack
"""
# Unpack the unpacking rules
(param_size_name, param_list_name), (param_size_fmt, param_list_fmt) = zip(*rules)
# First unpack the array length value
num_data, = struct.unpack_from('<%s' % param_size_fmt, self.raw_data, offset)
# Then unpack the array using the retrieved length value, casting from a tuple to a list
param_list_data = list(
struct.unpack_from('<%s%s' % (num_data, param_list_fmt),
self.raw_data, offset + struct.calcsize(param_size_fmt)))
# Add to the collected parameter data
self.final_result.extend(
({DataParticleKey.VALUE_ID: param_size_name, DataParticleKey.VALUE: num_data},
{DataParticleKey.VALUE_ID: param_list_name, DataParticleKey.VALUE: param_list_data}))
class AdcptMWVSParser(BufferLoadingParser):
"""
Parser for WVS data.
    Makes use of the buffer loading parser to buffer the data as it is read in
    1024-byte chunks and framed into records by the sieve function below.
"""
def __init__(self, config, stream_handle, exception_callback):
self.particle_count = 0
self.file_size = os.fstat(stream_handle.fileno()).st_size
super(AdcptMWVSParser, self).__init__(
config,
stream_handle,
None, # state is no longer used
self.sieve_function,
lambda state, ingested: None, # state_callback no longer used
lambda data: log.trace("Found data: %s", data), # publish_callback
exception_callback)
def sieve_function(self, input_buffer):
"""
Sort through the input buffer looking for a data record.
A data record is considered to be properly framed if there is a
sync word and the appropriate size followed by the next sync word.
Note: this binary data has no checksum to verify against.
Arguments:
input_buffer - the contents of the input stream
Returns:
A list of start,end tuples
"""
indices_list = [] # initialize the return list to empty
# File is being read 1024 bytes at a time
# Match a Header up to the "number of data types" value
#find all occurrences of the record header sentinel
header_iter = HEADER_MATCHER.finditer(input_buffer)
for match in header_iter:
record_start = match.start()
record_size, = struct.unpack('I', match.group('Record_Size'))
record_end = record_start + record_size
num_data, = struct.unpack('B', match.group('NumDataTypes'))
# Get a whole record based on meeting the expected size and matching the next sentinel
if len(input_buffer) - match.start() >= record_size + ID_TYPE_SIZE \
and HEADER == input_buffer[record_end:record_end + ID_TYPE_SIZE]:
self.particle_count += 1
indices_list.append((record_start, record_end))
log.trace("FOUND RECORD #%s %s:%s ending at %x with %s data types, len: %s",
self.particle_count, record_start, record_end,
self._stream_handle.tell(), num_data, len(input_buffer))
            # If at the end of the file and the buffer holds at least the record size, this is
            # the last record
elif self._stream_handle.tell() == self.file_size and len(input_buffer) >= record_size:
self.particle_count += 1
indices_list.append((record_start, record_end))
log.trace("FOUND RECORD #%s %s:%s ending at %x with %s data types. len: %s",
self.particle_count, record_start, record_end,
self._stream_handle.tell(), num_data, len(input_buffer))
# else record does not contain enough bytes or is misaligned
return indices_list
def handle_non_data(self, non_data, non_end, start):
"""
Handle any non-data that is found in the file
"""
# Handle non-data here.
# Increment the position within the file.
# Use the _exception_callback.
if non_data is not None and non_end <= start:
self._exception_callback(RecoverableSampleException(
"Found %d bytes of un-expected non-data %s" %
(len(non_data), non_data)))
def _process_end_of_file(self):
"""
Override method to use exception call back for corrupt data at the end of a file.
Confirm that the chunker does not have any extra bytes left at the end of the file.
"""
(nd_timestamp, non_data) = self._chunker.get_next_non_data()
if non_data and len(non_data) > 0:
message = "Extra un-expected non-data bytes at the end of the file:%s", non_data
log.warn(message)
self._exception_callback(RecoverableSampleException(message))
def parse_chunks(self):
"""
Parse out any pending data chunks in the chunker.
If it is valid data, build a particle.
Go until the chunker has no more valid data.
@retval a list of tuples with sample particles encountered in this parsing, plus the state.
"""
file_name = self._stream_handle.name
sequence_number = None
file_time = None
# Extract the sequence number & file time from the file name
match = FILE_NAME_MATCHER.search(file_name)
if match:
# store the sequence number & file time to put into the particle
sequence_number = match.group(AdcptMWVSParticleKey.SEQUENCE_NUMBER)
file_time = match.group(AdcptMWVSParticleKey.FILE_TIME)
else:
message = 'Unable to extract file time or sequence number from WVS input file: %s '\
% file_name
log.warn(message)
self._exception_callback(RecoverableSampleException(message))
result_particles = []
nd_timestamp, non_data, non_start, non_end = self._chunker.get_next_non_data_with_index(clean=False)
timestamp, chunk, start, end = self._chunker.get_next_data_with_index(clean=True)
self.handle_non_data(non_data, non_end, start)
while chunk:
particle = self._extract_sample(self._particle_class, sequence_number, file_time,
None, chunk, None)
if particle is not None:
result_particles.append((particle, None))
nd_timestamp, non_data, non_start, non_end = self._chunker.get_next_non_data_with_index(clean=False)
timestamp, chunk, start, end = self._chunker.get_next_data_with_index(clean=True)
self.handle_non_data(non_data, non_end, start)
return result_particles
def _extract_sample(self, particle_class, sequence_number, file_time, regex,
raw_data, timestamp):
"""
        Override method to pass sequence number and file time to the particle. Also needs to
        handle a particle without a timestamp, detected in _build_parsed_values(), which
        raises a RecoverableSampleException and returns a particle with no values.
Extract sample from data if present and publish parsed particle
@param particle_class The class to instantiate for this specific
data particle. Parameterizing this allows for simple, standard
behavior from this routine
        @param regex The regular expression that matches a data sample; if regex
        is None then every chunk is processed
@param raw_data data to input into this particle.
@retval return a raw particle if a sample was found, else None
"""
particle = None
particle_dict = {}
try:
if regex is None or regex.match(raw_data):
particle = particle_class(raw_data, sequence_number, file_time,
internal_timestamp=timestamp,
preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP)
# need to actually parse the particle fields to find out if there are errors
particle_dict = particle.generate_dict()
log.trace('Parsed particle: %s\n\n' % particle_dict)
encoding_errors = particle.get_encoding_errors()
if encoding_errors:
log.warn("Failed to encode: %s", encoding_errors)
raise SampleEncodingException("Failed to encode: %s" % encoding_errors)
# Also catch any possible exceptions thrown from unpacking data
except (RecoverableSampleException, SampleEncodingException, struct.error) as e:
log.error("Sample exception detected: %s raw data: %r", e, raw_data)
if self._exception_callback:
self._exception_callback(e)
else:
raise e
# Do not return a particle if there are no values within
if not particle_dict or not particle_dict.get(DataParticleKey.VALUES):
return None
return particle
|
|
import time
from typing import Any, Dict, List, Optional
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from . import logging
from .access_permissions import BaseAccessPermissions
from .auth import UserDoesNotExist
from .autoupdate import AutoupdateElement, inform_changed_data, inform_elements
from .rest_api import model_serializer_classes
from .utils import convert_camel_case_to_pseudo_snake_case, get_element_id
logger = logging.getLogger(__name__)
class MinMaxIntegerField(models.IntegerField):
"""
IntegerField with options to set a min- and a max-value.
"""
def __init__(
self, min_value: int = None, max_value: int = None, *args: Any, **kwargs: Any
) -> None:
self.min_value, self.max_value = min_value, max_value
super(MinMaxIntegerField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs: Any) -> Any:
defaults = {"min_value": self.min_value, "max_value": self.max_value}
defaults.update(kwargs)
return super(MinMaxIntegerField, self).formfield(**defaults)
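    # Usage sketch (hypothetical model field): the bounds are forwarded to the
    # generated form field via formfield(), e.g.
    #     votes_amount = MinMaxIntegerField(min_value=1, max_value=10, default=1)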
class RESTModelMixin:
"""
Mixin for Django models which are used in our REST API.
"""
access_permissions: Optional[BaseAccessPermissions] = None
personalized_model = False
"""
    Flag indicating whether the model is personalized on a per-user basis.
Requires the model to have a `user_id` which should be
a OneToOne relation to User. The relation must never change,
    because it won't be deleted from its former user when the relation
changes.
"""
def get_root_rest_element(self) -> models.Model:
"""
Returns the root rest instance.
Uses self as default.
"""
return self
@classmethod
def get_access_permissions(cls) -> BaseAccessPermissions:
"""
Returns a container to handle access permissions for this model and
its corresponding viewset.
"""
if cls.access_permissions is None:
raise ImproperlyConfigured(
"A RESTModel needs to have an access_permission."
)
return cls.access_permissions
@classmethod
def get_collection_string(cls) -> str:
"""
Returns the string representing the name of the collection. Returns
        None if this is not a so-called root rest instance.
"""
# TODO Check if this is a root rest element class and return None if not.
app_label = cls._meta.app_label # type: ignore
object_name = cls._meta.object_name # type: ignore
return "/".join(
(
convert_camel_case_to_pseudo_snake_case(app_label),
convert_camel_case_to_pseudo_snake_case(object_name),
)
)
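    # Sketch: for a model ``Item`` in the app ``agenda`` this returns the collection
    # string "agenda/item"; multi-word names are converted by
    # convert_camel_case_to_pseudo_snake_case.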
def get_rest_pk(self) -> int:
"""
Returns the primary key used in the REST API. By default this is
the database pk.
"""
return self.pk # type: ignore
def get_element_id(self) -> str:
return get_element_id(self.get_collection_string(), self.get_rest_pk())
def save(
self,
skip_autoupdate: bool = False,
no_delete_on_restriction: bool = False,
*args: Any,
**kwargs: Any,
) -> Any:
"""
Calls Django's save() method and afterwards hits the autoupdate system.
If skip_autoupdate is set to True, then the autoupdate system is not
        informed about the model change. This also means that the model cache
is not updated. You have to do this manually by calling
inform_changed_data().
"""
# We don't know how to fix this circular import
from .autoupdate import inform_changed_data
return_value = super().save(*args, **kwargs) # type: ignore
if not skip_autoupdate:
inform_changed_data(
self.get_root_rest_element(),
no_delete_on_restriction=no_delete_on_restriction,
)
return return_value
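    # Usage sketch: when changing many instances, skip the per-save autoupdate and
    # send one combined notification afterwards (``some_instances`` is hypothetical):
    #     for instance in some_instances:
    #         instance.save(skip_autoupdate=True)
    #     inform_changed_data(some_instances)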
def delete(self, skip_autoupdate: bool = False, *args: Any, **kwargs: Any) -> Any:
"""
Calls Django's delete() method and afterwards hits the autoupdate system.
If skip_autoupdate is set to True, then the autoupdate system is not
        informed about the model change. This also means that the model cache
is not updated. You have to do this manually by calling
inform_deleted_data().
"""
# We don't know how to fix this circular import
from .autoupdate import inform_changed_data, inform_deleted_data
instance_pk = self.pk # type: ignore
return_value = super().delete(*args, **kwargs) # type: ignore
if not skip_autoupdate:
if self != self.get_root_rest_element():
                # The deletion of an included element is a change of the root element.
inform_changed_data(self.get_root_rest_element())
else:
inform_deleted_data([(self.get_collection_string(), instance_pk)])
return return_value
@classmethod
def get_elements(cls, ids: Optional[List[int]] = None) -> List[Dict[str, Any]]:
"""
Returns all elements as full_data.
"""
do_logging = not bool(ids)
if do_logging:
logger.info(f"Loading {cls.get_collection_string()}")
# Get the query to receive all data from the database.
try:
query = cls.objects.get_prefetched_queryset(ids=ids) # type: ignore
except AttributeError:
            # If the model does not have the method get_prefetched_queryset(), then use
            # the default queryset from Django.
query = cls.objects # type: ignore
if ids:
query = query.filter(pk__in=ids)
# Build a dict from the instance id to the full_data
instances = query.all()
full_data = []
# For logging the progress
last_time = time.time()
instances_length = len(instances)
for i, instance in enumerate(instances):
# Append full data from this instance
full_data.append(instance.get_full_data())
if do_logging:
# log progress every 5 seconds
current_time = time.time()
if current_time > last_time + 5:
last_time = current_time
logger.info(f" {i+1}/{instances_length}...")
return full_data
@classmethod
async def restrict_elements(
cls, user_id: int, elements: List[Dict[str, Any]]
) -> List[Dict[str, Any]]:
"""
Converts a list of elements from full_data to restricted_data.
"""
try:
return await cls.get_access_permissions().get_restricted_data(
elements, user_id
)
except UserDoesNotExist:
return []
def get_full_data(self) -> Dict[str, Any]:
"""
Returns the full_data of the instance.
"""
try:
serializer_class = model_serializer_classes[type(self)]
except KeyError:
            # Because of the order of imports, it can happen that the serializer
# for a model is not imported yet. Try to guess the name of the
# module and import it.
module_name = type(self).__module__.rsplit(".", 1)[0] + ".serializers"
__import__(module_name)
serializer_class = model_serializer_classes[type(self)]
return serializer_class(self).data
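# Usage sketch (hypothetical app): a REST-enabled model mixes RESTModelMixin into a
# Django model and points access_permissions at its BaseAccessPermissions subclass:
#     class Motion(RESTModelMixin, models.Model):
#         access_permissions = MotionAccessPermissions()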
def SET_NULL_AND_AUTOUPDATE(
collector: Any, field: Any, sub_objs: Any, using: Any
) -> None:
"""
Like models.SET_NULL but also informs the autoupdate system about the
    instance that was referenced.
"""
instances = []
for sub_obj in sub_objs:
setattr(sub_obj, field.name, None)
instances.append(sub_obj)
inform_changed_data(instances)
models.SET_NULL(collector, field, sub_objs, using)
def CASCADE_AND_AUTOUPDATE(
collector: Any, field: Any, sub_objs: Any, using: Any
) -> None:
"""
Like models.CASCADE but also informs the autoupdate system about the
root rest element of the also deleted instance.
"""
elements = []
for sub_obj in sub_objs:
root_rest_element = sub_obj.get_root_rest_element()
elements.append(
AutoupdateElement(
collection_string=root_rest_element.get_collection_string(),
id=root_rest_element.get_rest_pk(),
)
)
inform_elements(elements)
models.CASCADE(collector, field, sub_objs, using)
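# Usage sketch (hypothetical foreign keys): pass the helpers as on_delete handlers so
# the autoupdate system is informed when the referenced object is deleted:
#     user = models.ForeignKey("users.User", null=True, on_delete=SET_NULL_AND_AUTOUPDATE)
#     motion = models.ForeignKey("motions.Motion", on_delete=CASCADE_AND_AUTOUPDATE)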
|
|
# coding: spec
from harpoon.executor import docker_context as docker_context_maker
from harpoon.option_spec.harpoon_specs import HarpoonSpec
from harpoon.ship.builder import Builder
from harpoon.ship.runner import Runner
from tests.helpers import HarpoonCase
from delfick_project.norms import Meta
import codecs
import pytest
import uuid
import os
import re
pytestmark = pytest.mark.integration
describe HarpoonCase, "Building docker images":
def make_image(self, options, harpoon_options=None, image_name="awesome_image"):
config_root = self.make_temp_dir()
if harpoon_options is None:
harpoon_options = {}
harpoon_options["docker_context"] = self.docker_client
harpoon_options["docker_context_maker"] = docker_context_maker
harpoon = HarpoonSpec().harpoon_spec.normalise(Meta({}, []), harpoon_options)
if "harpoon" not in options:
options["harpoon"] = harpoon
everything = {"harpoon": harpoon, "_key_name_1": image_name, "config_root": config_root}
return HarpoonSpec().image_spec.normalise(Meta(everything, []), options)
it "Builds an image":
ident = str(uuid.uuid1())
ident_tag = "{0}:latest".format(ident)
images = self.docker_api.images()
repo_tags = [image["RepoTags"] for image in images if image["RepoTags"] is not None]
assert all(ident_tag not in repo_tag_list for repo_tag_list in repo_tags), images
conf = self.make_image(
{"context": False, "commands": ["FROM {0}".format(os.environ["BASE_IMAGE"])]}
)
conf.image_name = ident
Builder().build_image(conf)
images = self.docker_api.images()
repo_tags = [image["RepoTags"] for image in images if image["RepoTags"] is not None]
assert any(
ident_tag in repo_tag_list for repo_tag_list in repo_tags
), "Couldn't find {0} in {1}".format(ident_tag, images)
self.docker_api.remove_image(ident_tag)
it "knows if the build was cached":
from_line = "FROM {0}".format(os.environ["BASE_IMAGE"])
commands1 = [from_line]
commands2 = [from_line, "RUN echo {0}".format(uuid.uuid1())]
commands3 = commands2 + [["ADD", {"content": "blah", "dest": "/tmp/blah"}]]
with self.a_built_image({"context": False, "commands": commands1}) as (cached, conf1):
assert cached
with self.a_built_image({"context": False, "commands": commands2}) as (cached, conf2):
assert not cached
with self.a_built_image({"context": False, "commands": commands2}) as (
cached,
conf3,
):
assert cached
with self.a_built_image({"context": False, "commands": commands3}) as (
cached,
conf4,
):
assert not cached
it "can steal files from other containers":
from_line = "FROM {0}".format(os.environ["BASE_IMAGE"])
commands1 = [
from_line,
"RUN mkdir /tmp/blah",
"RUN echo 'lol' > /tmp/blah/one",
"RUN echo 'hehehe' > /tmp/blah/two",
"RUN mkdir /tmp/blah/another",
"RUN echo 'hahahha' > /tmp/blah/another/three",
"RUN echo 'hello' > /tmp/other",
]
conf1 = self.make_image({"context": False, "commands": commands1}, image_name="one")
commands2 = [
from_line,
["ADD", {"dest": "/tmp/copied", "content": {"image": conf1, "path": "/tmp/blah"}}],
[
"ADD",
{"dest": "/tmp/copied/other", "content": {"image": conf1, "path": "/tmp/other"}},
],
"CMD find /tmp/copied -type f | sort | xargs -t cat",
]
fake_sys_stdout = self.make_temp_file()
fake_sys_stderr = self.make_temp_file()
harpoon_options = {
"no_intervention": True,
"stdout": fake_sys_stdout,
"tty_stdout": fake_sys_stdout,
"tty_stderr": fake_sys_stderr,
}
with self.a_built_image(
{"context": False, "commands": commands2},
harpoon_options=harpoon_options,
images={"one": conf1},
image_name="two",
) as (_, conf2):
Runner().run_container(conf2, {"one": conf1, "two": conf2})
with codecs.open(fake_sys_stdout.name) as fle:
output = fle.read().strip()
if isinstance(output, bytes):
output = output.decode("utf-8")
output = "\n".join([line for line in output.split("\n") if "lxc-start" not in line])
expected = """
Step 1(/4)? : .+
.+
Step 2(/4)? : .+
.+
Step 3(/4)? : .+
.+
Step 4(/4)? : .+
.+
.+
Successfully built .+
cat /tmp/copied/another/three /tmp/copied/one /tmp/copied/other /tmp/copied/two
hahahha
lol
hello
hehehe
"""
self.assertReMatchLines(
expected,
output,
remove=[
re.compile("^Successfully tagged .+"),
re.compile("^Removing intermediate container .+"),
],
)
it "can cleanup intermediate images from multi stage builds":
from_line = "FROM {0}".format(os.environ["BASE_IMAGE"])
exist_before = [image["Id"] for image in self.docker_client.api.images()]
untagged_before = [
image["Id"] for image in self.docker_client.api.images(filters={"dangling": True})
]
commands = [
"{0} as base".format(from_line),
'RUN echo "{0}" > /wat'.format(str(uuid.uuid1())),
from_line,
"COPY --from=base /wat /wat",
]
fake_sys_stdout = self.make_temp_file()
fake_sys_stderr = self.make_temp_file()
harpoon_options = {
"no_intervention": True,
"stdout": fake_sys_stdout,
"tty_stdout": fake_sys_stdout,
"tty_stderr": fake_sys_stderr,
}
with self.a_built_image(
{"context": False, "commands": commands},
harpoon_options=harpoon_options,
image_name="one",
) as (_, conf):
assert conf.image_name not in exist_before, (exist_before, conf)
untagged = [
image["Id"] for image in self.docker_client.api.images(filters={"dangling": True})
]
assert untagged == untagged_before
it "can not cleanup intermediate images from multi stage builds":
from_line = "FROM {0}".format(os.environ["BASE_IMAGE"])
exist_before = [image["Id"] for image in self.docker_client.api.images()]
untagged_before = [
image["Id"] for image in self.docker_client.api.images(filters={"dangling": True})
]
u = str(uuid.uuid1())
commands = [
"{0} as base".format(from_line),
'RUN echo "{0}" > /wat'.format(u),
from_line,
"COPY --from=base /wat /wat",
]
commands2 = [from_line, 'RUN echo "{0}" > /wat'.format(u)]
fake_sys_stdout = self.make_temp_file()
fake_sys_stderr = self.make_temp_file()
harpoon_options = {
"no_intervention": True,
"stdout": fake_sys_stdout,
"tty_stdout": fake_sys_stdout,
"tty_stderr": fake_sys_stderr,
}
with self.a_built_image(
{"cleanup_intermediate_images": False, "context": False, "commands": commands},
harpoon_options=harpoon_options,
image_name="one",
) as (_, conf):
assert conf.image_name not in exist_before, (exist_before, conf)
untagged = [
image["Id"] for image in self.docker_client.api.images(filters={"dangling": True})
]
assert len(untagged) == len(untagged_before) + 1
with self.a_built_image(
{"cleanup_intermediate_images": False, "context": False, "commands": commands2},
harpoon_options=harpoon_options,
image_name="one",
) as (cached, _):
assert cached
it "can steal files from other images using staged builds":
from_line = "FROM {0}".format(os.environ["BASE_IMAGE"])
commands1 = [
from_line,
"RUN mkdir /tmp/blah",
"RUN echo 'lol' > /tmp/blah/one",
"RUN echo 'hehehe' > /tmp/blah/two",
"RUN mkdir /tmp/blah/another",
"RUN echo 'hahahha' > /tmp/blah/another/three",
"RUN echo 'hello' > /tmp/other",
]
conf1 = self.make_image({"context": False, "commands": commands1}, image_name="one")
commands2 = [
["FROM", conf1, "as other_image"],
from_line,
["COPY", {"from": conf1, "path": "/tmp/blah", "to": "/tmp/copied"}],
"COPY --from=other_image /tmp/other /tmp/copied/other",
"CMD find /tmp/copied -type f | sort | xargs -t cat",
]
fake_sys_stdout = self.make_temp_file()
fake_sys_stderr = self.make_temp_file()
harpoon_options = {
"no_intervention": True,
"stdout": fake_sys_stdout,
"tty_stdout": fake_sys_stdout,
"tty_stderr": fake_sys_stderr,
}
with self.a_built_image(
{"context": False, "commands": commands2},
harpoon_options=harpoon_options,
images={"one": conf1},
image_name="two",
) as (_, conf2):
Runner().run_container(conf2, {"one": conf1, "two": conf2})
with codecs.open(fake_sys_stdout.name) as fle:
output = fle.read().strip()
if isinstance(output, bytes):
output = output.decode("utf-8")
output = "\n".join([line for line in output.split("\n") if "lxc-start" not in line])
expected = """
Step 1(/5)? : .+
.+
Step 2(/5)? : .+
.+
Step 3(/5)? : .+
.+
Step 4(/5)? : .+
.+
Step 5(/5)? : .+
.+
.+
Successfully built .+
cat /tmp/copied/another/three /tmp/copied/one /tmp/copied/other /tmp/copied/two
hahahha
lol
hello
hehehe
"""
self.assertReMatchLines(
expected,
output,
remove=[
re.compile("^Successfully tagged .+"),
re.compile("^Removing intermediate container .+"),
],
)
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from testrunner import testhelp
from testrunner.testhelp import context
import os
from conary_test import rephelp
from conary_test import recipes
import shutil
from conary import conaryclient, cvc, state
from conary.deps import deps
from conary import errors, conarycfg
from conary.versions import Label, VersionFromString as VFS
from conary.checkin import ConaryStateFromFile, CheckinCallback
from conary import repository
from sourcetest import StatWrapper
from conary.build import use
from conary.conaryclient import branch
from conary.lib import log
class ShadowTest(rephelp.RepositoryHelper):
def _checkLatest(self, trove, verType, ver):
repos = self.openRepository()
if verType[0] == '/':
verDict = repos.getTroveLeavesByBranch(
{ trove : { VFS(verType) : None } } )
else:
verDict = repos.getTroveLeavesByLabel(
{ trove : { Label(verType) : None } } )
assert(len(verDict) == 1)
assert(verDict[trove].keys()[0].asString() == ver)
def _shadowPrefix(self, shadow):
shadowHost, label = shadow.split('@')
shadowNamespace, shadowName = label.split(':')
# if the shadow is on the same repository, then just the shadow
# name will show up in the version string
if shadowHost == 'localhost':
shadowPart = shadowName
else:
# otherwise use the whole shadow label
shadowPart = shadow
return '/localhost@rpl:linux//%s/' % shadowPart
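    # e.g. (derived from the logic above):
    #     _shadowPrefix('localhost@rpl:shadow')  -> '/localhost@rpl:linux//shadow/'
    #     _shadowPrefix('localhost1@rpl:shadow') -> '/localhost@rpl:linux//localhost1@rpl:shadow/'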
def _testSourceShadow(self, shadow):
self.resetWork()
os.chdir(self.workDir)
self.newpkg("testcase")
os.chdir("testcase")
        # the original version, 1.0-1
self.writeFile("testcase.recipe", recipes.testTransientRecipe1)
self.addfile("testcase.recipe")
self.commit()
shadowPrefix = self._shadowPrefix(shadow)
# create a shadow of 1.0-1
self.mkbranch("1.0-1", shadow, "testcase:source", shadow = True)
self._checkLatest('testcase:source', shadow, shadowPrefix + '1.0-1')
rc = self.rdiff('testcase', '-1', shadowPrefix +'1.0-1')
rdiffShadowOutput = '\n'.join((
'New shadow:',
' %s1.0-1' %shadowPrefix,
' of',
' /localhost@rpl:linux/1.0-1',
'',
))
assert(rc == rdiffShadowOutput)
# build directly off the shadowed sources, the build count should
# be 0.1
self.cfg.buildLabel = Label(shadow)
built = self.cookFromRepository('testcase')
assert(built[0][1] == shadowPrefix + '1.0-1-0.1')
# check out the shadowed source and make a local change. source
# count should become 1.1
os.chdir("..")
shutil.rmtree("testcase")
self.checkout("testcase", shadow)
os.chdir("testcase")
f = open("testcase.recipe", "a")
f.write("\n\n")
        f.close()
self.commit()
self._checkLatest('testcase:source', shadow, shadowPrefix + '1.0-1.1')
# check build count, should start at 1
built = self.cookFromRepository('testcase')
assert(built[0][1] == shadowPrefix + '1.0-1.1-1')
# test that changing the upstream version to 1.1 (from 1.0) results
# in the correct source count (0.1)
self.writeFile("testcase.recipe", recipes.testTransientRecipe2)
self.commit()
self._checkLatest('testcase:source', shadow, shadowPrefix + '1.1-0.1')
# check build count, should start at 1
built = self.cookFromRepository('testcase')
assert(built[0][1] == shadowPrefix + '1.1-0.1-1')
# build again, build count should be 2
built = self.cookFromRepository('testcase')
assert(built[0][1] == shadowPrefix + '1.1-0.1-2')
# test that changing the upstream version back to 1.0 results in the
# correct source count (1.2)
self.writeFile("testcase.recipe", recipes.testTransientRecipe1)
self.commit()
self._checkLatest('testcase:source', shadow, shadowPrefix + '1.0-1.2')
# and check that the build count becomes 1
built = self.cookFromRepository('testcase')
assert(built[0][1] == shadowPrefix + '1.0-1.2-1')
def testSourceShadow(self):
self.repos = self.openRepository()
self._testSourceShadow('localhost@rpl:shadow')
def testDistributedSourceShadow(self):
self.openRepository(0)
self.resetRepository(1)
self.openRepository(1)
self._testSourceShadow('localhost1@rpl:shadow')
def _testMerge(self, shadow, switchUsers=False):
def _assertExists(repos, ver):
repos.findTrove(None, ('testcase:source', ver, None))
def checkMetadata(ver, shortDesc):
md = self.findAndGetTrove('testcase:source=%s' % ver).getMetadata()
self.assertEquals(md['shortDesc'], shortDesc)
def updateMetadata(ver, shortDesc):
repos = self.openRepository()
mi = self.createMetadataItem(shortDesc=shortDesc)
trv = repos.findTrove(self.cfg.buildLabel,
('testcase:source', ver, None))[0]
repos.addMetadataItems([(trv, mi)])
def verifyCONARY(ver, verMap, lastMerged = None):
conaryState = ConaryStateFromFile("CONARY", None)
sourceState = conaryState.getSourceState()
assert(sourceState.getVersion().asString() == ver)
if lastMerged:
assert(sourceState.getLastMerged().asString() == lastMerged)
else:
assert(sourceState.getLastMerged() is None)
for pathId, path, fileId, version in sourceState.iterFileList():
foo = verMap.pop(path)
assert(version.asString() == foo)
# there should be no paths remaining in the vermap
assert(not verMap)
repos = self.openRepository()
shadowPrefix = self._shadowPrefix(shadow)
parentPrefix = '/%s/' %(self.cfg.buildLabel.asString())
self.resetWork()
os.chdir(self.workDir)
self.newpkg("testcase")
os.chdir("testcase")
# 1.0-1
self.writeFile("testcase.recipe", recipes.testTransientRecipe1)
self.writeFile('notChangedOnShadow', 'hello, world\n')
self.writeFile('removedOnShadow', 'goodbye, world\n')
self.writeFile('converging', '1\n2\n3\n4\n')
self.addfile("testcase.recipe")
self.addfile("notChangedOnShadow", text = True)
self.addfile("removedOnShadow", text = True)
self.addfile("converging", text = True)
self.commit()
updateMetadata('1.0', 'foo')
self.mkbranch("1.0-1", shadow, "testcase:source", shadow = True)
self._checkLatest('testcase:source', shadow, shadowPrefix + '1.0-1')
# metadata should have transferred with the shadow
checkMetadata(shadow, 'foo')
# do shadow operations as the daemon user, so there is always a uid/gid
# difference between the parent and the shadow checkouts
if switchUsers:
origstat = os.stat
origlstat = os.lstat
otheruser = StatWrapper(origstat, origlstat, 1, 1)
# 1.0-1.1
if switchUsers:
os.stat = otheruser.stat
os.lstat = otheruser.lstat
os.chdir("..")
shutil.rmtree("testcase")
self.checkout("testcase", shadow)
os.chdir("testcase")
f = open("testcase.recipe", "a")
f.write("\n# extra comment\n")
f.close()
self.writeFile('addedOnShadow', 'hello, world\n')
self.writeFile('converging', '1\n2\n3\n4\n5\n')
self.addfile('addedOnShadow', text = True)
self.remove('removedOnShadow')
self.commit()
# 1.1 (upstream change)
if switchUsers:
os.stat = origstat
os.lstat = origlstat
os.chdir("..")
shutil.rmtree("testcase")
self.checkout("testcase")
os.chdir("testcase")
self.writeFile("testcase.recipe", recipes.testTransientRecipe2)
self.writeFile('notChangedOnShadow', 'hello 2\n')
self.writeFile('removedOnShadow', 'changed\n')
self.writeFile('converging', '1\n2\n3\n4\n5\n')
self.commit()
checkMetadata('1.1', 'foo')
updateMetadata('1.1', 'upstream change')
# 1.1-1.1 (merge)
if switchUsers:
os.stat = otheruser.stat
os.lstat = otheruser.lstat
os.chdir("..")
shutil.rmtree("testcase")
self.checkout("testcase", shadow)
os.chdir("testcase")
self.merge()
assert(not os.path.exists('removedOnShadow'))
# make sure that all the versions are correct
verifyCONARY(shadowPrefix + '1.0-1.1',
{'testcase.recipe': parentPrefix + '1.1-1',
'notChangedOnShadow': parentPrefix + '1.1-1',
'addedOnShadow': shadowPrefix + '1.0-1.1',
'converging': parentPrefix + '1.1-1' },
lastMerged = parentPrefix + '1.1-1')
self.commit()
# the merge doesn't override the metadata, metadata doesn't get
# transferred via merge.
checkMetadata(shadow, 'foo')
# after the commit, the source count should be 1.1
verifyCONARY(shadowPrefix + '1.1-1.1',
{'testcase.recipe': shadowPrefix + '1.1-1.1',
# XXX it would be nice if this was reset to be from
# the parent instead of a change on the shadow
'notChangedOnShadow': shadowPrefix + '1.1-1.1',
'addedOnShadow': shadowPrefix + '1.0-1.1',
'converging': parentPrefix + '1.1-1' })
self._checkLatest('testcase:source', shadow, shadowPrefix + '1.1-1.1')
# make sure the intermediate version appears shadowed
assert(repos.hasTrove('testcase:source', VFS(shadowPrefix + '1.1-1'), deps.Flavor()))
# check out the latest version on the shadow
os.chdir("..")
shutil.rmtree("testcase")
self.checkout("testcase", shadow)
os.chdir('testcase')
verifyCONARY(shadowPrefix + '1.1-1.1',
{'testcase.recipe': shadowPrefix + '1.1-1.1',
# XXX it would be nice if this was reset to be from
# the parent instead of a change on the shadow
'notChangedOnShadow': shadowPrefix + '1.1-1.1',
'addedOnShadow': shadowPrefix + '1.0-1.1',
'converging': parentPrefix + '1.1-1' })
# change shadowed version to 1.1-1.2
recipeText = open('testcase.recipe', 'r').read()
newText = recipeText + '\n#comment bottom\n'
self.writeFile("testcase.recipe", newText)
self.commit()
verifyCONARY(shadowPrefix + '1.1-1.2',
{'testcase.recipe': shadowPrefix + '1.1-1.2',
'notChangedOnShadow': shadowPrefix + '1.1-1.1',
'addedOnShadow': shadowPrefix + '1.0-1.1',
'converging': parentPrefix + '1.1-1' })
# change shadowed version to 2.0-0.1
newText = recipeText.replace("version = '1.1'", "version = '2.0'")
self.writeFile("testcase.recipe", newText)
self.commit()
verifyCONARY(shadowPrefix + '2.0-0.1',
{'testcase.recipe': shadowPrefix + '2.0-0.1',
'notChangedOnShadow': shadowPrefix + '1.1-1.1',
'addedOnShadow': shadowPrefix + '1.0-1.1',
'converging': parentPrefix + '1.1-1' })
# make two minor revisions upstream to get 1.1-3
if switchUsers:
os.stat = origstat
os.lstat = origlstat
os.chdir("..")
shutil.rmtree("testcase")
self.checkout("testcase")
os.chdir("testcase")
newText = open('testcase.recipe', 'r').read() + '\n#minor comment\n'
self.writeFile("testcase.recipe", newText)
self.commit()
newText = '\n#minor comment \n' + newText
self.writeFile("testcase.recipe", newText)
self.commit()
# merge to get 2.0-0.2
if switchUsers:
os.stat = otheruser.stat
os.lstat = otheruser.lstat
os.chdir("..")
shutil.rmtree("testcase")
self.checkout("testcase", shadow)
os.chdir("testcase")
verifyCONARY(shadowPrefix + '2.0-0.1',
{'testcase.recipe': shadowPrefix + '2.0-0.1',
'notChangedOnShadow': shadowPrefix + '1.1-1.1',
'addedOnShadow': shadowPrefix + '1.0-1.1',
'converging': parentPrefix + '1.1-1' },
lastMerged = None)
self.merge()
assert(not os.path.exists('removedOnShadow'))
# make sure that all the versions are correct
verifyCONARY(shadowPrefix + '2.0-0.1',
{'testcase.recipe': parentPrefix + '1.1-3',
'notChangedOnShadow': shadowPrefix + '1.1-1.1',
'addedOnShadow': shadowPrefix + '1.0-1.1',
'converging': parentPrefix + '1.1-1' },
lastMerged = parentPrefix + '1.1-3')
self.commit()
# after the commit, the source count should be 2.0-0.2
verifyCONARY(shadowPrefix + '2.0-0.2',
{'testcase.recipe': shadowPrefix + '2.0-0.2',
'notChangedOnShadow': shadowPrefix + '1.1-1.1',
'addedOnShadow': shadowPrefix + '1.0-1.1',
'converging': parentPrefix + '1.1-1' })
self._checkLatest('testcase:source', shadow, shadowPrefix + '2.0-0.2')
assert(not repos.hasTrove('testcase:source', VFS(shadowPrefix + '1.1-2'), deps.Flavor()))
assert(repos.hasTrove('testcase:source', VFS(shadowPrefix + '1.1-3'), deps.Flavor()))
if switchUsers:
os.stat = origstat
os.lstat = origlstat
def testMerge(self):
self.openRepository()
self._testMerge('localhost@rpl:shadow', False)
def testMergeDifferentUser(self):
self.openRepository()
self._testMerge('localhost@rpl:shadow', True)
def testDistributedMerge(self):
self.openRepository()
self.resetRepository(1)
self.openRepository(1)
self._testMerge('localhost1@rpl:shadow', False)
def testDistributedMergeDifferentUser(self):
self.openRepository()
self.resetRepository(1)
self.openRepository(1)
self._testMerge('localhost1@rpl:shadow', True)
def testMergeAndDiffAutoSource(self):
os.chdir(self.workDir)
self.newpkg('autosource')
os.chdir('autosource')
self.writeFile("autosource.recipe", recipes.autoSource1)
self.writeFile('localfile', 'test contents\n')
self.addfile("autosource.recipe")
self.addfile("localfile", text = True)
self.commit()
shadow = 'localhost@rpl:shadow'
shadowPrefix = self._shadowPrefix(shadow)
# create a shadow of 1.0-1
self.mkbranch("1.0-1", shadow, "autosource:source", shadow = True)
# make changes on the parent branch
self.writeFile("autosource.recipe", recipes.autoSource2)
open("localfile", "a").write("new contents\n")
self.commit()
# merge and diff calls buildLocalChanges
os.chdir('..')
self.checkout('autosource=%s' % shadow, dir='autosource-shadow')
os.chdir('autosource-shadow')
self.merge()
self.diff()
def testUnneededMerge(self):
# CNY-968
os.chdir(self.workDir)
self.newpkg("testcase")
os.chdir("testcase")
self.writeFile("testcase.recipe", recipes.testTransientRecipe1)
self.addfile("testcase.recipe")
self.writeFile("source", contents = "middle\n")
self.addfile("source", text = True)
self.commit()
# create a shadow of 1.0-1
shadow = 'localhost@test:shadow'
self.mkbranch("1.0-1", shadow, "testcase:source", shadow = True)
self.update(shadow)
self.writeFile("source", contents = "middle\nlast\n")
self.commit()
# nothing to merge
self.logCheck(self.merge, [],
'error: No changes have been made on the parent branch; '
'nothing to merge.')
self.logCheck(self.commit, [],
'+ no changes have been made to commit')
def testMergeRename(self):
# CNY-967
os.chdir(self.workDir)
self.newpkg("testcase")
os.chdir("testcase")
self.writeFile("testcase.recipe", recipes.testTransientRecipe1)
self.addfile("testcase.recipe")
self.writeFile("firstname", contents = "first\n")
self.addfile("firstname", text = True)
self.commit()
# create a shadow of 1.0-1
shadow = 'localhost@test:shadow'
self.mkbranch("1.0-1", shadow, "testcase:source", shadow = True)
self.rename("firstname", "secondname")
self.writeFile("secondname", contents = "second\n")
self.commit()
self.update(shadow)
self.verifyFile("firstname", "first\n")
assert(not os.path.exists("secondname"))
self.merge()
self.verifyFile("secondname", "second\n")
assert(not os.path.exists("firstname"))
def testMergeDuplicateAddAutosource(self):
# CNY-1856
os.chdir(self.workDir)
self.newpkg('autosource')
os.chdir('autosource')
self.writeFile('autosource.recipe', recipes.autoSource0)
self.addfile('autosource.recipe')
self.writeFile('localfile', contents='blah\n')
self.addfile('localfile', text=True)
self.commit()
# create a shadow of 1.0-1
shadow = 'localhost@test:shadow'
self.mkbranch('1.0-1', shadow, 'autosource:source', shadow = True)
# add an auto-source file to the original branch
self.writeFile('autosource.recipe', recipes.autoSource5)
self.commit()
# update to the shadow version
os.chdir("..")
self.checkout('autosource=%s' % shadow, dir='autosource-shadow')
# self.update(str(shadow))
os.chdir('autosource-shadow')
# add the same file, same contents
self.writeFile('autosource.recipe', recipes.autoSource5)
self.commit()
# now merge changes from parent
self.merge()
self.commit()
def testShadowBinaryGroup(self):
basicSplitGroup = """
class splitGroup(GroupRecipe):
name = 'group-first'
version = '1.0'
checkPathConflicts = False
clearBuildRequires()
def setup(self):
self.addTrove("testcase", ":linux", byDefault=False)
self.createGroup('group-second')
self.addTrove("testcase", ":test1",
groupName = 'group-second')
# add group-second to group-first
self.addNewGroup('group-second')
"""
repos = self.openRepository()
(built, d) = self.buildRecipe(recipes.testRecipe1, "TestRecipe1")
origBuildLabel = self.cfg.buildLabel
self.cfg.buildLabel = Label('localhost@rpl:test1')
(built, d) = self.buildRecipe(recipes.testRecipe1, "TestRecipe1")
self.cfg.buildLabel = origBuildLabel
(built, d) = self.buildRecipe(basicSplitGroup, "splitGroup")
n,v,f = [ x for x in built if x[0] == 'group-first' ][0]
v = VFS(v)
shadowVerStr = '/localhost@rpl:linux//shadow/1.0-1-1'
shadowVer = VFS(shadowVerStr)
flavor = use.Arch.getCurrentArch()._toDependency()
group = repos.getTrove(n,v,f)
self.verifyTroves(group, [('testcase',
'/localhost@rpl:linux/1.0-1-1',
flavor),
('group-second',
'/localhost@rpl:linux/1.0-1-1',
flavor)])
self.mkbranch("1.0-1-1", 'localhost@rpl:shadow', "group-first",
shadow=True, binaryOnly=True)
group = repos.getTrove('group-first', shadowVer, flavor)
assert(not group.includeTroveByDefault('testcase', shadowVer, flavor))
self.verifyTroves(group, [('testcase',
shadowVerStr,
flavor),
('group-second',
shadowVerStr,
flavor)])
group = repos.getTrove('group-second',
shadowVer,
flavor)
self.verifyTroves(group, [('testcase',
'/localhost@rpl:test1//shadow/1.0-1-1',
flavor)])
def testReShadowBinaryGroupWithVersions(self):
basicGroup = """
class TestGroup(GroupRecipe):
name = 'group-test'
version = '2.0'
clearBuildRequires()
def setup(self):
self.addTrove("testcase", "1.0")
"""
(built, d) = self.buildRecipe(recipes.testRecipe1, "TestRecipe1")
(built, d) = self.buildRecipe(basicGroup, "TestGroup")
self.mkbranch("2.0-1-1", 'localhost@rpl:shadow', "group-test",
shadow=True, binaryOnly=True)
self.logFilter.add()
self.mkbranch("2.0-1-1", 'localhost@rpl:shadow', "group-test",
shadow=True, binaryOnly=True)
self.logFilter.remove()
self.logFilter.compare(['warning: group-test already has branch /localhost@rpl:linux//shadow',
'warning: testcase already has branch /localhost@rpl:linux//shadow',
'warning: testcase:runtime already has branch /localhost@rpl:linux//shadow'])
def testShadowOpts(self):
self.repos = self.openRepository()
cfg = self.cfg
self.addTestPkg(1)
self.cookTestPkg(1)
self.addTestPkg(2)
self.cookTestPkg(2)
oldLabel = cfg.installLabelPath
shadowLabel = Label('localhost@rpl:shadow')
try:
self.mkbranch(["test1", "test2:source"], 'localhost@rpl:shadow',
shadow=True, sourceOnly=True)
cfg.installLabelPath = conarycfg.CfgLabelList([shadowLabel])
query = [('test1:source', None, None), ('test1', None, None),
('test2:source', None, None), ('test2', None, None)]
results = self.repos.findTroves(cfg.installLabelPath, query,
allowMissing=True)
assert(('test1:source', None, None) in results)
assert(('test2:source', None, None) in results)
assert(('test1', None, None) not in results)
self.mkbranch(['test1:source', 'test2'],
'localhost@rpl:shadow',
shadow=True, binaryOnly=True)
results = self.repos.findTroves(cfg.installLabelPath, query,
allowMissing=True)
assert(('test1:source', None, None) in results)
assert(('test2:source', None, None) in results)
assert(('test2', None, None) in results)
assert(('test1', None, None) not in results)
# now shadow both binary and source
shadowLabel2 = Label('localhost@rpl:shadow2')
self.mkbranch("1.0-1-1", 'localhost@rpl:shadow2', "test1",
shadow=True)
cfg.installLabelPath = conarycfg.CfgLabelList([shadowLabel2])
results = self.repos.findTroves(cfg.installLabelPath, query,
allowMissing=True)
assert(('test1:source', None, None) in results)
assert(('test1', None, None) in results)
finally:
cfg.installLabelPath = oldLabel
def testMerge2(self):
# a file is added on both the source and the shadow, then a merge
# is attempted
# create the original and the shadow
shadowLabel = Label('localhost@rpl:branch')
self.makeSourceTrove('test', simpleRecipe)
self.mkbranch(['test:source'], shadowLabel, shadow = True)
# update the original with the new file 'extra'
os.chdir(self.workDir)
self.checkout('test')
self.checkout('test', str(shadowLabel), dir='test-shadow')
os.chdir('test')
self.writeFile('extra', 'Contents1\nContents2\n')
self.addfile('extra', text = True)
self.commit()
# update the shadow with a conflicting version of the new file 'extra'
# (but by adding it, not by merging)
os.chdir('../test-shadow')
self.writeFile('extra', 'Contents1\nContents3\n')
self.addfile('extra', text = True)
self.commit()
# Now try to merge, there's a file conflict
self.logCheck(self.merge, [],
'error: path extra added both locally and in repository')
self.writeFile('extra', 'Contents1\nContents2\n')
self.commit()
self.logCheck(self.merge, [],
'error: path extra added both locally and in repository')
def testMergeNonShadow(self):
self.makeSourceTrove('test', simpleRecipe)
os.chdir(self.workDir)
self.checkout('test')
os.chdir('test')
self.logCheck(self.merge, [], 'error: test:source=/localhost@rpl:linux is not a shadow')
def testMergeNotAtTip(self):
self.addComponent('test:source', '/localhost@rpl:linux/1.0-1')
self.addComponent('test:source', '/localhost@rpl:linux//shadow/1.0-1')
self.addComponent('test:source', '/localhost@rpl:linux//shadow/1.0-1.1')
os.chdir(self.workDir)
self.checkout('test=localhost@rpl:shadow/1.0-1')
os.chdir('test')
self.logCheck2([ '+ working directory is not the latest on label '
'localhost@rpl:shadow' ], self.merge,
verbosity = log.INFO)
def testMergeWithRevision(self):
recipe1 = simpleRecipe
recipe2 = (simpleRecipe + '\n\t#extra line\n').replace('1.0', '2.0')
recipe3 = (simpleRecipe + '\n\t#extra line\n\t#extra line 2\n').replace('1.0', '3.0')
self.addComponent('test:source', '1.0-1', '',
[('test.recipe', recipe1)])
self.addComponent('test:source', '2.0-1', '',
[('test.recipe', recipe2 )])
self.addComponent('test:source', '3.0-1', '',
[('test.recipe', recipe3)])
self.mkbranch(['test:source=1.0'], 'localhost@rpl:shadow', shadow=True)
os.chdir(self.workDir)
self.cfg.buildLabel = Label('localhost@rpl:shadow')
self.checkout('test')
os.chdir('test')
self.merge('2.0')
self.verifyFile('test.recipe', recipe2)
self.commit()
self.merge('3.0-1')
self.verifyFile('test.recipe', recipe3)
self.commit()
self.logFilter.add()
self.merge('2.0') # raise an error
self.logFilter.compare(['error: Cannot merge: version specified is before the last merge point, would be merging backwards'])
self.logFilter.add()
self.merge('localhost@rpl:linux') # raise an error
self.logFilter.compare(['error: Can only specify upstream version, upstream version + source count or full versions to merge'])
def testShadowComponent(self):
self.makeSourceTrove('test', simpleRecipe)
self.cookFromRepository('test')
try:
self.mkbranch(['test:runtime'], 'localhost@rpl:shadow', shadow=True)
except errors.ParseError, err:
assert(str(err) == 'Cannot branch or shadow individual components:'
' test:runtime')
def testThreeLevelShadowWithMerge(self):
self.addComponent('test:source', '/localhost@rpl:1/1.0-1',
fileContents = [ ("sourcefile", "source 1.0\n") ] )
self.addComponent('test:source', '/localhost@rpl:1/2.0-1',
fileContents = [ ("sourcefile", "source 2.0\n") ] )
self.mkbranch(['test:source=/localhost@rpl:1/2.0-1'],
'localhost@rpl:shadow1', shadow=True)
self.addComponent('test:source',
'/localhost@rpl:1//shadow1/2.0-1.1',
fileContents = [ ("sourcefile", "source 2.0\n") ] )
self.mkbranch(['test:source=/localhost@rpl:1//shadow1/2.0-1'],
'localhost@rpl:shadow2', shadow=True)
self.addComponent('test:source',
'/localhost@rpl:1//shadow1//shadow2/2.0-1.0.1',
fileContents = [ ("sourcefile", "source 2.0\n") ] )
os.chdir(self.workDir)
self.checkout('test=localhost@rpl:shadow2')
os.chdir('test')
self.merge()
def testShadowBackwards(self):
# Shadowing an earlier trove to a later point on the child
# causes havoc with our merge algorithms.
self.addComponent('test:source', '1.0-1')
self.addComponent('test:source', '2.0-1')
self.mkbranch(['test:source=2.0'], 'localhost@rpl:shadow', shadow=True)
try:
self.mkbranch(['test:source=1.0'], 'localhost@rpl:shadow',
shadow=True)
except branch.BranchError, err:
assert(str(err) == '''\
Cannot shadow backwards - already shadowed
test:source=/localhost@rpl:linux/2.0-1[]
cannot shadow earlier trove
test:source=/localhost@rpl:linux/1.0-1[]
''')
def testMissingFiles(self):
self.openRepository(1)
self.addComponent('test:source', '1.0-1')
self.mkbranch(['test:source=1.0'], 'localhost1@rpl:shadow', shadow=True)
self.stopRepository(0)
os.chdir(self.workDir)
raise testhelp.SkipTestException('CNY-462 Temporarily disabling until we figure out how to pass the OpenError exception')
self.assertRaises(repository.errors.OpenError, self.checkout,
"test", 'localhost1@rpl:shadow')
# this open resets repository 0, which means that files from the
# shadow are missing
self.openRepository(0)
try:
self.checkout("test", 'localhost1@rpl:shadow')
except repository.errors.FileStreamMissing, e:
j = """File Stream Missing
The following file stream was not found on the server:
fileId: 1602c79ea7aeb2cc64c6f11b45bc1be141f610d2
This could be due to an incomplete mirror, insufficient permissions,
or the troves using this filestream having been removed from the server."""
assert(str(e) == j)
def testMergeDuplicateAdd(self):
# CNY-1021
# test cvc merge's handling of a file added locally and in
# repository
self.addComponent('test:source', '1.0-1', ['bar'])
self.mkbranch(['test:source=1.0'], 'localhost@rpl:shadow', shadow=True)
os.chdir(self.workDir)
self.checkout('test=localhost@rpl:shadow')
os.chdir('test')
self.writeFile('foo', 'bar!\n')
self.addfile('foo', text=True)
self.addComponent('test:source', '2.0-1', ['bar', 'foo'])
self.logCheck2('error: path foo added both locally and in repository',
self.merge)
@context('shadow')
@context('labelmultiplicity')
def testShadowCreatesLabelMultiplicity(self):
shadow = 'localhost@rpl:shadow'
self.addComponent('foo:source', '/localhost@rpl:linux/1-1')
self.addComponent('foo:source', '/%s/1-1' % shadow)
self.logFilter.add()
rc, txt = self.captureOutput(self.mkbranch, "1-1", shadow,
"foo:source", shadow = True,
ignoreConflicts = False)
assert(not txt)
def testShadowMetadata(self):
self.addComponent('foo:source=1-1',
metadata=self.createMetadataItem(shortDesc='foo'))
rc, txt = self.captureOutput(self.mkbranch, "1-1",
'localhost@rpl:shadow',
"foo:source", shadow = True)
metadata = self.findAndGetTrove('foo:source=:shadow').getMetadata()
assert(metadata['shortDesc'] == 'foo')
@context('shadow')
@context('labelmultiplicity')
def testShadowSourceDisappears(self):
# CNY-462
class CustomError(Exception):
errorIsUncatchable = True
class MFCheckinCallbackFailure(CheckinCallback):
def missingFiles(self, files):
raise CustomError(files)
class MFCheckinCallbackFailure2(CheckinCallback):
def missingFiles(self, files):
return False
class MFCheckinCallbackSuccess(CheckinCallback):
def missingFiles(self, files):
return True
shadow = 'localhost1@rpl:shadow'
shadowPrefix = self._shadowPrefix(shadow)
self.openRepository(0)
self.resetRepository(1)
repos1 = self.openRepository(1)
self.resetWork()
os.chdir(self.workDir)
self.newpkg("testcase")
os.chdir("testcase")
# the original version, 1.0-1
self.writeFile("testcase.recipe", recipes.testTransientRecipe1)
self.addfile("testcase.recipe")
self.commit()
# create a shadow of 1.0-1
self.mkbranch("1.0-1", shadow, "testcase:source", shadow = True)
self._checkLatest('testcase:source', shadow, shadowPrefix + '1.0-1')
self.resetWork()
os.chdir(self.workDir)
# Source goes away
self.stopRepository(0)
# This should be in a different test case: repository is not even
# available
self.openRepository(0)
callbackF = MFCheckinCallbackFailure()
callbackF2 = MFCheckinCallbackFailure2()
callbackS = MFCheckinCallbackSuccess()
os.chdir(self.workDir)
# This should fail period. (no callback)
self.assertRaises(repository.errors.FileStreamMissing,
self.checkout, "testcase",
versionStr=shadow)
# This call will fail because the callback throws an exception
self.assertRaises(CustomError, self.checkout, "testcase",
versionStr=shadow, callback=callbackF)
# This call will fail because the callback returns False
self.assertRaises(repository.errors.FileStreamMissing,
self.checkout, "testcase",
versionStr=shadow, callback=callbackF2)
self.stopRepository(0)
# This should fail period. (no callback)
self.assertRaises(repository.errors.OpenError,
self.checkout, "testcase",
versionStr=shadow)
# In passing, test CNY-1415 (missing files while writing a changeset to
# a file)
jobList = [
('testcase:source',
(None, None),
(VFS(shadowPrefix + '1.0-1'), deps.parseFlavor('')),
True),
]
csfile = os.path.join(self.workDir, "changeset-file.ccs")
# fix up the proxy cfg
repos1 = self.openRepository(1)
repos1.createChangeSetFile(jobList, csfile, callback=callbackS)
cs = repository.changeset.ChangeSetFromFile(csfile)
self.assertEqual([ 'testcase:source' ],
[ t.getName() for t in cs.iterNewTroveList()])
# This call succeeds
self.checkout("testcase", versionStr=shadow, callback=callbackS)
# Test the callback
shutil.rmtree('testcase')
c = cvc.CvcMain()
cmd = c._supportedCommands['checkout']
expOutput = """\
Warning: The following files are missing:
testcase.recipe
"""
(ret, strng) = self.captureOutput(
c.runCommand, cmd, self.cfg, {},
['cvc', 'checkout', 'testcase=' + shadow])
self.assertEqual(strng, expOutput)
os.chdir("testcase")
dest = "localhost2@rpl:shadow"
open("testcase.recipe", "w+").write(simpleRedirectRecipe % dest)
self.addfile("testcase.recipe")
self.commit(callback=callbackS)
os.chdir(self.workDir)
shutil.rmtree('testcase')
def testMergeWithConflicts(self):
# CNY-1278
common = "line1\nline2\nline3\nline4\nline5\nline6\nline7\n"
orig = common + "ORIG BOTTOM\n"
newOnParent = "TOP\n" + common + "PARENT BOTTOM\n"
newOnShadow = common + "SHADOW BOTTOM\n"
for version in [ '/localhost@rpl:linux/1.0-1',
'/localhost@rpl:linux//shadow/1.0-1' ]:
self.addComponent('test:source', version,
fileContents =
[ ('test.source', simpleRecipe),
('other', rephelp.RegularFile(contents = orig,
version = '/localhost@rpl:linux/1.0-1',
config = True) ) ] )
self.addComponent('test:source', '/localhost@rpl:linux/1.0-2',
fileContents = [ ('test.source', simpleRecipe),
('other', newOnParent) ] )
self.addComponent('test:source',
'/localhost@rpl:linux//shadow/1.0-1.1',
fileContents = [ ('test.source', simpleRecipe),
('other', newOnShadow) ] )
os.chdir(self.workDir)
self.checkout("test", 'localhost@rpl:shadow')
os.chdir("test")
self.logCheck(self.merge, [],
'warning: conflicts from merging changes from head '
'into %s/test/other saved as %s/test/other.conflicts'
% (self.workDir, self.workDir))
self.logCheck(self.merge, [],
'error: outstanding merge must be committed before '
'merging again')
self.verifyFile(self.workDir + '/test/other',
"TOP\n" + common + "SHADOW BOTTOM\n")
def testCircularShadow(self):
# CNY-847
repos = self.openRepository()
branch1 = "/localhost@rpl:1"
branch2 = "/localhost@rpl:1//2"
branch3 = "/localhost@rpl:1//2//3"
shadow1 = "localhost@rpl:1"
shadow2 = "localhost@rpl:2"
shadow3 = "localhost@rpl:3"
ver1 = branch1 + '/1.0-1'
self.addComponent("foo:source", ver1)
self.addComponent("foo:data", ver1 + '-1')
self.addCollection('foo', ver1 + '-1', ['foo:data'])
trvspec = "foo=%s/1.0-1-1" % branch1
self.mkbranch([ trvspec ], shadow2, shadow=True)
# Try to shadow back to @rpl:1
self.assertRaises(errors.VersionStringError,
self.mkbranch, [ trvspec ], shadow1, shadow=True)
# Create a deeper shadow hierarchy
trvspec = "foo=%s/1.0-1-1" % branch2
self.mkbranch([ trvspec ], shadow3, shadow=True)
# Shadow in the middle of the hierarchy
# (from @rpl:1//2//3 to @rpl:1//2)
trvspec = "foo=%s/1.0-1-1" % branch3
self.assertRaises(errors.VersionStringError,
self.mkbranch, [ trvspec ], shadow2, shadow=True)
# Shadow to the top parent
# (from @rpl:1//2//3 to @rpl:1)
self.assertRaises(errors.VersionStringError,
self.mkbranch, [ trvspec ], shadow1, shadow=True)
def testCrossMerge(self):
self.addComponent('test:source', '/localhost@rpl:r1//a/1.0-1',
fileContents = [ ('test.recipe', simpleRecipe) ] )
self.addComponent('test:source', '/localhost@rpl:r2//a/2.0-1',
fileContents = [ ('test.recipe', simpleRecipe2) ] )
self.addComponent('test:source', '/localhost@rpl:r1//a//b/1.0-1',
fileContents = [ ('test.recipe', simpleRecipe) ] )
os.chdir(self.workDir)
self.checkout('test=localhost@rpl:b')
os.chdir('test')
self.logCheck2(
[ '+ Merging from /localhost@rpl:r1//a onto new shadow '
'/localhost@rpl:r2//a',
'+ patching %s/test/test.recipe' % self.workDir,
'+ patch: applying hunk 1 of 1' ], self.merge,
verbosity = log.INFO)
self.verifyFile('test.recipe', simpleRecipe2)
self.commit()
trvState = state.ConaryStateFromFile('CONARY')
assert(str(trvState.getSourceState().getVersion()) ==
'/localhost@rpl:r2//a//b/2.0-1.0.1')
self.addComponent('test:source', '/localhost@rpl:r1//a/3.0-1',
fileContents = [ ('test.recipe', simpleRecipe3) ] )
self.logCheck2(
[ '+ Merging from /localhost@rpl:r2//a onto /localhost@rpl:r1//a',
'+ patching %s/test/test.recipe' % self.workDir,
'+ patch: applying hunk 1 of 1' ], self.merge,
verbosity = log.INFO)
self.verifyFile('test.recipe', simpleRecipe3)
self.commit()
trvState = state.ConaryStateFromFile('CONARY')
assert(str(trvState.getSourceState().getVersion()) ==
'/localhost@rpl:r1//a//b/3.0-1.0.1')
def testPathIdLookupPermissions(self):
# CNY-1911
label2 = Label('localhost2@rpl:devel')
label1 = Label('localhost1@rpl:devel')
shadowLabel = Label('localhost@rpl:shadow')
self.openRepository(0)
self.openRepository(1)
# Remove anonymous user
repos = self.openRepository(2)
repos.deleteUserByName(label2.asString(), "anonymous")
# Add a file that disappears
recipe1 = simpleRecipe + " r.Create('/usr/blip', contents='abc\\n')\n"
self.makeSourceTrove('test', recipe1, buildLabel = label2)
built = self.cookFromRepository('test', buildLabel = label2)
# Extra file goes away
self.updateSourceTrove('test', simpleRecipe,
versionStr = label2.asString())
built = self.cookFromRepository('test', buildLabel = label2)
self.assertEqual(built[0][1], '/localhost2@rpl:devel/1.0-2-1')
# Now shadow
self.mkbranch(['test:source=' + label2.asString()], label1, shadow=True)
# Noop change
newRecipe = simpleRecipe.replace("mode=0755",
"contents='foobar\\n', mode=0755")
self.updateSourceTrove('test', newRecipe, versionStr = label1.asString())
# And build in the repo
built = self.cookFromRepository('test', buildLabel = label1)
self.assertEqual(built[0][1], '/localhost2@rpl:devel//localhost1@rpl:devel/1.0-2.1-1')
# Now shadow again
self.mkbranch(['test:source=' + label1.asString()], shadowLabel, shadow=True)
# Add the original file back
self.updateSourceTrove('test', recipe1,
versionStr = shadowLabel.asString())
# Reset users, client-side
self.cfg.user.addServerGlob('localhost2', ('test', 'wrongpass'))
client = conaryclient.ConaryClient(self.cfg)
repos = client.getRepos()
# And build in the repo
built = self.cookFromRepository('test', buildLabel = shadowLabel,
repos = repos)
self.assertEqual(built[0][1], '/localhost2@rpl:devel//localhost1@rpl:devel//localhost@rpl:shadow/1.0-2.1.1-1')
trvList = repos.getTroves([ (x[0], VFS(x[1]), x[2])
for x in built ])
trv = trvList[0]
# Iterate over all files
for _, _, _, vr in trv.iterFileList():
# Make sure the file version is the same as the trove version. If
# the originating repo didn't reject the request (and the client
# didn't ignore the reject), then we'd see the file version be on
# the /localhost2 branch.
self.assertEqual(vr.asString(), built[0][1])
def testPathIdLookupShortcut(self):
# CNY-1911
self.openRepository(1)
self.openRepository(2)
label2 = Label('localhost2@rpl:devel')
label1 = Label('localhost1@rpl:devel')
shadowLabel = Label('localhost@rpl:shadow')
self.makeSourceTrove('test', simpleRecipe, buildLabel = label2)
built = self.cookFromRepository('test', buildLabel = label2)
self.assertEqual(built[0][1], '/localhost2@rpl:devel/1.0-1-1')
# Now shadow
self.mkbranch(['test:source=' + label2.asString()], label1, shadow=True)
# Noop change
newRecipe = simpleRecipe.replace("mode=0755",
"contents='foobar\\n', mode=0755")
self.updateSourceTrove('test', newRecipe, versionStr = label1.asString())
# And build in the repo
built = self.cookFromRepository('test', buildLabel = label1)
self.assertEqual(built[0][1], '/localhost2@rpl:devel//localhost1@rpl:devel/1.0-1.1-1')
# Now shadow again
self.mkbranch(['test:source=' + label1.asString()], shadowLabel, shadow=True)
# And build in the repo
built = self.cookFromRepository('test', buildLabel = shadowLabel)
# Stop the ancestor repo
self.stopRepository(2)
# We should still be able to cook
built = self.cookFromRepository('test', buildLabel = shadowLabel)
simpleRecipe = '''
class SimpleRecipe(PackageRecipe):
name = 'test'
version = '1.0'
clearBuildReqs()
def setup(r):
r.Create('/usr/bin/foo', mode=0755)
'''
simpleRecipe2 = '''
class SimpleRecipe(PackageRecipe):
name = 'test'
version = '2.0'
clearBuildReqs()
def setup(r):
r.Create('/usr/bin/foo', mode=0755)
r.Create('/usr/bin/bar', mode=0755)
'''
simpleRecipe3 = '''
class SimpleRecipe(PackageRecipe):
name = 'test'
version = '3.0'
clearBuildReqs()
def setup(r):
r.Create('/usr/bin/foo', mode=0755)
r.Create('/usr/bin/bar', mode=0755)
r.Create('/usr/bin/baz', mode=0755)
'''
simpleRedirectRecipe = r"""\
class SimpleRedirectRecipe(RedirectRecipe):
name = 'testcase'
version = '0'
def setup(r):
r.addRedirect('testcase', '%s')
"""
|
|
"""
Python-specific functions for finding information about dependencies.
Classes
-------
Dependency - contains information about a Python module or package, and tries
to determine version information.
Functions
---------
find_version_by_attribute() - tries to find version information from the
attributes of a Python module.
find_version_from_egg() - determines whether a Python module is provided as
an egg, and if so, obtains version information
from this.
find_version_from_versioncontrol() - determines whether a Python module is
under version control, and if so, obtains
version information from this.
find_imported_packages() - finds all imported top-level packages for a given
Python file.
find_dependencies() - returns a list of Dependency objects representing
all the top-level modules or packages imported
(directly or indirectly) by a given Python file.
Module variables
----------------
heuristics - a list of functions that will be called in sequence by
find_version()
:copyright: Copyright 2006-2015 by the Sumatra team, see doc/authors.txt
:license: BSD 2-clause, see LICENSE for details.
"""
from __future__ import with_statement
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
import os
import sys
from modulefinder import Module
import textwrap
import warnings
import inspect
import logging
from sumatra.dependency_finder import core
from ..core import get_encoding
logger = logging.getLogger("Sumatra")
SENTINEL = "<SUMATRA>"
def run_script(executable_path, script):
"""
Execute a script provided as a multi-line string using the given executable,
and evaluate the script stdout.
"""
# if sys.executable == executable_path, we can just eval it and save the
# process-creation overhead.
import textwrap
import subprocess
script = str(script) # we get problems if the script is unicode
p = subprocess.Popen(executable_path, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
encoding = get_encoding()
output, err = p.communicate(textwrap.dedent(script).encode(encoding)) # should handle err
output = output.decode(encoding)
output = output[output.find(SENTINEL) + len(SENTINEL):]
try:
return_value = eval(output)
except SyntaxError as err:
warnings.warn("Error in evaluating script output.\nExecutable: %s\nScript: %s\nOutput: '%s'\nError: %s" % (executable_path, script, output, err))
return_value = {}
return return_value
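# A minimal usage sketch (the values below are hypothetical): the executed
# script is expected to write SENTINEL followed by a Python literal to stdout,
# which run_script() then eval()s back into an object, e.g.
#
#     demo = """
#     import sys
#     sys.stdout.write("<SUMATRA>" + str({'numpy': '1.7.1'}))
#     """
#     # run_script(sys.executable, demo) would return {'numpy': '1.7.1'}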
def find_version_by_attribute(module):
from types import ModuleType
version = 'unknown'
for attr_name in '__version__', 'version', 'get_version', 'VERSION', 'Version':
if hasattr(module, attr_name):
attr = getattr(module, attr_name)
if callable(attr):
try:
version = attr()
except TypeError:
continue
elif isinstance(attr, ModuleType):
version = find_version_by_attribute(attr)
else:
version = attr
break
if isinstance(version, tuple):
version = ".".join(str(c) for c in version)
elif version is None:
version = "unknown"
return version
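# For example (based on the attribute search above), a module exposing
# __version__ = (1, 2, 3) is reported as '1.2.3', while a module with none of
# the listed attributes is reported as 'unknown'.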
find_versions_by_attribute_template = """
import sys
%(def_find_version_by_attribute)s
module_names = %(module_names)s
versions = []
for name in module_names:
try:
module = __import__(name)
except ImportError as err: # should do something with err
module = None
version = 'unknown'
if module:
version = find_version_by_attribute(module)
versions.append(version)
sys.stdout.write("%(sentinel)s" + str(versions))
"""
def find_versions_by_attribute(dependencies, executable):
"""Try to find version information from the attributes of a Python module."""
context = {
'module_names': [d.name for d in dependencies if d.version == 'unknown'],
'def_find_version_by_attribute': inspect.getsource(find_version_by_attribute),
'sentinel': SENTINEL,
}
script = find_versions_by_attribute_template % context
if executable.version[0] == '2':
script = script.replace(' as', ',') # Python 2.5 and earlier do not have the 'as' keyword
versions = run_script(executable.path, script)
i = 0
for d in dependencies:
if d.version == 'unknown':
d.version = versions[i]
if d.version != 'unknown':
d.source = "attribute" # would be nice to pass back the attribute name
i += 1
return dependencies
def find_versions_from_egg(dependencies):
"""Determine whether a Python module is provided as an egg, and if so,
obtain version information from this."""
for dependency in dependencies:
if dependency.version == 'unknown':
dir = os.path.dirname(dependency.path) # should check if dirname ends in ".egg" - may need parent directory
if os.path.isdir(dir):
if 'EGG-INFO' in os.listdir(dir):
with open(os.path.join(dir, 'EGG-INFO', 'PKG-INFO')) as f:
for line in f.readlines():
if line[:7] == 'Version':
dependency.version = line.split(' ')[1].strip()
dependency.source = 'egg-info'
break
return dependencies
# Other possible heuristics:
# * check for an egg-info file with a similar name to the module
# although this is not really safe, as there can be old egg-info files
# lying around.
# * could also look in the __init__.py for a Subversion $Id:$ tag
class Dependency(core.BaseDependency):
"""
Contains information about a Python module or package, and tries to
determine version information.
"""
module = 'python'
def __init__(self, module_name, path, version='unknown', diff='', source=None):
super(Dependency, self).__init__(module_name, path, version, diff, source)
@classmethod
def from_module(cls, module, executable_path):
"""Create from modulefinder.Module instance."""
path = os.path.realpath(module.__path__[0]) # resolve any symbolic links
if len(module.__path__) > 1:
raise Exception("This is not supposed to happen. Please tell the package developers about this.") # or I could figure out for myself when this could happen
return cls(module.__name__, module.__path__[0])
def find_imported_packages(filename, executable_path, debug=0, exclude_stdlib=True):
"""
Find all imported top-level packages for a given Python file.
We cannot assume that the version of Python being used to run Sumatra is the
same as that used to run the simulation/analysis. Therefore we need to run
all the dependency finding and version checking in a subprocess with the
correct version of Python.
"""
# Actually, we could check whether executable_path matches sys.executable, and
# then do it in this process. On the other hand, the dependency finding
# could run in parallel with the simulation (good for multicore): we could
# move setting of dependencies to after the simulation, rather than having it
# in record.register()
# HACK Added excludes=['jinja2.asyncsupport']
script = textwrap.dedent("""
from modulefinder import ModuleFinder
import sys, os
import distutils.sysconfig
stdlib_path = distutils.sysconfig.get_python_lib(standard_lib=True)
stdlib_paths = (stdlib_path,
os.path.join(stdlib_path, "plat-mac"),
os.path.join(stdlib_path, "plat-mac", "lib-scriptpackages"))
exclude_stdlib = %s
finder = ModuleFinder(path=sys.path, debug=%d, excludes=['jinja2.asyncsupport'])
try:
finder.run_script("%s")
except Exception as ex:
sys.stdout.write("Determining dependencies failed for some Python modules.")
top_level_packages = {}
for name, module in finder.modules.items():
if module.__path__ and "." not in name:
if not(exclude_stdlib and os.path.dirname(module.__path__[0]) in stdlib_paths):
top_level_packages[name] = module
sys.stdout.write("%s" + str(top_level_packages))""" % (exclude_stdlib, int(debug), filename, SENTINEL))
return run_script(executable_path, script)
def find_dependencies(filename, executable):
"""Return a list of Dependency objects representing all the top-level
modules or packages imported (directly or indirectly) by a given Python file."""
heuristics = [core.find_versions_from_versioncontrol,
lambda deps: find_versions_by_attribute(deps, executable),
find_versions_from_egg]
logger.debug("Finding imported packages")
packages = find_imported_packages(filename, executable.path, exclude_stdlib=True)
dependencies = [Dependency.from_module(module, executable.path) for module in packages.values()]
logger.debug("Finding versions of dependencies")
return core.find_versions(dependencies, heuristics)
if __name__ == "__main__":
import sys
from sumatra import programs
print("\n".join(str(d)
for d in find_dependencies(sys.argv[1],
programs.PythonExecutable(None),
on_changed='store-diff')))
|
|
import RPi.GPIO as GPIO, time, os
import random
GPIO.setmode(GPIO.BCM)
width = 26
height = 10
ledpixels = []
for i in range(0,width):
ledpixels.append([0]*height)
spidev = file("/dev/spidev0.0", "w")
characters = {}
with open('/home/pi/font.txt', 'r') as m_f:
m_lines = m_f.readlines()
for m_i in range(0,len(m_lines),10):
m_character = m_lines[m_i][0]
# each character is 8 rows tall and m_width columns wide
m_width = int(m_lines[m_i+1])
m_columns = []
for m_j in range(0,m_width):
m_columns.append([0]*10)
for m_j in range(0,8):
m_line = str(m_lines[m_i+m_j+2])[:-1] # drop newline
for ind,m_char in enumerate(m_line):
m_columns[ind][m_j+1] = 1 if m_char == "#" else 0
characters[m_character]=m_width,m_columns
def scrollText(pixels, characters,text, text_c, background_c, speed):
setFullColour(pixels,background_c)
text_matrix = []
padding_pixels = pixels
character_spacing = [0 for i in range(height)]
#assemble the matrix components of the text
for char in text:
w, columns = characters[char.upper()]
for i,c in enumerate(columns):
# ick - our matrix is indexed from the bottom counting up, but the characters are
#edited going down. reverse the row ordering.
text_matrix.append(c[::-1])
text_matrix.append(character_spacing)
for x,col in enumerate(text_matrix):
for y,row in enumerate(col):
text_matrix[x][y] = text_c if row==1 else background_c
text_matrix = pixels+text_matrix+pixels
for i in range(len(text_matrix)-len(pixels)+1):
writestrip(text_matrix[i:len(pixels)+i])
time.sleep(speed)
def writestrip(pixels):
for i in range(0,width,1):
start = 0
end = height
step = 1
if (i % 2 == 1):
start = height-1
end = -1
step = -1
for j in range(start,end,step):
spidev.write(chr((pixels[i][j]>>16) & 0xFF))
spidev.write(chr((pixels[i][j]>>8) & 0xFF))
spidev.write(chr(pixels[i][j] & 0xFF))
spidev.flush()
def Color(r,g,b):
return ((b & 0xFF) << 16) | ((r & 0xFF) << 8) | (g & 0xFF)
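# Note (derived from Color() and writestrip() above): colours are packed as
# 0xBBRRGG, so Color(255, 0, 0) == 0xFF00, and the bytes go out over SPI in
# blue, red, green order.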
def setpixelcolor(pixels, x,y, r, g, b):
pixels[x][y] = Color(r,g,b)
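# NOTE: the definition below, which takes an already-packed colour value,
# overrides this 5-argument form; the rest of the script only uses the
# (pixels, x, y, c) variant.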
def setpixelcolor(pixels, x,y, c):
pixels[x][y] = c
def colorwipe_vertical(pixels, c, delay,direction):
for i in range(width)[::direction]:
for j in range(height)[::direction]:
setpixelcolor(pixels, i,j, c)
writestrip(pixels)
time.sleep(delay)
def colorwipe_horiz(pixels, c, delay,direction):
for i in range(0,height)[::direction]:
for j in range(0,width)[::direction]:
setpixelcolor(pixels, j,i, c)
writestrip(pixels)
time.sleep(delay)
def simpleGol(pixels, initial_points, live_c, dead_c, step_time, iterations):
#not actually working yet.
return
setFullColour(pixels,dead_c)
for i in range(initial_points):
pixels[random.randint(0,width)][random.randint(0,height)] = live_c
for i in range(iterations):
new_pixels = pixels
# for x in range(width):
# for y in range(height):
#
def countdown_timer(pixels, c, time_s):
setFullColour(pixels,c)
for i in range (0,height):
for j in range(0,width):
setpixelcolor(pixels,j,i,Color(0,0,0))
writestrip(pixels)
time.sleep(time_s/(width*height))
def Wheel(WheelPos):
if (WheelPos < 85):
return Color(WheelPos * 3, 255 - WheelPos * 3, 0)
elif (WheelPos < 170):
WheelPos -= 85;
return Color(255 - WheelPos * 3, 0, WheelPos * 3)
else:
WheelPos -= 170;
return Color(0, WheelPos * 3, 255 - WheelPos * 3)
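# e.g. Wheel(0) == Color(0, 255, 0), Wheel(85) == Color(255, 0, 0) and
# Wheel(170) == Color(0, 0, 255), so a 0-255 position sweeps through the
# colour wheel.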
def rainbowBoard(pixels, wait):
for j in range(256): # one cycle of all 256 colors in the wheel
for i in range(width):
for k in range(height):
# tricky math! we use each pixel as a fraction of the full 96-color wheel
# (thats the i / strip.numPixels() part)
# Then add in j which makes the colors go around per pixel
# the % 96 is to make the wheel cycle around
setpixelcolor(pixels, i,k, Wheel( ((i * 256 / (width*height)) + j) % 256) )
writestrip(pixels)
time.sleep(wait)
def colourFlashMode(pixels,iterations, delay):
for i in range(0,iterations):
c = randomColour()
for i in range(width):
for j in range(height):
setpixelcolor(ledpixels, i,j, c)
writestrip(pixels)
time.sleep(delay)
def rainbowCycle(pixels, wait):
for j in range(256): # one cycle of all 256 colors in the wheel
for i in range(width):
for k in range(height):
# tricky math! we use each pixel as a fraction of the full 96-color wheel
# (thats the i / strip.numPixels() part)
# Then add in j which makes the colors go around per pixel
# the % 96 is to make the wheel cycle around
setpixelcolor(pixels, i,k, Wheel( (((i*width+k) * 256 / ((width*height)) + j)) % 256) )
writestrip(pixels)
time.sleep(wait)
def setFullColour(pixels, c):
for i in range(width):
for j in range(height):
setpixelcolor(pixels, i,j, c)
writestrip(pixels)
def randomColour():
#lots of magic here - this produces values that look "nice" to me.
c = [0,0,0]
c[0] = random.randrange(150,255,3)
c[1] = random.randrange(25,90,3)
c[2] = random.randrange(0,50,3)
random.shuffle(c)
return Color(c[0],c[1],c[2])
c = randomColour()
setFullColour(ledpixels,c)
bright_colours = [Color(255,0,0),Color(0,255,0),Color(0,0,255),Color(255,255,255),Color(255,255,0),Color(255,0,255),Color(0,255,255)]
a="""
while True:
text = raw_input("say string (or empty to start countdown):")
if len(text) > 0:
if text == "flash":
while True:
colourFlashMode(ledpixels,10,0.1)
else:
scrollText(ledpixels,characters, text, random.choice(bright_colours),Color(0,0,0),0.05)
else:
countdown_timer(ledpixels, random.choice(bright_colours),90.0)
setFullColour(ledpixels,Color(0,0,0))
"""
while True:
action = random.randint(0,7)
if action == 0:
colourFlashMode(ledpixels,random.randint(0,20),0.1)
elif action == 1:
wipe = random.choice([0,1])
if wipe == 0:
colorwipe_vertical(ledpixels,randomColour(), 0.0005,random.choice([-1,1]))
elif wipe == 1:
colorwipe_horiz(ledpixels,randomColour(),0.0005,random.choice([-1,1]))
elif action == 2:
rainbowCycle(ledpixels, 0.00)
elif action == 3:
rainbowBoard(ledpixels,0.0)
elif action >=4 and action < 7:
strings= ["VHS! VHS!", "Welcome to the Bunker","drink beer", "sorry, We Lied about the cake","hack the planet", "42", "feed donatio"]
scrollText(ledpixels,characters, random.choice(strings),randomColour(),Color(0,0,0),0.05)
elif action ==7:
oddityroll = random.randint(0,100)
oddstrings = ["fnord", "subliminal message"]
bright_colours = [Color(255,0,0),Color(0,255,0),Color(0,0,255),Color(255,255,255)]
if oddityroll > 95:
scrollText(ledpixels,characters,random.choice(oddstrings),random.choice(bright_colours),random.choice(bright_colours),0.001)
random.seed()
spidev.close()
|
|
"""
Templatetags for django-treebeard to add drag and drop capabilities to the
nodes change list - @jjdelc
"""
import datetime
from django.db import models
from django.contrib.admin.templatetags.admin_list import (
result_headers, result_hidden_fields)
from django.contrib.admin.utils import (
lookup_field, display_for_field, display_for_value)
from django.core.exceptions import ObjectDoesNotExist
from django.template import Library
from django.utils.encoding import force_str
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from django.templatetags.static import static
from treebeard.templatetags import needs_checkboxes
register = Library()
def get_result_and_row_class(cl, field_name, result):
empty_value_display = cl.model_admin.get_empty_value_display()
row_classes = ['field-%s' % field_name]
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(attr, 'empty_value_display', empty_value_display)
if f is None:
if field_name == 'action_checkbox':
row_classes = ['action-checkbox']
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
result_repr = display_for_value(value, empty_value_display, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
# WARNING: this will be deprecated in Django 2.0
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(getattr(f, 'remote_field'), models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f, empty_value_display)
if isinstance(f, (models.DateField, models.TimeField,
models.ForeignKey)):
row_classes.append('nowrap')
if force_str(result_repr) == '':
result_repr = mark_safe(' ')
row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
return result_repr, row_class
def get_spacer(first, result):
if first:
spacer = '<span class="spacer"> </span>' * (
result.get_depth() - 1)
else:
spacer = ''
return spacer
def get_collapse(result):
if result.get_children_count():
collapse = ('<a href="#" title="" class="collapse expanded">'
'-</a>')
else:
collapse = '<span class="collapse"> </span>'
return collapse
def get_drag_handler(first):
drag_handler = ''
if first:
drag_handler = ('<td class="drag-handler">'
'<span> </span></td>')
return drag_handler
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
@jjdelc:
This has been shamelessly copied from original
django.contrib.admin.templatetags.admin_list.items_for_result
in order to alter the display for the first element
"""
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
result_repr, row_class = get_result_and_row_class(cl, field_name,
result)
# If list_display_links not defined, add the link tag to the
# first field
if (first and not cl.list_display_links) or \
field_name in cl.list_display_links:
table_tag = {True: 'th', False: 'td'}[first]
# This spacer indents the nodes based on their depth
spacer = get_spacer(first, result)
# This shows a collapse or expand link for nodes with children
collapse = get_collapse(result)
# Add a <td/> before the first col to show the drag handler
drag_handler = get_drag_handler(first)
first = False
url = cl.url_for_result(result)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = "'%s'" % force_str(value)
onclickstr = (
' onclick="opener.dismissRelatedLookupPopup(window, %s);'
' return false;"')
yield mark_safe(
'%s<%s%s>%s %s <a href="%s"%s>%s</a></%s>' % (
drag_handler, table_tag, row_class, spacer, collapse, url,
(cl.is_popup and onclickstr % result_id or ''),
conditional_escape(result_repr), table_tag))
else:
# By default the fields come from ModelAdmin.list_editable, but if
# we pull the fields out of the form instead of list_editable
# custom admins can provide fields on a per request basis
if (
form and
field_name in form.fields and
not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden
)
):
bf = form[field_name]
result_repr = mark_safe(force_str(bf.errors) + force_str(bf))
yield format_html('<td{0}>{1}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{0}</td>', force_str(form[cl.model._meta.pk.name]))
def get_parent_id(node):
"""Return the node's parent id or 0 if node is a root node."""
if node.is_root():
return 0
return node.get_parent().pk
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield (res.pk, get_parent_id(res), res.get_depth(),
res.get_children_count(),
list(items_for_result(cl, res, form)))
else:
for res in cl.result_list:
yield (res.pk, get_parent_id(res), res.get_depth(),
res.get_children_count(),
list(items_for_result(cl, res, None)))
def check_empty_dict(GET_dict):
"""
Returns True if the GET querystring contains no values; it may still contain
empty keys.
This is better than doing not bool(request.GET), because a querystring made up
only of empty keys would still make bool(request.GET) return True.
"""
empty = True
for k, v in GET_dict.items():
# Don't disable on p(age) or 'all' GET param
if v and k != 'p' and k != 'all':
empty = False
return empty
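# For example (illustrative only): a querystring such as '?p=2', or one made up
# solely of empty keys like '?q=', is treated as empty here, even though
# bool(request.GET) would be True in both cases.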
@register.inclusion_tag(
'admin/tree_change_list_results.html', takes_context=True)
def result_tree(context, cl, request):
"""
Added 'filtered' param, so the template's js knows whether the results have
been affected by a GET param or not. Only when the results are not filtered
can you drag and sort the tree.
"""
# Here I'm adding an extra col on pos 2 for the drag handlers
headers = list(result_headers(cl))
headers.insert(1 if needs_checkboxes(context) else 0, {
'text': '+',
'sortable': True,
'url': request.path,
'tooltip': _('Return to ordered tree'),
'class_attrib': mark_safe(' class="oder-grabber"')
})
return {
'filtered': not check_empty_dict(request.GET),
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'results': list(results(cl)),
}
@register.simple_tag
def treebeard_css():
"""
Template tag to print out the proper <link/> tag to include a custom .css
"""
css_file = static('treebeard/treebeard-admin.css')
return format_html(
"""<link rel="stylesheet" type="text/css" href="{}"/>""",
mark_safe(css_file)
)
@register.simple_tag
def treebeard_js():
"""
Template tag to print out the proper <script/> tag to include a custom .js
"""
js_file = static('treebeard/treebeard-admin.js')
jquery_ui = static('treebeard/jquery-ui-1.8.5.custom.min.js')
# Jquery UI is needed to call disableSelection() on drag and drop so
# text selections aren't marked while dragging a table row
# http://www.lokkju.com/blog/archives/143
TEMPLATE = (
'<script type="text/javascript" src="{}"></script>'
'<script type="text/javascript" src="{}"></script>'
'<script>'
'(function($){{jQuery = $.noConflict(true);}})(django.jQuery);'
'</script>'
'<script type="text/javascript" src="{}"></script>')
return format_html(
TEMPLATE, "jsi18n", mark_safe(js_file), mark_safe(jquery_ui))
|
|
from math import *
from numpy import *
from scipy import integrate
from scipy.interpolate import InterpolatedUnivariateSpline
fg = open("lrg", "r")
fd = open("random", "r")
sd = open("bao", "w")
# Values for the cosmological parameters
c = 3.0*10**5
Om = 0.3
H0 = 72.0
Ol = 0.7
rty = 2.0
# Integrand for the radial comoving distance
func = lambda z: 1.0/sqrt(Om*(1.0 + z)**3 +0.7)
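# i.e. the integrand is 1/E(z) = 1/sqrt(Om*(1 + z)**3 + Ol) for a flat LCDM
# model; the literal 0.7 above is the same value as Ol defined earlier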
listyy = []
listss = []
ss = 0.01
# Integrating the radial distance and forming the angular diameter distance
# with a spline of the radial distance
while ss <= rty:
y, err = integrate.quad(func, 0.0, ss)
listss.append(ss)
listyy.append(y)
ss = ss + 0.01
Hz = InterpolatedUnivariateSpline(listss, listyy)
def angdist(zz):
value = c*(1.0 + zz)/H0 * Hz(zz)
return(value)
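# Rough sanity check: for small z the spline Hz(z) is approximately z, so
# angdist(z) is approximately c*z/H0 (about 42 Mpc at z = 0.01 with H0 = 72)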
listxD = []
listyD = []
listzD = []
weightD = []
listxR = []
listyR = []
listzR = []
weightR = []
a=1
b=2
n=0
k=0
for line in fg:
red = float(line.split()[0])
angle = float(line.split()[1])
dec = float(line.split()[2])
weight = float(line.split()[3])
dist = angdist(red)
# Converting declination into polar angle
dd = (pi/2.0) - dec
# Converting into spherical coordinates
xx = dist*cos(angle)*sin(dd)
yy = dist*sin(angle)*sin(dd)
zz = dist*cos(dd)
listxD.append(xx)
listyD.append(yy)
listzD.append(zz)
weightD.append(weight)
n=n+1
# As above but for the random catalogue
for line in fd:
red = float(line.split()[0])
angle = float(line.split()[1])
dec = float(line.split()[2])
weight = float(line.split()[3])
dist = angdist(red)
dd = (pi/2.0) - dec
xx = dist*cos(angle)*sin(dd)
yy = dist*sin(angle)*sin(dd)
zz = dist*cos(dd)
listxR.append(xx)
listyR.append(yy)
listzR.append(zz)
weightR.append(weight)
k=k+1
fuzzy = 0
compare = 100000
bins = 201
# This is the size of the bins
size = 1.0
counter = 0
listD = bins*[0]
listR = bins*[0]
listDR = bins*[0]
# To reduce computation time, instead of checking whether each distance is less than 200 Mpc, compare the squared distance against 200^2. This avoids taking an unnecessary square root for every pair.
comp = 40000.0
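# e.g. a squared separation of 22500.0 (150 Mpc) passes the dd <= comp test
# below and is counted in bin int(sqrt(22500.0)/size) == 150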
m=0
while m < n:
x0 = listxD[m]
y0 = listyD[m]
z0 = listzD[m]
w = weightD[m]
# Setting the parameter space around the origin galaxy
u = x0 + 200.0
i = x0 - 200.0
o = y0 + 200.0
p = y0 - 200.0
r = z0 + 200.0
t = z0 - 200.0
listxt = []
listyt = []
listzt = []
wwt = []
oo = 0
for j in range(m + 1, n):
x1 = listxD[j]
y1 = listyD[j]
z1 = listzD[j]
ww1 = weightD[j]
# Checking to see which galaxies are within the volume demarcated around
# the origin galaxy
if i < x1 < u and p < y1 < o and t < z1 < r:
listxt.append(x1)
listyt.append(y1)
listzt.append(z1)
wwt.append(ww1)
oo = oo + 1
for e in range(0, oo):
x2 = listxt[e]
y2 = listyt[e]
z2 = listzt[e]
ww2 = wwt[e]
# Calculating the distance from the x, y, z coordinates
dd = (x0 - x2)**2 + (y0 - y2)**2 + (z0 - z2)**2
if dd <= comp:
# Checking to see which bin the distance is to be assigned to
ds = int(sqrt(dd)/size)
io = ww2 * w
listD[ds] = listD[ds] + io
counter = counter + 1
fuzzy = fuzzy + 1
if fuzzy == compare:
print fuzzy
            compare = compare + 100000
    m=m+1
# As above but now for the DR correlation
v=0
while v < n:
x0 = listxD[v]
y0 = listyD[v]
z0 = listzD[v]
w = weightD[v]
u = x0 + 200.0
i = x0 - 200.0
o = y0 + 200.0
p = y0 - 200.0
r = z0 + 200.0
t = z0 - 200.0
listxt = []
listyt = []
listzt = []
wwt = []
oo = 0
for j in range(0, k):
x1 = listxR[j]
y1 = listyR[j]
z1 = listzR[j]
ww1 = weightR[j]
if i < x1 < u and p < y1 < o and t < z1 < r:
listxt.append(x1)
listyt.append(y1)
listzt.append(z1)
wwt.append(ww1)
oo = oo + 1
for e in range(0, oo):
x2 = listxt[e]
y2 = listyt[e]
z2 = listzt[e]
ww2 = wwt[e]
dd = (x0 - x2)**2 + (y0 - y2)**2 + (z0 - z2)**2
if dd <= comp:
ds = int(sqrt(dd)/size)
io = ww2 * w
listDR[ds] = listDR[ds] + io
counter = counter + 1
fuzzy = fuzzy + 1
if fuzzy == compare:
print fuzzy
            compare = compare + 100000
    v=v+1
# As above for the RR correlation
q=0
while q < k:
x0 = listxR[q]
y0 = listyR[q]
z0 = listzR[q]
w = weightR[q]
u = x0 + 200.0
i = x0 - 200.0
o = y0 + 200.0
p = y0 - 200.0
r = z0 + 200.0
t = z0 - 200.0
listxt = []
listyt = []
listzt = []
wwt = []
oo = 0
for j in range(q + 1, k):
x1 = listxR[j]
y1 = listyR[j]
z1 = listzR[j]
ww1 = weightR[j]
if i < x1 < u and p < y1 < o and t < z1 < r:
listxt.append(x1)
listyt.append(y1)
listzt.append(z1)
wwt.append(ww1)
oo = oo + 1
for e in range(0, oo):
x2 = listxt[e]
y2 = listyt[e]
z2 = listzt[e]
ww2 = wwt[e]
dd = (x0 - x2)**2 + (y0 - y2)**2 + (z0 - z2)**2
if dd <= comp:
ds = int(sqrt(dd)/size)
io = ww2 * w
listR[ds] = listR[ds] + io
counter = counter + 1
fuzzy = fuzzy + 1
if fuzzy == compare:
print fuzzy
compare = compare + 100000
q=q+1
# Writing the DD, DR and RR bins to file where they will then be used to calculate the 2-point correlation function
for l in range(0, bins):
xl = listD[l]
fr = listDR[l]
op = listR[l]
er = l * size
sd.write("%f %f %f %f\n" % (er, xl, fr, op))
print counter
fg.close()
fd.close()
sd.close()
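# A minimal follow-up sketch (added; not part of the original script): it reads
# the "bao" file written above and forms the Landy-Szalay estimator
#   xi(s) = (DD_n - 2*DR_n + RR_n) / RR_n
# where DD_n, DR_n and RR_n are the binned pair counts normalised by the total
# number of pairs in each catalogue. The normalisations below assume unit
# weights (N*(N-1)/2 for DD and RR, N_D*N_R for DR); with the weighted counts
# accumulated above, the corresponding sums of weights should be used instead.
def landy_szalay(filename, n_data, n_rand):
    norm_dd = n_data * (n_data - 1) / 2.0
    norm_rr = n_rand * (n_rand - 1) / 2.0
    norm_dr = float(n_data) * n_rand
    xi = []
    for row in open(filename):
        s, dd, dr, rr = [float(x) for x in row.split()]
        if rr > 0:
            dd_n = dd / norm_dd
            dr_n = dr / norm_dr
            rr_n = rr / norm_rr
            xi.append((s, (dd_n - 2.0 * dr_n + rr_n) / rr_n))
    return xi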
|
|
#!/usr/bin/env python
import sys
import time
import random
import threading
import re
import heapq
import socket
import numpy
from UserString import MutableString
from datetime import timedelta
from datetime import datetime
from ReadShore import ReadShore
import ShoreParser as sp
import LaughStateEstimator as lse
from FileLogger import FileLogger
# size of the buffered values for each person
buffer_time = 3 # secs
class AudienceAnalyser:
''' Audience class '''
def __init__(self, expectedPeople, laughStartCallback, laughStopCallback):
# init FileLoger
self._logger = FileLogger('log_ShoreFrames')
# save the callbacks
self._laughStartCallback = laughStartCallback
self._laughStopCallback = laughStopCallback
# init the Audience
self.audience = Audience(expectedPeople)
# init ReadShore module
self._readShore = ReadShore(self._shoreDataReceived)
# initialise the basic random generator
random.seed()
# last known audience laugh state
self.laughStateLast = 'Not Laughing'
def _shoreDataReceived(self, data):
# read the data
self.read(data)
#print data
def start(self):
# start the readShore thread
self._readShore.start()
def stop(self):
pass
def read(self, shoreLine):
# convert shoreLine into dict
line = sp.parseLine(shoreLine)
if ('Frame' in line.keys() and
'Left' in line.keys() and
'Top' in line.keys() and
'Right' in line.keys() and
'Bottom' in line.keys()):
# pass the dict to the Audience object
self.audience.read(line)
# log frame
self._logger.log("Shore frame '%d'" % (line['Frame']))
# determine audience laugh state
if self.laughStateLast == 'Not Laughing':
if self.audience.laughProportion() > 0.333:
self.laughStateLast = 'Laughing'
self._laughStartCallback()
elif self.laughStateLast == 'Laughing':
if self.audience.laughProportion() < 0.111:
self.laughStateLast = 'Not Laughing'
self._laughStopCallback()
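    # The two thresholds above form a simple hysteresis: the audience switches
    # to 'Laughing' once more than a third of the tracked people are laughing,
    # and only switches back once the proportion drops below roughly a ninth,
    # which prevents the callbacks from toggling rapidly around a single cutoff.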
class Audience:
'''Audience class'''
def __init__(self, expectedPeople):
# init the list of Persons
self._people = []
# init the frames counter
self._frames = 0
self._lastFrame = None
# save expected people
self._expectedPeople = expectedPeople
# init refered people dict
self._refPeople = {}
def read(self, shoreDict):
if ('Frame' in shoreDict.keys() and
'Left' in shoreDict.keys() and
'Top' in shoreDict.keys() and
'Right' in shoreDict.keys() and
'Bottom' in shoreDict.keys()):
# check if it is a new frame
if shoreDict['Frame'] != self._lastFrame:
# set new frame to all People
self.newFrame(self._lastFrame)
# increase it and save it
self._frames += 1
self._lastFrame = shoreDict['Frame']
# add the person to the list
self._addPerson(shoreDict)
def newFrame(self, frame):
# iterate through people list
for person in self._people:
# end frame
person.newFrame(frame)
def _addPerson(self, shoreDict):
# get the frame
frame = Frame(shoreDict['Left'], shoreDict['Top'],
shoreDict['Right'], shoreDict['Bottom'])
# check if that person exists on the list
person = self._personExists(frame)
# if not
if person is None:
# create the object
person = Person()
# update it with current data
person.update(frame, shoreDict)
# add it to the list
self._people.append(person)
else:
# just update it
person.update(frame, shoreDict)
x, y = frame.center()
# Calibrate
#print "x:" + str(x) + " y:" + str(y)
def _personExists(self, frame):
''' check if that person exists in the list '''
# iterate through people list
for person in self._people:
# if a person exists
if (person.isCloseTo(frame)):
return person
return None
def getValidPeople(self):
''' Check the people array and only return the valid ones '''
# return the max N persons of the array
# N = self._expectedPeople
if self._people:
return heapq.nlargest(self._expectedPeople,
self._people, key=lambda x: x.identified)
else:
return None
def laughProportion(self):
laughStates = [x.laughState() for x in self._people]
laughCount = laughStates.count('Laughing')
totalCount = len(self.getValidPeople())
laughProp = float(laughCount)/totalCount
# print 'Laughing {}/{} {}'.format(laughCount, totalCount, laughProp)
return laughProp
def laughState(self):
if self.laughProportion() > 0.3333333333:
return 'Laughing'
else:
return 'Not Laughing'
    # # we could try a more sophisticated audience laugh state evaluation than just comparing individual states
# audienceSmileValence = 0
# audienceBobValence = 0
#
# for person in self._people:
# plse = person.laughStateEstimator
#
# if None not in [plse.happinessCurrent, plse.happinessThreshold, plse.bobbingCurrent, plse.bobbingThreshold]:
# audienceSmileValence += plse.happinessCurrent - plse.happinessThreshold
# audienceBobValence += plse.bobbingCurrent - plse.bobbingThreshold
#
# if audienceSmileValence > 0:
#
# if audienceBobValence > 0:
# return 'Laughing'
# else:
# return 'Smiling'
# else:
# return 'Not Laughing'
def statistics(self):
# use MutableString for efficiency
statistics = MutableString()
# count of persons (all and valid)
validPeople = len(self.getValidPeople())
allPeople = len(self._people)
statistics += ("Valid People: " + str(validPeople) +
" of " + str(allPeople) + "\n")
# add statistics about each identified person
for person in self.getValidPeople():
# statistics
statistics += "Person_" + str(person.id) + ": "
statistics += str(person.identified) + "\n"
return statistics
def getRandomPerson(self):
# get all valid people
validPeople = self.getValidPeople()
# random
if validPeople:
return random.choice(validPeople)
else:
return None
def savePerson(self, person, reference_id):
# save the person's id with the reference_id
self._refPeople[reference_id] = person.id
def getRefPerson(self, reference_id):
# get the real id from the reference_id
personId = self._refPeople[reference_id]
# search for the previously saved person
        # should be in all people and not only valid ones
return filter(lambda x: x.id == personId, self._people)[0]
def getHappiestPerson(self):
# get all valid people
validPeople = self.getValidPeople()
# get the person with the highest value in happiness
if validPeople:
# get the max happy value
maxValue = max(person.happy() for person in validPeople)
# get all people with that value
maxPeople = filter(lambda x: x.happy() == maxValue, validPeople)
# return a random one
if maxPeople:
return random.choice(maxPeople)
else:
return None
else:
return None
def getUnhappiestPerson(self):
# get all valid people
validPeople = self.getValidPeople()
# get the person with the lowest value in happiness
if validPeople:
# get the min happy value
minValue = min(person.happy() for person in validPeople)
# get all people with that value
minPeople = filter(lambda x: x.happy() == minValue, validPeople)
# return a random one
if minPeople:
return random.choice(minPeople)
else:
return None
else:
return None
def getYoungestPerson(self):
# get all valid people
validPeople = self.getValidPeople()
# get the person with the lowest value in happiness
if validPeople:
# get the min age value
minValue = min(person.age() for person in validPeople)
# get all people with that value
minPeople = filter(lambda x: x.age() == minValue, validPeople)
# return a random one
if minPeople:
return random.choice(minPeople)
else:
return None
else:
return None
def getHappiestPersonWithGender(self, gender):
# get all valid people
validPeople = self.getValidPeople()
# get the person with the highest value in happiness
if validPeople:
# get the max happy value
maxValue = max(person.happy() for person in validPeople
if person.gender() == gender)
# get all people with that value
maxPeople = filter(lambda x: x.happy() == maxValue
and x.gender() == gender,
validPeople)
# return a random one
if maxPeople:
return random.choice(maxPeople)
else:
return None
else:
return None
class Person:
'''Person class'''
_counter = 0
def __init__(self):
# set the id
self.id = Person._counter
Person._counter += 1
# Debug
#print "New Person: " + str(Person._counter)
# init the identified
self.identified = 0
# init list structures
self._systemtimestamp = []
self._timestamp = []
self._uptime = []
self._score = []
self._gender = []
self._surprised = []
self._sad = []
self._happy = []
self._angry = []
self._age = []
self._mouthOpen = []
self._leftEyeClosed = []
self._rightEyeClosed = []
self._laughState = []
# init response as None [None, 'Smiling', 'Laughing']
self.response = None
# init laughStateEstimator instance
self.laughStateEstimator = lse.LaughStateEstimator()
def update(self, frame, shoreDict):
# increase the identified var
self.identified += 1
# update the frame
self.frame = frame
# add system datetime.now()
self._systemtimestamp.append(datetime.now())
# add values to buffer lists
self._addToBuffer(shoreDict, 'TimeStamp', self._timestamp)
self._addToBuffer(shoreDict, 'Uptime', self._uptime)
self._addToBuffer(shoreDict, 'Score', self._score)
self._addToBuffer(shoreDict, 'Gender', self._gender)
self._addToBuffer(shoreDict, 'Surprised', self._surprised)
self._addToBuffer(shoreDict, 'Sad', self._sad)
self._addToBuffer(shoreDict, 'Happy', self._happy)
self._addToBuffer(shoreDict, 'Angry', self._angry)
self._addToBuffer(shoreDict, 'Age', self._age)
self._addToBuffer(shoreDict, 'MouthOpen', self._mouthOpen)
self._addToBuffer(shoreDict, 'LeftEyeClosed', self._leftEyeClosed)
self._addToBuffer(shoreDict, 'RightEyeClosed', self._rightEyeClosed)
self.laughStateEstimator.analyseWithShoreDict(shoreDict)
self._addToBuffer(shoreDict, 'LaughState', self._laughState)
def _addToBuffer(self, shoreDict, dictkey, bufferlist):
# check if the key exists
if dictkey in shoreDict.keys():
# add it the the appropriate list
bufferlist.append(shoreDict[dictkey])
else:
# add None
bufferlist.append(None)
def newFrame(self, frame):
# check if it should remove items from the buffer
self._checkTimeStamp()
def _checkTimeStamp(self):
# find the timestamp - buffer_time
comparetimestamp = datetime.now() - timedelta(seconds=buffer_time)
# find the indexes
indexes = [index for index, _timestamp in enumerate(self._timestamp)
if _timestamp < comparetimestamp]
# choose the max
if indexes:
position = max(indexes)
# remove elements prior to that position
self._removeBufferAtPosition(position)
def _removeBufferAtPosition(self, position):
del self._systemtimestamp[:position]
del self._timestamp[:position]
del self._uptime[:position]
del self._score[:position]
del self._gender[:position]
del self._surprised[:position]
del self._sad[:position]
del self._happy[:position]
del self._angry[:position]
del self._age[:position]
del self._mouthOpen[:position]
del self._leftEyeClosed[:position]
del self._rightEyeClosed[:position]
del self._laughState[:position]
def isCloseTo(self, frame):
midX, midY = self.frame.center()
midXframe, midYframe = frame.center()
return (abs(midX - midXframe) < 200 and
abs(midY - midYframe) < 200)
def uptime(self):
# check if it should remove items from the buffer
self._checkTimeStamp()
return self._uptime[-1]
def score(self):
# check if it should remove items from the buffer
self._checkTimeStamp()
return self._score[-1]
def gender(self):
# check if it should remove items from the buffer
self._checkTimeStamp()
return self._gender[-1]
def surprised(self):
# check if it should remove items from the buffer
self._checkTimeStamp()
return self._surprised[-1]
def sad(self):
# check if it should remove items from the buffer
self._checkTimeStamp()
filtered = filter(lambda x: x is not None, self._sad)
return numpy.mean(filtered)
def happy(self):
# check if it should remove items from the buffer
self._checkTimeStamp()
filtered = filter(lambda x: x is not None, self._happy)
return numpy.mean(filtered)
def angry(self):
# check if it should remove items from the buffer
self._checkTimeStamp()
filtered = filter(lambda x: x is not None, self._angry)
return numpy.mean(filtered)
def age(self):
# check if it should remove items from the buffer
self._checkTimeStamp()
filtered = filter(lambda x: x is not None, self._age)
return numpy.mean(filtered)
def mouthOpen(self):
# check if it should remove items from the buffer
self._checkTimeStamp()
filtered = filter(lambda x: x is not None, self._mouthOpen)
return numpy.mean(filtered)
def leftEyeClosed(self):
# check if it should remove items from the buffer
self._checkTimeStamp()
return self._leftEyeClosed[-1]
def rightEyeClosed(self):
# check if it should remove items from the buffer
self._checkTimeStamp()
return self._rightEyeClosed[-1]
def laughState(self):
# check if it should remove items from the buffer
self._checkTimeStamp()
return self._laughState[-1]
class Frame:
'''Frame class'''
def __init__(self, left, top, right, bottom):
# save the properties
self.left = left
self.top = top
self.right = right
self.bottom = bottom
def width(self):
return self.right - self.left
def height(self):
return self.bottom - self.top
def center(self):
# calculate middle point of X and Y
midX = self.left + self.width() / 2
midY = self.top + self.height() / 2
# Calibration
#print "(" + str(midX) + ", " + str(midY) + ")"
return midX, midY
''' main '''
if __name__ == '__main__':
# define the callback functions
def laughStart():
print datetime.now().strftime("%M:%S") + " laughStart called"
def laughStop():
print datetime.now().strftime("%M:%S") + " laughStop called"
# init the Audience Analyser with X people audience
analyser = AudienceAnalyser(11, laughStart, laughStop)
# start the analyser and wait for a connection
analyser.start()
|
|
import copy
import tempfile
import xmlsec
from tests import base
consts = xmlsec.constants
class TestKeys(base.TestMemoryLeaks):
def test_key_from_memory(self):
key = xmlsec.Key.from_memory(self.load("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
def test_key_from_memory_with_bad_args(self):
with self.assertRaises(TypeError):
xmlsec.Key.from_memory(1, format="")
def test_key_from_memory_invalid_data(self):
with self.assertRaisesRegex(xmlsec.Error, '.*cannot load key.*'):
xmlsec.Key.from_memory(b'foo', format=consts.KeyDataFormatPem)
def test_key_from_file(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
def test_key_from_file_with_bad_args(self):
with self.assertRaises(TypeError):
xmlsec.Key.from_file(1, format="")
def test_key_from_invalid_file(self):
with self.assertRaisesRegex(xmlsec.Error, '.*cannot read key.*'):
with tempfile.NamedTemporaryFile() as tmpfile:
tmpfile.write(b'foo')
xmlsec.Key.from_file(tmpfile.name, format=consts.KeyDataFormatPem)
def test_key_from_fileobj(self):
with open(self.path("rsakey.pem"), "rb") as fobj:
key = xmlsec.Key.from_file(fobj, format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
def test_key_from_invalid_fileobj(self):
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
tmpfile.write(b'foo')
with self.assertRaisesRegex(xmlsec.Error, '.*cannot read key.*'), open(tmpfile.name) as fp:
xmlsec.Key.from_file(fp, format=consts.KeyDataFormatPem)
def test_generate(self):
key = xmlsec.Key.generate(klass=consts.KeyDataAes, size=256, type=consts.KeyDataTypeSession)
self.assertIsNotNone(key)
def test_generate_with_bad_args(self):
with self.assertRaises(TypeError):
xmlsec.Key.generate(klass="", size="", type="")
def test_generate_invalid_size(self):
with self.assertRaisesRegex(xmlsec.Error, '.*cannot generate key.*'):
xmlsec.Key.generate(klass=consts.KeyDataAes, size=0, type=consts.KeyDataTypeSession)
def test_from_binary_file(self):
key = xmlsec.Key.from_binary_file(klass=consts.KeyDataDes, filename=self.path("deskey.bin"))
self.assertIsNotNone(key)
def test_from_binary_file_with_bad_args(self):
with self.assertRaises(TypeError):
xmlsec.Key.from_binary_file(klass="", filename=1)
def test_from_invalid_binary_file(self):
with self.assertRaisesRegex(xmlsec.Error, '.*cannot read key.*'):
with tempfile.NamedTemporaryFile() as tmpfile:
tmpfile.write(b'foo')
xmlsec.Key.from_binary_file(klass=consts.KeyDataDes, filename=tmpfile.name)
def test_from_binary_data(self):
key = xmlsec.Key.from_binary_data(klass=consts.KeyDataDes, data=self.load("deskey.bin"))
self.assertIsNotNone(key)
def test_from_binary_data_with_bad_args(self):
with self.assertRaises(TypeError):
xmlsec.Key.from_binary_data(klass="", data=1)
def test_from_invalid_binary_data(self):
with self.assertRaisesRegex(xmlsec.Error, '.*cannot read key.*'):
xmlsec.Key.from_binary_data(klass=consts.KeyDataDes, data=b'')
def test_load_cert_from_file(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
key.load_cert_from_file(self.path("rsacert.pem"), format=consts.KeyDataFormatPem)
def test_load_cert_from_file_with_bad_args(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
with self.assertRaises(TypeError):
key.load_cert_from_file(1, format="")
def test_load_cert_from_invalid_file(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
with self.assertRaisesRegex(xmlsec.Error, '.*cannot load cert.*'):
with tempfile.NamedTemporaryFile() as tmpfile:
tmpfile.write(b'foo')
key.load_cert_from_file(tmpfile.name, format=consts.KeyDataFormatPem)
def test_load_cert_from_fileobj(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
with open(self.path("rsacert.pem"), "rb") as fobj:
key.load_cert_from_file(fobj, format=consts.KeyDataFormatPem)
def test_load_cert_from_fileobj_with_bad_args(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
with self.assertRaises(TypeError), open(self.path("rsacert.pem"), "rb") as fobj:
key.load_cert_from_file(fobj, format='')
def test_load_cert_from_invalid_fileobj(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
tmpfile.write(b'foo')
with self.assertRaisesRegex(xmlsec.Error, '.*cannot load cert.*'), open(tmpfile.name) as fp:
key.load_cert_from_file(fp, format=consts.KeyDataFormatPem)
def test_load_cert_from_memory(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
key.load_cert_from_memory(self.load("rsacert.pem"), format=consts.KeyDataFormatPem)
def test_load_cert_from_memory_with_bad_args(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
with self.assertRaises(TypeError):
key.load_cert_from_memory(1, format="")
def test_load_cert_from_memory_invalid_data(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNotNone(key)
with self.assertRaisesRegex(xmlsec.Error, '.*cannot load cert.*'):
key.load_cert_from_memory(b'', format=consts.KeyDataFormatPem)
def test_get_name(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
self.assertIsNone(key.name)
def test_get_name_invalid_key(self):
key = xmlsec.Key()
with self.assertRaisesRegex(ValueError, 'key is not ready'):
key.name
def test_del_name(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
key.name = "rsakey"
del key.name
self.assertIsNone(key.name)
def test_set_name(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
key.name = "rsakey"
self.assertEqual("rsakey", key.name)
def test_set_name_invalid_key(self):
key = xmlsec.Key()
with self.assertRaisesRegex(ValueError, 'key is not ready'):
key.name = 'foo'
def test_copy(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
key2 = copy.copy(key)
del key
key2.load_cert_from_file(self.path("rsacert.pem"), format=consts.KeyDataFormatPem)
class TestKeysManager(base.TestMemoryLeaks):
def test_add_key(self):
key = xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem)
mngr = xmlsec.KeysManager()
mngr.add_key(key)
def test_add_key_with_bad_args(self):
mngr = xmlsec.KeysManager()
with self.assertRaises(TypeError):
mngr.add_key("")
def test_load_cert(self):
mngr = xmlsec.KeysManager()
mngr.add_key(xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem))
mngr.load_cert(self.path("rsacert.pem"), format=consts.KeyDataFormatPem, type=consts.KeyDataTypeTrusted)
def test_load_cert_with_bad_args(self):
mngr = xmlsec.KeysManager()
mngr.add_key(xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem))
with self.assertRaisesRegex(xmlsec.Error, '.*cannot load cert.*'):
with tempfile.NamedTemporaryFile() as tmpfile:
tmpfile.write(b'foo')
mngr.load_cert(tmpfile.name, format=consts.KeyDataFormatPem, type=consts.KeyDataTypeTrusted)
def test_load_invalid_cert(self):
mngr = xmlsec.KeysManager()
mngr.add_key(xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem))
with self.assertRaises(TypeError):
mngr.load_cert(1, format="", type="")
def test_load_cert_from_memory(self):
mngr = xmlsec.KeysManager()
mngr.add_key(xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem))
mngr.load_cert_from_memory(self.load("rsacert.pem"), format=consts.KeyDataFormatPem, type=consts.KeyDataTypeTrusted)
def test_load_cert_from_memory_with_bad_args(self):
mngr = xmlsec.KeysManager()
mngr.add_key(xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem))
with self.assertRaises(TypeError):
mngr.load_cert_from_memory(1, format="", type="")
def test_load_cert_from_memory_invalid_data(self):
mngr = xmlsec.KeysManager()
mngr.add_key(xmlsec.Key.from_file(self.path("rsakey.pem"), format=consts.KeyDataFormatPem))
with self.assertRaisesRegex(xmlsec.Error, '.*cannot load cert.*'):
mngr.load_cert_from_memory(b'', format=consts.KeyDataFormatPem, type=consts.KeyDataTypeTrusted)
def test_load_invalid_key(self):
mngr = xmlsec.KeysManager()
with self.assertRaises(ValueError):
mngr.add_key(xmlsec.Key())
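# A minimal usage sketch (added; not part of the test suite): verifying a
# signed document with a trusted certificate loaded through the same
# KeysManager.load_cert call exercised above. Paths are taken as arguments;
# no particular fixture file is assumed.
def verify_signed_document(signed_path, cert_path):
    from lxml import etree
    root = etree.parse(signed_path).getroot()
    node = xmlsec.tree.find_node(root, consts.NodeSignature)
    mngr = xmlsec.KeysManager()
    mngr.load_cert(cert_path, format=consts.KeyDataFormatPem, type=consts.KeyDataTypeTrusted)
    ctx = xmlsec.SignatureContext(mngr)
    # raises an xmlsec.Error subclass if the signature does not verify
    ctx.verify(node)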
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for the Hyper-V driver and related APIs.
"""
import io
import mox
import os
import platform
import shutil
import time
import uuid
from oslo.config import cfg
from nova.api.metadata import base as instance_metadata
from nova.compute import power_state
from nova.compute import task_states
from nova import context
from nova import db
from nova.image import glance
from nova import test
from nova.tests import fake_network
from nova.tests.hyperv import db_fakes
from nova.tests.hyperv import fake
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova import utils
from nova.virt import configdrive
from nova.virt import driver
from nova.virt.hyperv import basevolumeutils
from nova.virt.hyperv import constants
from nova.virt.hyperv import driver as driver_hyperv
from nova.virt.hyperv import hostutils
from nova.virt.hyperv import livemigrationutils
from nova.virt.hyperv import networkutils
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
from nova.virt.hyperv import volumeutils
from nova.virt.hyperv import volumeutilsv2
from nova.virt import images
CONF = cfg.CONF
CONF.import_opt('vswitch_name', 'nova.virt.hyperv.vif', 'hyperv')
class HyperVAPITestCase(test.TestCase):
"""Unit tests for Hyper-V driver calls."""
def __init__(self, test_case_name):
self._mox = mox.Mox()
super(HyperVAPITestCase, self).__init__(test_case_name)
def setUp(self):
super(HyperVAPITestCase, self).setUp()
self._user_id = 'fake'
self._project_id = 'fake'
self._instance_data = None
self._image_metadata = None
self._fetched_image = None
self._update_image_raise_exception = False
self._volume_target_portal = 'testtargetportal:3260'
self._volume_id = '0ef5d708-45ab-4129-8c59-d774d2837eb7'
self._context = context.RequestContext(self._user_id, self._project_id)
self._instance_ide_disks = []
self._instance_ide_dvds = []
self._instance_volume_disks = []
self._test_vm_name = None
self._test_instance_dir = 'C:\\FakeInstancesPath\\instance-0000001'
self._setup_stubs()
self.flags(instances_path=r'C:\Hyper-V\test\instances',
network_api_class='nova.network.quantumv2.api.API')
self._conn = driver_hyperv.HyperVDriver(None)
def _setup_stubs(self):
db_fakes.stub_out_db_instance_api(self.stubs)
fake_image.stub_out_image_service(self.stubs)
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
def fake_fetch(context, image_id, target, user, project):
self._fetched_image = target
self.stubs.Set(images, 'fetch', fake_fetch)
def fake_get_remote_image_service(context, name):
class FakeGlanceImageService(object):
def update(self_fake, context, image_id, image_metadata, f):
if self._update_image_raise_exception:
raise vmutils.HyperVException(
"Simulated update failure")
self._image_metadata = image_metadata
return (FakeGlanceImageService(), 1)
self.stubs.Set(glance, 'get_remote_image_service',
fake_get_remote_image_service)
def fake_sleep(ms):
pass
self.stubs.Set(time, 'sleep', fake_sleep)
def fake_vmutils__init__(self, host='.'):
pass
vmutils.VMUtils.__init__ = fake_vmutils__init__
def fake_get_volume_utils(self):
return volumeutils.VolumeUtils()
volumeops.VolumeOps._get_volume_utils = fake_get_volume_utils
self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
self._mox.StubOutWithMock(fake.PathUtils, 'open')
self._mox.StubOutWithMock(fake.PathUtils, 'copyfile')
self._mox.StubOutWithMock(fake.PathUtils, 'rmtree')
self._mox.StubOutWithMock(fake.PathUtils, 'copy')
self._mox.StubOutWithMock(fake.PathUtils, 'remove')
self._mox.StubOutWithMock(fake.PathUtils, 'rename')
self._mox.StubOutWithMock(fake.PathUtils, 'makedirs')
self._mox.StubOutWithMock(fake.PathUtils,
'get_instance_migr_revert_dir')
self._mox.StubOutWithMock(fake.PathUtils, 'get_instance_dir')
self._mox.StubOutWithMock(vmutils.VMUtils, 'vm_exists')
self._mox.StubOutWithMock(vmutils.VMUtils, 'create_vm')
self._mox.StubOutWithMock(vmutils.VMUtils, 'destroy_vm')
self._mox.StubOutWithMock(vmutils.VMUtils, 'attach_ide_drive')
self._mox.StubOutWithMock(vmutils.VMUtils, 'create_scsi_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'create_nic')
self._mox.StubOutWithMock(vmutils.VMUtils, 'set_vm_state')
self._mox.StubOutWithMock(vmutils.VMUtils, 'list_instances')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_summary_info')
self._mox.StubOutWithMock(vmutils.VMUtils, 'take_vm_snapshot')
self._mox.StubOutWithMock(vmutils.VMUtils, 'remove_vm_snapshot')
self._mox.StubOutWithMock(vmutils.VMUtils, 'set_nic_connection')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_scsi_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_ide_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks_count')
self._mox.StubOutWithMock(vmutils.VMUtils,
'attach_volume_to_controller')
self._mox.StubOutWithMock(vmutils.VMUtils,
'get_mounted_disk_by_drive_number')
self._mox.StubOutWithMock(vmutils.VMUtils, 'detach_vm_disk')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_storage_paths')
self._mox.StubOutWithMock(vmutils.VMUtils,
'get_controller_volume_paths')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_differencing_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'reconnect_parent_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'merge_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_parent_path')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_info')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'resize_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'validate_vhd')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_cpus_info')
self._mox.StubOutWithMock(hostutils.HostUtils,
'is_cpu_feature_present')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_memory_info')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_volume_info')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_windows_version')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_local_ips')
self._mox.StubOutWithMock(networkutils.NetworkUtils,
'get_external_vswitch')
self._mox.StubOutWithMock(networkutils.NetworkUtils,
'create_vswitch_port')
self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
'live_migrate_vm')
self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
'check_live_migration_config')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'volume_in_mapping')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_session_id_from_mounted_disk')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_device_number_for_target')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_target_from_disk_path')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'login_storage_target')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'logout_storage_target')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'execute_log_out')
self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
'login_storage_target')
self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
'logout_storage_target')
self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
'execute_log_out')
self._mox.StubOutClassWithMocks(instance_metadata, 'InstanceMetadata')
self._mox.StubOutWithMock(instance_metadata.InstanceMetadata,
'metadata_for_config_drive')
# Can't use StubOutClassWithMocks due to __exit__ and __enter__
self._mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
self._mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')
self._mox.StubOutWithMock(utils, 'execute')
def tearDown(self):
self._mox.UnsetStubs()
super(HyperVAPITestCase, self).tearDown()
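    # The tests below all follow the same mox record/replay/verify pattern:
    # expectations are recorded on the stubbed utils classes, ReplayAll()
    # switches mox into replay mode, the driver call under test is made, and
    # VerifyAll() asserts that every recorded expectation was exercised.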
def test_get_available_resource(self):
cpu_info = {'Architecture': 'fake',
'Name': 'fake',
'Manufacturer': 'ACME, Inc.',
'NumberOfCores': 2,
'NumberOfLogicalProcessors': 4}
tot_mem_kb = 2000000L
free_mem_kb = 1000000L
tot_hdd_b = 4L * 1024 ** 3
free_hdd_b = 3L * 1024 ** 3
windows_version = '6.2.9200'
hostutils.HostUtils.get_memory_info().AndReturn((tot_mem_kb,
free_mem_kb))
m = hostutils.HostUtils.get_volume_info(mox.IsA(str))
m.AndReturn((tot_hdd_b, free_hdd_b))
hostutils.HostUtils.get_cpus_info().AndReturn([cpu_info])
m = hostutils.HostUtils.is_cpu_feature_present(mox.IsA(int))
m.MultipleTimes()
m = hostutils.HostUtils.get_windows_version()
m.AndReturn(windows_version)
self._mox.ReplayAll()
dic = self._conn.get_available_resource(None)
self._mox.VerifyAll()
self.assertEquals(dic['vcpus'], cpu_info['NumberOfLogicalProcessors'])
self.assertEquals(dic['hypervisor_hostname'], platform.node())
self.assertEquals(dic['memory_mb'], tot_mem_kb / 1024)
self.assertEquals(dic['memory_mb_used'],
tot_mem_kb / 1024 - free_mem_kb / 1024)
self.assertEquals(dic['local_gb'], tot_hdd_b / 1024 ** 3)
self.assertEquals(dic['local_gb_used'],
tot_hdd_b / 1024 ** 3 - free_hdd_b / 1024 ** 3)
self.assertEquals(dic['hypervisor_version'],
windows_version.replace('.', ''))
def test_get_host_stats(self):
tot_mem_kb = 2000000L
free_mem_kb = 1000000L
tot_hdd_b = 4L * 1024 ** 3
free_hdd_b = 3L * 1024 ** 3
hostutils.HostUtils.get_memory_info().AndReturn((tot_mem_kb,
free_mem_kb))
m = hostutils.HostUtils.get_volume_info(mox.IsA(str))
m.AndReturn((tot_hdd_b, free_hdd_b))
self._mox.ReplayAll()
dic = self._conn.get_host_stats(True)
self._mox.VerifyAll()
self.assertEquals(dic['disk_total'], tot_hdd_b / 1024 ** 3)
self.assertEquals(dic['disk_available'], free_hdd_b / 1024 ** 3)
self.assertEquals(dic['host_memory_total'], tot_mem_kb / 1024)
self.assertEquals(dic['host_memory_free'], free_mem_kb / 1024)
self.assertEquals(dic['disk_total'],
dic['disk_used'] + dic['disk_available'])
self.assertEquals(dic['host_memory_total'],
dic['host_memory_overhead'] +
dic['host_memory_free'])
def test_list_instances(self):
fake_instances = ['fake1', 'fake2']
vmutils.VMUtils.list_instances().AndReturn(fake_instances)
self._mox.ReplayAll()
instances = self._conn.list_instances()
self._mox.VerifyAll()
self.assertEquals(instances, fake_instances)
def test_get_info(self):
self._instance_data = self._get_instance_data()
summary_info = {'NumberOfProcessors': 2,
'EnabledState': constants.HYPERV_VM_STATE_ENABLED,
'MemoryUsage': 1000,
'UpTime': 1}
m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
m.AndReturn(True)
func = mox.Func(self._check_instance_name)
m = vmutils.VMUtils.get_vm_summary_info(func)
m.AndReturn(summary_info)
self._mox.ReplayAll()
info = self._conn.get_info(self._instance_data)
self._mox.VerifyAll()
self.assertEquals(info["state"], power_state.RUNNING)
def test_spawn_cow_image(self):
self._test_spawn_instance(True)
def test_spawn_no_cow_image(self):
self._test_spawn_instance(False)
def _setup_spawn_config_drive_mocks(self, use_cdrom):
im = instance_metadata.InstanceMetadata(mox.IgnoreArg(),
content=mox.IsA(list),
extra_md=mox.IsA(dict))
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
cdb = self._mox.CreateMockAnything()
m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg())
m.AndReturn(cdb)
# __enter__ and __exit__ are required by "with"
cdb.__enter__().AndReturn(cdb)
cdb.make_drive(mox.IsA(str))
cdb.__exit__(None, None, None).AndReturn(None)
if not use_cdrom:
utils.execute(CONF.hyperv.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
mox.IsA(str),
mox.IsA(str),
attempts=1)
fake.PathUtils.remove(mox.IsA(str))
m = vmutils.VMUtils.attach_ide_drive(mox.IsA(str),
mox.IsA(str),
mox.IsA(int),
mox.IsA(int),
mox.IsA(str))
m.WithSideEffects(self._add_ide_disk)
def _test_spawn_config_drive(self, use_cdrom):
self.flags(force_config_drive=True)
self.flags(config_drive_cdrom=use_cdrom, group='hyperv')
self.flags(mkisofs_cmd='mkisofs.exe')
if use_cdrom:
expected_ide_disks = 1
expected_ide_dvds = 1
else:
expected_ide_disks = 2
expected_ide_dvds = 0
self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
expected_ide_dvds=expected_ide_dvds,
config_drive=True,
use_cdrom=use_cdrom)
def test_spawn_config_drive(self):
self._test_spawn_config_drive(False)
def test_spawn_config_drive_cdrom(self):
self._test_spawn_config_drive(True)
def test_spawn_no_config_drive(self):
self.flags(force_config_drive=False)
expected_ide_disks = 1
expected_ide_dvds = 0
self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
expected_ide_dvds=expected_ide_dvds)
def test_spawn_nova_net_vif(self):
self.flags(network_api_class='nova.network.api.API')
# Reinstantiate driver, as the VIF plugin is loaded during __init__
self._conn = driver_hyperv.HyperVDriver(None)
def setup_vif_mocks():
fake_vswitch_path = 'fake vswitch path'
fake_vswitch_port = 'fake port'
m = networkutils.NetworkUtils.get_external_vswitch(
CONF.hyperv.vswitch_name)
m.AndReturn(fake_vswitch_path)
m = networkutils.NetworkUtils.create_vswitch_port(
fake_vswitch_path, mox.IsA(str))
m.AndReturn(fake_vswitch_port)
vmutils.VMUtils.set_nic_connection(mox.IsA(str), mox.IsA(str),
fake_vswitch_port)
self._test_spawn_instance(setup_vif_mocks_func=setup_vif_mocks)
def test_spawn_nova_net_vif_no_vswitch_exception(self):
self.flags(network_api_class='nova.network.api.API')
# Reinstantiate driver, as the VIF plugin is loaded during __init__
self._conn = driver_hyperv.HyperVDriver(None)
def setup_vif_mocks():
m = networkutils.NetworkUtils.get_external_vswitch(
CONF.hyperv.vswitch_name)
m.AndRaise(vmutils.HyperVException(_('fake vswitch not found')))
self.assertRaises(vmutils.HyperVException, self._test_spawn_instance,
setup_vif_mocks_func=setup_vif_mocks,
with_exception=True)
def _check_instance_name(self, vm_name):
return vm_name == self._instance_data['name']
def _test_vm_state_change(self, action, from_state, to_state):
self._instance_data = self._get_instance_data()
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
to_state)
self._mox.ReplayAll()
action(self._instance_data)
self._mox.VerifyAll()
def test_pause(self):
self._test_vm_state_change(self._conn.pause, None,
constants.HYPERV_VM_STATE_PAUSED)
def test_pause_already_paused(self):
self._test_vm_state_change(self._conn.pause,
constants.HYPERV_VM_STATE_PAUSED,
constants.HYPERV_VM_STATE_PAUSED)
def test_unpause(self):
self._test_vm_state_change(self._conn.unpause,
constants.HYPERV_VM_STATE_PAUSED,
constants.HYPERV_VM_STATE_ENABLED)
def test_unpause_already_running(self):
self._test_vm_state_change(self._conn.unpause, None,
constants.HYPERV_VM_STATE_ENABLED)
def test_suspend(self):
self._test_vm_state_change(self._conn.suspend, None,
constants.HYPERV_VM_STATE_SUSPENDED)
def test_suspend_already_suspended(self):
self._test_vm_state_change(self._conn.suspend,
constants.HYPERV_VM_STATE_SUSPENDED,
constants.HYPERV_VM_STATE_SUSPENDED)
def test_resume(self):
self._test_vm_state_change(lambda i: self._conn.resume(i, None),
constants.HYPERV_VM_STATE_SUSPENDED,
constants.HYPERV_VM_STATE_ENABLED)
def test_resume_already_running(self):
self._test_vm_state_change(lambda i: self._conn.resume(i, None), None,
constants.HYPERV_VM_STATE_ENABLED)
def test_power_off(self):
self._test_vm_state_change(self._conn.power_off, None,
constants.HYPERV_VM_STATE_DISABLED)
def test_power_off_already_powered_off(self):
self._test_vm_state_change(self._conn.power_off,
constants.HYPERV_VM_STATE_DISABLED,
constants.HYPERV_VM_STATE_DISABLED)
def test_power_on(self):
self._test_vm_state_change(self._conn.power_on,
constants.HYPERV_VM_STATE_DISABLED,
constants.HYPERV_VM_STATE_ENABLED)
def test_power_on_already_running(self):
self._test_vm_state_change(self._conn.power_on, None,
constants.HYPERV_VM_STATE_ENABLED)
def test_reboot(self):
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
self._instance_data = self._get_instance_data()
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_REBOOT)
self._mox.ReplayAll()
self._conn.reboot(self._context, self._instance_data, network_info,
None)
self._mox.VerifyAll()
def _setup_destroy_mocks(self, destroy_disks=True):
m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
m.AndReturn(True)
func = mox.Func(self._check_instance_name)
vmutils.VMUtils.set_vm_state(func, constants.HYPERV_VM_STATE_DISABLED)
m = vmutils.VMUtils.get_vm_storage_paths(func)
m.AndReturn(([], []))
vmutils.VMUtils.destroy_vm(func)
if destroy_disks:
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
create_dir=False,
remove_dir=True)
m.AndReturn(self._test_instance_dir)
def test_destroy(self):
self._instance_data = self._get_instance_data()
self._setup_destroy_mocks()
self._mox.ReplayAll()
self._conn.destroy(self._instance_data, None)
self._mox.VerifyAll()
def test_live_migration_without_volumes(self):
self._test_live_migration()
def test_live_migration_with_volumes(self):
self._test_live_migration(with_volumes=True)
def test_live_migration_with_target_failure(self):
self._test_live_migration(test_failure=True)
def _test_live_migration(self, test_failure=False,
with_volumes=False):
dest_server = 'fake_server'
instance_data = self._get_instance_data()
instance_name = instance_data['name']
fake_post_method = self._mox.CreateMockAnything()
if not test_failure:
fake_post_method(self._context, instance_data, dest_server,
False)
fake_recover_method = self._mox.CreateMockAnything()
if test_failure:
fake_recover_method(self._context, instance_data, dest_server,
False)
fake_ide_controller_path = 'fakeide'
fake_scsi_controller_path = 'fakescsi'
if with_volumes:
fake_scsi_disk_path = 'fake_scsi_disk_path'
fake_target_iqn = 'fake_target_iqn'
fake_target_lun = 1
fake_scsi_paths = {0: fake_scsi_disk_path}
else:
fake_scsi_paths = {}
m = livemigrationutils.LiveMigrationUtils.live_migrate_vm(
instance_data['name'], dest_server)
if test_failure:
m.AndRaise(vmutils.HyperVException('Simulated failure'))
if with_volumes:
m.AndReturn([(fake_target_iqn, fake_target_lun)])
volumeutils.VolumeUtils.logout_storage_target(fake_target_iqn)
else:
m.AndReturn([])
self._mox.ReplayAll()
try:
self._conn.live_migration(self._context, instance_data,
dest_server, fake_post_method,
fake_recover_method)
exception_raised = False
except vmutils.HyperVException:
exception_raised = True
self.assertTrue(not test_failure ^ exception_raised)
self._mox.VerifyAll()
def test_pre_live_migration_cow_image(self):
self._test_pre_live_migration(True, False)
def test_pre_live_migration_no_cow_image(self):
self._test_pre_live_migration(False, False)
def test_pre_live_migration_with_volumes(self):
self._test_pre_live_migration(False, True)
def _test_pre_live_migration(self, cow, with_volumes):
self.flags(use_cow_images=cow)
instance_data = self._get_instance_data()
instance = db.instance_create(self._context, instance_data)
instance['system_metadata'] = {}
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
m = livemigrationutils.LiveMigrationUtils.check_live_migration_config()
m.AndReturn(True)
if cow:
m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(mox.IsA(str),
None)
m.AndReturn(False)
m = vhdutils.VHDUtils.get_vhd_info(mox.Func(self._check_img_path))
m.AndReturn({'MaxInternalSize': 1024})
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object))
if with_volumes:
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
mapping = driver.block_device_info_get_mapping(block_device_info)
data = mapping[0]['connection_info']['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
self._mock_login_storage_target(target_iqn, target_lun,
target_portal,
fake_mounted_disk,
fake_device_number)
else:
block_device_info = None
self._mox.ReplayAll()
self._conn.pre_live_migration(self._context, instance,
block_device_info, network_info)
self._mox.VerifyAll()
if cow:
self.assertTrue(self._fetched_image is not None)
else:
self.assertTrue(self._fetched_image is None)
def test_snapshot_with_update_failure(self):
(snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
self._update_image_raise_exception = True
self._mox.ReplayAll()
self.assertRaises(vmutils.HyperVException, self._conn.snapshot,
self._context, self._instance_data, snapshot_name,
func_call_matcher.call)
self._mox.VerifyAll()
# Assert states changed in correct order
self.assertIsNone(func_call_matcher.match())
def _setup_snapshot_mocks(self):
expected_calls = [
{'args': (),
'kwargs': {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs': {'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}
]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
fake_hv_snapshot_path = 'fake_snapshot_path'
fake_parent_vhd_path = 'C:\\fake_vhd_path\\parent.vhd'
self._instance_data = self._get_instance_data()
func = mox.Func(self._check_instance_name)
m = vmutils.VMUtils.take_vm_snapshot(func)
m.AndReturn(fake_hv_snapshot_path)
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
m = vhdutils.VHDUtils.get_vhd_parent_path(mox.IsA(str))
m.AndReturn(fake_parent_vhd_path)
self._fake_dest_disk_path = None
def copy_dest_disk_path(src, dest):
self._fake_dest_disk_path = dest
m = fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
m.WithSideEffects(copy_dest_disk_path)
self._fake_dest_base_disk_path = None
def copy_dest_base_disk_path(src, dest):
self._fake_dest_base_disk_path = dest
m = fake.PathUtils.copyfile(fake_parent_vhd_path, mox.IsA(str))
m.WithSideEffects(copy_dest_base_disk_path)
def check_dest_disk_path(path):
return path == self._fake_dest_disk_path
def check_dest_base_disk_path(path):
return path == self._fake_dest_base_disk_path
func1 = mox.Func(check_dest_disk_path)
func2 = mox.Func(check_dest_base_disk_path)
# Make sure that the hyper-v base and differential VHDs are merged
vhdutils.VHDUtils.reconnect_parent_vhd(func1, func2)
vhdutils.VHDUtils.merge_vhd(func1, func2)
def check_snapshot_path(snapshot_path):
return snapshot_path == fake_hv_snapshot_path
# Make sure that the Hyper-V snapshot is removed
func = mox.Func(check_snapshot_path)
vmutils.VMUtils.remove_vm_snapshot(func)
fake.PathUtils.rmtree(mox.IsA(str))
m = fake.PathUtils.open(func2, 'rb')
m.AndReturn(io.BytesIO(b'fake content'))
return (snapshot_name, func_call_matcher)
def test_snapshot(self):
(snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
self._mox.ReplayAll()
self._conn.snapshot(self._context, self._instance_data, snapshot_name,
func_call_matcher.call)
self._mox.VerifyAll()
self.assertTrue(self._image_metadata and
"disk_format" in self._image_metadata and
self._image_metadata["disk_format"] == "vhd")
# Assert states changed in correct order
self.assertIsNone(func_call_matcher.match())
def _get_instance_data(self):
instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
return db_fakes.get_fake_instance_data(instance_name,
self._project_id,
self._user_id)
def _spawn_instance(self, cow, block_device_info=None):
self.flags(use_cow_images=cow)
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
instance['system_metadata'] = {}
image = db_fakes.get_fake_image_data(self._project_id, self._user_id)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
self._conn.spawn(self._context, instance, image,
injected_files=[], admin_password=None,
network_info=network_info,
block_device_info=block_device_info)
def _add_ide_disk(self, vm_name, path, ctrller_addr,
drive_addr, drive_type):
if drive_type == constants.IDE_DISK:
self._instance_ide_disks.append(path)
elif drive_type == constants.IDE_DVD:
self._instance_ide_dvds.append(path)
def _add_volume_disk(self, vm_name, controller_path, address,
mounted_disk_path):
self._instance_volume_disks.append(mounted_disk_path)
def _check_img_path(self, image_path):
return image_path == self._fetched_image
def _setup_create_instance_mocks(self, setup_vif_mocks_func=None,
boot_from_volume=False,
block_device_info=None):
vmutils.VMUtils.create_vm(mox.Func(self._check_vm_name), mox.IsA(int),
mox.IsA(int), mox.IsA(bool))
if not boot_from_volume:
m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
mox.IsA(str),
mox.IsA(int),
mox.IsA(int),
mox.IsA(str))
m.WithSideEffects(self._add_ide_disk).InAnyOrder()
func = mox.Func(self._check_vm_name)
m = vmutils.VMUtils.create_scsi_controller(func)
m.InAnyOrder()
if boot_from_volume:
mapping = driver.block_device_info_get_mapping(block_device_info)
data = mapping[0]['connection_info']['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
self._mock_attach_volume(mox.Func(self._check_vm_name), target_iqn,
target_lun, target_portal, True)
vmutils.VMUtils.create_nic(mox.Func(self._check_vm_name), mox.IsA(str),
mox.IsA(str)).InAnyOrder()
if setup_vif_mocks_func:
setup_vif_mocks_func()
def _set_vm_name(self, vm_name):
self._test_vm_name = vm_name
def _check_vm_name(self, vm_name):
return vm_name == self._test_vm_name
def _setup_spawn_instance_mocks(self, cow, setup_vif_mocks_func=None,
with_exception=False,
block_device_info=None,
boot_from_volume=False,
config_drive=False,
use_cdrom=False):
m = vmutils.VMUtils.vm_exists(mox.IsA(str))
m.WithSideEffects(self._set_vm_name).AndReturn(False)
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
create_dir=False,
remove_dir=True)
m.AndReturn(self._test_instance_dir)
m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(
mox.IsA(str), block_device_info)
m.AndReturn(boot_from_volume)
if not boot_from_volume:
m = vhdutils.VHDUtils.get_vhd_info(mox.Func(self._check_img_path))
m.AndReturn({'MaxInternalSize': 1024})
if cow:
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object))
vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
mox.IsA(str))
else:
vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object))
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
self._setup_create_instance_mocks(setup_vif_mocks_func,
boot_from_volume,
block_device_info)
if config_drive:
self._setup_spawn_config_drive_mocks(use_cdrom)
# TODO(alexpilotti) Based on where the exception is thrown
# some of the above mock calls need to be skipped
if with_exception:
m = vmutils.VMUtils.vm_exists(mox.Func(self._check_vm_name))
m.AndReturn(True)
vmutils.VMUtils.destroy_vm(mox.Func(self._check_vm_name))
else:
vmutils.VMUtils.set_vm_state(mox.Func(self._check_vm_name),
constants.HYPERV_VM_STATE_ENABLED)
def _test_spawn_instance(self, cow=True,
expected_ide_disks=1,
expected_ide_dvds=0,
setup_vif_mocks_func=None,
with_exception=False,
config_drive=False,
use_cdrom=False):
self._setup_spawn_instance_mocks(cow,
setup_vif_mocks_func,
with_exception,
config_drive=config_drive,
use_cdrom=use_cdrom)
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
self._mox.ReplayAll()
self._spawn_instance(cow)
self._mox.VerifyAll()
self.assertEquals(len(self._instance_ide_disks), expected_ide_disks)
self.assertEquals(len(self._instance_ide_dvds), expected_ide_dvds)
vhd_path = os.path.join(self._test_instance_dir, 'root.vhd')
self.assertEquals(vhd_path, self._instance_ide_disks[0])
def _mock_get_mounted_disk_from_lun(self, target_iqn, target_lun,
fake_mounted_disk,
fake_device_number):
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
target_lun)
m.AndReturn(fake_device_number)
m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
fake_device_number)
m.AndReturn(fake_mounted_disk)
def _mock_login_storage_target(self, target_iqn, target_lun, target_portal,
fake_mounted_disk, fake_device_number):
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
target_lun)
m.AndReturn(fake_device_number)
volumeutils.VolumeUtils.login_storage_target(target_lun,
target_iqn,
target_portal)
self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
fake_mounted_disk,
fake_device_number)
def _mock_attach_volume(self, instance_name, target_iqn, target_lun,
target_portal=None, boot_from_volume=False):
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
fake_controller_path = 'fake_scsi_controller_path'
self._mock_login_storage_target(target_iqn, target_lun,
target_portal,
fake_mounted_disk,
fake_device_number)
self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
fake_mounted_disk,
fake_device_number)
if boot_from_volume:
m = vmutils.VMUtils.get_vm_ide_controller(instance_name, 0)
m.AndReturn(fake_controller_path)
fake_free_slot = 0
else:
m = vmutils.VMUtils.get_vm_scsi_controller(instance_name)
m.AndReturn(fake_controller_path)
fake_free_slot = 1
m = vmutils.VMUtils.get_attached_disks_count(fake_controller_path)
m.AndReturn(fake_free_slot)
m = vmutils.VMUtils.attach_volume_to_controller(instance_name,
fake_controller_path,
fake_free_slot,
fake_mounted_disk)
m.WithSideEffects(self._add_volume_disk)
def test_attach_volume(self):
instance_data = self._get_instance_data()
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
mount_point = '/dev/sdc'
self._mock_attach_volume(instance_data['name'], target_iqn, target_lun,
target_portal)
self._mox.ReplayAll()
self._conn.attach_volume(connection_info, instance_data, mount_point)
self._mox.VerifyAll()
self.assertEquals(len(self._instance_volume_disks), 1)
def _mock_detach_volume(self, target_iqn, target_lun):
mount_point = '/dev/sdc'
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
target_lun)
m.AndReturn(fake_device_number)
m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
fake_device_number)
m.AndReturn(fake_mounted_disk)
vmutils.VMUtils.detach_vm_disk(mox.IsA(str), fake_mounted_disk)
volumeutils.VolumeUtils.logout_storage_target(mox.IsA(str))
def test_detach_volume(self):
instance_data = self._get_instance_data()
instance_name = instance_data['name']
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
mount_point = '/dev/sdc'
self._mock_detach_volume(target_iqn, target_lun)
self._mox.ReplayAll()
self._conn.detach_volume(connection_info, instance_data, mount_point)
self._mox.VerifyAll()
def test_boot_from_volume(self):
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
self._setup_spawn_instance_mocks(cow=False,
block_device_info=block_device_info,
boot_from_volume=True)
self._mox.ReplayAll()
self._spawn_instance(False, block_device_info)
self._mox.VerifyAll()
self.assertEquals(len(self._instance_volume_disks), 1)
def _setup_test_migrate_disk_and_power_off_mocks(self, same_host=False,
with_exception=False):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
network_info = fake_network.fake_get_instance_nw_info(
self.stubs, spectacular=True)
fake_local_ip = '10.0.0.1'
if same_host:
fake_dest_ip = fake_local_ip
else:
fake_dest_ip = '10.0.0.2'
fake_root_vhd_path = 'C:\\FakePath\\root.vhd'
fake_revert_path = os.path.join(self._test_instance_dir, '_revert')
func = mox.Func(self._check_instance_name)
vmutils.VMUtils.set_vm_state(func, constants.HYPERV_VM_STATE_DISABLED)
m = vmutils.VMUtils.get_vm_storage_paths(func)
m.AndReturn(([fake_root_vhd_path], []))
m = hostutils.HostUtils.get_local_ips()
m.AndReturn([fake_local_ip])
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
m = pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'],
remove_dir=True)
m.AndReturn(fake_revert_path)
if same_host:
fake.PathUtils.makedirs(mox.IsA(str))
m = fake.PathUtils.copy(fake_root_vhd_path, mox.IsA(str))
if with_exception:
m.AndRaise(shutil.Error('Simulated copy error'))
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
mox.IsA(str),
remove_dir=True)
m.AndReturn(self._test_instance_dir)
else:
fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
destroy_disks = True
if same_host:
fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
destroy_disks = False
self._setup_destroy_mocks(False)
if destroy_disks:
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
mox.IsA(str),
remove_dir=True)
m.AndReturn(self._test_instance_dir)
return (instance, fake_dest_ip, network_info)
def test_migrate_disk_and_power_off(self):
(instance,
fake_dest_ip,
network_info) = self._setup_test_migrate_disk_and_power_off_mocks()
self._mox.ReplayAll()
self._conn.migrate_disk_and_power_off(self._context, instance,
fake_dest_ip, None,
network_info)
self._mox.VerifyAll()
def test_migrate_disk_and_power_off_same_host(self):
args = self._setup_test_migrate_disk_and_power_off_mocks(
same_host=True)
(instance, fake_dest_ip, network_info) = args
self._mox.ReplayAll()
self._conn.migrate_disk_and_power_off(self._context, instance,
fake_dest_ip, None,
network_info)
self._mox.VerifyAll()
def test_migrate_disk_and_power_off_exception(self):
args = self._setup_test_migrate_disk_and_power_off_mocks(
with_exception=True)
(instance, fake_dest_ip, network_info) = args
self._mox.ReplayAll()
self.assertRaises(shutil.Error, self._conn.migrate_disk_and_power_off,
self._context, instance, fake_dest_ip, None,
network_info)
self._mox.VerifyAll()
def test_finish_migration(self):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
instance['system_metadata'] = {}
network_info = fake_network.fake_get_instance_nw_info(
self.stubs, spectacular=True)
m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(mox.IsA(str),
None)
m.AndReturn(False)
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
self._mox.StubOutWithMock(fake.PathUtils, 'exists')
m = fake.PathUtils.exists(mox.IsA(str))
m.AndReturn(True)
fake_parent_vhd_path = (os.path.join('FakeParentPath', '%s.vhd' %
instance["image_ref"]))
m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
m.AndReturn({'ParentPath': fake_parent_vhd_path,
'MaxInternalSize': 1})
m = fake.PathUtils.exists(mox.IsA(str))
m.AndReturn(True)
vhdutils.VHDUtils.reconnect_parent_vhd(mox.IsA(str), mox.IsA(str))
m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
m.AndReturn({'MaxInternalSize': 1024})
m = fake.PathUtils.exists(mox.IsA(str))
m.AndReturn(True)
self._set_vm_name(instance['name'])
self._setup_create_instance_mocks(None, False)
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._mox.ReplayAll()
self._conn.finish_migration(self._context, None, instance, "",
network_info, None, False, None)
self._mox.VerifyAll()
def test_confirm_migration(self):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
network_info = fake_network.fake_get_instance_nw_info(
self.stubs, spectacular=True)
pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'],
remove_dir=True)
self._mox.ReplayAll()
self._conn.confirm_migration(None, instance, network_info)
self._mox.VerifyAll()
def test_finish_revert_migration(self):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
network_info = fake_network.fake_get_instance_nw_info(
self.stubs, spectacular=True)
fake_revert_path = ('C:\\FakeInstancesPath\\%s\\_revert' %
instance['name'])
m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(mox.IsA(str),
None)
m.AndReturn(False)
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
create_dir=False,
remove_dir=True)
m.AndReturn(self._test_instance_dir)
m = pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'])
m.AndReturn(fake_revert_path)
fake.PathUtils.rename(fake_revert_path, mox.IsA(str))
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
self._set_vm_name(instance['name'])
self._setup_create_instance_mocks(None, False)
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._mox.ReplayAll()
self._conn.finish_revert_migration(instance, network_info, None)
self._mox.VerifyAll()
|
|
"""AMQP 0.9.1 Adapter to connect to RabbitMQ using pika library.
Publish and subscribe to queues and exchanges in RabbitMQ
"""
import pika
import functools
import threading
from message_queue import logger
from message_queue.adapters import BaseAdapter
LOGGER = logger.get(__name__)
class AMQPAdapter(BaseAdapter):
__name__ = 'amqp'
def __init__(self, host='localhost', port=5672, user='guest', password='guest', vhost='/'):
"""Create the connection credentials and parameters then connect.
:param string host: Server host
:param int port: Server port
        :param string user: Server user
        :param string password: Server password
        :param string vhost: Server virtual host
"""
self.threads = []
self.queue = None
self._host = host
self._credentials = pika.PlainCredentials(user, password)
self._parameters = pika.ConnectionParameters(host, port, vhost, self._credentials)
self.connect()
def configurate_queue(self, **kwargs):
"""Configurate the queue.
:param int prefetch_count: Specifies a prefetch window in terms of whole messages
:param string queue: Queue name to connect
:param bool passive: Only check to see if the queue exists
        :param bool durable: Survive reboots of the broker
:param bool exclusive: Only allow access by the current connection
:param bool auto_delete: Delete after consumer cancels or disconnects
:param bool arguments: Custom key/value arguments for the queue
"""
if not self.queue:
self.queue = kwargs.get('queue', '')
self.basic_ack = kwargs.get('basic_ack', True)
self.prefetch_count = kwargs.get('prefetch_count', 1)
self.channel.queue_declare(
queue = self.queue,
passive = kwargs.get('passive', False),
durable = kwargs.get('durable', True),
exclusive = kwargs.get('exclusive', False),
auto_delete = kwargs.get('auto_delete', False),
arguments = kwargs.get('arguments', None),
)
if self.prefetch_count > 0:
self.channel.basic_qos(prefetch_count=self.prefetch_count)
LOGGER.debug('Queue configured: queue=%r, basic_ack=%r, prefetch_count=%r',
self.queue, self.basic_ack, self.prefetch_count)
def configurate_exchange(self, **kwargs):
"""Configurate the exchange.
:param string exchange: Exchange name to connect
:param string exchange_type: Exchange type
"""
if not self.queue:
self.queue = kwargs.get('exchange', '')
self.channel.exchange_declare(
exchange = self.queue,
exchange_type = kwargs.get('exchange_type', 'fanout')
)
LOGGER.debug('Exchange configured: exchange=%r', self.queue)
def connect(self):
"""Connect to AMQP server usgin BlockingConnection.
"""
try:
self.connection = pika.BlockingConnection(self._parameters)
self.channel = self.connection.channel()
self.channel.confirm_delivery()
LOGGER.debug('Connected')
except Exception as e:
LOGGER.warning('Could not connect to host: %r', e)
self.connect()
def close(self):
"""Close connection and channel.
"""
self.channel.close()
self.connection.close()
self.queue = self.connection = self.channel = None
def send(self, message):
"""Publish a message in the queue.
:param Message message: Message to publish in the channel
"""
amqp_message = self.format_message(message.get_content())
self.channel.basic_publish(**amqp_message)
def format_message(self, message):
"""Format message to AMQP format.
:param dict message: Message to format
"""
exchange = message['properties'].get('exchange', '')
delivery_mode = message['properties'].get('delivery_mode', 2)
correlation_id = message['properties'].get('correlation_id', None)
_message = {}
_message['body'] = message['body']
_message['routing_key'] = self.queue
_message['exchange'] = exchange
_message['properties'] = pika.BasicProperties(
content_type='application/json',
delivery_mode=delivery_mode,
correlation_id=correlation_id,
)
LOGGER.debug('AMQP Message: %r ', _message)
return _message
def consume(self, worker):
"""Consume message from the queue.
:param function worker: Method that consume the message
"""
callback = functools.partial(self.consume_callback, worker=worker)
self.channel.basic_consume(callback, self.queue)
try:
self.channel.start_consuming()
except KeyboardInterrupt:
self.channel.stop_consuming()
for thread in self.threads:
thread.join()
self.close()
def consume_callback(self, channel, method, properties, body, worker):
"""Create a new thred.
:param pika.channel.Channel channel: The channel object
:param pika.Spec.Basic.Deliver method: basic_deliver method
:param pika.Spec.BasicProperties properties: properties
:param str|unicode body: The message body
        :param function worker: Worker to execute in the consume callback
"""
thread = threading.Thread(target=self.do_work, args=(channel, method, properties, body, worker))
thread.start()
self.threads.append(thread)
def do_work(self, channel, method, properties, body, worker):
"""Execute worker
:param pika.channel.Channel channel: The channel object
:param pika.Spec.Basic.Deliver method: basic_deliver method
:param pika.Spec.BasicProperties properties: properties
:param str|unicode body: The message body
        :param function worker: Worker to execute in the consume callback
"""
thread_id = threading.current_thread().ident
tag = method.delivery_tag
LOGGER.debug('Thread id: %r Delivery tag: %r Message body: %r', thread_id, tag, body)
acknowledge = worker(channel, method, properties, body)
callback = functools.partial(self._consume_acknowledge, channel, tag, acknowledge)
self.connection.add_callback_threadsafe(callback)
def _consume_acknowledge(self, channel, tag, acknowledge=True):
"""Message acknowledge.
:param pika.channel.Channel channel: Channel to acknowledge the message
:param int tag: Message tag to acknowledge
:param bool acknowledge: If should acknowledge the message or not
"""
if acknowledge is False:
channel.basic_nack(delivery_tag=tag)
return
channel.basic_ack(delivery_tag=tag)
def subscribe(self, exchange, queue, exchange_type="fanout", **kwargs):
"""Subscribes to a exchange.
:param function worker: Method that consume the message
:param string exchange: Exchange name
:param string exchange: Queue name
:param string exchange_type: Exchange type
"""
self.queue = queue
self.channel.exchange_declare(
exchange=exchange, exchange_type=exchange_type)
self.channel.queue_declare(
queue=self.queue,
passive=kwargs.get('passive', False),
durable=kwargs.get('durable', True),
exclusive=kwargs.get('exclusive', False),
auto_delete=kwargs.get('auto_delete', False),
arguments=kwargs.get('arguments', None)
)
self.channel.basic_qos(prefetch_count=kwargs.get('prefetch_count', 1))
self.channel.queue_bind(exchange=exchange, queue=self.queue)
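# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the adapter). It assumes
# a RabbitMQ broker on localhost with the default guest credentials and a
# hypothetical queue name 'example_tasks'; the worker acknowledges everything.
# ---------------------------------------------------------------------------
def _example_consume():  # pragma: no cover - documentation sketch
    adapter = AMQPAdapter(host='localhost', port=5672)
    adapter.configurate_queue(queue='example_tasks', durable=True,
                              prefetch_count=1)

    def worker(channel, method, properties, body):
        LOGGER.debug('Received: %r', body)
        return True  # True triggers basic_ack, False triggers basic_nack

    adapter.consume(worker)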
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Depth-prediction networks, based on the Struct2Depth code.
https://github.com/tensorflow/models/blob/master/research/struct2depth/nets.py
"""
import abc
import numpy as np
import tensorflow.compat.v1 as tf
from depth_and_motion_learning import maybe_summary
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
layers = contrib_layers
arg_scope = contrib_framework.arg_scope
WEIGHT_DECAY_KEY = 'WEIGHT_DECAY'
def encoder_resnet(target_image, weight_reg, is_training, normalizer_fn=None):
"""Defines a ResNet18-based encoding architecture.
This implementation follows Juyong Kim's implementation of ResNet18 on GitHub:
https://github.com/dalgu90/resnet-18-tensorflow
Args:
target_image: Input tensor with shape [B, h, w, 3] to encode.
weight_reg: Parameter ignored.
is_training: Whether the model is being trained or not.
normalizer_fn: Normalization function, defaults to batch normalization (_bn)
below.
Returns:
Tuple of tensors, with the first being the bottleneck layer as tensor of
size [B, h_hid, w_hid, c_hid], and others being intermediate layers
for building skip-connections.
"""
del weight_reg
normalizer_fn = normalizer_fn or _bn
encoder_filters = [64, 64, 128, 256, 512]
stride = 2
# conv1
with tf.variable_scope('conv1'):
x = s_conv(target_image, 7, encoder_filters[0], stride)
x = normalizer_fn(x, is_train=is_training)
econv1 = s_relu(x)
x = tf.nn.max_pool(econv1, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME')
# conv2_x
x = s_residual_block(
x, is_training, name='conv2_1', normalizer_fn=normalizer_fn)
econv2 = s_residual_block(
x, is_training, name='conv2_2', normalizer_fn=normalizer_fn)
# conv3_x
x = s_residual_block_first(
econv2,
is_training,
encoder_filters[2],
stride,
name='conv3_1',
normalizer_fn=normalizer_fn)
econv3 = s_residual_block(
x, is_training, name='conv3_2', normalizer_fn=normalizer_fn)
# conv4_x
x = s_residual_block_first(
econv3,
is_training,
encoder_filters[3],
stride,
name='conv4_1',
normalizer_fn=normalizer_fn)
econv4 = s_residual_block(
x, is_training, name='conv4_2', normalizer_fn=normalizer_fn)
# conv5_x
x = s_residual_block_first(
econv4,
is_training,
encoder_filters[4],
stride,
name='conv5_1',
normalizer_fn=normalizer_fn)
econv5 = s_residual_block(
x, is_training, name='conv5_2', normalizer_fn=normalizer_fn)
return econv5, (econv4, econv3, econv2, econv1)
class GenericDepthPredictor(object):
"""An abstract class for a depth predictor."""
__metaclass__ = abc.ABCMeta
def __init__(self, mode, params=None):
"""Creates an instance.
Args:
mode: One of tf.estimator.ModeKeys: TRAIN, PREDICT or EVAL.
params: A dictionary containing relevant parameters.
"""
allowed_attrs = ['TRAIN', 'PREDICT', 'EVAL']
allowed_values = [
getattr(tf.estimator.ModeKeys, attr) for attr in allowed_attrs
]
if mode not in allowed_values:
raise ValueError('\'mode\' must be one of tf.estimator.ModeKeys.(%s)' %
', '.join(allowed_attrs))
self._mode = mode
self._params = self._default_params
self._params.update(params or {})
@property
  def _default_params(self):
return {}
@abc.abstractmethod
def predict_depth(self, rgb, sensor_depth):
"""An interface for predicting depth.
Args:
rgb: A batch of RGB images, of shape [B, H, W, 3].
sensor_depth: Optional, batch of depth sensor images of shape [B, H, W],
to be fused into the prediction.
"""
pass
class ResNet18DepthPredictor(GenericDepthPredictor):
"""A depth predictor based on ResNet18 with randomized layer normalization."""
@property
def _default_params(self):
return {
# Number of training steps over which the noise in randomized layer
# normalization ramps up.
'layer_norm_noise_rampup_steps': 10000,
# Weight decay regularization of the network base.
'weight_decay': 0.01,
# If true, a learned scale factor will multiply the network's depth
# prediction. This is useful when direct depth supervision exists.
'learn_scale': False,
# A boolean, if True, deconvolutions will be padded in 'REFLECT' mode,
# otherwise in 'CONSTANT' mode (the former is not supported on TPU)
'reflect_padding': False
}
def predict_depth(self, rgb, sensor_depth=None):
del sensor_depth # unused
with tf.variable_scope('depth_prediction', reuse=tf.AUTO_REUSE):
if self._mode == tf.estimator.ModeKeys.TRAIN:
noise_stddev = 0.5
global_step = tf.train.get_global_step()
rampup_steps = self._params['layer_norm_noise_rampup_steps']
if global_step is not None and rampup_steps > 0:
# If global_step is available, ramp up the noise.
noise_stddev *= tf.square(
tf.minimum(tf.to_float(global_step) / float(rampup_steps), 1.0))
else:
noise_stddev = 0.0
def _normalizer_fn(x, is_train, name='bn'):
return randomized_layer_norm(
x, is_train=is_train, name=name, stddev=noise_stddev)
if self._params['learn_scale']:
depth_scale = tf.get_variable('depth_scale', initializer=1.0)
maybe_summary.scalar('depth_scale', depth_scale)
else:
depth_scale = 1.0
return depth_scale * depth_prediction_resnet18unet(
2 * rgb - 1.0,
self._mode == tf.estimator.ModeKeys.TRAIN,
self._params['weight_decay'],
_normalizer_fn,
reflect_padding=self._params['reflect_padding'])
def depth_prediction_resnet18unet(images, is_training, decoder_weight_reg=0.0,
normalizer_fn=None, reflect_padding=True):
"""A depth prediciton network based on a ResNet18 UNet architecture.
This network is identical to disp_net in struct2depth.nets with
architecture='resnet', with the following differences:
1. We use a softplus activation to generate positive depths. This eliminates
the need for the hyperparameters DISP_SCALING and MIN_DISP defined in
struct2depth.nets. The predicted depth is no longer bounded.
2. The network predicts depth rather than disparity, and at a single scale.
Args:
images: A tf.Tensor of shape [B, H, W, C] representing images.
is_training: A boolean, True if in training mode.
decoder_weight_reg: A scalar, strength of L2 weight regularization to be
used in the decoder.
normalizer_fn: Normalizer function to use for convolutions. Defaults to
batch normalization.
reflect_padding: A boolean, if True, deconvolutions will be padded in
'REFLECT' mode, otherwise in 'CONSTANT' mode (the former is not supported
on TPU)
Returns:
A tf.Tensor of shape [B, H, W, 1] containing depths maps.
"""
# The struct2depth resnet encoder does not use the weight_reg argument, hence
# we're passing None.
bottleneck, skip_connections = encoder_resnet(
images,
weight_reg=None,
is_training=is_training,
normalizer_fn=normalizer_fn)
(econv4, econv3, econv2, econv1) = skip_connections
decoder_filters = [16, 32, 64, 128, 256]
reg = layers.l2_regularizer(decoder_weight_reg)
padding_mode = 'REFLECT' if reflect_padding else 'CONSTANT'
with arg_scope([layers.conv2d, layers.conv2d_transpose],
normalizer_fn=None,
normalizer_params=None,
activation_fn=tf.nn.relu,
weights_regularizer=reg):
upconv5 = layers.conv2d_transpose(
bottleneck, decoder_filters[4], [3, 3], stride=2, scope='upconv5')
iconv5 = layers.conv2d(
_concat_and_pad(upconv5, econv4, padding_mode),
decoder_filters[4], [3, 3],
stride=1,
scope='iconv5',
padding='VALID')
upconv4 = layers.conv2d_transpose(
iconv5, decoder_filters[3], [3, 3], stride=2, scope='upconv4')
iconv4 = layers.conv2d(
_concat_and_pad(upconv4, econv3, padding_mode),
decoder_filters[3], [3, 3],
stride=1,
scope='iconv4',
padding='VALID')
upconv3 = layers.conv2d_transpose(
iconv4, decoder_filters[2], [3, 3], stride=2, scope='upconv3')
iconv3 = layers.conv2d(
_concat_and_pad(upconv3, econv2, padding_mode),
decoder_filters[2], [3, 3],
stride=1,
scope='iconv3',
padding='VALID')
upconv2 = layers.conv2d_transpose(
iconv3, decoder_filters[1], [3, 3], stride=2, scope='upconv2')
iconv2 = layers.conv2d(
_concat_and_pad(upconv2, econv1, padding_mode),
decoder_filters[1], [3, 3],
stride=1,
scope='iconv2',
padding='VALID')
upconv1 = layers.conv2d_transpose(
iconv2, decoder_filters[0], [3, 3], stride=2, scope='upconv1')
upconv1 = tf.pad(
upconv1, [[0, 0], [1, 1], [1, 1], [0, 0]], mode=padding_mode)
iconv1 = layers.conv2d(
upconv1,
decoder_filters[0], [3, 3],
stride=1,
scope='iconv1',
padding='VALID')
depth_input = tf.pad(
iconv1, [[0, 0], [1, 1], [1, 1], [0, 0]], mode=padding_mode)
return layers.conv2d(
depth_input,
1, [3, 3],
stride=1,
activation_fn=tf.nn.softplus,
normalizer_fn=None,
scope='disp1',
padding='VALID')
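# Illustrative usage sketch (not part of the original module). The shapes are
# assumptions: any height and width divisible by 32 should work, since the
# ResNet18 encoder downsamples by a factor of 32 and the decoder upsamples it
# back to the input resolution.
def _example_build_depth_graph():  # pragma: no cover - documentation sketch
  graph = tf.Graph()
  with graph.as_default():
    # Inputs are expected in [-1, 1], matching ResNet18DepthPredictor above.
    images = tf.random.uniform([1, 128, 416, 3]) * 2.0 - 1.0
    depth = depth_prediction_resnet18unet(
        images, is_training=False, reflect_padding=False)
    # `depth` should have shape [1, 128, 416, 1] with positive (softplus) values.
  return graph, depth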
def _concat_and_pad(decoder_layer, encoder_layer, padding_mode):
concat = tf.concat([decoder_layer, encoder_layer], axis=3)
return tf.pad(concat, [[0, 0], [1, 1], [1, 1], [0, 0]], mode=padding_mode)
def randomized_layer_norm(x, is_train, name='bn', stddev=0.5):
"""Applies layer normalization and applies noise on the mean and variance.
For every item in a batch and for every layer, we calculate the mean and
variance across the spatial dimensions, and multiply them by Gaussian noise
with a mean equal to 1.0 (at training time only). This improved the results
compared to batch normalization - see more in
https://arxiv.org/abs/1904.04998.
Args:
x: tf.Tensor to normalize, of shape [B, H, W, C].
is_train: A boolean, True at training mode.
name: A string, a name scope.
stddev: Standard deviation of the Gaussian noise. Defaults to 0.5 because
this is the largest value where the noise is guaranteed to be a
      non-negative multiplicative factor.
Returns:
A tf.Tensor of shape [B, H, W, C], the normalized tensor.
"""
with tf.variable_scope(name, None, [x]):
inputs_shape = x.shape.as_list()
params_shape = inputs_shape[-1:]
beta = tf.get_variable(
'beta', shape=params_shape, initializer=tf.initializers.zeros())
gamma = tf.get_variable(
'gamma', shape=params_shape, initializer=tf.initializers.ones())
mean, variance = tf.nn.moments(x, [1, 2], keep_dims=True)
if is_train:
mean *= 1.0 + tf.random.truncated_normal(tf.shape(mean), stddev=stddev)
variance *= 1.0 + tf.random.truncated_normal(
tf.shape(variance), stddev=stddev)
outputs = tf.nn.batch_normalization(
x,
mean,
variance,
offset=beta,
scale=gamma,
variance_epsilon=1e-3)
outputs.set_shape(x.shape)
return outputs
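# Illustrative sketch (an assumption, not part of the original module): with
# is_train=False the function reduces to plain per-sample normalization over
# the spatial dimensions, while is_train=True jitters the mean and variance
# with truncated-normal noise, which is the "randomized" part described above.
#
#   x = tf.random.uniform([2, 8, 8, 16])
#   y_eval = randomized_layer_norm(x, is_train=False, name='ln_example_eval')
#   y_train = randomized_layer_norm(x, is_train=True, name='ln_example_train')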
def s_residual_block_first(x,
is_training,
out_channel,
strides,
name='unit',
normalizer_fn=None):
"""Helper function for defining ResNet architecture."""
normalizer_fn = normalizer_fn or _bn
in_channel = x.get_shape().as_list()[-1]
with tf.variable_scope(name):
# Shortcut connection
if in_channel == out_channel:
if strides == 1:
shortcut = tf.identity(x)
else:
shortcut = tf.nn.max_pool(x, [1, strides, strides, 1],
[1, strides, strides, 1], 'VALID')
else:
shortcut = s_conv(x, 1, out_channel, strides, name='shortcut')
# Residual
x = s_conv(x, 3, out_channel, strides, name='conv_1')
x = normalizer_fn(x, is_train=is_training, name='bn_1')
x = s_relu(x, name='relu_1')
x = s_conv(x, 3, out_channel, 1, name='conv_2')
x = normalizer_fn(x, is_train=is_training, name='bn_2')
# Merge
x = x + shortcut
x = s_relu(x, name='relu_2')
return x
def s_residual_block(x,
is_training,
input_q=None,
output_q=None,
name='unit',
normalizer_fn=None):
"""Helper function for defining ResNet architecture."""
normalizer_fn = normalizer_fn or _bn
num_channel = x.get_shape().as_list()[-1]
with tf.variable_scope(name):
shortcut = x # Shortcut connection
# Residual
x = s_conv(
x, 3, num_channel, 1, input_q=input_q, output_q=output_q, name='conv_1')
x = normalizer_fn(x, is_train=is_training, name='bn_1')
x = s_relu(x, name='relu_1')
x = s_conv(
x,
3,
num_channel,
1,
input_q=output_q,
output_q=output_q,
name='conv_2')
x = normalizer_fn(x, is_train=is_training, name='bn_2')
# Merge
x = x + shortcut
x = s_relu(x, name='relu_2')
return x
def s_conv(x,
filter_size,
out_channel,
stride,
pad='SAME',
input_q=None,
output_q=None,
name='conv'):
"""Helper function for defining ResNet architecture."""
if (input_q is None) ^ (output_q is None):
raise ValueError('Input/Output splits are not correctly given.')
in_shape = x.get_shape()
with tf.variable_scope(name):
kernel = tf.get_variable(
'kernel', [filter_size, filter_size, in_shape[3], out_channel],
tf.float32,
initializer=tf.random_normal_initializer(
stddev=np.sqrt(2.0 / filter_size / filter_size / out_channel)))
if kernel not in tf.get_collection(WEIGHT_DECAY_KEY):
tf.add_to_collection(WEIGHT_DECAY_KEY, kernel)
conv = tf.nn.conv2d(x, kernel, [1, stride, stride, 1], pad)
return conv
def _bn(x, is_train, name='bn'):
"""Helper function for defining ResNet architecture."""
bn = tf.layers.batch_normalization(x, training=is_train, name=name)
return bn
def s_relu(x, name=None, leakness=0.0):
  """Helper function for defining ResNet architecture."""
  if leakness > 0.0:
    name = 'lrelu' if name is None else name
    return tf.maximum(x, x * leakness, name=name)
  else:
    name = 'relu' if name is None else name
    return tf.nn.relu(x, name=name)
|
|
from __future__ import unicode_literals
import os
import dvc
import time
import shutil
import filecmp
import posixpath
from dvc.logger import logger
from dvc.system import System
from mock import patch
from dvc.main import main
from dvc.utils import file_md5, load_stage_file
from dvc.stage import Stage
from dvc.exceptions import DvcException, RecursiveAddingWhileUsingFilename
from dvc.output.base import OutputAlreadyTrackedError
from dvc.repo import Repo as DvcRepo
from tests.basic_env import TestDvc
from tests.utils import spy, reset_logger_error_output, get_gitignore_content
from tests.utils.logger import MockLoggerHandlers, ConsoleFontColorsRemover
class TestAdd(TestDvc):
def test(self):
md5 = file_md5(self.FOO)[0]
stages = self.dvc.add(self.FOO)
self.assertEqual(len(stages), 1)
stage = stages[0]
self.assertTrue(stage is not None)
self.assertIsInstance(stage, Stage)
self.assertTrue(os.path.isfile(stage.path))
self.assertEqual(len(stage.outs), 1)
self.assertEqual(len(stage.deps), 0)
self.assertEqual(stage.cmd, None)
self.assertEqual(stage.outs[0].info["md5"], md5)
def test_unicode(self):
fname = "\xe1"
with open(fname, "w") as fobj:
fobj.write("something")
stage = self.dvc.add(fname)[0]
self.assertTrue(os.path.isfile(stage.path))
class TestAddUnsupportedFile(TestDvc):
def test(self):
with self.assertRaises(DvcException):
self.dvc.add("unsupported://unsupported")
class TestAddDirectory(TestDvc):
def test(self):
dname = "directory"
os.mkdir(dname)
self.create(os.path.join(dname, "file"), "file")
stages = self.dvc.add(dname)
self.assertEqual(len(stages), 1)
stage = stages[0]
self.assertTrue(stage is not None)
self.assertEqual(len(stage.deps), 0)
self.assertEqual(len(stage.outs), 1)
md5 = stage.outs[0].info["md5"]
dir_info = self.dvc.cache.local.load_dir_cache(md5)
for info in dir_info:
self.assertTrue("\\" not in info["relpath"])
class TestAddDirectoryRecursive(TestDvc):
def test(self):
stages = self.dvc.add(self.DATA_DIR, recursive=True)
self.assertEqual(len(stages), 2)
class TestAddCmdDirectoryRecursive(TestDvc):
def test(self):
ret = main(["add", "--recursive", self.DATA_DIR])
self.assertEqual(ret, 0)
class TestAddDirectoryWithForwardSlash(TestDvc):
def test(self):
dname = "directory/"
os.mkdir(dname)
self.create(os.path.join(dname, "file"), "file")
stages = self.dvc.add(dname)
self.assertEqual(len(stages), 1)
stage = stages[0]
self.assertTrue(stage is not None)
self.assertEqual(os.path.abspath("directory.dvc"), stage.path)
class TestAddTrackedFile(TestDvc):
def test(self):
fname = "tracked_file"
self.create(fname, "tracked file contents")
self.dvc.scm.add([fname])
self.dvc.scm.commit("add {}".format(fname))
with self.assertRaises(OutputAlreadyTrackedError):
self.dvc.add(fname)
class TestAddDirWithExistingCache(TestDvc):
def test(self):
dname = "a"
fname = os.path.join(dname, "b")
os.mkdir(dname)
shutil.copyfile(self.FOO, fname)
stages = self.dvc.add(self.FOO)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
stages = self.dvc.add(dname)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
class TestAddModifiedDir(TestDvc):
def test(self):
stages = self.dvc.add(self.DATA_DIR)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
os.unlink(self.DATA)
time.sleep(2)
stages = self.dvc.add(self.DATA_DIR)
self.assertEqual(len(stages), 1)
self.assertTrue(stages[0] is not None)
class TestAddFileInDir(TestDvc):
def test(self):
stages = self.dvc.add(self.DATA_SUB)
self.assertEqual(len(stages), 1)
stage = stages[0]
self.assertNotEqual(stage, None)
self.assertEqual(len(stage.deps), 0)
self.assertEqual(len(stage.outs), 1)
self.assertEqual(stage.relpath, self.DATA_SUB + ".dvc")
class TestAddExternalLocalFile(TestDvc):
def test(self):
dname = TestDvc.mkdtemp()
fname = os.path.join(dname, "foo")
shutil.copyfile(self.FOO, fname)
stages = self.dvc.add(fname)
self.assertEqual(len(stages), 1)
stage = stages[0]
self.assertNotEqual(stage, None)
self.assertEqual(len(stage.deps), 0)
self.assertEqual(len(stage.outs), 1)
self.assertEqual(stage.relpath, "foo.dvc")
self.assertEqual(len(os.listdir(dname)), 1)
self.assertTrue(os.path.isfile(fname))
self.assertTrue(filecmp.cmp(fname, "foo", shallow=False))
class TestAddLocalRemoteFile(TestDvc):
def test(self):
"""
Making sure that 'remote' syntax is handled properly for local outs.
"""
cwd = os.getcwd()
remote = "myremote"
ret = main(["remote", "add", remote, cwd])
self.assertEqual(ret, 0)
self.dvc = DvcRepo()
foo = "remote://{}/{}".format(remote, self.FOO)
ret = main(["add", foo])
self.assertEqual(ret, 0)
d = load_stage_file("foo.dvc")
self.assertEqual(d["outs"][0]["path"], foo)
bar = os.path.join(cwd, self.BAR)
ret = main(["add", bar])
self.assertEqual(ret, 0)
d = load_stage_file("bar.dvc")
self.assertEqual(d["outs"][0]["path"], bar)
class TestCmdAdd(TestDvc):
def test(self):
ret = main(["add", self.FOO])
self.assertEqual(ret, 0)
ret = main(["add", "non-existing-file"])
self.assertNotEqual(ret, 0)
class TestDoubleAddUnchanged(TestDvc):
def test_file(self):
ret = main(["add", self.FOO])
self.assertEqual(ret, 0)
ret = main(["add", self.FOO])
self.assertEqual(ret, 0)
def test_dir(self):
ret = main(["add", self.DATA_DIR])
self.assertEqual(ret, 0)
ret = main(["add", self.DATA_DIR])
self.assertEqual(ret, 0)
class TestShouldUpdateStateEntryForFileAfterAdd(TestDvc):
def test(self):
file_md5_counter = spy(dvc.state.file_md5)
with patch.object(dvc.state, "file_md5", file_md5_counter):
ret = main(["config", "cache.type", "copy"])
self.assertEqual(ret, 0)
ret = main(["add", self.FOO])
self.assertEqual(ret, 0)
self.assertEqual(file_md5_counter.mock.call_count, 1)
ret = main(["status"])
self.assertEqual(ret, 0)
self.assertEqual(file_md5_counter.mock.call_count, 1)
ret = main(["run", "-d", self.FOO, "cat {}".format(self.FOO)])
self.assertEqual(ret, 0)
self.assertEqual(file_md5_counter.mock.call_count, 1)
class TestShouldUpdateStateEntryForDirectoryAfterAdd(TestDvc):
def test(self):
file_md5_counter = spy(dvc.state.file_md5)
with patch.object(dvc.state, "file_md5", file_md5_counter):
ret = main(["config", "cache.type", "copy"])
self.assertEqual(ret, 0)
ret = main(["add", self.DATA_DIR])
self.assertEqual(ret, 0)
self.assertEqual(file_md5_counter.mock.call_count, 3)
ret = main(["status"])
self.assertEqual(ret, 0)
self.assertEqual(file_md5_counter.mock.call_count, 3)
ret = main(
["run", "-d", self.DATA_DIR, "ls {}".format(self.DATA_DIR)]
)
self.assertEqual(ret, 0)
self.assertEqual(file_md5_counter.mock.call_count, 3)
class TestAddCommit(TestDvc):
def test(self):
ret = main(["add", self.FOO, "--no-commit"])
self.assertEqual(ret, 0)
self.assertTrue(os.path.isfile(self.FOO))
self.assertEqual(len(os.listdir(self.dvc.cache.local.cache_dir)), 0)
ret = main(["commit", self.FOO + ".dvc"])
self.assertEqual(ret, 0)
self.assertTrue(os.path.isfile(self.FOO))
self.assertEqual(len(os.listdir(self.dvc.cache.local.cache_dir)), 1)
class TestShouldNotCheckCacheForDirIfCacheMetadataDidNotChange(TestDvc):
def test(self):
remote_local_loader_spy = spy(
dvc.remote.local.RemoteLOCAL.load_dir_cache
)
with patch.object(
dvc.remote.local.RemoteLOCAL,
"load_dir_cache",
remote_local_loader_spy,
):
ret = main(["config", "cache.type", "copy"])
self.assertEqual(ret, 0)
ret = main(["add", self.DATA_DIR])
self.assertEqual(ret, 0)
self.assertEqual(1, remote_local_loader_spy.mock.call_count)
ret = main(["status", "{}.dvc".format(self.DATA_DIR)])
self.assertEqual(ret, 0)
self.assertEqual(1, remote_local_loader_spy.mock.call_count)
class TestShouldCollectDirCacheOnlyOnce(TestDvc):
NEW_LARGE_DIR_SIZE = 1
@patch("dvc.remote.local.LARGE_DIR_SIZE", NEW_LARGE_DIR_SIZE)
def test(self):
from dvc.remote.local import RemoteLOCAL
collect_dir_counter = spy(RemoteLOCAL.collect_dir_cache)
with patch.object(
RemoteLOCAL, "collect_dir_cache", collect_dir_counter
):
LARGE_DIR_FILES_NUM = self.NEW_LARGE_DIR_SIZE + 1
data_dir = "dir"
os.makedirs(data_dir)
for i in range(LARGE_DIR_FILES_NUM):
with open(os.path.join(data_dir, str(i)), "w+") as f:
f.write(str(i))
ret = main(["add", data_dir])
self.assertEqual(0, ret)
ret = main(["status"])
self.assertEqual(0, ret)
ret = main(["status"])
self.assertEqual(0, ret)
self.assertEqual(1, collect_dir_counter.mock.call_count)
class SymlinkAddTestBase(TestDvc):
def _get_data_dir(self):
raise NotImplementedError
def _prepare_external_data(self):
data_dir = self._get_data_dir()
self.data_file_name = "data_file"
external_data_path = os.path.join(data_dir, self.data_file_name)
with open(external_data_path, "w+") as f:
f.write("data")
self.link_name = "data_link"
System.symlink(data_dir, self.link_name)
def _test(self):
self._prepare_external_data()
ret = main(["add", os.path.join(self.link_name, self.data_file_name)])
self.assertEqual(0, ret)
stage_file = self.data_file_name + Stage.STAGE_FILE_SUFFIX
self.assertTrue(os.path.exists(stage_file))
d = load_stage_file(stage_file)
relative_data_path = posixpath.join(
self.link_name, self.data_file_name
)
self.assertEqual(relative_data_path, d["outs"][0]["path"])
class TestShouldAddDataFromExternalSymlink(SymlinkAddTestBase):
def _get_data_dir(self):
return self.mkdtemp()
def test(self):
self._test()
class TestShouldAddDataFromInternalSymlink(SymlinkAddTestBase):
def _get_data_dir(self):
return self.DATA_DIR
def test(self):
self._test()
class TestShouldPlaceStageInDataDirIfRepositoryBelowSymlink(TestDvc):
def test(self):
def is_symlink_true_below_dvc_root(path):
if path == os.path.dirname(self.dvc.root_dir):
return True
return False
with patch.object(
System, "is_symlink", side_effect=is_symlink_true_below_dvc_root
):
ret = main(["add", self.DATA])
self.assertEqual(0, ret)
stage_file_path_on_data_below_symlink = (
os.path.basename(self.DATA) + Stage.STAGE_FILE_SUFFIX
)
self.assertFalse(
os.path.exists(stage_file_path_on_data_below_symlink)
)
stage_file_path = self.DATA + Stage.STAGE_FILE_SUFFIX
self.assertTrue(os.path.exists(stage_file_path))
class TestShouldThrowProperExceptionOnCorruptedStageFile(TestDvc):
def test(self):
with MockLoggerHandlers(logger), ConsoleFontColorsRemover():
reset_logger_error_output()
ret = main(["add", self.FOO])
self.assertEqual(0, ret)
foo_stage = os.path.relpath(self.FOO + Stage.STAGE_FILE_SUFFIX)
# corrupt stage file
with open(foo_stage, "a+") as file:
file.write("this will break yaml file structure")
ret = main(["add", self.BAR])
self.assertEqual(1, ret)
self.assertIn(
"unable to read stage file: {} "
"YAML file structure is corrupted".format(foo_stage),
logger.handlers[1].stream.getvalue(),
)
class TestAddFilename(TestDvc):
def test(self):
ret = main(["add", self.FOO, self.BAR, "-f", "error.dvc"])
self.assertNotEqual(0, ret)
ret = main(["add", "-R", self.DATA_DIR, "-f", "error.dvc"])
self.assertNotEqual(0, ret)
with self.assertRaises(RecursiveAddingWhileUsingFilename):
self.dvc.add(self.DATA_DIR, recursive=True, fname="error.dvc")
ret = main(["add", self.DATA_DIR, "-f", "data_directory.dvc"])
self.assertEqual(0, ret)
self.assertTrue(os.path.exists("data_directory.dvc"))
ret = main(["add", self.FOO, "-f", "bar.dvc"])
self.assertEqual(0, ret)
self.assertTrue(os.path.exists("bar.dvc"))
self.assertFalse(os.path.exists("foo.dvc"))
os.remove("bar.dvc")
ret = main(["add", self.FOO, "--file", "bar.dvc"])
self.assertEqual(0, ret)
self.assertTrue(os.path.exists("bar.dvc"))
self.assertFalse(os.path.exists("foo.dvc"))
class TestShouldCleanUpAfterFailedAdd(TestDvc):
def test(self):
ret = main(["add", self.FOO])
self.assertEqual(0, ret)
foo_stage_file = self.FOO + Stage.STAGE_FILE_SUFFIX
# corrupt stage file
with open(foo_stage_file, "a+") as file:
file.write("this will break yaml file structure")
ret = main(["add", self.BAR])
self.assertEqual(1, ret)
bar_stage_file = self.BAR + Stage.STAGE_FILE_SUFFIX
self.assertFalse(os.path.exists(bar_stage_file))
gitignore_content = get_gitignore_content()
self.assertNotIn("/" + self.BAR, gitignore_content)
class TestShouldNotTrackGitInternalFiles(TestDvc):
def test(self):
stage_creator_spy = spy(dvc.repo.add._create_stages)
with patch.object(dvc.repo.add, "_create_stages", stage_creator_spy):
ret = main(["add", "-R", self.dvc.root_dir])
self.assertEqual(0, ret)
created_stages_filenames = stage_creator_spy.mock.call_args[0][0]
for fname in created_stages_filenames:
self.assertNotIn(".git", fname)
|
|
import posixpath
from collections import defaultdict
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.db.models import CharField, Q
from django.db.models.functions import Length, Substr
from django.db.models.query import BaseIterable
from treebeard.mp_tree import MP_NodeQuerySet
from wagtail.search.queryset import SearchableQuerySetMixin
class TreeQuerySet(MP_NodeQuerySet):
"""
Extends Treebeard's MP_NodeQuerySet with additional useful tree-related operations.
"""
def descendant_of_q(self, other, inclusive=False):
q = Q(path__startswith=other.path) & Q(depth__gte=other.depth)
if not inclusive:
q &= ~Q(pk=other.pk)
return q
def descendant_of(self, other, inclusive=False):
"""
This filters the QuerySet to only contain pages that descend from the specified page.
If inclusive is set to True, it will also contain the page itself (instead of just its descendants).
"""
return self.filter(self.descendant_of_q(other, inclusive))
def not_descendant_of(self, other, inclusive=False):
"""
This filters the QuerySet to not contain any pages that descend from the specified page.
If inclusive is set to True, it will also exclude the specified page.
"""
return self.exclude(self.descendant_of_q(other, inclusive))
def child_of_q(self, other):
return self.descendant_of_q(other) & Q(depth=other.depth + 1)
def child_of(self, other):
"""
This filters the QuerySet to only contain pages that are direct children of the specified page.
"""
return self.filter(self.child_of_q(other))
def not_child_of(self, other):
"""
This filters the QuerySet to not contain any pages that are direct children of the specified page.
"""
return self.exclude(self.child_of_q(other))
def ancestor_of_q(self, other, inclusive=False):
paths = [
other.path[0:pos]
for pos in range(0, len(other.path) + 1, other.steplen)[1:]
]
q = Q(path__in=paths)
if not inclusive:
q &= ~Q(pk=other.pk)
return q
def ancestor_of(self, other, inclusive=False):
"""
This filters the QuerySet to only contain pages that are ancestors of the specified page.
If inclusive is set to True, it will also include the specified page.
"""
return self.filter(self.ancestor_of_q(other, inclusive))
def not_ancestor_of(self, other, inclusive=False):
"""
This filters the QuerySet to not contain any pages that are ancestors of the specified page.
If inclusive is set to True, it will also exclude the specified page.
"""
return self.exclude(self.ancestor_of_q(other, inclusive))
def parent_of_q(self, other):
return Q(path=self.model._get_parent_path_from_path(other.path))
def parent_of(self, other):
"""
This filters the QuerySet to only contain the parent of the specified page.
"""
return self.filter(self.parent_of_q(other))
def not_parent_of(self, other):
"""
This filters the QuerySet to exclude the parent of the specified page.
"""
return self.exclude(self.parent_of_q(other))
def sibling_of_q(self, other, inclusive=True):
q = Q(path__startswith=self.model._get_parent_path_from_path(other.path)) & Q(depth=other.depth)
if not inclusive:
q &= ~Q(pk=other.pk)
return q
def sibling_of(self, other, inclusive=True):
"""
This filters the QuerySet to only contain pages that are siblings of the specified page.
By default, inclusive is set to True so it will include the specified page in the results.
If inclusive is set to False, the page will be excluded from the results.
"""
return self.filter(self.sibling_of_q(other, inclusive))
def not_sibling_of(self, other, inclusive=True):
"""
This filters the QuerySet to not contain any pages that are siblings of the specified page.
By default, inclusive is set to True so it will exclude the specified page from the results.
If inclusive is set to False, the page will be included in the results.
"""
return self.exclude(self.sibling_of_q(other, inclusive))
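# Illustrative usage of the tree filters above (a hypothetical sketch; `home`
# and `events_index` stand in for existing Page instances):
#
#     Page.objects.descendant_of(events_index)                # pages below it
#     Page.objects.child_of(home)                             # direct children
#     Page.objects.ancestor_of(events_index, inclusive=True)  # ancestors + self
#     Page.objects.sibling_of(events_index)                   # includes itself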
class PageQuerySet(SearchableQuerySetMixin, TreeQuerySet):
def live_q(self):
return Q(live=True)
def live(self):
"""
This filters the QuerySet to only contain published pages.
"""
return self.filter(self.live_q())
def not_live(self):
"""
This filters the QuerySet to only contain unpublished pages.
"""
return self.exclude(self.live_q())
def in_menu_q(self):
return Q(show_in_menus=True)
def in_menu(self):
"""
This filters the QuerySet to only contain pages that are in the menus.
"""
return self.filter(self.in_menu_q())
def not_in_menu(self):
"""
This filters the QuerySet to only contain pages that are not in the menus.
"""
return self.exclude(self.in_menu_q())
def page_q(self, other):
return Q(id=other.id)
def page(self, other):
"""
This filters the QuerySet so it only contains the specified page.
"""
return self.filter(self.page_q(other))
def not_page(self, other):
"""
This filters the QuerySet so it doesn't contain the specified page.
"""
return self.exclude(self.page_q(other))
def type_q(self, klass):
content_types = ContentType.objects.get_for_models(*[
model for model in apps.get_models()
if issubclass(model, klass)
]).values()
return Q(content_type__in=list(content_types))
def type(self, model):
"""
This filters the QuerySet to only contain pages that are an instance
of the specified model (including subclasses).
"""
return self.filter(self.type_q(model))
def not_type(self, model):
"""
This filters the QuerySet to not contain any pages which are an instance of the specified model.
"""
return self.exclude(self.type_q(model))
def exact_type_q(self, klass):
return Q(content_type=ContentType.objects.get_for_model(klass))
def exact_type(self, model):
"""
This filters the QuerySet to only contain pages that are an instance of the specified model
(matching the model exactly, not subclasses).
"""
return self.filter(self.exact_type_q(model))
def not_exact_type(self, model):
"""
This filters the QuerySet to not contain any pages which are an instance of the specified model
(matching the model exactly, not subclasses).
"""
return self.exclude(self.exact_type_q(model))
def public_q(self):
from wagtail.core.models import PageViewRestriction
q = Q()
for restriction in PageViewRestriction.objects.all():
q &= ~self.descendant_of_q(restriction.page, inclusive=True)
return q
def public(self):
"""
This filters the QuerySet to only contain pages that are not in a private section
"""
return self.filter(self.public_q())
def not_public(self):
"""
This filters the QuerySet to only contain pages that are in a private section
"""
return self.exclude(self.public_q())
def first_common_ancestor(self, include_self=False, strict=False):
"""
Find the first ancestor that all pages in this queryset have in common.
For example, consider a page hierarchy like::
- Home/
- Foo Event Index/
- Foo Event Page 1/
- Foo Event Page 2/
- Bar Event Index/
- Bar Event Page 1/
- Bar Event Page 2/
The common ancestors for some queries would be:
.. code-block:: python
>>> Page.objects\\
... .type(EventPage)\\
... .first_common_ancestor()
<Page: Home>
>>> Page.objects\\
... .type(EventPage)\\
... .filter(title__contains='Foo')\\
... .first_common_ancestor()
<Page: Foo Event Index>
This method tries to be efficient, but if you have millions of pages
scattered across your page tree, it will be slow.
If `include_self` is True, the ancestor can be one of the pages in the
queryset:
.. code-block:: python
>>> Page.objects\\
... .filter(title__contains='Foo')\\
... .first_common_ancestor()
<Page: Foo Event Index>
>>> Page.objects\\
... .filter(title__exact='Bar Event Index')\\
... .first_common_ancestor()
<Page: Bar Event Index>
A few invalid cases exist: when the queryset is empty, when the root
Page is in the queryset and ``include_self`` is False, and when there
are multiple page trees with no common root (a case Wagtail does not
support). If ``strict`` is False (the default), then the first root
        node is returned in these cases. If ``strict`` is True, then an
``ObjectDoesNotExist`` is raised.
"""
# An empty queryset has no ancestors. This is a problem
if not self.exists():
if strict:
raise self.model.DoesNotExist('Can not find ancestor of empty queryset')
return self.model.get_first_root_node()
if include_self:
# Get all the paths of the matched pages.
paths = self.order_by().values_list('path', flat=True)
else:
# Find all the distinct parent paths of all matched pages.
# The empty `.order_by()` ensures that `Page.path` is not also
            # selected to order the results, which makes `.distinct()` work.
paths = self.order_by()\
.annotate(parent_path=Substr(
'path', 1, Length('path') - self.model.steplen,
output_field=CharField(max_length=255)))\
.values_list('parent_path', flat=True)\
.distinct()
# This method works on anything, not just file system paths.
common_parent_path = posixpath.commonprefix(paths)
# That may have returned a path like (0001, 0002, 000), which is
# missing some chars off the end. Fix this by trimming the path to a
# multiple of `Page.steplen`
extra_chars = len(common_parent_path) % self.model.steplen
if extra_chars != 0:
common_parent_path = common_parent_path[:-extra_chars]
if common_parent_path == '':
# This should only happen when there are multiple trees,
# a situation that Wagtail does not support;
# or when the root node itself is part of the queryset.
if strict:
raise self.model.DoesNotExist('No common ancestor found!')
# Assuming the situation is the latter, just return the root node.
# The root node is not its own ancestor, so this is technically
# incorrect. If you want very correct operation, use `strict=True`
# and receive an error.
return self.model.get_first_root_node()
# Assuming the database is in a consistent state, this page should
# *always* exist. If your database is not in a consistent state, you've
# got bigger problems.
return self.model.objects.get(path=common_parent_path)
def unpublish(self):
"""
This unpublishes all live pages in the QuerySet.
"""
for page in self.live():
page.unpublish()
def specific(self, defer=False):
"""
This efficiently gets all the specific pages for the queryset, using
the minimum number of queries.
When the "defer" keyword argument is set to True, only the basic page
fields will be loaded and all specific fields will be deferred. It
will still generate a query for each page type though (this may be
improved to generate only a single query in a future release).
"""
clone = self._clone()
if defer:
clone._iterable_class = DeferredSpecificIterable
else:
clone._iterable_class = SpecificIterable
return clone
def in_site(self, site):
"""
This filters the QuerySet to only contain pages within the specified site.
"""
return self.descendant_of(site.root_page, inclusive=True)
def specific_iterator(qs, defer=False):
"""
This efficiently iterates all the specific pages in a queryset, using
the minimum number of queries.
This should be called from ``PageQuerySet.specific``
"""
pks_and_types = qs.values_list('pk', 'content_type')
pks_by_type = defaultdict(list)
for pk, content_type in pks_and_types:
pks_by_type[content_type].append(pk)
# Content types are cached by ID, so this will not run any queries.
content_types = {pk: ContentType.objects.get_for_id(pk)
for _, pk in pks_and_types}
# Get the specific instances of all pages, one model class at a time.
pages_by_type = {}
for content_type, pks in pks_by_type.items():
# look up model class for this content type, falling back on the original
# model (i.e. Page) if the more specific one is missing
model = content_types[content_type].model_class() or qs.model
pages = model.objects.filter(pk__in=pks)
if defer:
# Defer all specific fields
from wagtail.core.models import Page
fields = [field.attname for field in Page._meta.get_fields() if field.concrete]
pages = pages.only(*fields)
pages_by_type[content_type] = {page.pk: page for page in pages}
# Yield all of the pages, in the order they occurred in the original query.
for pk, content_type in pks_and_types:
yield pages_by_type[content_type][pk]
class SpecificIterable(BaseIterable):
def __iter__(self):
return specific_iterator(self.queryset)
class DeferredSpecificIterable(BaseIterable):
def __iter__(self):
return specific_iterator(self.queryset, defer=True)
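# Illustrative usage (a hypothetical sketch, not part of this module):
#
#     for page in Page.objects.live().specific():
#         ...  # each `page` is an instance of its most specific model class
#
#     # defer=True loads only the base Page fields and defers the rest:
#     Page.objects.live().specific(defer=True)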
|
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import nested_scopes
import gtk
from twisted import copyright
from twisted.internet import defer
from twisted.python import failure, log, util
from twisted.spread import pb
from twisted.cred.credentials import UsernamePassword
from twisted.internet import error as netError
def login(client=None, **defaults):
"""
@param host:
@param port:
@param identityName:
@param password:
@param serviceName:
@param perspectiveName:
@returntype: Deferred RemoteReference of Perspective
"""
d = defer.Deferred()
LoginDialog(client, d, defaults)
return d
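# Illustrative usage (a hypothetical sketch, not part of this module; it
# assumes a gtk2-compatible reactor is already installed and running):
#
#     d = login(client=None, host='localhost', identityName='guest')
#     d.addCallbacks(lambda perspective: log.msg('logged in'), log.err)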
class GladeKeeper:
"""
@cvar gladefile: The file in which the glade GUI definition is kept.
@type gladefile: str
@cvar _widgets: Widgets that should be attached to me as attributes.
@type _widgets: list of strings
"""
gladefile = None
_widgets = ()
def __init__(self):
from gtk import glade
self.glade = glade.XML(self.gladefile)
# mold can go away when we get a newer pygtk (post 1.99.14)
mold = {}
for k in dir(self):
mold[k] = getattr(self, k)
self.glade.signal_autoconnect(mold)
self._setWidgets()
def _setWidgets(self):
get_widget = self.glade.get_widget
for widgetName in self._widgets:
setattr(self, "_" + widgetName, get_widget(widgetName))
class LoginDialog(GladeKeeper):
# IdentityConnector host port identityName password
# requestLogin -> identityWrapper or login failure
# requestService serviceName perspectiveName client
# window killed
# cancel button pressed
# login button activated
fields = ['host','port','identityName','password',
'perspectiveName']
_widgets = ("hostEntry", "portEntry", "identityNameEntry", "passwordEntry",
"perspectiveNameEntry", "statusBar",
"loginDialog")
_advancedControls = ['perspectiveLabel', 'perspectiveNameEntry',
'protocolLabel', 'versionLabel']
gladefile = util.sibpath(__file__, "login2.glade")
_timeoutID = None
def __init__(self, client, deferred, defaults):
self.client = client
self.deferredResult = deferred
GladeKeeper.__init__(self)
self.setDefaults(defaults)
self._loginDialog.show()
def setDefaults(self, defaults):
if not defaults.has_key('port'):
defaults['port'] = str(pb.portno)
elif isinstance(defaults['port'], (int, long)):
defaults['port'] = str(defaults['port'])
for k, v in defaults.iteritems():
if k in self.fields:
widget = getattr(self, "_%sEntry" % (k,))
widget.set_text(v)
def _setWidgets(self):
GladeKeeper._setWidgets(self)
self._statusContext = self._statusBar.get_context_id("Login dialog.")
get_widget = self.glade.get_widget
get_widget("versionLabel").set_text(copyright.longversion)
get_widget("protocolLabel").set_text("Protocol PB-%s" %
(pb.Broker.version,))
def _on_loginDialog_response(self, widget, response):
handlers = {gtk.RESPONSE_NONE: self._windowClosed,
gtk.RESPONSE_DELETE_EVENT: self._windowClosed,
gtk.RESPONSE_OK: self._doLogin,
gtk.RESPONSE_CANCEL: self._cancelled}
handler = handlers.get(response)
if handler is not None:
handler()
else:
log.msg("Unexpected dialog response %r from %s" % (response,
widget))
def _on_loginDialog_close(self, widget, userdata=None):
self._windowClosed()
def _on_loginDialog_destroy_event(self, widget, userdata=None):
self._windowClosed()
def _cancelled(self):
if not self.deferredResult.called:
self.deferredResult.errback(netError.UserError("User hit Cancel."))
self._loginDialog.destroy()
def _windowClosed(self, reason=None):
if not self.deferredResult.called:
self.deferredResult.errback(netError.UserError("Window closed."))
def _doLogin(self):
idParams = {}
idParams['host'] = self._hostEntry.get_text()
idParams['port'] = self._portEntry.get_text()
idParams['identityName'] = self._identityNameEntry.get_text()
idParams['password'] = self._passwordEntry.get_text()
try:
idParams['port'] = int(idParams['port'])
except ValueError:
pass
f = pb.PBClientFactory()
from twisted.internet import reactor
reactor.connectTCP(idParams['host'], idParams['port'], f)
creds = UsernamePassword(idParams['identityName'], idParams['password'])
d = f.login(creds, self.client)
def _timeoutLogin():
self._timeoutID = None
d.errback(failure.Failure(defer.TimeoutError("Login timed out.")))
self._timeoutID = reactor.callLater(30, _timeoutLogin)
d.addCallbacks(self._cbGotPerspective, self._ebFailedLogin)
self.statusMsg("Contacting server...")
# serviceName = self._serviceNameEntry.get_text()
# perspectiveName = self._perspectiveNameEntry.get_text()
# if not perspectiveName:
# perspectiveName = idParams['identityName']
# d = _identityConnector.requestService(serviceName, perspectiveName,
# self.client)
# d.addCallbacks(self._cbGotPerspective, self._ebFailedLogin)
# setCursor to waiting
def _cbGotPerspective(self, perspective):
self.statusMsg("Connected to server.")
if self._timeoutID is not None:
self._timeoutID.cancel()
self._timeoutID = None
self.deferredResult.callback(perspective)
# clear waiting cursor
self._loginDialog.destroy()
def _ebFailedLogin(self, reason):
if isinstance(reason, failure.Failure):
reason = reason.value
self.statusMsg(reason)
if isinstance(reason, (unicode, str)):
text = reason
else:
text = unicode(reason)
msg = gtk.MessageDialog(self._loginDialog,
gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR,
gtk.BUTTONS_CLOSE,
text)
msg.show_all()
msg.connect("response", lambda *a: msg.destroy())
# hostname not found
# host unreachable
# connection refused
# authentication failed
# no such service
# no such perspective
# internal server error
def _on_advancedButton_toggled(self, widget, userdata=None):
active = widget.get_active()
if active:
op = "show"
else:
op = "hide"
for widgetName in self._advancedControls:
widget = self.glade.get_widget(widgetName)
getattr(widget, op)()
def statusMsg(self, text):
if not isinstance(text, (unicode, str)):
text = unicode(text)
return self._statusBar.push(self._statusContext, text)
|
|
import os
import random
import string
import unittest
from fs.errors import ResourceNotFoundError
from fs.path import relpath
from fs.tempfs import TempFS
from fs.tests import FSTestCases
from fs.tests import ThreadingTestCases
from versioning_fs import VersioningFS
from versioning_fs.errors import VersionError
KB = 1024
MB = pow(1024, 2)
def generate_file(fs, path, size, generator=None):
with fs.open(path, 'wb') as f:
if generator is None:
text = '12345678'
else:
text = generator().next()
        for _ in range(size // len(text)):
f.write(text)
def generate_user_files(fs, dir_path, count, size):
for _ in range(count):
path = os.path.join(dir_path, random_filename())
generate_file(fs, path, size)
def random_filename(size=20):
chars = string.ascii_uppercase + string.digits
return ''.join(random.choice(chars) for _ in range(size))
class BaseTest(unittest.TestCase):
def setUp(self):
rootfs = TempFS()
backup = TempFS(temp_dir=rootfs.getsyspath('/'))
self.fs = VersioningFS(rootfs, backup=backup, tmp=TempFS(),
testing={'time': 1})
def tearDown(self):
self.fs.close()
class BaseTimeSensitiveTest(unittest.TestCase):
"""The base class for tests that should not bypass the time settings for
rdiff-backup.
"""
def setUp(self):
rootfs = TempFS()
backup = TempFS(temp_dir=rootfs.getsyspath('/'))
self.fs = VersioningFS(rootfs, backup=backup, tmp=TempFS())
def tearDown(self):
self.fs.close()
class TestVersioningFS(FSTestCases, ThreadingTestCases, BaseTimeSensitiveTest):
maxDiff = None
class TestSnapshotAttributes(BaseTimeSensitiveTest):
"""Test meta data manipulation for the files involved in snapshots."""
def test_snapshot_file_versions(self):
# make sure no snapshot information exists yet
self.assert_all_files_have_snapshot_info(should_exist=False)
repeat_text = 'smartfile_versioning_rocks_\n'
def file_contents():
while True:
yield repeat_text
# generate file 1
file_name = random_filename()
generate_file(fs=self.fs, path=file_name, size=5*KB,
generator=file_contents)
# make sure each user file is version 1
self.assert_all_file_versions_equal(1)
# generate file 2
file_name = random_filename()
generate_file(fs=self.fs, path=file_name, size=5*KB,
generator=file_contents)
# make sure each user file is version 1
self.assert_all_file_versions_equal(1)
with self.fs.open(file_name, 'wb') as f:
f.write('hello world')
# check that the updated file is at version 2
self.assertEqual(self.fs.version(file_name), 2)
# not all of the files will be at the same version
with self.assertRaises(AssertionError):
self.assert_all_file_versions_equal(1)
        # remove the updated file; the remaining files should still be at version 1
self.fs.remove(file_name)
self.assert_all_file_versions_equal(1)
# make sure all files in the user folder have snapshot information
self.assert_all_files_have_snapshot_info(should_exist=True)
def test_file_version_timestamps(self):
"""Test version information for a specific path."""
file_name = random_filename()
with self.fs.open(file_name, 'wb') as f:
f.write('hello world\n')
self.assertEqual(len(self.fs.list_info(file_name).keys()), 1)
with self.fs.open(file_name, 'wb') as f:
f.write('hello world123\n')
with self.fs.open(file_name, 'wb') as f:
f.write('hello world123456\n')
version_info = self.fs.list_info(file_name)
dates = version_info.values()
for z in range(len(dates) - 1):
current_date = dates[z]
next_date = dates[z+1]
self.assertTrue(current_date <= next_date)
def test_file_version_sizes(self):
"""Test version sizes for a specific path."""
file_name = random_filename()
for _ in range(3):
with self.fs.open(file_name, 'wb') as f:
f.write(random_filename())
f.write('\n')
self.assertEqual(len(self.fs.list_sizes(file_name).keys()), 3)
def assert_all_file_versions_equal(self, version):
for path in self.fs.walkfiles('/'):
            if 'abcdefg' not in path and 'tmp' not in path:
path = relpath(path)
file_version = self.fs.version(path)
self.assertEqual(file_version, version)
def assert_all_files_have_snapshot_info(self, should_exist=True):
for path in self.fs.walkfiles('/'):
            if 'abcdefg' not in path and 'tmp' not in path:
path = relpath(path)
snapshot_info_exists = self.fs.has_snapshot(path)
self.assertEqual(snapshot_info_exists, should_exist)
class TestFileVersions(BaseTest):
"""Test file versions."""
def test_single_file_write(self):
file_name = random_filename()
f = self.fs.open(file_name, 'wb')
f.write('smartfile_versioning_rocks\n')
f.close()
# check that version 1 was created
self.assertEqual(self.fs.version(file_name), 1)
f = self.fs.open(file_name, 'rb')
self.assertEqual(f.read(), 'smartfile_versioning_rocks\n')
f.close()
# make some changes to the file and check for version increment
f = self.fs.open(file_name, 'wb')
f.writelines("hello world!\nhello world!")
f.close()
self.assertEqual(self.fs.version(file_name), 2)
# check the contents when we open the file
f = self.fs.open(file_name, 'rb')
self.assertEqual(f.readlines(), ["hello world!\n", "hello world!"])
f.close()
# make sure the version has not been updated since reading
self.assertEqual(self.fs.version(file_name), 2)
def test_single_file_append(self):
file_name = random_filename()
f = self.fs.open(file_name, 'ab')
f.write('smartfile_versioning_rocks\n')
f.close()
# check that version 1 was created
self.assertEqual(self.fs.version(file_name), 1)
f = self.fs.open(file_name, 'rb')
self.assertEqual(f.read(), 'smartfile_versioning_rocks\n')
f.close()
# make some changes to the file and check for version increment
f = self.fs.open(file_name, 'ab')
f.writelines("hello world!\nhello world!")
f.close()
self.assertEqual(self.fs.version(file_name), 2)
# check the contents when we open the file
f = self.fs.open(file_name, 'rb')
self.assertEqual(f.readlines(), ['smartfile_versioning_rocks\n',
"hello world!\n", "hello world!"])
f.close()
# make sure the version has not been updated since reading
self.assertEqual(self.fs.version(file_name), 2)
def test_open_old_version(self):
file_name = random_filename()
f = self.fs.open(file_name, 'wb')
f.write("smartfile")
f.close()
f = self.fs.open(file_name, 'wb')
f.write("smartfile versioning")
f.close()
f = self.fs.open(file_name, 'wb')
f.write("smartfile versioning rocks")
f.close()
# now try opening previous versions of the file and check content
f = self.fs.open(file_name, 'rb', version=1)
self.assertEqual(f.read(), "smartfile")
f.close()
f = self.fs.open(file_name, 'rb', version=2)
self.assertEqual(f.read(), "smartfile versioning")
f.close()
f = self.fs.open(file_name, 'rb', version=3)
self.assertEqual(f.read(), "smartfile versioning rocks")
f.close()
# the file version has not changed since we only read the version
self.assertEqual(self.fs.version(file_name), 3)
def test_bad_version(self):
repeat_text = 'smartfile_versioning_rocks_\n'
def file_contents():
while True:
yield repeat_text
# generate file 1
file_name = random_filename()
generate_file(fs=self.fs, path=file_name, size=5*KB,
generator=file_contents)
# version 0 should never exist
with self.assertRaises(ResourceNotFoundError):
self.fs.open(file_name, 'rb', version=0)
# version 2 has not been created yet
with self.assertRaises(ResourceNotFoundError):
self.fs.open(file_name, 'rb', version=2)
def test_skip_version_snapshot(self):
"""
Test opening a file but setting 'take_snapshot' to False.
A version should not be created.
"""
file_name = random_filename()
f = self.fs.open(file_name, 'wb', take_snapshot=False)
f.write('smartfile_versioning_rocks\n')
f.close()
# check that version 1 was not created
self.assertEqual(self.fs.version(file_name), 0)
class TestVersionDeletion(BaseTimeSensitiveTest):
"""Test the deletion of older versions."""
def test_delete_older_versions(self):
file_name = random_filename()
iterations = 5
# generate some files
for _ in range(iterations):
with self.fs.open(file_name, 'wb') as f:
f.write(random_filename())
# try a bad version: remove versions before 1
with self.assertRaises(VersionError):
self.fs.remove_versions_before(file_name, version=1)
# try a bad version: remove versions after the current+1
with self.assertRaises(VersionError):
invalid_version = iterations + 1
self.fs.remove_versions_before(file_name, version=invalid_version)
# try a bad version: use an invalid time format
with self.assertRaises(VersionError):
invalid_version = "3/4/1998T13:00"
self.fs.remove_versions_before(file_name, version=invalid_version)
# look at the time of version 2 and delete anything older than it
self.fs.remove_versions_before(path=file_name, version=2)
# we deleted versions older than 2 which deleted version 1
total_versions = self.fs.version(file_name)
self.assertEqual(total_versions, 4)
# try deleting with a timestamp string rather than version number
delete_date = self.fs.list_info(file_name)[2]
self.fs.remove_versions_before(path=file_name, version=delete_date)
# we deleted versions before the date of the second version
total_versions = self.fs.version(file_name)
self.assertEqual(total_versions, 3)
# try deleting a version with a string that is also a digit
self.fs.remove_versions_before(path=file_name, version=u'2')
# we deleted versions older than 2 which deleted version 1
total_versions = self.fs.version(file_name)
self.assertEqual(total_versions, 2)
class TestRdiffBackupSleep(BaseTimeSensitiveTest):
"""Rdiff backup cannot make two snapshots within 1 second.
This test checks that the filewrapper sleeps for 1 second before
trying to make a snapshot.
"""
def test_quick_file_changes(self):
# test two file edits within 1 second
file_name = random_filename()
iterations = 3
for _ in range(iterations):
with self.fs.open(file_name, 'wb') as f:
f.write(random_filename())
self.assertEqual(self.fs.version(file_name), iterations)
class TestFileOperations(BaseTest):
"""Test fs.move, fs.movedir, fs.remove, and fs.removedir"""
def test_move_single_file(self):
"""Move a single file, which should also move its backups."""
# have 2 versions of a file we create
file_name = random_filename()
contents = ["smartfile", "smartfile versioning"]
for content in contents:
with self.fs.open(file_name, 'wb') as f:
f.write(content)
# move the file somewhere else
new_filename = random_filename()
self.fs.move(file_name, new_filename)
# check if versioning is still available
for version, content in enumerate(contents):
with self.fs.open(new_filename, 'rb', version=version+1) as f:
self.assertEqual(f.read(), contents[version])
def test_move_file_into_directory(self):
"""Move a file into a directory and check that backups were moved."""
file_name = random_filename()
dir_name = random_filename()
file_path = os.path.join(dir_name, file_name)
contents = ["smartfile", "smartfile versioning",
"smartfile versioning rocks"]
for content in contents:
with self.fs.open(file_name, 'wb') as f:
f.write(content)
# create a directory for the file to be moved into
self.fs.makedir(dir_name)
# move the file into the directory
self.fs.move(file_name, file_path)
# check if versioning is still available
self.assertTrue(self.fs.has_snapshot(file_path))
for version, content in enumerate(contents):
f = self.fs.open(file_path, 'rb', version=version+1)
self.assertEqual(f.read(), contents[version])
f.close()
def test_move_directory(self):
"""Move a directory and check that backups were moved."""
file1_name = random_filename()
dir1_name = random_filename()
dir2_name = random_filename()
file1_full_path = os.path.join(dir1_name, file1_name)
file1_new_full_path = os.path.join(dir2_name, file1_name)
# create a directory for the file we are going to create
self.fs.makedir(dir1_name)
contents = ["smartfile", "smartfile versioning"]
for content in contents:
with self.fs.open(file1_full_path, 'wb') as f:
f.write(content)
# move the directory
self.fs.movedir(dir1_name, dir2_name)
# check if versioning is still available
self.assertTrue(self.fs.has_snapshot(file1_new_full_path))
for version, content in enumerate(contents):
f = self.fs.open(file1_new_full_path, 'rb', version=version+1)
self.assertEqual(f.read(), contents[version])
f.close()
def test_rename_file(self):
"""Rename a file and check that backups were moved."""
file_name = random_filename()
file2_name = random_filename()
contents = ["smartfile", "smartfile versioning",
"smartfile versioning rocks"]
for content in contents:
with self.fs.open(file_name, 'wb') as f:
f.write(content)
# Rename the file
self.fs.rename(file_name, file2_name)
# check if versioning is still available
self.assertTrue(self.fs.has_snapshot(file2_name))
for version, content in enumerate(contents):
f = self.fs.open(file2_name, 'rb', version=version+1)
self.assertEqual(f.read(), contents[version])
f.close()
def test_rename_directory(self):
"""Rename a directory and check that backups were moved."""
file1_name = random_filename()
dir1_name = random_filename()
dir2_name = random_filename()
file1_full_path = os.path.join(dir1_name, file1_name)
file1_new_full_path = os.path.join(dir2_name, file1_name)
# create a directory for the file we are going to create
self.fs.makedir(dir1_name)
contents = ["smartfile", "smartfile versioning"]
for content in contents:
with self.fs.open(file1_full_path, 'wb') as f:
f.write(content)
# move the directory
self.fs.rename(dir1_name, dir2_name)
# check if versioning is still available
self.assertTrue(self.fs.has_snapshot(file1_new_full_path))
for version, content in enumerate(contents):
f = self.fs.open(file1_new_full_path, 'rb', version=version+1)
self.assertEqual(f.read(), contents[version])
f.close()
def test_remove_single_file(self):
"""Remove a single file along with its backups."""
file_name = random_filename()
with self.fs.open(file_name, 'wb') as f:
f.write("smartfile")
self.fs.remove(file_name)
self.assertFalse(self.fs.has_snapshot(file_name))
def test_remove_single_dir(self):
"""Remove a single dir along with its backups."""
dir_name = random_filename()
self.fs.makedir(dir_name)
files = [random_filename() for x in range(4)]
paths = [os.path.join(dir_name, path) for path in files]
for path in paths:
for _ in range(2):
with self.fs.open(path, 'wb') as f:
f.write('hello world')
self.fs.removedir(dir_name, force=True)
for path in paths:
self.assertTrue(not self.fs.has_snapshot(path))
if __name__ == "__main__":
unittest.main()
|
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from random import uniform
from compas.geometry import transform_points
from compas.geometry import centroid_points
from compas.geometry import bounding_box
from compas.geometry import Primitive
from compas.geometry import Point
__all__ = ['Pointcloud']
class Pointcloud(Primitive):
"""Class for working with pointclouds.
Parameters
----------
points : sequence[point]
A sequence of points to add to the cloud.
**kwargs : dict[str, Any], optional
Additional keyword arguments collected in a dict.
Attributes
----------
points : list[:class:`~compas.geometry.Point`]
The points of the cloud.
Examples
--------
>>>
"""
def __init__(self, points, **kwargs):
super(Pointcloud, self).__init__(**kwargs)
self._points = None
self.points = points
@property
def DATASCHEMA(self):
from schema import Schema
from compas.data import is_float3
return Schema({
'points': lambda points: all(is_float3(point) for point in points)
})
@property
def JSONSCHEMANAME(self):
return 'pointcloud'
@property
def data(self):
return {'points': [point.data for point in self.points]}
@data.setter
def data(self, data):
self._points = [Point.from_data(point) for point in data['points']]
@classmethod
def from_data(cls, data):
return cls(data['points'])
# ==========================================================================
# properties
# ==========================================================================
@property
def points(self):
return self._points
@points.setter
def points(self, points):
self._points = [Point(*point) for point in points]
@property
def centroid(self):
return centroid_points(self.points)
@property
def bounding_box(self):
return bounding_box(self.points)
# ==========================================================================
# customization
# ==========================================================================
def __repr__(self):
return 'Pointcloud({0!r})'.format(self.points)
def __len__(self):
return len(self.points)
def __getitem__(self, key):
if key > len(self) - 1:
raise KeyError
return self.points[key]
def __setitem__(self, key, value):
if key > len(self) - 1:
raise KeyError
self.points[key] = value
def __iter__(self):
return iter(self.points)
def __eq__(self, other):
"""Is this pointcloud equal to the other pointcloud?
Two pointclouds are considered equal if they have the same number of points
and if the XYZ coordinates of the corresponding points are identical.
Parameters
----------
other : :class:`~compas.geometry.Pointcloud` | list[[float, float, float] | :class:`~compas.geometry.Point`]
The pointcloud to compare.
Returns
-------
bool
True if the pointclouds are equal.
False otherwise.
"""
if len(self) != len(other):
return False
A = sorted(self, key=lambda point: (point[0], point[1], point[2]))
B = sorted(other, key=lambda point: (point[0], point[1], point[2]))
return all(a == b for a, b in zip(A, B))
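    # Hedged illustration (not part of the original docstring): because the
    # comparison above sorts both clouds by XYZ first, point order is ignored.
    #
    #     >>> a = Pointcloud([[0, 0, 0], [1, 0, 0]])
    #     >>> b = Pointcloud([[1, 0, 0], [0, 0, 0]])
    #     >>> a == b
    #     True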
# ==========================================================================
# constructors
# ==========================================================================
@classmethod
def from_ply(cls, filepath):
"""Construct a pointcloud from a PLY file.
Parameters
----------
filepath : str | bytes | os.PathLike
Path of the PLY file.
Returns
-------
:class:`~compas.geometry.Pointcloud`
"""
pass
@classmethod
def from_pcd(cls, filepath):
"""Construct a pointcloud from a PCD file.
Parameters
----------
filepath : str | bytes | os.PathLike
Path of the PCD file.
Returns
-------
:class:`~compas.geometry.Pointcloud`
"""
pass
@classmethod
def from_bounds(cls, x, y, z, n):
"""Construct a point cloud within a given box.
Parameters
----------
x : float | tuple[float, float]
Size of the cloud in the X direction.
If a single value, the size is (0, x).
If a pair of values, the size is (x[0], x[1]).
y : float | tuple[float, float]
Size of the cloud in the Y direction.
If a single value, the size is (0, y).
If a pair of values, the size is (y[0], y[1]).
z : float | tuple[float, float]
Size of the cloud in the Z direction.
If a single value, the size is (0, z).
If a pair of values, the size is (z[0], z[1]).
n : int
The number of points in the cloud.
Returns
-------
:class:`~compas.geometry.Pointcloud`
Notes
-----
        The XYZ coordinates of the `n` points are randomly chosen within the provided `x`, `y`, and `z` bounds.
        Therefore, there is no guarantee that the bounds are part of the resulting coordinates.
Examples
--------
>>>
"""
try:
len(x)
except TypeError:
xmin = 0
xmax = x
else:
xmin, xmax = x
try:
len(y)
except TypeError:
ymin = 0
ymax = y
else:
ymin, ymax = y
try:
len(z)
except TypeError:
zmin = 0
zmax = z
else:
zmin, zmax = z
x = [uniform(xmin, xmax) for i in range(n)]
y = [uniform(ymin, ymax) for i in range(n)]
z = [uniform(zmin, zmax) for i in range(n)]
return cls(list(map(list, zip(x, y, z))))
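    # Hedged usage sketch for ``from_bounds`` (illustrative only): a single
    # value is interpreted as (0, value), a pair as (min, max).
    #
    #     >>> cloud = Pointcloud.from_bounds(10, (2, 4), 1, n=100)
    #     >>> all(0 <= x <= 10 and 2 <= y <= 4 and 0 <= z <= 1 for x, y, z in cloud.points)
    #     True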
@classmethod
def from_box(cls, box, n):
"""Construct a point cloud within a given box.
Parameters
----------
box: :class:`~compas.geometry.Box`
The axis aligned bounding box of the cloud.
n: int
The number of points in the cloud.
Returns
-------
:class:`~compas.geometry.Pointcloud`
Examples
--------
>>> from compas.geometry import Box
>>> cloud = Pointcloud.from_box(Box.from_width_height_depth(10, 3, 5), 100)
>>> all((-5 < x < +5) and (-2.5 < y < +2.5) and (-1.5 < z < +1.5) for x, y, z in cloud.points)
True
"""
points = box.points
x, y, z = zip(*points)
xmin, xmax = min(x), max(x)
ymin, ymax = min(y), max(y)
zmin, zmax = min(z), max(z)
x = [uniform(xmin, xmax) for i in range(n)]
y = [uniform(ymin, ymax) for i in range(n)]
z = [uniform(zmin, zmax) for i in range(n)]
return cls(list(map(list, zip(x, y, z))))
# ==========================================================================
# methods
# ==========================================================================
def transform(self, T):
"""Apply a transformation to the pointcloud.
Parameters
----------
T : :class:`~compas.geometry.Transformation`
The transformation.
Returns
-------
None
The cloud is modified in place.
"""
for index, point in enumerate(transform_points(self.points, T)):
self.points[index].x = point[0]
self.points[index].y = point[1]
self.points[index].z = point[2]
|
|
import collections
import numpy as np
import theano
import theano.tensor as TT
import rllab.misc.logger as logger
from rllab.algos.batch_polopt import BatchPolopt, BatchSampler
from rllab.algos.npo import NPO
from rllab.misc import ext
from rllab.misc.overrides import overrides
from rllab.sampler import parallel_sampler
from sandbox.snn4hrl.distributions.categorical import from_index, from_onehot
from sandbox.snn4hrl.regressors.latent_regressor import Latent_regressor
from sandbox.snn4hrl.sampler.utils import rollout
from sandbox.snn4hrl.sampler.utils_snn import rollout_snn
class BatchSampler_snn(BatchSampler):
"""
    Allows giving a bonus for MI and other bonus_evaluators, hallucinating if needed (not used in the paper),
    and switching the latent every certain number of time-steps.
"""
def __init__(self,
*args, # this collects algo, passing it to BatchSampler in the super __init__
bonus_evaluator=None, # list of bonus evals
                 reward_coef_bonus=None,  # list of coefficients, one per bonus evaluator, applied to its bonus
latent_regressor=None, # Latent_regressor object for MI. Provides logging AND bonus if needed
reward_regressor_mi=0, # this is for the regressor bonus, not the grid
self_normalize=False, # this is for the hallucinated samples importance weight
switch_lat_every=0,
**kwargs
):
super(BatchSampler_snn, self).__init__(*args, **kwargs) # this should be giving a self.algo
self.bonus_evaluator = bonus_evaluator if bonus_evaluator else []
self.reward_coef_bonus = reward_coef_bonus if reward_coef_bonus else [0] * len(self.bonus_evaluator)
self.reward_regressor_mi = reward_regressor_mi
self.latent_regressor = latent_regressor
self.self_normalize = self_normalize
self.switch_lat_every = switch_lat_every
def _worker_collect_one_path_snn(self, G, max_path_length, switch_lat_every=0, scope=None):
G = parallel_sampler._get_scoped_G(G, scope)
path = rollout_snn(G.env, G.policy, max_path_length, switch_lat_every=switch_lat_every)
return path, len(path["rewards"])
def sample_paths(
self,
policy_params,
max_samples,
max_path_length=np.inf,
env_params=None,
scope=None):
"""
:param policy_params: parameters for the policy. This will be updated on each worker process
:param max_samples: desired maximum number of samples to be collected. The actual number of collected samples
might be greater since all trajectories will be rolled out either until termination or until max_path_length is
reached
:param max_path_length: horizon / maximum length of a single trajectory
:return: a list of collected paths
"""
parallel_sampler.singleton_pool.run_each(
parallel_sampler._worker_set_policy_params,
[(policy_params, scope)] * parallel_sampler.singleton_pool.n_parallel
)
if env_params is not None:
parallel_sampler.singleton_pool.run_each(
parallel_sampler._worker_set_env_params,
[(env_params, scope)] * parallel_sampler.singleton_pool.n_parallel
)
return parallel_sampler.singleton_pool.run_collect(
# parallel_sampler._worker_collect_one_path_snn, # now this is defined in parallel_sampler also!
self._worker_collect_one_path_snn, # now this is defined in parallel_sampler also!
threshold=max_samples,
args=(max_path_length, self.switch_lat_every, scope),
show_prog_bar=True
)
def obtain_samples(self, itr):
cur_params = self.algo.policy.get_param_values()
paths = self.sample_paths( # use the sample function above
policy_params=cur_params,
max_samples=self.algo.batch_size,
max_path_length=self.algo.max_path_length,
scope=self.algo.scope,
)
if self.algo.whole_paths:
return paths
else:
paths_truncated = parallel_sampler.truncate_paths(paths, self.algo.batch_size)
return paths_truncated
@overrides
def process_samples(self, itr, paths):
# count visitations or whatever the bonus wants to do. This should not modify the paths
for b_eval in self.bonus_evaluator:
logger.log("fitting bonus evaluator before processing...")
b_eval.fit_before_process_samples(paths)
logger.log("fitted")
# save real undiscounted reward before changing them
undiscounted_returns = [sum(path["rewards"]) for path in paths]
logger.record_tabular('TrueAverageReturn', np.mean(undiscounted_returns))
for path in paths:
path['true_rewards'] = list(path['rewards'])
# If using a latent regressor (and possibly adding MI to the reward):
if isinstance(self.latent_regressor, Latent_regressor):
with logger.prefix(' Latent_regressor '):
self.latent_regressor.fit(paths)
if self.reward_regressor_mi:
for i, path in enumerate(paths):
path['logli_latent_regressor'] = self.latent_regressor.predict_log_likelihood(
[path], [path['agent_infos']['latents']])[0] # this is for paths usually..
path['rewards'] += self.reward_regressor_mi * path[
'logli_latent_regressor'] # the logli of the latent is the variable of the mutual information
# for the extra bonus
for b, b_eval in enumerate(self.bonus_evaluator):
for i, path in enumerate(paths):
bonuses = b_eval.predict(path)
path['rewards'] += self.reward_coef_bonus[b] * bonuses
real_samples = ext.extract_dict(
BatchSampler.process_samples(self, itr, paths),
# I don't need to process the hallucinated samples: the R, A,.. same!
"observations", "actions", "advantages", "env_infos", "agent_infos"
)
real_samples["importance_weights"] = np.ones_like(real_samples["advantages"])
return real_samples
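    # Hedged summary (not from the original source) of the per-time-step reward
    # composition performed in process_samples above:
    #
    #     reward = env_reward
    #              + reward_regressor_mi * log p(latent | path)          # MI bonus
    #              + sum_b(reward_coef_bonus[b] * bonus_evaluator[b].predict(path))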
def log_diagnostics(self, paths):
for b_eval in self.bonus_evaluator:
b_eval.log_diagnostics(paths)
if isinstance(self.latent_regressor, Latent_regressor):
with logger.prefix(' Latent regressor logging | '):
self.latent_regressor.log_diagnostics(paths)
class NPO_snn(NPO):
"""
Natural Policy Optimization for SNNs:
- differentiable reward bonus for L2 or KL between conditional distributions (commented out: not used in paper).
    - allows giving rewards for several divergence metrics among conditional distributions (through BatchSampler_snn)
    - logs individually for every latent as well as some "hierarchy" metric or the deterministic policy
"""
def __init__(
self,
# some extra logging. What of this could be included in the sampler?
log_individual_latents=False, # to log the progress of each individual latent
log_deterministic=False, # log the performance of the policy with std=0 (for each latent separate)
log_hierarchy=False,
bonus_evaluator=None,
reward_coef_bonus=None,
latent_regressor=None,
reward_regressor_mi=0, # kwargs to the sampler (that also processes)
switch_lat_every=0,
**kwargs):
# some logging
self.log_individual_latents = log_individual_latents
self.log_deterministic = log_deterministic
self.log_hierarchy = log_hierarchy
sampler_cls = BatchSampler_snn
sampler_args = {'switch_lat_every': switch_lat_every,
'latent_regressor': latent_regressor,
'bonus_evaluator': bonus_evaluator,
'reward_coef_bonus': reward_coef_bonus,
'reward_regressor_mi': reward_regressor_mi,
}
super(NPO_snn, self).__init__(sampler_cls=sampler_cls, sampler_args=sampler_args, **kwargs)
@overrides
def init_opt(self):
assert not self.policy.recurrent
is_recurrent = int(self.policy.recurrent)
obs_var = self.env.observation_space.new_tensor_variable(
'obs',
extra_dims=1 + is_recurrent,
)
action_var = self.env.action_space.new_tensor_variable(
'action',
extra_dims=1 + is_recurrent,
)
latent_var = self.policy.latent_space.new_tensor_variable(
'latents',
extra_dims=1 + is_recurrent,
)
advantage_var = ext.new_tensor(
'advantage',
ndim=1 + is_recurrent,
dtype=theano.config.floatX
)
dist = self.policy.distribution # this can still be the dist P(a|s,__h__)
old_dist_info_vars = {
k: ext.new_tensor(
'old_%s' % k, # define tensors old_mean and old_log_std
ndim=2 + is_recurrent,
dtype=theano.config.floatX
) for k in dist.dist_info_keys
}
        old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]  # put the 2 tensors above in a list
if is_recurrent:
valid_var = TT.matrix('valid')
else:
valid_var = None
dist_info_vars = self.policy.dist_info_sym(obs_var, latent_var)
kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
lr = dist.likelihood_ratio_sym(action_var, old_dist_info_vars, dist_info_vars)
if is_recurrent:
mean_kl = TT.sum(kl * valid_var) / TT.sum(valid_var)
surr_loss = - TT.sum(lr * advantage_var * valid_var) / TT.sum(valid_var)
else:
mean_kl = TT.mean(kl)
surr_loss = - TT.mean(lr * advantage_var)
loss = surr_loss
input_list = [ # these are sym var. the inputs in optimize_policy have to be in same order!
obs_var,
action_var,
advantage_var,
latent_var,
] + old_dist_info_vars_list # provide old mean and var, for the new states as they were sampled from it!
if is_recurrent:
input_list.append(valid_var)
self.optimizer.update_opt(
loss=loss,
target=self.policy,
leq_constraint=(mean_kl, self.step_size),
inputs=input_list,
constraint_name="mean_kl"
)
return dict()
@overrides
def optimize_policy(self, itr,
samples_data): # make that samples_data comes with latents: see train in batch_polopt
all_input_values = tuple(ext.extract( # it will be in agent_infos!!! under key "latents"
samples_data,
"observations", "actions", "advantages"
))
agent_infos = samples_data["agent_infos"]
all_input_values += (agent_infos[
"latents"],) # latents has already been processed and is the concat of all latents, but keeps key "latents"
info_list = [agent_infos[k] for k in
self.policy.distribution.dist_info_keys] # these are the mean and var used at rollout, corresponding to
all_input_values += tuple(info_list) # old_dist_info_vars_list as symbolic var
if self.policy.recurrent:
all_input_values += (samples_data["valids"],)
loss_before = self.optimizer.loss(all_input_values)
# this should always be 0. If it's not there is a problem.
mean_kl_before = self.optimizer.constraint_val(all_input_values)
logger.record_tabular('MeanKL_Before', mean_kl_before)
with logger.prefix(' PolicyOptimize | '):
self.optimizer.optimize(all_input_values)
mean_kl = self.optimizer.constraint_val(all_input_values)
loss_after = self.optimizer.loss(all_input_values)
logger.record_tabular('LossAfter', loss_after)
logger.record_tabular('MeanKL', mean_kl)
logger.record_tabular('dLoss', loss_before - loss_after)
return dict()
@overrides
def log_diagnostics(self, paths):
BatchPolopt.log_diagnostics(self, paths)
self.sampler.log_diagnostics(paths)
if self.policy.latent_dim:
if self.log_individual_latents and not self.policy.resample: # this is only valid for finite discrete latents!!
all_latent_avg_returns = []
clustered_by_latents = collections.OrderedDict() # this could be done within the distribution to be more general, but ugly
for lat_key in range(self.policy.latent_dim):
clustered_by_latents[lat_key] = []
for path in paths:
lat = path['agent_infos']['latents'][0]
lat_key = int(from_onehot(lat)) # from_onehot returns an axis less than the input.
clustered_by_latents[lat_key].append(path)
for latent_key, paths in clustered_by_latents.items(): # what to do if this is empty?? set a default!
with logger.tabular_prefix(str(latent_key)), logger.prefix(str(latent_key)):
if paths:
undiscounted_rewards = [sum(path["true_rewards"]) for path in paths]
else:
undiscounted_rewards = [0]
all_latent_avg_returns.append(np.mean(undiscounted_rewards))
logger.record_tabular('Avg_TrueReturn', np.mean(undiscounted_rewards))
logger.record_tabular('Std_TrueReturn', np.std(undiscounted_rewards))
logger.record_tabular('Max_TrueReturn', np.max(undiscounted_rewards))
if self.log_deterministic:
lat = from_index(latent_key, self.policy.latent_dim)
with self.policy.fix_latent(lat), self.policy.set_std_to_0():
path_det = rollout(self.env, self.policy, self.max_path_length)
logger.record_tabular('Deterministic_TrueReturn', np.sum(path_det["rewards"]))
with logger.tabular_prefix('all_lat_'), logger.prefix('all_lat_'):
logger.record_tabular('MaxAvgReturn', np.max(all_latent_avg_returns))
logger.record_tabular('MinAvgReturn', np.min(all_latent_avg_returns))
logger.record_tabular('StdAvgReturn', np.std(all_latent_avg_returns))
if self.log_hierarchy:
max_in_path_length = 10
completed_in_paths = 0
path = rollout(self.env, self.policy, max_path_length=max_in_path_length, animated=False)
if len(path['rewards']) == max_in_path_length:
completed_in_paths += 1
for t in range(1, 50):
path = rollout(self.env, self.policy, max_path_length=10, animated=False,
reset_start_rollout=False)
if len(path['rewards']) < 10:
break
completed_in_paths += 1
logger.record_tabular('Hierarchy', completed_in_paths)
else:
if self.log_deterministic:
with self.policy.set_std_to_0():
path = rollout(self.env, self.policy, self.max_path_length)
logger.record_tabular('Deterministic_TrueReturn', np.sum(path["rewards"]))
|
|
# pylint: disable=W0212
import json
import logging
from datetime import datetime
from django.utils.timezone import utc
import re
import redis
import botbot_plugins.plugins
from botbot_plugins.base import PrivateMessage
from django.core.cache import cache
from django.conf import settings
from django.utils.importlib import import_module
from django_statsd.clients import statsd
from botbot.apps.bots import models as bots_models
from botbot.apps.plugins.utils import convert_nano_timestamp, log_on_error
from .plugin import RealPluginMixin
CACHE_TIMEOUT_2H = 7200
LOG = logging.getLogger('botbot.plugin_runner')
class Line(object):
"""
All the methods and data necessary for a plugin to act on a line
"""
def __init__(self, packet, app):
self.full_text = packet['Content']
self.text = packet['Content']
self.user = packet['User']
# Private attributes not accessible to external plugins
self._chatbot_id = packet['ChatBotId']
self._raw = packet['Raw']
self._channel_name = packet['Channel'].strip()
self._command = packet['Command']
self._is_message = packet['Command'] == 'PRIVMSG'
self._host = packet['Host']
self._received = convert_nano_timestamp(packet['Received'])
self.is_direct_message = self.check_direct_message()
@property
def _chatbot(self):
"""Simple caching for ChatBot model"""
if not hasattr(self, '_chatbot_cache'):
cache_key = 'chatbot:{0}'.format(self._chatbot_id)
chatbot = cache.get(cache_key)
if not chatbot:
chatbot = bots_models.ChatBot.objects.get(id=self._chatbot_id)
cache.set(cache_key, chatbot, CACHE_TIMEOUT_2H)
self._chatbot_cache = chatbot
return self._chatbot_cache
@property
def _channel(self):
"""Simple caching for Channel model"""
if not hasattr(self, '_channel_cache'):
cache_key = 'channel:{0}-{1}'.format(self._chatbot_id, self._channel_name)
channel = cache.get(cache_key)
if not channel and self._channel_name.startswith("#"):
channel = self._chatbot.channel_set.get(
name=self._channel_name)
cache.set(cache_key, channel, CACHE_TIMEOUT_2H)
"""
            The following logging is to help debug issues in Sentry. For some
            channels, we are getting occasional issues with the
            ``channel_set.get()`` lookup above.
"""
LOG.debug(channel)
LOG.debug(self._channel_name)
LOG.debug(cache_key)
LOG.debug("%s", ", ".join(self._chatbot.channel_set.values_list('name', flat=True)))
self._channel_cache = channel
return self._channel_cache
@property
def _active_plugin_slugs(self):
if not hasattr(self, '_active_plugin_slugs_cache'):
if self._channel:
self._active_plugin_slugs_cache = self._channel.active_plugin_slugs
else:
self._active_plugin_slugs_cache = set()
return self._active_plugin_slugs_cache
def check_direct_message(self):
"""
        If the message is addressed to the bot, strip the bot's nick, store the
        rest of the message in ``self.text``, and return True. Otherwise, return False.
"""
nick = self._chatbot.nick
# Private message
if self._channel_name == nick:
LOG.debug('Private message detected')
# Set channel as user, so plugins reply by PM to correct user
self._channel_name = self.user
return True
if len(nick) == 1:
# support @<plugin> or !<plugin>
regex = ur'^{0}(.*)'.format(re.escape(nick))
else:
# support <nick>: <plugin>
regex = ur'^{0}[:\s](.*)'.format(re.escape(nick))
match = re.match(regex, self.full_text, re.IGNORECASE)
if match:
LOG.debug('Direct message detected')
self.text = match.groups()[0].lstrip()
return True
return False
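    # Hedged examples (not from the original source) of how the regexes above
    # behave for a bot nicked "botbot":
    #
    #     "botbot: help plugins"  -> direct message, self.text == "help plugins"
    #     "botbot help plugins"   -> direct message (nick followed by whitespace)
    #     "hello world"           -> not a direct message, self.text unchanged
    #
    # For a single-character nick such as "!", "!help" would match instead.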
def __str__(self):
return self.full_text
def __repr__(self):
return str(self)
class PluginRunner(object):
"""
Registration and routing for plugins
Calls to plugins are done via greenlets
"""
def __init__(self, use_gevent=False):
if use_gevent:
import gevent
self.gevent = gevent
self.bot_bus = redis.StrictRedis.from_url(
settings.REDIS_PLUGIN_QUEUE_URL)
self.storage = redis.StrictRedis.from_url(
settings.REDIS_PLUGIN_STORAGE_URL)
# plugins that listen to everything coming over the wire
self.firehose_router = {}
# plugins that listen to all messages (aka PRIVMSG)
self.messages_router = {}
# plugins that listen on direct messages (starting with bot nick)
self.mentions_router = {}
def register_all_plugins(self):
"""Iterate over all plugins and register them with the app"""
for core_plugin in ['help', 'logger']:
mod = import_module('botbot.apps.plugins.core.{}'.format(core_plugin))
plugin = mod.Plugin()
self.register(plugin)
for mod in botbot_plugins.plugins.__all__:
plugin = import_module('botbot_plugins.plugins.' + mod).Plugin()
self.register(plugin)
def register(self, plugin):
"""
Introspects the Plugin class instance provided for methods
that need to be registered with the internal app routers.
"""
for key in dir(plugin):
try:
# the config attr bombs if accessed here because it tries
# to access an attribute from the dummyapp
attr = getattr(plugin, key)
except AttributeError:
continue
if (not key.startswith('__') and
getattr(attr, 'route_rule', None)):
LOG.info('Route: %s.%s listens to %s for matches to %s',
plugin.slug, key, attr.route_rule[0],
attr.route_rule[1])
getattr(self, attr.route_rule[0] + '_router').setdefault(
plugin.slug, []).append((attr.route_rule[1], attr, plugin))
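    # Hedged sketch (simplified; not the real botbot_plugins API) of the shape
    # ``register`` looks for: any plugin method carrying a ``route_rule``
    # attribute of the form (router_name, regex) gets routed, e.g.
    #
    #     class Plugin(object):
    #         slug = 'ping'
    #         def ping(self, line):
    #             return 'pong'
    #         ping.route_rule = ('messages', r'^ping$')
    #
    # would be stored in ``self.messages_router['ping']`` as (r'^ping$', ping, plugin).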
def listen(self):
"""Listens for incoming messages on the Redis queue"""
while 1:
val = None
try:
val = self.bot_bus.blpop('q', 1)
# Track q length
ql = self.bot_bus.llen('q')
statsd.gauge(".".join(["plugins", "q"]), ql)
if val:
_, val = val
                    LOG.debug('Received: %s', val)
line = Line(json.loads(val), self)
# Calculate the transport latency between go and the plugins.
delta = datetime.utcnow().replace(tzinfo=utc) - line._received
statsd.timing(".".join(["plugins", "latency"]),
delta.total_seconds() * 1000)
self.dispatch(line)
except Exception:
LOG.error("Line Dispatch Failed", exc_info=True, extra={
"line": val
})
def dispatch(self, line):
"""Given a line, dispatch it to the right plugins & functions."""
# This is a pared down version of the `check_for_plugin_route_matches`
# method for firehose plugins (no regexing or return values)
active_firehose_plugins = line._active_plugin_slugs.intersection(
self.firehose_router.viewkeys())
for plugin_slug in active_firehose_plugins:
for _, func, plugin in self.firehose_router[plugin_slug]:
# firehose gets everything, no rule matching
LOG.info('Match: %s.%s', plugin_slug, func.__name__)
with statsd.timer(".".join(["plugins", plugin_slug])):
                    # FIXME: This will not have correct timing if we go back
                    # to gevent.
channel_plugin = self.setup_plugin_for_channel(
plugin.__class__, line)
new_func = log_on_error(LOG, getattr(channel_plugin,
func.__name__))
if hasattr(self, 'gevent'):
self.gevent.Greenlet.spawn(new_func, line)
else:
channel_plugin.respond(new_func(line))
# pass line to other routers
if line._is_message:
self.check_for_plugin_route_matches(line, self.messages_router)
if line.is_direct_message:
self.check_for_plugin_route_matches(line, self.mentions_router)
def setup_plugin_for_channel(self, fake_plugin_class, line):
"""Given a dummy plugin class, initialize it for the line's channel"""
class RealPlugin(RealPluginMixin, fake_plugin_class):
pass
plugin = RealPlugin(slug=fake_plugin_class.__module__.split('.')[-1],
channel=line._channel,
chatbot_id=line._chatbot_id,
app=self)
return plugin
def check_for_plugin_route_matches(self, line, router):
"""Checks the active plugins' routes and calls functions on matches"""
# get the active routes for this channel
active_slugs = line._active_plugin_slugs.intersection(router.viewkeys())
for plugin_slug in active_slugs:
for rule, func, plugin in router[plugin_slug]:
match = re.match(rule, line.text, re.IGNORECASE)
if match:
LOG.info('Match: %s.%s', plugin_slug, func.__name__)
with statsd.timer(".".join(["plugins", plugin_slug])):
                        # FIXME: This will not have correct timing if we go
                        # back to gevent.
# Instantiate a plugin specific to this channel
channel_plugin = self.setup_plugin_for_channel(
plugin.__class__, line)
# get the method from the channel-specific plugin
new_func = log_on_error(LOG, getattr(channel_plugin,
func.__name__))
if hasattr(self, 'gevent'):
grnlt = self.gevent.Greenlet(new_func, line,
**match.groupdict())
grnlt.link_value(channel_plugin.greenlet_respond)
grnlt.start()
else:
channel_plugin.respond(new_func(line,
**match.groupdict()))
def start_plugins(*args, **kwargs):
"""
Used by the management command to start-up plugin listener
and register the plugins.
"""
LOG.info('Starting plugins. Gevent=%s', kwargs['use_gevent'])
app = PluginRunner(**kwargs)
app.register_all_plugins()
app.listen()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver base-classes:
(Beginning of) the contract that compute drivers must follow, and shared
types that support that contract
"""
from nova import context as nova_context
from nova import db
from nova import flags
from nova import log as logging
from nova.compute import power_state
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class InstanceInfo(object):
def __init__(self, name, state):
self.name = name
assert state in power_state.valid_states(), "Bad state: %s" % state
self.state = state
def block_device_info_get_root(block_device_info):
block_device_info = block_device_info or {}
return block_device_info.get('root_device_name')
def block_device_info_get_swap(block_device_info):
block_device_info = block_device_info or {}
return block_device_info.get('swap') or {'device_name': None,
'swap_size': 0}
def swap_is_usable(swap):
return swap and swap['device_name'] and swap['swap_size'] > 0
def block_device_info_get_ephemerals(block_device_info):
block_device_info = block_device_info or {}
ephemerals = block_device_info.get('ephemerals') or []
return ephemerals
def block_device_info_get_mapping(block_device_info):
block_device_info = block_device_info or {}
block_device_mapping = block_device_info.get('block_device_mapping') or []
return block_device_mapping
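# Hedged illustration (device names are invented for the example) of the
# block_device_info structure the helpers above expect:
#
#     block_device_info = {
#         'root_device_name': '/dev/vda',
#         'swap': {'device_name': '/dev/vdb', 'swap_size': 1024},
#         'ephemerals': [],
#         'block_device_mapping': [],
#     }
#     assert swap_is_usable(block_device_info_get_swap(block_device_info))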
class ComputeDriver(object):
"""Base class for compute drivers.
The interface to this class talks in terms of 'instances' (Amazon EC2 and
internal Nova terminology), by which we mean 'running virtual machine'
(XenAPI terminology) or domain (Xen or libvirt terminology).
An instance has an ID, which is the identifier chosen by Nova to represent
the instance further up the stack. This is unfortunately also called a
'name' elsewhere. As far as this layer is concerned, 'instance ID' and
'instance name' are synonyms.
Note that the instance ID or name is not human-readable or
customer-controlled -- it's an internal ID chosen by Nova. At the
nova.virt layer, instances do not have human-readable names at all -- such
things are only known higher up the stack.
Most virtualization platforms will also have their own identity schemes,
to uniquely identify a VM or domain. These IDs must stay internal to the
platform-specific layer, and never escape the connection interface. The
platform-specific layer is responsible for keeping track of which instance
ID maps to which platform-specific ID, and vice versa.
In contrast, the list_disks and list_interfaces calls may return
platform-specific IDs. These identify a specific virtual disk or specific
virtual network interface, and these IDs are opaque to the rest of Nova.
Some methods here take an instance of nova.compute.service.Instance. This
is the data structure used by nova.compute to store details regarding an
instance, and pass them into this layer. This layer is responsible for
translating that generic data structure into terms that are specific to the
virtualization platform.
"""
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function,
        including catching up with currently running VMs on the given host."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_info(self, instance):
"""Get the current status of an instance, by name (not ID!)
Returns a dict containing:
:state: the running state, one of the power_state codes
:max_mem: (int) the maximum memory in KBytes allowed
:mem: (int) the memory in KBytes used by the domain
:num_cpu: (int) the number of virtual CPUs for the domain
:cpu_time: (int) the CPU time used in nanoseconds
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
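    # Hedged sketch (hypothetical driver; values are illustrative only) of the
    # dict a concrete get_info implementation would return:
    #
    #     def get_info(self, instance):
    #         return {'state': power_state.RUNNING,
    #                 'max_mem': 2 * 1024 * 1024,  # KBytes
    #                 'mem': 1024 * 1024,          # KBytes
    #                 'num_cpu': 2,
    #                 'cpu_time': 0}               # nanoseconds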
def get_num_instances(self):
"""Return the total number of virtual machines.
Return the number of virtual machines that the hypervisor knows
about.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
return len(self.list_instances())
def instance_exists(self, instance_id):
"""Checks existence of an instance on the host.
:param instance_id: The ID / name of the instance to lookup
Returns True if an instance with the supplied ID exists on
the host, False otherwise.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
return instance_id in self.list_instances()
def list_instances(self):
"""
Return the names of all the instances known to the virtualization
layer, as a list.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def list_instances_detail(self):
"""Return a list of InstanceInfo for all registered VMs"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def spawn(self, context, instance, image_meta,
network_info=None, block_device_info=None):
"""
Create a new instance/VM/domain on the virtualization platform.
Once this successfully completes, the instance should be
running (power_state.RUNNING).
If this fails, any partial instance should be completely
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
:param context: security context
:param instance: Instance object as returned by DB layer.
This function should use the data there to guide
the creation of the new instance.
:param image_meta: image object returned by nova.image.glance that
defines the image from which to boot this instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices to be
attached to the instance.
"""
raise NotImplementedError()
def destroy(self, instance, network_info, block_device_info=None):
"""Destroy (shutdown and delete) the specified instance.
If the instance is not found (for example if networking failed), this
function should still succeed. It's probably a good idea to log a
warning in that case.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices that should
be detached from the instance.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param reboot_type: Either a HARD or SOFT reboot
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def snapshot_instance(self, context, instance_id, image_id):
raise NotImplementedError()
def get_console_pool_info(self, console_type):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_console_output(self, instance):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_vnc_console(self, instance):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_diagnostics(self, instance):
"""Return data about VM diagnostics"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_all_bw_usage(self, start_time, stop_time=None):
"""Return bandwidth usage info for each interface on each
running VM"""
raise NotImplementedError()
def get_host_ip_addr(self):
"""
Retrieves the IP address of the dom0
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def attach_volume(self, connection_info, instance_name, mountpoint):
"""Attach the disk to the instance at mountpoint using info"""
raise NotImplementedError()
def detach_volume(self, connection_info, instance_name, mountpoint):
"""Detach the disk attached to the instance"""
raise NotImplementedError()
def compare_cpu(self, cpu_info):
"""Compares given cpu info against host
Before attempting to migrate a VM to this host,
compare_cpu is called to ensure that the VM will
actually run here.
:param cpu_info: (str) JSON structure describing the source CPU.
:returns: None if migration is acceptable
:raises: :py:class:`~nova.exception.InvalidCPUInfo` if migration
is not acceptable.
"""
raise NotImplementedError()
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info):
"""
Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
raise NotImplementedError()
def snapshot(self, context, instance, image_id):
"""
Snapshots the specified instance.
:param context: security context
:param instance: Instance object as returned by DB layer.
:param image_id: Reference to a pre-created image that will
hold the snapshot.
"""
raise NotImplementedError()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance):
"""Completes a resize, turning on the migrated instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param image_meta: image object returned by nova.image.glance that
defines the image from which this instance
was created
"""
raise NotImplementedError()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def finish_revert_migration(self, instance, network_info):
"""Finish reverting a resize, powering back on the instance"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def pause(self, instance):
"""Pause the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unpause(self, instance):
"""Unpause paused VM instance"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def suspend(self, instance):
"""suspend the specified instance"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume(self, instance):
"""resume the specified instance"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume_state_on_host_boot(self, context, instance, network_info):
"""resume guest state when a host is booted"""
raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta):
"""Rescue the specified instance"""
raise NotImplementedError()
def unrescue(self, instance, network_info):
"""Unrescue the specified instance"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def power_off(self, instance):
"""Power off the specified instance."""
raise NotImplementedError()
def power_on(self, instance):
"""Power on the specified instance"""
raise NotImplementedError()
def update_available_resource(self, ctxt, host):
"""Updates compute manager resource info on ComputeNode table.
This method is called when nova-compute launches, and
whenever admin executes "nova-manage service update_resource".
:param ctxt: security context
:param host: hostname that compute manager is currently running
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method):
"""Spawning live_migration operation for distributing high-load.
:param ctxt: security context
:param instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
"""This method is called after a change to security groups.
All security groups and their associated rules live in the datastore,
and calling this method should apply the updated rules to instances
running the specified security group.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
"""This method is called when a security group is added to an instance.
This message is sent to the virtualization drivers on hosts that are
running an instance that belongs to a security group that has a rule
that references the security group identified by `security_group_id`.
It is the responsibility of this method to make sure any rules
that authorize traffic flow with members of the security group are
updated and any new members can communicate, and any removed members
cannot.
Scenario:
* we are running on host 'H0' and we have an instance 'i-0'.
* instance 'i-0' is a member of security group 'speaks-b'
* group 'speaks-b' has an ingress rule that authorizes group 'b'
* another host 'H1' runs an instance 'i-1'
* instance 'i-1' is a member of security group 'b'
When 'i-1' launches or terminates we will receive the message
to update members of group 'b', at which time we will make
any changes needed to the rules for instance 'i-0' to allow
or deny traffic coming from 'i-1', depending on if it is being
added or removed from the group.
In this scenario, 'i-1' could just as easily have been running on our
host 'H0' and this method would still have been called. The point was
that this method isn't called on the host where instances of that
group are running (as is the case with
:py:meth:`refresh_security_group_rules`) but is called where references
are made to authorizing those instances.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_provider_fw_rules(self):
"""This triggers a firewall update based on database changes.
When this is called, rules have either been added or removed from the
datastore. You can retrieve rules with
:py:meth:`nova.db.provider_fw_rule_get_all`.
Provider rules take precedence over security group rules. If an IP
would be allowed by a security group ingress rule, but blocked by
a provider rule, then packets from the IP are dropped. This includes
intra-project traffic in the case of the allow_project_net_traffic
flag for the libvirt-derived classes.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reset_network(self, instance):
"""reset networking for specified instance"""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
"""Setting up filtering rules and waiting for its completion.
To migrate an instance, filtering rules to hypervisors
and firewalls are inevitable on destination host.
( Waiting only for filtering rules to hypervisor,
since filtering rules to firewall rules can be set faster).
Concretely, the below method must be called.
- setup_basic_filtering (for nova-basic, etc.)
- prepare_instance_filter(for nova-instance-instance-xxx, etc.)
to_xml may have to be called since it defines PROJNET, PROJMASK.
but libvirt migrates those value through migrateToURI(),
so , no need to be called.
Don't use thread for this method since migration should
not be started when setting-up filtering rules operations
are not completed.
:params instance_ref: nova.db.sqlalchemy.models.Instance object
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def set_admin_password(self, context, instance_id, new_pass=None):
"""
Set the root password on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the value of the new password.
"""
raise NotImplementedError()
def inject_file(self, instance, b64_path, b64_contents):
"""
Writes a file on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the base64-encoded path to which the file is to be
written on the instance; the third is the contents of the file, also
base64-encoded.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def agent_update(self, instance, url, md5hash):
"""
Update agent on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the URL of the agent to be fetched and updated on the
instance; the third is the md5 hash of the file for verification
purposes.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance"""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def poll_rebooting_instances(self, timeout):
"""Poll for rebooting instances"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def poll_unconfirmed_resizes(self, resize_confirm_window):
"""Poll for unconfirmed resizes."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
raise NotImplementedError()
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
raise NotImplementedError()
def update_host_status(self):
"""Refresh host stats"""
raise NotImplementedError()
def get_host_stats(self, refresh=False):
"""Return currently known host stats"""
raise NotImplementedError()
def list_disks(self, instance_name):
"""
Return the IDs of all the virtual disks attached to the specified
instance, as a list. These IDs are opaque to the caller (they are
only useful for giving back to this layer as a parameter to
disk_stats). These IDs only need to be unique for a given instance.
Note that this function takes an instance name.
"""
raise NotImplementedError()
def list_interfaces(self, instance_name):
"""
Return the IDs of all the virtual network interfaces attached to the
specified instance, as a list. These IDs are opaque to the caller
(they are only useful for giving back to this layer as a parameter to
interface_stats). These IDs only need to be unique for a given
instance.
Note that this function takes an instance name.
"""
raise NotImplementedError()
def resize(self, instance, flavor):
"""
Resizes/Migrates the specified instance.
The flavor parameter determines whether or not the instance RAM and
disk space are modified, and if so, to what size.
"""
raise NotImplementedError()
def block_stats(self, instance_name, disk_id):
"""
Return performance counters associated with the given disk_id on the
given instance_name. These are returned as [rd_req, rd_bytes, wr_req,
wr_bytes, errs], where rd indicates read, wr indicates write, req is
the total number of I/O requests made, bytes is the total number of
bytes transferred, and errs is the number of requests held up due to a
full pipeline.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
Note that this function takes an instance name.
"""
raise NotImplementedError()
def interface_stats(self, instance_name, iface_id):
"""
Return performance counters associated with the given iface_id on the
given instance_name. These are returned as [rx_bytes, rx_packets,
rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx
indicates receive, tx indicates transmit, bytes and packets indicate
the total number of bytes or packets transferred, and errs and drop
are the total numbers of failed and dropped packets, respectively.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
Note that this function takes an instance name.
"""
raise NotImplementedError()
def legacy_nwinfo(self):
"""
Indicate if the driver requires the legacy network_info format.
"""
# TODO(tr3buchet): update all subclasses and remove this
return True
def manage_image_cache(self, context):
"""
Manage the driver's local image cache.
Some drivers choose to cache images for instances on disk. This method
is an opportunity to do management of that cache which isn't directly
related to other calls into the driver. The prime example is to clean
the cache and remove images which are no longer of interest.
"""
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
raise NotImplementedError()
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
raise NotImplementedError()
def get_volume_connector(self, instance):
"""Get connector information for the instance for attaching to volumes.
Connector information is a dictionary representing the IP of the
machine that will be making the connection and the name of the
iscsi initiator as follows::
{
'ip': ip,
'initiator': initiator,
}
"""
raise NotImplementedError()
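# Illustrative sketch of a connector dictionary a concrete driver might
# return (the values below are assumptions for illustration only, not taken
# from any real hypervisor or from the code above):
#
#     {
#         'ip': '192.168.0.10',
#         'initiator': 'iqn.1993-08.org.debian:01:abcdef0123',
#     }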
|
|
"""
Samples anonymous user sessions that have different bot threshold scores.
Also samples testing data.
Usage:
anonymous_users_and_testing_data_sampling (-h|--help)
anonymous_users_and_testing_data_sampling <input_testing> <input_anonymous_user_threshold_scores> <input_anonymous_user_threshold_scores_i2> <anonymous_user_samples_output> <anonymous_user_samples_i2_output> <testing_samples_output>
[--debug]
[--verbose]
Options:
-h, --help This help message is printed
<input_testing> Path to input testing data file
to process.
<input_anonymous_user_threshold_scores> Path to input anonymous user
model threshold scores file to
sample.
<input_anonymous_user_threshold_scores_i2> Path to input anonymous user
model threshold iteration 2
scores file to sample.
<anonymous_user_samples_output> Where anonymous samples will be
written.
<anonymous_user_samples_i2_output> Where iteration 2 anonymous
samples will be written.
<testing_samples_output> Where testing samples will be
written.
--debug Print debug logging to stderr
--verbose Print dots and stuff to stderr
"""
import docopt
import logging
import operator
import sys
import mysqltsv
import datetime
from collections import defaultdict
import random
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
input_testing_file = mysqltsv.Reader(
open(args['<input_testing>'],'rt'), headers=True,
types=[str, str, str, float, float, int, int, int, int, int, int, int,
int, int, float, int, int, int, str, str, int])
input_anonymous_user_threshold_scores_file = mysqltsv.Reader(
open(args['<input_anonymous_user_threshold_scores>'],'rt'),
headers=True, types=[str, str, float, float, int, int, int, int, int,
int, int, int, int, float, int, int, int, float])
input_anonymous_user_threshold_scores_i2_file = mysqltsv.Reader(
open(args['<input_anonymous_user_threshold_scores_i2>'],'rt'),
headers=True, types=[str, str, float, float, int, int, int, int, int,
int, int, int, int, float, int, int, int, int, int, int, int, int, int,
int, int, int, int, int, int, int, int, float])
anonymous_user_samples_output_file = mysqltsv.Writer(
open(args['<anonymous_user_samples_output>'], "w"),
headers=['session start timestamp', 'session completed timestamp',
'url', 'Consistent revision frequency',
'Comment is "Updated item"',
'Similar operations occur to different pages',
'More than one claim edited per revision',
'At least one rev. comment is prefixed by "bot" or "robot"',
'Short session with rapid revisions', 'Not-obviously a bot'])
anonymous_user_samples_i2_output_file = mysqltsv.Writer(
open(args['<anonymous_user_samples_i2_output>'], "w"),
headers=['session start timestamp', 'session completed timestamp',
'url', 'Consistent revision frequency',
'Comment is "Updated item"',
'Similar operations occur to different pages',
'More than one claim edited per revision',
'At least one rev. comment is prefixed by "bot" or "robot"',
'Short session with rapid revisions', 'Not-obviously a bot'])
testing_samples_output_file = mysqltsv.Writer(
open(args['<testing_samples_output>'], "w"),
headers=['session start timestamp', 'session completed timestamp',
'url', 'Consistent revision frequency',
'Comment is "Updated item"',
'Similar operations occur to different pages',
'More than one claim edited per revision',
'At least one rev. comment is prefixed by "bot" or "robot"',
'Short session with rapid revisions', 'Not-obviously a bot'])
verbose = args['--verbose']
run(input_testing_file, input_anonymous_user_threshold_scores_file,
input_anonymous_user_threshold_scores_i2_file,
anonymous_user_samples_output_file,
anonymous_user_samples_i2_output_file, testing_samples_output_file,
verbose)
def run(input_testing_file, input_anonymous_user_threshold_scores_file,
input_anonymous_user_threshold_scores_i2_file,
anonymous_user_samples_output_file,
anonymous_user_samples_i2_output_file, testing_samples_output_file,
verbose):
false_negative_testing_sessions = []
# Anonymous file sampling
anonymous_sampling(input_anonymous_user_threshold_scores_file,
anonymous_user_samples_output_file, 1.59, .76, .17, -.51, -1.16, -1.82,
-2.59, -3.39, -4.21)
anonymous_sampling(input_anonymous_user_threshold_scores_i2_file,
anonymous_user_samples_i2_output_file, 5.46, 4.01, 3.01, 2.21, 1.41,
.66, -.18, -1.18, -2.39)
# Testing file sampling
for line in input_testing_file:
if line['bot'] == 'TRUE' and line['bot_prediction'] == 0:
false_negative_testing_sessions.append(line)
for line in random.sample(false_negative_testing_sessions, 100):
testing_samples_output_file.write(create_url_item(line['username'],
line['session_start'],
line["session_length_in_seconds"]))
def create_url_item(username, starting_timestamp, session_length_in_seconds):
converted_username = username.replace(":","%3A")
converted_starting_timestamp =\
datetime.datetime(int(starting_timestamp[0:4]),
int(starting_timestamp[4:6]),
int(starting_timestamp[6:8]),
int(starting_timestamp[8:10]),
int(starting_timestamp[10:12]),
int(starting_timestamp[12:14]))
session_completed = converted_starting_timestamp +\
datetime.timedelta(seconds=session_length_in_seconds)
starting_timestamp_readable = \
str(converted_starting_timestamp.year) + "-" +\
str(converted_starting_timestamp.month).zfill(2) + "-" +\
str(converted_starting_timestamp.day).zfill(2) + " " +\
str(converted_starting_timestamp.hour).zfill(2) + " " +\
str(converted_starting_timestamp.minute).zfill(2) + " " +\
str(converted_starting_timestamp.second).zfill(2)
session_completed_readable = \
str(session_completed.year) + "-" +\
str(session_completed.month).zfill(2) + "-" +\
str(session_completed.day).zfill(2) + " " +\
str(session_completed.hour).zfill(2) + " " +\
str(session_completed.minute).zfill(2) + " " +\
str(session_completed.second).zfill(2)
starting_year_month_day =\
str(converted_starting_timestamp.year).zfill(4) + "-" +\
str(converted_starting_timestamp.month).zfill(2) + "-" +\
str(converted_starting_timestamp.day).zfill(2) + "T" +\
str(converted_starting_timestamp.hour).zfill(2) + ":" +\
str(converted_starting_timestamp.minute).zfill(2) + ":" +\
str(converted_starting_timestamp.second).zfill(2) + "Z"
completed_year_month_day =\
str(session_completed.year).zfill(4) + "-" +\
str(session_completed.month).zfill(2) + "-" +\
str(session_completed.day).zfill(2) + "T" +\
str(session_completed.hour).zfill(2) + ":" +\
str(session_completed.minute).zfill(2) + ":" +\
str(session_completed.second).zfill(2) + "Z"
url = "https://wikidata.org/w/api.php?action=query&list=usercontribs" + \
"&uclimit=max&ucstart=" + starting_year_month_day + "&ucend=" + \
completed_year_month_day + "&ucuser=" + converted_username + \
"&ucdir=newer"
return starting_timestamp_readable, session_completed_readable, url
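# Worked example of create_url_item() (hypothetical inputs, not taken from
# the data files): for username '127.0.0.1', starting_timestamp
# '20160101120000' and session_length_in_seconds=90, the function returns:
#
#   ('2016-01-01 12 00 00',
#    '2016-01-01 12 01 30',
#    'https://wikidata.org/w/api.php?action=query&list=usercontribs'
#    '&uclimit=max&ucstart=2016-01-01T12:00:00Z'
#    '&ucend=2016-01-01T12:01:30Z&ucuser=127.0.0.1&ucdir=newer')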
def anonymous_sampling(sampling_file, anonymous_user_samples_output_file, ten,
twenty, thirty, forty, fifty, sixty, seventy, eighty, ninety):
threshold_scores = defaultdict(list)
for i, line in enumerate(sampling_file):
if line['threshold_score'] >= ten:
recall_rounded = 'less_than_10_percent'
elif line['threshold_score'] >= twenty:
recall_rounded = '10_to_20_percent'
elif line['threshold_score'] >= thirty:
recall_rounded = '20_to_30_percent'
elif line['threshold_score'] >= forty:
recall_rounded = '30_to_40_percent'
elif line['threshold_score'] >= fifty:
recall_rounded = '40_to_50_percent'
elif line['threshold_score'] >= sixty:
recall_rounded = '50_to_60_percent'
elif line['threshold_score'] >= seventy:
recall_rounded = '60_to_70_percent'
elif line['threshold_score'] >= eighty:
recall_rounded = '70_to_80_percent'
elif line['threshold_score'] >= ninety:
recall_rounded = '80_to_90_percent'
else:
recall_rounded = 'greater_than_90_percent'
threshold_scores[recall_rounded]\
.append({'username' : line['username'],
'session_start' : line['session_start'],
'threshold_score' : line['threshold_score'],
'session_length_in_seconds' :
line['session_length_in_seconds']})
recall_sample = defaultdict(list)
for recall in threshold_scores:
number_of_samples = 20
length_of_recall = len(threshold_scores[recall])
if length_of_recall < number_of_samples:
number_of_samples = length_of_recall
recall_sample[recall].append(random.sample(threshold_scores[recall],
number_of_samples))
increasing_recall = ['less_than_10_percent', '10_to_20_percent',
'20_to_30_percent', '30_to_40_percent',
'40_to_50_percent', '50_to_60_percent',
'60_to_70_percent', '70_to_80_percent',
'80_to_90_percent', 'greater_than_90_percent']
for recall in increasing_recall:
anonymous_user_samples_output_file.write(["RECALL", recall, ""])
for recall_session in recall_sample[recall]:
for session in recall_session:
anonymous_user_samples_output_file.write(
create_url_item(session['username'],
session['session_start'],
session["session_length_in_seconds"]))
if __name__ == "__main__":
main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for ImageNet data preprocessing & prediction decoding.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
CLASS_INDEX = None
CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json'
# Global tensor of imagenet mean for preprocessing symbolic inputs
_IMAGENET_MEAN = None
def _preprocess_numpy_input(x, data_format, mode):
"""Preprocesses a Numpy array encoding a batch of images.
Arguments:
x: Input array, 3D or 4D.
data_format: Data format of the image array.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
Returns:
Preprocessed Numpy array.
"""
if mode == 'tf':
x /= 127.5
x -= 1.
return x
if mode == 'torch':
x /= 255.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
else:
if data_format == 'channels_first':
# 'RGB'->'BGR'
if x.ndim == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
mean = [103.939, 116.779, 123.68]
std = None
# Zero-center by mean pixel
if data_format == 'channels_first':
if x.ndim == 3:
x[0, :, :] -= mean[0]
x[1, :, :] -= mean[1]
x[2, :, :] -= mean[2]
if std is not None:
x[0, :, :] /= std[0]
x[1, :, :] /= std[1]
x[2, :, :] /= std[2]
else:
x[:, 0, :, :] -= mean[0]
x[:, 1, :, :] -= mean[1]
x[:, 2, :, :] -= mean[2]
if std is not None:
x[:, 0, :, :] /= std[0]
x[:, 1, :, :] /= std[1]
x[:, 2, :, :] /= std[2]
else:
x[..., 0] -= mean[0]
x[..., 1] -= mean[1]
x[..., 2] -= mean[2]
if std is not None:
x[..., 0] /= std[0]
x[..., 1] /= std[1]
x[..., 2] /= std[2]
return x
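# Worked example of the arithmetic above (illustrative pixel values): in
# 'caffe' mode with data_format='channels_last', an RGB pixel [255., 0., 0.]
# is first flipped to BGR [0., 0., 255.] and then mean-centred to
# [-103.939, -116.779, 131.32]; in 'tf' mode the same pixel becomes
# [1., -1., -1.], since each value is divided by 127.5 and shifted by -1.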
def _preprocess_symbolic_input(x, data_format, mode):
"""Preprocesses a tensor encoding a batch of images.
Arguments:
x: Input tensor, 3D or 4D.
data_format: Data format of the image tensor.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
Returns:
Preprocessed tensor.
"""
global _IMAGENET_MEAN
if mode == 'tf':
x /= 127.5
x -= 1.
return x
if mode == 'torch':
x /= 255.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
else:
if data_format == 'channels_first':
# 'RGB'->'BGR'
if K.ndim(x) == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
mean = [103.939, 116.779, 123.68]
std = None
if _IMAGENET_MEAN is None:
_IMAGENET_MEAN = constant_op.constant(-np.array(mean), dtype=K.floatx())
# Zero-center by mean pixel
if K.dtype(x) != K.dtype(_IMAGENET_MEAN):
x = K.bias_add(x, math_ops.cast(_IMAGENET_MEAN, K.dtype(x)), data_format)
else:
x = K.bias_add(x, _IMAGENET_MEAN, data_format)
if std is not None:
x /= std
return x
@tf_export('keras.applications.resnet50.preprocess_input',
'keras.applications.vgg19.preprocess_input',
'keras.applications.vgg16.preprocess_input')
def preprocess_input(x, data_format=None, mode='caffe'):
"""Preprocesses a tensor or Numpy array encoding a batch of images.
Arguments:
x: Input Numpy or symbolic tensor, 3D or 4D.
data_format: Data format of the image tensor/array.
mode: One of "caffe", "tf".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
Returns:
Preprocessed tensor or Numpy array.
Raises:
ValueError: In case of unknown `data_format` argument.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
if isinstance(x, np.ndarray):
return _preprocess_numpy_input(x, data_format=data_format, mode=mode)
else:
return _preprocess_symbolic_input(x, data_format=data_format, mode=mode)
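# Minimal usage sketch (assumes a float32 NumPy batch of RGB images in
# channels_last layout; the array values here are arbitrary):
#
#   import numpy as np
#   batch = np.random.uniform(0, 255, size=(2, 224, 224, 3)).astype('float32')
#   caffe_batch = preprocess_input(batch.copy())           # BGR, mean-centred
#   tf_batch = preprocess_input(batch.copy(), mode='tf')   # scaled to [-1, 1]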
@tf_export('keras.applications.nasnet.decode_predictions',
'keras.applications.resnet50.decode_predictions',
'keras.applications.vgg19.decode_predictions',
'keras.applications.vgg16.decode_predictions',
'keras.applications.inception_resnet_v2.decode_predictions',
'keras.applications.inception_v3.decode_predictions',
'keras.applications.densenet.decode_predictions',
'keras.applications.mobilenet.decode_predictions',
'keras.applications.xception.decode_predictions')
def decode_predictions(preds, top=5):
"""Decodes the prediction of an ImageNet model.
Arguments:
preds: Numpy tensor encoding a batch of predictions.
top: Integer, how many top-guesses to return.
Returns:
A list of lists of top class prediction tuples
`(class_name, class_description, score)`.
One list of tuples per sample in batch input.
Raises:
ValueError: In case of invalid shape of the `pred` array
(must be 2D).
"""
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
fpath = get_file(
'imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models',
file_hash='c2c37ea517e94d9795004a39431a14cb')
with open(fpath) as f:
CLASS_INDEX = json.load(f)
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
result.sort(key=lambda x: x[2], reverse=True)
results.append(result)
return results
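# Minimal usage sketch (`model` and `batch` are assumed placeholders, and the
# class tuples below are hypothetical output shown only to illustrate the
# (class_name, class_description, score) structure):
#
#   preds = model.predict(batch)          # shape (samples, 1000)
#   decode_predictions(preds, top=3)
#   # -> [[('n02123045', 'tabby', 0.71),
#   #      ('n02123159', 'tiger_cat', 0.12),
#   #      ('n02124075', 'Egyptian_cat', 0.05)], ...]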
def _obtain_input_shape(input_shape,
default_size,
min_size,
data_format,
require_flatten,
weights=None):
"""Internal utility to compute/validate a model's input shape.
Arguments:
input_shape: Either None (will return the default network input shape),
or a user-provided shape to be validated.
default_size: Default input width/height for the model.
min_size: Minimum input width/height accepted by the model.
data_format: Image data format to use.
require_flatten: Whether the model is expected to
be linked to a classifier via a Flatten layer.
weights: One of `None` (random initialization)
or 'imagenet' (pre-training on ImageNet).
If weights='imagenet' input channels must be equal to 3.
Returns:
An integer shape tuple (may include None entries).
Raises:
ValueError: In case of invalid argument values.
"""
if weights != 'imagenet' and input_shape and len(input_shape) == 3:
if data_format == 'channels_first':
if input_shape[0] not in {1, 3}:
logging.warning('This model usually expects 1 or 3 input channels. '
'However, it was passed an input_shape with ' +
str(input_shape[0]) + ' input channels.')
default_shape = (input_shape[0], default_size, default_size)
else:
if input_shape[-1] not in {1, 3}:
logging.warning('This model usually expects 1 or 3 input channels. '
'However, it was passed an input_shape with ' +
str(input_shape[-1]) + ' input channels.')
default_shape = (default_size, default_size, input_shape[-1])
else:
if data_format == 'channels_first':
default_shape = (3, default_size, default_size)
else:
default_shape = (default_size, default_size, 3)
if weights == 'imagenet' and require_flatten:
if input_shape is not None:
if input_shape != default_shape:
raise ValueError('When setting `include_top=True` '
'and loading `imagenet` weights, '
'`input_shape` should be ' + str(default_shape) + '.')
return default_shape
if input_shape:
if data_format == 'channels_first':
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError('`input_shape` must be a tuple of three integers.')
if input_shape[0] != 3 and weights == 'imagenet':
raise ValueError('The input must have 3 channels; got '
'`input_shape=' + str(input_shape) + '`')
if ((input_shape[1] is not None and input_shape[1] < min_size) or
(input_shape[2] is not None and input_shape[2] < min_size)):
raise ValueError('Input size must be at least ' + str(min_size) +
'x' + str(min_size) + '; got '
'`input_shape=' + str(input_shape) + '`')
else:
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError('`input_shape` must be a tuple of three integers.')
if input_shape[-1] != 3 and weights == 'imagenet':
raise ValueError('The input must have 3 channels; got '
'`input_shape=' + str(input_shape) + '`')
if ((input_shape[0] is not None and input_shape[0] < min_size) or
(input_shape[1] is not None and input_shape[1] < min_size)):
raise ValueError('Input size must be at least ' + str(min_size) +
'x' + str(min_size) + '; got '
'`input_shape=' + str(input_shape) + '`')
else:
if require_flatten:
input_shape = default_shape
else:
if data_format == 'channels_first':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if require_flatten:
if None in input_shape:
raise ValueError('If `include_top` is True, '
'you should specify a static `input_shape`. '
'Got `input_shape=' + str(input_shape) + '`')
return input_shape
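# Illustrative calls (argument values are examples, not defaults taken from
# any particular model):
#
#   _obtain_input_shape(None, default_size=224, min_size=32,
#                       data_format='channels_last', require_flatten=True,
#                       weights='imagenet')
#   # -> (224, 224, 3)
#
#   _obtain_input_shape((150, 150, 3), default_size=224, min_size=32,
#                       data_format='channels_last', require_flatten=False)
#   # -> (150, 150, 3)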
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""Tests for contrib.tpu.python.tpu.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.tpu.python.tpu import feature_column as tpu_fc
from tensorflow.python.client import session
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
def _initialized_session():
sess = session.Session()
sess.run(variables_lib.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
return sess
class EmbeddingColumnTest(test.TestCase):
def test_defaults(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = tpu_fc.embedding_column(
categorical_column, dimension=embedding_dimension)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('mean', embedding_column.combiner)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual('aaa_embedding', embedding_column._var_scope_name)
self.assertEqual((embedding_dimension,), embedding_column._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
def test_all_constructor_args(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = tpu_fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer')
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('my_combiner', embedding_column.combiner)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual('aaa_embedding', embedding_column._var_scope_name)
self.assertEqual((embedding_dimension,), embedding_column._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
def test_get_dense_tensor(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = tpu_fc.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
embedding_lookup = embedding_column._get_dense_tensor(
fc._LazyBuilder({
'aaa': sparse_input
}))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, embedding_lookup.eval())
class SharedEmbeddingColumnTest(test.TestCase):
def test_defaults(self):
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_b, embedding_column_a = tpu_fc.shared_embedding_columns(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual(embedding_dimension, embedding_column_a.dimension)
self.assertEqual(embedding_dimension, embedding_column_b.dimension)
self.assertEqual('mean', embedding_column_a.combiner)
self.assertEqual('mean', embedding_column_b.combiner)
self.assertIsNotNone(embedding_column_a.initializer)
self.assertIsNotNone(embedding_column_b.initializer)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_a.shared_embedding_collection_name)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_b.shared_embedding_collection_name)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_a._var_scope_name)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_b._var_scope_name)
self.assertEqual((embedding_dimension,), embedding_column_a._variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a._parse_example_spec)
self.assertEqual({
'bbb': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_b._parse_example_spec)
def test_all_constructor_args(self):
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
shared_embedding_collection_name='var_scope_name')
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual(embedding_dimension, embedding_column_a.dimension)
self.assertEqual(embedding_dimension, embedding_column_b.dimension)
self.assertEqual('my_combiner', embedding_column_a.combiner)
self.assertEqual('my_combiner', embedding_column_b.combiner)
self.assertEqual('my_initializer', embedding_column_a.initializer())
self.assertEqual('my_initializer', embedding_column_b.initializer())
self.assertEqual('var_scope_name',
embedding_column_a.shared_embedding_collection_name)
self.assertEqual('var_scope_name',
embedding_column_b.shared_embedding_collection_name)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual('var_scope_name', embedding_column_a._var_scope_name)
self.assertEqual('var_scope_name', embedding_column_b._var_scope_name)
self.assertEqual((embedding_dimension,), embedding_column_a._variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a._parse_example_spec)
self.assertEqual({
'bbb': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_b._parse_example_spec)
def test_get_dense_tensor(self):
# Inputs.
vocabulary_size = 3
# -1 values are ignored.
input_a = np.array([
[2, -1, -1], # example 0, ids [2]
[0, 1, -1]
]) # example 1, ids [0, 1]
input_b = np.array([
[0, -1, -1], # example 0, ids [0]
[-1, -1, -1]
]) # example 1, ids []
input_features = {'aaa': input_a, 'bbb': input_b}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups_a = (
# example 0:
(7., 11.), # ids [2], embedding = [7, 11]
# example 1:
(2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
)
expected_lookups_b = (
# example 0:
(1., 2.), # ids [0], embedding = [1, 2]
# example 1:
(0., 0.), # ids [], embedding = [0, 0]
)
# Build columns.
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
embedding_lookup_a = embedding_column_a._get_dense_tensor(
fc._LazyBuilder(input_features))
embedding_lookup_b = embedding_column_b._get_dense_tensor(
fc._LazyBuilder(input_features))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var.eval())
self.assertAllEqual(expected_lookups_a, embedding_lookup_a.eval())
self.assertAllEqual(expected_lookups_b, embedding_lookup_b.eval())
if __name__ == '__main__':
test.main()
|
|
#!/usr/bin/python3
import asyncio
import datetime
import logging
import subprocess
import sys
import json
import aiohttp
import discord
from discord.ext import commands
import cupboard
from bs4 import BeautifulSoup
logging.basicConfig(level=logging.WARNING)
startup_time = datetime.datetime.today()
def get_version():
try:
args = ['git', 'describe', '--tags', '--always']
return subprocess.check_output(args).decode('utf-8', 'ignore').strip()
except Exception:
return 'unknown'
version = get_version()
config = {}
with open('config.json', 'r') as f:
config.update(json.load(f))
# keep a set of verified user IDs so we can ignore them
verified_users = set()
verified_forum_ids = set()
# keep track of users we are to verify manually (to avoid spamming mod chan)
manually_verified_users = set()
# hack to create file if it doesn't exist yet
with open('verified_forum_ids.txt', 'a+') as f:
pass
with open('verified_forum_ids.txt', 'r') as f:
for line in f.readlines():
verified_forum_ids.add(line.strip())
# The channels we care about. public_channel (also known as the requests
# channel) is where the bot will send public welcome messages as well as
# announcing when people are verified. mod_channel is where the bot will
# send messages that should only be visible to mods.
public_channel = None
mod_channel = None
bot = commands.Bot(command_prefix='!', pm_help=True, help_attrs={'name': 'cmdhelp'})
@asyncio.coroutine
def welcome(user):
m = config['welcome_message'].format(
name = user.name,
mention_name = user.mention,
id = user.id,
)
if public_channel is not None:
yield from bot.send_message(public_channel, m)
@asyncio.coroutine
def help_message(user):
m = config['help_message'].format(
name = user.name,
mention_name = user.mention,
id = user.id,
)
yield from bot.send_message(user, m)
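# Hypothetical config.json fragment for the message templates used above
# (the keys come from the code; the wording is an illustrative assumption):
#
#   {
#     "welcome_message": "Welcome {mention_name}! Please verify your account.",
#     "help_message": "Hi {name}, post your Discord id ({id}) on your forum profile and send me the link."
#   }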
@bot.listen()
@asyncio.coroutine
def on_member_join(member):
server = member.server
if server.id != config['server']:
return
# debug!
print('{} [id = {}] joined the server'.format(member.name, member.id))
yield from help_message(member)
yield from welcome(member)
@bot.command(pass_context=True, aliases=['hello'])
@asyncio.coroutine
def help(ctx):
"""Shows instructions on how to register your account"""
yield from help_message(ctx.message.author)
@bot.listen('on_message')
@asyncio.coroutine
def try_verify_user(message):
if ignore_message(message):
return
user = message.author
content = message.content
if is_verified(user):
return
for word in content.split():
if word.lower().startswith(config['verify_url_prefix']):
ret = yield from verify_provided_link(user, word)
return ret
elif word.startswith('https://') or word.startswith('http://'):
yield from bot.send_message(user, config['invalid_link_message'])
return
def ignore_message(message):
if message.author == bot.user:
# ignore messages from ourselves!
return True
# TODO: add per-user rate limiting?
if message.server is None:
# private message. never ignore
return False
elif message.server.id == config['server']:
if config['channel'] == '*':
# all channels are okay
return False
elif config['channel'] in [message.channel.id, message.channel.name]:
# message was in correct channel
return False
elif config['mod_channel'] in [message.channel.id, message.channel.name]:
# allow mod channel too
return False
# otherwise, ignore message
return True
def is_verified(user):
# TODO: we might want some fallback to query the server in case our
# local verified_users cache isn't right.
return user.id in verified_users
def is_mod(user):
# always check user roles for admin
member = get_member(user)
if member is None:
# not even on server, so can't be a mod
return False
for role in member.roles:
if config['mod_role'] == role.id:
return True
return False
def mod_only_command():
return commands.check(lambda ctx: is_mod(ctx.message.author))
@bot.command()
@mod_only_command()
@asyncio.coroutine
def about():
"""Show version info for the bot"""
msg = 'python version: {}\ndiscord.py version: {}\nbot version: {}'.format(
sys.version.split()[0], discord.__version__, version)
yield from bot.say(msg)
@bot.command()
@mod_only_command()
@asyncio.coroutine
def stats():
"""Show bot stats"""
uptime = datetime.datetime.today() - startup_time
# strip ugly microseconds
nice_uptime = datetime.timedelta(uptime.days, uptime.seconds, 0)
msg = 'Bot uptime: {}\nVerified users: {}\nUnique forum profiles verified: {}'.format(
nice_uptime, len(verified_users), len(verified_forum_ids))
yield from bot.say(msg)
@bot.command()
@mod_only_command()
@asyncio.coroutine
def refresh():
"""Refresh the verified users cache"""
# refresh verified users cache
new_verified_users = set()
for server in bot.servers:
if config['server'] != server.id:
continue
for member in server.members:
if config['verified_role'] in [role.id for role in member.roles]:
new_verified_users.add(member.id)
added = len(new_verified_users.difference(verified_users))
removed = len(verified_users.difference(new_verified_users))
if added == 0 and removed == 0:
msg = 'Verified cache refreshed (no changes)'
yield from bot.say(msg)
return
# replace
verified_users.clear()
verified_users.update(new_verified_users)
msg = 'Verified cache refreshed ({} added, {} removed)'.format(added, removed)
yield from bot.say(msg)
@asyncio.coroutine
def verify_provided_link(user, link):
print('Attempting to verify user {} with link {}'.format(user.id, link))
# TODO: we might want a better way than just stuffing hard-coded cookies
# (like auto login with user and pass and get the cookies from that)
with aiohttp.ClientSession(cookies=config['verify_cookies']) as session:
r = yield from session.get(link)
if r.status != 200:
print('Error loading verification page:', r.status)
yield from bot.send_message(user, config['verification_error'])
return
# note: apparently the 'lxml' parser is faster, but you need to install it
content = yield from r.text()
soup = BeautifulSoup(content, 'html.parser')
posts = soup.findAll('div', {'class': 'ItemContent Activity'})
for post in posts:
# TODO: verify that this post is by the correct author
text = ' '.join(post.findAll(text=True))
print('Found Post:', text)
if user.id in text and 'discord' in text.lower():
# verify success!
ret = yield from verify_success(user, link)
return ret
print('No verification post found for user', user.id)
msg = config['missing_verification_post'].format(
id = user.id,
)
yield from bot.send_message(user, msg)
@asyncio.coroutine
def verify_success(user, link):
forum_id = get_forum_id(link)
format_args = dict(
name = user.name,
mention_name = user.mention,
id = user.id,
link = link,
forum_id = forum_id
)
# First, some sanity checks. If there are multiple Discord users with
# the same name, or the forum account has been used before, we will alert
# the mods, and not verify the user. We want to avoid having impersonators.
if dupe_user_names(user):
yield from bot.send_message(user, config['verified_profile_duplicate_name'])
if mod_channel is not None and user.id not in manually_verified_users:
msg = config['verified_public_message'].format(**format_args)
msg += config['verified_profile_duplicate_name_mods'].format(
name = user.name,
)
yield from bot.send_message(mod_channel, msg)
manually_verified_users.add(user.id)
return
elif forum_account_used(forum_id):
yield from bot.send_message(user, config['verified_profile_before'])
if mod_channel is not None and user.id not in manually_verified_users:
msg = config['verified_public_message'].format(**format_args)
msg += config['verified_profile_before_mods']
yield from bot.send_message(mod_channel, msg)
manually_verified_users.add(user.id)
return
# add user roles
# we need to first find the correct Member object on the server
# (we can't modify roles on User objects directly)
member = get_member(user)
if member is None:
# TODO: make a proper error message, this shouldn't happen
return
yield from bot.add_roles(member, discord.Role(id=config['verified_role']))
verified_forum_ids.add(forum_id)
with open('verified_forum_ids.txt', 'a+') as f:
f.write(forum_id + '\n')
priv_message = config['verified_private_message'].format(**format_args)
yield from bot.send_message(user, priv_message)
if public_channel is not None:
pub_message = config['verified_public_message'].format(**format_args)
yield from bot.send_message(public_channel, pub_message)
verified_users.add(user.id)
print('Verified user {} successfully'.format(user.id))
def get_forum_id(link):
# strip the url prefix
url_suffix = link[len(config['verify_url_prefix']):]
for url_part in url_suffix.split('/'):
if url_part.isdigit():
return url_part
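# Worked example (hypothetical values; config['verify_url_prefix'] is not
# shown in this file): if the prefix were
# 'https://forum.example.com/profile/' and the submitted link were
# 'https://forum.example.com/profile/12345/SomeUser', the loop above would
# return '12345', the first purely numeric path component after the prefix.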
def dupe_user_names(user):
count = 0
for server in bot.servers:
if server.id != config['server']:
continue
for member in server.members:
# TODO: do a similarity check instead of comparing lowercase
# The point is to check for impersonators, so we might want to
# check for variations of the name like 'I' -> 'l' etc
if member.name.lower() == user.name.lower():
count += 1
return count > 1
def forum_account_used(forum_id):
return forum_id in verified_forum_ids
def get_member(user):
if isinstance(user, discord.Member) and user.server.id == config['server']:
return user
for server in bot.servers:
if server.id != config['server']:
continue
return server.get_member(user.id)
# member not found
return None
@bot.event
@asyncio.coroutine
def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
# get a list of all the users already verified so we can ignore them
for server in bot.servers:
if server.id != config['server']:
continue
print('total users on the server:', len(server.members))
for member in server.members:
for role in member.roles:
if role.id == config['verified_role']:
verified_users.add(member.id)
print('already verified users:', len(verified_users))
print('------')
# find the channels we care about
for channel in bot.get_all_channels():
if config['channel'] in [channel.id, channel.name]:
# this is ugly, but we need to tell python we are setting
# the global var
global public_channel
public_channel = channel
if config['mod_channel'] in [channel.id, channel.name]:
global mod_channel
mod_channel = channel
bot.run(config['email'], config['password'])
|
|
# Copyright (c) 2012 <Jaume Devesa ([email protected])>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import model
from clint.textui import colored
"""
gists.formatters
~~~~~~~~~~~~~~~~
'formatters' is the last step of the execution workflow. All the methods
receive an instance of the :class: `Result <Result>` and use it to format
the output so it is legible, clear and pretty (using the useful 'clint'
package).
"""
def format_show(result):
""" Formats the output of the 'show' action.
:param result: Result instance
"""
if result.success:
resultdata = result.data
# If the data is an instance of the 'GistFile' data model,
# parse the file, otherwise, parse the 'Gist' metadata
if isinstance(resultdata, model.GistFile):
return __format_file(resultdata)
else:
return __format_gist(resultdata)
else:
# Format the error string message
return __format_error(result.data)
def format_post(result):
""" Formats the output of the 'create' action.
:param result: Result instance
"""
if result.success:
# Format the 'Gist' metadata object
return __format_gist(result.data)
else:
# Format the error string message
return __format_error(result.data)
def format_get(result):
""" Formats the output of the 'get/download' action.
:param result: Result instance
"""
if result.success:
# The result is just a string informing the success
return result.data
else:
# Format the error string message
return __format_error(result.data)
def format_delete(result):
""" Formats the output of the 'delete' action.
:param result: Result instance
"""
if result.success:
# The result is just a string informing the success
return result.data
else:
# Format the error string message
return __format_error(result.data)
def format_update(result):
""" Formats the output of the 'delete' action.
:param result: Result instance
"""
if result.success:
# Format the 'Gist' metadata object
return __format_gist(result.data)
else:
# Format the error string message
return __format_error(result.data)
def format_list(result):
""" Formats the output of the 'list' action.
:param result: Result instance
"""
if result.success:
# Get the list of Gists from the data
list_of_gists = result.data
# Calculate the number of columns of the current terminal window
rows, columns = os.popen('stty size', 'r').read().split()
# Set the header
gists_string = unicode(colored.cyan('-' * int(columns)) + "\n")
gists_string += unicode(colored.cyan("List of gists\n"))
gists_string += unicode(colored.cyan('-' * int(columns)) + "\n")
# Set the contents for each Gist listed
for gist in list_of_gists:
gists_string += unicode(colored.green(gist.identifier + ": "))
description = "(no desc)"
if gist.description and gist.description != "":
description = gist.description
gists_string += description
gist_names = [gistfile.filename for
gistfile in gist.files]
stringfiles = " [" + ", ".join(gist_names) + "]"
gists_string += unicode(colored.red(stringfiles))
if not gist.public:
gists_string += " (Private Gist) "
gists_string += '\n'
# Set the footer
gists_string += unicode(colored.cyan('-' * int(columns)) + "\n")
# Return the formatted String
return gists_string
else:
# Format the error string message
return __format_error(result.data)
def format_authorize(result):
""" This is enough for this method. """
if result.success:
return colored.green("Authentication token written in '~/.gistsrc'")
else:
# Format the error string message
return __format_error(result.data)
def format_star(result):
""" Formats the output of the 'star' and 'unstar' action.
:param result: Result instance
"""
if result.success:
# The result is just a string informing the success
return result.data
else:
# Format the error string message
return __format_error(result.data)
def __format_gist(gist):
""" Formats the output for a Gist metadata object.
:param gist: :class: `Gist <Gist>` instance.
"""
url_title = unicode(colored.green('Url:\t\t'))
html_title = unicode(colored.green('Html Url:\t'))
public_title = unicode(colored.green('Private:\t'))
file_title = unicode(colored.green('Files:\t\t'))
# Calculate the number of columns of the current terminal window
rows, columns = os.popen('stty size', 'r').read().split()
# Prepare the Header
gists_string = colored.cyan('-' * int(columns)) + "\n"
gists_string += colored.cyan("Gist [" + gist.identifier + "]") + '\n'
gists_string += colored.cyan('-' * int(columns)) + "\n"
# Format Gist data
gists_string += colored.green('Description:\t')
if gist.description:
gists_string += gist.description + '\n'
gists_string += url_title + gist.url + '\n'
gists_string += html_title + gist.html_url + '\n'
gists_string += public_title + str(not gist.public) + '\n'
gist_names = [gistfile.filename for gistfile in gist.files]
stringfiles = "[" + ", ".join(gist_names) + "]"
gists_string += file_title + colored.red(stringfiles) + '\n'
# Prepare the Footer
gists_string += colored.cyan('-' * int(columns)) + "\n"
return gists_string
def __format_error(data):
""" Print the string output error. """
return colored.red("Error: ") + data
def __format_file(file_gist):
""" Formats the output for a GistFile object.
:param file_gist: :class: `GistFile <GistFile>` instance.
"""
# Calculate the number of columns of the current terminal window
rows, columns = os.popen('stty size', 'r').read().split()
# Prepare the Header
gist_string = colored.cyan('-' * int(columns)) + "\n"
gist_string += colored.cyan("File [" + file_gist.filename + "]\n")
gist_string += colored.cyan('-' * int(columns)) + "\n"
# Format Gist data
gist_string += (colored.green("Language:") + " " +
colored.red(file_gist.language) + "\n")
gist_string += (colored.green("Size:") + " " +
colored.red(file_gist.size) + "\n")
gist_string += (colored.green("Raw Url:") + " " +
colored.red(file_gist.raw_url + "\n"))
gist_string += (colored.green("Content:\n\n")
+ file_gist.content + "\n\n")
# Prepare the Footer
gist_string += colored.cyan('-' * int(columns)) + "\n"
return gist_string
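# Usage sketch (assumes a Result instance named 'result' produced earlier in
# the gists workflow):
#
#   print format_list(result)    # all gists for a user
#   print format_show(result)    # a single gist or gist file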
|
|
# Copyright 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Magnum object test utilities."""
from magnum.common import exception
from magnum import objects
from magnum.tests.unit.db import utils as db_utils
def get_test_baymodel(context, **kw):
"""Return a BayModel object with appropriate attributes.
NOTE: The object leaves the attributes marked as changed, such
that a create() could be used to commit it to the DB.
"""
db_baymodel = db_utils.get_test_baymodel(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del db_baymodel['id']
baymodel = objects.BayModel(context)
for key in db_baymodel:
setattr(baymodel, key, db_baymodel[key])
return baymodel
def create_test_baymodel(context, **kw):
"""Create and return a test baymodel object.
Create a baymodel in the DB and return a BayModel object with appropriate
attributes.
"""
baymodel = get_test_baymodel(context, **kw)
try:
baymodel.create()
except exception.BayModelAlreadyExists:
baymodel = objects.BayModel.get(context, baymodel.uuid)
return baymodel
def get_test_bay(context, **kw):
"""Return a Bay object with appropriate attributes.
NOTE: The object leaves the attributes marked as changed, such
that a create() could be used to commit it to the DB.
"""
db_bay = db_utils.get_test_bay(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del db_bay['id']
bay = objects.Bay(context)
for key in db_bay:
setattr(bay, key, db_bay[key])
return bay
def create_test_bay(context, **kw):
"""Create and return a test bay object.
Create a bay in the DB and return a Bay object with appropriate
attributes.
"""
bay = get_test_bay(context, **kw)
create_test_baymodel(context, uuid=bay['baymodel_id'],
coe=kw.get('coe', 'swarm'))
bay.create()
return bay
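# Typical usage in a unit test (sketch; ``self.context`` is assumed to be the
# test case's request context, and the extra keyword arguments are examples):
#
#   bay = create_test_bay(self.context, name='bay1', node_count=2)
#   self.assertEqual('bay1', bay.name)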
def get_test_pod(context, **kw):
"""Return a Pod object with appropriate attributes.
NOTE: The object leaves the attributes marked as changed, such
that a create() could be used to commit it to the DB.
"""
db_pod = db_utils.get_test_pod(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del db_pod['id']
pod = objects.Pod(context)
for key in db_pod:
setattr(pod, key, db_pod[key])
return pod
def create_test_pod(context, **kw):
"""Create and return a test pod object.
Create a pod in the DB and return a Pod object with appropriate
attributes.
"""
pod = get_test_pod(context, **kw)
pod.manifest = '{"foo": "bar"}'
return pod
def get_test_service(context, **kw):
"""Return a Service object with appropriate attributes.
NOTE: The object leaves the attributes marked as changed, such
that a create() could be used to commit it to the DB.
"""
db_service = db_utils.get_test_service(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del db_service['id']
service = objects.Service(context)
for key in db_service:
setattr(service, key, db_service[key])
return service
def create_test_service(context, **kw):
"""Create and return a test service object.
Create a service in the DB and return a Service object with appropriate
attributes.
"""
service = get_test_service(context, **kw)
service.manifest = '{"foo": "bar"}'
return service
def get_test_rc(context, **kw):
"""Return a ReplicationController object with appropriate attributes.
NOTE: The object leaves the attributes marked as changed, such
that a create() could be used to commit it to the DB.
"""
db_rc = db_utils.get_test_rc(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del db_rc['id']
rc = objects.ReplicationController(context)
for key in db_rc:
setattr(rc, key, db_rc[key])
return rc
def create_test_rc(context, **kw):
"""Create and return a test ReplicationController object.
Create a replication controller in the DB and return a
ReplicationController object with appropriate attributes.
"""
rc = get_test_rc(context, **kw)
rc.manifest = '{"foo": "bar"}'
return rc
def get_test_x509keypair(context, **kw):
"""Return a X509KeyPair object with appropriate attributes.
NOTE: The object leaves the attributes marked as changed, such
that a create() could be used to commit it to the DB.
"""
db_x509keypair = db_utils.get_test_x509keypair(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
del db_x509keypair['id']
x509keypair = objects.X509KeyPair(context)
for key in db_x509keypair:
setattr(x509keypair, key, db_x509keypair[key])
return x509keypair
def create_test_x509keypair(context, **kw):
"""Create and return a test x509keypair object.
Create a x509keypair in the DB and return a X509KeyPair object with
appropriate attributes.
"""
x509keypair = get_test_x509keypair(context, **kw)
x509keypair.create()
return x509keypair
def get_test_magnum_service_object(context, **kw):
"""Return a test magnum_service object.
Get a magnum_service from DB layer and return an object with
appropriate attributes.
"""
db_magnum_service = db_utils.get_test_magnum_service(**kw)
magnum_service = objects.MagnumService(context)
for key in db_magnum_service:
setattr(magnum_service, key, db_magnum_service[key])
return magnum_service
def create_test_container(context, **kw):
"""Create and return a test container object.
Create a container in the DB and return a container object with
appropriate attributes.
"""
container = get_test_container(context, **kw)
container.create()
return container
def get_test_container(context, **kw):
"""Return a test container object with appropriate attributes.
NOTE: The object leaves the attributes marked as changed, such
that a create() could be used to commit it to the DB.
"""
db_container = db_utils.get_test_container(**kw)
container = objects.Container(context)
for key in db_container:
setattr(container, key, db_container[key])
return container
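# Example (illustrative sketch, not part of the original module): typical use
# of the helpers above from a unit test. The DbTestCase base class, the
# ``utils`` import alias and the attribute override are hypothetical; only the
# helper functions themselves are defined above.
#
#     class TestBayObject(base.DbTestCase):
#         def test_create_test_bay(self):
#             bay = utils.create_test_bay(self.context, node_count=2)
#             self.assertEqual(2, bay.node_count)
#
#     # get_test_bay() leaves the object uncommitted, so a test can adjust
#     # attributes freely before calling bay.create() itself.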
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Metacloud, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Workflow Logic the Assignment service."""
import copy
import urllib
import urlparse
import uuid
import six
from keystone.common import controller
from keystone.common import dependency
from keystone import config
from keystone import exception
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import log
CONF = config.CONF
LOG = log.getLogger(__name__)
@dependency.requires('assignment_api', 'identity_api', 'token_api')
class Tenant(controller.V2Controller):
@controller.v2_deprecated
def get_all_projects(self, context, **kw):
"""Gets a list of all tenants for an admin user."""
if 'name' in context['query_string']:
return self.get_project_by_name(
context, context['query_string'].get('name'))
self.assert_admin(context)
tenant_refs = self.assignment_api.list_projects()
for tenant_ref in tenant_refs:
tenant_ref = self.filter_domain_id(tenant_ref)
params = {
'limit': context['query_string'].get('limit'),
'marker': context['query_string'].get('marker'),
}
return self._format_project_list(tenant_refs, **params)
@controller.v2_deprecated
def get_projects_for_token(self, context, **kw):
"""Get valid tenants for token based on token used to authenticate.
Pulls the token from the context, validates it and gets the valid
tenants for the user in the token.
Doesn't care about token scopedness.
"""
try:
token_ref = self.token_api.get_token(context['token_id'])
except exception.NotFound as e:
LOG.warning(_('Authentication failed: %s'), e)
raise exception.Unauthorized(e)
user_ref = token_ref['user']
tenant_refs = (
self.assignment_api.list_projects_for_user(user_ref['id']))
tenant_refs = [self.filter_domain_id(ref) for ref in tenant_refs
if ref['domain_id'] == CONF.identity.default_domain_id]
params = {
'limit': context['query_string'].get('limit'),
'marker': context['query_string'].get('marker'),
}
return self._format_project_list(tenant_refs, **params)
@controller.v2_deprecated
def get_project(self, context, tenant_id):
# TODO(termie): this stuff should probably be moved to middleware
self.assert_admin(context)
ref = self.assignment_api.get_project(tenant_id)
return {'tenant': self.filter_domain_id(ref)}
@controller.v2_deprecated
def get_project_by_name(self, context, tenant_name):
self.assert_admin(context)
ref = self.assignment_api.get_project_by_name(
tenant_name, CONF.identity.default_domain_id)
return {'tenant': self.filter_domain_id(ref)}
# CRUD Extension
@controller.v2_deprecated
def create_project(self, context, tenant):
tenant_ref = self._normalize_dict(tenant)
if 'name' not in tenant_ref or not tenant_ref['name']:
msg = _('Name field is required and cannot be empty')
raise exception.ValidationError(message=msg)
self.assert_admin(context)
tenant_ref['id'] = tenant_ref.get('id', uuid.uuid4().hex)
tenant = self.assignment_api.create_project(
tenant_ref['id'],
self._normalize_domain_id(context, tenant_ref))
return {'tenant': self.filter_domain_id(tenant)}
@controller.v2_deprecated
def update_project(self, context, tenant_id, tenant):
self.assert_admin(context)
# Remove domain_id if specified - a v2 api caller should not
# be specifying that
clean_tenant = tenant.copy()
clean_tenant.pop('domain_id', None)
tenant_ref = self.assignment_api.update_project(
tenant_id, clean_tenant)
return {'tenant': tenant_ref}
@controller.v2_deprecated
def delete_project(self, context, tenant_id):
self.assert_admin(context)
self.assignment_api.delete_project(tenant_id)
@controller.v2_deprecated
def get_project_users(self, context, tenant_id, **kw):
self.assert_admin(context)
user_refs = []
user_ids = self.assignment_api.list_user_ids_for_project(tenant_id)
for user_id in user_ids:
user_ref = self.identity_api.get_user(user_id)
user_refs.append(self.identity_api.v3_to_v2_user(user_ref))
return {'users': user_refs}
def _format_project_list(self, tenant_refs, **kwargs):
marker = kwargs.get('marker')
first_index = 0
if marker is not None:
for (marker_index, tenant) in enumerate(tenant_refs):
if tenant['id'] == marker:
# we start pagination after the marker
first_index = marker_index + 1
break
else:
msg = _('Marker could not be found')
raise exception.ValidationError(message=msg)
limit = kwargs.get('limit')
last_index = None
if limit is not None:
try:
limit = int(limit)
if limit < 0:
raise AssertionError()
except (ValueError, AssertionError):
msg = _('Invalid limit value')
raise exception.ValidationError(message=msg)
last_index = first_index + limit
tenant_refs = tenant_refs[first_index:last_index]
for x in tenant_refs:
if 'enabled' not in x:
x['enabled'] = True
o = {'tenants': tenant_refs,
'tenants_links': []}
return o
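    # Illustrative sketch of the marker/limit behaviour implemented above,
    # using made-up tenant ids: pagination starts after the marker entry,
    # at most ``limit`` items are returned, and a missing 'enabled' flag
    # defaults to True.
    #
    #     refs = [{'id': 'a'}, {'id': 'b'}, {'id': 'c'}, {'id': 'd'}]
    #     self._format_project_list(refs, marker='b', limit=1)
    #     # -> {'tenants': [{'id': 'c', 'enabled': True}], 'tenants_links': []}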
@dependency.requires('assignment_api', 'identity_api')
class Role(controller.V2Controller):
# COMPAT(essex-3)
@controller.v2_deprecated
def get_user_roles(self, context, user_id, tenant_id=None):
"""Get the roles for a user and tenant pair.
Since we're trying to ignore the idea of user-only roles we're
not implementing them in hopes that the idea will die off.
"""
self.assert_admin(context)
if tenant_id is None:
raise exception.NotImplemented(message='User roles not supported: '
'tenant ID required')
roles = self.assignment_api.get_roles_for_user_and_project(
user_id, tenant_id)
return {'roles': [self.assignment_api.get_role(x)
for x in roles]}
# CRUD extension
@controller.v2_deprecated
def get_role(self, context, role_id):
self.assert_admin(context)
return {'role': self.assignment_api.get_role(role_id)}
@controller.v2_deprecated
def create_role(self, context, role):
role = self._normalize_dict(role)
self.assert_admin(context)
if 'name' not in role or not role['name']:
msg = _('Name field is required and cannot be empty')
raise exception.ValidationError(message=msg)
role_id = uuid.uuid4().hex
role['id'] = role_id
role_ref = self.assignment_api.create_role(role_id, role)
return {'role': role_ref}
@controller.v2_deprecated
def delete_role(self, context, role_id):
self.assert_admin(context)
self.assignment_api.delete_role(role_id)
@controller.v2_deprecated
def get_roles(self, context):
self.assert_admin(context)
return {'roles': self.assignment_api.list_roles()}
@controller.v2_deprecated
def add_role_to_user(self, context, user_id, role_id, tenant_id=None):
"""Add a role to a user and tenant pair.
Since we're trying to ignore the idea of user-only roles we're
not implementing them in hopes that the idea will die off.
"""
self.assert_admin(context)
if tenant_id is None:
raise exception.NotImplemented(message='User roles not supported: '
'tenant_id required')
self.assignment_api.add_role_to_user_and_project(
user_id, tenant_id, role_id)
role_ref = self.assignment_api.get_role(role_id)
return {'role': role_ref}
@controller.v2_deprecated
def remove_role_from_user(self, context, user_id, role_id, tenant_id=None):
"""Remove a role from a user and tenant pair.
Since we're trying to ignore the idea of user-only roles we're
not implementing them in hopes that the idea will die off.
"""
self.assert_admin(context)
if tenant_id is None:
raise exception.NotImplemented(message='User roles not supported: '
'tenant_id required')
# This still has the weird legacy semantics that adding a role to
# a user also adds them to a tenant, so we must follow up on that
self.assignment_api.remove_role_from_user_and_project(
user_id, tenant_id, role_id)
# COMPAT(diablo): CRUD extension
@controller.v2_deprecated
def get_role_refs(self, context, user_id):
"""Ultimate hack to get around having to make role_refs first-class.
This will basically iterate over the various roles the user has in
all tenants the user is a member of and create fake role_refs where
the id encodes the user-tenant-role information so we can look
up the appropriate data when we need to delete them.
"""
self.assert_admin(context)
# Ensure user exists by getting it first.
self.identity_api.get_user(user_id)
tenants = self.assignment_api.list_projects_for_user(user_id)
o = []
for tenant in tenants:
# As a v2 call, we should limit the response to those projects in
# the default domain.
if tenant['domain_id'] != CONF.identity.default_domain_id:
continue
role_ids = self.assignment_api.get_roles_for_user_and_project(
user_id, tenant['id'])
for role_id in role_ids:
ref = {'roleId': role_id,
'tenantId': tenant['id'],
'userId': user_id}
ref['id'] = urllib.urlencode(ref)
o.append(ref)
return {'roles': o}
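    # Illustrative sketch of the fake role_ref id built above, with made-up
    # ids. urllib.urlencode() flattens the triple into a query string (key
    # order is not guaranteed), and delete_role_ref() below reverses it with
    # urlparse.parse_qs():
    #
    #     ref = {'roleId': 'r1', 'tenantId': 't1', 'userId': 'u1'}
    #     urllib.urlencode(ref)   # -> e.g. 'roleId=r1&tenantId=t1&userId=u1'
    #     urlparse.parse_qs('roleId=r1&tenantId=t1&userId=u1')
    #     # -> {'roleId': ['r1'], 'tenantId': ['t1'], 'userId': ['u1']}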
# COMPAT(diablo): CRUD extension
@controller.v2_deprecated
def create_role_ref(self, context, user_id, role):
"""This is actually used for adding a user to a tenant.
In the legacy data model adding a user to a tenant required setting
a role.
"""
self.assert_admin(context)
# TODO(termie): for now we're ignoring the actual role
tenant_id = role.get('tenantId')
role_id = role.get('roleId')
self.assignment_api.add_role_to_user_and_project(
user_id, tenant_id, role_id)
role_ref = self.assignment_api.get_role(role_id)
return {'role': role_ref}
# COMPAT(diablo): CRUD extension
@controller.v2_deprecated
def delete_role_ref(self, context, user_id, role_ref_id):
"""This is actually used for deleting a user from a tenant.
In the legacy data model removing a user from a tenant required
deleting a role.
To emulate this, we encode the tenant and role in the role_ref_id,
and if this happens to be the last role for the user-tenant pair,
we remove the user from the tenant.
"""
self.assert_admin(context)
# TODO(termie): for now we're ignoring the actual role
role_ref_ref = urlparse.parse_qs(role_ref_id)
tenant_id = role_ref_ref.get('tenantId')[0]
role_id = role_ref_ref.get('roleId')[0]
self.assignment_api.remove_role_from_user_and_project(
user_id, tenant_id, role_id)
@dependency.requires('assignment_api')
class DomainV3(controller.V3Controller):
collection_name = 'domains'
member_name = 'domain'
def __init__(self):
super(DomainV3, self).__init__()
self.get_member_from_driver = self.assignment_api.get_domain
@controller.protected()
def create_domain(self, context, domain):
self._require_attribute(domain, 'name')
ref = self._assign_unique_id(self._normalize_dict(domain))
ref = self.assignment_api.create_domain(ref['id'], ref)
return DomainV3.wrap_member(context, ref)
@controller.filterprotected('enabled', 'name')
def list_domains(self, context, filters):
hints = DomainV3.build_driver_hints(context, filters)
refs = self.assignment_api.list_domains(hints=hints)
return DomainV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
def get_domain(self, context, domain_id):
ref = self.assignment_api.get_domain(domain_id)
return DomainV3.wrap_member(context, ref)
@controller.protected()
def update_domain(self, context, domain_id, domain):
self._require_matching_id(domain_id, domain)
ref = self.assignment_api.update_domain(domain_id, domain)
return DomainV3.wrap_member(context, ref)
@controller.protected()
def delete_domain(self, context, domain_id):
return self.assignment_api.delete_domain(domain_id)
@dependency.requires('assignment_api')
class ProjectV3(controller.V3Controller):
collection_name = 'projects'
member_name = 'project'
def __init__(self):
super(ProjectV3, self).__init__()
self.get_member_from_driver = self.assignment_api.get_project
@controller.protected()
def create_project(self, context, project):
self._require_attribute(project, 'name')
ref = self._assign_unique_id(self._normalize_dict(project))
ref = self._normalize_domain_id(context, ref)
ref = self.assignment_api.create_project(ref['id'], ref)
return ProjectV3.wrap_member(context, ref)
@controller.filterprotected('domain_id', 'enabled', 'name')
def list_projects(self, context, filters):
hints = ProjectV3.build_driver_hints(context, filters)
refs = self.assignment_api.list_projects(hints=hints)
return ProjectV3.wrap_collection(context, refs, hints=hints)
@controller.filterprotected('enabled', 'name')
def list_user_projects(self, context, filters, user_id):
hints = ProjectV3.build_driver_hints(context, filters)
refs = self.assignment_api.list_projects_for_user(user_id,
hints=hints)
return ProjectV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
def get_project(self, context, project_id):
ref = self.assignment_api.get_project(project_id)
return ProjectV3.wrap_member(context, ref)
@controller.protected()
def update_project(self, context, project_id, project):
self._require_matching_id(project_id, project)
ref = self.assignment_api.update_project(project_id, project)
return ProjectV3.wrap_member(context, ref)
@controller.protected()
def delete_project(self, context, project_id):
return self.assignment_api.delete_project(project_id)
@dependency.requires('assignment_api', 'identity_api')
class RoleV3(controller.V3Controller):
collection_name = 'roles'
member_name = 'role'
def __init__(self):
super(RoleV3, self).__init__()
self.get_member_from_driver = self.assignment_api.get_role
@controller.protected()
def create_role(self, context, role):
self._require_attribute(role, 'name')
ref = self._assign_unique_id(self._normalize_dict(role))
ref = self.assignment_api.create_role(ref['id'], ref)
return RoleV3.wrap_member(context, ref)
@controller.filterprotected('name')
def list_roles(self, context, filters):
hints = RoleV3.build_driver_hints(context, filters)
refs = self.assignment_api.list_roles(
hints=hints)
return RoleV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
def get_role(self, context, role_id):
ref = self.assignment_api.get_role(role_id)
return RoleV3.wrap_member(context, ref)
@controller.protected()
def update_role(self, context, role_id, role):
self._require_matching_id(role_id, role)
ref = self.assignment_api.update_role(role_id, role)
return RoleV3.wrap_member(context, ref)
@controller.protected()
def delete_role(self, context, role_id):
self.assignment_api.delete_role(role_id)
def _require_domain_xor_project(self, domain_id, project_id):
if (domain_id and project_id) or (not domain_id and not project_id):
msg = _('Specify a domain or project, not both')
raise exception.ValidationError(msg)
def _require_user_xor_group(self, user_id, group_id):
if (user_id and group_id) or (not user_id and not group_id):
msg = _('Specify a user or group, not both')
raise exception.ValidationError(msg)
def _check_if_inherited(self, context):
return (CONF.os_inherit.enabled and
context['path'].startswith('/OS-INHERIT') and
context['path'].endswith('/inherited_to_projects'))
def _check_grant_protection(self, context, protection, role_id=None,
user_id=None, group_id=None,
domain_id=None, project_id=None):
"""Check protection for role grant APIs.
The policy rule might want to inspect attributes of any of the entities
involved in the grant. So we get these and pass them to the
check_protection() handler in the controller.
"""
ref = {}
if role_id:
ref['role'] = self.assignment_api.get_role(role_id)
if user_id:
ref['user'] = self.identity_api.get_user(user_id)
else:
ref['group'] = self.identity_api.get_group(group_id)
if domain_id:
ref['domain'] = self.assignment_api.get_domain(domain_id)
else:
ref['project'] = self.assignment_api.get_project(project_id)
self.check_protection(context, protection, ref)
@controller.protected(callback=_check_grant_protection)
def create_grant(self, context, role_id, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Grants a role to a user or group on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
if user_id:
self.identity_api.get_user(user_id)
if group_id:
self.identity_api.get_group(group_id)
self.assignment_api.create_grant(
role_id, user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
@controller.protected(callback=_check_grant_protection)
def list_grants(self, context, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Lists roles granted to user/group on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
refs = self.assignment_api.list_grants(
user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
return RoleV3.wrap_collection(context, refs)
@controller.protected(callback=_check_grant_protection)
def check_grant(self, context, role_id, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Checks if a role has been granted on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
if user_id:
self.identity_api.get_user(user_id)
if group_id:
self.identity_api.get_group(group_id)
self.assignment_api.get_grant(
role_id, user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
@controller.protected(callback=_check_grant_protection)
def revoke_grant(self, context, role_id, user_id=None,
group_id=None, domain_id=None, project_id=None):
"""Revokes a role from user/group on either a domain or project."""
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
self.assignment_api.delete_grant(
role_id, user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
@dependency.requires('assignment_api', 'identity_api')
class RoleAssignmentV3(controller.V3Controller):
# TODO(henry-nash): The current implementation does not provide a full
# first class entity for role-assignment. There is no role_assignment_id
# and only the list_role_assignment call is supported. Further, since it
# is not a first class entity, the links for the individual entities
# reference the individual role grant APIs.
collection_name = 'role_assignments'
member_name = 'role_assignment'
@classmethod
def wrap_member(cls, context, ref):
# NOTE(henry-nash): Since we are not yet a true collection, we override
        # the wrapper as we have already included the links in the entities
pass
def _format_entity(self, entity):
"""Format an assignment entity for API response.
The driver layer returns entities as dicts containing the ids of the
actor (e.g. user or group), target (e.g. domain or project) and role.
If it is an inherited role, then this is also indicated. Examples:
{'user_id': user_id,
         'project_id': project_id,
'role_id': role_id}
or, for an inherited role:
{'user_id': user_id,
'domain_id': domain_id,
'role_id': role_id,
'inherited_to_projects': true}
This function maps this into the format to be returned via the API,
e.g. for the second example above:
{
'user': {
{'id': user_id}
},
'scope': {
'domain': {
{'id': domain_id}
},
                'OS-INHERIT:inherited_to': 'projects'
},
'role': {
{'id': role_id}
},
'links': {
'assignment': '/domains/domain_id/users/user_id/roles/'
'role_id/inherited_to_projects'
}
}
"""
formatted_entity = {}
suffix = ""
if 'user_id' in entity:
formatted_entity['user'] = {'id': entity['user_id']}
actor_link = 'users/%s' % entity['user_id']
if 'group_id' in entity:
formatted_entity['group'] = {'id': entity['group_id']}
actor_link = 'groups/%s' % entity['group_id']
if 'role_id' in entity:
formatted_entity['role'] = {'id': entity['role_id']}
if 'project_id' in entity:
formatted_entity['scope'] = (
{'project': {'id': entity['project_id']}})
target_link = '/projects/%s' % entity['project_id']
if 'domain_id' in entity:
formatted_entity['scope'] = (
{'domain': {'id': entity['domain_id']}})
if 'inherited_to_projects' in entity:
formatted_entity['scope']['OS-INHERIT:inherited_to'] = (
'projects')
target_link = '/OS-INHERIT/domains/%s' % entity['domain_id']
suffix = '/inherited_to_projects'
else:
target_link = '/domains/%s' % entity['domain_id']
formatted_entity.setdefault('links', {})
formatted_entity['links']['assignment'] = (
self.base_url('%(target)s/%(actor)s/roles/%(role)s%(suffix)s' % {
'target': target_link,
'actor': actor_link,
'role': entity['role_id'],
'suffix': suffix}))
return formatted_entity
def _expand_indirect_assignments(self, refs):
"""Processes entity list into all-direct assignments.
For any group role assignments in the list, create a role assignment
entity for each member of that group, and then remove the group
assignment entity itself from the list.
If the OS-INHERIT extension is enabled, then honor any inherited
roles on the domain by creating the equivalent on all projects
owned by the domain.
For any new entity created by virtue of group membership, add in an
additional link to that membership.
"""
def _get_group_members(ref):
"""Get a list of group members.
Get the list of group members. If this fails with
GroupNotFound, then log this as a warning, but allow
overall processing to continue.
"""
try:
members = self.identity_api.list_users_in_group(
ref['group']['id'])
except exception.GroupNotFound:
members = []
# The group is missing, which should not happen since
# group deletion should remove any related assignments, so
# log a warning
if 'domain' in ref:
                    target = 'Domain: %s' % ref['domain'].get('id')
elif 'project' in ref:
                    target = 'Project: %s' % ref['project'].get('id')
else:
# Should always be a domain or project, but since to get
# here things have gone astray, let's be cautious.
target = 'Unknown'
LOG.warning(
_('Group %(group)s not found for role-assignment - '
'%(target)s with Role: %(role)s'), {
                          'group': ref['group']['id'], 'target': target,
                          'role': ref.get('role', {}).get('id')})
return members
def _build_user_assignment_equivalent_of_group(
user, group_id, template):
"""Create a user assignment equivalent to the group one.
The template has had the 'group' entity removed, so
substitute a 'user' one. The 'assignment' link stays as it is,
referring to the group assignment that led to this role.
A 'membership' link is added that refers to this particular
user's membership of this group.
"""
user_entry = copy.deepcopy(template)
user_entry['user'] = {'id': user['id']}
user_entry['links']['membership'] = (
self.base_url('/groups/%s/users/%s' %
(group_id, user['id'])))
return user_entry
def _build_project_equivalent_of_user_domain_role(
project_id, domain_id, template):
"""Create a user project assignment equivalent to the domain one.
The template has had the 'domain' entity removed, so
substitute a 'project' one, modifying the 'assignment' link
to match.
"""
project_entry = copy.deepcopy(template)
project_entry['scope']['project'] = {'id': project_id}
project_entry['links']['assignment'] = (
self.base_url(
'/OS-INHERIT/domains/%s/users/%s/roles/%s'
'/inherited_to_projects' % (
domain_id, project_entry['user']['id'],
project_entry['role']['id'])))
return project_entry
def _build_project_equivalent_of_group_domain_role(
user_id, group_id, project_id, domain_id, template):
"""Create a user project equivalent to the domain group one.
The template has had the 'domain' and 'group' entities removed, so
substitute a 'user-project' one, modifying the 'assignment' link
to match.
"""
project_entry = copy.deepcopy(template)
project_entry['user'] = {'id': user_id}
project_entry['scope']['project'] = {'id': project_id}
project_entry['links']['assignment'] = (
self.base_url('/OS-INHERIT/domains/%s/groups/%s/roles/%s'
'/inherited_to_projects' % (
domain_id, group_id,
project_entry['role']['id'])))
project_entry['links']['membership'] = (
self.base_url('/groups/%s/users/%s' %
(group_id, user_id)))
return project_entry
# Scan the list of entities for any assignments that need to be
# expanded.
#
        # If the OS-INHERIT extension is enabled, the refs list may
# contain roles to be inherited from domain to project, so expand
# these as well into project equivalents
#
# For any regular group entries, expand these into user entries based
# on membership of that group.
#
# Due to the potentially large expansions, rather than modify the
# list we are enumerating, we build a new one as we go.
#
new_refs = []
for r in refs:
if 'OS-INHERIT:inherited_to' in r['scope']:
# It's an inherited domain role - so get the list of projects
# owned by this domain. A domain scope is guaranteed since we
# checked this when we built the refs list
project_ids = (
[x['id'] for x in
self.assignment_api.list_projects_in_domain(
r['scope']['domain']['id'])])
base_entry = copy.deepcopy(r)
domain_id = base_entry['scope']['domain']['id']
base_entry['scope'].pop('domain')
# For each project, create an equivalent role assignment
for p in project_ids:
# If it's a group assignment, then create equivalent user
# roles based on membership of the group
if 'group' in base_entry:
members = _get_group_members(base_entry)
sub_entry = copy.deepcopy(base_entry)
group_id = sub_entry['group']['id']
sub_entry.pop('group')
for m in members:
new_entry = (
_build_project_equivalent_of_group_domain_role(
m['id'], group_id, p,
domain_id, sub_entry))
new_refs.append(new_entry)
else:
new_entry = (
_build_project_equivalent_of_user_domain_role(
p, domain_id, base_entry))
new_refs.append(new_entry)
elif 'group' in r:
# It's a non-inherited group role assignment, so get the list
# of members.
members = _get_group_members(r)
# Now replace that group role assignment entry with an
# equivalent user role assignment for each of the group members
base_entry = copy.deepcopy(r)
group_id = base_entry['group']['id']
base_entry.pop('group')
for m in members:
user_entry = _build_user_assignment_equivalent_of_group(
m, group_id, base_entry)
new_refs.append(user_entry)
else:
new_refs.append(r)
return new_refs
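    # Illustrative sketch of the expansion above, with made-up ids: a single
    # group-on-project assignment whose group has two members becomes two
    # user entries, each gaining a 'membership' link while keeping the
    # original 'assignment' link.
    #
    #     refs = [{'group': {'id': 'g1'},
    #              'scope': {'project': {'id': 'p1'}},
    #              'role': {'id': 'r1'},
    #              'links': {'assignment': '<assignment-url>'}}]
    #     self._expand_indirect_assignments(refs)
    #     # -> [{'user': {'id': 'u1'}, 'scope': ..., 'role': ...,
    #     #      'links': {'assignment': '<assignment-url>',
    #     #                'membership': '<base>/groups/g1/users/u1'}},
    #     #     {'user': {'id': 'u2'}, ...}]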
def _query_filter_is_true(self, filter_value):
"""Determine if bool query param is 'True'.
We treat this the same way as we do for policy
enforcement:
        {bool_param}=0 is treated as False.
        Any other value is considered to be equivalent to
        True, including the absence of a value.
"""
if (isinstance(filter_value, six.string_types) and
filter_value == '0'):
val = False
else:
val = True
return val
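    # Quick illustration of the rule above, mirroring how boolean query
    # parameters are treated for policy enforcement:
    #
    #     self._query_filter_is_true('0')    # -> False
    #     self._query_filter_is_true('1')    # -> True
    #     self._query_filter_is_true('')     # -> True  (key present, no value)
    #     self._query_filter_is_true(None)   # -> True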
def _filter_inherited(self, entry):
if ('inherited_to_projects' in entry and
not CONF.os_inherit.enabled):
return False
else:
return True
@controller.filterprotected('group.id', 'role.id',
'scope.domain.id', 'scope.project.id',
'scope.OS-INHERIT:inherited_to', 'user.id')
def list_role_assignments(self, context, filters):
# TODO(henry-nash): This implementation uses the standard filtering
# in the V3.wrap_collection. Given the large number of individual
# assignments, this is pretty inefficient. An alternative would be
# to pass the filters into the driver call, so that the list size is
# kept a minimum.
hints = self.build_driver_hints(context, filters)
refs = self.assignment_api.list_role_assignments()
formatted_refs = (
[self._format_entity(x) for x in refs
if self._filter_inherited(x)])
if ('effective' in context['query_string'] and
self._query_filter_is_true(
context['query_string']['effective'])):
formatted_refs = self._expand_indirect_assignments(formatted_refs)
return self.wrap_collection(context, formatted_refs, hints=hints)
@controller.protected()
def get_role_assignment(self, context):
raise exception.NotImplemented()
@controller.protected()
def update_role_assignment(self, context):
raise exception.NotImplemented()
@controller.protected()
def delete_role_assignment(self, context):
raise exception.NotImplemented()
|
|
# -*- coding: utf-8 -*-
from genestack.core_files.genestack_file import File
from genestack.genestack_exceptions import GenestackException
from genestack.java import java_object, JAVA_STRING, JAVA_HASH_MAP, JAVA_MAP
from genestack.query_range import QueryRange
from genestack.utils import deprecated, validate_type
class StringMapFile(File):
"""
File that stores arbitrary text data as a key-value mapping.
    It supports prefixed lookups, that is, it retrieves data by prepending the provided prefix to the given key.
"""
INTERFACE_NAME = 'com.genestack.api.files.IStringMapFile'
def as_map(self, query):
"""
Returns this file's entries as a dict.
:param query: query that specifies number of entries and prefix
:type query: StringMapFileQuery
:rtype: dict
"""
validate_type(query, StringMapFileQuery)
return self.invoke('asMap', types=[query.CLASS_NAME], values=[query.as_java_object()])
def keys(self, query):
"""
Returns this file's entry keys.
:param query: query that specifies number of entries and prefix
:type query: StringMapFileQuery
:rtype: set
"""
validate_type(query, StringMapFileQuery)
return frozenset(self.invoke('keys', types=[query.CLASS_NAME], values=[query.as_java_object()]))
def values(self, query):
"""
        Returns this file's entry values. The result can be sorted lexicographically by specifying
        sort_direction in the query parameter.
:param query: query that specifies number of entries, prefix and sorting direction
:type query: StringMapFileQuery
:rtype: list
"""
validate_type(query, StringMapFileQuery)
return self.invoke('values', types=[query.CLASS_NAME], values=[query.as_java_object()])
def get(self, key, prefix=None):
"""
Retrieves this file's entry value associated with the provided key.
:type key: basestring
        :param prefix: perform prefixed lookup - find values whose keys start with the specified string
:type prefix: basestring
:return: Entry value associated with the provided key or None if the value is not present
:rtype: basestring
"""
validate_type(key, basestring)
validate_type(prefix, basestring, accept_none=True)
if prefix is None:
return self.invoke('get', types=[JAVA_STRING], values=[key])
else:
return self.invoke('get', types=[JAVA_STRING, JAVA_STRING], values=[prefix, key])
def put(self, key, value, prefix=None):
"""
Creates a file entry with the provided key, value and prefix.
:type key: basestring
:type value: basestring
:type prefix: basestring
"""
validate_type(key, basestring)
validate_type(value, basestring, accept_none=True)
validate_type(prefix, basestring, accept_none=True)
if prefix is None:
return self.invoke('put', types=[JAVA_STRING, JAVA_STRING], values=[key, value])
else:
return self.invoke('put', types=[JAVA_STRING, JAVA_STRING, JAVA_STRING], values=[prefix, key, value])
def put_all(self, values_map, prefix=None):
"""
Creates multiple entries that correspond to the provided dict.
:param values_map: entries to be inserted
:type values_map: dict
:param prefix: perform prefixed insertion
:type prefix: basestring
"""
validate_type(values_map, dict)
validate_type(prefix, basestring, accept_none=True)
value = java_object(JAVA_HASH_MAP, values_map)
if prefix is None:
return self.invoke('putAll', types=[JAVA_MAP], values=[value])
else:
return self.invoke('putAll', types=[JAVA_STRING, JAVA_MAP], values=[prefix, value])
def size(self):
"""
Returns the number of entries in this file.
"""
return self.invoke('size')
def clear(self, prefix=None):
"""
        Removes all entries whose keys start with the specified prefix.
        :param prefix: an entry is deleted if its key starts with this prefix; if prefix is None,
            the whole file content is erased.
:type prefix: basestring
"""
validate_type(prefix, basestring, accept_none=True)
if prefix is None:
return self.invoke('clear')
else:
return self.invoke('clear', types=[JAVA_STRING], values=[prefix])
def get_modification_token(self):
"""
        Retrieves the current modification counter value. Required only in StringMapFileQuery to detect
        modifications to the file while iterating over its content.
:rtype: int
"""
return self.invoke('getModificationToken')
@deprecated('use "get" instead')
def get_value(self, key):
return self.get(key)
@deprecated('use "put" instead')
def set_value(self, key, value):
return self.put(key, value)
@deprecated('use "put_all" instead')
def add_all(self, values_map):
return self.put_all(values_map)
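    # Illustrative usage sketch: ``attrs`` stands for a StringMapFile instance
    # obtained from the platform (a hypothetical variable; only the methods
    # shown are defined above). A prefixed call looks the key up with the
    # prefix prepended, per the class docstring.
    #
    #     attrs.put('colour', 'blue', prefix='display.')
    #     attrs.get('colour', prefix='display.')        # value stored above
    #     attrs.put_all({'a': '1', 'b': '2'}, prefix='display.')
    #     attrs.clear(prefix='display.')                # drop only 'display.*' keys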
class ApplicationPageFile(StringMapFile):
INTERFACE_NAME = 'com.genestack.api.files.IApplicationPageFile'
def get_application_id(self):
return self.invoke('getApplicationId')
class StringMapFileQuery(object):
MAX_LIMIT = 5000
CLASS_NAME = 'com.genestack.api.files.queries.StringMapFileQuery'
__SORT_ORDER_CLASS = 'com.genestack.api.files.queries.StringMapFileQuery$SortOrder'
__SORT_DIRECTION_CLASS = 'com.genestack.api.files.queries.StringMapFileQuery$SortDirection'
ORDER_BY_KEY = 'BY_KEY'
ORDER_BY_VALUE = 'BY_VALUE'
ORDER_DEFAULT = 'DEFAULT'
DIRECTION_DEFAULT = 'DEFAULT'
DIRECTION_ASCENDING = 'ASCENDING'
DIRECTION_DESCENDING = 'DESCENDING'
def __init__(self, string_map_file, prefix=None,
offset=0, limit=MAX_LIMIT,
sort_order=ORDER_DEFAULT,
sort_direction=DIRECTION_DEFAULT
):
"""
Creates a new query to use in StringMapFile's methods
:param string_map_file: file to create query for
:type string_map_file: StringMapFile
:param prefix: prefix to use when retrieving values
:type prefix: basestring
:param offset: starting entry index (zero-based, included)
:type offset: int
:param limit: number of entries
:type limit: int
        :param sort_order: sorting order. Must be one of the ORDER_* constants defined on this class
        :type sort_order: basestring
        :param sort_direction: sorting direction. Must be one of the DIRECTION_* constants defined on this class
:type sort_direction: basestring
"""
validate_type(string_map_file, StringMapFile, accept_none=True)
validate_type(prefix, basestring, accept_none=True)
validate_type(offset, (int, long))
validate_type(limit, (int, long))
validate_type(sort_order, basestring)
validate_type(sort_direction, basestring)
if sort_order not in (self.ORDER_BY_KEY,
self.ORDER_BY_VALUE,
self.ORDER_DEFAULT):
raise GenestackException('Invalid sort order')
if sort_direction not in (self.DIRECTION_DEFAULT,
self.DIRECTION_ASCENDING,
self.DIRECTION_DESCENDING):
raise GenestackException('Invalid sort direction')
self._token = None if string_map_file is None else string_map_file.get_modification_token()
self.prefix = '' if prefix is None else prefix
self.range = QueryRange(offset, limit, self.MAX_LIMIT)
self.sort_order = sort_order
self.sort_direction = sort_direction
@property
def offset(self):
return self.range.offset
@property
def limit(self):
return self.range.limit
def get_next_page_query(self):
"""
        Creates a new query to retrieve the next page of values.
:return: query that can be used to get the next page
:rtype: StringMapFileQuery
"""
result = StringMapFileQuery(None,
prefix=self.prefix,
offset=self.offset + self.limit,
limit=self.limit,
sort_order=self.sort_order,
sort_direction=self.sort_direction)
result._token = self._token
return result
def as_java_object(self):
if self._token is None:
raise GenestackException('Modification token was not set')
object_dict = {
'token': self._token,
'prefix': self.prefix,
'range': self.range.as_java_object(),
'sortOrder': java_object(self.__SORT_ORDER_CLASS, self.sort_order),
'sortDirection': java_object(self.__SORT_DIRECTION_CLASS, self.sort_direction)
}
return java_object(self.CLASS_NAME, object_dict)
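# Illustrative sketch of paging through a StringMapFile with the query above;
# ``attrs`` is a hypothetical StringMapFile instance and ``process`` a
# hypothetical consumer of each page.
#
#     query = StringMapFileQuery(attrs, prefix='display.', limit=100,
#                                sort_order=StringMapFileQuery.ORDER_BY_KEY,
#                                sort_direction=StringMapFileQuery.DIRECTION_ASCENDING)
#     while True:
#         page = attrs.as_map(query)
#         if not page:
#             break
#         process(page)
#         query = query.get_next_page_query()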
|
|
#!/usr/bin/env python
"""
Defines LineSplitter and helper functions.
-----
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License. See http://scipy.org.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Author: Pearu Peterson <[email protected]>
Created: May 2006
-----
"""
__all__ = ['String','string_replace_map','splitquote','splitparen']
import re
class String(str): pass
class ParenString(str): pass
def split2(line, lower=False):
"""
    Split a line into a non-string part and the start of a string part.
    Returns a 2-tuple; the second item is either an empty string or the
    start of a string part.
"""
return LineSplitter(line,lower=lower).split2()
_f2py_str_findall = re.compile(r"_F2PY_STRING_CONSTANT_\d+_").findall
_is_name = re.compile(r'\w*\Z',re.I).match
_is_simple_str = re.compile(r'\w*\Z',re.I).match
_f2py_findall = re.compile(r'(_F2PY_STRING_CONSTANT_\d+_|F2PY_EXPR_TUPLE_\d+)').findall
class string_replace_dict(dict):
"""
    Dictionary object that is callable for applying the map returned
    by the string_replace_map() function.
"""
def __call__(self, line):
for k in _f2py_findall(line):
line = line.replace(k, self[k])
return line
def string_replace_map(line, lower=False,
_cache={'index':0,'pindex':0}):
"""
1) Replaces string constants with symbol `'_F2PY_STRING_CONSTANT_<index>_'`
2) Replaces (expression) with symbol `(F2PY_EXPR_TUPLE_<index>)`
Returns a new line and the replacement map.
"""
items = []
string_map = string_replace_dict()
rev_string_map = {}
for item in splitquote(line, lower=lower)[0]:
if isinstance(item, String) and not _is_simple_str(item[1:-1]):
key = rev_string_map.get(item)
if key is None:
_cache['index'] += 1
index = _cache['index']
key = "_F2PY_STRING_CONSTANT_%s_" % (index)
it = item[1:-1]
string_map[key] = it
rev_string_map[it] = key
items.append(item[0]+key+item[-1])
else:
items.append(item)
newline = ''.join(items)
items = []
expr_keys = []
for item in splitparen(newline):
if isinstance(item, ParenString) and not _is_name(item[1:-1]):
key = rev_string_map.get(item)
if key is None:
_cache['pindex'] += 1
index = _cache['pindex']
key = 'F2PY_EXPR_TUPLE_%s' % (index)
it = item[1:-1].strip()
string_map[key] = it
rev_string_map[it] = key
expr_keys.append(key)
items.append(item[0]+key+item[-1])
else:
items.append(item)
found_keys = set()
for k in expr_keys:
v = string_map[k]
l = _f2py_str_findall(v)
if l:
found_keys = found_keys.union(l)
for k1 in l:
v = v.replace(k1, string_map[k1])
string_map[k] = v
for k in found_keys:
del string_map[k]
return ''.join(items), string_map
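# Illustrative sketch of string_replace_map(); the exact counter values in the
# generated keys depend on the module-level _cache state, so they are shown
# only as a shape.
#
#     line, smap = string_replace_map('call foo("a b", x+1)')
#     # line -> 'call foo(F2PY_EXPR_TUPLE_<n>)'
#     # smap -> {'F2PY_EXPR_TUPLE_<n>': '"a b", x+1'}  (the string constant is
#     #         folded back into the expression before the map is returned)
#     # smap(line) -> 'call foo("a b", x+1)'  since string_replace_dict is
#     #               callable and substitutes every key it finds.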
def splitquote(line, stopchar=None, lower=False, quotechars = '"\''):
"""
Fast LineSplitter
"""
items = []
i = 0
while 1:
try:
char = line[i]; i += 1
except IndexError:
break
l = []
l_append = l.append
nofslashes = 0
if stopchar is None:
# search for string start
while 1:
if char in quotechars and not nofslashes % 2:
stopchar = char
i -= 1
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
l_append(char)
try:
char = line[i]; i += 1
except IndexError:
break
if not l: continue
item = ''.join(l)
if lower: item = item.lower()
items.append(item)
continue
if char==stopchar:
# string starts with quotechar
l_append(char)
try:
char = line[i]; i += 1
except IndexError:
if l:
item = String(''.join(l))
items.append(item)
break
# else continued string
while 1:
if char==stopchar and not nofslashes % 2:
l_append(char)
stopchar = None
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
l_append(char)
try:
char = line[i]; i += 1
except IndexError:
break
if l:
item = String(''.join(l))
items.append(item)
return items, stopchar
class LineSplitterBase(object):
def __iter__(self):
return self
def next(self):
item = ''
while not item:
item = self.get_item() # get_item raises StopIteration
return item
class LineSplitter(LineSplitterBase):
""" Splits a line into non strings and strings. E.g.
abc=\"123\" -> ['abc=','\"123\"']
Handles splitting lines with incomplete string blocks.
"""
def __init__(self, line,
quotechar = None,
lower=False,
):
self.fifo_line = [c for c in line]
self.fifo_line.reverse()
self.quotechar = quotechar
self.lower = lower
def split2(self):
"""
Split line until the first start of a string.
"""
try:
item1 = self.get_item()
except StopIteration:
return '',''
i = len(item1)
l = self.fifo_line[:]
l.reverse()
item2 = ''.join(l)
return item1,item2
def get_item(self):
fifo_pop = self.fifo_line.pop
try:
char = fifo_pop()
except IndexError:
raise StopIteration
fifo_append = self.fifo_line.append
quotechar = self.quotechar
l = []
l_append = l.append
nofslashes = 0
if quotechar is None:
# search for string start
while 1:
if char in '"\'' and not nofslashes % 2:
self.quotechar = char
fifo_append(char)
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
l_append(char)
try:
char = fifo_pop()
except IndexError:
break
item = ''.join(l)
if self.lower: item = item.lower()
return item
if char==quotechar:
# string starts with quotechar
l_append(char)
try:
char = fifo_pop()
except IndexError:
return String(''.join(l))
# else continued string
while 1:
if char==quotechar and not nofslashes % 2:
l_append(char)
self.quotechar = None
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
l_append(char)
try:
char = fifo_pop()
except IndexError:
break
return String(''.join(l))
def splitparen(line,paren='()'):
"""
Fast LineSplitterParen.
"""
stopchar = None
startchar, endchar = paren[0],paren[1]
items = []
i = 0
while 1:
try:
char = line[i]; i += 1
except IndexError:
break
nofslashes = 0
l = []
l_append = l.append
if stopchar is None:
# search for parenthesis start
while 1:
if char==startchar and not nofslashes % 2:
stopchar = endchar
i -= 1
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
l_append(char)
try:
char = line[i]; i += 1
except IndexError:
break
item = ''.join(l)
else:
nofstarts = 0
while 1:
if char==stopchar and not nofslashes % 2 and nofstarts==1:
l_append(char)
stopchar = None
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
if char==startchar:
nofstarts += 1
elif char==endchar:
nofstarts -= 1
l_append(char)
try:
char = line[i]; i += 1
except IndexError:
break
item = ParenString(''.join(l))
items.append(item)
return items
class LineSplitterParen(LineSplitterBase):
""" Splits a line into strings and strings with parenthesis. E.g.
a(x) = b(c,d) -> ['a','(x)',' = b','(c,d)']
"""
def __init__(self, line, paren = '()'):
self.fifo_line = [c for c in line]
self.fifo_line.reverse()
self.startchar = paren[0]
self.endchar = paren[1]
self.stopchar = None
def get_item(self):
fifo_pop = self.fifo_line.pop
try:
char = fifo_pop()
except IndexError:
raise StopIteration
fifo_append = self.fifo_line.append
startchar = self.startchar
endchar = self.endchar
stopchar = self.stopchar
l = []
l_append = l.append
nofslashes = 0
if stopchar is None:
# search for parenthesis start
while 1:
if char==startchar and not nofslashes % 2:
self.stopchar = endchar
fifo_append(char)
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
l_append(char)
try:
char = fifo_pop()
except IndexError:
break
item = ''.join(l)
return item
nofstarts = 0
while 1:
if char==stopchar and not nofslashes % 2 and nofstarts==1:
l_append(char)
self.stopchar = None
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
if char==startchar:
nofstarts += 1
elif char==endchar:
nofstarts -= 1
l_append(char)
try:
char = fifo_pop()
except IndexError:
break
return ParenString(''.join(l))
def test():
splitter = LineSplitter('abc\\\' def"12\\"3""56"dfad\'a d\'')
l = [item for item in splitter]
assert l==['abc\\\' def','"12\\"3"','"56"','dfad','\'a d\''],`l`
assert splitter.quotechar is None
l,stopchar=splitquote('abc\\\' def"12\\"3""56"dfad\'a d\'')
assert l==['abc\\\' def','"12\\"3"','"56"','dfad','\'a d\''],`l`
assert stopchar is None
splitter = LineSplitter('"abc123&')
l = [item for item in splitter]
assert l==['"abc123&'],`l`
assert splitter.quotechar=='"'
l,stopchar = splitquote('"abc123&')
assert l==['"abc123&'],`l`
assert stopchar=='"'
splitter = LineSplitter(' &abc"123','"')
l = [item for item in splitter]
assert l==[' &abc"','123']
assert splitter.quotechar is None
l,stopchar = splitquote(' &abc"123','"')
assert l==[' &abc"','123']
assert stopchar is None
l = split2('')
assert l==('',''),`l`
l = split2('12')
assert l==('12',''),`l`
l = split2('1"a"//"b"')
assert l==('1','"a"//"b"'),`l`
l = split2('"ab"')
assert l==('','"ab"'),`l`
splitter = LineSplitterParen('a(b) = b(x,y(1)) b\((a)\)')
l = [item for item in splitter]
assert l==['a', '(b)', ' = b', '(x,y(1))', ' b\\(', '(a)', '\\)'],`l`
l = splitparen('a(b) = b(x,y(1)) b\((a)\)')
assert l==['a', '(b)', ' = b', '(x,y(1))', ' b\\(', '(a)', '\\)'],`l`
l = string_replace_map('a()')
print l
print 'ok'
if __name__ == '__main__':
test()
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.essential_contacts_v1.types import service
class ListContactsPager:
"""A pager for iterating through ``list_contacts`` requests.
This class thinly wraps an initial
:class:`google.cloud.essential_contacts_v1.types.ListContactsResponse` object, and
provides an ``__iter__`` method to iterate through its
``contacts`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListContacts`` requests and continue to iterate
through the ``contacts`` field on the
corresponding responses.
All the usual :class:`google.cloud.essential_contacts_v1.types.ListContactsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., service.ListContactsResponse],
request: service.ListContactsRequest,
response: service.ListContactsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.essential_contacts_v1.types.ListContactsRequest):
The initial request object.
response (google.cloud.essential_contacts_v1.types.ListContactsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListContactsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[service.ListContactsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[service.Contact]:
for page in self.pages:
yield from page.contacts
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListContactsAsyncPager:
"""A pager for iterating through ``list_contacts`` requests.
This class thinly wraps an initial
:class:`google.cloud.essential_contacts_v1.types.ListContactsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``contacts`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListContacts`` requests and continue to iterate
through the ``contacts`` field on the
corresponding responses.
All the usual :class:`google.cloud.essential_contacts_v1.types.ListContactsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ListContactsResponse]],
request: service.ListContactsRequest,
response: service.ListContactsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.essential_contacts_v1.types.ListContactsRequest):
The initial request object.
response (google.cloud.essential_contacts_v1.types.ListContactsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListContactsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[service.ListContactsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[service.Contact]:
async def async_generator():
async for page in self.pages:
for response in page.contacts:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ComputeContactsPager:
"""A pager for iterating through ``compute_contacts`` requests.
This class thinly wraps an initial
:class:`google.cloud.essential_contacts_v1.types.ComputeContactsResponse` object, and
provides an ``__iter__`` method to iterate through its
``contacts`` field.
If there are more pages, the ``__iter__`` method will make additional
``ComputeContacts`` requests and continue to iterate
through the ``contacts`` field on the
corresponding responses.
All the usual :class:`google.cloud.essential_contacts_v1.types.ComputeContactsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., service.ComputeContactsResponse],
request: service.ComputeContactsRequest,
response: service.ComputeContactsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.essential_contacts_v1.types.ComputeContactsRequest):
The initial request object.
response (google.cloud.essential_contacts_v1.types.ComputeContactsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ComputeContactsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[service.ComputeContactsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[service.Contact]:
for page in self.pages:
yield from page.contacts
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ComputeContactsAsyncPager:
"""A pager for iterating through ``compute_contacts`` requests.
This class thinly wraps an initial
:class:`google.cloud.essential_contacts_v1.types.ComputeContactsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``contacts`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ComputeContacts`` requests and continue to iterate
through the ``contacts`` field on the
corresponding responses.
All the usual :class:`google.cloud.essential_contacts_v1.types.ComputeContactsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ComputeContactsResponse]],
request: service.ComputeContactsRequest,
response: service.ComputeContactsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.essential_contacts_v1.types.ComputeContactsRequest):
The initial request object.
response (google.cloud.essential_contacts_v1.types.ComputeContactsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ComputeContactsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[service.ComputeContactsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[service.Contact]:
async def async_generator():
async for page in self.pages:
for response in page.contacts:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
|
|
# -*- coding: utf-8 -*-
import collections
import datetime
import errno
import gzip
import io
import os
import signal
import sqlite3
import stat
import time
import threading
from beaver.utils import IS_GZIPPED_FILE, REOPEN_FILES, eglob, multiline_merge
from beaver.unicode_dammit import ENCODINGS
class Worker(object):
"""Looks for changes in all files of a directory.
This is useful for watching log file changes in real-time.
It also supports files rotation.
Example:
>>> def callback(filename, lines):
... print filename, lines
...
    >>> l = Worker(beaver_config, create_queue_consumer, callback)
>>> l.loop()
"""
def __init__(self, beaver_config, queue_consumer_function, callback, logger=None):
"""Arguments:
            (BeaverConfig) @beaver_config:
                object containing global configuration
            (callable) @queue_consumer_function:
                callable that creates a queue consumer process
            (Logger) @logger:
                object containing a python logger
(callable) @callback:
a function which is called every time a new line in a
file being watched is found;
this is called with "filename" and "lines" arguments.
"""
self._beaver_config = beaver_config
self._callback = callback
self._create_queue_consumer = queue_consumer_function
self._file_map = {}
self._folder = self._beaver_config.get('path')
self._last_file_mapping_update = {}
self._logger = logger
self._number_of_consumer_processes = int(self._beaver_config.get('number_of_consumer_processes'))
self._proc = [None] * self._number_of_consumer_processes
self._sincedb_path = self._beaver_config.get('sincedb_path')
self._update_time = None
self._running = True
if not callable(self._callback):
raise RuntimeError("Callback for worker is not callable")
self.update_files()
self._seek_to_end()
signal.signal(signal.SIGTERM, self.close)
def __del__(self):
"""Closes all files"""
self.close()
def close(self, signalnum=None, frame=None):
        """Closes all currently open file pointers"""
        self._running = False
for id, data in self._file_map.iteritems():
data['file'].close()
self._sincedb_update_position(data['file'], fid=id, force_update=True)
self._file_map.clear()
for n in range(0,self._number_of_consumer_processes):
if self._proc[n] is not None and self._proc[n].is_alive():
self._logger.debug("Terminate Process: " + str(n))
self._proc[n].terminate()
self._proc[n].join()
def listdir(self):
"""List directory and filter files by extension.
You may want to override this to add extra logic or
        globbing support.
"""
if self._folder is not None:
ls = os.listdir(self._folder)
return [x for x in ls if os.path.splitext(x)[1][1:] == "log"]
else:
return []
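    # Illustrative sketch of the override suggested in the docstring above;
    # the subclass is hypothetical and globs for "*.log" instead of filtering
    # on the extension:
    #
    #     import glob
    #
    #     class GlobbingWorker(Worker):
    #         def listdir(self):
    #             if self._folder is None:
    #                 return []
    #             pattern = os.path.join(self._folder, '*.log')
    #             return [os.path.basename(p) for p in glob.glob(pattern)]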
def create_queue_consumer_if_required(self, interval=5.0):
for n in range(0,self._number_of_consumer_processes):
if not (self._proc[n] and self._proc[n].is_alive()):
self._logger.debug("creating consumer process: " + str(n))
self._proc[n] = self._create_queue_consumer()
timer = threading.Timer(interval, self.create_queue_consumer_if_required)
timer.start()
def loop(self, interval=0.1, async=False):
"""Start the loop.
If async is True make one loop then return.
"""
self.create_queue_consumer_if_required()
while self._running:
t = time.time()
if int(time.time()) - self._update_time > self._beaver_config.get('discover_interval'):
self.update_files()
self._ensure_files_are_good(current_time=t)
unwatch_list = []
for fid, data in self._file_map.iteritems():
try:
self._run_pass(fid, data['file'])
except IOError, e:
if e.errno == errno.ESTALE:
unwatch_list.append(fid)
self.unwatch_list(unwatch_list)
if async:
return
self._logger.debug("Iteration took {0:.6f}".format(time.time() - t))
time.sleep(interval)
def _run_pass(self, fid, file):
"""Read lines from a file and performs a callback against them"""
while True:
try:
data = file.read(4096)
except IOError, e:
if e.errno == errno.ESTALE:
self.active = False
return False
lines = self._buffer_extract(data=data, fid=fid)
if not lines:
# Before returning, check if an event (maybe partial) is waiting for too long.
if self._file_map[fid]['current_event'] and time.time() - self._file_map[fid]['last_activity'] > 1:
event = '\n'.join(self._file_map[fid]['current_event'])
self._file_map[fid]['current_event'].clear()
self._callback_wrapper(filename=file.name, lines=[event])
break
self._file_map[fid]['last_activity'] = time.time()
if self._file_map[fid]['multiline_regex_after'] or self._file_map[fid]['multiline_regex_before']:
# Multiline is enabled for this file.
events = multiline_merge(
lines,
self._file_map[fid]['current_event'],
self._file_map[fid]['multiline_regex_after'],
self._file_map[fid]['multiline_regex_before'])
else:
events = lines
if events:
self._callback_wrapper(filename=file.name, lines=events)
if self._sincedb_path:
current_line_count = len(lines)
self._sincedb_update_position(file, fid=fid, lines=current_line_count)
self._sincedb_update_position(file, fid=fid)
def _buffer_extract(self, data, fid):
"""
Extract takes an arbitrary string of input data and returns an array of
tokenized entities, provided there were any available to extract. This
makes for easy processing of datagrams using a pattern like:
tokenizer.extract(data).map { |entity| Decode(entity) }.each do ..."""
# Extract token-delimited entities from the input string with the split command.
# There's a bit of craftiness here with the -1 parameter. Normally split would
# behave no differently regardless of if the token lies at the very end of the
# input buffer or not (i.e. a literal edge case) Specifying -1 forces split to
# return "" in this case, meaning that the last entry in the list represents a
# new segment of data where the token has not been encountered
entities = collections.deque(data.split(self._file_map[fid]['delimiter'], -1))
# Check to see if the buffer has exceeded capacity, if we're imposing a limit
if self._file_map[fid]['size_limit']:
if self._file_map[fid]['input_size'] + len(entities[0]) > self._file_map[fid]['size_limit']:
raise Exception('input buffer full')
self._file_map[fid]['input_size'] += len(entities[0])
# Move the first entry in the resulting array into the input buffer. It represents
# the last segment of a token-delimited entity unless it's the only entry in the list.
first_entry = entities.popleft()
if len(first_entry) > 0:
self._file_map[fid]['input'].append(first_entry)
# If the resulting array from the split is empty, the token was not encountered
# (not even at the end of the buffer). Since we've encountered no token-delimited
# entities this go-around, return an empty array.
if len(entities) == 0:
return []
# At this point, we've hit a token, or potentially multiple tokens. Now we can bring
# together all the data we've buffered from earlier calls without hitting a token,
# and add it to our list of discovered entities.
entities.appendleft(''.join(self._file_map[fid]['input']))
# Now that we've hit a token, joined the input buffer and added it to the entities
# list, we can go ahead and clear the input buffer. All of the segments that were
# stored before the join can now be garbage collected.
self._file_map[fid]['input'].clear()
# The last entity in the list is not token delimited, however, thanks to the -1
# passed to split. It represents the beginning of a new list of as-yet-untokenized
# data, so we add it to the start of the list.
self._file_map[fid]['input'].append(entities.pop())
# Set the new input buffer size, provided we're keeping track
if self._file_map[fid]['size_limit']:
self._file_map[fid]['input_size'] = len(self._file_map[fid]['input'][0])
# Now we're left with the list of extracted token-delimited entities we wanted
# in the first place. Hooray!
return entities
# Flush the contents of the input buffer, i.e. return the input buffer even though
# a token has not yet been encountered
def _buffer_flush(self, fid):
buf = ''.join(self._file_map[fid]['input'])
self._file_map[fid]['input'].clear()
return buf
# Is the buffer empty?
def _buffer_empty(self, fid):
return len(self._file_map[fid]['input']) == 0
def _seek_to_end(self):
unwatch_list = []
# The first time we run the script we move all file markers at EOF.
# In case of files created afterwards we don't do this.
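# start_position for each file is "beginning", "end", or a line number
# (the sincedb position, when present, takes precedence). "beginning"
# leaves the freshly opened file at offset 0, a number skips that many
# lines, and "end" reads through the file so the pointer lands at EOF.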
for fid, data in self._file_map.iteritems():
self._logger.debug("[{0}] - getting start position {1}".format(fid, data['file'].name))
start_position = self._beaver_config.get_field('start_position', data['file'].name)
is_active = data['active']
if self._sincedb_path:
sincedb_start_position = self._sincedb_start_position(data['file'], fid=fid)
if sincedb_start_position:
start_position = sincedb_start_position
if start_position == "beginning":
continue
line_count = 0
if str(start_position).isdigit():
self._logger.debug("[{0}] - going to start position {1} for {2}".format(fid, start_position, data['file'].name))
start_position = int(start_position)
for encoding in ENCODINGS:
try:
line_count = 0
while data['file'].readline():
line_count += 1
if line_count == start_position:
break
except UnicodeDecodeError:
self._logger.debug("[{0}] - UnicodeDecodeError raised for {1} with encoding {2}".format(fid, data['file'].name, data['encoding']))
data['file'] = self.open(data['file'].name, encoding=encoding)
if not data['file']:
unwatch_list.append(fid)
is_active = False
break
data['encoding'] = encoding
if line_count != start_position:
self._logger.debug("[{0}] - file at different position than {1}, assuming manual truncate for {2}".format(fid, start_position, data['file'].name))
data['file'].seek(0, os.SEEK_SET)
start_position == "beginning"
if not is_active:
continue
if start_position == "beginning":
continue
if start_position == "end":
self._logger.debug("[{0}] - getting end position for {1}".format(fid, data['file'].name))
for encoding in ENCODINGS:
try:
line_count = 0
while data['file'].readline():
line_count += 1
break
except UnicodeDecodeError:
self._logger.debug("[{0}] - UnicodeDecodeError raised for {1} with encoding {2}".format(fid, data['file'].name, data['encoding']))
data['file'] = self.open(data['file'].name, encoding=encoding)
if not data['file']:
unwatch_list.append(fid)
is_active = False
break
data['encoding'] = encoding
if not is_active:
continue
current_position = data['file'].tell()
self._logger.debug("[{0}] - line count {1} for {2}".format(fid, line_count, data['file'].name))
self._sincedb_update_position(data['file'], fid=fid, lines=line_count, force_update=True)
# Reset this so lines processed just after this initialization
# will update the sincedb. Without this, if beaver runs for less than
# sincedb_write_interval it will always re-process the last lines.
data['update_time'] = 0
tail_lines = self._beaver_config.get_field('tail_lines', data['file'].name)
tail_lines = int(tail_lines)
if tail_lines:
encoding = data['encoding']
lines = self.tail(data['file'].name, encoding=encoding, window=tail_lines, position=current_position)
if lines:
if self._file_map[fid]['multiline_regex_after'] or self._file_map[fid]['multiline_regex_before']:
# Multiline is enabled for this file.
events = multiline_merge(
lines,
self._file_map[fid]['current_event'],
self._file_map[fid]['multiline_regex_after'],
self._file_map[fid]['multiline_regex_before'])
else:
events = lines
self._callback_wrapper(filename=data['file'].name, lines=events)
self.unwatch_list(unwatch_list)
def _callback_wrapper(self, filename, lines):
now = datetime.datetime.utcnow()
timestamp = now.strftime("%Y-%m-%dT%H:%M:%S") + ".%03d" % (now.microsecond / 1000) + "Z"
self._callback(('callback', {
'fields': self._beaver_config.get_field('fields', filename),
'filename': filename,
'format': self._beaver_config.get_field('format', filename),
'ignore_empty': self._beaver_config.get_field('ignore_empty', filename),
'lines': lines,
'timestamp': timestamp,
'tags': self._beaver_config.get_field('tags', filename),
'type': self._beaver_config.get_field('type', filename),
}))
def _sincedb_init(self):
"""Initializes the sincedb schema in an sqlite db"""
if not self._sincedb_path:
return
if not os.path.exists(self._sincedb_path):
self._logger.debug('Initializing sincedb sqlite schema')
conn = sqlite3.connect(self._sincedb_path, isolation_level=None)
conn.execute("""
create table sincedb (
fid text primary key,
filename text,
position integer default 1
);
""")
conn.close()
def _sincedb_update_position(self, file, fid=None, lines=0, force_update=False):
"""Retrieves the starting position from the sincedb sql db for a given file
Returns a boolean representing whether or not it updated the record
"""
if not self._sincedb_path:
return False
if not fid:
fid = self.get_file_id(os.stat(file.name))
self._file_map[fid]['line'] = self._file_map[fid]['line'] + lines
old_count = self._file_map[fid]['line_in_sincedb']
lines = self._file_map[fid]['line']
current_time = int(time.time())
update_time = self._file_map[fid]['update_time']
if not force_update:
sincedb_write_interval = self._beaver_config.get_field('sincedb_write_interval', file.name)
if update_time and current_time - update_time <= sincedb_write_interval:
return False
if old_count == lines:
return False
self._sincedb_init()
self._file_map[fid]['update_time'] = current_time
self._logger.debug("[{0}] - updating sincedb for logfile {1} from {2} to {3}".format(fid, file.name, old_count, lines))
conn = sqlite3.connect(self._sincedb_path, isolation_level=None)
cursor = conn.cursor()
query = "insert or ignore into sincedb (fid, filename) values (:fid, :filename);"
cursor.execute(query, {
'fid': fid,
'filename': file.name
})
query = "update sincedb set position = :position where fid = :fid and filename = :filename"
cursor.execute(query, {
'fid': fid,
'filename': file.name,
'position': int(lines),
})
conn.close()
self._file_map[fid]['line_in_sincedb'] = lines
return True
def _sincedb_start_position(self, file, fid=None):
"""Retrieves the starting position from the sincedb sql db
for a given file
"""
if not self._sincedb_path:
return None
if not fid:
fid = self.get_file_id(os.stat(file.name))
self._sincedb_init()
conn = sqlite3.connect(self._sincedb_path, isolation_level=None)
cursor = conn.cursor()
cursor.execute("select position from sincedb where fid = :fid and filename = :filename", {
'fid': fid,
'filename': file.name
})
start_position = None
for row in cursor.fetchall():
start_position, = row
return start_position
def update_files(self):
"""Ensures all files are properly loaded.
Detects new files, file removals, file rotation, and truncation.
On non-linux platforms, it will also manually reload the file for tailing.
Note that this hack is necessary because EOF is cached on BSD systems.
"""
self._update_time = int(time.time())
ls = []
files = []
if len(self._beaver_config.get('globs')) > 0:
for name, exclude in self._beaver_config.get('globs').items():
globbed = [os.path.realpath(filename) for filename in eglob(name, exclude)]
files.extend(globbed)
self._beaver_config.addglob(name, globbed)
self._callback(("addglob", (name, globbed)))
else:
for name in self.listdir():
files.append(os.path.realpath(os.path.join(self._folder, name)))
for absname in files:
try:
st = os.stat(absname)
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
if not stat.S_ISREG(st.st_mode):
continue
elif int(self._beaver_config.get('ignore_old_files')) > 0 and \
datetime.datetime.fromtimestamp(st.st_mtime) < (datetime.datetime.today() - datetime.timedelta(days=int(self._beaver_config.get('ignore_old_files')))):
self._logger.debug('[{0}] - file {1} older than {2} days, so ignoring it'.format(self.get_file_id(st), absname, self._beaver_config.get('ignore_old_files')))
continue
fid = self.get_file_id(st)
ls.append((fid, absname))
# add new ones
for fid, fname in ls:
if fid not in self._file_map:
self.watch(fname)
def _ensure_files_are_good(self, current_time):
"""Every N seconds, ensures that the file we are tailing is the file we expect to be tailing"""
# We cannot watch/unwatch in a single iteration
rewatch_list = []
unwatch_list = []
# check existent files
for fid, data in self._file_map.iteritems():
filename = data['file'].name
stat_interval = self._beaver_config.get_field('stat_interval', filename)
if filename in self._last_file_mapping_update and current_time - self._last_file_mapping_update[filename] <= stat_interval:
continue
self._last_file_mapping_update[filename] = time.time()
try:
st = os.stat(data['file'].name)
except EnvironmentError, err:
if err.errno == errno.ENOENT:
unwatch_list.append(fid)
else:
raise
else:
if fid != self.get_file_id(st):
self._logger.info("[{0}] - file rotated {1}".format(fid, data['file'].name))
rewatch_list.append(fid)
elif data['file'].tell() > st.st_size:
if st.st_size == 0 and self._beaver_config.get_field('ignore_truncate', data['file'].name):
self._logger.info("[{0}] - file size is 0 {1}. ".format(fid, data['file'].name) +
"If you use another tool (i.e. logrotate) to truncate " +
"the file, your application may continue to write to " +
"the offset it last wrote later. In such a case, we'd " +
"better do nothing here")
continue
self._logger.info("[{0}] - file truncated {1}".format(fid, data['file'].name))
rewatch_list.append(fid)
elif REOPEN_FILES:
self._logger.debug("[{0}] - file reloaded (non-linux) {1}".format(fid, data['file'].name))
position = data['file'].tell()
fname = data['file'].name
data['file'].close()
file = self.open(fname, encoding=data['encoding'])
if file:
file.seek(position)
self._file_map[fid]['file'] = file
self.unwatch_list(unwatch_list)
self.rewatch_list(rewatch_list)
def rewatch_list(self, rewatch_list):
for fid in rewatch_list:
if fid not in self._file_map:
continue
f = self._file_map[fid]['file']
filename = f.name
self.unwatch(f, fid)
self.watch(filename)
def unwatch_list(self, unwatch_list):
for fid in unwatch_list:
if fid not in self._file_map:
continue
f = self._file_map[fid]['file']
self.unwatch(f, fid)
def unwatch(self, file, fid):
"""file no longer exists; if it has been renamed
try to read it for the last time in case the
log rotator has written something in it.
"""
try:
if file:
self._run_pass(fid, file)
if self._file_map[fid]['current_event']:
event = '\n'.join(self._file_map[fid]['current_event'])
self._file_map[fid]['current_event'].clear()
self._callback_wrapper(filename=file.name, lines=[event])
except IOError:
# Silently ignore any IOErrors -- file is gone
pass
if file:
self._logger.info("[{0}] - un-watching logfile {1}".format(fid, file.name))
else:
self._logger.info("[{0}] - un-watching logfile".format(fid))
self._file_map[fid]['file'].close()
del self._file_map[fid]
def watch(self, fname):
"""Opens a file for log tailing"""
try:
file = self.open(fname, encoding=self._beaver_config.get_field('encoding', fname))
if file:
fid = self.get_file_id(os.stat(fname))
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
if file:
self._logger.info("[{0}] - watching logfile {1}".format(fid, fname))
self._file_map[fid] = {
'current_event': collections.deque([]),
'delimiter': self._beaver_config.get_field('delimiter', fname),
'encoding': self._beaver_config.get_field('encoding', fname),
'file': file,
'input': collections.deque([]),
'input_size': 0,
'last_activity': time.time(),
'line': 0,
'line_in_sincedb': 0,
'multiline_regex_after': self._beaver_config.get_field('multiline_regex_after', fname),
'multiline_regex_before': self._beaver_config.get_field('multiline_regex_before', fname),
'size_limit': self._beaver_config.get_field('size_limit', fname),
'update_time': None,
'active': True,
}
def open(self, filename, encoding=None):
"""Opens a file with the appropriate call"""
try:
if IS_GZIPPED_FILE.search(filename):
_file = gzip.open(filename, "rb")
else:
file_encoding = self._beaver_config.get_field('encoding', filename)
if encoding:
_file = io.open(filename, "r", encoding=encoding, errors='replace')
elif file_encoding:
_file = io.open(filename, "r", encoding=file_encoding, errors='replace')
else:
_file = io.open(filename, "r", errors='replace')
except IOError, e:
self._logger.warning(str(e))
_file = None
return _file
def tail(self, fname, encoding, window, position=None):
"""Read last N lines from file fname."""
if window <= 0:
raise ValueError('invalid window %r' % window)
encodings = ENCODINGS
if encoding:
encodings = [encoding] + ENCODINGS
for enc in encodings:
try:
f = self.open(fname, encoding=enc)
if not f:
return []
return self.tail_read(f, window, position=position)
except IOError, err:
if err.errno == errno.ENOENT:
return []
raise
except UnicodeDecodeError:
pass
@staticmethod
def get_file_id(st):
return "%xg%x" % (st.st_dev, st.st_ino)
@classmethod
def tail_read(cls, f, window, position=None):
BUFSIZ = 1024
# open() was overridden and file was opened in text
# mode; read() will return a string instead of bytes.
encoded = getattr(f, 'encoding', False)
CR = '\n' if encoded else b'\n'
data = '' if encoded else b''
f.seek(0, os.SEEK_END)
if position is None:
position = f.tell()
block = -1
exit = False
read = BUFSIZ
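# Read BUFSIZ-sized blocks backwards from `position` until the collected
# data contains more than `window` newlines, then keep only the last
# `window` lines. `block` counts how many whole blocks back the next read
# starts; once the start of the file is reached, a final shorter read is
# performed and the loop exits.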
while not exit:
step = (block * BUFSIZ) + position
if step < 0:
step = 0
read = ((block + 1) * BUFSIZ) + position
exit = True
f.seek(step, os.SEEK_SET)
newdata = f.read(read)
data = newdata + data
if data.count(CR) > window:
break
else:
block -= 1
return data.splitlines()[-window:]
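# Illustrative sketch (not part of the original Worker above): a minimal,
# standalone version of the token-buffering technique that
# Worker._buffer_extract implements, assuming a plain '\n' delimiter and no
# size limit. It shows how split(delimiter, -1) always leaves the trailing,
# not-yet-terminated fragment buffered for the next call.
class _LineBuffer(object):
    def __init__(self, delimiter='\n'):
        self._delimiter = delimiter
        self._buffer = []

    def extract(self, data):
        """Return the complete, delimiter-terminated entities found so far."""
        entities = data.split(self._delimiter, -1)
        # The first chunk completes whatever was buffered previously.
        entities[0] = ''.join(self._buffer) + entities[0]
        # The last chunk is not terminated yet; keep it for the next call.
        self._buffer = [entities.pop()]
        return entities

# Example: buf = _LineBuffer(); buf.extract('alpha\nbra') yields ['alpha'],
# and buf.extract('vo\ncharlie') then yields ['bravo'], with 'charlie'
# still buffered for the next call.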
|
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import optparse
from optparse import OptionGroup
import sys
import urllib2
import time
import json
import base64
import xml
import xml.etree.ElementTree as ET
import os
import logging
import ssl
#logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('AmbariConfig')
HTTP_PROTOCOL = 'http'
HTTPS_PROTOCOL = 'https'
SET_ACTION = 'set'
GET_ACTION = 'get'
DELETE_ACTION = 'delete'
GET_REQUEST_TYPE = 'GET'
PUT_REQUEST_TYPE = 'PUT'
# JSON Keywords
PROPERTIES = 'properties'
ATTRIBUTES = 'properties_attributes'
CLUSTERS = 'Clusters'
DESIRED_CONFIGS = 'desired_configs'
TYPE = 'type'
TAG = 'tag'
ITEMS = 'items'
TAG_PREFIX = 'version'
CLUSTERS_URL = '/api/v1/clusters/{0}'
DESIRED_CONFIGS_URL = CLUSTERS_URL + '?fields=Clusters/desired_configs'
CONFIGURATION_URL = CLUSTERS_URL + '/configurations?type={1}&tag={2}'
FILE_FORMAT = \
"""
"properties": {
"key1": "value1"
"key2": "value2"
},
"properties_attributes": {
"attribute": {
"key1": "value1"
"key2": "value2"
}
}
"""
class UsageException(Exception):
pass
def api_accessor(host, login, password, protocol, port):
def do_request(api_url, request_type=GET_REQUEST_TYPE, request_body=''):
try:
url = '{0}://{1}:{2}{3}'.format(protocol, host, port, api_url)
admin_auth = base64.encodestring('%s:%s' % (login, password)).replace('\n', '')
request = urllib2.Request(url)
request.add_header('Authorization', 'Basic %s' % admin_auth)
request.add_header('X-Requested-By', 'ambari')
request.add_data(request_body)
request.get_method = lambda: request_type
sslContext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
response = urllib2.urlopen(request, context=sslContext)
response_body = response.read()
except Exception as exc:
raise Exception('Problem with accessing api. Reason: {0}'.format(exc))
return response_body
return do_request
def get_config_tag(cluster, config_type, accessor):
response = accessor(DESIRED_CONFIGS_URL.format(cluster))
try:
desired_tags = json.loads(response)
current_config_tag = desired_tags[CLUSTERS][DESIRED_CONFIGS][config_type][TAG]
except Exception as exc:
raise Exception('"{0}" not found in server response. Response:\n{1}'.format(config_type, response))
return current_config_tag
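# Illustrative only: get_config_tag() above expects the Ambari
# ?fields=Clusters/desired_configs response to look roughly like
#
#   {
#     "Clusters": {
#       "desired_configs": {
#         "core-site": {"tag": "version1"},
#         "hdfs-site": {"tag": "version1511234567890"}
#       }
#     }
#   }
#
# so desired_tags[CLUSTERS][DESIRED_CONFIGS][config_type][TAG] resolves to
# the current tag of the requested config type.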
def create_new_desired_config(cluster, config_type, properties, attributes, accessor):
new_tag = TAG_PREFIX + str(int(time.time() * 1000000))
new_config = {
CLUSTERS: {
DESIRED_CONFIGS: {
TYPE: config_type,
TAG: new_tag,
PROPERTIES: properties
}
}
}
if len(attributes.keys()) > 0:
new_config[CLUSTERS][DESIRED_CONFIGS][ATTRIBUTES] = attributes
request_body = json.dumps(new_config)
new_file = 'doSet_{0}.json'.format(new_tag)
logger.info('### PUTting json into: {0}'.format(new_file))
output_to_file(new_file)(new_config)
accessor(CLUSTERS_URL.format(cluster), PUT_REQUEST_TYPE, request_body)
logger.info('### NEW Site:{0}, Tag:{1}'.format(config_type, new_tag))
def get_current_config(cluster, config_type, accessor):
config_tag = get_config_tag(cluster, config_type, accessor)
logger.info("### on (Site:{0}, Tag:{1})".format(config_type, config_tag))
logger.info("### URL:" + CONFIGURATION_URL.format(cluster, config_type, config_tag))
response = accessor(CONFIGURATION_URL.format(cluster, config_type, config_tag))
config_by_tag = json.loads(response)
current_config = config_by_tag[ITEMS][0]
return current_config[PROPERTIES], current_config.get(ATTRIBUTES, {})
def update_config(cluster, config_type, config_updater, accessor):
properties, attributes = config_updater(cluster, config_type, accessor)
create_new_desired_config(cluster, config_type, properties, attributes, accessor)
def update_specific_property(config_name, config_value):
def update(cluster, config_type, accessor):
properties, attributes = get_current_config(cluster, config_type, accessor)
properties[config_name] = config_value
return properties, attributes
return update
def update_from_xml(config_file):
def update(cluster, config_type, accessor):
return read_xml_data_to_map(config_file)
return update
# Uses an ElementTree parser to read the XML data into a map
def read_xml_data_to_map(path):
configurations = {}
properties_attributes = {}
tree = ET.parse(path)
root = tree.getroot()
for properties in root.getiterator('property'):
name = properties.find('name')
value = properties.find('value')
final = properties.find('final')
if name != None:
name_text = name.text if name.text else ""
else:
logger.warn("No name is found for one of the properties in {0}, ignoring it".format(path))
continue
if value != None:
value_text = value.text if value.text else ""
else:
logger.warn("No value is found for \"{0}\" in {1}, using empty string for it".format(name_text, path))
value_text = ""
if final != None:
final_text = final.text if final.text else ""
properties_attributes[name_text] = final_text
configurations[name_text] = value_text
return configurations, {"final" : properties_attributes}
def update_from_file(config_file):
def update(cluster, config_type, accessor):
try:
with open(config_file) as in_file:
file_content = in_file.read()
except Exception as e:
raise Exception('Cannot find file "{0}" to PUT'.format(config_file))
try:
file_properties = json.loads(file_content)
except Exception as e:
raise Exception('File "{0}" should be in the following JSON format ("properties_attributes" is optional):\n{1}'.format(config_file, FILE_FORMAT))
new_properties = file_properties.get(PROPERTIES, {})
new_attributes = file_properties.get(ATTRIBUTES, {})
logger.info('### PUTting file: "{0}"'.format(config_file))
return new_properties, new_attributes
return update
def delete_specific_property(config_name):
def update(cluster, config_type, accessor):
properties, attributes = get_current_config(cluster, config_type, accessor)
properties.pop(config_name, None)
for attribute_values in attributes.values():
attribute_values.pop(config_name, None)
return properties, attributes
return update
def output_to_file(filename):
def output(config):
with open(filename, 'w') as out_file:
json.dump(config, out_file, indent=2)
return output
def output_to_console(config):
print json.dumps(config, indent=2)
def get_config(cluster, config_type, accessor, output):
properties, attributes = get_current_config(cluster, config_type, accessor)
config = {PROPERTIES: properties}
if len(attributes.keys()) > 0:
config[ATTRIBUTES] = attributes
output(config)
def set_properties(cluster, config_type, args, accessor):
logger.info('### Performing "set":')
if len(args) == 1:
config_file = args[0]
root, ext = os.path.splitext(config_file)
if ext == ".xml":
updater = update_from_xml(config_file)
elif ext == ".json":
updater = update_from_file(config_file)
else:
logger.error("File extension {0} doesn't supported".format(ext))
return -1
logger.info('### from file {0}'.format(config_file))
else:
config_name = args[0]
config_value = args[1]
updater = update_specific_property(config_name, config_value)
logger.info('### new property - "{0}":"{1}"'.format(config_name, config_value))
update_config(cluster, config_type, updater, accessor)
return 0
def delete_properties(cluster, config_type, args, accessor):
logger.info('### Performing "delete":')
if len(args) == 0:
logger.error("Not enough arguments. Expected config key.")
return -1
config_name = args[0]
logger.info('### on property "{0}"'.format(config_name))
update_config(cluster, config_type, delete_specific_property(config_name), accessor)
return 0
def get_properties(cluster, config_type, args, accessor):
logger.info("### Performing \"get\" content:")
if len(args) > 0:
filename = args[0]
output = output_to_file(filename)
logger.info('### to file "{0}"'.format(filename))
else:
output = output_to_console
get_config(cluster, config_type, accessor, output)
return 0
def get_properties2(cluster, config_type, accessor):
logger.info("### Performing \"get\" content:")
properties, attributes = get_current_config(cluster, config_type, accessor)
return properties
def main():
parser = optparse.OptionParser(usage="usage: %prog [options]")
login_options_group = OptionGroup(parser, "To specify credentials please use \"-e\" OR \"-u\" and \"-p'\"")
login_options_group.add_option("-u", "--user", dest="user", default="admin", help="Optional user ID to use for authentication. Default is 'admin'")
login_options_group.add_option("-p", "--password", dest="password", default="admin", help="Optional password to use for authentication. Default is 'admin'")
login_options_group.add_option("-e", "--credentials-file", dest="credentials_file", help="Optional file with user credentials separated by new line.")
parser.add_option_group(login_options_group)
parser.add_option("-t", "--port", dest="port", default="8080", help="Optional port number for Ambari server. Default is '8080'. Provide empty string to not use port.")
parser.add_option("-s", "--protocol", dest="protocol", default="http", help="Optional support of SSL. Default protocol is 'http'")
parser.add_option("-a", "--action", dest="action", help="Script action: <get>, <set>, <delete>")
parser.add_option("-l", "--host", dest="host", help="Server external host name")
parser.add_option("-n", "--cluster", dest="cluster", help="Name given to cluster. Ex: 'c1'")
parser.add_option("-c", "--config-type", dest="config_type", help="One of the various configuration types in Ambari. Ex: core-site, hdfs-site, mapred-queue-acls, etc.")
config_options_group = OptionGroup(parser, "To specify property(s) please use \"-f\" OR \"-k\" and \"-v'\"")
config_options_group.add_option("-f", "--file", dest="file", help="File where entire configurations are saved to, or read from. Supported extensions (.xml, .json>)")
config_options_group.add_option("-k", "--key", dest="key", help="Key that has to be set or deleted. Not necessary for 'get' action.")
config_options_group.add_option("-v", "--value", dest="value", help="Optional value to be set. Not necessary for 'get' or 'delete' actions.")
parser.add_option_group(config_options_group)
(options, args) = parser.parse_args()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.INFO)
stdout_handler.setFormatter(formatter)
logger.addHandler(stdout_handler)
# options with default value
if not options.credentials_file and (not options.user or not options.password):
parser.error("You should use option (-e) to set file with Ambari user credentials OR use (-u) username and (-p) password")
if options.credentials_file:
if os.path.isfile(options.credentials_file):
try:
with open(options.credentials_file) as credentials_file:
file_content = credentials_file.read()
login_lines = filter(None, file_content.splitlines())
if len(login_lines) == 2:
user = login_lines[0]
password = login_lines[1]
else:
logger.error("Incorrect content of {0} file. File should contain Ambari username and password separated by new line.".format(options.credentials_file))
return -1
except Exception as e:
logger.error("You don't have permissions to {0} file".format(options.credentials_file))
return -1
else:
logger.error("File {0} doesn't exist or you don't have permissions.".format(options.credentials_file))
return -1
else:
user = options.user
password = options.password
port = options.port
protocol = options.protocol
#options without default value
if None in [options.action, options.host, options.cluster, options.config_type]:
parser.error("One of required options is not passed")
action = options.action
host = options.host
cluster = options.cluster
config_type = options.config_type
accessor = api_accessor(host, user, password, protocol, port)
if action == SET_ACTION:
if not options.file and (not options.key or not options.value):
parser.error("You should use option (-f) to set file where entire configurations are saved OR (-k) key and (-v) value for one property")
if options.file:
action_args = [options.file]
else:
action_args = [options.key, options.value]
return set_properties(cluster, config_type, action_args, accessor)
elif action == GET_ACTION:
if options.file:
action_args = [options.file]
else:
action_args = []
return get_properties(cluster, config_type, action_args, accessor)
elif action == DELETE_ACTION:
if not options.key:
parser.error("You should use option (-k) to set property name witch will be deleted")
else:
action_args = [options.key]
return delete_properties(cluster, config_type, action_args, accessor)
else:
logger.error('Action "{0}" is not supported. Supported actions: "get", "set", "delete".'.format(action))
return -1
if __name__ == "__main__":
try:
sys.exit(main())
except (KeyboardInterrupt, EOFError):
print("\nAborting ... Keyboard Interrupt.")
sys.exit(1)
|
|
# VMeter Python demos
# VMeter.net
# ver 1. 1/26/13
import pypm
import array
import time
from collections import deque
INPUT=0
OUTPUT=1
def PrintDevices(InOrOut):
for loop in range(pypm.CountDevices()):
interf,name,inp,outp,opened = pypm.GetDeviceInfo(loop)
if ((InOrOut == INPUT) & (inp == 1) |
(InOrOut == OUTPUT) & (outp ==1)):
print loop, name," ",
if (inp == 1): print "(input) ",
else: print "(output) ",
if (opened == 1): print "(opened)"
else: print "(unopened)"
print
# Using the psutil library, displays current CPU activity. A one-sided
# envelope follower is used to smooth out the display.
# The envelope follower will immediately jump to a higher level,
# but falls slowly.
def MonitorCpu(MidiOut):
import psutil # http://code.google.com/p/psutil/
cpu_percent = 0.0
while True:
new_cpu_percent = psutil.cpu_percent(interval=.05)
if new_cpu_percent > cpu_percent:
cpu_percent = new_cpu_percent
else:
cpu_percent = cpu_percent * .8;
output = int(cpu_percent * 1.27)
SendColumn(MidiOut,output)
def SendArray(array, MidiOut):
# assuming 38 length array
# need to split array into (6) 7bit chunks
# Individual LED control is sent via the aftertouch MIDI command on channels 14, 15 and 16.
# Each of the data bytes transmit 7 LED states.
bytes = [0,0,0,0,0,0]
bytes[0] = array[0] | array[1]<<1 | array[2]<<2 | array[3]<<3 | array[4]<<4 | array[5]<<5 | array[6]<<6
bytes[1] = array[7] | array[8]<<1 | array[9]<<2 | array[10]<<3 | array[11]<<4 | array[12]<<5 | array[13]<<6
bytes[2] = array[14] | array[15]<<1 | array[16]<<2 | array[17]<<3 | array[18]<<4 | array[19]<<5 | array[20]<<6
bytes[3] = array[21] | array[22]<<1 | array[23]<<2 | array[24]<<3 | array[25]<<4 | array[26]<<5 | array[27]<<6
bytes[4] = array[28] | array[29]<<1 | array[30]<<2 | array[31]<<3 | array[32]<<4 | array[33]<<5 | array[34]<<6
bytes[5] = array[35] | array[36]<<1 | array[37]<<2
MidiOut.WriteShort(0xAD,bytes[0],bytes[1])
MidiOut.WriteShort(0xAE,bytes[2],bytes[3])
MidiOut.WriteShort(0xAF,bytes[4],bytes[5])
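# Sketch only (not used by the demos below): the same 7-bit packing that
# SendArray() spells out by hand, written as a generic loop over a 38-entry
# list of 0/1 LED states.
def PackLedBytes(array):
    bytes_out = [0, 0, 0, 0, 0, 0]
    for i, state in enumerate(array):
        if state:
            bytes_out[i // 7] |= 1 << (i % 7)
    return bytes_out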
def SetLEDsIgnoreTouch(MidiOut):
MidiOut.WriteShort(0xB0,119,107) # this causes the LEDs to not respond to touch, only MIDI input.
def EnableOnOffOutput(MidiOut):
MidiOut.WriteShort(0xB0,119,120) # now the VMeter will send 127 via ctrl #17 when touched, and 0 when released. 119 disables.
def SendColumn(MidiOut,height):
MidiOut.WriteShort(0xB0,20,height)
def EnablePressureOutput(MidiOut):
MidiOut.WriteShort(0xB0,119,122)
led_array = [1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0,1,0,
1,0,1,0,1,0,1,0]
led_array_deque = deque(led_array)
# Binary clock display.
# Each digit is displayed over 4 LEDs.
# Marker LEDs blink every half second to indicate the position of the digits.
# It displays hours, minutes and seconds, where hours are 24 hour format.
def BinaryClock(MidiOut):
from datetime import datetime
last_cycle_time = 0
led_array = [0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0]
update_time = 0
while True:
if pypm.Time() - last_cycle_time > 500:
last_cycle_time = pypm.Time()
led_array[11] = update_time # marker for minutes, just blinks with seconds
led_array[16] = update_time # marker for minutes, just blinks with seconds
led_array[26] = update_time # marker for hours, just blinks with seconds
led_array[31] = update_time # marker for hours, just blinks with seconds
if update_time == 0:
update_time = 1
else:
update_time = 0
## print "cycle"
seconds = datetime.now().strftime('%S')
seconds_first_digit = int(seconds[0])
seconds_second_digit = int(seconds[1])
minutes = datetime.now().strftime('%M')
minutes_first_digit = int(minutes[0])
minutes_second_digit = int(minutes[1])
hours = datetime.now().strftime('%H')
hours_first_digit = int(hours[0])
hours_seconds_digit = int(hours[1])
temp_counter = seconds_second_digit
for i in range(4):
led_array[i] = 0x01 & temp_counter
temp_counter = temp_counter >> 1
temp_counter = seconds_first_digit
for i in range(4):
led_array[i+4] = 0x01 & temp_counter
temp_counter = temp_counter >> 1
temp_counter = minutes_second_digit
for i in range(4):
led_array[i+12] = 0x01 & temp_counter
temp_counter = temp_counter >> 1
temp_counter = minutes_first_digit
for i in range(4):
led_array[i+17] = 0x01 & temp_counter
temp_counter = temp_counter >> 1
temp_counter = hours_seconds_digit
for i in range(4):
led_array[i+27] = 0x01 & temp_counter
temp_counter = temp_counter >> 1
temp_counter = hours_first_digit
for i in range(4):
led_array[i+32] = 0x01 & temp_counter
temp_counter = temp_counter >> 1
print hours, minutes, seconds
SendArray(led_array, MidiOut)
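# Sketch only: BinaryClock() repeats the same "write one decimal digit into
# four LEDs" loop six times; a helper like this (not called by the demos)
# expresses that step once. `offset` is the index of the digit's lowest bit
# in the LED array.
def SetDigitBits(led_array, offset, digit):
    for i in range(4):
        led_array[offset + i] = 0x01 & (digit >> i)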
# A simple binary counter display.
def BinaryCounter(MidiOut):
last_cycle_time = 0
counter = 0
led_array = [0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0]
while True:
if pypm.Time() - last_cycle_time > 30:
## print "cycle"
last_cycle_time = pypm.Time()
temp_counter = counter
counter = counter + 1
for i in range(20):
led_array[i] = 0x01 & temp_counter
temp_counter = temp_counter >> 1
SendArray(led_array, MidiOut)
# A Game of Life simulation is usually performed on a 2D matrix, but here
# we apply similar rules to the 1D VMeter array of LEDs.
# Each cycle, a given LED is turned on or off based on how many of its neighbors
# are on or off.
# Different starting configurations will result in different patterns,
# some die off, some enter into a repeating cycle, and others continue to
# evolve.
# Touching the VMeter will cause the LEDs touched to switch states, which can restart
# a simulation that has died off.
def GameOfLife(MidiOut, MidiIn):
led_array = [1,1,1,1,1,1,1,0,0,0,
0,0,0,0,0,0,1,1,1,1,
0,1,1,0,0,1,1,1,0,0,
0,0,0,1,0,0,0,0]
# led_array = [1,0,1,1,1,1,1,0,0,0,
# 0,0,0,0,0,0,1,1,1,1,
# 0,1,1,0,0,1,1,1,0,0,
# 0,0,0,1,0,0,1,0]
# led_array = [1,0,0,0,0,0,0,0,0,0,
# 0,0,0,0,0,0,1,0,1,0,
# 0,1,0,0,0,1,0,0,0,0,
# 0,0,0,1,0,0,0,0]
last_cycle_time = 0
i = 0
while True:
while MidiIn.Poll(): # invert LEDs where touched
MidiData = MidiIn.Read(1)
if MidiData[0][0][0] == 0xB0:
if MidiData[0][0][1] == 20:
pos = MidiData[0][0][2]
index_pos = int(float(pos) / 127.0 * 37.0)
# print "index pos: ", index_pos
if led_array[index_pos] == 1:
led_array[index_pos] = 0
else:
led_array[index_pos] = 1
if pypm.Time() - last_cycle_time > 100:
last_cycle_time = pypm.Time()
index_array = range(2,36) # LEDs 2..35 have two neighbors on each side; 0, 1, 36 and 37 are edges
new_array = list(led_array)
# copy over 4 edge LEDs since they don't have 4 neighbors.
new_array[0] = led_array[0]
new_array[1] = led_array[1]
new_array[36] = led_array[36]
new_array[37] = led_array[37]
for i in index_array:
sum = led_array[i-2] + led_array[i-1] + led_array[i+1] + led_array[i+2]
if led_array[i] == 1: # live cell
if sum < 1:
new_array[i] = 0 # under population
elif sum < 3:
new_array[i] = 1 # just right
else:
new_array[i] = 0 # overcrowding
else: # dead cell
if sum == 2 or sum == 3:
new_array[i] = 1
else:
new_array[i] = 0
led_array = list(new_array)
SendArray(led_array, MidiOut)
def adjust_speed(new_speed,speed):
# here average the new_speed with the old speed
speed = new_speed * .2 + speed * .8
return speed
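# adjust_speed() is a simple exponential moving average with a smoothing
# factor of 0.2: for example, with speed = 1.0 and new_speed = 0.0 it
# returns 0.2 * 0.0 + 0.8 * 1.0 = 0.8, so repeated "braking" calls decay the
# speed geometrically (1.0, 0.8, 0.64, ...) rather than stopping abruptly.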
# this causes the LEDs to act like a scrolled page on a tablet.
# Simulated acceleration provides a smooth start and stop effect.
def ChaseDemoWithSpeedInput(MidiOut, MidiIn):
x = 1
speed = 500
last_time = 0
last_speed_calc_time = 0
prev_pos = 0
pos = 0
prev_last_input_time = 0
last_input_time = 0
speed = 0.0
new_speed = 0.0
pos_array = [0, 0, 0, 0, 0]
pos_array = deque(pos_array)
time_array = deque([0, 0, 0, 0, 0])
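# pos_array and time_array hold the most recent touch positions and their
# timestamps (newest at index 0); the speed estimate below compares the
# newest sample against the one four samples back.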
print_time = 0
led_shift_time = 0
touch_state = 0
brake_time = 0
led_deque = deque([1,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0])
SendArray(led_deque, MidiOut)
EnableOnOffOutput(MidiOut)
while True:
while MidiIn.Poll(): # throw out all but the latest input
MidiData = MidiIn.Read(1)
if MidiData[0][0][0] == 0xB0:
if MidiData[0][0][1] == 20:
pos = MidiData[0][0][2]
pos_array.appendleft(pos)
# pos_array.pop()
last_input_time = MidiData[0][1]
time_array.appendleft(last_input_time)
# time_array.pop()
# print(last_input_time)
elif MidiData[0][0][1] == 17: # on / off output. 127 is touch, 0 is release
if MidiData[0][0][2] == 127:
# print "touch"
touch_state = 1
else:
# print "release"
touch_state = 0
if last_input_time > last_speed_calc_time: # calc speed
last_speed_calc_time = pypm.Time()
pos_delta = pos_array[0] - pos_array[4]
time_delta = time_array[0] - time_array[4]
if time_delta > 0:
new_speed = float(pos_delta) / float(time_delta)
speed = adjust_speed(new_speed,speed)
# handle case where VMeter is being touched, but position isn't moving
if touch_state == 1 and pypm.Time() - last_input_time > 100:
# reduce speed to 0
if pypm.Time() - brake_time > 17:
brake_time = pypm.Time()
# print "braking"
speed = adjust_speed(0.0,speed)
if pypm.Time() - print_time > 150:
print_time = pypm.Time()
# if abs(speed) > .01:
# print "speed: ", speed, ", per: ", 1.0 / speed
if pypm.Time() - last_input_time > 100:
# friction braking
speed = adjust_speed(0.0,speed)
if abs(speed) > .001 and pypm.Time() - led_shift_time > int(2.5/abs(speed)):
led_shift_time = pypm.Time()
if speed > 0.0:
led_deque.rotate(1)
else:
led_deque.rotate(-1)
SendArray(led_deque, MidiOut)
# uses the rotate method of a deque to cause the LEDs to chase and wrap around.
def ChaseDemo2():
# led_array = [1,0,1,0,1,0,1,0,1,0,
# 1,0,1,0,1,0,1,0,1,0,
# 1,0,1,0,1,0,1,0,1,0,
# 1,0,1,0,1,0,1,0]
# led_array_deque = deque(led_array)
print(led_array_deque)
SendArray(led_array_deque, MidiOut)
timer_marker = pypm.Time()
while True:
timer_marker = pypm.Time()
while pypm.Time() < timer_marker + 500:
pass
SendArray(led_array_deque, MidiOut)
led_array_deque.rotate(1)
# another LED chasing demo utilizing bit shifting through 2x multiplication
def ChaseDemo(MidiOut):
x = 1;
while True:
MidiTime = pypm.Time()
MidiOut.WriteShort(0xAD,x,x)
MidiOut.WriteShort(0xAE,x,x)
MidiOut.WriteShort(0xAF,x,x)
x = x * 2;
if x == 128:
x = 1;
while pypm.Time() < MidiTime + 100:
pass
# Draws a single LED cursor under the finger position.
def DrawCursor(MidiOut,height): # uses global led_deque
# clear the deque - set all LEDs to off
for i in range(38):
led_array_deque[i] = 0
cursor_pos = int(float(height) / 127.0 * 38.0)
if cursor_pos > 37:
cursor_pos = 37
led_array_deque[cursor_pos] = 1 # turn on one LED
SendArray(led_array_deque, MidiOut)
# draws a bar centered at height position with a given size.
# Kinda like a fat cursor.
def DrawBar(MidiOut,height,size):
# clear the deque - set all LEDs to off
for i in range(38):
led_array_deque[i] = 0
cursor_pos = int(float(height) / 127.0 * 37.0)
lower_limit = cursor_pos - size / 2
if lower_limit < 0:
lower_limit = 0
upper_limit = cursor_pos + size / 2
if upper_limit > 37:
upper_limit = 37
i = lower_limit
while i <= upper_limit:
led_array_deque[i] = 1
i = i + 1
SendArray(led_array_deque, MidiOut)
# this draws a bar where touched instead of a cursor or column.
def DrawBarDemo(size):
# size == 0 --> pressure adj
bar_size = 1
input_pos = 64
while True:
if MidiIn.Poll():
MidiData = MidiIn.Read(1)
#print MidiData[0][0][0]," ",MidiData[0][0][1]," ",MidiData[0][0][2]
if MidiData[0][0][0] == 0xB0:
if MidiData[0][0][1] == 20:
input_pos = MidiData[0][0][2]
# SendColumn(MidiOut,input_pos)
# DrawCursor(MidiOut,input_pos)
if size == 0:
if MidiData[0][0][1] == 18:
bar_size = MidiData[0][0][2] / 6
else:
bar_size = size
DrawBar(MidiOut,input_pos,bar_size)
# main code begins here
pypm.Initialize() # always call this first, or OS may crash when you try to open a stream
PrintDevices(OUTPUT)
dev = int(raw_input("Type output number: "))
MidiOut = pypm.Output(dev, 0)
PrintDevices(INPUT)
dev = int(raw_input("Type input number: "))
MidiIn = pypm.Input(dev)
# turn off internal LED finger tracking
SetLEDsIgnoreTouch(MidiOut)
# set initial column
SendColumn(MidiOut,45)
# turn on pressure output
EnablePressureOutput(MidiOut)
demo_choice = int(raw_input("""
Choose a demo:
1) Cursor tracks finger position
2) Cursor size adjusts based on pressure
3) Monitor CPU level
4) Binary Counter
5) Binary Clock
6) Chase
7) Scrollable treadmill
8) Game of Life
"""))
if demo_choice == 1:
DrawBarDemo(1)
elif demo_choice == 2:
DrawBarDemo(0) # input 0 to adjust cursor size with pressure
elif demo_choice == 3:
MonitorCpu(MidiOut)
elif demo_choice == 4:
BinaryCounter(MidiOut)
elif demo_choice == 5:
BinaryClock(MidiOut)
elif demo_choice == 6:
ChaseDemo(MidiOut)
elif demo_choice == 7:
ChaseDemoWithSpeedInput(MidiOut,MidiIn)
elif demo_choice == 8:
GameOfLife(MidiOut, MidiIn)
# be sure to try out different starting patterns
dummy = raw_input("ready to close and terminate... (type RETURN):")
del MidiOut
del MidiIn
pypm.Terminate()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class Job(Resource):
"""Contains information about the job.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The ID of the resource
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource
:vartype type: str
:ivar location: The location of the resource
:vartype location: str
:ivar tags: The tags of the resource
:vartype tags: dict
:param experiment_name: Describe the experiment information of the job
:type experiment_name: str
:param priority: Priority associated with the job. Priority values can
range from -1000 to 1000, with -1000 being the lowest priority and 1000
being the highest priority. The default value is 0. Default value: 0 .
:type priority: int
:param cluster: Specifies the Id of the cluster on which this job will
run.
:type cluster: :class:`ResourceId <azure.mgmt.batchai.models.ResourceId>`
:param node_count: Number of compute nodes to run the job on. The job will
be gang scheduled on that many compute nodes
:type node_count: int
:param container_settings: If provided the job will run in the specified
container. If the container was downloaded as part of cluster setup then
the same container image will be used. If not provided, the job will run
on the VM.
:type container_settings: :class:`ContainerSettings
<azure.mgmt.batchai.models.ContainerSettings>`
:param tool_type: The toolkit type of this job. Possible values are: cntk,
tensorflow, caffe, caffe2, chainer, custom. Possible values include:
'cntk', 'tensorflow', 'caffe', 'caffe2', 'chainer', 'custom'
:type tool_type: str or :class:`ToolType
<azure.mgmt.batchai.models.ToolType>`
:param cntk_settings: Specifies the settings for CNTK (aka Microsoft
Cognitive Toolkit) job.
:type cntk_settings: :class:`CNTKsettings
<azure.mgmt.batchai.models.CNTKsettings>`
:param tensor_flow_settings: Specifies the settings for Tensor Flow job.
:type tensor_flow_settings: :class:`TensorFlowSettings
<azure.mgmt.batchai.models.TensorFlowSettings>`
:param caffe_settings: Specifies the settings for Caffe job.
:type caffe_settings: :class:`CaffeSettings
<azure.mgmt.batchai.models.CaffeSettings>`
:param chainer_settings: Specifies the settings for Chainer job.
:type chainer_settings: :class:`ChainerSettings
<azure.mgmt.batchai.models.ChainerSettings>`
:param custom_toolkit_settings: Specifies the settings for custom tool kit
job.
:type custom_toolkit_settings: :class:`CustomToolkitSettings
<azure.mgmt.batchai.models.CustomToolkitSettings>`
:param job_preparation: Specifies the actions to be performed before tool
kit is launched. The specified actions will run on all the nodes that are
part of the job
:type job_preparation: :class:`JobPreparation
<azure.mgmt.batchai.models.JobPreparation>`
:param std_out_err_path_prefix: The path where the Batch AI service will
upload stdout and stderr of the job.
:type std_out_err_path_prefix: str
:param input_directories: Specifies the list of input directories for the
Job.
:type input_directories: list of :class:`InputDirectory
<azure.mgmt.batchai.models.InputDirectory>`
:param output_directories: Specifies the list of output directories where
the models will be created.
:type output_directories: list of :class:`OutputDirectory
<azure.mgmt.batchai.models.OutputDirectory>`
:param environment_variables: Additional environment variables to be
passed to the job. The Batch AI service sets the following environment
variables for all jobs: AZ_BATCHAI_INPUT_id, AZ_BATCHAI_OUTPUT_id,
AZ_BATCHAI_NUM_GPUS_PER_NODE. For distributed TensorFlow jobs, the
following additional environment variables are set by the Batch AI Service:
AZ_BATCHAI_PS_HOSTS, AZ_BATCHAI_WORKER_HOSTS.
:type environment_variables: list of :class:`EnvironmentSetting
<azure.mgmt.batchai.models.EnvironmentSetting>`
:param constraints: Constraints associated with the Job.
:type constraints: :class:`JobPropertiesConstraints
<azure.mgmt.batchai.models.JobPropertiesConstraints>`
:ivar creation_time: The creation time of the job.
:vartype creation_time: datetime
:ivar provisioning_state: The provisioned state of the Batch AI job.
Possible values include: 'creating', 'succeeded', 'failed', 'deleting'
:vartype provisioning_state: str or :class:`ProvisioningState
<azure.mgmt.batchai.models.ProvisioningState>`
:ivar provisioning_state_transition_time: The time at which the job
entered its current provisioning state.
:vartype provisioning_state_transition_time: datetime
:param execution_state: The current state of the job. Possible values
are: queued - The job is queued and able to run.
A job enters this state when it is created, or when it is awaiting a retry
after a failed run. running - The job is running on a compute cluster.
This includes job-level preparation such as downloading resource files or
set up container specified on the job - it does not necessarily mean that
the job command line has started executing. terminating - The job is
terminated by the user, the terminate operation is in progress. succeeded
- The job has completed running successfully and exited with exit code 0.
failed - The job has finished unsuccessfully (failed with a non-zero exit
code) and has exhausted its retry limit. A job is also marked as failed if
an error occurred launching the job. Possible values include: 'queued',
'running', 'terminating', 'succeeded', 'failed'
:type execution_state: str or :class:`ExecutionState
<azure.mgmt.batchai.models.ExecutionState>`
:ivar execution_state_transition_time: The time at which the job entered
its current execution state.
:vartype execution_state_transition_time: datetime
:param execution_info: Contains information about the execution of a job
in the Azure Batch service.
:type execution_info: :class:`JobPropertiesExecutionInfo
<azure.mgmt.batchai.models.JobPropertiesExecutionInfo>`
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'readonly': True},
'tags': {'readonly': True},
'creation_time': {'readonly': True},
'provisioning_state': {'readonly': True},
'provisioning_state_transition_time': {'readonly': True},
'execution_state_transition_time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'experiment_name': {'key': 'properties.experimentName', 'type': 'str'},
'priority': {'key': 'properties.priority', 'type': 'int'},
'cluster': {'key': 'properties.cluster', 'type': 'ResourceId'},
'node_count': {'key': 'properties.nodeCount', 'type': 'int'},
'container_settings': {'key': 'properties.containerSettings', 'type': 'ContainerSettings'},
'tool_type': {'key': 'properties.toolType', 'type': 'str'},
'cntk_settings': {'key': 'properties.cntkSettings', 'type': 'CNTKsettings'},
'tensor_flow_settings': {'key': 'properties.tensorFlowSettings', 'type': 'TensorFlowSettings'},
'caffe_settings': {'key': 'properties.caffeSettings', 'type': 'CaffeSettings'},
'chainer_settings': {'key': 'properties.chainerSettings', 'type': 'ChainerSettings'},
'custom_toolkit_settings': {'key': 'properties.customToolkitSettings', 'type': 'CustomToolkitSettings'},
'job_preparation': {'key': 'properties.jobPreparation', 'type': 'JobPreparation'},
'std_out_err_path_prefix': {'key': 'properties.stdOutErrPathPrefix', 'type': 'str'},
'input_directories': {'key': 'properties.inputDirectories', 'type': '[InputDirectory]'},
'output_directories': {'key': 'properties.outputDirectories', 'type': '[OutputDirectory]'},
'environment_variables': {'key': 'properties.environmentVariables', 'type': '[EnvironmentSetting]'},
'constraints': {'key': 'properties.constraints', 'type': 'JobPropertiesConstraints'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningState'},
'provisioning_state_transition_time': {'key': 'properties.provisioningStateTransitionTime', 'type': 'iso-8601'},
'execution_state': {'key': 'properties.executionState', 'type': 'ExecutionState'},
'execution_state_transition_time': {'key': 'properties.executionStateTransitionTime', 'type': 'iso-8601'},
'execution_info': {'key': 'properties.executionInfo', 'type': 'JobPropertiesExecutionInfo'},
}
def __init__(self, experiment_name=None, priority=0, cluster=None, node_count=None, container_settings=None, tool_type=None, cntk_settings=None, tensor_flow_settings=None, caffe_settings=None, chainer_settings=None, custom_toolkit_settings=None, job_preparation=None, std_out_err_path_prefix=None, input_directories=None, output_directories=None, environment_variables=None, constraints=None, execution_state=None, execution_info=None):
super(Job, self).__init__()
self.experiment_name = experiment_name
self.priority = priority
self.cluster = cluster
self.node_count = node_count
self.container_settings = container_settings
self.tool_type = tool_type
self.cntk_settings = cntk_settings
self.tensor_flow_settings = tensor_flow_settings
self.caffe_settings = caffe_settings
self.chainer_settings = chainer_settings
self.custom_toolkit_settings = custom_toolkit_settings
self.job_preparation = job_preparation
self.std_out_err_path_prefix = std_out_err_path_prefix
self.input_directories = input_directories
self.output_directories = output_directories
self.environment_variables = environment_variables
self.constraints = constraints
self.creation_time = None
self.provisioning_state = None
self.provisioning_state_transition_time = None
self.execution_state = execution_state
self.execution_state_transition_time = None
self.execution_info = execution_info
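# Hypothetical usage sketch (not part of the generated client): a Job can be
# built from the optional keyword arguments defined above, e.g.
#
#   job = Job(
#       experiment_name='demo_experiment',
#       priority=0,
#       node_count=1,
#       std_out_err_path_prefix='<path on a mounted share>',
#   )
#
# Cluster, toolkit settings and the other nested models are omitted here
# because their constructors live in sibling modules of this package.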
|
|
# -*- coding: utf-8 -*-
#
# Collection of functions related to VCF files
#
# 1 based
from future.utils import lmap
from past.builtins import xrange
from collections import namedtuple
import re
from . import g2g
from . import g2g_utils
from . import exceptions
VCF_FIELDS = ['chrom', 'pos', 'id', 'ref', 'alt', 'qual', 'filter', 'info', 'format', 'samples']
VCFRecord = namedtuple('VCFRecord', VCF_FIELDS)
GT_DATA_FIELDS = ['ref', 'left', 'right', 'gt', 'fi', 'phase', 'gt_left', 'gt_right', 'is_snp']
GTData = namedtuple('GTData', GT_DATA_FIELDS)
GENOTYPE_UNPHASED = '/'
GENOTYPE_PHASED = '|'
REGEX_ALT = re.compile("(^[A|C|G|T]+)")
LOG = g2g.get_logger()
class VCFFile(object):
"""
Simple VCF object for parsing VCF files
"""
def __init__(self, file_name):
if not file_name:
raise exceptions.G2GVCFError("A filename must be supplied")
self.file_name = file_name
self.samples = None
self.current_line = None
self.current_record = None
self.reader = g2g_utils.open_resource(file_name)
self._parse_header()
def _parse_header(self):
self.current_line = self.reader.next()
while self.current_line.startswith('##'):
self.current_line = self.reader.next()
if self.current_line.startswith('#'):
elems = self.current_line.strip().split('\t')
samples = elems[9:]
self.samples = dict(zip(samples, (x for x in xrange(len(samples)))))
else:
raise exceptions.G2GVCFError("Improperly formatted VCF file")
def parse_gt(self, sample):
if sample is None:
raise exceptions.G2GVCFError("Sample must contain a value")
sample_index = self.get_sample_index(sample)
return parse_gt(self.current_record, sample_index)
def __iter__(self):
return self
def next(self):
self.current_line = self.reader.next()
while self.current_line.startswith("#"):
self.current_line = self.reader.next()
self.current_record = parse_vcf_line(self.current_line)
return self.current_record
def get_sample_index(self, sample):
if sample is None:
raise exceptions.G2GVCFError("Sample must contain a value")
if sample in self.samples:
return self.samples[sample]
raise exceptions.G2GVCFError("Unknown sample: '{0}'".format(sample))
def parse_vcf_line(line):
"""
Parse a line in the VCF file.
:param line: a line from the VCF file
:type line: str
:return: :class:`.vcf.VCFRecord`
"""
if isinstance(line, str):
if line.startswith('#'):
return None
elem = line.strip().split('\t')
elif isinstance(line, list):
elem = line
try:
quality = int(elem[5])
except ValueError:
try:
quality = float(elem[5])
except ValueError:
quality = None
filter_field = None
if elem[6] != '.':
filter_field = elem[6].split(';')
info = elem[7]
try:
fmt = elem[8]
except IndexError:
fmt = None
else:
if fmt == '.':
fmt = None
return VCFRecord(elem[0], int(elem[1]), None if elem[2] == '.' else elem[2], elem[3],
elem[4].split(','), quality, filter_field, info, fmt, elem[9:])
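# Illustrative sketch (not executed): parse_vcf_line on a minimal, hypothetical
# tab-separated data line. Field order follows VCF_FIELDS above; ALT is split
# on ',' into a list and '.' placeholders become None.
#
#   rec = parse_vcf_line("1\t3000012\trs1\tA\tG,T\t91\tPASS\tDP=20\tGT:FI\t0/1:1")
#   # rec.pos == 3000012, rec.alt == ['G', 'T'], rec.filter == ['PASS'], rec.samples == ['0/1:1']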
def parse_gt(vcf_record, sample_index):
"""
Parse the GT field within the VCF line.
:param vcf_record: the VCF record
:type vcf_record: :class:`.vcf.VCFRecord`
:param sample_index: the strain or sample index
:type sample_index: int
:return: :class:`.vcf.GTData`
"""
if sample_index is None:
raise exceptions.G2GVCFError("Sample index must contain a value")
sample_data = vcf_record.samples[sample_index]
    gt = None
    fi = None
    left = None
    right = None
    phase = None
    gt_left = None
    gt_right = None
    # check to see if ALT is <CN*> or something not ACGT
    if not any('<' in a for a in vcf_record.alt) and sample_data != '.':
        fmt_fields = vcf_record.format.split(':')
        gt_index = fmt_fields.index('GT')
        fi_index = fmt_fields.index('FI') if 'FI' in fmt_fields else None
try:
# parse the GT field
gt = sample_data.split(':')[gt_index]
# make sure a call can be made
if gt != '.' and gt != './.' and gt != '.|.':
if GENOTYPE_PHASED in gt:
genotypes = lmap(int, gt.split(GENOTYPE_PHASED))
phase = GENOTYPE_PHASED
elif GENOTYPE_UNPHASED in gt:
genotypes = lmap(int, gt.split(GENOTYPE_UNPHASED))
phase = GENOTYPE_UNPHASED
else:
raise ValueError("Unknown phase in GT, {0}".format(gt))
# assuming no triploids for now
if genotypes[0] == 0:
left = vcf_record.ref
else:
left = vcf_record.alt[genotypes[0]-1]
if genotypes[1] == 0:
right = vcf_record.ref
else:
right = vcf_record.alt[genotypes[1]-1]
gt_left = genotypes[0]
gt_right = genotypes[1]
                # check to see if the resolved alleles are <CN*> or something not ACGT
                if not REGEX_ALT.match(left):
                    left = None
                    gt_left = None
                if not REGEX_ALT.match(right):
                    right = None
                    gt_right = None
except ValueError as ve:
LOG.debug(ve)
except IndexError as ie:
LOG.debug(ie)
        try:
            if fi_index is not None:
                fi = sample_data.split(':')[fi_index]
except ValueError as ve:
LOG.debug(ve)
except IndexError as ie:
LOG.debug(ie)
    is_snp = len(vcf_record.ref) == 1 == (len(left) if left else 0) == (len(right) if right else 0)
    return GTData(vcf_record.ref, left, right, gt, fi, phase, gt_left, gt_right, is_snp)
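# Illustrative sketch (not executed): how a GT string resolves to alleles for a
# hypothetical sample column '0/1:1' with format 'GT:FI', REF 'A' and ALT ['G', 'T']:
#
#   # gt == '0/1', phase == '/', left == 'A' (REF), right == 'G' (ALT[0]),
#   # gt_left == 0, gt_right == 1, fi == '1', is_snp == True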
def parse_gt_tuple(vcf_record, sample_index):
"""
Parse the GT field within the VCF line.
"""
if sample_index is None:
raise exceptions.G2GVCFError("Sample index must contain a value")
sample_data = vcf_record[sample_index]
gt = None
fi = None
left = None
right = None
phase = None
gt_left = None
gt_right = None
    # check to see if ALT is <CN*> or something not ACGT
if vcf_record.alt.find('<') == -1 and sample_data != '.':
formats = vcf_record.format.split(':')
gt_index = formats.index('GT')
fi_index = formats.index('FI') if 'FI' in formats else None
try:
# parse the GT field
gt = sample_data.split(':')[gt_index]
# make sure a call can be made
if gt != '.' and gt != './.' and gt != '.|.':
if GENOTYPE_PHASED in gt:
genotypes = lmap(int, gt.split(GENOTYPE_PHASED))
phase = GENOTYPE_PHASED
elif GENOTYPE_UNPHASED in gt:
genotypes = lmap(int, gt.split(GENOTYPE_UNPHASED))
phase = GENOTYPE_UNPHASED
else:
raise ValueError("Unknown phase in GT, {0}".format(gt))
# assuming no triploids for now
if genotypes[0] == 0:
left = vcf_record.ref
else:
left = vcf_record.alt.split(',')[genotypes[0]-1]
if genotypes[1] == 0:
right = vcf_record.ref
else:
right = vcf_record.alt.split(',')[genotypes[1]-1]
gt_left = genotypes[0]
gt_right = genotypes[1]
                # check to see if ALT is <CN*> or something not ACGT
#if not REGEX_ALT.match(left) or not REGEX_ALT.match(right):
# LOG.error("VFC2VCI CN FOUND")
# gt = None
# fi = None
# left = None
# right = None
# phase = None
# gt_left = None
# gt_right = None
except ValueError as ve:
LOG.debug(ve)
except IndexError as ie:
LOG.debug(ie)
try:
            if fi_index is not None:
fi = sample_data.split(':')[fi_index]
except ValueError as ve:
LOG.debug(ve)
except IndexError as ie:
LOG.debug(ie)
is_snp = len(vcf_record.ref) == 1 == (len(left) if left else 0) == (len(right) if right else 0)
return GTData(vcf_record.ref, left, right, gt, fi, phase, gt_left, gt_right, is_snp)
|
|
#!/usr/bin/env python
# generator.py
# simple C++ generator, originally targeted at Spidermonkey bindings
#
# Copyright (c) 2011 - Zynga Inc.
from clang import cindex
import sys
import pdb
import ConfigParser
import yaml
import re
import os
import inspect
import traceback
from Cheetah.Template import Template
type_map = {
cindex.TypeKind.VOID : "void",
cindex.TypeKind.BOOL : "bool",
cindex.TypeKind.CHAR_U : "unsigned char",
cindex.TypeKind.UCHAR : "unsigned char",
cindex.TypeKind.CHAR16 : "char",
cindex.TypeKind.CHAR32 : "char",
cindex.TypeKind.USHORT : "unsigned short",
cindex.TypeKind.UINT : "unsigned int",
cindex.TypeKind.ULONG : "unsigned long",
cindex.TypeKind.ULONGLONG : "unsigned long long",
cindex.TypeKind.CHAR_S : "char",
cindex.TypeKind.SCHAR : "char",
cindex.TypeKind.WCHAR : "wchar_t",
cindex.TypeKind.SHORT : "short",
cindex.TypeKind.INT : "int",
cindex.TypeKind.LONG : "long",
cindex.TypeKind.LONGLONG : "long long",
cindex.TypeKind.FLOAT : "float",
cindex.TypeKind.DOUBLE : "double",
cindex.TypeKind.LONGDOUBLE : "long double",
cindex.TypeKind.NULLPTR : "NULL",
cindex.TypeKind.OBJCID : "id",
cindex.TypeKind.OBJCCLASS : "class",
cindex.TypeKind.OBJCSEL : "SEL",
# cindex.TypeKind.ENUM : "int"
}
INVALID_NATIVE_TYPE = "??"
default_arg_type_arr = [
# An integer literal.
cindex.CursorKind.INTEGER_LITERAL,
# A floating point number literal.
cindex.CursorKind.FLOATING_LITERAL,
# An imaginary number literal.
cindex.CursorKind.IMAGINARY_LITERAL,
# A string literal.
cindex.CursorKind.STRING_LITERAL,
# A character literal.
cindex.CursorKind.CHARACTER_LITERAL,
# [C++ 2.13.5] C++ Boolean Literal.
cindex.CursorKind.CXX_BOOL_LITERAL_EXPR,
# [C++0x 2.14.7] C++ Pointer Literal.
cindex.CursorKind.CXX_NULL_PTR_LITERAL_EXPR,
# An expression that refers to some value declaration, such as a function,
    # variable, or enumerator.
cindex.CursorKind.DECL_REF_EXPR
]
def native_name_from_type(ntype, underlying=False):
kind = ntype.kind #get_canonical().kind
const = "" #"const " if ntype.is_const_qualified() else ""
if not underlying and kind == cindex.TypeKind.ENUM:
decl = ntype.get_declaration()
return get_namespaced_name(decl)
elif kind in type_map:
return const + type_map[kind]
elif kind == cindex.TypeKind.RECORD:
# might be an std::string
decl = ntype.get_declaration()
parent = decl.semantic_parent
cdecl = ntype.get_canonical().get_declaration()
cparent = cdecl.semantic_parent
if decl.spelling == "string" and parent and parent.spelling == "std":
return "std::string"
elif cdecl.spelling == "function" and cparent and cparent.spelling == "std":
return "std::function"
else:
# print >> sys.stderr, "probably a function pointer: " + str(decl.spelling)
return const + decl.spelling
else:
# name = ntype.get_declaration().spelling
# print >> sys.stderr, "Unknown type: " + str(kind) + " " + str(name)
return INVALID_NATIVE_TYPE
# pdb.set_trace()
def build_namespace(cursor, namespaces=[]):
'''
build the full namespace for a specific cursor
'''
if cursor:
parent = cursor.semantic_parent
if parent:
if parent.kind == cindex.CursorKind.NAMESPACE or parent.kind == cindex.CursorKind.CLASS_DECL:
namespaces.append(parent.displayname)
build_namespace(parent, namespaces)
return namespaces
def get_namespaced_name(declaration_cursor):
ns_list = build_namespace(declaration_cursor, [])
ns_list.reverse()
ns = "::".join(ns_list)
if len(ns) > 0:
return ns + "::" + declaration_cursor.displayname
return declaration_cursor.displayname
def generate_namespace_list(cursor, namespaces=[]):
'''
build the full namespace for a specific cursor
'''
if cursor:
parent = cursor.semantic_parent
if parent:
if parent.kind == cindex.CursorKind.NAMESPACE or parent.kind == cindex.CursorKind.CLASS_DECL:
if parent.kind == cindex.CursorKind.NAMESPACE:
namespaces.append(parent.displayname)
generate_namespace_list(parent, namespaces)
return namespaces
def get_namespace_name(declaration_cursor):
ns_list = generate_namespace_list(declaration_cursor, [])
ns_list.reverse()
ns = "::".join(ns_list)
if len(ns) > 0:
return ns + "::"
return declaration_cursor.displayname
class NativeType(object):
def __init__(self):
self.is_object = False
self.is_function = False
self.is_enum = False
self.not_supported = False
self.param_types = []
self.ret_type = None
self.namespaced_name = ""
self.namespace_name = ""
self.name = ""
self.whole_name = None
self.is_const = False
self.is_pointer = False
self.canonical_type = None
@staticmethod
def from_type(ntype):
if ntype.kind == cindex.TypeKind.POINTER:
nt = NativeType.from_type(ntype.get_pointee())
if None != nt.canonical_type:
nt.canonical_type.name += "*"
nt.canonical_type.namespaced_name += "*"
nt.canonical_type.whole_name += "*"
nt.name += "*"
nt.namespaced_name += "*"
nt.whole_name = nt.namespaced_name
nt.is_enum = False
nt.is_const = ntype.get_pointee().is_const_qualified()
nt.is_pointer = True
if nt.is_const:
nt.whole_name = "const " + nt.whole_name
elif ntype.kind == cindex.TypeKind.LVALUEREFERENCE:
nt = NativeType.from_type(ntype.get_pointee())
nt.is_const = ntype.get_pointee().is_const_qualified()
nt.whole_name = nt.namespaced_name + "&"
if nt.is_const:
nt.whole_name = "const " + nt.whole_name
if None != nt.canonical_type:
nt.canonical_type.whole_name += "&"
else:
nt = NativeType()
decl = ntype.get_declaration()
if ntype.kind == cindex.TypeKind.RECORD:
if decl.kind == cindex.CursorKind.CLASS_DECL:
nt.is_object = True
nt.name = decl.displayname
nt.namespaced_name = get_namespaced_name(decl)
nt.namespace_name = get_namespace_name(decl)
nt.whole_name = nt.namespaced_name
else:
if decl.kind == cindex.CursorKind.NO_DECL_FOUND:
nt.name = native_name_from_type(ntype)
else:
nt.name = decl.spelling
nt.namespaced_name = get_namespaced_name(decl)
nt.namespace_name = get_namespace_name(decl)
if nt.namespaced_name == "std::string":
nt.name = nt.namespaced_name
if nt.namespaced_name.startswith("std::function"):
nt.name = "std::function"
if len(nt.namespaced_name) == 0 or nt.namespaced_name.find("::") == -1:
nt.namespaced_name = nt.name
nt.whole_name = nt.namespaced_name
nt.is_const = ntype.is_const_qualified()
if nt.is_const:
nt.whole_name = "const " + nt.whole_name
# Check whether it's a std::function typedef
cdecl = ntype.get_canonical().get_declaration()
if None != cdecl.spelling and 0 == cmp(cdecl.spelling, "function"):
nt.name = "std::function"
if nt.name != INVALID_NATIVE_TYPE and nt.name != "std::string" and nt.name != "std::function":
if ntype.kind == cindex.TypeKind.UNEXPOSED or ntype.kind == cindex.TypeKind.TYPEDEF:
ret = NativeType.from_type(ntype.get_canonical())
if ret.name != "":
if decl.kind == cindex.CursorKind.TYPEDEF_DECL:
ret.canonical_type = nt
return ret
nt.is_enum = ntype.get_canonical().kind == cindex.TypeKind.ENUM
if nt.name == "std::function":
nt.namespaced_name = get_namespaced_name(cdecl)
r = re.compile('function<(.+) \((.*)\)>').search(cdecl.displayname)
(ret_type, params) = r.groups()
params = filter(None, params.split(", "))
nt.is_function = True
nt.ret_type = NativeType.from_string(ret_type)
nt.param_types = [NativeType.from_string(string) for string in params]
# mark argument as not supported
if nt.name == INVALID_NATIVE_TYPE:
nt.not_supported = True
return nt
@staticmethod
def from_string(displayname):
displayname = displayname.replace(" *", "*")
nt = NativeType()
nt.name = displayname.split("::")[-1]
nt.namespaced_name = displayname
nt.whole_name = nt.namespaced_name
nt.is_object = True
return nt
@property
def lambda_parameters(self):
params = ["%s larg%d" % (str(nt), i) for i, nt in enumerate(self.param_types)]
return ", ".join(params)
@staticmethod
def dict_has_key_re(dict, real_key_list):
for real_key in real_key_list:
for (k, v) in dict.items():
if k.startswith('@'):
k = k[1:]
match = re.match("^" + k + "$", real_key)
if match:
return True
else:
if k == real_key:
return True
return False
@staticmethod
def dict_get_value_re(dict, real_key_list):
for real_key in real_key_list:
for (k, v) in dict.items():
if k.startswith('@'):
k = k[1:]
match = re.match("^" + k + "$", real_key)
if match:
return v
else:
if k == real_key:
return v
return None
@staticmethod
def dict_replace_value_re(dict, real_key_list):
for real_key in real_key_list:
for (k, v) in dict.items():
if k.startswith('@'):
k = k[1:]
match = re.match('.*' + k, real_key)
if match:
return re.sub(k, v, real_key)
else:
if k == real_key:
return v
return None
def from_native(self, convert_opts):
assert(convert_opts.has_key('generator'))
generator = convert_opts['generator']
keys = []
if self.canonical_type != None:
keys.append(self.canonical_type.name)
keys.append(self.name)
from_native_dict = generator.config['conversions']['from_native']
if self.is_object:
if not NativeType.dict_has_key_re(from_native_dict, keys):
keys.append("object")
elif self.is_enum:
keys.append("int")
if NativeType.dict_has_key_re(from_native_dict, keys):
tpl = NativeType.dict_get_value_re(from_native_dict, keys)
tpl = Template(tpl, searchList=[convert_opts])
return str(tpl).rstrip()
return "#pragma warning NO CONVERSION FROM NATIVE FOR " + self.name
def to_native(self, convert_opts):
assert('generator' in convert_opts)
generator = convert_opts['generator']
keys = []
if self.canonical_type != None:
keys.append(self.canonical_type.name)
keys.append(self.name)
to_native_dict = generator.config['conversions']['to_native']
if self.is_object:
if not NativeType.dict_has_key_re(to_native_dict, keys):
keys.append("object")
elif self.is_enum:
keys.append("int")
if self.is_function:
tpl = Template(file=os.path.join(generator.target, "templates", "lambda.c"),
searchList=[convert_opts, self])
indent = convert_opts['level'] * "\t"
return str(tpl).replace("\n", "\n" + indent)
if NativeType.dict_has_key_re(to_native_dict, keys):
tpl = NativeType.dict_get_value_re(to_native_dict, keys)
tpl = Template(tpl, searchList=[convert_opts])
return str(tpl).rstrip()
return "#pragma warning NO CONVERSION TO NATIVE FOR " + self.name
def to_string(self, generator):
conversions = generator.config['conversions']
if conversions.has_key('native_types'):
native_types_dict = conversions['native_types']
if NativeType.dict_has_key_re(native_types_dict, [self.namespaced_name]):
return NativeType.dict_get_value_re(native_types_dict, [self.namespaced_name])
name = self.namespaced_name
to_native_dict = generator.config['conversions']['to_native']
from_native_dict = generator.config['conversions']['from_native']
use_typedef = False
typedef_name = self.canonical_type.name if None != self.canonical_type else None
if None != typedef_name:
if NativeType.dict_has_key_re(to_native_dict, [typedef_name]) or NativeType.dict_has_key_re(from_native_dict, [typedef_name]):
use_typedef = True
if use_typedef and self.canonical_type:
name = self.canonical_type.namespaced_name
return "const " + name if (self.is_pointer and self.is_const) else name
def get_whole_name(self, generator):
conversions = generator.config['conversions']
to_native_dict = conversions['to_native']
from_native_dict = conversions['from_native']
use_typedef = False
name = self.whole_name
typedef_name = self.canonical_type.name if None != self.canonical_type else None
if None != typedef_name:
if NativeType.dict_has_key_re(to_native_dict, [typedef_name]) or NativeType.dict_has_key_re(from_native_dict, [typedef_name]):
use_typedef = True
if use_typedef and self.canonical_type:
name = self.canonical_type.whole_name
to_replace = None
if conversions.has_key('native_types'):
native_types_dict = conversions['native_types']
to_replace = NativeType.dict_replace_value_re(native_types_dict, [name])
if to_replace:
name = to_replace
return name
def __str__(self):
return self.canonical_type.whole_name if None != self.canonical_type else self.whole_name
class NativeField(object):
def __init__(self, cursor):
cursor = cursor.canonical
self.cursor = cursor
self.name = cursor.displayname
self.kind = cursor.type.kind
self.location = cursor.location
member_field_re = re.compile('m_(\w+)')
match = member_field_re.match(self.name)
if match:
self.pretty_name = match.group(1)
else:
self.pretty_name = self.name
# return True if a default argument is found.
def iterate_param_node(param_node, depth=1):
for node in param_node.get_children():
# print(">"*depth+" "+str(node.kind))
if node.kind in default_arg_type_arr:
return True
if iterate_param_node(node, depth + 1):
return True
return False
class NativeFunction(object):
def __init__(self, cursor):
self.cursor = cursor
self.func_name = cursor.spelling
self.signature_name = self.func_name
self.arguments = []
self.static = cursor.kind == cindex.CursorKind.CXX_METHOD and cursor.is_static_method()
self.implementations = []
self.is_constructor = False
self.not_supported = False
self.is_override = False
self.ret_type = NativeType.from_type(cursor.result_type)
# parse the arguments
# if self.func_name == "spriteWithFile":
# pdb.set_trace()
for arg in cursor.type.argument_types():
nt = NativeType.from_type(arg)
self.arguments.append(nt)
# mark the function as not supported if at least one argument is not supported
if nt.not_supported:
self.not_supported = True
found_default_arg = False
index = -1
for arg_node in self.cursor.get_children():
if arg_node.kind == cindex.CursorKind.CXX_OVERRIDE_ATTR:
self.is_override = True
if arg_node.kind == cindex.CursorKind.PARM_DECL:
index += 1
if iterate_param_node(arg_node):
found_default_arg = True
break
self.min_args = index if found_default_arg else len(self.arguments)
def generate_code(self, current_class=None, generator=None, is_override=False):
gen = current_class.generator if current_class else generator
config = gen.config
tpl = Template(file=os.path.join(gen.target, "templates", "function.h"),
searchList=[current_class, self])
if not is_override:
gen.head_file.write(str(tpl))
if self.static:
if config['definitions'].has_key('sfunction'):
tpl = Template(config['definitions']['sfunction'],
searchList=[current_class, self])
self.signature_name = str(tpl)
tpl = Template(file=os.path.join(gen.target, "templates", "sfunction.c"),
searchList=[current_class, self])
else:
if not self.is_constructor:
if config['definitions'].has_key('ifunction'):
tpl = Template(config['definitions']['ifunction'],
searchList=[current_class, self])
self.signature_name = str(tpl)
else:
if config['definitions'].has_key('constructor'):
tpl = Template(config['definitions']['constructor'],
searchList=[current_class, self])
self.signature_name = str(tpl)
if self.is_constructor and gen.script_type == "spidermonkey" :
tpl = Template(file=os.path.join(gen.target, "templates", "constructor.c"),
searchList=[current_class, self])
else :
tpl = Template(file=os.path.join(gen.target, "templates", "ifunction.c"),
searchList=[current_class, self])
if not is_override:
gen.impl_file.write(str(tpl))
apidoc_function_script = Template(file=os.path.join(gen.target,
"templates",
"apidoc_function.script"),
searchList=[current_class, self])
if gen.script_type == "spidermonkey":
gen.doc_file.write(str(apidoc_function_script))
else:
if gen.script_type == "lua" and current_class != None :
current_class.doc_func_file.write(str(apidoc_function_script))
class NativeOverloadedFunction(object):
def __init__(self, func_array):
self.implementations = func_array
self.func_name = func_array[0].func_name
self.signature_name = self.func_name
self.min_args = 100
self.is_constructor = False
for m in func_array:
self.min_args = min(self.min_args, m.min_args)
def append(self, func):
self.min_args = min(self.min_args, func.min_args)
self.implementations.append(func)
def generate_code(self, current_class=None, is_override=False):
gen = current_class.generator
config = gen.config
static = self.implementations[0].static
tpl = Template(file=os.path.join(gen.target, "templates", "function.h"),
searchList=[current_class, self])
if not is_override:
gen.head_file.write(str(tpl))
if static:
if config['definitions'].has_key('sfunction'):
tpl = Template(config['definitions']['sfunction'],
searchList=[current_class, self])
self.signature_name = str(tpl)
tpl = Template(file=os.path.join(gen.target, "templates", "sfunction_overloaded.c"),
searchList=[current_class, self])
else:
if not self.is_constructor:
if config['definitions'].has_key('ifunction'):
tpl = Template(config['definitions']['ifunction'],
searchList=[current_class, self])
self.signature_name = str(tpl)
else:
if config['definitions'].has_key('constructor'):
tpl = Template(config['definitions']['constructor'],
searchList=[current_class, self])
self.signature_name = str(tpl)
tpl = Template(file=os.path.join(gen.target, "templates", "ifunction_overloaded.c"),
searchList=[current_class, self])
if not is_override:
gen.impl_file.write(str(tpl))
if current_class != None:
if gen.script_type == "lua":
apidoc_function_overload_script = Template(file=os.path.join(gen.target,
"templates",
"apidoc_function_overload.script"),
searchList=[current_class, self])
current_class.doc_func_file.write(str(apidoc_function_overload_script))
else:
if gen.script_type == "spidermonkey":
apidoc_function_overload_script = Template(file=os.path.join(gen.target,
"templates",
"apidoc_function_overload.script"),
searchList=[current_class, self])
gen.doc_file.write(str(apidoc_function_overload_script))
class NativeClass(object):
def __init__(self, cursor, generator):
# the cursor to the implementation
self.cursor = cursor
self.class_name = cursor.displayname
self.is_ref_class = self.class_name == "Ref"
self.namespaced_class_name = self.class_name
self.parents = []
self.fields = []
self.methods = {}
self.static_methods = {}
self.generator = generator
self.is_abstract = self.class_name in generator.abstract_classes
self._current_visibility = cindex.AccessSpecifierKind.PRIVATE
#for generate lua api doc
self.override_methods = {}
self.has_constructor = False
self.namespace_name = ""
registration_name = generator.get_class_or_rename_class(self.class_name)
if generator.remove_prefix:
self.target_class_name = re.sub('^' + generator.remove_prefix, '', registration_name)
else:
self.target_class_name = registration_name
self.namespaced_class_name = get_namespaced_name(cursor)
self.namespace_name = get_namespace_name(cursor)
self.parse()
@property
def underlined_class_name(self):
return self.namespaced_class_name.replace("::", "_")
def parse(self):
'''
        parse the current cursor, getting all the necessary information
'''
self._deep_iterate(self.cursor)
def methods_clean(self):
'''
clean list of methods (without the ones that should be skipped)
'''
ret = []
for name, impl in self.methods.iteritems():
should_skip = False
if name == 'constructor':
should_skip = True
else:
if self.generator.should_skip(self.class_name, name):
should_skip = True
if not should_skip:
ret.append({"name": name, "impl": impl})
return ret
def static_methods_clean(self):
'''
clean list of static methods (without the ones that should be skipped)
'''
ret = []
for name, impl in self.static_methods.iteritems():
should_skip = self.generator.should_skip(self.class_name, name)
if not should_skip:
ret.append({"name": name, "impl": impl})
return ret
def override_methods_clean(self):
'''
clean list of override methods (without the ones that should be skipped)
'''
ret = []
for name, impl in self.override_methods.iteritems():
should_skip = self.generator.should_skip(self.class_name, name)
if not should_skip:
ret.append({"name": name, "impl": impl})
return ret
def generate_code(self):
'''
actually generate the code. it uses the current target templates/rules in order to
generate the right code
'''
if not self.is_ref_class:
self.is_ref_class = self._is_ref_class()
config = self.generator.config
prelude_h = Template(file=os.path.join(self.generator.target, "templates", "prelude.h"),
searchList=[{"current_class": self}])
prelude_c = Template(file=os.path.join(self.generator.target, "templates", "prelude.c"),
searchList=[{"current_class": self}])
apidoc_classhead_script = Template(file=os.path.join(self.generator.target,
"templates",
"apidoc_classhead.script"),
searchList=[{"current_class": self}])
if self.generator.script_type == "lua":
docfuncfilepath = os.path.join(self.generator.outdir + "/api", self.class_name + ".lua")
self.doc_func_file = open(docfuncfilepath, "w+")
apidoc_fun_head_script = Template(file=os.path.join(self.generator.target,
"templates",
"apidoc_function_head.script"),
searchList=[{"current_class": self}])
self.doc_func_file.write(str(apidoc_fun_head_script))
self.generator.head_file.write(str(prelude_h))
self.generator.impl_file.write(str(prelude_c))
self.generator.doc_file.write(str(apidoc_classhead_script))
for m in self.methods_clean():
m['impl'].generate_code(self)
for m in self.static_methods_clean():
m['impl'].generate_code(self)
if self.generator.script_type == "lua":
for m in self.override_methods_clean():
m['impl'].generate_code(self, is_override = True)
# generate register section
register = Template(file=os.path.join(self.generator.target, "templates", "register.c"),
searchList=[{"current_class": self}])
apidoc_classfoot_script = Template(file=os.path.join(self.generator.target,
"templates",
"apidoc_classfoot.script"),
searchList=[{"current_class": self}])
self.generator.impl_file.write(str(register))
self.generator.doc_file.write(str(apidoc_classfoot_script))
if self.generator.script_type == "lua":
apidoc_fun_foot_script = Template(file=os.path.join(self.generator.target,
"templates",
"apidoc_function_foot.script"),
searchList=[{"current_class": self}])
self.doc_func_file.write(str(apidoc_fun_foot_script))
self.doc_func_file.close()
def _deep_iterate(self, cursor=None, depth=0):
for node in cursor.get_children():
# print("%s%s - %s" % ("> " * depth, node.displayname, node.kind))
if self._process_node(node):
self._deep_iterate(node, depth + 1)
@staticmethod
def _is_method_in_parents(current_class, method_name):
if len(current_class.parents) > 0:
if method_name in current_class.parents[0].methods:
return True
return NativeClass._is_method_in_parents(current_class.parents[0], method_name)
return False
def _is_ref_class(self, depth = 0):
"""
Mark the class as 'cocos2d::Ref' or its subclass.
"""
# print ">" * (depth + 1) + " " + self.class_name
if len(self.parents) > 0:
return self.parents[0]._is_ref_class(depth + 1)
if self.is_ref_class:
return True
return False
def _process_node(self, cursor):
'''
        process the node, depending on its type. If this method returns True, a deep
        iteration is performed on its children; otherwise processing continues with its siblings (if any)
@param: cursor the cursor to analyze
'''
if cursor.kind == cindex.CursorKind.CXX_BASE_SPECIFIER:
parent = cursor.get_definition()
parent_name = parent.displayname
if not self.class_name in self.generator.classes_have_no_parents:
if parent_name and parent_name not in self.generator.base_classes_to_skip:
#if parent and self.generator.in_listed_classes(parent.displayname):
if not self.generator.generated_classes.has_key(parent.displayname):
parent = NativeClass(parent, self.generator)
self.generator.generated_classes[parent.class_name] = parent
else:
parent = self.generator.generated_classes[parent.displayname]
self.parents.append(parent)
if parent_name == "Ref":
self.is_ref_class = True
elif cursor.kind == cindex.CursorKind.FIELD_DECL:
self.fields.append(NativeField(cursor))
elif cursor.kind == cindex.CursorKind.CXX_ACCESS_SPEC_DECL:
self._current_visibility = cursor.get_access_specifier()
elif cursor.kind == cindex.CursorKind.CXX_METHOD and cursor.get_availability() != cindex.AvailabilityKind.DEPRECATED:
# skip if variadic
if self._current_visibility == cindex.AccessSpecifierKind.PUBLIC and not cursor.type.is_function_variadic():
m = NativeFunction(cursor)
registration_name = self.generator.should_rename_function(self.class_name, m.func_name) or m.func_name
# bail if the function is not supported (at least one arg not supported)
if m.not_supported:
return False
if m.is_override:
if NativeClass._is_method_in_parents(self, registration_name):
if self.generator.script_type == "lua":
if not self.override_methods.has_key(registration_name):
self.override_methods[registration_name] = m
else:
previous_m = self.override_methods[registration_name]
if isinstance(previous_m, NativeOverloadedFunction):
previous_m.append(m)
else:
self.override_methods[registration_name] = NativeOverloadedFunction([m, previous_m])
return False
if m.static:
if not self.static_methods.has_key(registration_name):
self.static_methods[registration_name] = m
else:
previous_m = self.static_methods[registration_name]
if isinstance(previous_m, NativeOverloadedFunction):
previous_m.append(m)
else:
self.static_methods[registration_name] = NativeOverloadedFunction([m, previous_m])
else:
if not self.methods.has_key(registration_name):
self.methods[registration_name] = m
else:
previous_m = self.methods[registration_name]
if isinstance(previous_m, NativeOverloadedFunction):
previous_m.append(m)
else:
self.methods[registration_name] = NativeOverloadedFunction([m, previous_m])
return True
elif self._current_visibility == cindex.AccessSpecifierKind.PUBLIC and cursor.kind == cindex.CursorKind.CONSTRUCTOR and not self.is_abstract:
# Skip copy constructor
if cursor.displayname == self.class_name + "(const " + self.namespaced_class_name + " &)":
# print "Skip copy constructor: " + cursor.displayname
return True
m = NativeFunction(cursor)
m.is_constructor = True
self.has_constructor = True
if not self.methods.has_key('constructor'):
self.methods['constructor'] = m
else:
previous_m = self.methods['constructor']
if isinstance(previous_m, NativeOverloadedFunction):
previous_m.append(m)
else:
m = NativeOverloadedFunction([m, previous_m])
m.is_constructor = True
self.methods['constructor'] = m
return True
# else:
# print >> sys.stderr, "unknown cursor: %s - %s" % (cursor.kind, cursor.displayname)
return False
class Generator(object):
def __init__(self, opts):
self.index = cindex.Index.create()
self.outdir = opts['outdir']
self.prefix = opts['prefix']
self.headers = opts['headers'].split(' ')
self.classes = opts['classes']
self.classes_need_extend = opts['classes_need_extend']
self.classes_have_no_parents = opts['classes_have_no_parents'].split(' ')
self.base_classes_to_skip = opts['base_classes_to_skip'].split(' ')
self.abstract_classes = opts['abstract_classes'].split(' ')
self.clang_args = opts['clang_args']
self.target = opts['target']
self.remove_prefix = opts['remove_prefix']
self.target_ns = opts['target_ns']
self.cpp_ns = opts['cpp_ns']
self.impl_file = None
self.head_file = None
self.skip_classes = {}
self.generated_classes = {}
self.rename_functions = {}
self.rename_classes = {}
self.out_file = opts['out_file']
self.script_control_cpp = opts['script_control_cpp'] == "yes"
self.script_type = opts['script_type']
self.macro_judgement = opts['macro_judgement']
if opts['skip']:
list_of_skips = re.split(",\n?", opts['skip'])
for skip in list_of_skips:
class_name, methods = skip.split("::")
self.skip_classes[class_name] = []
match = re.match("\[([^]]+)\]", methods)
if match:
self.skip_classes[class_name] = match.group(1).split(" ")
else:
raise Exception("invalid list of skip methods")
if opts['rename_functions']:
list_of_function_renames = re.split(",\n?", opts['rename_functions'])
for rename in list_of_function_renames:
class_name, methods = rename.split("::")
self.rename_functions[class_name] = {}
match = re.match("\[([^]]+)\]", methods)
if match:
list_of_methods = match.group(1).split(" ")
for pair in list_of_methods:
k, v = pair.split("=")
self.rename_functions[class_name][k] = v
else:
raise Exception("invalid list of rename methods")
if opts['rename_classes']:
list_of_class_renames = re.split(",\n?", opts['rename_classes'])
for rename in list_of_class_renames:
class_name, renamed_class_name = rename.split("::")
self.rename_classes[class_name] = renamed_class_name
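    # Illustrative sketch of the option formats parsed above (class and method
    # names are hypothetical):
    #   skip             = "Sprite::[initWithFile setBlendFunc], Node::[*]"
    #   rename_functions = "Sprite::[setTexture=setTextureNative]"
    #   rename_classes   = "SimpleAudioEngine::AudioEngine"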
def should_rename_function(self, class_name, method_name):
if self.rename_functions.has_key(class_name) and self.rename_functions[class_name].has_key(method_name):
# print >> sys.stderr, "will rename %s to %s" % (method_name, self.rename_functions[class_name][method_name])
return self.rename_functions[class_name][method_name]
return None
def get_class_or_rename_class(self, class_name):
if self.rename_classes.has_key(class_name):
# print >> sys.stderr, "will rename %s to %s" % (method_name, self.rename_functions[class_name][method_name])
return self.rename_classes[class_name]
return class_name
def should_skip(self, class_name, method_name, verbose=False):
if class_name == "*" and self.skip_classes.has_key("*"):
for func in self.skip_classes["*"]:
if re.match(func, method_name):
return True
else:
for key in self.skip_classes.iterkeys():
if key == "*" or re.match("^" + key + "$", class_name):
if verbose:
print "%s in skip_classes" % (class_name)
if len(self.skip_classes[key]) == 1 and self.skip_classes[key][0] == "*":
if verbose:
print "%s will be skipped completely" % (class_name)
return True
if method_name != None:
for func in self.skip_classes[key]:
if re.match(func, method_name):
if verbose:
print "%s will skip method %s" % (class_name, method_name)
return True
if verbose:
print "%s will be accepted (%s, %s)" % (class_name, key, self.skip_classes[key])
return False
def in_listed_classes(self, class_name):
"""
returns True if the class is in the list of required classes and it's not in the skip list
"""
for key in self.classes:
md = re.match("^" + key + "$", class_name)
if md and not self.should_skip(class_name, None):
return True
return False
def in_listed_extend_classed(self, class_name):
"""
returns True if the class is in the list of required classes that need to extend
"""
for key in self.classes_need_extend:
md = re.match("^" + key + "$", class_name)
if md:
return True
return False
def sorted_classes(self):
'''
sorted classes in order of inheritance
'''
sorted_list = []
for class_name in self.generated_classes.iterkeys():
nclass = self.generated_classes[class_name]
sorted_list += self._sorted_parents(nclass)
# remove dupes from the list
no_dupes = []
[no_dupes.append(i) for i in sorted_list if not no_dupes.count(i)]
return no_dupes
def _sorted_parents(self, nclass):
'''
returns the sorted list of parents for a native class
'''
sorted_parents = []
for p in nclass.parents:
if p.class_name in self.generated_classes.keys():
sorted_parents += self._sorted_parents(p)
if nclass.class_name in self.generated_classes.keys():
sorted_parents.append(nclass.class_name)
return sorted_parents
def generate_code(self):
# must read the yaml file first
stream = file(os.path.join(self.target, "conversions.yaml"), "r")
data = yaml.load(stream)
self.config = data
implfilepath = os.path.join(self.outdir, self.out_file + ".cpp")
headfilepath = os.path.join(self.outdir, self.out_file + ".hpp")
docfiledir = self.outdir + "/api"
if not os.path.exists(docfiledir):
os.makedirs(docfiledir)
if self.script_type == "lua":
docfilepath = os.path.join(docfiledir, self.out_file + "_api.lua")
else:
docfilepath = os.path.join(docfiledir, self.out_file + "_api.js")
self.impl_file = open(implfilepath, "w+")
self.head_file = open(headfilepath, "w+")
self.doc_file = open(docfilepath, "w+")
layout_h = Template(file=os.path.join(self.target, "templates", "layout_head.h"),
searchList=[self])
layout_c = Template(file=os.path.join(self.target, "templates", "layout_head.c"),
searchList=[self])
apidoc_ns_script = Template(file=os.path.join(self.target, "templates", "apidoc_ns.script"),
searchList=[self])
self.head_file.write(str(layout_h))
self.impl_file.write(str(layout_c))
self.doc_file.write(str(apidoc_ns_script))
self._parse_headers()
layout_h = Template(file=os.path.join(self.target, "templates", "layout_foot.h"),
searchList=[self])
layout_c = Template(file=os.path.join(self.target, "templates", "layout_foot.c"),
searchList=[self])
self.head_file.write(str(layout_h))
self.impl_file.write(str(layout_c))
if self.script_type == "lua":
apidoc_ns_foot_script = Template(file=os.path.join(self.target, "templates", "apidoc_ns_foot.script"),
searchList=[self])
self.doc_file.write(str(apidoc_ns_foot_script))
self.impl_file.close()
self.head_file.close()
self.doc_file.close()
def _pretty_print(self, diagnostics):
print("====\nErrors in parsing headers:")
severities=['Ignored', 'Note', 'Warning', 'Error', 'Fatal']
for idx, d in enumerate(diagnostics):
print "%s. <severity = %s,\n location = %r,\n details = %r>" % (
idx+1, severities[d.severity], d.location, d.spelling)
print("====\n")
def _parse_headers(self):
for header in self.headers:
tu = self.index.parse(header, self.clang_args)
if len(tu.diagnostics) > 0:
self._pretty_print(tu.diagnostics)
is_fatal = False
for d in tu.diagnostics:
if d.severity >= cindex.Diagnostic.Error:
is_fatal = True
if is_fatal:
print("*** Found errors - can not continue")
raise Exception("Fatal error in parsing headers")
self._deep_iterate(tu.cursor)
def _deep_iterate(self, cursor, depth=0):
# get the canonical type
if cursor.kind == cindex.CursorKind.CLASS_DECL:
if cursor == cursor.type.get_declaration() and len(cursor.get_children_array()) > 0:
is_targeted_class = True
if self.cpp_ns:
is_targeted_class = False
namespaced_name = get_namespaced_name(cursor)
for ns in self.cpp_ns:
if namespaced_name.startswith(ns):
is_targeted_class = True
break
if is_targeted_class and self.in_listed_classes(cursor.displayname):
if not self.generated_classes.has_key(cursor.displayname):
nclass = NativeClass(cursor, self)
nclass.generate_code()
self.generated_classes[cursor.displayname] = nclass
return
for node in cursor.get_children():
# print("%s %s - %s" % (">" * depth, node.displayname, node.kind))
self._deep_iterate(node, depth + 1)
def scriptname_from_native(self, namespace_class_name, namespace_name):
script_ns_dict = self.config['conversions']['ns_map']
for (k, v) in script_ns_dict.items():
if k == namespace_name:
return namespace_class_name.replace("*","").replace("const ", "").replace(k, v)
if namespace_class_name.find("::") >= 0:
if namespace_class_name.find("std::") == 0:
return namespace_class_name
else:
raise Exception("The namespace (%s) conversion wasn't set in 'ns_map' section of the conversions.yaml" % namespace_class_name)
else:
return namespace_class_name.replace("*","").replace("const ", "")
def is_cocos_class(self, namespace_class_name):
script_ns_dict = self.config['conversions']['ns_map']
for (k, v) in script_ns_dict.items():
if namespace_class_name.find("std::") == 0:
return False
if namespace_class_name.find(k) >= 0:
return True
return False
def scriptname_cocos_class(self, namespace_class_name):
script_ns_dict = self.config['conversions']['ns_map']
for (k, v) in script_ns_dict.items():
if namespace_class_name.find(k) >= 0:
return namespace_class_name.replace("*","").replace("const ", "").replace(k,v)
raise Exception("The namespace (%s) conversion wasn't set in 'ns_map' section of the conversions.yaml" % namespace_class_name)
def js_typename_from_natve(self, namespace_class_name):
script_ns_dict = self.config['conversions']['ns_map']
if namespace_class_name.find("std::") == 0:
if namespace_class_name.find("std::string") == 0:
return "String"
if namespace_class_name.find("std::vector") == 0:
return "Array"
if namespace_class_name.find("std::map") == 0 or namespace_class_name.find("std::unordered_map") == 0:
return "map_object"
if namespace_class_name.find("std::function") == 0:
return "function"
for (k, v) in script_ns_dict.items():
if namespace_class_name.find(k) >= 0:
if namespace_class_name.find("cocos2d::Vec2") == 0:
return "vec2_object"
if namespace_class_name.find("cocos2d::Vec3") == 0:
return "vec3_object"
if namespace_class_name.find("cocos2d::Vec4") == 0:
return "vec4_object"
if namespace_class_name.find("cocos2d::Mat4") == 0:
return "mat4_object"
if namespace_class_name.find("cocos2d::Vector") == 0:
return "Array"
if namespace_class_name.find("cocos2d::Map") == 0:
return "map_object"
if namespace_class_name.find("cocos2d::Point") == 0:
return "point_object"
if namespace_class_name.find("cocos2d::Size") == 0:
return "size_object"
if namespace_class_name.find("cocos2d::Rect") == 0:
return "rect_object"
if namespace_class_name.find("cocos2d::Color3B") == 0:
return "color3b_object"
if namespace_class_name.find("cocos2d::Color4B") == 0:
return "color4b_object"
if namespace_class_name.find("cocos2d::Color4F") == 0:
return "color4f_object"
else:
return namespace_class_name.replace("*","").replace("const ", "").replace(k,v)
return namespace_class_name.replace("*","").replace("const ", "")
def lua_typename_from_natve(self, namespace_class_name, is_ret = False):
script_ns_dict = self.config['conversions']['ns_map']
if namespace_class_name.find("std::") == 0:
if namespace_class_name.find("std::string") == 0:
return "string"
if namespace_class_name.find("std::vector") == 0:
return "array_table"
if namespace_class_name.find("std::map") == 0 or namespace_class_name.find("std::unordered_map") == 0:
return "map_table"
if namespace_class_name.find("std::function") == 0:
return "function"
for (k, v) in script_ns_dict.items():
if namespace_class_name.find(k) >= 0:
if namespace_class_name.find("cocos2d::Vec2") == 0:
return "vec2_table"
if namespace_class_name.find("cocos2d::Vec3") == 0:
return "vec3_table"
if namespace_class_name.find("cocos2d::Vec4") == 0:
return "vec4_table"
if namespace_class_name.find("cocos2d::Vector") == 0:
return "array_table"
if namespace_class_name.find("cocos2d::Mat4") == 0:
return "mat4_table"
if namespace_class_name.find("cocos2d::Map") == 0:
return "map_table"
if namespace_class_name.find("cocos2d::Point") == 0:
return "point_table"
if namespace_class_name.find("cocos2d::Size") == 0:
return "size_table"
if namespace_class_name.find("cocos2d::Rect") == 0:
return "rect_table"
if namespace_class_name.find("cocos2d::Color3B") == 0:
return "color3b_table"
if namespace_class_name.find("cocos2d::Color4B") == 0:
return "color4b_table"
if namespace_class_name.find("cocos2d::Color4F") == 0:
return "color4f_table"
if is_ret == 1:
return namespace_class_name.replace("*","").replace("const ", "").replace(k,"")
else:
return namespace_class_name.replace("*","").replace("const ", "").replace(k,v)
return namespace_class_name.replace("*","").replace("const ","")
def api_param_name_from_native(self,native_name):
lower_name = native_name.lower()
if lower_name == "std::string":
return "str"
if lower_name.find("unsigned ") >= 0 :
return native_name.replace("unsigned ","")
if lower_name.find("unordered_map") >= 0 or lower_name.find("map") >= 0:
return "map"
if lower_name.find("vector") >= 0 :
return "array"
if lower_name == "std::function":
return "func"
else:
return lower_name
def js_ret_name_from_native(self, namespace_class_name, is_enum) :
if self.is_cocos_class(namespace_class_name):
if namespace_class_name.find("cocos2d::Vector") >=0:
return "new Array()"
if namespace_class_name.find("cocos2d::Map") >=0:
return "map_object"
if is_enum:
return 0
else:
return self.scriptname_cocos_class(namespace_class_name)
lower_name = namespace_class_name.lower()
if lower_name.find("unsigned ") >= 0:
lower_name = lower_name.replace("unsigned ","")
if lower_name == "std::string":
return ""
if lower_name == "char" or lower_name == "short" or lower_name == "int" or lower_name == "float" or lower_name == "double" or lower_name == "long":
return 0
if lower_name == "bool":
return "false"
if lower_name.find("std::vector") >= 0 or lower_name.find("vector") >= 0:
return "new Array()"
if lower_name.find("std::map") >= 0 or lower_name.find("std::unordered_map") >= 0 or lower_name.find("unordered_map") >= 0 or lower_name.find("map") >= 0:
return "map_object"
if lower_name == "std::function":
return "func"
else:
return namespace_class_name
def main():
from optparse import OptionParser
parser = OptionParser("usage: %prog [options] {configfile}")
parser.add_option("-s", action="store", type="string", dest="section",
help="sets a specific section to be converted")
parser.add_option("-t", action="store", type="string", dest="target",
help="specifies the target vm. Will search for TARGET.yaml")
parser.add_option("-o", action="store", type="string", dest="outdir",
help="specifies the output directory for generated C++ code")
parser.add_option("-n", action="store", type="string", dest="out_file",
help="specifcies the name of the output file, defaults to the prefix in the .ini file")
(opts, args) = parser.parse_args()
# script directory
workingdir = os.path.dirname(inspect.getfile(inspect.currentframe()))
if len(args) == 0:
parser.error('invalid number of arguments')
userconfig = ConfigParser.SafeConfigParser()
userconfig.read('userconf.ini')
print 'Using userconfig \n ', userconfig.items('DEFAULT')
config = ConfigParser.SafeConfigParser()
config.read(args[0])
if (0 == len(config.sections())):
raise Exception("No sections defined in config file")
sections = []
if opts.section:
if (opts.section in config.sections()):
sections = []
sections.append(opts.section)
else:
raise Exception("Section not found in config file")
else:
print("processing all sections")
sections = config.sections()
# find available targets
targetdir = os.path.join(workingdir, "targets")
targets = []
if (os.path.isdir(targetdir)):
targets = [entry for entry in os.listdir(targetdir)
if (os.path.isdir(os.path.join(targetdir, entry)))]
if 0 == len(targets):
raise Exception("No targets defined")
if opts.target:
if (opts.target in targets):
targets = []
targets.append(opts.target)
if opts.outdir:
outdir = opts.outdir
else:
outdir = os.path.join(workingdir, "gen")
if not os.path.exists(outdir):
os.makedirs(outdir)
for t in targets:
        # Fix for hidden '.svn', '.cvs' and '.git' etc. folders - these must be ignored, otherwise they will be interpreted as a target.
if t == ".svn" or t == ".cvs" or t == ".git" or t == ".gitignore":
continue
print "\n.... Generating bindings for target", t
for s in sections:
print "\n.... .... Processing section", s, "\n"
gen_opts = {
'prefix': config.get(s, 'prefix'),
'headers': (config.get(s, 'headers' , 0, dict(userconfig.items('DEFAULT')))),
'classes': config.get(s, 'classes').split(' '),
'classes_need_extend': config.get(s, 'classes_need_extend').split(' ') if config.has_option(s, 'classes_need_extend') else [],
'clang_args': (config.get(s, 'extra_arguments', 0, dict(userconfig.items('DEFAULT'))) or "").split(" "),
'target': os.path.join(workingdir, "targets", t),
'outdir': outdir,
'remove_prefix': config.get(s, 'remove_prefix'),
'target_ns': config.get(s, 'target_namespace'),
'cpp_ns': config.get(s, 'cpp_namespace').split(' ') if config.has_option(s, 'cpp_namespace') else None,
'classes_have_no_parents': config.get(s, 'classes_have_no_parents'),
'base_classes_to_skip': config.get(s, 'base_classes_to_skip'),
'abstract_classes': config.get(s, 'abstract_classes'),
'skip': config.get(s, 'skip'),
'rename_functions': config.get(s, 'rename_functions'),
'rename_classes': config.get(s, 'rename_classes'),
'out_file': opts.out_file or config.get(s, 'prefix'),
'script_control_cpp': config.get(s, 'script_control_cpp') if config.has_option(s, 'script_control_cpp') else 'no',
'script_type': t,
'macro_judgement': config.get(s, 'macro_judgement') if config.has_option(s, 'macro_judgement') else None
}
generator = Generator(gen_opts)
generator.generate_code()
if __name__ == '__main__':
try:
main()
except Exception as e:
traceback.print_exc()
sys.exit(1)
|
|
from __future__ import absolute_import, division, print_function
import os
import imp
import sys
from collections import namedtuple
"""
Objects used to configure Glue at runtime.
"""
__all__ = ['Registry', 'SettingRegistry', 'ExporterRegistry',
'ColormapRegistry', 'DataFactoryRegistry', 'QtClientRegistry',
'LinkFunctionRegistry', 'LinkHelperRegistry', 'ViewerToolRegistry',
'LayerActionRegistry', 'ProfileFitterRegistry', 'qt_client', 'data_factory',
'link_function', 'link_helper', 'colormaps', 'exporters', 'settings',
'fit_plugin', 'auto_refresh', 'importer', 'DictRegistry',
'preference_panes', 'PreferencePanesRegistry',
'DataExporterRegistry', 'data_exporter', 'layer_action',
'SubsetMaskExporterRegistry', 'SubsetMaskImporterRegistry',
'StartupActionRegistry', 'startup_action', 'QtFixedLayoutTabRegistry',
'qt_fixed_layout_tab']
CFG_DIR = os.path.join(os.path.expanduser('~'), '.glue')
class Registry(object):
"""Container to hold groups of objects or settings.
Registry instances are used by Glue to track objects
used for various tasks like data linking, widget creation, etc.
They have the following properties:
- A `members` property, which lists each item in the registry
- A `default_members` function, which can be overridden to lazily
initialize the members list
- A call interface, allowing the instance to be used as a decorator
for users to add new items to the registry in their config files
"""
def __init__(self):
self._members = []
self._lazy_members = []
self._loaded = False
@property
def members(self):
""" A list of the members in the registry.
The return value is a list. The contents of the list
are specified in each subclass"""
self._load_lazy_members()
if not self._loaded:
self._members = self.default_members() + self._members
self._loaded = True
return self._members
def default_members(self):
"""The member items provided by default. These are put in this
method so that code is only imported when needed"""
return []
def add(self, value):
"""
Add a new item to the registry.
"""
self._members.append(value)
def lazy_add(self, value):
"""
Add a reference to a plugin which will be loaded when needed.
"""
self._lazy_members.append(value)
def _load_lazy_members(self):
from glue.plugins import load_plugin
while self._lazy_members:
plugin = self._lazy_members.pop()
load_plugin(plugin)
def __iter__(self):
return iter(self.members)
def __len__(self):
return len(self.members)
def __contains__(self, value):
return value in self.members
def __call__(self, arg):
"""This is provided so that registry instances can be used
as decorators. The decorators should add the decorated
code object to the registry, and return the original function"""
self.add(arg)
return arg
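# Illustrative sketch (not executed): because Registry instances are callable,
# they can be used as decorators, typically in a user config file. The registry
# instance and function below are hypothetical.
#
#   my_registry = Registry()
#
#   @my_registry
#   def my_item():
#       ...
#
#   # my_item is now in my_registry.members and is returned unchanged.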
class DictRegistry(Registry):
"""
Base class for registries that are based on dictionaries instead of lists
of objects.
"""
def __init__(self):
self._members = {}
self._lazy_members = []
self._loaded = False
@property
def members(self):
self._load_lazy_members()
if not self._loaded:
defaults = self.default_members()
for key in defaults:
if key in self._members:
self._members[key].extend(defaults[key])
else:
self._members[key] = defaults[key]
self._loaded = True
return self._members
def default_members(self):
return {}
class SettingRegistry(DictRegistry):
"""Stores key/value settings that code can use to customize Glue
Each member is a tuple of 3 items:
- key: the setting name [str]
- value: the default setting [object]
- validator: A function which tests whether the input is a valid value,
and raises a ValueError if invalid. On valid input,
returns the (possibly sanitized) setting value.
"""
def __init__(self):
super(SettingRegistry, self).__init__()
self._validators = {}
self._defaults = {}
def add(self, key, default=None, validator=None):
if validator is None:
validator = lambda x: x
self._defaults[key] = validator(default)
self._validators[key] = validator
def __getattr__(self, attr):
if attr.startswith('_'):
raise AttributeError("No such setting: {0}".format(attr))
else:
if attr in self._members:
return self._members[attr]
elif attr in self._defaults:
return self._defaults[attr]
else:
raise AttributeError("No such setting: {0}".format(attr))
def __setattr__(self, attr, value):
if attr.startswith('_'):
object.__setattr__(self, attr, value)
elif attr in self:
self._members[attr] = self._validators[attr](value)
else:
raise AttributeError("No such setting: {0}".format(attr))
def __dir__(self):
return sorted(self._members.keys())
def __contains__(self, setting):
return setting in self._defaults
def __iter__(self):
for key in self._defaults:
value = self._members.get(key, self._defaults[key])
yield key, value, self._validators[key]
def reset_defaults(self):
self._members.clear()
def is_default(self, setting):
return setting in self._defaults and not setting in self._members
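# Illustrative sketch (not executed): registering a setting with a validator and
# reading/writing it attribute-style. The setting name and validator are hypothetical.
#
#   settings = SettingRegistry()
#   settings.add('MAX_POINTS', default=1000, validator=int)
#   settings.MAX_POINTS           # -> 1000 (falls back to the default)
#   settings.MAX_POINTS = '2000'  # validated/coerced to 2000 via int()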
class QGlueParserRegistry(Registry):
"""
Registry for parsers that can be used to interpret arguments to the
:func:`~glue.qglue` function.
The members property is a list of parsers, each represented as a named tuple
    with ``data_class``, ``parser`` and ``priority`` attributes, where ``data_class``
    defines the class for which to use the parser, and ``parser`` is a function
that takes the input data and returns a list of glue
:class:`~glue.core.Data` objects. The ``parser`` functions should take two
arguments: the variable containing the data being parsed, and a label. In
addition, the priority (defaulting to 0) can be specified in case one wants
to make sure sub-classes get tested before more general classes. The
priority should be a numerical value, and the larger it is the higher the
priority.
"""
item = namedtuple('DataFactory', 'data_class parser priority')
def add(self, data_class, parser, priority=0):
"""
Add a new parser
Parameters
----------
data_class : class
            The type of data for which to use the specified parser
parser : func
The function to use to parse the input data
priority : int, optional
The priority, which is used to determine the order in which to check
the parsers.
"""
self.members.append(self.item(data_class, parser, priority))
def __call__(self, data_class, priority=0):
def adder(func):
if isinstance(data_class, tuple):
for dc in data_class:
self.add(dc, func, priority=priority)
else:
self.add(data_class, func, priority=priority)
return func
return adder
def __iter__(self):
for member in sorted(self.members, key=lambda x: -x.priority):
yield member
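# Illustrative sketch (not executed): registering a parser for a hypothetical data
# class via the decorator interface; parsers with higher priority are tried first.
# The registry instance name, ``MyDataClass`` and the body are placeholders.
#
#   @parser_registry_instance(MyDataClass, priority=10)
#   def parse_my_data(data, label):
#       return [make_glue_data(data, label)]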
class DataImportRegistry(Registry):
"""
Stores functions which can import data.
The members property is a list of importers, each represented as a
``(label, load_function)`` tuple. The ``load_function`` should take no
arguments and return a list of :class:`~glue.core.data.Data` objects.
"""
def add(self, label, importer):
"""
Add a new importer
:param label: Short label for the importer
:type label: str
:param importer: importer function
:type importer: function()
"""
self.members.append((label, importer))
def __call__(self, label):
def adder(func):
self.add(label, func)
return func
return adder
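# Illustrative sketch (not executed): registering a data importer. The registry
# instance name, label and loader are hypothetical; the loader takes no arguments
# and returns a list of Data objects.
#
#   @importer_registry_instance("Load demo data")
#   def load_demo():
#       return [make_demo_data()]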
class MenubarPluginRegistry(Registry):
"""
Stores menubar plugins.
The members property is a list of menubar plugins, each represented as a
``(label, function)`` tuple. The ``function`` should take two items which
are a reference to the session and to the data collection respectively.
"""
def add(self, label, function):
"""
Add a new menubar plugin
:param label: Short label for the plugin
:type label: str
:param function: function
:type function: function()
"""
self.members.append((label, function))
def __call__(self, label):
def adder(func):
self.add(label, func)
return func
return adder
class PreferencePanesRegistry(DictRegistry):
"""
Stores preference panes
The members property is a list of tuples of Qt widget classes that can have
their own tab in the preferences window.
"""
def add(self, label, widget_cls):
self._members[label] = widget_cls
def __iter__(self):
for label in self._members:
yield label, self._members[label]
class ExporterRegistry(Registry):
"""Stores functions which can export an applocation to an output file
The members property is a list of exporters, each represented
as a (label, save_function, can_save_function, outmode) tuple.
save_function takes an (application, path) as input, and saves
the session
can_save_function takes an application as input, and raises an
exception if saving this session is not possible
outmode is a string, with one of 3 values:
'file': indicates that exporter creates a file
'directory': exporter creates a directory
'label': exporter doesn't write to disk, but needs a label
"""
def add(self, label, exporter, checker, outmode=None):
"""
Add a new exporter
Parameters
----------
label : str
Short label for the exporter
exporter : func
Exporter function which takes two arguments: the application and
optionally the path or label to create. This function should raise
an exception if export isn't possible.
checker : func
Function that checks if saving is possible, which takes one
argument: the application.
outmode : str or `None`
Indicates what kind of output is created. This can be either set to
``'file'``, ``'directory'``, ``'label'``, or `None`.
"""
self.members.append((label, exporter, checker, outmode))
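# Illustrative sketch (not executed): registering a session exporter. The registry
# instance name, label and functions are hypothetical; ``outmode='file'`` means the
# exporter writes a single file.
#
#   def save_session(application, path):
#       ...  # write the session to ``path``
#   def can_save(application):
#       ...  # raise an exception if saving is not possible
#   exporter_registry_instance.add('Demo export', save_session, can_save, outmode='file')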
class ColormapRegistry(Registry):
"""Stores colormaps for the Image Viewer. The members property is
a list of colormaps, each represented as a [name,cmap] pair.
"""
def default_members(self):
import matplotlib.cm as cm
members = []
members.append(['Gray', cm.gray])
members.append(['Purple-Blue', cm.PuBu])
members.append(['Yellow-Green-Blue', cm.YlGnBu])
members.append(['Yellow-Orange-Red', cm.YlOrRd])
members.append(['Red-Purple', cm.RdPu])
members.append(['Blue-Green', cm.BuGn])
members.append(['Hot', cm.hot])
members.append(['Red-Blue', cm.RdBu])
members.append(['Red-Yellow-Blue', cm.RdYlBu])
members.append(['Purple-Orange', cm.PuOr])
members.append(['Purple-Green', cm.PRGn])
return members
def add(self, label, cmap):
"""
Add colormap *cmap* with label *label*.
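        For example (using a standard matplotlib colormap)::
            import matplotlib.cm as cm
            colormaps.add('Viridis', cm.viridis)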
"""
self.members.append([label, cmap])
class DataFactoryRegistry(Registry):
"""Stores data factories. Data factories take filenames as input,
and return :class:`~glue.core.data.Data` instances
The members property returns a list of (function, label, identifier,
priority) namedtuples:
- Function is the factory that creates the data object
- label is a short human-readable description of the factory
- identifier is a function that takes ``(filename, **kwargs)`` as input
and returns True if the factory can open the file
- priority is a numerical value that indicates how confident the data
factory is that it should read the data, relative to other data
factories. For example, a highly specialized FITS reader for specific
FITS file types can be given a higher priority than the generic FITS
reader in order to take precedence over it.
New data factories can be registered via::
@data_factory('label_name', identifier=identifier, priority=10)
def new_factory(file_name):
...
If not specified, the priority defaults to 0.
"""
item = namedtuple('DataFactory', 'function label identifier priority deprecated')
def __call__(self, label, identifier=None, priority=None, default='', deprecated=False):
if identifier is None:
identifier = lambda *a, **k: False
if priority is None:
if deprecated:
priority = -1000
else:
priority = 0
def adder(func):
self.add(self.item(func, label, identifier, priority, deprecated))
return func
return adder
def __iter__(self):
for member in sorted(self.members, key=lambda x: (-x.priority, x.label)):
yield member
class DataExporterRegistry(Registry):
"""
Stores data exporters. Data exporters take a data/subset object as input
followed by a filename.
"""
item = namedtuple('DataFactory', 'function label extension')
def __call__(self, label, extension=[]):
def adder(func):
self.add(self.item(func, label, extension))
return func
return adder
def __iter__(self):
for member in sorted(self.members, key=lambda x: x.label):
yield member
class SubsetMaskExporterRegistry(DataExporterRegistry):
"""
Stores mask exporters. Mask exporters should take a filename followed by
a dictionary of Numpy boolean arrays all with the same dimensions.
"""
item = namedtuple('SubsetMaskExporter', 'function label extension')
class SubsetMaskImporterRegistry(DataExporterRegistry):
"""
Stores mask importers. Mask importers should take a filename and return a
dictionary of Numpy boolean arrays.
"""
item = namedtuple('SubsetMaskImporter', 'function label extension')
class QtClientRegistry(Registry):
"""
    Stores Qt widgets to visualize data.
The members property is a list of Qt widget classes
New widgets can be registered via::
@qt_client
class CustomWidget(QMainWindow):
...
"""
class QtFixedLayoutTabRegistry(Registry):
"""
Stores Qt pre-defined tabs (non-MDI)
New widgets can be registered via::
@qt_fixed_layout_tab
class CustomTab(QWidget):
...
"""
class ViewerToolRegistry(DictRegistry):
def add(self, tool_cls):
"""
        Add a tool class to the registry. The ``tool_id`` attribute on the
        tool_cls should be set, and is used by the viewers to indicate which
        tools they want to use.
"""
if tool_cls.tool_id in self.members:
raise ValueError("Tool ID '{0}' already registered".format(tool_cls.tool_id))
else:
self.members[tool_cls.tool_id] = tool_cls
def __call__(self, tool_cls):
self.add(tool_cls)
return tool_cls
class StartupActionRegistry(DictRegistry):
def add(self, startup_name, startup_function):
"""
Add a startup function to the registry. This is a function that will
get called once glue has been started and any data loaded, and can
be used to set up specific layouts and create links.
Startup actions are triggered by either specifying comma-separated names
of actions on the command-line::
glue --startup=mystartupaction
or by passing an iterable of startup action names to the ``startup``
keyword of ``GlueApplication``.
The startup function will be given the session object and the data
collection object.
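        For example, a hypothetical action can be registered via::
            @startup_action('mystartupaction')
            def my_startup_action(session, data_collection):
                ...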
"""
if startup_name in self.members:
raise ValueError("A startup action with the name '{0}' already exists".format(startup_name))
else:
self.members[startup_name] = startup_function
def __call__(self, name):
def adder(func):
self.add(name, func)
return func
return adder
class LinkFunctionRegistry(Registry):
"""Stores functions to convert between quantities
    The members property is a list of (function, info_string,
output_labels) namedtuples. ``info_string`` describes what the
function does. ``output_labels`` is a list of names for each output.
    ``category`` is a category in which the link function will appear (defaults
to 'General').
New link functions can be registered via
@link_function(info="maps degrees to arcseconds",
output_labels=['arcsec'])
def degrees2arcsec(degrees):
            return degrees * 3600
Link functions are expected to receive and return numpy arrays
"""
item = namedtuple('LinkFunction', 'function info output_labels category')
def __call__(self, info="", output_labels=None, category='General'):
out = output_labels or []
def adder(func):
self.add(self.item(func, info, out, category))
return func
return adder
class LayerActionRegistry(Registry):
"""
    Stores custom menu actions available when the user selects one or more
    datasets, subset groups, or subsets in the data collection view.
    The members property is a list of named tuples with the following
attributes:
* ``label``: the user-facing name of the action
* ``tooltip``: the text that appears when hovering with the mouse over the action
* ``callback``: the function to call when the action is triggered
* ``icon``: an icon image to use for the layer action
* ``single``: whether to show this action only when selecting single layers (default: `False`)
* ``data``: if ``single`` is `True` whether to only show the action when selecting a dataset
* ``subset_group``: if ``single`` is `True` whether to only show the action when selecting a subset group
* ``subset``: if ``single`` is `True` whether to only show the action when selecting a subset
The callback function is called with two arguments. If ``single`` is
`True`, the first argument is the selected layer, otherwise it is the list
of selected layers. The second argument is the
`~glue.core.data_collection.DataCollection` object.
"""
item = namedtuple('LayerAction', 'label tooltip callback icon single data subset_group, subset')
def __call__(self, label, callback=None, tooltip=None, icon=None, single=False,
data=False, subset_group=False, subset=False):
# Backward-compatibility
if callback is not None:
self.add(self.item(label, tooltip, callback, icon, True,
False, False, True))
return True
def adder(func):
self.add(self.item(label, tooltip, func, icon, single,
data, subset_group, subset))
return func
return adder
class LinkHelperRegistry(Registry):
"""Stores helper objects that compute many ComponentLinks at once
The members property is a list of (object, info_string,
input_labels) tuples. `Object` is the link helper. `info_string`
describes what `object` does. `input_labels` is a list labeling
    the inputs. ``category`` is a category in which the link function will appear
(defaults to 'General').
Each link helper takes a list of ComponentIDs as inputs, and
returns an iterable object (e.g. list) of ComponentLinks.
New helpers can be registered via
@link_helper('Links degrees and arcseconds in both directions',
['degree', 'arcsecond'])
def new_helper(degree, arcsecond):
return [ComponentLink([degree], arcsecond, using=lambda d: d*3600),
ComponentLink([arcsecond], degree, using=lambda a: a/3600)]
"""
item = namedtuple('LinkHelper', 'helper info input_labels category')
def __call__(self, info, input_labels, category='General'):
def adder(func):
self.add(self.item(func, info, input_labels, category))
return func
return adder
class ProfileFitterRegistry(Registry):
item = namedtuple('ProfileFitter', 'cls')
def add(self, cls):
"""
        Add a profile fitter class *cls* to the registry.
"""
self.members.append(cls)
def default_members(self):
from glue.core.fitters import __FITTERS__
return list(__FITTERS__)
class BooleanSetting(object):
def __init__(self, default=True):
self.state = default
def __call__(self, state=None):
if state not in [None, True, False]:
raise ValueError("Invalid True/False setting: %s" % state)
if state is not None:
self.state = state
return self.state
qt_client = QtClientRegistry()
qt_fixed_layout_tab = QtFixedLayoutTabRegistry()
viewer_tool = ViewerToolRegistry()
link_function = LinkFunctionRegistry()
link_helper = LinkHelperRegistry()
colormaps = ColormapRegistry()
importer = DataImportRegistry()
exporters = ExporterRegistry()
settings = SettingRegistry()
fit_plugin = ProfileFitterRegistry()
layer_action = LayerActionRegistry()
menubar_plugin = MenubarPluginRegistry()
preference_panes = PreferencePanesRegistry()
qglue_parser = QGlueParserRegistry()
startup_action = StartupActionRegistry()
# watch loaded data files for changes?
auto_refresh = BooleanSetting(False)
enable_contracts = BooleanSetting(False)
# Data and subset I/O
data_factory = DataFactoryRegistry()
data_exporter = DataExporterRegistry()
subset_mask_exporter = SubsetMaskExporterRegistry()
subset_mask_importer = SubsetMaskImporterRegistry()
# Backward-compatibility
single_subset_action = layer_action
def load_configuration(search_path=None):
"""
Find and import a config.py file
Returns:
The module object
Raises:
Exception, if no module was found
"""
search_order = search_path or _default_search_order()
result = imp.new_module('config')
for config_file in search_order:
dir = os.path.dirname(config_file)
try:
sys.path.append(dir)
config = imp.load_source('config', config_file)
result = config
except IOError:
pass
except Exception as e:
            raise type(e)("Error loading config file %s:\n%s" % (config_file, e)).with_traceback(sys.exc_info()[2])
finally:
sys.path.remove(dir)
return result
def _default_search_order():
"""
The default configuration file search order:
* current working directory
* environ var GLUERC
* HOME/.glue/config.py
* Glue's own default config
"""
search_order = [os.path.join(os.getcwd(), 'config.py')]
if 'GLUERC' in os.environ:
search_order.append(os.environ['GLUERC'])
search_order.append(os.path.join(CFG_DIR, 'config.py'))
return search_order[::-1]
###### Now define global settings ######
GRAY = '#7F7F7F'
BLUE = "#1F78B4"
GREEN = "#33A02C"
RED = "#E31A1C"
ORANGE = "#FF7F00"
PURPLE = "#6A3D9A"
YELLOW = "#FFFF99"
BROWN = "#8C510A"
PINK = "#FB9A99"
LIGHT_BLUE = "#A6CEE3"
LIGHT_GREEN = "#B2DF8A"
LIGHT_RED = "#FB9A99"
LIGHT_ORANGE = "#FDBF6F"
LIGHT_PURPLE = "#CAB2D6"
settings.add('SUBSET_COLORS', [RED, GREEN, BLUE, BROWN, ORANGE, PURPLE, PINK], validator=list)
settings.add('DATA_COLOR', '0.35')
settings.add('DATA_ALPHA', 0.8, validator=float)
settings.add('BACKGROUND_COLOR', '#FFFFFF')
settings.add('FOREGROUND_COLOR', '#000000')
settings.add('SHOW_LARGE_DATA_WARNING', True, validator=bool)
|
|
"""Support for Luftdaten stations."""
import logging
from luftdaten import Luftdaten
from luftdaten.exceptions import LuftdatenError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONF_MONITORED_CONDITIONS,
CONF_SCAN_INTERVAL,
CONF_SENSORS,
CONF_SHOW_ON_MAP,
PERCENTAGE,
PRESSURE_HPA,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from .config_flow import configured_sensors, duplicate_stations
from .const import CONF_SENSOR_ID, DEFAULT_SCAN_INTERVAL, DOMAIN
_LOGGER = logging.getLogger(__name__)
DATA_LUFTDATEN = "luftdaten"
DATA_LUFTDATEN_CLIENT = "data_luftdaten_client"
DATA_LUFTDATEN_LISTENER = "data_luftdaten_listener"
DEFAULT_ATTRIBUTION = "Data provided by luftdaten.info"
PLATFORMS = ["sensor"]
SENSOR_HUMIDITY = "humidity"
SENSOR_PM10 = "P1"
SENSOR_PM2_5 = "P2"
SENSOR_PRESSURE = "pressure"
SENSOR_PRESSURE_AT_SEALEVEL = "pressure_at_sealevel"
SENSOR_TEMPERATURE = "temperature"
TOPIC_UPDATE = f"{DOMAIN}_data_update"
SENSORS = {
SENSOR_TEMPERATURE: ["Temperature", "mdi:thermometer", TEMP_CELSIUS],
SENSOR_HUMIDITY: ["Humidity", "mdi:water-percent", PERCENTAGE],
SENSOR_PRESSURE: ["Pressure", "mdi:arrow-down-bold", PRESSURE_HPA],
SENSOR_PRESSURE_AT_SEALEVEL: ["Pressure at sealevel", "mdi:download", PRESSURE_HPA],
SENSOR_PM10: [
"PM10",
"mdi:thought-bubble",
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
],
SENSOR_PM2_5: [
"PM2.5",
"mdi:thought-bubble-outline",
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
],
}
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)): vol.All(
cv.ensure_list, [vol.In(SENSORS)]
)
}
)
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_SENSOR_ID): cv.positive_int,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
vol.Optional(CONF_SHOW_ON_MAP, default=False): cv.boolean,
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): cv.time_period,
}
)
},
),
extra=vol.ALLOW_EXTRA,
)
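# Illustrative configuration.yaml snippet for the deprecated YAML import path above
# (hypothetical station id shown; assumes CONF_SENSOR_ID maps to the key "sensor_id"):
#
# luftdaten:
#   sensor_id: 12345
#   show_on_map: true
#   sensors:
#     monitored_conditions:
#       - P1
#       - P2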
@callback
def _async_fixup_sensor_id(hass, config_entry, sensor_id):
hass.config_entries.async_update_entry(
config_entry, data={**config_entry.data, CONF_SENSOR_ID: int(sensor_id)}
)
async def async_setup(hass, config):
"""Set up the Luftdaten component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_LUFTDATEN_CLIENT] = {}
hass.data[DOMAIN][DATA_LUFTDATEN_LISTENER] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
station_id = conf[CONF_SENSOR_ID]
if station_id not in configured_sensors(hass):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_SENSORS: conf[CONF_SENSORS],
CONF_SENSOR_ID: conf[CONF_SENSOR_ID],
CONF_SHOW_ON_MAP: conf[CONF_SHOW_ON_MAP],
},
)
)
hass.data[DOMAIN][CONF_SCAN_INTERVAL] = conf[CONF_SCAN_INTERVAL]
return True
async def async_setup_entry(hass, config_entry):
"""Set up Luftdaten as config entry."""
if not isinstance(config_entry.data[CONF_SENSOR_ID], int):
_async_fixup_sensor_id(hass, config_entry, config_entry.data[CONF_SENSOR_ID])
if (
config_entry.data[CONF_SENSOR_ID] in duplicate_stations(hass)
and config_entry.source == SOURCE_IMPORT
):
_LOGGER.warning(
"Removing duplicate sensors for station %s",
config_entry.data[CONF_SENSOR_ID],
)
hass.async_create_task(hass.config_entries.async_remove(config_entry.entry_id))
return False
session = async_get_clientsession(hass)
try:
luftdaten = LuftDatenData(
Luftdaten(config_entry.data[CONF_SENSOR_ID], hass.loop, session),
config_entry.data.get(CONF_SENSORS, {}).get(
CONF_MONITORED_CONDITIONS, list(SENSORS)
),
)
await luftdaten.async_update()
hass.data[DOMAIN][DATA_LUFTDATEN_CLIENT][config_entry.entry_id] = luftdaten
except LuftdatenError as err:
raise ConfigEntryNotReady from err
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
async def refresh_sensors(event_time):
"""Refresh Luftdaten data."""
await luftdaten.async_update()
async_dispatcher_send(hass, TOPIC_UPDATE)
hass.data[DOMAIN][DATA_LUFTDATEN_LISTENER][
config_entry.entry_id
] = async_track_time_interval(
hass,
refresh_sensors,
hass.data[DOMAIN].get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL),
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload an Luftdaten config entry."""
remove_listener = hass.data[DOMAIN][DATA_LUFTDATEN_LISTENER].pop(
config_entry.entry_id
)
remove_listener()
hass.data[DOMAIN][DATA_LUFTDATEN_CLIENT].pop(config_entry.entry_id)
return await hass.config_entries.async_unload_platforms(config_entry, PLATFORMS)
class LuftDatenData:
"""Define a generic Luftdaten object."""
def __init__(self, client, sensor_conditions):
"""Initialize the Luftdata object."""
self.client = client
self.data = {}
self.sensor_conditions = sensor_conditions
async def async_update(self):
"""Update sensor/binary sensor data."""
try:
await self.client.get_data()
if self.client.values:
self.data[DATA_LUFTDATEN] = self.client.values
self.data[DATA_LUFTDATEN].update(self.client.meta)
except LuftdatenError:
_LOGGER.error("Unable to retrieve data from luftdaten.info")
|
|
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
"""
Gaussian Naive Bayes classification.
This checks that GaussianNB implements fit and predict and returns
correct values for a simple toy dataset.
"""
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
"""Test whether class priors are properly set. """
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
"""Test whether class priors are properly set. """
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
"""Test Multinomial Naive Bayes classification.
This checks that MultinomialNB implements fit and predict and returns
correct values for a simple toy dataset.
"""
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
"""Test picklability of discrete naive Bayes classifiers"""
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
"""Test input checks for the fit method"""
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
"""Test discrete NB classes' probability scores"""
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
"""Test whether discrete NB classes fit a uniform prior
when fit_prior=False and class_prior=None"""
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
"""Test whether discrete NB classes use provided prior"""
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
"""Test whether discrete NB classes use provided prior
when using partial_fit"""
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
"""coef_ and intercept_ should have shapes as in other linear models.
Non-regression test for issue #2127.
"""
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
"""Test for issue #4268.
Tests that the feature log prob value computed by BernoulliNB when
alpha=1.0 is equal to the expression given in Manning, Raghavan,
and Schuetze's "Introduction to Information Retrieval" book:
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
"""
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
"""
Tests that BernoulliNB when alpha=1.0 gives the same values as
those given for the toy example in Manning, Raghavan, and
Schuetze's "Introduction to Information Retrieval" book:
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
"""
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliBN w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
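    # Worked check (illustrative): with alpha=1.0, P(feature present | class) is
    # (docs in class containing feature + 1) / (docs in class + 2). For China
    # (3 docs): Beijing/Macao/Shanghai -> (1+1)/5 = 0.4, Chinese -> (3+1)/5 = 0.8,
    # Japan/Tokyo -> (0+1)/5 = 0.2. For Japan (1 doc): Chinese/Japan/Tokyo ->
    # (1+1)/3 = 2/3, Beijing/Macao/Shanghai -> (0+1)/3 = 1/3, which reproduces
    # feature_prob above.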
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([0, 1, 1, 0, 0, 1])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
|
|
# -*- coding: utf-8 -*-
import datetime
import math
WGS84_a = 6378137.0
WGS84_b = 6356752.314245
def ecef_from_lla(lat, lon, alt):
'''
Compute ECEF XYZ from latitude, longitude and altitude.
    All using the WGS84 model.
    Altitude is the distance to the WGS84 ellipsoid.
Check results here http://www.oc.nps.edu/oc2902w/coord/llhxyz.htm
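    For example, at the equator and prime meridian at zero altitude:
    >>> x, y, z = ecef_from_lla(0.0, 0.0, 0.0)
    >>> round(x, 1), round(y, 1), round(z, 1)
    (6378137.0, 0.0, 0.0)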
'''
a2 = WGS84_a**2
b2 = WGS84_b**2
lat = math.radians(lat)
lon = math.radians(lon)
L = 1.0 / math.sqrt(a2 * math.cos(lat)**2 + b2 * math.sin(lat)**2)
x = (a2 * L + alt) * math.cos(lat) * math.cos(lon)
y = (a2 * L + alt) * math.cos(lat) * math.sin(lon)
z = (b2 * L + alt) * math.sin(lat)
return x, y, z
def gps_distance(latlon_1, latlon_2):
'''
Distance between two (lat,lon) pairs.
>>> p1 = (42.1, -11.1)
>>> p2 = (42.2, -11.3)
>>> 19000 < gps_distance(p1, p2) < 20000
True
'''
x1, y1, z1 = ecef_from_lla(latlon_1[0], latlon_1[1], 0.)
x2, y2, z2 = ecef_from_lla(latlon_2[0], latlon_2[1], 0.)
dis = math.sqrt((x1-x2)**2 + (y1-y2)**2 + (z1-z2)**2)
return dis
def dms_to_decimal(degrees, minutes, seconds, hemisphere):
'''
Convert from degrees, minutes, seconds to decimal degrees.
@author: mprins
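    For example:
    >>> dms_to_decimal(42, 30, 0, 'N')
    42.5
    >>> dms_to_decimal(42, 30, 0, 'S')
    -42.5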
'''
dms = float(degrees) + float(minutes) / 60 + float(seconds) / 3600
if hemisphere in "WwSs":
dms = -1 * dms
return dms
def decimal_to_dms(value, loc):
'''
Convert decimal position to degrees, minutes, seconds
'''
if value < 0:
loc_value = loc[0]
elif value > 0:
loc_value = loc[1]
else:
loc_value = ""
abs_value = abs(value)
deg = int(abs_value)
t1 = (abs_value-deg)*60
mint = int(t1)
sec = round((t1 - mint)* 60, 6)
return (deg, mint, sec, loc_value)
def gpgga_to_dms(gpgga):
'''
    Convert a GPS coordinate in GPGGA format (degrees and decimal minutes) to decimal degrees
Reference: http://us.cactii.net/~bb/gps.py
'''
deg_min, dmin = gpgga.split('.')
degrees = int(deg_min[:-2])
minutes = float('%s.%s' % (deg_min[-2:], dmin))
decimal = degrees + (minutes/60)
return decimal
def utc_to_localtime(utc_time):
utc_offset_timedelta = datetime.datetime.utcnow() - datetime.datetime.now()
return utc_time - utc_offset_timedelta
def compute_bearing(start_lat, start_lon, end_lat, end_lon):
'''
Get the compass bearing from start to end.
Formula from
http://www.movable-type.co.uk/scripts/latlong.html
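    For example, due east along the equator is a 90 degree bearing:
    >>> round(compute_bearing(0.0, 0.0, 0.0, 1.0), 1)
    90.0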
'''
# make sure everything is in radians
start_lat = math.radians(start_lat)
start_lon = math.radians(start_lon)
end_lat = math.radians(end_lat)
end_lon = math.radians(end_lon)
dLong = end_lon - start_lon
dPhi = math.log(math.tan(end_lat/2.0+math.pi/4.0)/math.tan(start_lat/2.0+math.pi/4.0))
if abs(dLong) > math.pi:
if dLong > 0.0:
dLong = -(2.0 * math.pi - dLong)
else:
dLong = (2.0 * math.pi + dLong)
y = math.sin(dLong)*math.cos(end_lat)
x = math.cos(start_lat)*math.sin(end_lat) - math.sin(start_lat)*math.cos(end_lat)*math.cos(dLong)
bearing = (math.degrees(math.atan2(y, x)) + 360.0) % 360.0
return bearing
def diff_bearing(b1, b2):
'''
Compute difference between two bearings
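    For example:
    >>> diff_bearing(350, 10)
    20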
'''
d = abs(b2-b1)
d = 360-d if d>180 else d
return d
def offset_bearing(bearing, offset):
'''
Add offset to bearing
'''
bearing = (bearing + offset) % 360
return bearing
def normalize_bearing(bearing, check_hex=False):
'''
    Normalize bearing and convert from hex if check_hex is True.
'''
if bearing > 360 and check_hex:
# fix negative value wrongly parsed in exifread
# -360 degree -> 4294966935 when converting from hex
bearing = bin(int(bearing))[2:]
bearing = ''.join([str(int(int(a)==0)) for a in bearing])
bearing = -float(int(bearing, 2))
bearing %= 360
return bearing
def interpolate_lat_lon(points, t, max_dt=1):
'''
Return interpolated lat, lon and compass bearing for time t.
Points is a list of tuples (time, lat, lon, elevation), t a datetime object.
'''
# find the enclosing points in sorted list
if (t<=points[0][0]) or (t>=points[-1][0]):
if t<=points[0][0]:
dt = abs((points[0][0]-t).total_seconds())
else:
dt = (t-points[-1][0]).total_seconds()
if dt>max_dt:
raise ValueError("Time t not in scope of gpx file.")
else:
print ("Warning: Time t not in scope of gpx file by {} seconds, extrapolating...".format(dt))
if t < points[0][0]:
before = points[0]
after = points[1]
else:
before = points[-2]
after = points[-1]
bearing = compute_bearing(before[1], before[2], after[1], after[2])
if t==points[0][0]:
x = points[0]
return (x[1], x[2], bearing, x[3])
if t==points[-1][0]:
x = points[-1]
return (x[1], x[2], bearing, x[3])
else:
for i,point in enumerate(points):
if t<point[0]:
if i>0:
before = points[i-1]
else:
before = points[i]
after = points[i]
break
# time diff
dt_before = (t-before[0]).total_seconds()
dt_after = (after[0]-t).total_seconds()
# simple linear interpolation
lat = (before[1]*dt_after + after[1]*dt_before) / (dt_before + dt_after)
lon = (before[2]*dt_after + after[2]*dt_before) / (dt_before + dt_after)
bearing = compute_bearing(before[1], before[2], after[1], after[2])
if before[3] is not None:
ele = (before[3]*dt_after + after[3]*dt_before) / (dt_before + dt_after)
else:
ele = None
return lat, lon, bearing, ele
|
|
import hashlib, binascii, struct, array, os, time, sys, optparse
import scrypt
from construct import *
def main():
options = get_args()
algorithm = get_algorithm(options)
# https://en.bitcoin.it/wiki/Difficulty
bits, target = get_difficulty(algorithm)
input_script = create_input_script(options.timestamp)
output_script = create_output_script(options.pubkey)
# hash merkle root is the double sha256 hash of the transaction(s)
tx = create_transaction(input_script, output_script,options)
hash_merkle_root = hashlib.sha256(hashlib.sha256(tx).digest()).digest()
print_block_info(options, hash_merkle_root, bits)
block_header = create_block_header(hash_merkle_root, options.time, bits, options.nonce)
genesis_hash, nonce = generate_hash(block_header, algorithm, options.nonce, target)
announce_found_genesis(genesis_hash, nonce)
def get_args():
parser = optparse.OptionParser()
parser.add_option("-t", "--time", dest="time", default=int(time.time()),
type="int", help="the (unix) time when the genesisblock is created")
parser.add_option("-z", "--timestamp", dest="timestamp", default="The Times 03/Jan/2009 Chancellor on brink of second bailout for banks",
type="string", help="the pszTimestamp found in the coinbase of the genesisblock")
parser.add_option("-n", "--nonce", dest="nonce", default=0,
type="int", help="the first value of the nonce that will be incremented when searching the genesis hash")
parser.add_option("-a", "--algorithm", dest="algorithm", default="SHA256",
help="the PoW algorithm: [SHA256|scrypt|X11|X13|X15]")
parser.add_option("-p", "--pubkey", dest="pubkey", default="04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f",
type="string", help="the pubkey found in the output script")
parser.add_option("-v", "--value", dest="value", default=5000000000,
type="int", help="the value in coins for the output, full value (exp. in bitcoin 5000000000 - To get other coins value: Block Value * 100000000)")
(options, args) = parser.parse_args()
return options
def get_algorithm(options):
supported_algorithms = ["SHA256", "scrypt", "X11", "X13", "X15"]
if options.algorithm in supported_algorithms:
return options.algorithm
else:
sys.exit("Error: Given algorithm must be one of: " + str(supported_algorithms))
def get_difficulty(algorithm):
if algorithm == "scrypt":
return 0x1e0ffff0, 0x0ffff0 * 2**(8*(0x1e - 3))
elif algorithm == "SHA256":
return 0x1d00ffff, 0x00ffff * 2**(8*(0x1d - 3))
elif algorithm == "X11" or algorithm == "X13" or algorithm == "X15":
return 0x1e0ffff0, 0x0ffff0 * 2**(8*(0x1e - 3))
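# Worked example (for reference): SHA256 bits 0x1d00ffff encode the target
# 0x00ffff * 2**(8*(0x1d - 3)) = 0xffff * 2**208, i.e. 0xffff followed by
# 52 hex zeros -- the well-known difficulty-1 target of the Bitcoin genesis block.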
def create_input_script(psz_timestamp):
psz_prefix = ""
#use OP_PUSHDATA1 if required
if len(psz_timestamp) > 76: psz_prefix = '4c'
script_prefix = '04ffff001d0104' + psz_prefix + chr(len(psz_timestamp)).encode('hex')
print (script_prefix + psz_timestamp.encode('hex'))
return (script_prefix + psz_timestamp.encode('hex')).decode('hex')
def create_output_script(pubkey):
script_len = '41'
OP_CHECKSIG = 'ac'
return (script_len + pubkey + OP_CHECKSIG).decode('hex')
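# Note: '41' pushes the next 65 bytes (the uncompressed pubkey, 130 hex chars)
# and 'ac' is OP_CHECKSIG, so the serialized output script is 1 + 65 + 1 = 67
# bytes = 0x43, matching output_script_len in create_transaction below.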
def create_transaction(input_script, output_script,options):
transaction = Struct("transaction",
Bytes("version", 4),
Byte("num_inputs"),
StaticField("prev_output", 32),
UBInt32('prev_out_idx'),
Byte('input_script_len'),
Bytes('input_script', len(input_script)),
UBInt32('sequence'),
Byte('num_outputs'),
Bytes('out_value', 8),
Byte('output_script_len'),
Bytes('output_script', 0x43),
UBInt32('locktime'))
tx = transaction.parse('\x00'*(127 + len(input_script)))
tx.version = struct.pack('<I', 1)
tx.num_inputs = 1
tx.prev_output = struct.pack('<qqqq', 0,0,0,0)
tx.prev_out_idx = 0xFFFFFFFF
tx.input_script_len = len(input_script)
tx.input_script = input_script
tx.sequence = 0xFFFFFFFF
tx.num_outputs = 1
tx.out_value = struct.pack('<q' ,options.value)#0x000005f5e100)#012a05f200) #50 coins
#tx.out_value = struct.pack('<q' ,0x000000012a05f200) #50 coins
tx.output_script_len = 0x43
tx.output_script = output_script
tx.locktime = 0
return transaction.build(tx)
def create_block_header(hash_merkle_root, time, bits, nonce):
block_header = Struct("block_header",
Bytes("version",4),
Bytes("hash_prev_block", 32),
Bytes("hash_merkle_root", 32),
Bytes("time", 4),
Bytes("bits", 4),
Bytes("nonce", 4))
genesisblock = block_header.parse('\x00'*80)
genesisblock.version = struct.pack('<I', 1)
genesisblock.hash_prev_block = struct.pack('<qqqq', 0,0,0,0)
genesisblock.hash_merkle_root = hash_merkle_root
genesisblock.time = struct.pack('<I', time)
genesisblock.bits = struct.pack('<I', bits)
genesisblock.nonce = struct.pack('<I', nonce)
return block_header.build(genesisblock)
# https://en.bitcoin.it/wiki/Block_hashing_algorithm
def generate_hash(data_block, algorithm, start_nonce, target):
print 'Searching for genesis hash..'
nonce = start_nonce
last_updated = time.time()
difficulty = float(0xFFFF) * 2**208 / target
update_interval = int(1000000 * difficulty)
while True:
sha256_hash, header_hash = generate_hashes_from_block(data_block, algorithm)
last_updated = calculate_hashrate(nonce, update_interval, difficulty, last_updated)
if is_genesis_hash(header_hash, target):
if algorithm == "X11" or algorithm == "X13" or algorithm == "X15":
return (header_hash, nonce)
return (sha256_hash, nonce)
else:
nonce = nonce + 1
data_block = data_block[0:len(data_block) - 4] + struct.pack('<I', nonce)
def generate_hashes_from_block(data_block, algorithm):
sha256_hash = hashlib.sha256(hashlib.sha256(data_block).digest()).digest()[::-1]
header_hash = ""
if algorithm == 'scrypt':
header_hash = scrypt.hash(data_block,data_block,1024,1,1,32)[::-1]
elif algorithm == 'SHA256':
header_hash = sha256_hash
elif algorithm == 'X11':
try:
exec('import %s' % "xcoin_hash")
except ImportError:
sys.exit("Cannot run X11 algorithm: module xcoin_hash not found")
header_hash = xcoin_hash.getPoWHash(data_block)[::-1]
elif algorithm == 'X13':
try:
exec('import %s' % "x13_hash")
except ImportError:
sys.exit("Cannot run X13 algorithm: module x13_hash not found")
header_hash = x13_hash.getPoWHash(data_block)[::-1]
elif algorithm == 'X15':
try:
exec('import %s' % "x15_hash")
except ImportError:
sys.exit("Cannot run X15 algorithm: module x15_hash not found")
header_hash = x15_hash.getPoWHash(data_block)[::-1]
return sha256_hash, header_hash
def is_genesis_hash(header_hash, target):
return int(header_hash.encode('hex_codec'), 16) < target
def calculate_hashrate(nonce, update_interval, difficulty, last_updated):
if nonce % update_interval == update_interval - 1:
now = time.time()
hashrate = round(update_interval/(now - last_updated))
generation_time = round(difficulty * pow(2, 32) / hashrate / 3600, 1)
sys.stdout.write("\r%s hash/s, estimate: %s h"%(str(hashrate), str(generation_time)))
sys.stdout.flush()
return now
else:
return last_updated
def print_block_info(options, hash_merkle_root, bits):
print "algorithm: " + (options.algorithm)
print "merkle hash: " + hash_merkle_root[::-1].encode('hex_codec')
print "pszTimestamp: " + options.timestamp
print "pubkey: " + options.pubkey
print "time: " + str(options.time)
print "bits: " + str(hex(bits))
def announce_found_genesis(genesis_hash, nonce):
print "genesis hash found!"
print "nonce: " + str(nonce)
print "genesis hash: " + genesis_hash.encode('hex_codec')
# GOGOGO!
main()
|
|
"""Implements nose test program and collector.
"""
from __future__ import generators
import logging
import os
import sys
import time
import unittest
from nose.config import Config, all_config_files
from nose.loader import defaultTestLoader
from nose.plugins.manager import PluginManager, DefaultPluginManager, \
RestrictedPluginManager
from nose.result import TextTestResult
from nose.suite import FinalizingSuiteWrapper
from nose.util import isclass, tolist
log = logging.getLogger('nose.core')
compat_24 = sys.version_info >= (2, 4)
__all__ = ['TestProgram', 'main', 'run', 'run_exit', 'runmodule', 'collector',
'TextTestRunner']
class TextTestRunner(unittest.TextTestRunner):
"""Test runner that uses nose's TextTestResult to enable errorClasses,
as well as providing hooks for plugins to override or replace the test
output stream, results, and the test case itself.
"""
def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
config=None):
if config is None:
config = Config()
self.config = config
unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity)
def _makeResult(self):
return TextTestResult(self.stream,
self.descriptions,
self.verbosity,
self.config)
def run(self, test):
"""Overrides to provide plugin hooks and defer all output to
the test result class.
"""
wrapper = self.config.plugins.prepareTest(test)
if wrapper is not None:
test = wrapper
# plugins can decorate or capture the output stream
wrapped = self.config.plugins.setOutputStream(self.stream)
if wrapped is not None:
self.stream = wrapped
result = self._makeResult()
start = time.time()
test(result)
stop = time.time()
result.printErrors()
result.printSummary(start, stop)
self.config.plugins.finalize(result)
return result
class TestProgram(unittest.TestProgram):
"""Collect and run tests, returning success or failure.
The arguments to TestProgram() are the same as to
:func:`main()` and :func:`run()`:
* module: All tests are in this module (default: None)
* defaultTest: Tests to load (default: '.')
* argv: Command line arguments (default: None; sys.argv is read)
* testRunner: Test runner instance (default: None)
* testLoader: Test loader instance (default: None)
* env: Environment; ignored if config is provided (default: None;
os.environ is read)
* config: :class:`nose.config.Config` instance (default: None)
* suite: Suite or list of tests to run (default: None). Passing a
suite or lists of tests will bypass all test discovery and
loading. *ALSO NOTE* that if you pass a unittest.TestSuite
instance as the suite, context fixtures at the class, module and
package level will not be used, and many plugin hooks will not
be called. If you want normal nose behavior, either pass a list
of tests, or a fully-configured :class:`nose.suite.ContextSuite`.
* exit: Exit after running tests and printing report (default: True)
* plugins: List of plugins to use; ignored if config is provided
(default: load plugins with DefaultPluginManager)
* addplugins: List of **extra** plugins to use. Pass a list of plugin
instances in this argument to make custom plugins available while
still using the DefaultPluginManager.
"""
verbosity = 1
def __init__(self, module=None, defaultTest='.', argv=None,
testRunner=None, testLoader=None, env=None, config=None,
suite=None, exit=True, plugins=None, addplugins=None):
if env is None:
env = os.environ
if config is None:
config = self.makeConfig(env, plugins)
if addplugins:
config.plugins.addPlugins(extraplugins=addplugins)
self.config = config
self.suite = suite
self.exit = exit
extra_args = {}
version = sys.version_info[0:2]
if version >= (2,7) and version != (3,0):
extra_args['exit'] = exit
unittest.TestProgram.__init__(
self, module=module, defaultTest=defaultTest,
argv=argv, testRunner=testRunner, testLoader=testLoader,
**extra_args)
def makeConfig(self, env, plugins=None):
"""Load a Config, pre-filled with user config files if any are
found.
"""
cfg_files = all_config_files()
if plugins:
manager = PluginManager(plugins=plugins)
else:
manager = DefaultPluginManager()
return Config(
env=env, files=cfg_files, plugins=manager)
def parseArgs(self, argv):
"""Parse argv and env and configure running environment.
"""
self.config.configure(argv, doc=self.usage())
log.debug("configured %s", self.config)
# quick outs: version, plugins (optparse would have already
# caught and exited on help)
if self.config.options.version:
from nose import __version__
sys.stdout = sys.__stdout__
print "%s version %s" % (os.path.basename(sys.argv[0]), __version__)
sys.exit(0)
if self.config.options.showPlugins:
self.showPlugins()
sys.exit(0)
if self.testLoader is None:
self.testLoader = defaultTestLoader(config=self.config)
elif isclass(self.testLoader):
self.testLoader = self.testLoader(config=self.config)
plug_loader = self.config.plugins.prepareTestLoader(self.testLoader)
if plug_loader is not None:
self.testLoader = plug_loader
log.debug("test loader is %s", self.testLoader)
# FIXME if self.module is a string, add it to self.testNames? not sure
if self.config.testNames:
self.testNames = self.config.testNames
else:
self.testNames = tolist(self.defaultTest)
log.debug('defaultTest %s', self.defaultTest)
log.debug('Test names are %s', self.testNames)
if self.config.workingDir is not None:
os.chdir(self.config.workingDir)
self.createTests()
def createTests(self):
"""Create the tests to run. If a self.suite
is set, then that suite will be used. Otherwise, tests will be
loaded from the given test names (self.testNames) using the
test loader.
"""
log.debug("createTests called with %s", self.suite)
if self.suite is not None:
# We were given an explicit suite to run. Make sure it's
# loaded and wrapped correctly.
self.test = self.testLoader.suiteClass(self.suite)
else:
self.test = self.testLoader.loadTestsFromNames(self.testNames)
def runTests(self):
"""Run Tests. Returns true on success, false on failure, and sets
self.success to the same value.
"""
log.debug("runTests called")
if self.testRunner is None:
self.testRunner = TextTestRunner(stream=self.config.stream,
verbosity=self.config.verbosity,
config=self.config)
plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
if plug_runner is not None:
self.testRunner = plug_runner
result = self.testRunner.run(self.test)
self.success = result.wasSuccessful()
if self.exit:
sys.exit(not self.success)
return self.success
def showPlugins(self):
"""Print list of available plugins.
"""
import textwrap
class DummyParser:
def __init__(self):
self.options = []
def add_option(self, *arg, **kw):
self.options.append((arg, kw.pop('help', '')))
v = self.config.verbosity
self.config.plugins.sort()
for p in self.config.plugins:
print "Plugin %s" % p.name
if v >= 2:
print " score: %s" % p.score
print '\n'.join(textwrap.wrap(p.help().strip(),
initial_indent=' ',
subsequent_indent=' '))
if v >= 3:
parser = DummyParser()
p.addOptions(parser)
if len(parser.options):
print
print " Options:"
for opts, help in parser.options:
print ' %s' % (', '.join(opts))
if help:
print '\n'.join(
textwrap.wrap(help.strip(),
initial_indent=' ',
subsequent_indent=' '))
print
def usage(cls):
import nose
if hasattr(nose, '__loader__'):
ld = nose.__loader__
if hasattr(ld, 'zipfile'):
# nose was imported from a zipfile
return ld.get_data(
os.path.join(ld.prefix, 'nose', 'usage.txt'))
return open(os.path.join(
os.path.dirname(__file__), 'usage.txt'), 'r').read()
usage = classmethod(usage)
# backwards compatibility
run_exit = main = TestProgram
def run(*arg, **kw):
"""Collect and run tests, returning success or failure.
The arguments to `run()` are the same as to `main()`:
* module: All tests are in this module (default: None)
* defaultTest: Tests to load (default: '.')
* argv: Command line arguments (default: None; sys.argv is read)
* testRunner: Test runner instance (default: None)
* testLoader: Test loader instance (default: None)
* env: Environment; ignored if config is provided (default: None;
os.environ is read)
* config: :class:`nose.config.Config` instance (default: None)
* suite: Suite or list of tests to run (default: None). Passing a
suite or lists of tests will bypass all test discovery and
loading. *ALSO NOTE* that if you pass a unittest.TestSuite
instance as the suite, context fixtures at the class, module and
package level will not be used, and many plugin hooks will not
be called. If you want normal nose behavior, either pass a list
of tests, or a fully-configured :class:`nose.suite.ContextSuite`.
* plugins: List of plugins to use; ignored if config is provided
(default: load plugins with DefaultPluginManager)
* addplugins: List of **extra** plugins to use. Pass a list of plugin
instances in this argument to make custom plugins available while
still using the DefaultPluginManager.
With the exception that the ``exit`` argument is always set
to False.
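    For example (illustrative package name)::
        import nose
        success = nose.run(argv=['nosetests', '-v', 'mypackage.tests'])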
"""
kw['exit'] = False
return TestProgram(*arg, **kw).success
def runmodule(name='__main__', **kw):
"""Collect and run tests in a single module only. Defaults to running
tests in __main__. Additional arguments to TestProgram may be passed
as keyword arguments.
"""
main(defaultTest=name, **kw)
def collector():
"""TestSuite replacement entry point. Use anywhere you might use a
unittest.TestSuite. The collector will, by default, load options from
all config files and execute loader.loadTestsFromNames() on the
configured testNames, or '.' if no testNames are configured.
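    For example (illustrative), a setuptools ``setup.py`` can use it via::
        setup(
            # ...
            test_suite='nose.collector',
        )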
"""
# plugins that implement any of these methods are disabled, since
# we don't control the test runner and won't be able to run them
# finalize() is also not called, but plugins that use it aren't disabled,
# because capture needs it.
setuptools_incompat = ('report', 'prepareTest',
'prepareTestLoader', 'prepareTestRunner',
'setOutputStream')
plugins = RestrictedPluginManager(exclude=setuptools_incompat)
conf = Config(files=all_config_files(),
plugins=plugins)
conf.configure(argv=['collector'])
loader = defaultTestLoader(conf)
if conf.testNames:
suite = loader.loadTestsFromNames(conf.testNames)
else:
suite = loader.loadTestsFromNames(('.',))
return FinalizingSuiteWrapper(suite, plugins.finalize)
if __name__ == '__main__':
main()
|
|
__author__ = 'oier'
import os
import numpy as np
from data.parameters import true_params
from data.parameters import false_params
import distance as dist
def pdftotext(path):
os.system("pdftotext {data}".format(data=path))
return(path.replace(".pdf",".txt"))
import pandas as pd
def parse(path):
txt = pd.read_table(path, sep='\n', na_values=False, header=None)
for i in txt.index:
try :
if pd.isnull(float(txt.ix[i])) == False:
name = getname(i,txt)
print(name)
print(float(txt.ix[i]))
except :
pass
def getname(index, df):
name = ""
for i in range(0,index):
size = len(df.ix[i].to_string().split())
idxname = " ".join(df.ix[i].to_string().split()[1:size])
if (len( idxname )> 5) and idxname != None and idxname != "NaN":
name = idxname
#print(name)
return (name)
from collections import deque
def getnamedict(path):
dict = {}
numdict = {}
names = deque()
txt = pd.read_table(path, sep='\n', na_values=False, header=None)
name = ""
for i in txt.index:
try :
size = len(txt.ix[i].to_string().split())
nextname = " ".join(txt.ix[i].to_string().split()[1:size])
if (len( nextname )> 5) and \
nextname != None and \
nextname != "NaN" and \
isclean(nextname) and \
validateparam(nextname):
names.append(nextname)
dict[i] = nextname
#print(name)
#print(nextname)
if pd.isnull(float(txt.ix[i])) == False:
number = float(txt.ix[i])
numdict[names.pop()] = number
#print(number)
#print(i)
except :
pass
print(dict.keys())
print(dict.values())
print(numdict.keys())
print(numdict.values())
#organize(dict,numdict)
# print(dict[i])
def organize(names, numbers):
'''
:param names: must be dictionary
:param numbers: must be dictionary
:return: dictionary, dict[name] = number
'''
numbs = dict(numbers)
nams = dict(names)
conn1 = {}
conn2 = {}
array1 = np.array(nams.keys())
for i in numbs.keys():
actual = 100.0
inconn2 = False
key = min(nams.keys(), key=lambda k: abs(k - i))
print(" {} - {} ".format(key,i))
print(" {} - {} ".format(nams[key],numbs[i]))
'''
for j in numbs.keys():
actual = i - j
if ( actual > conn1[i] or conn1[i] == None):
if( conn2[j] == None):
conn1[i] = j
conn2[j] = actual
else:
best = j
inconn2 = True
else if (conn2[j] != None ):
'''
return()
def isclean(word):
    # Return True only if the word contains none of the unwanted characters "_[]*".
    w = str(word)
    for c in "_[]*":
        if w.find(c) != -1:
            return False
    return True
def validateparam(word):
t_dist = []
f_dist = []
for i in true_params:
t_dist.append(dist.levenshtein(word,i))
for i in false_params:
f_dist.append(dist.levenshtein(word, i))
print("Word: {}, T: {} , F: {}".format(word, np.min(t_dist), np.min(f_dist[0])))
if( min(t_dist) == 0):
print("TRUE")
return (True)
if (min(f_dist) == 0):
print("FALSE")
return("FALSE")
if ( np.mean(t_dist )< np.mean(f_dist) ):
print("TRUE")
return(True)
print("FALSE")
return(False)
def getmyarray(path, apath):
dict = {}
appearances = {}
names = deque()
with open(path) as f:
txt = f.readlines()
#txt = pd.read_table(path, sep='\n', na_values=False, header=None)
array_txt = pd.read_table(apath, sep='\n', header=None)
name = ""
for i in txt:
actual = i.replace("\n", '')
if(len(actual.strip()) == 0):
continue
try :
number = float(actual)
if (number > 10000000):
continue
try:
appearances[actual] += 1
except:
appearances[actual] = 1
            name = localgetmyarray(path, apath, actual, appearances[actual])
            dict[name] = actual
            print("name: {} numb: {}".format(name, actual))
except :
pass
print(dict.keys())
print(dict.values())
def localgetmyarray(path, apath, word, count):
with open(path) as f:
txt = f.readlines()
#txt = pd.read_table(path, sep='\n', na_values=False, header=None)
f = open(apath)
array_txt_str = f.read()
name = ""
idx = [k.start() for k in re.finditer(word, array_txt_str)][count -1]
opt = len(array_txt_str)
apps ={}
for i in txt:
try :
nextname = i.replace("\n", '')
try :
float(nextname)
except :
if (len( nextname )> 5) and nextname != None and \
nextname != "NaN" and isclean(nextname):
try:
apps[nextname ] += 1
except:
apps[nextname] = 1
id = [k for k in re.finditer(nextname, array_txt_str)][apps[nextname]-1].start()
myopt = idx - id
if (myopt > 0) and (myopt < opt):
opt = myopt
name = nextname
except :
pass
print("optimum: {} number: {} found: {}".format(opt, word, name))
f.close()
return name
#DOWN FROM HERE JAVA+PYTHON PDF TO TEXT:
import re
import extractText as txt
def remove_unwanted(str):
    # Strip bracketed spans, asterisks and newlines from a report line.
    s = re.sub(r'\[.*?\]', '',str)
    s = s.replace("*", "")
    s = s.replace("\n", "")
    return (s)
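# Illustrative behaviour of remove_unwanted() (a sketch; the input line is made
# up, and note that leftover spaces are not collapsed):
#
#   remove_unwanted("Cholesterol [1]* 180\n")  ->  "Cholesterol  180"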
def line_control(str):
    # Returns True if str should be discarded (too short or too many tokens);
    # returns False if str looks like a parsable report line.
    if(len(str) < 15):
        return True
    if(len(str) == 1):
        return True
    if(len(str.split(" ")) > 10):
        return True
    return False
def line_parser(str):
item = ''
valor = ''
dict = {}
sline = str.split(" ")
helper = {}
pos = 0
for schar in sline:
try:
#dict["val"]
if(len(dict.keys()) == 3 and len(sline) > 6):
helper[pos] = dict
dict = {}
pos += 1
dict["val"] #to force failure/raise ofd exception
except:
try:
valor = ''
table = [char for char in schar if '/' in char]
if schar.find('%') != -1:
valor = schar
if len(table) > 0:
valor = schar
if(valor != ''):
dict["val"] = valor
continue
except:
pass
try:
#dict["num"]
if(len(dict.keys()) == 3 and len(sline) > 6):
helper[pos] = dict
dict = {}
pos += 1
dict["num"]
except:
try:
num = float(schar)
if(num > 10000):
return({})
dict["num"] = num
continue
except:
pass
try:
dict["item"] += " " + schar
except:
dict["item"] = schar
helper[pos] = dict
return(helper)
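# Sketch of line_parser()'s output shape (the input line is an assumption, not a
# real report line): each position index maps to a dict accumulating an "item"
# label plus, when present, a numeric "num" and a "val" token containing '%' or '/'.
#
#   line_parser("Hematocrit 41.5 %")
#   # -> {0: {'item': 'Hematocrit', 'num': 41.5, 'val': '%'}}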
def getfromjava(path, dest=''):
    d = dest
    if (dest == ''):
        d = path.replace(".pdf", ".txt")
    txt.extractText(path, d, '')
with open(d) as f:
text = f.readlines()
for line in text:
sline = remove_unwanted(line)
if(line_control(sline) == True):
continue
dict = line_parser(sline)
for i in dict.keys():
if(len(dict[i].keys()) == 3):
print("ITEM: {} NUM: {} VAL: {}".format(dict[i]["item"], dict[i]["num"], dict[i]["val"]))
|
|
# Copyright 2010 http://www.collabq.com
#!/usr/bin/python
#
# Perforce Defect Tracking Integration Project
# <http://www.ravenbrook.com/project/p4dti/>
#
# COVERAGE.PY -- COVERAGE TESTING
#
# Gareth Rees, Ravenbrook Limited, 2001-12-04
# Ned Batchelder, 2004-12-12
# http://nedbatchelder.com/code/modules/coverage.html
#
#
# 1. INTRODUCTION
#
# This module provides coverage testing for Python code.
#
# The intended readership is all Python developers.
#
# This document is not confidential.
#
# See [GDR 2001-12-04a] for the command-line interface, programmatic
# interface and limitations. See [GDR 2001-12-04b] for requirements and
# design.
r"""Usage:
coverage.py -x [-p] MODULE.py [ARG1 ARG2 ...]
Execute module, passing the given command-line arguments, collecting
coverage data. With the -p option, write to a temporary file containing
the machine name and process ID.
coverage.py -e
Erase collected coverage data.
coverage.py -c
Collect data from multiple coverage files (as created by -p option above)
and store it into a single file representing the union of the coverage.
coverage.py -r [-m] [-o dir1,dir2,...] FILE1 FILE2 ...
Report on the statement coverage for the given files. With the -m
option, show line numbers of the statements that weren't executed.
coverage.py -a [-d dir] [-o dir1,dir2,...] FILE1 FILE2 ...
Make annotated copies of the given files, marking statements that
are executed with > and statements that are missed with !. With
the -d option, make the copies in that directory. Without the -d
option, make each copy in the same directory as the original.
-o dir,dir2,...
Omit reporting or annotating files when their filename path starts with
a directory listed in the omit list.
e.g. python coverage.py -i -r -o c:\python23,lib\enthought\traits
Coverage data is saved in the file .coverage by default. Set the
COVERAGE_FILE environment variable to save it somewhere else."""
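# A minimal programmatic sketch (inferred from the module-level helpers defined
# near the end of this file; "mymodule" is a hypothetical module to measure):
#
#   import coverage
#   coverage.start()
#   import mymodule; mymodule.main()
#   coverage.stop()
#   coverage.report(mymodule, show_missing=1)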
__version__ = "2.85.20080914" # see detailed history at the end of this file.
import compiler
import compiler.visitor
import glob
import os
import re
import string
import symbol
import sys
import threading
import token
import types
import zipimport
from socket import gethostname
# Python version compatibility
try:
strclass = basestring # new to 2.3
except:
strclass = str
# 2. IMPLEMENTATION
#
# This uses the "singleton" pattern.
#
# The word "morf" means a module object (from which the source file can
# be deduced by suitable manipulation of the __file__ attribute) or a
# filename.
#
# When we generate a coverage report we have to canonicalize every
# filename in the coverage dictionary just in case it refers to the
# module we are reporting on. It seems a shame to throw away this
# information so the data in the coverage dictionary is transferred to
# the 'cexecuted' dictionary under the canonical filenames.
#
# The coverage dictionary is called "c" and the trace function "t". The
# reason for these short names is that Python looks up variables by name
# at runtime and so execution time depends on the length of variables!
# In the bottleneck of this application it's appropriate to abbreviate
# names to increase speed.
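# For illustration (inferred from the trace function and canonicalize_filenames
# below): after tracing, "c" holds entries such as
#   {("/abs/path/mod.py", 17): 1, ...}
# and after canonicalization "cexecuted" holds the same information as
#   {"/abs/path/mod.py": {17: 1, ...}, ...}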
class StatementFindingAstVisitor(compiler.visitor.ASTVisitor):
""" A visitor for a parsed Abstract Syntax Tree which finds executable
statements.
"""
def __init__(self, statements, excluded, suite_spots):
compiler.visitor.ASTVisitor.__init__(self)
self.statements = statements
self.excluded = excluded
self.suite_spots = suite_spots
self.excluding_suite = 0
def doRecursive(self, node):
for n in node.getChildNodes():
self.dispatch(n)
visitStmt = visitModule = doRecursive
def doCode(self, node):
if hasattr(node, 'decorators') and node.decorators:
self.dispatch(node.decorators)
self.recordAndDispatch(node.code)
else:
self.doSuite(node, node.code)
visitFunction = visitClass = doCode
def getFirstLine(self, node):
# Find the first line in the tree node.
lineno = node.lineno
for n in node.getChildNodes():
f = self.getFirstLine(n)
if lineno and f:
lineno = min(lineno, f)
else:
lineno = lineno or f
return lineno
def getLastLine(self, node):
        # Find the last line in the tree node.
lineno = node.lineno
for n in node.getChildNodes():
lineno = max(lineno, self.getLastLine(n))
return lineno
def doStatement(self, node):
self.recordLine(self.getFirstLine(node))
visitAssert = visitAssign = visitAssTuple = visitPrint = \
visitPrintnl = visitRaise = visitSubscript = visitDecorators = \
doStatement
def visitPass(self, node):
# Pass statements have weird interactions with docstrings. If this
# pass statement is part of one of those pairs, claim that the statement
# is on the later of the two lines.
l = node.lineno
if l:
lines = self.suite_spots.get(l, [l,l])
self.statements[lines[1]] = 1
def visitDiscard(self, node):
# Discard nodes are statements that execute an expression, but then
# discard the results. This includes function calls, so we can't
# ignore them all. But if the expression is a constant, the statement
# won't be "executed", so don't count it now.
if node.expr.__class__.__name__ != 'Const':
self.doStatement(node)
def recordNodeLine(self, node):
# Stmt nodes often have None, but shouldn't claim the first line of
# their children (because the first child might be an ignorable line
# like "global a").
if node.__class__.__name__ != 'Stmt':
return self.recordLine(self.getFirstLine(node))
else:
return 0
def recordLine(self, lineno):
# Returns a bool, whether the line is included or excluded.
if lineno:
# Multi-line tests introducing suites have to get charged to their
# keyword.
if lineno in self.suite_spots:
lineno = self.suite_spots[lineno][0]
# If we're inside an excluded suite, record that this line was
# excluded.
if self.excluding_suite:
self.excluded[lineno] = 1
return 0
# If this line is excluded, or suite_spots maps this line to
            # another line that is excluded, then we're excluded.
elif self.excluded.has_key(lineno) or \
self.suite_spots.has_key(lineno) and \
self.excluded.has_key(self.suite_spots[lineno][1]):
return 0
# Otherwise, this is an executable line.
else:
self.statements[lineno] = 1
return 1
return 0
default = recordNodeLine
def recordAndDispatch(self, node):
self.recordNodeLine(node)
self.dispatch(node)
def doSuite(self, intro, body, exclude=0):
exsuite = self.excluding_suite
if exclude or (intro and not self.recordNodeLine(intro)):
self.excluding_suite = 1
self.recordAndDispatch(body)
self.excluding_suite = exsuite
def doPlainWordSuite(self, prevsuite, suite):
# Finding the exclude lines for else's is tricky, because they aren't
# present in the compiler parse tree. Look at the previous suite,
# and find its last line. If any line between there and the else's
# first line are excluded, then we exclude the else.
lastprev = self.getLastLine(prevsuite)
firstelse = self.getFirstLine(suite)
for l in range(lastprev+1, firstelse):
if self.suite_spots.has_key(l):
self.doSuite(None, suite, exclude=self.excluded.has_key(l))
break
else:
self.doSuite(None, suite)
def doElse(self, prevsuite, node):
if node.else_:
self.doPlainWordSuite(prevsuite, node.else_)
def visitFor(self, node):
self.doSuite(node, node.body)
self.doElse(node.body, node)
visitWhile = visitFor
def visitIf(self, node):
# The first test has to be handled separately from the rest.
# The first test is credited to the line with the "if", but the others
# are credited to the line with the test for the elif.
self.doSuite(node, node.tests[0][1])
for t, n in node.tests[1:]:
self.doSuite(t, n)
self.doElse(node.tests[-1][1], node)
def visitTryExcept(self, node):
self.doSuite(node, node.body)
for i in range(len(node.handlers)):
a, b, h = node.handlers[i]
if not a:
# It's a plain "except:". Find the previous suite.
if i > 0:
prev = node.handlers[i-1][2]
else:
prev = node.body
self.doPlainWordSuite(prev, h)
else:
self.doSuite(a, h)
self.doElse(node.handlers[-1][2], node)
def visitTryFinally(self, node):
self.doSuite(node, node.body)
self.doPlainWordSuite(node.body, node.final)
def visitWith(self, node):
self.doSuite(node, node.body)
def visitGlobal(self, node):
# "global" statements don't execute like others (they don't call the
# trace function), so don't record their line numbers.
pass
the_coverage = None
class CoverageException(Exception):
pass
class coverage:
# Name of the cache file (unless environment variable is set).
cache_default = ".coverage"
# Environment variable naming the cache file.
cache_env = "COVERAGE_FILE"
# A dictionary with an entry for (Python source file name, line number
# in that file) if that line has been executed.
c = {}
# A map from canonical Python source file name to a dictionary in
# which there's an entry for each line number that has been
# executed.
cexecuted = {}
# Cache of results of calling the analysis2() method, so that you can
# specify both -r and -a without doing double work.
analysis_cache = {}
# Cache of results of calling the canonical_filename() method, to
# avoid duplicating work.
canonical_filename_cache = {}
def __init__(self):
global the_coverage
if the_coverage:
raise CoverageException("Only one coverage object allowed.")
self.usecache = 1
self.cache = None
self.parallel_mode = False
self.exclude_re = ''
self.nesting = 0
self.cstack = []
self.xstack = []
self.relative_dir = self.abs_file(os.curdir)+os.sep
self.exclude('# *pragma[: ]*[nN][oO] *[cC][oO][vV][eE][rR]')
# t(f, x, y). This method is passed to sys.settrace as a trace function.
# See [van Rossum 2001-07-20b, 9.2] for an explanation of sys.settrace and
# the arguments and return value of the trace function.
# See [van Rossum 2001-07-20a, 3.2] for a description of frame and code
# objects.
def t(self, f, w, unused): #pragma: no cover
if w == 'line':
self.c[(f.f_code.co_filename, f.f_lineno)] = 1
#-for c in self.cstack:
#- c[(f.f_code.co_filename, f.f_lineno)] = 1
return self.t
def help(self, error=None): #pragma: no cover
if error:
print error
print
print __doc__
sys.exit(1)
def command_line(self, argv, help_fn=None):
import getopt
help_fn = help_fn or self.help
settings = {}
optmap = {
'-a': 'annotate',
'-c': 'collect',
'-d:': 'directory=',
'-e': 'erase',
'-h': 'help',
'-i': 'ignore-errors',
'-m': 'show-missing',
'-p': 'parallel-mode',
'-r': 'report',
'-x': 'execute',
'-o:': 'omit=',
}
short_opts = string.join(map(lambda o: o[1:], optmap.keys()), '')
long_opts = optmap.values()
options, args = getopt.getopt(argv, short_opts, long_opts)
for o, a in options:
if optmap.has_key(o):
settings[optmap[o]] = 1
elif optmap.has_key(o + ':'):
settings[optmap[o + ':']] = a
elif o[2:] in long_opts:
settings[o[2:]] = 1
elif o[2:] + '=' in long_opts:
settings[o[2:]+'='] = a
else: #pragma: no cover
pass # Can't get here, because getopt won't return anything unknown.
if settings.get('help'):
help_fn()
for i in ['erase', 'execute']:
for j in ['annotate', 'report', 'collect']:
if settings.get(i) and settings.get(j):
help_fn("You can't specify the '%s' and '%s' "
"options at the same time." % (i, j))
args_needed = (settings.get('execute')
or settings.get('annotate')
or settings.get('report'))
action = (settings.get('erase')
or settings.get('collect')
or args_needed)
if not action:
help_fn("You must specify at least one of -e, -x, -c, -r, or -a.")
if not args_needed and args:
help_fn("Unexpected arguments: %s" % " ".join(args))
self.parallel_mode = settings.get('parallel-mode')
self.get_ready()
if settings.get('erase'):
self.erase()
if settings.get('execute'):
if not args:
help_fn("Nothing to do.")
sys.argv = args
self.start()
import __main__
sys.path[0] = os.path.dirname(sys.argv[0])
execfile(sys.argv[0], __main__.__dict__)
if settings.get('collect'):
self.collect()
if not args:
args = self.cexecuted.keys()
ignore_errors = settings.get('ignore-errors')
show_missing = settings.get('show-missing')
directory = settings.get('directory=')
omit = settings.get('omit=')
if omit is not None:
omit = [self.abs_file(p) for p in omit.split(',')]
else:
omit = []
if settings.get('report'):
self.report(args, show_missing, ignore_errors, omit_prefixes=omit)
if settings.get('annotate'):
self.annotate(args, directory, ignore_errors, omit_prefixes=omit)
def use_cache(self, usecache, cache_file=None):
self.usecache = usecache
if cache_file and not self.cache:
self.cache_default = cache_file
def get_ready(self, parallel_mode=False):
if self.usecache and not self.cache:
self.cache = os.environ.get(self.cache_env, self.cache_default)
if self.parallel_mode:
self.cache += "." + gethostname() + "." + str(os.getpid())
self.restore()
self.analysis_cache = {}
def start(self, parallel_mode=False):
self.get_ready()
if self.nesting == 0: #pragma: no cover
sys.settrace(self.t)
if hasattr(threading, 'settrace'):
threading.settrace(self.t)
self.nesting += 1
def stop(self):
self.nesting -= 1
if self.nesting == 0: #pragma: no cover
sys.settrace(None)
if hasattr(threading, 'settrace'):
threading.settrace(None)
def erase(self):
self.get_ready()
self.c = {}
self.analysis_cache = {}
self.cexecuted = {}
if self.cache and os.path.exists(self.cache):
os.remove(self.cache)
def exclude(self, re):
if self.exclude_re:
self.exclude_re += "|"
self.exclude_re += "(" + re + ")"
def begin_recursive(self):
self.cstack.append(self.c)
self.xstack.append(self.exclude_re)
def end_recursive(self):
self.c = self.cstack.pop()
self.exclude_re = self.xstack.pop()
# save(). Save coverage data to the coverage cache.
def save(self):
if self.usecache and self.cache:
self.canonicalize_filenames()
cache = open(self.cache, 'wb')
import marshal
marshal.dump(self.cexecuted, cache)
cache.close()
# restore(). Restore coverage data from the coverage cache (if it exists).
def restore(self):
self.c = {}
self.cexecuted = {}
assert self.usecache
if os.path.exists(self.cache):
self.cexecuted = self.restore_file(self.cache)
def restore_file(self, file_name):
try:
cache = open(file_name, 'rb')
import marshal
cexecuted = marshal.load(cache)
cache.close()
if isinstance(cexecuted, types.DictType):
return cexecuted
else:
return {}
except:
return {}
# collect(). Collect data in multiple files produced by parallel mode
def collect(self):
cache_dir, local = os.path.split(self.cache)
for f in os.listdir(cache_dir or '.'):
if not f.startswith(local):
continue
full_path = os.path.join(cache_dir, f)
cexecuted = self.restore_file(full_path)
self.merge_data(cexecuted)
def merge_data(self, new_data):
for file_name, file_data in new_data.items():
if self.cexecuted.has_key(file_name):
self.merge_file_data(self.cexecuted[file_name], file_data)
else:
self.cexecuted[file_name] = file_data
def merge_file_data(self, cache_data, new_data):
for line_number in new_data.keys():
if not cache_data.has_key(line_number):
cache_data[line_number] = new_data[line_number]
def abs_file(self, filename):
""" Helper function to turn a filename into an absolute normalized
filename.
"""
return os.path.normcase(os.path.abspath(os.path.realpath(filename)))
def get_zip_data(self, filename):
""" Get data from `filename` if it is a zip file path, or return None
if it is not.
"""
markers = ['.zip'+os.sep, '.egg'+os.sep]
for marker in markers:
if marker in filename:
parts = filename.split(marker)
try:
zi = zipimport.zipimporter(parts[0]+marker[:-1])
except zipimport.ZipImportError:
continue
try:
data = zi.get_data(parts[1])
except IOError:
continue
return data
return None
# canonical_filename(filename). Return a canonical filename for the
# file (that is, an absolute path with no redundant components and
# normalized case). See [GDR 2001-12-04b, 3.3].
def canonical_filename(self, filename):
if not self.canonical_filename_cache.has_key(filename):
f = filename
if os.path.isabs(f) and not os.path.exists(f):
if not self.get_zip_data(f):
f = os.path.basename(f)
if not os.path.isabs(f):
for path in [os.curdir] + sys.path:
g = os.path.join(path, f)
if os.path.exists(g):
f = g
break
cf = self.abs_file(f)
self.canonical_filename_cache[filename] = cf
return self.canonical_filename_cache[filename]
# canonicalize_filenames(). Copy results from "c" to "cexecuted",
# canonicalizing filenames on the way. Clear the "c" map.
def canonicalize_filenames(self):
for filename, lineno in self.c.keys():
if filename == '<string>':
# Can't do anything useful with exec'd strings, so skip them.
continue
f = self.canonical_filename(filename)
if not self.cexecuted.has_key(f):
self.cexecuted[f] = {}
self.cexecuted[f][lineno] = 1
self.c = {}
# morf_filename(morf). Return the filename for a module or file.
def morf_filename(self, morf):
if hasattr(morf, '__file__'):
f = morf.__file__
else:
f = morf
return self.canonical_filename(f)
# analyze_morf(morf). Analyze the module or filename passed as
# the argument. If the source code can't be found, raise an error.
# Otherwise, return a tuple of (1) the canonical filename of the
# source code for the module, (2) a list of lines of statements
# in the source code, (3) a list of lines of excluded statements,
# and (4), a map of line numbers to multi-line line number ranges, for
# statements that cross lines.
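    # For example (illustrative values only), analyzing a small file might yield
    #   ('/abs/path/mod.py', [1, 2, 4, 6], [9], {4: (4, 5), 5: (4, 5)})
    # i.e. lines 4-5 form one multi-line statement charged to line 4, and line 9
    # matched the exclusion pattern.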
def analyze_morf(self, morf):
if self.analysis_cache.has_key(morf):
return self.analysis_cache[morf]
filename = self.morf_filename(morf)
ext = os.path.splitext(filename)[1]
source, sourcef = None, None
if ext == '.pyc':
if not os.path.exists(filename[:-1]):
source = self.get_zip_data(filename[:-1])
if not source:
raise CoverageException(
"No source for compiled code '%s'." % filename
)
filename = filename[:-1]
if not source:
sourcef = open(filename, 'rU')
source = sourcef.read()
try:
lines, excluded_lines, line_map = self.find_executable_statements(
source, exclude=self.exclude_re
)
except SyntaxError, synerr:
raise CoverageException(
"Couldn't parse '%s' as Python source: '%s' at line %d" %
(filename, synerr.msg, synerr.lineno)
)
if sourcef:
sourcef.close()
result = filename, lines, excluded_lines, line_map
self.analysis_cache[morf] = result
return result
def first_line_of_tree(self, tree):
while True:
if len(tree) == 3 and type(tree[2]) == type(1):
return tree[2]
tree = tree[1]
def last_line_of_tree(self, tree):
while True:
if len(tree) == 3 and type(tree[2]) == type(1):
return tree[2]
tree = tree[-1]
def find_docstring_pass_pair(self, tree, spots):
for i in range(1, len(tree)):
if self.is_string_constant(tree[i]) and self.is_pass_stmt(tree[i+1]):
first_line = self.first_line_of_tree(tree[i])
last_line = self.last_line_of_tree(tree[i+1])
self.record_multiline(spots, first_line, last_line)
def is_string_constant(self, tree):
try:
return tree[0] == symbol.stmt and tree[1][1][1][0] == symbol.expr_stmt
except:
return False
def is_pass_stmt(self, tree):
try:
return tree[0] == symbol.stmt and tree[1][1][1][0] == symbol.pass_stmt
except:
return False
def record_multiline(self, spots, i, j):
for l in range(i, j+1):
spots[l] = (i, j)
def get_suite_spots(self, tree, spots):
""" Analyze a parse tree to find suite introducers which span a number
of lines.
"""
for i in range(1, len(tree)):
if type(tree[i]) == type(()):
if tree[i][0] == symbol.suite:
# Found a suite, look back for the colon and keyword.
lineno_colon = lineno_word = None
for j in range(i-1, 0, -1):
if tree[j][0] == token.COLON:
# Colons are never executed themselves: we want the
# line number of the last token before the colon.
lineno_colon = self.last_line_of_tree(tree[j-1])
elif tree[j][0] == token.NAME:
if tree[j][1] == 'elif':
# Find the line number of the first non-terminal
# after the keyword.
t = tree[j+1]
while t and token.ISNONTERMINAL(t[0]):
t = t[1]
if t:
lineno_word = t[2]
else:
lineno_word = tree[j][2]
break
elif tree[j][0] == symbol.except_clause:
# "except" clauses look like:
# ('except_clause', ('NAME', 'except', lineno), ...)
if tree[j][1][0] == token.NAME:
lineno_word = tree[j][1][2]
break
if lineno_colon and lineno_word:
# Found colon and keyword, mark all the lines
# between the two with the two line numbers.
self.record_multiline(spots, lineno_word, lineno_colon)
# "pass" statements are tricky: different versions of Python
# treat them differently, especially in the common case of a
# function with a doc string and a single pass statement.
self.find_docstring_pass_pair(tree[i], spots)
elif tree[i][0] == symbol.simple_stmt:
first_line = self.first_line_of_tree(tree[i])
last_line = self.last_line_of_tree(tree[i])
if first_line != last_line:
self.record_multiline(spots, first_line, last_line)
self.get_suite_spots(tree[i], spots)
def find_executable_statements(self, text, exclude=None):
# Find lines which match an exclusion pattern.
excluded = {}
suite_spots = {}
if exclude:
reExclude = re.compile(exclude)
lines = text.split('\n')
for i in range(len(lines)):
if reExclude.search(lines[i]):
excluded[i+1] = 1
# Parse the code and analyze the parse tree to find out which statements
# are multiline, and where suites begin and end.
import parser
tree = parser.suite(text+'\n\n').totuple(1)
self.get_suite_spots(tree, suite_spots)
#print "Suite spots:", suite_spots
# Use the compiler module to parse the text and find the executable
# statements. We add newlines to be impervious to final partial lines.
statements = {}
ast = compiler.parse(text+'\n\n')
visitor = StatementFindingAstVisitor(statements, excluded, suite_spots)
compiler.walk(ast, visitor, walker=visitor)
lines = statements.keys()
lines.sort()
excluded_lines = excluded.keys()
excluded_lines.sort()
return lines, excluded_lines, suite_spots
# format_lines(statements, lines). Format a list of line numbers
# for printing by coalescing groups of lines as long as the lines
# represent consecutive statements. This will coalesce even if
# there are gaps between statements, so if statements =
# [1,2,3,4,5,10,11,12,13,14] and lines = [1,2,5,10,11,13,14] then
# format_lines will return "1-2, 5-11, 13-14".
def format_lines(self, statements, lines):
        pairs = []
        i = 0
        j = 0
        start = None
while i < len(statements) and j < len(lines):
if statements[i] == lines[j]:
if start == None:
start = lines[j]
end = lines[j]
j = j + 1
elif start:
pairs.append((start, end))
start = None
i = i + 1
if start:
pairs.append((start, end))
def stringify(pair):
start, end = pair
if start == end:
return "%d" % start
else:
return "%d-%d" % (start, end)
ret = string.join(map(stringify, pairs), ", ")
return ret
# Backward compatibility with version 1.
def analysis(self, morf):
f, s, _, m, mf = self.analysis2(morf)
return f, s, m, mf
def analysis2(self, morf):
filename, statements, excluded, line_map = self.analyze_morf(morf)
self.canonicalize_filenames()
if not self.cexecuted.has_key(filename):
self.cexecuted[filename] = {}
missing = []
for line in statements:
lines = line_map.get(line, [line, line])
for l in range(lines[0], lines[1]+1):
if self.cexecuted[filename].has_key(l):
break
else:
missing.append(line)
return (filename, statements, excluded, missing,
self.format_lines(statements, missing))
def relative_filename(self, filename):
""" Convert filename to relative filename from self.relative_dir.
"""
return filename.replace(self.relative_dir, "")
def morf_name(self, morf):
""" Return the name of morf as used in report.
"""
if hasattr(morf, '__name__'):
return morf.__name__
else:
return self.relative_filename(os.path.splitext(morf)[0])
def filter_by_prefix(self, morfs, omit_prefixes):
""" Return list of morfs where the morf name does not begin
with any one of the omit_prefixes.
"""
filtered_morfs = []
for morf in morfs:
for prefix in omit_prefixes:
if self.morf_name(morf).startswith(prefix):
break
else:
filtered_morfs.append(morf)
return filtered_morfs
def morf_name_compare(self, x, y):
return cmp(self.morf_name(x), self.morf_name(y))
def report(self, morfs, show_missing=1, ignore_errors=0, file=None, omit_prefixes=[]):
if not isinstance(morfs, types.ListType):
morfs = [morfs]
# On windows, the shell doesn't expand wildcards. Do it here.
globbed = []
for morf in morfs:
if isinstance(morf, strclass):
globbed.extend(glob.glob(morf))
else:
globbed.append(morf)
morfs = globbed
morfs = self.filter_by_prefix(morfs, omit_prefixes)
morfs.sort(self.morf_name_compare)
max_name = max([5,] + map(len, map(self.morf_name, morfs)))
fmt_name = "%%- %ds " % max_name
fmt_err = fmt_name + "%s: %s"
header = fmt_name % "Name" + " Stmts Exec Cover"
fmt_coverage = fmt_name + "% 6d % 6d % 5d%%"
if show_missing:
header = header + " Missing"
fmt_coverage = fmt_coverage + " %s"
if not file:
file = sys.stdout
print >>file, header
print >>file, "-" * len(header)
total_statements = 0
total_executed = 0
for morf in morfs:
name = self.morf_name(morf)
try:
_, statements, _, missing, readable = self.analysis2(morf)
n = len(statements)
m = n - len(missing)
if n > 0:
pc = 100.0 * m / n
else:
pc = 100.0
args = (name, n, m, pc)
if show_missing:
args = args + (readable,)
print >>file, fmt_coverage % args
total_statements = total_statements + n
total_executed = total_executed + m
except KeyboardInterrupt: #pragma: no cover
raise
except:
if not ignore_errors:
typ, msg = sys.exc_info()[:2]
print >>file, fmt_err % (name, typ, msg)
if len(morfs) > 1:
print >>file, "-" * len(header)
if total_statements > 0:
pc = 100.0 * total_executed / total_statements
else:
pc = 100.0
args = ("TOTAL", total_statements, total_executed, pc)
if show_missing:
args = args + ("",)
print >>file, fmt_coverage % args
# annotate(morfs, ignore_errors).
blank_re = re.compile(r"\s*(#|$)")
else_re = re.compile(r"\s*else\s*:\s*(#|$)")
def annotate(self, morfs, directory=None, ignore_errors=0, omit_prefixes=[]):
morfs = self.filter_by_prefix(morfs, omit_prefixes)
for morf in morfs:
try:
filename, statements, excluded, missing, _ = self.analysis2(morf)
self.annotate_file(filename, statements, excluded, missing, directory)
except KeyboardInterrupt:
raise
except:
if not ignore_errors:
raise
def annotate_file(self, filename, statements, excluded, missing, directory=None):
source = open(filename, 'r')
if directory:
dest_file = os.path.join(directory,
os.path.basename(filename)
+ ',cover')
else:
dest_file = filename + ',cover'
dest = open(dest_file, 'w')
lineno = 0
i = 0
j = 0
covered = 1
while 1:
line = source.readline()
if line == '':
break
lineno = lineno + 1
while i < len(statements) and statements[i] < lineno:
i = i + 1
while j < len(missing) and missing[j] < lineno:
j = j + 1
if i < len(statements) and statements[i] == lineno:
covered = j >= len(missing) or missing[j] > lineno
if self.blank_re.match(line):
dest.write(' ')
elif self.else_re.match(line):
# Special logic for lines containing only 'else:'.
# See [GDR 2001-12-04b, 3.2].
if i >= len(statements) and j >= len(missing):
dest.write('! ')
elif i >= len(statements) or j >= len(missing):
dest.write('> ')
elif statements[i] == missing[j]:
dest.write('! ')
else:
dest.write('> ')
elif lineno in excluded:
dest.write('- ')
elif covered:
dest.write('> ')
else:
dest.write('! ')
dest.write(line)
source.close()
dest.close()
# Singleton object.
the_coverage = coverage()
# Module functions call methods in the singleton object.
def use_cache(*args, **kw):
return the_coverage.use_cache(*args, **kw)
def start(*args, **kw):
return the_coverage.start(*args, **kw)
def stop(*args, **kw):
return the_coverage.stop(*args, **kw)
def erase(*args, **kw):
return the_coverage.erase(*args, **kw)
def begin_recursive(*args, **kw):
return the_coverage.begin_recursive(*args, **kw)
def end_recursive(*args, **kw):
return the_coverage.end_recursive(*args, **kw)
def exclude(*args, **kw):
return the_coverage.exclude(*args, **kw)
def analysis(*args, **kw):
return the_coverage.analysis(*args, **kw)
def analysis2(*args, **kw):
return the_coverage.analysis2(*args, **kw)
def report(*args, **kw):
return the_coverage.report(*args, **kw)
def annotate(*args, **kw):
return the_coverage.annotate(*args, **kw)
def annotate_file(*args, **kw):
return the_coverage.annotate_file(*args, **kw)
# Save coverage data when Python exits. (The atexit module wasn't
# introduced until Python 2.0, so use sys.exitfunc when it's not
# available.)
try:
import atexit
atexit.register(the_coverage.save)
except ImportError:
sys.exitfunc = the_coverage.save
def main():
the_coverage.command_line(sys.argv[1:])
# Command-line interface.
if __name__ == '__main__':
main()
# A. REFERENCES
#
# [GDR 2001-12-04a] "Statement coverage for Python"; Gareth Rees;
# Ravenbrook Limited; 2001-12-04;
# <http://www.nedbatchelder.com/code/modules/rees-coverage.html>.
#
# [GDR 2001-12-04b] "Statement coverage for Python: design and
# analysis"; Gareth Rees; Ravenbrook Limited; 2001-12-04;
# <http://www.nedbatchelder.com/code/modules/rees-design.html>.
#
# [van Rossum 2001-07-20a] "Python Reference Manual (release 2.1.1)";
# Guido van Rossum; 2001-07-20;
# <http://www.python.org/doc/2.1.1/ref/ref.html>.
#
# [van Rossum 2001-07-20b] "Python Library Reference"; Guido van Rossum;
# 2001-07-20; <http://www.python.org/doc/2.1.1/lib/lib.html>.
#
#
# B. DOCUMENT HISTORY
#
# 2001-12-04 GDR Created.
#
# 2001-12-06 GDR Added command-line interface and source code
# annotation.
#
# 2001-12-09 GDR Moved design and interface to separate documents.
#
# 2001-12-10 GDR Open cache file as binary on Windows. Allow
# simultaneous -e and -x, or -a and -r.
#
# 2001-12-12 GDR Added command-line help. Cache analysis so that it
# only needs to be done once when you specify -a and -r.
#
# 2001-12-13 GDR Improved speed while recording. Portable between
# Python 1.5.2 and 2.1.1.
#
# 2002-01-03 GDR Module-level functions work correctly.
#
# 2002-01-07 GDR Update sys.path when running a file with the -x option,
# so that it matches the value the program would get if it were run on
# its own.
#
# 2004-12-12 NMB Significant code changes.
# - Finding executable statements has been rewritten so that docstrings and
# other quirks of Python execution aren't mistakenly identified as missing
# lines.
# - Lines can be excluded from consideration, even entire suites of lines.
# - The filesystem cache of covered lines can be disabled programmatically.
# - Modernized the code.
#
# 2004-12-14 NMB Minor tweaks. Return 'analysis' to its original behavior
# and add 'analysis2'. Add a global for 'annotate', and factor it, adding
# 'annotate_file'.
#
# 2004-12-31 NMB Allow for keyword arguments in the module global functions.
# Thanks, Allen.
#
# 2005-12-02 NMB Call threading.settrace so that all threads are measured.
# Thanks Martin Fuzzey. Add a file argument to report so that reports can be
# captured to a different destination.
#
# 2005-12-03 NMB coverage.py can now measure itself.
#
# 2005-12-04 NMB Adapted Greg Rogers' patch for using relative filenames,
# and sorting and omitting files to report on.
#
# 2006-07-23 NMB Applied Joseph Tate's patch for function decorators.
#
# 2006-08-21 NMB Applied Sigve Tjora and Mark van der Wal's fixes for argument
# handling.
#
# 2006-08-22 NMB Applied Geoff Bache's parallel mode patch.
#
# 2006-08-23 NMB Refactorings to improve testability. Fixes to command-line
# logic for parallel mode and collect.
#
# 2006-08-25 NMB "#pragma: nocover" is excluded by default.
#
# 2006-09-10 NMB Properly ignore docstrings and other constant expressions that
# appear in the middle of a function, a problem reported by Tim Leslie.
# Minor changes to avoid lint warnings.
#
# 2006-09-17 NMB coverage.erase() shouldn't clobber the exclude regex.
# Change how parallel mode is invoked, and fix erase() so that it erases the
# cache when called programmatically.
#
# 2007-07-21 NMB In reports, ignore code executed from strings, since we can't
# do anything useful with it anyway.
# Better file handling on Linux, thanks Guillaume Chazarain.
# Better shell support on Windows, thanks Noel O'Boyle.
# Python 2.2 support maintained, thanks Catherine Proulx.
#
# 2007-07-22 NMB Python 2.5 now fully supported. The method of dealing with
# multi-line statements is now less sensitive to the exact line that Python
# reports during execution. Pass statements are handled specially so that their
# disappearance during execution won't throw off the measurement.
#
# 2007-07-23 NMB Now Python 2.5 is *really* fully supported: the body of the
# new with statement is counted as executable.
#
# 2007-07-29 NMB Better packaging.
#
# 2007-09-30 NMB Don't try to predict whether a file is Python source based on
# the extension. Extensionless files are often Python scripts. Instead, simply
# parse the file and catch the syntax errors. Hat tip to Ben Finney.
#
# 2008-05-25 NMB Open files in rU mode to avoid line ending craziness.
# Thanks, Edward Loper.
#
# 2008-09-14 NMB Add support for finding source files in eggs.
# Don't check for morf's being instances of ModuleType, instead use duck typing
# so that pseudo-modules can participate. Thanks, Imri Goldberg.
# Use os.realpath as part of the fixing of filenames so that symlinks won't
# confuse things. Thanks, Patrick Mezard.
#
#
# C. COPYRIGHT AND LICENCE
#
# Copyright 2001 Gareth Rees. All rights reserved.
# Copyright 2004-2008 Ned Batchelder. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# $Id: coverage.py 100 2008-10-12 12:08:22Z nedbat $
|
|
"""Test UniFi config flow."""
from unittest.mock import patch
import aiounifi
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.unifi.const import (
CONF_ALLOW_BANDWIDTH_SENSORS,
CONF_ALLOW_UPTIME_SENSORS,
CONF_BLOCK_CLIENT,
CONF_CONTROLLER,
CONF_DETECTION_TIME,
CONF_DPI_RESTRICTIONS,
CONF_IGNORE_WIRED_BUG,
CONF_POE_CLIENTS,
CONF_SITE_ID,
CONF_SSID_FILTER,
CONF_TRACK_CLIENTS,
CONF_TRACK_DEVICES,
CONF_TRACK_WIRED_CLIENTS,
DOMAIN as UNIFI_DOMAIN,
)
from homeassistant.config_entries import SOURCE_REAUTH, SOURCE_USER
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
CONTENT_TYPE_JSON,
)
from .test_controller import setup_unifi_integration
from tests.common import MockConfigEntry
CLIENTS = [{"mac": "00:00:00:00:00:01"}]
DEVICES = [
{
"board_rev": 21,
"device_id": "mock-id",
"ip": "10.0.1.1",
"last_seen": 0,
"mac": "00:00:00:00:01:01",
"model": "U7PG2",
"name": "access_point",
"state": 1,
"type": "uap",
"version": "4.0.80.10875",
"wlan_overrides": [
{
"name": "SSID 3",
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
{
"name": "",
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
{
"radio": "na",
"radio_name": "wifi1",
"wlan_id": "012345678910111213141516",
},
],
}
]
WLANS = [
{"name": "SSID 1"},
{"name": "SSID 2", "name_combine_enabled": False, "name_combine_suffix": "_IOT"},
]
DPI_GROUPS = [
{
"_id": "5ba29dd8e3c58f026e9d7c4a",
"name": "Default",
"site_id": "5ba29dd4e3c58f026e9d7c38",
},
]
async def test_flow_works(hass, aioclient_mock, mock_discovery):
"""Test config flow."""
mock_discovery.return_value = "1"
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["data_schema"]({CONF_USERNAME: "", CONF_PASSWORD: ""}) == {
CONF_HOST: "unifi",
CONF_USERNAME: "",
CONF_PASSWORD: "",
CONF_PORT: 443,
CONF_VERIFY_SSL: False,
}
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [
{"desc": "Site name", "name": "site_id", "role": "admin", "_id": "1"}
],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "Site name"
assert result["data"] == {
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_SITE_ID: "site_id",
CONF_VERIFY_SSL: True,
CONF_CONTROLLER: {
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_SITE_ID: "site_id",
CONF_VERIFY_SSL: True,
},
}
async def test_flow_multiple_sites(hass, aioclient_mock):
"""Test config flow works when finding multiple sites."""
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [
{"name": "default", "role": "admin", "desc": "site name", "_id": "1"},
{"name": "site2", "role": "admin", "desc": "site2 name", "_id": "2"},
],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "site"
assert result["data_schema"]({"site": "1"})
assert result["data_schema"]({"site": "2"})
async def test_flow_raise_already_configured(hass, aioclient_mock):
"""Test config flow aborts since a connected config entry already exists."""
await setup_unifi_integration(hass, aioclient_mock)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.clear_requests()
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [
{"desc": "Site name", "name": "site_id", "role": "admin", "_id": "1"}
],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_flow_aborts_configuration_updated(hass, aioclient_mock):
"""Test config flow aborts since a connected config entry already exists."""
entry = MockConfigEntry(
domain=UNIFI_DOMAIN, data={"host": "1.2.3.4", "site": "office"}, unique_id="2"
)
entry.add_to_hass(hass)
entry = MockConfigEntry(
domain=UNIFI_DOMAIN, data={"host": "1.2.3.4", "site": "site_id"}, unique_id="1"
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [
{"desc": "Site name", "name": "site_id", "role": "admin", "_id": "1"}
],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
with patch("homeassistant.components.unifi.async_setup_entry"):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "configuration_updated"
async def test_flow_fails_user_credentials_faulty(hass, aioclient_mock):
"""Test config flow."""
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
with patch("aiounifi.Controller.login", side_effect=aiounifi.errors.Unauthorized):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "faulty_credentials"}
async def test_flow_fails_controller_unavailable(hass, aioclient_mock):
"""Test config flow."""
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
aioclient_mock.get("https://1.2.3.4:1234", status=302)
with patch("aiounifi.Controller.login", side_effect=aiounifi.errors.RequestError):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "service_unavailable"}
async def test_reauth_flow_update_configuration(hass, aioclient_mock):
"""Verify reauth flow can update controller configuration."""
config_entry = await setup_unifi_integration(hass, aioclient_mock)
controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
controller.available = False
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": SOURCE_REAUTH},
data=config_entry,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
aioclient_mock.clear_requests()
aioclient_mock.get("https://1.2.3.4:1234", status=302)
aioclient_mock.post(
"https://1.2.3.4:1234/api/login",
json={"data": "login successful", "meta": {"rc": "ok"}},
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"https://1.2.3.4:1234/api/self/sites",
json={
"data": [
{"desc": "Site name", "name": "site_id", "role": "admin", "_id": "1"}
],
"meta": {"rc": "ok"},
},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "new_name",
CONF_PASSWORD: "new_pass",
CONF_PORT: 1234,
CONF_VERIFY_SSL: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reauth_successful"
assert config_entry.data[CONF_HOST] == "1.2.3.4"
assert config_entry.data[CONF_USERNAME] == "new_name"
assert config_entry.data[CONF_PASSWORD] == "new_pass"
async def test_advanced_option_flow(hass, aioclient_mock):
"""Test advanced config flow options."""
config_entry = await setup_unifi_integration(
hass,
aioclient_mock,
clients_response=CLIENTS,
devices_response=DEVICES,
wlans_response=WLANS,
dpigroup_response=DPI_GROUPS,
dpiapp_response=[],
)
result = await hass.config_entries.options.async_init(
config_entry.entry_id, context={"show_advanced_options": True}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "device_tracker"
assert set(
result["data_schema"].schema[CONF_SSID_FILTER].options.keys()
).intersection(("SSID 1", "SSID 2", "SSID 2_IOT", "SSID 3"))
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_TRACK_CLIENTS: False,
CONF_TRACK_WIRED_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_SSID_FILTER: ["SSID 1", "SSID 2_IOT", "SSID 3"],
CONF_DETECTION_TIME: 100,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "client_control"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
CONF_POE_CLIENTS: False,
CONF_DPI_RESTRICTIONS: False,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "statistics_sensors"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_ALLOW_BANDWIDTH_SENSORS: True,
CONF_ALLOW_UPTIME_SENSORS: True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_TRACK_CLIENTS: False,
CONF_TRACK_WIRED_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_SSID_FILTER: ["SSID 1", "SSID 2_IOT", "SSID 3"],
CONF_DETECTION_TIME: 100,
CONF_IGNORE_WIRED_BUG: False,
CONF_POE_CLIENTS: False,
CONF_DPI_RESTRICTIONS: False,
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
CONF_ALLOW_BANDWIDTH_SENSORS: True,
CONF_ALLOW_UPTIME_SENSORS: True,
}
async def test_simple_option_flow(hass, aioclient_mock):
"""Test simple config flow options."""
config_entry = await setup_unifi_integration(
hass,
aioclient_mock,
clients_response=CLIENTS,
wlans_response=WLANS,
dpigroup_response=DPI_GROUPS,
dpiapp_response=[],
)
result = await hass.config_entries.options.async_init(
config_entry.entry_id, context={"show_advanced_options": False}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "simple_options"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_TRACK_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_TRACK_CLIENTS: False,
CONF_TRACK_DEVICES: False,
CONF_BLOCK_CLIENT: [CLIENTS[0]["mac"]],
}
async def test_form_ssdp(hass):
"""Test we get the form with ssdp source."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://192.168.208.1:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
context = next(
flow["context"]
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert context["title_placeholders"] == {
"host": "192.168.208.1",
"site": "default",
}
async def test_form_ssdp_aborts_if_host_already_exists(hass):
"""Test we abort if the host is already configured."""
await setup.async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=UNIFI_DOMAIN,
data={"host": "192.168.208.1", "site": "site_id"},
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://192.168.208.1:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_form_ssdp_aborts_if_serial_already_exists(hass):
"""Test we abort if the serial is already configured."""
await setup.async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=UNIFI_DOMAIN,
data={"controller": {"host": "1.2.3.4", "site": "site_id"}},
unique_id="e0:63:da:20:14:a9",
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://192.168.208.1:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_form_ssdp_gets_form_with_ignored_entry(hass):
"""Test we can still setup if there is an ignored entry."""
await setup.async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=UNIFI_DOMAIN,
data={"not_controller_key": None},
source=config_entries.SOURCE_IGNORE,
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
UNIFI_DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
"friendlyName": "UniFi Dream Machine New",
"modelDescription": "UniFi Dream Machine Pro",
"ssdp_location": "http://1.2.3.4:41417/rootDesc.xml",
"serialNumber": "e0:63:da:20:14:a9",
},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
context = next(
flow["context"]
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert context["title_placeholders"] == {
"host": "1.2.3.4",
"site": "default",
}
|
|
# -*- coding: utf-8 -*-
"""Qt utilities."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import sys
import contextlib
from ..utils._misc import _is_interactive
from ..utils.logging import info, warn
# -----------------------------------------------------------------------------
# PyQt import
# -----------------------------------------------------------------------------
_PYQT = False
try:
from PyQt4 import QtCore, QtGui, QtWebKit # noqa
from PyQt4.QtGui import QMainWindow
Qt = QtCore.Qt
_PYQT = True
except ImportError:
try:
        from PyQt5 import QtCore, QtGui, QtWebKit  # noqa
        # Under PyQt5, QMainWindow lives in QtWidgets rather than QtGui.
        from PyQt5.QtWidgets import QMainWindow
        Qt = QtCore.Qt
        _PYQT = True
except ImportError:
pass
def _check_qt():
if not _PYQT:
warn("PyQt is not available.")
return False
return True
if not _check_qt():
QMainWindow = object # noqa
# -----------------------------------------------------------------------------
# Utility functions
# -----------------------------------------------------------------------------
def _prompt(parent, message, buttons=('yes', 'no'), title='Question'):
buttons = [(button, getattr(QtGui.QMessageBox, button.capitalize()))
for button in buttons]
arg_buttons = 0
for (_, button) in buttons:
arg_buttons |= button
reply = QtGui.QMessageBox.question(parent,
title,
message,
arg_buttons,
buttons[0][1],
)
for name, button in buttons:
if reply == button:
return name
def _set_qt_widget_position_size(widget, position=None, size=None):
if position is not None:
        widget.move(*position)
if size is not None:
widget.resize(*size)
# -----------------------------------------------------------------------------
# Event loop integration with IPython
# -----------------------------------------------------------------------------
_APP = None
_APP_RUNNING = False
def _try_enable_ipython_qt():
"""Try to enable IPython Qt event loop integration.
Returns True in the following cases:
* python -i test.py
* ipython -i test.py
* ipython and %run test.py
Returns False in the following cases:
* python test.py
* ipython test.py
"""
try:
from IPython import get_ipython
ip = get_ipython()
except ImportError:
return False
if not _is_interactive():
return False
if ip:
ip.enable_gui('qt')
global _APP_RUNNING
_APP_RUNNING = True
return True
return False
def enable_qt():
if not _check_qt():
return
try:
from IPython import get_ipython
ip = get_ipython()
ip.enable_gui('qt')
global _APP_RUNNING
_APP_RUNNING = True
info("Qt event loop activated.")
except:
warn("Qt event loop not activated.")
# -----------------------------------------------------------------------------
# Qt app
# -----------------------------------------------------------------------------
def start_qt_app():
"""Start a Qt application if necessary.
If a new Qt application is created, this function returns it.
If no new application is created, the function returns None.
"""
# Only start a Qt application if there is no
# IPython event loop integration.
if not _check_qt():
return
global _APP
if _try_enable_ipython_qt():
return
try:
from vispy import app
app.use_app("pyqt4")
except ImportError:
pass
if QtGui.QApplication.instance():
_APP = QtGui.QApplication.instance()
return
if _APP:
return
_APP = QtGui.QApplication(sys.argv)
return _APP
def run_qt_app():
"""Start the Qt application's event loop."""
global _APP_RUNNING
if not _check_qt():
return
if _APP is not None and not _APP_RUNNING:
_APP_RUNNING = True
_APP.exec_()
if not _is_interactive():
_APP_RUNNING = False
@contextlib.contextmanager
def qt_app():
"""Context manager to ensure that a Qt app is running."""
if not _check_qt():
return
app = start_qt_app()
yield app
run_qt_app()
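# Usage sketch for qt_app() (an illustration; the widget shown is created by the
# caller and can be any Qt window):
#
#   with qt_app():
#       window = QMainWindow()
#       window.show()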
# -----------------------------------------------------------------------------
# Testing utilities
# -----------------------------------------------------------------------------
def _close_qt_after(window, duration):
"""Close a Qt window after a given duration."""
def callback():
window.close()
QtCore.QTimer.singleShot(int(1000 * duration), callback)
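# Illustrative sketch (not called anywhere): in tests, _close_qt_after() is
# typically combined with qt_app() so a window opened by the test closes
# itself after a short delay instead of blocking the suite.  The 0.2 second
# duration is an arbitrary example value.
def _example_auto_close_window():  # pragma: no cover
    with qt_app():
        window = QMainWindow()
        _close_qt_after(window, .2)
        window.show()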
_MAX_ITER = 100
_DELAY = max(0, float(os.environ.get('PHY_EVENT_LOOP_DELAY', .1)))
def _debug_trace():
"""Set a tracepoint in the Python debugger that works with Qt."""
    from pdb import set_trace
    # Use whichever Qt binding was imported above instead of hard-coding PyQt4.
    QtCore.pyqtRemoveInputHook()
set_trace()
|
|
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from mock import patch, ANY, call
from netaddr import IPAddress, IPNetwork
from nose.tools import assert_equal
from subprocess32 import CalledProcessError
from libnetwork_plugin import docker_plugin
from pycalico.datastore_datatypes import Endpoint
from pycalico.datastore_errors import DataStoreError
TEST_ENDPOINT_ID = "TEST_ENDPOINT_ID"
TEST_NETWORK_ID = "TEST_NETWORK_ID"
# Expected 500 error response.
ERROR_RESPONSE_500 = {"Err": "500: Internal Server Error"}
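# All of the tests below follow the same request/response convention against
# the Flask test client: POST a JSON document to one of the /NetworkDriver.*
# endpoints and compare the decoded JSON response.  A minimal sketch of the
# pattern (endpoint and payload are placeholders taken from the tests
# themselves):
#
#     rv = self.app.post('/NetworkDriver.CreateNetwork',
#                        data='{"NetworkID": "%s"}' % TEST_NETWORK_ID)
#     self.assertDictEqual(json.loads(rv.data), {})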
class TestPlugin(unittest.TestCase):
def setUp(self):
self.app = docker_plugin.app.test_client()
def tearDown(self):
pass
def test_404(self):
rv = self.app.post('/')
assert_equal(rv.status_code, 404)
def test_activate(self):
rv = self.app.post('/Plugin.Activate')
activate_response = {"Implements": ["NetworkDriver"]}
self.assertDictEqual(json.loads(rv.data), activate_response)
@patch("libnetwork_plugin.docker_plugin.client.profile_exists", autospec=True, return_value=False)
@patch("libnetwork_plugin.docker_plugin.client.create_profile", autospec=True)
def test_create_network(self, m_create, m_exists):
"""
Test create_network when the profile does not exist.
"""
rv = self.app.post('/NetworkDriver.CreateNetwork',
data='{"NetworkID": "%s"}' % TEST_NETWORK_ID)
m_exists.assert_called_once_with(TEST_NETWORK_ID)
m_create.assert_called_once_with(TEST_NETWORK_ID)
self.assertDictEqual(json.loads(rv.data), {})
@patch("libnetwork_plugin.docker_plugin.client.profile_exists", autospec=True, return_value=True)
@patch("libnetwork_plugin.docker_plugin.client.create_profile", autospec=True)
def test_create_network_exists(self, m_create, m_exists):
"""
Test create_network when the profile already exists.
"""
rv = self.app.post('/NetworkDriver.CreateNetwork',
data='{"NetworkID": "%s"}' % TEST_NETWORK_ID)
m_exists.assert_called_once_with(TEST_NETWORK_ID)
assert_equal(m_create.call_count, 0)
self.assertDictEqual(json.loads(rv.data), {})
@patch("libnetwork_plugin.docker_plugin.client.remove_profile", autospec=True)
def test_delete_network(self, m_remove):
"""
Test the delete_network hook correctly removes the etcd data and
returns the correct response.
"""
rv = self.app.post('/NetworkDriver.DeleteNetwork',
data='{"NetworkID": "%s"}' % TEST_NETWORK_ID)
m_remove.assert_called_once_with(TEST_NETWORK_ID)
self.assertDictEqual(json.loads(rv.data), {})
@patch("libnetwork_plugin.docker_plugin.client.remove_profile", autospec=True)
def test_delete_network_no_profile(self, m_remove):
"""
        Test the delete_network hook returns the correct response when the
        profile is not present in etcd (remove_profile raises KeyError).
"""
m_remove.side_effect = KeyError
rv = self.app.post('/NetworkDriver.DeleteNetwork',
data='{"NetworkID": "%s"}' % TEST_NETWORK_ID)
m_remove.assert_called_once_with(TEST_NETWORK_ID)
self.assertDictEqual(json.loads(rv.data), {})
def test_oper_info(self):
"""
Test oper_info returns the correct data.
"""
rv = self.app.post('/NetworkDriver.EndpointOperInfo',
data='{"EndpointID": "%s"}' % TEST_ENDPOINT_ID)
self.assertDictEqual(json.loads(rv.data), {"Value": {}})
@patch("libnetwork_plugin.docker_plugin.client.get_default_next_hops", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.read_cnm_endpoint", autospec=True)
@patch("libnetwork_plugin.docker_plugin.create_veth", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.set_endpoint", autospec=True)
def test_join(self, m_set, m_veth, m_read, m_next_hops):
"""
Test the join() processing correctly creates the veth and the Endpoint.
"""
endpoint_json = {"Interfaces":
[
{"Address": "1.2.3.4",
"AddressIPv6": "FE80::0202:B3FF:FE1E:8329",
"ID": 0,
"MacAddress": "EE:EE:EE:EE:EE:EE"}
]
}
m_read.return_value = endpoint_json
m_next_hops.return_value = {4: IPAddress("1.2.3.4"),
6: IPAddress("fe80::202:b3ff:fe1e:8329")}
# Actually make the request to the plugin.
rv = self.app.post('/NetworkDriver.Join',
data='{"EndpointID": "%s", "NetworkID": "%s"}' %
(TEST_ENDPOINT_ID, TEST_NETWORK_ID))
m_read.assert_called_once_with(TEST_ENDPOINT_ID)
# Check that the create_veth and set_endpoint are called with this
# endpoint.
endpoint = Endpoint("hostname",
"docker",
"libnetwork",
TEST_ENDPOINT_ID,
"active",
"EE:EE:EE:EE:EE:EE")
endpoint.ipv4_gateway = IPAddress("1.2.3.4")
endpoint.ipv6_gateway = IPAddress("FE80::0202:B3FF:FE1E:8329")
endpoint.ipv4_nets.add(IPNetwork("1.2.3.4/32"))
endpoint.ipv6_nets.add(IPNetwork("FE80::0202:B3FF:FE1E:8329/128"))
endpoint.profile_ids.append(TEST_NETWORK_ID)
m_veth.assert_called_once_with(endpoint)
m_set.assert_called_once_with(endpoint)
expected_response = """{
"Gateway": "1.2.3.4",
"GatewayIPv6": "fe80::202:b3ff:fe1e:8329",
"InterfaceNames": [
{
"DstPrefix": "cali",
"SrcName": "tmpTEST_ENDPOI"
}
],
"StaticRoutes": [
{
"Destination": "1.2.3.4/32",
"InterfaceID": 0,
"NextHop": "",
"RouteType": 1
},
{
"Destination": "fe80::202:b3ff:fe1e:8329/128",
"InterfaceID": 0,
"NextHop": "",
"RouteType": 1
}
]
}"""
        self.maxDiff = None
self.assertDictEqual(json.loads(rv.data),
json.loads(expected_response))
@patch("libnetwork_plugin.docker_plugin.client.get_default_next_hops", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.read_cnm_endpoint", autospec=True)
@patch("libnetwork_plugin.docker_plugin.create_veth", autospec=True)
@patch("libnetwork_plugin.docker_plugin.remove_veth", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.set_endpoint", autospec=True)
def test_join_veth_fail(self, m_set, m_del_veth, m_veth, m_read, m_next_hops):
"""
Test the join() processing when create_veth fails.
"""
m_veth.side_effect = CalledProcessError(2, "testcmd")
endpoint_json = {"Interfaces":
[
{"Address": "1.2.3.4",
"ID": 0,
"MacAddress": "EE:EE:EE:EE:EE:EE"}
]
}
m_read.return_value = endpoint_json
m_next_hops.return_value = {4: IPAddress("1.2.3.4"),
6: None}
# Actually make the request to the plugin.
rv = self.app.post('/NetworkDriver.Join',
data='{"EndpointID": "%s", "NetworkID": "%s"}' %
(TEST_ENDPOINT_ID, TEST_NETWORK_ID))
m_read.assert_called_once_with(TEST_ENDPOINT_ID)
# Check that the create_veth is called with this
# endpoint.
endpoint = Endpoint("hostname",
"docker",
"libnetwork",
TEST_ENDPOINT_ID,
"active",
"EE:EE:EE:EE:EE:EE")
endpoint.ipv4_gateway = IPAddress("1.2.3.4")
endpoint.ipv4_nets.add(IPNetwork("1.2.3.4/32"))
endpoint.profile_ids.append(TEST_NETWORK_ID)
# Check that create veth is called with the expected endpoint, and
# that set_endpoint is not (since create_veth is raising an exception).
m_veth.assert_called_once_with(endpoint)
assert_equal(m_set.call_count, 0)
# Check that we delete the veth.
m_del_veth.assert_called_once_with(endpoint)
# Expect a 500 response.
self.assertDictEqual(json.loads(rv.data), ERROR_RESPONSE_500)
@patch("libnetwork_plugin.docker_plugin.client.get_default_next_hops", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.read_cnm_endpoint", autospec=True)
@patch("libnetwork_plugin.docker_plugin.create_veth", autospec=True)
@patch("libnetwork_plugin.docker_plugin.remove_veth", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.set_endpoint", autospec=True, side_effect=DataStoreError)
def test_join_set_fail(self, m_set, m_del_veth, m_veth, m_read, m_next_hops):
"""
Test the join() processing when set_endpoint fails.
"""
endpoint_json = {"Interfaces":
[
{"Address": "1.2.3.4",
"ID": 0,
"MacAddress": "EE:EE:EE:EE:EE:EE"}
]
}
m_read.return_value = endpoint_json
m_next_hops.return_value = {4: IPAddress("1.2.3.4"),
6: None}
# Actually make the request to the plugin.
rv = self.app.post('/NetworkDriver.Join',
data='{"EndpointID": "%s", "NetworkID": "%s"}' %
(TEST_ENDPOINT_ID, TEST_NETWORK_ID))
m_read.assert_called_once_with(TEST_ENDPOINT_ID)
# Check that the create_veth is called with this
# endpoint.
endpoint = Endpoint("hostname",
"docker",
"libnetwork",
TEST_ENDPOINT_ID,
"active",
"EE:EE:EE:EE:EE:EE")
endpoint.ipv4_gateway = IPAddress("1.2.3.4")
endpoint.ipv4_nets.add(IPNetwork("1.2.3.4/32"))
endpoint.profile_ids.append(TEST_NETWORK_ID)
# Check that create veth and set_endpoint are called with the
# endpoint. The set throws a DataStoreError and so we clean up the
# veth.
m_veth.assert_called_once_with(endpoint)
m_set.assert_called_once_with(endpoint)
# Check that we delete the veth.
m_del_veth.assert_called_once_with(endpoint)
# Expect a 500 response.
self.assertDictEqual(json.loads(rv.data), ERROR_RESPONSE_500)
@patch("libnetwork_plugin.docker_plugin.remove_veth", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.get_endpoint", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.remove_endpoint", autospec=True)
def test_leave(self, m_remove, m_get, m_veth):
"""
Test leave() processing removes the endpoint and veth.
"""
endpoint = Endpoint("hostname",
"docker",
"libnetwork",
TEST_ENDPOINT_ID,
"active",
"EE:EE:EE:EE:EE:EE")
m_get.return_value = endpoint
# Send the leave request.
rv = self.app.post('/NetworkDriver.Leave',
data='{"EndpointID": "%s"}' % TEST_ENDPOINT_ID)
self.assertDictEqual(json.loads(rv.data), {})
# Check parameters
m_get.assert_called_once_with(hostname=ANY,
orchestrator_id="docker",
workload_id="libnetwork",
endpoint_id=TEST_ENDPOINT_ID)
m_remove.assert_called_once_with(endpoint)
m_veth.assert_called_once_with(endpoint)
@patch("libnetwork_plugin.docker_plugin.remove_veth", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.get_endpoint", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.remove_endpoint", autospec=True)
def test_leave_no_endpoint(self, m_remove, m_get, m_veth):
"""
        Test the leave processing when there is no endpoint.
"""
m_get.side_effect = KeyError
# Send the leave request.
rv = self.app.post('/NetworkDriver.Leave',
data='{"EndpointID": "%s"}' % TEST_ENDPOINT_ID)
self.assertDictEqual(json.loads(rv.data), ERROR_RESPONSE_500)
# Check parameters
m_get.assert_called_once_with(hostname=ANY,
orchestrator_id="docker",
workload_id="libnetwork",
endpoint_id=TEST_ENDPOINT_ID)
assert_equal(m_remove.call_count, 0)
assert_equal(m_veth.call_count, 0)
@patch("libnetwork_plugin.docker_plugin.remove_veth", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.get_endpoint", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.remove_endpoint", autospec=True)
def test_leave_delete_failed(self, m_remove, m_get, m_veth):
"""
        Test the leave processing when removing the endpoint from the
        datastore fails.
"""
endpoint = Endpoint("hostname",
"docker",
"libnetwork",
TEST_ENDPOINT_ID,
"active",
"EE:EE:EE:EE:EE:EE")
m_get.return_value = endpoint
m_remove.side_effect = DataStoreError
# Send the leave request.
rv = self.app.post('/NetworkDriver.Leave',
data='{"EndpointID": "%s"}' % TEST_ENDPOINT_ID)
self.assertDictEqual(json.loads(rv.data), {})
# Check parameters
m_get.assert_called_once_with(hostname=ANY,
orchestrator_id="docker",
workload_id="libnetwork",
endpoint_id=TEST_ENDPOINT_ID)
m_remove.assert_called_once_with(endpoint)
m_veth.assert_called_once_with(endpoint)
@patch("libnetwork_plugin.docker_plugin.backout_ip_assignments", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.read_cnm_endpoint", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.delete_cnm_endpoint", autospec=True)
def test_delete_endpoint(self, m_delete, m_read, m_backout):
"""
        Test delete_endpoint() deletes the endpoint and backs out the IP
        assignments.
"""
ep = {"test": 1}
m_read.return_value = ep
m_delete.return_value = True
rv = self.app.post('/NetworkDriver.DeleteEndpoint',
data='{"EndpointID": "%s"}' % TEST_ENDPOINT_ID)
m_read.assert_called_once_with(TEST_ENDPOINT_ID)
m_delete.assert_called_once_with(TEST_ENDPOINT_ID)
m_backout.assert_called_once_with(ep)
self.assertDictEqual(json.loads(rv.data), {})
@patch("libnetwork_plugin.docker_plugin.backout_ip_assignments", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.read_cnm_endpoint", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.delete_cnm_endpoint", autospec=True)
def test_delete_endpoint_does_not_exist(self, m_delete, m_read, m_backout):
"""
Test delete_endpoint() when the endpoint does not exist.
"""
m_read.return_value = None
rv = self.app.post('/NetworkDriver.DeleteEndpoint',
data='{"EndpointID": "%s"}' % TEST_ENDPOINT_ID)
m_read.assert_called_once_with(TEST_ENDPOINT_ID)
assert_equal(m_delete.call_count, 0)
assert_equal(m_backout.call_count, 0)
self.assertDictEqual(json.loads(rv.data), {})
@patch("libnetwork_plugin.docker_plugin.backout_ip_assignments", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.read_cnm_endpoint", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.delete_cnm_endpoint", autospec=True)
def test_delete_endpoint_just_deleted(self, m_delete, m_read, m_backout):
"""
Test delete_endpoint() when the endpoint is deleted just before we
were about to.
"""
ep = {"test": 1}
m_read.return_value = ep
m_delete.return_value = False
rv = self.app.post('/NetworkDriver.DeleteEndpoint',
data='{"EndpointID": "%s"}' % TEST_ENDPOINT_ID)
m_read.assert_called_once_with(TEST_ENDPOINT_ID)
m_delete.assert_called_once_with(TEST_ENDPOINT_ID)
assert_equal(m_backout.call_count, 0)
self.assertDictEqual(json.loads(rv.data), {})
@patch("libnetwork_plugin.docker_plugin.client.cnm_endpoint_exists", autospec=True, return_value=False)
@patch("libnetwork_plugin.docker_plugin.assign_ip", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.write_cnm_endpoint", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.get_default_next_hops", autospec=True)
def test_create_endpoint(self, m_next_hops, m_write, m_assign_ip, m_exists):
"""
Test the create_endpoint hook correctly writes the appropriate data
to etcd based on IP assignment.
"""
# Iterate using various different mixtures of next hops and IP
# assignments.
#
# (IPv4 NH, IPv4 addr, IPv6 NH, IPv6 addr)
parms = [(IPAddress("10.20.30.40"), IPAddress("1.2.3.4"),
IPAddress("aa:bb::ff"), IPAddress("aa:bb::bb")),
(IPAddress("10.20.30.40"), None,
IPAddress("aa:bb::ff"), IPAddress("aa:bb::bb")),
(IPAddress("10.20.30.40"), IPAddress("1.2.3.4"),
IPAddress("aa:bb::ff"), None),
(IPAddress("10.20.30.40"), IPAddress("1.2.3.4"),
None, None),
(None, None,
IPAddress("aa:bb::ff"), IPAddress("aa:bb::bb"))]
# Loop through different combinations of IP availability.
for ipv4_nh, ipv4, ipv6_nh, ipv6 in parms:
# Return the required next hops.
m_next_hops.return_value = {4: ipv4_nh,
6: ipv6_nh}
# Return the required assigned IPs.
def assign_ip(version):
if version == 4:
return ipv4
elif version == 6:
return ipv6
raise AssertionError("Unexpected version: %s" % version)
m_assign_ip.side_effect = assign_ip
# Invoke create endpoint.
rv = self.app.post('/NetworkDriver.CreateEndpoint',
data='{"EndpointID": "%s"}' % TEST_ENDPOINT_ID)
# Assert cnm_endpoint_exists was called.
m_exists.assert_called_once_with(TEST_ENDPOINT_ID)
# Construct the expected data.
expected_data = {
"Interfaces":
[
{"ID": 0, "MacAddress": "EE:EE:EE:EE:EE:EE"}
]
}
if ipv4:
expected_data["Interfaces"][0]["Address"] = str(ipv4)
if ipv6:
expected_data["Interfaces"][0]["AddressIPv6"] = str(ipv6)
# Assert that the assign IP was called the correct number of
# times based on whether a next hop was returned.
expected_assign_count = 0
if ipv4_nh:
expected_assign_count += 1
if ipv6_nh:
expected_assign_count += 1
assert_equal(m_assign_ip.call_count, expected_assign_count)
# Assert expected data is written to etcd and returned from
# request.
m_write.assert_called_once_with(TEST_ENDPOINT_ID,
expected_data)
self.assertDictEqual(json.loads(rv.data), expected_data)
# Reset the Mocks before continuing.
m_write.reset_mock()
m_next_hops.reset_mock()
m_assign_ip.reset_mock()
m_exists.reset_mock()
@patch("libnetwork_plugin.docker_plugin.client.cnm_endpoint_exists", autospec=True, return_value=False)
@patch("libnetwork_plugin.docker_plugin.client.write_cnm_endpoint", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.get_default_next_hops", autospec=True)
def test_create_endpoint_no_ip(self, m_next_hops, m_write, m_exists):
"""
Test the create_endpoint hook writes no data and returns a 500 error
when no IP addresses can be assigned.
"""
m_next_hops.return_value = {4: None, 6: None}
# Invoke create endpoint.
rv = self.app.post('/NetworkDriver.CreateEndpoint',
data='{"EndpointID": "%s"}' % TEST_ENDPOINT_ID)
# Assert cnm_endpoint_exists was called.
m_exists.assert_called_once_with(TEST_ENDPOINT_ID)
# Assert no data is written and returns 500 response.
assert_equal(m_write.call_count, 0)
self.assertDictEqual(json.loads(rv.data), ERROR_RESPONSE_500)
@patch("libnetwork_plugin.docker_plugin.client.cnm_endpoint_exists", autospec=True, return_value=True)
@patch("libnetwork_plugin.docker_plugin.client.write_cnm_endpoint", autospec=True)
def test_create_endpoint_exists(self, m_write, m_exists):
"""
        Test the create_endpoint hook writes no data and returns an empty
        response when the CNM endpoint already exists.
"""
# Invoke create endpoint.
rv = self.app.post('/NetworkDriver.CreateEndpoint',
data='{"EndpointID": "%s"}' % TEST_ENDPOINT_ID)
# Assert cnm_endpoint_exists was called.
m_exists.assert_called_once_with(TEST_ENDPOINT_ID)
# Assert no data is written.
assert_equal(m_write.call_count, 0)
# Assert empty data is returned.
self.assertDictEqual(json.loads(rv.data), {})
@patch("libnetwork_plugin.docker_plugin.client.get_ip_pools", autospec=True)
@patch("pycalico.ipam.SequentialAssignment.allocate", autospec=True)
def test_assign_ip(self, m_allocate, m_pools):
"""
Test assign_ip assigns an IP address.
"""
m_pools.return_value = [IPNetwork("1.2.3.0/24"), IPNetwork("2.3.4.5/32")]
m_allocate.return_value = IPAddress("1.2.3.6")
ip = docker_plugin.assign_ip(4)
assert_equal(ip, IPNetwork("1.2.3.6"))
m_pools.assert_called_once_with(4)
m_allocate.assert_called_once_with(ANY, IPNetwork("1.2.3.0/24"))
@patch("libnetwork_plugin.docker_plugin.client.get_ip_pools", autospec=True)
@patch("pycalico.ipam.SequentialAssignment.allocate", autospec=True)
def test_assign_ip_no_ip(self, m_allocate, m_pools):
"""
Test assign_ip when no IP addresses can be allocated.
"""
m_pools.return_value = [IPNetwork("1.2.3.0/24"),
IPNetwork("2.3.4.5/32")]
m_allocate.return_value = None
ip = docker_plugin.assign_ip(4)
assert_equal(ip, None)
m_pools.assert_called_once_with(4)
# We should have attempted to allocate for each pool.
m_allocate.assert_has_calls([call(ANY, IPNetwork("1.2.3.0/24")),
call(ANY, IPNetwork("2.3.4.5/32"))])
@patch("libnetwork_plugin.docker_plugin.client.get_ip_pools", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.unassign_address", autospec=True)
def test_unassign_ip(self, m_unassign, m_pools):
"""
Test unassign_ip unassigns an IP address.
"""
m_pools.return_value = [IPNetwork("1.2.3.0/24"), IPNetwork("2.3.0.0/16")]
m_unassign.return_value = True
self.assertTrue(docker_plugin.unassign_ip(IPAddress("2.3.4.5")))
m_pools.assert_called_once_with(4)
m_unassign.assert_called_once_with(IPNetwork("2.3.0.0/16"),
IPAddress("2.3.4.5"))
@patch("libnetwork_plugin.docker_plugin.client.get_ip_pools", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.unassign_address", autospec=True)
def test_unassign_ip_no_pools(self, m_unassign, m_pools):
"""
Test unassign_ip when the IP does not fall in any configured pools.
"""
m_pools.return_value = [IPNetwork("1.2.3.0/24"), IPNetwork("2.3.0.0/16")]
m_unassign.return_value = False
self.assertFalse(docker_plugin.unassign_ip(IPAddress("2.30.11.11")))
m_pools.assert_called_once_with(4)
self.assertEquals(m_unassign.call_count, 0)
@patch("libnetwork_plugin.docker_plugin.client.get_ip_pools", autospec=True)
@patch("libnetwork_plugin.docker_plugin.client.unassign_address", autospec=True)
def test_unassign_ip_not_in_pools(self, m_unassign, m_pools):
"""
        Test unassign_ip when the IP falls within configured pools but cannot
        be unassigned from any of them.
"""
m_pools.return_value = [IPNetwork("1.2.3.0/24"),
IPNetwork("2.3.0.0/16"),
IPNetwork("1.2.0.0/16")]
m_unassign.return_value = False
self.assertFalse(docker_plugin.unassign_ip(IPAddress("1.2.3.4")))
m_pools.assert_called_once_with(4)
m_unassign.assert_has_calls([call(IPNetwork("1.2.3.0/24"),
IPAddress("1.2.3.4")),
call(IPNetwork("1.2.0.0/16"),
IPAddress("1.2.3.4"))])
@patch("libnetwork_plugin.docker_plugin.unassign_ip", autospec=True)
def test_backout_ip_assignments(self, m_unassign):
"""
        Test backout_ip_assignments processing.
"""
m_unassign.return_value = True
cnm_ep = {"Interfaces": [{"Address": "1.2.3.4"}]}
docker_plugin.backout_ip_assignments(cnm_ep)
m_unassign.assert_called_once_with(IPAddress("1.2.3.4"))
m_unassign.reset_mock()
cnm_ep = {"Interfaces": [{"AddressIPv6": "aa:bb::ff"}]}
docker_plugin.backout_ip_assignments(cnm_ep)
m_unassign.assert_called_once_with(IPAddress("aa:bb::ff"))
m_unassign.reset_mock()
cnm_ep = {"Interfaces": [{"Address": "1.2.3.4",
"AddressIPv6": "aa:bb::ff"}]}
docker_plugin.backout_ip_assignments(cnm_ep)
m_unassign.assert_has_calls([call(IPAddress("1.2.3.4")),
call(IPAddress("aa:bb::ff"))])
@patch("libnetwork_plugin.docker_plugin.unassign_ip", autospec=True)
def test_backout_ip_assignments_failed_unassign(self, m_unassign):
"""
        Test backout_ip_assignments processing when unassignment fails.
"""
m_unassign.return_value = False
cnm_ep = {"Interfaces": [{"Address": "1.2.3.4"}]}
docker_plugin.backout_ip_assignments(cnm_ep)
m_unassign.assert_called_once_with(IPAddress("1.2.3.4"))
@patch("pycalico.netns.set_veth_mac", autospec=True)
@patch("pycalico.netns.create_veth", autospec=True)
def test_create_veth(self, m_create, m_set):
"""
Test create_veth calls through to netns to create the veth and
set the MAC.
"""
endpoint = Endpoint("hostname",
"docker",
"libnetwork",
TEST_ENDPOINT_ID,
"active",
"EE:EE:EE:EE:EE:EE")
docker_plugin.create_veth(endpoint)
m_create.assert_called_once_with(endpoint.name,
endpoint.temp_interface_name)
m_set.assert_called_once_with(endpoint.temp_interface_name,
endpoint.mac)
@patch("pycalico.netns.remove_veth", autospec=True, side_effect=CalledProcessError(2, "test"))
def test_remove_veth_fail(self, m_remove):
"""
Test remove_veth calls through to netns to remove the veth.
Fail with a CalledProcessError to write the log.
"""
endpoint = Endpoint("hostname",
"docker",
"libnetwork",
TEST_ENDPOINT_ID,
"active",
"EE:EE:EE:EE:EE:EE")
docker_plugin.remove_veth(endpoint)
m_remove.assert_called_once_with(endpoint.name)
|
|
import signal
import unittest
import os
from contextlib import contextmanager
from functools import wraps
import sys
import uvio
from uvio.process import ProcessOptions, Popen, PIPE
from uvio import sync
from uvio.pipes import Pipe
from inspect import iscoroutinefunction
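# The stdout/stderr tests below all rely on the same callback API exposed by
# uvio pipes: pass PIPE for a stream, register a data handler and an end
# handler on it, then await the process return code.  Sketch of the pattern
# as it is used inside the @sync(timeout=...) coroutines in this file
# (handler names are placeholders):
#
#     p = await Popen(['python', '-c', 'print("hi")'], stdout=PIPE)
#
#     @p.stdout.data
#     def on_data(buf):
#         ...  # buf is a bytes chunk
#
#     @p.stdout.end
#     def on_end():
#         ...  # no more output will arrive
#
#     await p.returncode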
class Test(unittest.TestCase):
def test_options(self):
opts = ProcessOptions(['echo', 'hello'], cwd='.')
self.assertEqual(opts.cwd, '.')
self.assertEqual(opts.executable, 'echo')
self.assertEqual(opts.args, ['echo', 'hello'])
del opts
def test_stdio_option_pipe(self):
pipe1 = Pipe()
pipe2 = Pipe()
pipe3 = Pipe()
opts = ProcessOptions(['ok'], stdin=pipe1, stdout=pipe2, stderr=pipe3)
self.assertIs(opts.stdin, pipe1)
self.assertIs(opts.stdout, pipe2)
self.assertIs(opts.stderr, pipe3)
@sync(timeout=1)
async def test_stdio_fd(self):
with open("test.out", "w") as fd:
p0 = await Popen(['python', '-c', 'print("hello")'], stdout=fd)
self.assertEqual(await p0.returncode, 0)
with open("test.out", "r") as fd:
self.assertEqual(fd.read(), 'hello\n')
@sync(timeout=1)
async def test_stdout_env(self):
stdout_captured = None
stdout_ended = False
p0 = await Popen(
['python', '-c', 'import os; print("env: {}".format(os.environ["FOOVAR"]))'],
stdout=PIPE,
env={"FOOVAR": "WOW!"}
)
self.assertIsNotNone(p0.stdout)
self.assertIsNone(p0.stdin)
self.assertIsNone(p0.stderr)
@p0.stdout.data
def data(buf):
nonlocal stdout_captured
stdout_captured = buf
@p0.stdout.end
def end():
nonlocal stdout_ended
stdout_ended = True
self.assertEqual(await p0.returncode, 0)
self.assertTrue(stdout_ended)
self.assertEqual(stdout_captured, b'env: WOW!\n')
@sync(timeout=1)
async def test_stdout_pipe(self):
stdout_captured = None
stdout_ended = False
p0 = await Popen(['python', '-c', 'print("hello")'], stdout=PIPE)
self.assertIsNotNone(p0.stdout)
self.assertIsNone(p0.stdin)
self.assertIsNone(p0.stderr)
@p0.stdout.data
def data(buf):
nonlocal stdout_captured
stdout_captured = buf
@p0.stdout.end
def end():
nonlocal stdout_ended
stdout_ended = True
self.assertEqual(await p0.returncode, 0)
self.assertTrue(stdout_ended)
self.assertEqual(stdout_captured, b'hello\n')
@sync(timeout=1)
async def test_stderr_pipe(self):
stderr_captured = None
stderr_ended = False
p0 = await Popen(['python', '-c', 'import sys; print("hello", file=sys.stderr)'], stderr=PIPE)
self.assertIsNotNone(p0.stderr)
self.assertIsNone(p0.stdout)
self.assertIsNone(p0.stdin)
@p0.stderr.data
def data(buf):
nonlocal stderr_captured
stderr_captured = buf
@p0.stderr.end
def end():
nonlocal stderr_ended
stderr_ended = True
self.assertEqual(await p0.returncode, 0)
self.assertTrue(stderr_ended)
self.assertEqual(stderr_captured, b'hello\n')
@sync(timeout=1)
async def test_stdio_pipe(self):
stdout_captured = None
stdout_ended = False
p0 = await Popen(
['python', '-c', 'import sys; print("echo: +{}+".format(sys.stdin.read()))'],
stdin=PIPE, stdout=PIPE, stderr=sys.stderr
)
self.assertIsNotNone(p0.stdout)
self.assertIsNotNone(p0.stdin)
self.assertIsNone(p0.stderr)
@p0.stdout.data
def data(buf):
nonlocal stdout_captured
stdout_captured = buf
@p0.stdout.end
def end():
nonlocal stdout_ended
stdout_ended = True
p0.stdin.write(b"write me")
p0.stdin.close()
self.assertEqual(await p0.returncode, 0)
self.assertTrue(stdout_ended)
self.assertEqual(stdout_captured, b'echo: +write me+\n')
@sync(timeout=1)
async def test_simple(self):
p0 = await Popen(['python', '-c', 'print("hello")'])
self.assertEqual(await p0.returncode, 0)
@sync(timeout=1)
async def test_exit_status(self):
p0 = await Popen(['python', '-c', 'exit(17)'])
self.assertEqual(await p0.returncode, 17)
@sync(timeout=1)
async def test_kill(self):
p0 = await Popen(['python', '-c', 'import time; time.sleep(12)'])
await uvio.sleep(.1)
p0.kill()
self.assertEqual(await p0.returncode, 0)
@sync(timeout=1)
async def test_interupt(self):
p0 = await Popen(['python', '-c', 'import time; time.sleep(17)'], stdout=sys.stderr, stderr=sys.stderr)
await uvio.sleep(.1)
p0.kill(signal.SIGINT)
rc = p0.returncode
self.assertEqual(await rc, 1)
if __name__ == '__main__':
unittest.main()
|
|
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from itertools import izip
import operator
from vistrails.core.data_structures.bijectivedict import Bidict
from vistrails.core.modules.utils import create_port_spec_string, parse_port_spec_string
from vistrails.core.system import get_vistrails_basic_pkg_id, \
get_module_registry
from vistrails.core.utils import enum, VistrailsInternalError
from vistrails.core.vistrail.port_spec_item import PortSpecItem
from vistrails.db.domain import DBPortSpec, IdScope
from ast import literal_eval
import unittest
import copy
PortEndPoint = enum('PortEndPoint',
['Invalid', 'Source', 'Destination'])
################################################################################
class PortSpec(DBPortSpec):
port_type_map = Bidict([('input', 'destination'),
('output', 'source'),
('invalid', 'invalid')])
end_point_map = Bidict([('source', PortEndPoint.Source),
('destination', PortEndPoint.Destination),
('invalid', PortEndPoint.Invalid)])
##########################################################################
# Constructors and copy
def __init__(self, *args, **kwargs):
signature = None
if 'signature' in kwargs:
signature = kwargs['signature']
del kwargs['signature']
sigstring = None
if 'sigstring' in kwargs:
sigstring = kwargs['sigstring']
del kwargs['sigstring']
defaults = None
if 'defaults' in kwargs:
defaults = kwargs['defaults']
del kwargs['defaults']
labels = None
if 'labels' in kwargs:
labels = kwargs['labels']
del kwargs['labels']
values = None
if 'values' in kwargs:
values = kwargs['values']
del kwargs['values']
entry_types = None
if 'entry_types' in kwargs:
entry_types = kwargs['entry_types']
del kwargs['entry_types']
if 'items' in kwargs and 'portSpecItems' not in kwargs:
kwargs['portSpecItems'] = kwargs['items']
del kwargs['items']
if 'optional' not in kwargs:
kwargs['optional'] = 0 # False
elif not isinstance(kwargs['optional'], (int, long)):
if isinstance(kwargs['optional'], bool):
if kwargs['optional']:
kwargs['optional'] = 1
else:
kwargs['optional'] = 0
else:
raise VistrailsInternalError("Cannot parse 'optional' kw "
"-- must be an int or bool")
if 'min_conns' not in kwargs:
kwargs['min_conns'] = 0
elif kwargs['optional'] == 1 and kwargs['min_conns'] > 0:
raise VistrailsInternalError("A mandatory port cannot be set "
"to optional")
if 'max_conns' not in kwargs:
kwargs['max_conns'] = -1
if kwargs['min_conns'] >= 0 and kwargs['max_conns'] >= 0 and \
kwargs['min_conns'] > kwargs['max_conns']:
raise VistrailsInternalError("Minimum number of connections "
"cannot be greater than maximum "
"number of connections")
if 'sort_key' not in kwargs:
kwargs['sort_key'] = -1
if 'depth' not in kwargs:
kwargs['depth'] = 0
if 'id' not in kwargs:
kwargs['id'] = -1
if 'tooltip' in kwargs:
self._tooltip = kwargs['tooltip']
del kwargs['tooltip']
else:
self._tooltip = None
if 'docstring' in kwargs:
self._docstring = kwargs['docstring']
del kwargs['docstring']
else:
self._docstring = None
if 'shape' in kwargs:
self._shape = kwargs['shape']
del kwargs['shape']
else:
self._shape = None
DBPortSpec.__init__(self, *args, **kwargs)
if sum(1 for container in (self.port_spec_items, signature, sigstring)
if container) > 1:
raise ValueError("Please specify only one of portSpecItems,"
" signature, or sigstring kwargs.")
self.create_spec_items(self.port_spec_items, signature, sigstring,
defaults, labels, values, entry_types)
self._short_sigstring = None
# if signature is not None:
# self.create_entries(signature)
# if not self.sigstring and self._entries is not None:
# # create sigstring from entries
# self.create_sigstring_and_descriptors()
# DAKOOP: removed this---we will check in module_registry and pipeline
# validation, this way, we can let errors go all the way up
# elif self._entries is None and self.sigstring:
# # create entries from sigstring
# self.create_entries_and_descriptors()
# else:
# raise VistrailsInternalError("Need to specify signature or "
# "sigstring to create PortSpec")
# if self._entries is not None and self._tooltip is None:
# self.create_tooltip()
self.is_valid = True
def __copy__(self):
return PortSpec.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = DBPortSpec.do_copy(self, new_ids, id_scope, id_remap)
cp._short_sigstring = self._short_sigstring
cp._tooltip = self._tooltip
cp._shape = self._shape
cp._docstring = self._docstring
cp.is_valid = self.is_valid
cp.__class__ = PortSpec
# if cp._entries is not None:
# cp.create_tooltip()
return cp
@staticmethod
def convert(_port_spec):
if _port_spec.__class__ == PortSpec:
return
_port_spec.__class__ = PortSpec
for _port_spec_item in _port_spec.db_portSpecItems:
PortSpecItem.convert(_port_spec_item)
_port_spec._short_sigstring = None
_port_spec._tooltip = None
_port_spec._shape = None
_port_spec._docstring = None
_port_spec.is_valid = True
_port_spec.port_spec_items.sort(key=operator.attrgetter('db_pos'))
@staticmethod
    def from_sigstring(sigstring):
        """from_sigstring(sigstring: string) -> PortSpec
Returns a portspec from the given sigstring.
"""
return PortSpec(sigstring=sigstring)
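    # Illustrative sketch (mirrors the fixture in TestPortSpec at the bottom
    # of this file): a sigstring wraps comma-separated "<package>:<module>"
    # entries in parentheses, so a single basic String port can be built with
    #
    #     PortSpec.from_sigstring(
    #         '(%s:String)' % get_vistrails_basic_pkg_id())
    #
    # Name, type, and the other attributes are normally passed to the
    # PortSpec constructor directly, as in TestPortSpec.create_port_spec.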
##########################################################################
# Properties
id = DBPortSpec.db_id
name = DBPortSpec.db_name
type = DBPortSpec.db_type
optional = DBPortSpec.db_optional
sort_key = DBPortSpec.db_sort_key
min_conns = DBPortSpec.db_min_conns
max_conns = DBPortSpec.db_max_conns
_depth = DBPortSpec.db_depth
port_spec_items = DBPortSpec.db_portSpecItems
items = DBPortSpec.db_portSpecItems
def _get_sigstring(self):
return create_port_spec_string([i.spec_tuple
for i in self.port_spec_items])
sigstring = property(_get_sigstring)
def is_mandatory(self):
return (self.min_conns > 0)
def _get_labels(self):
return [i.label for i in self.port_spec_items]
labels = property(_get_labels)
def _get_defaults(self):
return [i.default for i in self.port_spec_items]
defaults = property(_get_defaults)
def _get_short_sigstring(self):
if self._short_sigstring is None:
self.create_tooltip()
return self._short_sigstring
short_sigstring = property(_get_short_sigstring)
def _get_signature(self):
signature = []
for i in self.port_spec_items:
signature.append((i.descriptor.module, i.label))
return signature
signature = property(_get_signature)
def _get_depth(self):
return self._depth or 0
def _set_depth(self, depth):
self._depth = depth
depth = property(_get_depth, _set_depth)
def toolTip(self):
if self._tooltip is None:
self.create_tooltip()
return self._tooltip
def shape(self):
return self._shape
def docstring(self):
return self._docstring
def descriptors(self):
return [i.descriptor for i in self.port_spec_items]
##########################################################################
# Methods
def _resize_attrs(self, target, *lists):
for rlist in lists:
if len(target) > len(rlist):
rlist.extend(None for i in xrange(len(target)-len(rlist)))
def _set_attrs(self, item, *attrs):
attr_order = ['default', 'label', 'values', 'entry_type']
if item is None:
kwargs = dict(izip(attr_order, attrs))
return kwargs
else:
for (attr_key, attr) in izip(attr_order, attrs):
if attr is not None:
setattr(item, attr_key, attr)
def create_spec_items(self, items=None, signature=None, sigstring=None,
defaults=None, labels=None, values=None,
entry_types=None):
if defaults is None:
defaults = []
elif isinstance(defaults, basestring):
defaults = literal_eval(defaults)
if labels is None:
labels = []
elif isinstance(labels, basestring):
labels = literal_eval(labels)
if values is None:
values = []
elif isinstance(values, basestring):
values = literal_eval(values)
if entry_types is None:
entry_types = []
elif isinstance(entry_types, basestring):
entry_types = literal_eval(entry_types)
attrs = [defaults, labels, values, entry_types]
if items:
self.set_items(items, *attrs)
elif signature is not None:
items = self.get_items_from_entries(signature, *attrs)
elif sigstring is not None:
items = self.get_items_from_sigstring(sigstring, *attrs)
self.port_spec_items = items
def set_items(self, items, *attrs):
self._resize_attrs(items, *attrs)
for i, item_tuple in enumerate(izip(items, *attrs)):
item_tuple[0].pos = i
self._set_attrs(*item_tuple)
def get_items_from_entries(self, signature, *attrs):
# This is reasonably messy code. The intent is that a
# signature given by the user in a call like this
# add_input_port(module, name, signature) should be one of the
# following:
# type only: add_input_port(_, _, Float)
# type plus description: add_input_port(_, _, (Float, 'radius'))
# multiple parameters, where each parameter can be either of the above:
# add_input_port(_, _, [Float, (Integer, 'count')])
registry = get_module_registry()
entries = []
def canonicalize(sig_item):
if isinstance(sig_item, tuple):
# assert len(sig_item) == 2
# assert isinstance(sig_item[0], type)
# assert isinstance(sig_item[1], str)
descriptor = registry.get_descriptor(sig_item[0])
label = sig_item[1]
return (descriptor, label)
elif isinstance(sig_item, list):
descriptor = registry.get_descriptor_by_name(
get_vistrails_basic_pkg_id(), 'List')
return (descriptor, None)
else:
# isinstance(sig_item, type):
return (registry.get_descriptor(sig_item), None)
# def _add_entry(sig_item):
ps_items = []
if not isinstance(signature, list):
signature = [signature]
self._resize_attrs(signature, *attrs)
for i, item_tuple in enumerate(izip(signature, *attrs)):
descriptor, item_label = canonicalize(item_tuple[0])
kwargs = self._set_attrs(None, *item_tuple[1:])
if not kwargs['label']:
if item_label != "<no description>":
kwargs['label'] = item_label
ps_item = PortSpecItem(pos=i,
package=descriptor.identifier,
module=descriptor.name,
namespace=descriptor.namespace,
**kwargs)
ps_items.append(ps_item)
return ps_items
def get_items_from_sigstring(self, sigstring, *attrs):
ps_items = []
specs_list = parse_port_spec_string(sigstring)
if len(specs_list) == 0:
return ps_items
self._resize_attrs(specs_list, *attrs)
for i, item_tuple in enumerate(izip(specs_list, *attrs)):
kwargs = self._set_attrs(None, *item_tuple[1:])
ps_item = PortSpecItem(pos=i,
package=item_tuple[0][0],
module=item_tuple[0][1],
namespace=item_tuple[0][2],
**kwargs)
ps_items.append(ps_item)
return ps_items
def create_tooltip(self):
"""Creates a short_sigstring that does not include package names for
use with the tooltip. Note, however, that such sigstrings
can't be used to reconstruct a spec. They should only be used
for human-readable purposes.
"""
self._short_sigstring = \
"(" + ",".join(d.name for d in self.descriptors()) + ")"
if self.type in ['input', 'output']:
port_string = self.type.capitalize()
else:
port_string = 'Invalid'
depth = " (depth %s)" % self.depth if self.depth else ''
self._tooltip = "%s port %s\n%s%s" % (port_string,
self.name,
self._short_sigstring,
depth)
##########################################################################
# Operators
    def __str__(self):
        """__str__() -> str - Returns a string representation of a PortSpec
object.
"""
rep = "<portSpec id=%s name=%s type=%s signature=%s depth=%s />"
return rep % (str(self.id), str(self.name),
str(self.type), str(self.sigstring), str(self.depth))
def __eq__(self, other):
""" __eq__(other: PortSpec) -> boolean
Returns True if self and other have the same attributes. Used by ==
operator.
"""
if self is None and other is None:
return True
if type(self) != type(other) or \
self.name != other.name or \
self.type != other.type:
return False
if len(self.descriptors()) != len(other.descriptors()):
return False
for (mine, their) in izip(self.descriptors(), other.descriptors()):
if mine != their:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def type_equals(self, other):
"""type_equals(other: PortSpec) -> Bool
Checks equality ignoring description strings. Only cares about types.
Does not do subtyping or supertyping: match must be perfect.
"""
if self is None and other is None:
return True
if len(self.descriptors()) != len(other.descriptors()):
return False
for (mine, their) in izip(self.descriptors(), other.descriptors()):
if mine != their:
return False
return True
def key_no_id(self):
"""key_no_id(): tuple. returns a tuple that identifies
the port without caring about ids. Used for sorting
port lists."""
return (self.type,
self.name,
self.signature)
################################################################################
# Testing
class TestPortSpec(unittest.TestCase):
def create_port_spec(self, id_scope=IdScope()):
# FIXME add a valid port spec
port_spec = PortSpec(id=id_scope.getNewId(PortSpec.vtType),
name='SetValue',
type='input',
sigstring='(%s:String)' % \
get_vistrails_basic_pkg_id(),
)
return port_spec
def test_copy(self):
id_scope = IdScope()
s1 = self.create_port_spec(id_scope)
s2 = copy.copy(s1)
self.assertEquals(s1, s2)
self.assertEquals(s1.id, s2.id)
s3 = s1.do_copy(True, id_scope, {})
self.assertEquals(s1, s3)
self.assertNotEquals(s1.id, s3.id)
def test_serialization(self):
import vistrails.core.db.io
s1 = self.create_port_spec()
xml_str = vistrails.core.db.io.serialize(s1)
s2 = vistrails.core.db.io.unserialize(xml_str, PortSpec)
self.assertEquals(s1, s2)
self.assertEquals(s1.id, s2.id)
def test_create_from_signature(self):
from vistrails.core.modules.basic_modules import Float
port_spec = PortSpec(id=-1,
name="SetXYZ",
type='input',
signature=[(Float, "x"), (Float, "y"),
(Float, "z")])
def test_create_from_items(self):
basic_pkg = get_vistrails_basic_pkg_id()
item_a = PortSpecItem(pos=0,
package=basic_pkg,
module="Integer",
label="a",
default="123")
item_b = PortSpecItem(pos=1,
package=basic_pkg,
module="String",
label="b",
default="abc")
port_spec = PortSpec(id=-1,
name="SetValue",
type='input',
portSpecItems=[item_a, item_b])
|
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import collections
from absl.testing import parameterized
import hypothesis as hp
from hypothesis import strategies as hps
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.math.psd_kernels import hypothesis_testlib as kernel_hps
# pylint is unable to handle @hps.composite (e.g. complains "No value for
# argument '...' in function call"), so disable this lint for the file.
# pylint: disable=no-value-for-parameter
EXTRA_TENSOR_CONVERSION_KERNELS = {
# The transformation is applied to each input individually.
'KumaraswamyTransformed': 1,
}
INSTANTIABLE_BUT_NOT_SLICABLE = [
'FeatureTransformed', # Requires slicing in to the `transformation_fn`.
]
KERNELS_OK_TO_SLICE = (set(list(kernel_hps.INSTANTIABLE_BASE_KERNELS.keys()) +
list(kernel_hps.SPECIAL_KERNELS)) -
set(INSTANTIABLE_BUT_NOT_SLICABLE))
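# The slicing tests below exercise one invariant: slicing a kernel's batch
# dimensions must behave exactly like slicing a NumPy array of the same batch
# shape, both for `batch_shape` and for the output of `apply`.  Sketch of the
# property being checked (kernel construction elided; `kernel` stands for any
# sliceable PSD kernel with a non-trivial batch shape):
#
#     sliced = kernel[..., 0]
#     expected_shape = np.zeros(kernel.batch_shape)[..., 0].shape
#     assert tuple(sliced.batch_shape) == expected_shape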
def assert_no_none_grad(kernel, method, wrt_vars, grads):
for var, grad in zip(wrt_vars, grads):
# For the GeneralizedMatern kernel, gradients with respect to `df` don't
# exist.
if tensor_util.is_ref(var) and var.name.strip('_0123456789:') == 'df':
continue
if grad is None:
raise AssertionError('Missing `{}` -> {} grad for kernel {}'.format(
method, var, kernel))
@test_util.test_all_tf_execution_regimes
class KernelPropertiesTest(test_util.TestCase):
@parameterized.named_parameters(
{'testcase_name': dname, 'kernel_name': dname}
for dname in sorted(list(kernel_hps.INSTANTIABLE_BASE_KERNELS.keys()) +
list(kernel_hps.SPECIAL_KERNELS)))
@hp.given(hps.data())
@tfp_hps.tfp_hp_settings(
default_max_examples=10,
suppress_health_check=[
hp.HealthCheck.too_slow,
hp.HealthCheck.data_too_large])
def testKernelGradient(self, kernel_name, data):
event_dim = data.draw(hps.integers(min_value=2, max_value=3))
feature_ndims = data.draw(hps.integers(min_value=1, max_value=2))
feature_dim = data.draw(hps.integers(min_value=2, max_value=4))
batch_shape = data.draw(tfp_hps.shapes(max_ndims=2))
kernel, kernel_parameter_variable_names = data.draw(
kernel_hps.kernels(
batch_shape=batch_shape,
kernel_name=kernel_name,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=True))
# Check that variable parameters get passed to the kernel.variables
kernel_variables_names = [
v.name.strip('_0123456789:') for v in kernel.variables]
kernel_parameter_variable_names = [
n.strip('_0123456789:') for n in kernel_parameter_variable_names]
self.assertEqual(
set(kernel_parameter_variable_names),
set(kernel_variables_names))
example_ndims = data.draw(hps.integers(min_value=1, max_value=2))
input_batch_shape = data.draw(tfp_hps.broadcast_compatible_shape(
kernel.batch_shape))
xs = tf.identity(data.draw(kernel_hps.kernel_input(
batch_shape=input_batch_shape,
example_ndims=example_ndims,
feature_dim=feature_dim,
feature_ndims=feature_ndims)))
# Check that we pick up all relevant kernel parameters.
wrt_vars = [xs] + list(kernel.variables)
self.evaluate([v.initializer for v in kernel.variables])
max_permissible = 2 + EXTRA_TENSOR_CONVERSION_KERNELS.get(kernel_name, 0)
with tf.GradientTape() as tape:
with tfp_hps.assert_no_excessive_var_usage(
'method `apply` of {}'.format(kernel),
max_permissible=max_permissible
):
tape.watch(wrt_vars)
with tfp_hps.no_tf_rank_errors():
diag = kernel.apply(xs, xs, example_ndims=example_ndims)
grads = tape.gradient(diag, wrt_vars)
assert_no_none_grad(kernel, 'apply', wrt_vars, grads)
# Check that copying the kernel works.
with tfp_hps.no_tf_rank_errors():
diag2 = self.evaluate(kernel.copy().apply(
xs, xs, example_ndims=example_ndims))
self.assertAllClose(diag, diag2)
@parameterized.named_parameters(
{'testcase_name': dname, 'kernel_name': dname}
for dname in sorted(list(kernel_hps.INSTANTIABLE_BASE_KERNELS.keys()) +
list(kernel_hps.SPECIAL_KERNELS)))
@hp.given(hps.data())
@tfp_hps.tfp_hp_settings(
default_max_examples=10,
suppress_health_check=[
hp.HealthCheck.too_slow,
hp.HealthCheck.data_too_large])
def testCompositeTensor(self, kernel_name, data):
kernel, _ = data.draw(
kernel_hps.kernels(
kernel_name=kernel_name,
event_dim=2,
feature_dim=2,
feature_ndims=1,
enable_vars=True))
self.assertIsInstance(kernel, tf.__internal__.CompositeTensor)
xs = tf.identity(data.draw(kernel_hps.kernel_input(
batch_shape=[],
example_ndims=1,
feature_dim=2,
feature_ndims=1)))
with tfp_hps.no_tf_rank_errors():
diag = kernel.apply(xs, xs, example_ndims=1)
# Test flatten/unflatten.
flat = tf.nest.flatten(kernel, expand_composites=True)
unflat = tf.nest.pack_sequence_as(kernel, flat, expand_composites=True)
# Test tf.function.
@tf.function
def diag_fn(k):
return k.apply(xs, xs, example_ndims=1)
self.evaluate([v.initializer for v in kernel.variables])
with tfp_hps.no_tf_rank_errors():
self.assertAllClose(diag, diag_fn(kernel))
self.assertAllClose(diag, diag_fn(unflat))
@test_util.test_all_tf_execution_regimes
class PSDKernelSlicingTest(test_util.TestCase):
def _test_slicing(
self,
data,
kernel_name,
kernel,
feature_dim,
feature_ndims):
example_ndims = data.draw(hps.integers(min_value=0, max_value=2))
batch_shape = kernel.batch_shape
slices = data.draw(tfp_hps.valid_slices(batch_shape))
slice_str = 'kernel[{}]'.format(', '.join(tfp_hps.stringify_slices(
slices)))
# Make sure the slice string appears in Hypothesis' attempted example log
hp.note('Using slice ' + slice_str)
if not slices: # Nothing further to check.
return
sliced_zeros = np.zeros(batch_shape)[slices]
sliced_kernel = kernel[slices]
hp.note('Using sliced kernel {}.'.format(sliced_kernel))
hp.note('Using sliced zeros {}.'.format(sliced_zeros.shape))
# Check that slicing modifies batch shape as expected.
self.assertAllEqual(sliced_zeros.shape, sliced_kernel.batch_shape)
xs = tf.identity(data.draw(kernel_hps.kernel_input(
batch_shape=[],
example_ndims=example_ndims,
feature_dim=feature_dim,
feature_ndims=feature_ndims)))
# Check that apply of sliced kernels executes.
with tfp_hps.no_tf_rank_errors():
results = self.evaluate(kernel.apply(xs, xs, example_ndims=example_ndims))
hp.note('Using results shape {}.'.format(results.shape))
sliced_results = self.evaluate(
sliced_kernel.apply(xs, xs, example_ndims=example_ndims))
# Come up with the slices for apply (which must also include example dims).
apply_slices = (
tuple(slices) if isinstance(slices, collections.abc.Sequence) else
(slices,))
apply_slices += tuple([slice(None)] * example_ndims)
    # Check that applying the sliced kernel gives the same results as
    # slicing the output of the original kernel's apply.
self.assertAllClose(results[apply_slices], sliced_results)
@parameterized.named_parameters(
{'testcase_name': dname, 'kernel_name': dname}
for dname in sorted(KERNELS_OK_TO_SLICE))
@hp.given(hps.data())
@tfp_hps.tfp_hp_settings()
def testKernels(self, kernel_name, data):
event_dim = data.draw(hps.integers(min_value=2, max_value=3))
feature_ndims = data.draw(hps.integers(min_value=1, max_value=2))
feature_dim = data.draw(hps.integers(min_value=2, max_value=4))
kernel, _ = data.draw(
kernel_hps.kernels(
kernel_name=kernel_name,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=False))
# Check that all kernels still register as non-iterable despite
    # defining __getitem__. (Defining __getitem__ would otherwise make an
    # object iterable via the legacy sequence-iteration protocol.)
with self.assertRaisesRegex(TypeError, 'not iterable'):
iter(kernel)
# Test slicing
self._test_slicing(data, kernel_name, kernel, feature_dim, feature_ndims)
if __name__ == '__main__':
test_util.main()
|
|
from __future__ import absolute_import, unicode_literals
import unittest
import mock
from mopidy.models import Album, Artist, Playlist, Ref, SearchResult, Track
from mopidy.mpd.protocol import music_db, stored_playlists
from tests.mpd import protocol
# TODO: split into more modules for faster parallel tests?
class QueryFromMpdSearchFormatTest(unittest.TestCase):
def test_dates_are_extracted(self):
result = music_db._query_from_mpd_search_parameters(
['Date', '1974-01-02', 'Date', '1975'], music_db._SEARCH_MAPPING)
self.assertEqual(result['date'][0], '1974-01-02')
self.assertEqual(result['date'][1], '1975')
def test_empty_value_is_ignored(self):
result = music_db._query_from_mpd_search_parameters(
['Date', ''], music_db._SEARCH_MAPPING)
self.assertEqual(result, {})
def test_whitespace_value_is_ignored(self):
result = music_db._query_from_mpd_search_parameters(
['Date', ' '], music_db._SEARCH_MAPPING)
self.assertEqual(result, {})
# TODO Test more mappings
class QueryFromMpdListFormatTest(unittest.TestCase):
pass # TODO
# TODO: why isn't core.playlists.filter getting deprecation warnings?
class MusicDatabaseHandlerTest(protocol.BaseTestCase):
def test_count(self):
self.send_request('count "artist" "needle"')
self.assertInResponse('songs: 0')
self.assertInResponse('playtime: 0')
self.assertInResponse('OK')
def test_count_without_quotes(self):
self.send_request('count artist "needle"')
self.assertInResponse('songs: 0')
self.assertInResponse('playtime: 0')
self.assertInResponse('OK')
def test_count_with_multiple_pairs(self):
self.send_request('count "artist" "foo" "album" "bar"')
self.assertInResponse('songs: 0')
self.assertInResponse('playtime: 0')
self.assertInResponse('OK')
def test_count_correct_length(self):
# Count the lone track
self.backend.library.dummy_find_exact_result = SearchResult(
tracks=[
Track(uri='dummy:a', name='foo', date='2001', length=4000),
])
self.send_request('count "title" "foo"')
self.assertInResponse('songs: 1')
self.assertInResponse('playtime: 4')
self.assertInResponse('OK')
# Count multiple tracks
self.backend.library.dummy_find_exact_result = SearchResult(
tracks=[
Track(uri='dummy:b', date="2001", length=50000),
Track(uri='dummy:c', date="2001", length=600000),
])
self.send_request('count "date" "2001"')
self.assertInResponse('songs: 2')
self.assertInResponse('playtime: 650')
self.assertInResponse('OK')
def test_count_with_track_length_none(self):
self.backend.library.dummy_find_exact_result = SearchResult(
tracks=[
Track(uri='dummy:b', date="2001", length=None),
])
self.send_request('count "date" "2001"')
self.assertInResponse('songs: 1')
self.assertInResponse('playtime: 0')
self.assertInResponse('OK')
def test_findadd(self):
self.backend.library.dummy_find_exact_result = SearchResult(
tracks=[Track(uri='dummy:a', name='A')])
self.assertEqual(self.core.tracklist.length.get(), 0)
self.send_request('findadd "title" "A"')
self.assertEqual(self.core.tracklist.length.get(), 1)
self.assertEqual(self.core.tracklist.tracks.get()[0].uri, 'dummy:a')
self.assertInResponse('OK')
def test_searchadd(self):
self.backend.library.dummy_search_result = SearchResult(
tracks=[Track(uri='dummy:a', name='A')])
self.assertEqual(self.core.tracklist.length.get(), 0)
self.send_request('searchadd "title" "a"')
self.assertEqual(self.core.tracklist.length.get(), 1)
self.assertEqual(self.core.tracklist.tracks.get()[0].uri, 'dummy:a')
self.assertInResponse('OK')
def test_searchaddpl_appends_to_existing_playlist(self):
playlist = self.core.playlists.create('my favs').get()
playlist = playlist.replace(tracks=[
Track(uri='dummy:x', name='X'),
Track(uri='dummy:y', name='y'),
])
self.core.playlists.save(playlist)
self.backend.library.dummy_search_result = SearchResult(
tracks=[Track(uri='dummy:a', name='A')])
items = self.core.playlists.get_items(playlist.uri).get()
self.assertEqual(len(items), 2)
self.send_request('searchaddpl "my favs" "title" "a"')
items = self.core.playlists.get_items(playlist.uri).get()
self.assertEqual(len(items), 3)
self.assertEqual(items[0].uri, 'dummy:x')
self.assertEqual(items[1].uri, 'dummy:y')
self.assertEqual(items[2].uri, 'dummy:a')
self.assertInResponse('OK')
def test_searchaddpl_creates_missing_playlist(self):
self.backend.library.dummy_search_result = SearchResult(
tracks=[Track(uri='dummy:a', name='A')])
playlists = self.core.playlists.as_list().get()
self.assertNotIn('my favs', {p.name for p in playlists})
self.send_request('searchaddpl "my favs" "title" "a"')
playlists = self.core.playlists.as_list().get()
playlist = {p.name: p for p in playlists}['my favs']
items = self.core.playlists.get_items(playlist.uri).get()
self.assertEqual(len(items), 1)
self.assertEqual(items[0].uri, 'dummy:a')
self.assertInResponse('OK')
def test_listall_without_uri(self):
tracks = [Track(uri='dummy:/a', name='a'),
Track(uri='dummy:/foo/b', name='b')]
self.backend.library.dummy_library = tracks
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.track(uri='dummy:/a', name='a'),
Ref.directory(uri='dummy:/foo', name='foo'),
Ref.album(uri='dummy:/album', name='album'),
Ref.artist(uri='dummy:/artist', name='artist'),
Ref.playlist(uri='dummy:/pl', name='pl')],
'dummy:/foo': [Ref.track(uri='dummy:/foo/b', name='b')]}
self.send_request('listall')
self.assertInResponse('file: dummy:/a')
self.assertInResponse('directory: /dummy/foo')
self.assertInResponse('directory: /dummy/album')
self.assertInResponse('directory: /dummy/artist')
self.assertInResponse('directory: /dummy/pl')
self.assertInResponse('file: dummy:/foo/b')
self.assertInResponse('OK')
def test_listall_with_uri(self):
tracks = [Track(uri='dummy:/a', name='a'),
Track(uri='dummy:/foo/b', name='b')]
self.backend.library.dummy_library = tracks
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.track(uri='dummy:/a', name='a'),
Ref.directory(uri='dummy:/foo', name='foo')],
'dummy:/foo': [Ref.track(uri='dummy:/foo/b', name='b')]}
self.send_request('listall "/dummy/foo"')
self.assertNotInResponse('file: dummy:/a')
self.assertInResponse('directory: /dummy/foo')
self.assertInResponse('file: dummy:/foo/b')
self.assertInResponse('OK')
def test_listall_with_unknown_uri(self):
self.send_request('listall "/unknown"')
self.assertEqualResponse('ACK [50@0] {listall} Not found')
def test_listall_for_dir_with_and_without_leading_slash_is_the_same(self):
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.track(uri='dummy:/a', name='a'),
Ref.directory(uri='dummy:/foo', name='foo')]}
response1 = self.send_request('listall "dummy"')
response2 = self.send_request('listall "/dummy"')
self.assertEqual(response1, response2)
def test_listall_for_dir_with_and_without_trailing_slash_is_the_same(self):
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.track(uri='dummy:/a', name='a'),
Ref.directory(uri='dummy:/foo', name='foo')]}
response1 = self.send_request('listall "dummy"')
response2 = self.send_request('listall "dummy/"')
self.assertEqual(response1, response2)
def test_listall_duplicate(self):
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.directory(uri='dummy:/a1', name='a'),
Ref.directory(uri='dummy:/a2', name='a')]}
self.send_request('listall')
self.assertInResponse('directory: /dummy/a')
self.assertInResponse('directory: /dummy/a [2]')
def test_listallinfo_without_uri(self):
tracks = [Track(uri='dummy:/a', name='a'),
Track(uri='dummy:/foo/b', name='b')]
self.backend.library.dummy_library = tracks
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.track(uri='dummy:/a', name='a'),
Ref.directory(uri='dummy:/foo', name='foo'),
Ref.album(uri='dummy:/album', name='album'),
Ref.artist(uri='dummy:/artist', name='artist'),
Ref.playlist(uri='dummy:/pl', name='pl')],
'dummy:/foo': [Ref.track(uri='dummy:/foo/b', name='b')]}
self.send_request('listallinfo')
self.assertInResponse('file: dummy:/a')
self.assertInResponse('Title: a')
self.assertInResponse('directory: /dummy/foo')
self.assertInResponse('directory: /dummy/album')
self.assertInResponse('directory: /dummy/artist')
self.assertInResponse('directory: /dummy/pl')
self.assertInResponse('file: dummy:/foo/b')
self.assertInResponse('Title: b')
self.assertInResponse('OK')
def test_listallinfo_with_uri(self):
tracks = [Track(uri='dummy:/a', name='a'),
Track(uri='dummy:/foo/b', name='b')]
self.backend.library.dummy_library = tracks
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.track(uri='dummy:/a', name='a'),
Ref.directory(uri='dummy:/foo', name='foo')],
'dummy:/foo': [Ref.track(uri='dummy:/foo/b', name='b')]}
self.send_request('listallinfo "/dummy/foo"')
self.assertNotInResponse('file: dummy:/a')
self.assertNotInResponse('Title: a')
self.assertInResponse('directory: /dummy/foo')
self.assertInResponse('file: dummy:/foo/b')
self.assertInResponse('Title: b')
self.assertInResponse('OK')
def test_listallinfo_with_unknown_uri(self):
self.send_request('listallinfo "/unknown"')
self.assertEqualResponse('ACK [50@0] {listallinfo} Not found')
def test_listallinfo_for_dir_with_and_without_leading_slash_is_same(self):
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.track(uri='dummy:/a', name='a'),
Ref.directory(uri='dummy:/foo', name='foo')]}
response1 = self.send_request('listallinfo "dummy"')
response2 = self.send_request('listallinfo "/dummy"')
self.assertEqual(response1, response2)
def test_listallinfo_for_dir_with_and_without_trailing_slash_is_same(self):
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.track(uri='dummy:/a', name='a'),
Ref.directory(uri='dummy:/foo', name='foo')]}
response1 = self.send_request('listallinfo "dummy"')
response2 = self.send_request('listallinfo "dummy/"')
self.assertEqual(response1, response2)
def test_listallinfo_duplicate(self):
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.directory(uri='dummy:/a1', name='a'),
Ref.directory(uri='dummy:/a2', name='a')]}
self.send_request('listallinfo')
self.assertInResponse('directory: /dummy/a')
self.assertInResponse('directory: /dummy/a [2]')
def test_listfiles(self):
self.send_request('listfiles')
self.assertEqualResponse('ACK [0@0] {listfiles} Not implemented')
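    # lsinfo tests: listing is a single level (non-recursive); playlists appear
    # at the root, with Last-Modified stubbed via
    # stored_playlists._get_last_modified.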
@mock.patch.object(stored_playlists, '_get_last_modified')
def test_lsinfo_without_path_returns_same_as_for_root(
self, last_modified_mock):
last_modified_mock.return_value = '2015-08-05T22:51:06Z'
self.backend.playlists.set_dummy_playlists([
Playlist(name='a', uri='dummy:/a')])
response1 = self.send_request('lsinfo')
response2 = self.send_request('lsinfo "/"')
self.assertEqual(response1, response2)
@mock.patch.object(stored_playlists, '_get_last_modified')
def test_lsinfo_with_empty_path_returns_same_as_for_root(
self, last_modified_mock):
last_modified_mock.return_value = '2015-08-05T22:51:06Z'
self.backend.playlists.set_dummy_playlists([
Playlist(name='a', uri='dummy:/a')])
response1 = self.send_request('lsinfo ""')
response2 = self.send_request('lsinfo "/"')
self.assertEqual(response1, response2)
@mock.patch.object(stored_playlists, '_get_last_modified')
def test_lsinfo_for_root_includes_playlists(self, last_modified_mock):
last_modified_mock.return_value = '2015-08-05T22:51:06Z'
self.backend.playlists.set_dummy_playlists([
Playlist(name='a', uri='dummy:/a')])
self.send_request('lsinfo "/"')
self.assertInResponse('playlist: a')
self.assertInResponse('Last-Modified: 2015-08-05T22:51:06Z')
self.assertInResponse('OK')
def test_lsinfo_for_root_includes_dirs_for_each_lib_with_content(self):
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.track(uri='dummy:/a', name='a'),
Ref.directory(uri='dummy:/foo', name='foo')]}
self.send_request('lsinfo "/"')
self.assertInResponse('directory: dummy')
self.assertInResponse('OK')
@mock.patch.object(stored_playlists, '_get_last_modified')
def test_lsinfo_for_dir_with_and_without_leading_slash_is_the_same(
self, last_modified_mock):
last_modified_mock.return_value = '2015-08-05T22:51:06Z'
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.track(uri='dummy:/a', name='a'),
Ref.directory(uri='dummy:/foo', name='foo')]}
response1 = self.send_request('lsinfo "dummy"')
response2 = self.send_request('lsinfo "/dummy"')
self.assertEqual(response1, response2)
@mock.patch.object(stored_playlists, '_get_last_modified')
def test_lsinfo_for_dir_with_and_without_trailing_slash_is_the_same(
self, last_modified_mock):
last_modified_mock.return_value = '2015-08-05T22:51:06Z'
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.track(uri='dummy:/a', name='a'),
Ref.directory(uri='dummy:/foo', name='foo')]}
response1 = self.send_request('lsinfo "dummy"')
response2 = self.send_request('lsinfo "dummy/"')
self.assertEqual(response1, response2)
def test_lsinfo_for_dir_includes_tracks(self):
self.backend.library.dummy_library = [
Track(uri='dummy:/a', name='a'),
]
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.track(uri='dummy:/a', name='a')]}
self.send_request('lsinfo "/dummy"')
self.assertInResponse('file: dummy:/a')
self.assertInResponse('Title: a')
self.assertInResponse('OK')
def test_lsinfo_for_dir_includes_subdirs(self):
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.directory(uri='dummy:/foo', name='foo')]}
self.send_request('lsinfo "/dummy"')
self.assertInResponse('directory: dummy/foo')
self.assertInResponse('OK')
def test_lsinfo_for_empty_dir_returns_nothing(self):
self.backend.library.dummy_browse_result = {
'dummy:/': []}
self.send_request('lsinfo "/dummy"')
self.assertInResponse('OK')
def test_lsinfo_for_dir_does_not_recurse(self):
self.backend.library.dummy_library = [
Track(uri='dummy:/a', name='a'),
]
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.directory(uri='dummy:/foo', name='foo')],
'dummy:/foo': [Ref.track(uri='dummy:/a', name='a')]}
self.send_request('lsinfo "/dummy"')
self.assertNotInResponse('file: dummy:/a')
self.assertInResponse('OK')
def test_lsinfo_for_dir_does_not_include_self(self):
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.directory(uri='dummy:/foo', name='foo')],
'dummy:/foo': [Ref.track(uri='dummy:/a', name='a')]}
self.send_request('lsinfo "/dummy"')
self.assertNotInResponse('directory: dummy')
self.assertInResponse('OK')
def test_lsinfo_for_root_returns_browse_result_before_playlists(self):
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.track(uri='dummy:/a', name='a'),
Ref.directory(uri='dummy:/foo', name='foo')]}
self.backend.playlists.set_dummy_playlists([
Playlist(name='a', uri='dummy:/a')])
response = self.send_request('lsinfo "/"')
self.assertLess(response.index('directory: dummy'),
response.index('playlist: a'))
def test_lsinfo_duplicate(self):
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.directory(uri='dummy:/a1', name='a'),
Ref.directory(uri='dummy:/a2', name='a')]}
self.send_request('lsinfo "/dummy"')
self.assertInResponse('directory: dummy/a')
self.assertInResponse('directory: dummy/a [2]')
def test_update_without_uri(self):
self.send_request('update')
self.assertInResponse('updating_db: 0')
self.assertInResponse('OK')
def test_update_with_uri(self):
self.send_request('update "file:///dev/urandom"')
self.assertInResponse('updating_db: 0')
self.assertInResponse('OK')
def test_rescan_without_uri(self):
self.send_request('rescan')
self.assertInResponse('updating_db: 0')
self.assertInResponse('OK')
def test_rescan_with_uri(self):
self.send_request('rescan "file:///dev/urandom"')
self.assertInResponse('updating_db: 0')
self.assertInResponse('OK')
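# Tests for the MPD "find" command. The dummy backend serves exact-match
# results through dummy_find_exact_result.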
class MusicDatabaseFindTest(protocol.BaseTestCase):
def test_find_includes_fake_artist_and_album_tracks(self):
self.backend.library.dummy_find_exact_result = SearchResult(
albums=[Album(uri='dummy:album:a', name='A', date='2001')],
artists=[Artist(uri='dummy:artist:b', name='B')],
tracks=[Track(uri='dummy:track:c', name='C')])
self.send_request('find "any" "foo"')
self.assertInResponse('file: dummy:artist:b')
self.assertInResponse('Title: Artist: B')
self.assertInResponse('file: dummy:album:a')
self.assertInResponse('Title: Album: A')
self.assertInResponse('Date: 2001')
self.assertInResponse('file: dummy:track:c')
self.assertInResponse('Title: C')
self.assertInResponse('OK')
def test_find_artist_does_not_include_fake_artist_tracks(self):
self.backend.library.dummy_find_exact_result = SearchResult(
albums=[Album(uri='dummy:album:a', name='A', date='2001')],
artists=[Artist(uri='dummy:artist:b', name='B')],
tracks=[Track(uri='dummy:track:c', name='C')])
self.send_request('find "artist" "foo"')
self.assertNotInResponse('file: dummy:artist:b')
self.assertNotInResponse('Title: Artist: B')
self.assertInResponse('file: dummy:album:a')
self.assertInResponse('Title: Album: A')
self.assertInResponse('Date: 2001')
self.assertInResponse('file: dummy:track:c')
self.assertInResponse('Title: C')
self.assertInResponse('OK')
def test_find_albumartist_does_not_include_fake_artist_tracks(self):
self.backend.library.dummy_find_exact_result = SearchResult(
albums=[Album(uri='dummy:album:a', name='A', date='2001')],
artists=[Artist(uri='dummy:artist:b', name='B')],
tracks=[Track(uri='dummy:track:c', name='C')])
self.send_request('find "albumartist" "foo"')
self.assertNotInResponse('file: dummy:artist:b')
self.assertNotInResponse('Title: Artist: B')
self.assertInResponse('file: dummy:album:a')
self.assertInResponse('Title: Album: A')
self.assertInResponse('Date: 2001')
self.assertInResponse('file: dummy:track:c')
self.assertInResponse('Title: C')
self.assertInResponse('OK')
def test_find_artist_and_album_does_not_include_fake_tracks(self):
self.backend.library.dummy_find_exact_result = SearchResult(
albums=[Album(uri='dummy:album:a', name='A', date='2001')],
artists=[Artist(uri='dummy:artist:b', name='B')],
tracks=[Track(uri='dummy:track:c', name='C')])
self.send_request('find "artist" "foo" "album" "bar"')
self.assertNotInResponse('file: dummy:artist:b')
self.assertNotInResponse('Title: Artist: B')
self.assertNotInResponse('file: dummy:album:a')
self.assertNotInResponse('Title: Album: A')
self.assertNotInResponse('Date: 2001')
self.assertInResponse('file: dummy:track:c')
self.assertInResponse('Title: C')
self.assertInResponse('OK')
def test_find_album(self):
self.send_request('find "album" "what"')
self.assertInResponse('OK')
def test_find_album_without_quotes(self):
self.send_request('find album "what"')
self.assertInResponse('OK')
def test_find_artist(self):
self.send_request('find "artist" "what"')
self.assertInResponse('OK')
def test_find_artist_without_quotes(self):
self.send_request('find artist "what"')
self.assertInResponse('OK')
def test_find_albumartist(self):
self.send_request('find "albumartist" "what"')
self.assertInResponse('OK')
def test_find_albumartist_without_quotes(self):
self.send_request('find albumartist "what"')
self.assertInResponse('OK')
def test_find_composer(self):
self.send_request('find "composer" "what"')
self.assertInResponse('OK')
def test_find_composer_without_quotes(self):
self.send_request('find composer "what"')
self.assertInResponse('OK')
def test_find_performer(self):
self.send_request('find "performer" "what"')
self.assertInResponse('OK')
def test_find_performer_without_quotes(self):
self.send_request('find performer "what"')
self.assertInResponse('OK')
def test_find_filename(self):
self.send_request('find "filename" "afilename"')
self.assertInResponse('OK')
def test_find_filename_without_quotes(self):
self.send_request('find filename "afilename"')
self.assertInResponse('OK')
def test_find_file(self):
self.send_request('find "file" "afilename"')
self.assertInResponse('OK')
def test_find_file_without_quotes(self):
self.send_request('find file "afilename"')
self.assertInResponse('OK')
def test_find_title(self):
self.send_request('find "title" "what"')
self.assertInResponse('OK')
def test_find_title_without_quotes(self):
self.send_request('find title "what"')
self.assertInResponse('OK')
def test_find_track_no(self):
self.send_request('find "track" "10"')
self.assertInResponse('OK')
def test_find_track_no_without_quotes(self):
self.send_request('find track "10"')
self.assertInResponse('OK')
def test_find_track_no_without_filter_value(self):
self.send_request('find "track" ""')
self.assertInResponse('OK')
def test_find_genre(self):
self.send_request('find "genre" "what"')
self.assertInResponse('OK')
def test_find_genre_without_quotes(self):
self.send_request('find genre "what"')
self.assertInResponse('OK')
def test_find_date(self):
self.send_request('find "date" "2002-01-01"')
self.assertInResponse('OK')
def test_find_date_without_quotes(self):
self.send_request('find date "2002-01-01"')
self.assertInResponse('OK')
def test_find_date_with_capital_d_and_incomplete_date(self):
self.send_request('find Date "2005"')
self.assertInResponse('OK')
def test_find_else_should_fail(self):
self.send_request('find "somethingelse" "what"')
self.assertEqualResponse('ACK [2@0] {find} incorrect arguments')
def test_find_album_and_artist(self):
self.send_request('find album "album_what" artist "artist_what"')
self.assertInResponse('OK')
def test_find_without_filter_value(self):
self.send_request('find "album" ""')
self.assertInResponse('OK')
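# Tests for the MPD "list" command, which returns the distinct values of a
# tag; the dummy backend provides them through dummy_get_distinct_result.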
class MusicDatabaseListTest(protocol.BaseTestCase):
def test_list(self):
self.backend.library.dummy_get_distinct_result = {
'artist': set(['A Artist'])}
self.send_request('list "artist" "artist" "foo"')
self.assertInResponse('Artist: A Artist')
self.assertInResponse('OK')
def test_list_foo_returns_ack(self):
self.send_request('list "foo"')
self.assertEqualResponse('ACK [2@0] {list} incorrect arguments')
# Track title
def test_list_title(self):
self.send_request('list "title"')
self.assertInResponse('OK')
# Artist
def test_list_artist_with_quotes(self):
self.send_request('list "artist"')
self.assertInResponse('OK')
def test_list_artist_without_quotes(self):
self.send_request('list artist')
self.assertInResponse('OK')
def test_list_artist_without_quotes_and_capitalized(self):
self.send_request('list Artist')
self.assertInResponse('OK')
def test_list_artist_with_query_of_one_token(self):
self.send_request('list "artist" "anartist"')
self.assertEqualResponse(
'ACK [2@0] {list} should be "Album" for 3 arguments')
def test_list_artist_with_unknown_field_in_query_returns_ack(self):
self.send_request('list "artist" "foo" "bar"')
self.assertEqualResponse('ACK [2@0] {list} not able to parse args')
def test_list_artist_by_artist(self):
self.send_request('list "artist" "artist" "anartist"')
self.assertInResponse('OK')
def test_list_artist_by_album(self):
self.send_request('list "artist" "album" "analbum"')
self.assertInResponse('OK')
def test_list_artist_by_full_date(self):
self.send_request('list "artist" "date" "2001-01-01"')
self.assertInResponse('OK')
def test_list_artist_by_year(self):
self.send_request('list "artist" "date" "2001"')
self.assertInResponse('OK')
def test_list_artist_by_genre(self):
self.send_request('list "artist" "genre" "agenre"')
self.assertInResponse('OK')
def test_list_artist_by_artist_and_album(self):
self.send_request(
'list "artist" "artist" "anartist" "album" "analbum"')
self.assertInResponse('OK')
def test_list_artist_without_filter_value(self):
self.send_request('list "artist" "artist" ""')
self.assertInResponse('OK')
def test_list_artist_should_not_return_artists_without_names(self):
self.backend.library.dummy_find_exact_result = SearchResult(
tracks=[Track(artists=[Artist(name='')])])
self.send_request('list "artist"')
self.assertNotInResponse('Artist: ')
self.assertInResponse('OK')
# Albumartist
def test_list_albumartist_with_quotes(self):
self.send_request('list "albumartist"')
self.assertInResponse('OK')
def test_list_albumartist_without_quotes(self):
self.send_request('list albumartist')
self.assertInResponse('OK')
def test_list_albumartist_without_quotes_and_capitalized(self):
self.send_request('list Albumartist')
self.assertInResponse('OK')
def test_list_albumartist_with_query_of_one_token(self):
self.send_request('list "albumartist" "anartist"')
self.assertEqualResponse(
'ACK [2@0] {list} should be "Album" for 3 arguments')
def test_list_albumartist_with_unknown_field_in_query_returns_ack(self):
self.send_request('list "albumartist" "foo" "bar"')
self.assertEqualResponse('ACK [2@0] {list} not able to parse args')
def test_list_albumartist_by_artist(self):
self.send_request('list "albumartist" "artist" "anartist"')
self.assertInResponse('OK')
def test_list_albumartist_by_album(self):
self.send_request('list "albumartist" "album" "analbum"')
self.assertInResponse('OK')
def test_list_albumartist_by_full_date(self):
self.send_request('list "albumartist" "date" "2001-01-01"')
self.assertInResponse('OK')
def test_list_albumartist_by_year(self):
self.send_request('list "albumartist" "date" "2001"')
self.assertInResponse('OK')
def test_list_albumartist_by_genre(self):
self.send_request('list "albumartist" "genre" "agenre"')
self.assertInResponse('OK')
def test_list_albumartist_by_artist_and_album(self):
self.send_request(
'list "albumartist" "artist" "anartist" "album" "analbum"')
self.assertInResponse('OK')
def test_list_albumartist_without_filter_value(self):
self.send_request('list "albumartist" "artist" ""')
self.assertInResponse('OK')
def test_list_albumartist_should_not_return_artists_without_names(self):
self.backend.library.dummy_find_exact_result = SearchResult(
tracks=[Track(album=Album(artists=[Artist(name='')]))])
self.send_request('list "albumartist"')
self.assertNotInResponse('Artist: ')
self.assertNotInResponse('Albumartist: ')
self.assertNotInResponse('Composer: ')
self.assertNotInResponse('Performer: ')
self.assertInResponse('OK')
# Composer
def test_list_composer_with_quotes(self):
self.send_request('list "composer"')
self.assertInResponse('OK')
def test_list_composer_without_quotes(self):
self.send_request('list composer')
self.assertInResponse('OK')
def test_list_composer_without_quotes_and_capitalized(self):
self.send_request('list Composer')
self.assertInResponse('OK')
def test_list_composer_with_query_of_one_token(self):
self.send_request('list "composer" "anartist"')
self.assertEqualResponse(
'ACK [2@0] {list} should be "Album" for 3 arguments')
def test_list_composer_with_unknown_field_in_query_returns_ack(self):
self.send_request('list "composer" "foo" "bar"')
self.assertEqualResponse('ACK [2@0] {list} not able to parse args')
def test_list_composer_by_artist(self):
self.send_request('list "composer" "artist" "anartist"')
self.assertInResponse('OK')
def test_list_composer_by_album(self):
self.send_request('list "composer" "album" "analbum"')
self.assertInResponse('OK')
def test_list_composer_by_full_date(self):
self.send_request('list "composer" "date" "2001-01-01"')
self.assertInResponse('OK')
def test_list_composer_by_year(self):
self.send_request('list "composer" "date" "2001"')
self.assertInResponse('OK')
def test_list_composer_by_genre(self):
self.send_request('list "composer" "genre" "agenre"')
self.assertInResponse('OK')
def test_list_composer_by_artist_and_album(self):
self.send_request(
'list "composer" "artist" "anartist" "album" "analbum"')
self.assertInResponse('OK')
def test_list_composer_without_filter_value(self):
self.send_request('list "composer" "artist" ""')
self.assertInResponse('OK')
def test_list_composer_should_not_return_artists_without_names(self):
self.backend.library.dummy_find_exact_result = SearchResult(
tracks=[Track(composers=[Artist(name='')])])
self.send_request('list "composer"')
self.assertNotInResponse('Artist: ')
self.assertNotInResponse('Albumartist: ')
self.assertNotInResponse('Composer: ')
self.assertNotInResponse('Performer: ')
self.assertInResponse('OK')
# Performer
def test_list_performer_with_quotes(self):
self.send_request('list "performer"')
self.assertInResponse('OK')
def test_list_performer_without_quotes(self):
self.send_request('list performer')
self.assertInResponse('OK')
def test_list_performer_without_quotes_and_capitalized(self):
        self.send_request('list Performer')
self.assertInResponse('OK')
def test_list_performer_with_query_of_one_token(self):
self.send_request('list "performer" "anartist"')
self.assertEqualResponse(
'ACK [2@0] {list} should be "Album" for 3 arguments')
def test_list_performer_with_unknown_field_in_query_returns_ack(self):
self.send_request('list "performer" "foo" "bar"')
self.assertEqualResponse('ACK [2@0] {list} not able to parse args')
def test_list_performer_by_artist(self):
self.send_request('list "performer" "artist" "anartist"')
self.assertInResponse('OK')
def test_list_performer_by_album(self):
self.send_request('list "performer" "album" "analbum"')
self.assertInResponse('OK')
def test_list_performer_by_full_date(self):
self.send_request('list "performer" "date" "2001-01-01"')
self.assertInResponse('OK')
def test_list_performer_by_year(self):
self.send_request('list "performer" "date" "2001"')
self.assertInResponse('OK')
def test_list_performer_by_genre(self):
self.send_request('list "performer" "genre" "agenre"')
self.assertInResponse('OK')
def test_list_performer_by_artist_and_album(self):
self.send_request(
'list "performer" "artist" "anartist" "album" "analbum"')
self.assertInResponse('OK')
def test_list_performer_without_filter_value(self):
self.send_request('list "performer" "artist" ""')
self.assertInResponse('OK')
def test_list_performer_should_not_return_artists_without_names(self):
self.backend.library.dummy_find_exact_result = SearchResult(
tracks=[Track(performers=[Artist(name='')])])
self.send_request('list "performer"')
self.assertNotInResponse('Artist: ')
self.assertNotInResponse('Albumartist: ')
self.assertNotInResponse('Composer: ')
self.assertNotInResponse('Performer: ')
self.assertInResponse('OK')
# Album
def test_list_album_with_quotes(self):
self.send_request('list "album"')
self.assertInResponse('OK')
def test_list_album_without_quotes(self):
self.send_request('list album')
self.assertInResponse('OK')
def test_list_album_without_quotes_and_capitalized(self):
self.send_request('list Album')
self.assertInResponse('OK')
def test_list_album_with_artist_name(self):
self.backend.library.dummy_get_distinct_result = {
'album': set(['foo'])}
self.send_request('list "album" "anartist"')
self.assertInResponse('Album: foo')
self.assertInResponse('OK')
def test_list_album_with_artist_name_without_filter_value(self):
self.send_request('list "album" ""')
self.assertInResponse('OK')
def test_list_album_by_artist(self):
self.send_request('list "album" "artist" "anartist"')
self.assertInResponse('OK')
def test_list_album_by_album(self):
self.send_request('list "album" "album" "analbum"')
self.assertInResponse('OK')
def test_list_album_by_albumartist(self):
self.send_request('list "album" "albumartist" "anartist"')
self.assertInResponse('OK')
def test_list_album_by_composer(self):
self.send_request('list "album" "composer" "anartist"')
self.assertInResponse('OK')
def test_list_album_by_performer(self):
self.send_request('list "album" "performer" "anartist"')
self.assertInResponse('OK')
def test_list_album_by_full_date(self):
self.send_request('list "album" "date" "2001-01-01"')
self.assertInResponse('OK')
def test_list_album_by_year(self):
self.send_request('list "album" "date" "2001"')
self.assertInResponse('OK')
def test_list_album_by_genre(self):
self.send_request('list "album" "genre" "agenre"')
self.assertInResponse('OK')
def test_list_album_by_artist_and_album(self):
self.send_request(
'list "album" "artist" "anartist" "album" "analbum"')
self.assertInResponse('OK')
def test_list_album_without_filter_value(self):
self.send_request('list "album" "artist" ""')
self.assertInResponse('OK')
def test_list_album_should_not_return_albums_without_names(self):
self.backend.library.dummy_find_exact_result = SearchResult(
tracks=[Track(album=Album(name=''))])
self.send_request('list "album"')
self.assertNotInResponse('Album: ')
self.assertInResponse('OK')
# Date
def test_list_date_with_quotes(self):
self.send_request('list "date"')
self.assertInResponse('OK')
def test_list_date_without_quotes(self):
self.send_request('list date')
self.assertInResponse('OK')
def test_list_date_without_quotes_and_capitalized(self):
self.send_request('list Date')
self.assertInResponse('OK')
def test_list_date_with_query_of_one_token(self):
self.send_request('list "date" "anartist"')
self.assertEqualResponse(
'ACK [2@0] {list} should be "Album" for 3 arguments')
def test_list_date_by_artist(self):
self.send_request('list "date" "artist" "anartist"')
self.assertInResponse('OK')
def test_list_date_by_album(self):
self.send_request('list "date" "album" "analbum"')
self.assertInResponse('OK')
def test_list_date_by_full_date(self):
self.send_request('list "date" "date" "2001-01-01"')
self.assertInResponse('OK')
def test_list_date_by_year(self):
self.send_request('list "date" "date" "2001"')
self.assertInResponse('OK')
def test_list_date_by_genre(self):
self.send_request('list "date" "genre" "agenre"')
self.assertInResponse('OK')
def test_list_date_by_artist_and_album(self):
self.send_request('list "date" "artist" "anartist" "album" "analbum"')
self.assertInResponse('OK')
def test_list_date_without_filter_value(self):
self.send_request('list "date" "artist" ""')
self.assertInResponse('OK')
def test_list_date_should_not_return_blank_dates(self):
self.backend.library.dummy_find_exact_result = SearchResult(
tracks=[Track(date='')])
self.send_request('list "date"')
self.assertNotInResponse('Date: ')
self.assertInResponse('OK')
# Genre
def test_list_genre_with_quotes(self):
self.send_request('list "genre"')
self.assertInResponse('OK')
def test_list_genre_without_quotes(self):
self.send_request('list genre')
self.assertInResponse('OK')
def test_list_genre_without_quotes_and_capitalized(self):
self.send_request('list Genre')
self.assertInResponse('OK')
def test_list_genre_with_query_of_one_token(self):
self.send_request('list "genre" "anartist"')
self.assertEqualResponse(
'ACK [2@0] {list} should be "Album" for 3 arguments')
def test_list_genre_by_artist(self):
self.send_request('list "genre" "artist" "anartist"')
self.assertInResponse('OK')
def test_list_genre_by_album(self):
self.send_request('list "genre" "album" "analbum"')
self.assertInResponse('OK')
def test_list_genre_by_full_date(self):
self.send_request('list "genre" "date" "2001-01-01"')
self.assertInResponse('OK')
def test_list_genre_by_year(self):
self.send_request('list "genre" "date" "2001"')
self.assertInResponse('OK')
def test_list_genre_by_genre(self):
self.send_request('list "genre" "genre" "agenre"')
self.assertInResponse('OK')
def test_list_genre_by_artist_and_album(self):
self.send_request(
'list "genre" "artist" "anartist" "album" "analbum"')
self.assertInResponse('OK')
def test_list_genre_without_filter_value(self):
self.send_request('list "genre" "artist" ""')
self.assertInResponse('OK')
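# Tests for the MPD "search" command. Results come from dummy_search_result;
# matched albums and artists are reported as fake "file:" entries alongside
# plain tracks.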
class MusicDatabaseSearchTest(protocol.BaseTestCase):
def test_search(self):
self.backend.library.dummy_search_result = SearchResult(
albums=[Album(uri='dummy:album:a', name='A')],
artists=[Artist(uri='dummy:artist:b', name='B')],
tracks=[Track(uri='dummy:track:c', name='C')])
self.send_request('search "any" "foo"')
self.assertInResponse('file: dummy:album:a')
self.assertInResponse('Title: Album: A')
self.assertInResponse('file: dummy:artist:b')
self.assertInResponse('Title: Artist: B')
self.assertInResponse('file: dummy:track:c')
self.assertInResponse('Title: C')
self.assertInResponse('OK')
def test_search_album(self):
self.send_request('search "album" "analbum"')
self.assertInResponse('OK')
def test_search_album_without_quotes(self):
self.send_request('search album "analbum"')
self.assertInResponse('OK')
def test_search_album_without_filter_value(self):
self.send_request('search "album" ""')
self.assertInResponse('OK')
def test_search_artist(self):
self.send_request('search "artist" "anartist"')
self.assertInResponse('OK')
def test_search_artist_without_quotes(self):
self.send_request('search artist "anartist"')
self.assertInResponse('OK')
def test_search_artist_without_filter_value(self):
self.send_request('search "artist" ""')
self.assertInResponse('OK')
def test_search_albumartist(self):
self.send_request('search "albumartist" "analbumartist"')
self.assertInResponse('OK')
def test_search_albumartist_without_quotes(self):
self.send_request('search albumartist "analbumartist"')
self.assertInResponse('OK')
def test_search_albumartist_without_filter_value(self):
self.send_request('search "albumartist" ""')
self.assertInResponse('OK')
def test_search_composer(self):
self.send_request('search "composer" "acomposer"')
self.assertInResponse('OK')
def test_search_composer_without_quotes(self):
self.send_request('search composer "acomposer"')
self.assertInResponse('OK')
def test_search_composer_without_filter_value(self):
self.send_request('search "composer" ""')
self.assertInResponse('OK')
def test_search_performer(self):
self.send_request('search "performer" "aperformer"')
self.assertInResponse('OK')
def test_search_performer_without_quotes(self):
self.send_request('search performer "aperformer"')
self.assertInResponse('OK')
def test_search_performer_without_filter_value(self):
self.send_request('search "performer" ""')
self.assertInResponse('OK')
def test_search_filename(self):
self.send_request('search "filename" "afilename"')
self.assertInResponse('OK')
def test_search_filename_without_quotes(self):
self.send_request('search filename "afilename"')
self.assertInResponse('OK')
def test_search_filename_without_filter_value(self):
self.send_request('search "filename" ""')
self.assertInResponse('OK')
def test_search_file(self):
self.send_request('search "file" "afilename"')
self.assertInResponse('OK')
def test_search_file_without_quotes(self):
self.send_request('search file "afilename"')
self.assertInResponse('OK')
def test_search_file_without_filter_value(self):
self.send_request('search "file" ""')
self.assertInResponse('OK')
def test_search_title(self):
self.send_request('search "title" "atitle"')
self.assertInResponse('OK')
def test_search_title_without_quotes(self):
self.send_request('search title "atitle"')
self.assertInResponse('OK')
def test_search_title_without_filter_value(self):
self.send_request('search "title" ""')
self.assertInResponse('OK')
def test_search_any(self):
self.send_request('search "any" "anything"')
self.assertInResponse('OK')
def test_search_any_without_quotes(self):
self.send_request('search any "anything"')
self.assertInResponse('OK')
def test_search_any_without_filter_value(self):
self.send_request('search "any" ""')
self.assertInResponse('OK')
def test_search_track_no(self):
self.send_request('search "track" "10"')
self.assertInResponse('OK')
def test_search_track_no_without_quotes(self):
self.send_request('search track "10"')
self.assertInResponse('OK')
def test_search_track_no_without_filter_value(self):
self.send_request('search "track" ""')
self.assertInResponse('OK')
def test_search_genre(self):
self.send_request('search "genre" "agenre"')
self.assertInResponse('OK')
def test_search_genre_without_quotes(self):
self.send_request('search genre "agenre"')
self.assertInResponse('OK')
def test_search_genre_without_filter_value(self):
self.send_request('search "genre" ""')
self.assertInResponse('OK')
def test_search_date(self):
self.send_request('search "date" "2002-01-01"')
self.assertInResponse('OK')
def test_search_date_without_quotes(self):
self.send_request('search date "2002-01-01"')
self.assertInResponse('OK')
def test_search_date_with_capital_d_and_incomplete_date(self):
self.send_request('search Date "2005"')
self.assertInResponse('OK')
def test_search_date_without_filter_value(self):
self.send_request('search "date" ""')
self.assertInResponse('OK')
def test_search_comment(self):
self.send_request('search "comment" "acomment"')
self.assertInResponse('OK')
def test_search_comment_without_quotes(self):
self.send_request('search comment "acomment"')
self.assertInResponse('OK')
def test_search_comment_without_filter_value(self):
self.send_request('search "comment" ""')
self.assertInResponse('OK')
def test_search_else_should_fail(self):
self.send_request('search "sometype" "something"')
self.assertEqualResponse('ACK [2@0] {search} incorrect arguments')
|
|
import os
import sys
import linecache
import re
import tkinter as tk
from idlelib.TreeWidget import TreeNode, TreeItem, ScrolledCanvas
from idlelib.ObjectBrowser import ObjectTreeItem, make_objecttreeitem
from idlelib.PyShell import PyShellFileList
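# StackBrowser opens a Toplevel holding a scrolled tree view of a traceback;
# each frame expands into <locals> and <globals> variable sub-trees.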
def StackBrowser(root, flist=None, tb=None, top=None):
if top is None:
from tkinter import Toplevel
top = Toplevel(root)
sc = ScrolledCanvas(top, bg="white", highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
item = StackTreeItem(flist, tb)
node = TreeNode(sc.canvas, None, item)
node.expand()
class StackTreeItem(TreeItem):
def __init__(self, flist=None, tb=None):
self.flist = flist
self.stack = self.get_stack(tb)
self.text = self.get_exception()
def get_stack(self, tb):
if tb is None:
tb = sys.last_traceback
stack = []
if tb and tb.tb_frame is None:
tb = tb.tb_next
while tb is not None:
stack.append((tb.tb_frame, tb.tb_lineno))
tb = tb.tb_next
return stack
def get_exception(self):
type = sys.last_type
value = sys.last_value
if hasattr(type, "__name__"):
type = type.__name__
s = str(type)
if value is not None:
s = s + ": " + str(value)
return s
def GetText(self):
return self.text
def GetSubList(self):
sublist = []
for info in self.stack:
item = FrameTreeItem(info, self.flist)
sublist.append(item)
return sublist
class FrameTreeItem(TreeItem):
def __init__(self, info, flist):
self.info = info
self.flist = flist
def GetText(self):
frame, lineno = self.info
try:
modname = frame.f_globals["__name__"]
except:
modname = "?"
code = frame.f_code
filename = code.co_filename
funcname = code.co_name
sourceline = linecache.getline(filename, lineno)
sourceline = sourceline.strip()
if funcname in ("?", "", None):
item = "%s, line %d: %s" % (modname, lineno, sourceline)
else:
item = "%s.%s(...), line %d: %s" % (modname, funcname,
lineno, sourceline)
return item
def GetSubList(self):
frame, lineno = self.info
sublist = []
if frame.f_globals is not frame.f_locals:
item = VariablesTreeItem("<locals>", frame.f_locals, self.flist)
sublist.append(item)
item = VariablesTreeItem("<globals>", frame.f_globals, self.flist)
sublist.append(item)
return sublist
def OnDoubleClick(self):
if self.flist:
frame, lineno = self.info
filename = frame.f_code.co_filename
if os.path.isfile(filename):
self.flist.gotofileline(filename, lineno)
class VariablesTreeItem(ObjectTreeItem):
def GetText(self):
return self.labeltext
def GetLabelText(self):
return None
def IsExpandable(self):
return len(self.object) > 0
def keys(self):
return list(self.object.keys())
def GetSubList(self):
sublist = []
for key in self.keys():
try:
value = self.object[key]
except KeyError:
continue
def setfunction(value, key=key, object=self.object):
object[key] = value
item = make_objecttreeitem(key + " =", value, setfunction)
sublist.append(item)
return sublist
def _stack_viewer(parent):
root = tk.Tk()
root.title("Test StackViewer")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
flist = PyShellFileList(root)
try: # to obtain a traceback object
intentional_name_error
except NameError:
exc_type, exc_value, exc_tb = sys.exc_info()
# inject stack trace to sys
sys.last_type = exc_type
sys.last_value = exc_value
sys.last_traceback = exc_tb
StackBrowser(root, flist=flist, top=root, tb=exc_tb)
# restore sys to original state
del sys.last_type
del sys.last_value
del sys.last_traceback
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(_stack_viewer)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import unittest
from pyowm.weatherapi25.weather_manager import WeatherManager
from pyowm.commons.http_client import HttpClient
from pyowm.constants import WEATHER_API_VERSION
from pyowm.config import DEFAULT_CONFIG
from pyowm.weatherapi25.historian import Historian
from pyowm.weatherapi25.forecast import Forecast
from pyowm.weatherapi25.forecaster import Forecaster
from pyowm.weatherapi25.location import Location
from pyowm.weatherapi25.observation import Observation
from pyowm.weatherapi25.one_call import OneCall
from pyowm.weatherapi25.stationhistory import StationHistory
from pyowm.weatherapi25.weather import Weather
from tests.unit.weatherapi25.json_test_responses import (
OBSERVATION_JSON, SEARCH_RESULTS_JSON, THREE_HOURS_FORECAST_JSON, DAILY_FORECAST_JSON,
THREE_HOURS_FORECAST_AT_COORDS_JSON, DAILY_FORECAST_AT_COORDS_JSON,
THREE_HOURS_FORECAST_AT_ID_JSON, DAILY_FORECAST_AT_ID_JSON,
CITY_WEATHER_HISTORY_JSON, STATION_TICK_WEATHER_HISTORY_JSON,
STATION_WEATHER_HISTORY_JSON, THREE_HOURS_FORECAST_NOT_FOUND_JSON,
DAILY_FORECAST_NOT_FOUND_JSON, STATION_HISTORY_NO_ITEMS_JSON,
WEATHER_AT_PLACES_IN_BBOX_JSON, ONE_CALL_JSON, ONE_CALL_HISTORY_JSON)
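# Unit tests for WeatherManager. No network I/O is performed: HttpClient.get_json
# is replaced with one of the mocks below, each of which returns a
# (status code, parsed JSON) pair built from the canned responses in
# json_test_responses.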
class TestWeatherManager(unittest.TestCase):
__test_instance = WeatherManager('fakeapikey', DEFAULT_CONFIG)
# --- MOCKS ---
def mock_api_call_returning_single_obs(self, uri, params=None, headers=None):
return 200, json.loads(OBSERVATION_JSON)
def mock_api_call_ping(self, uri, params=None, headers=None):
return 200, json.loads(OBSERVATION_JSON)
def mock_api_call_returning_multiple_obs(self, uri, params=None, headers=None):
return 200, json.loads(SEARCH_RESULTS_JSON)
def mock_api_call_returning_3h_forecast(self, uri, params=None, headers=None):
return 200, json.loads(THREE_HOURS_FORECAST_JSON)
def mock_api_call_returning_empty_3h_forecast(self, uri, params=None, headers=None):
return 200, json.loads(THREE_HOURS_FORECAST_NOT_FOUND_JSON)
def mock_api_call_returning_empty_daily_forecast(self, uri, params=None, headers=None):
return 200, json.loads(DAILY_FORECAST_NOT_FOUND_JSON)
    def mock_api_call_returning_3h_forecast_at_coords(self, uri, params=None, headers=None):
return 200, json.loads(THREE_HOURS_FORECAST_AT_COORDS_JSON)
def mock_api_call_returning_3h_forecast_at_id(self, uri, params=None, headers=None):
return 200, json.loads(THREE_HOURS_FORECAST_AT_ID_JSON)
def mock_api_call_returning_daily_forecast(self, uri, params=None, headers=None):
return 200, json.loads(DAILY_FORECAST_JSON)
def mock_api_call_returning_daily_forecast_at_coords(self, uri, params=None, headers=None):
return 200, json.loads(DAILY_FORECAST_AT_COORDS_JSON)
def mock_api_call_returning_daily_forecast_at_id(self, uri, params=None, headers=None):
return 200, json.loads(DAILY_FORECAST_AT_ID_JSON)
def mock_api_call_returning_city_weather_history(self, uri, params=None, headers=None):
return 200, json.loads(CITY_WEATHER_HISTORY_JSON)
def mock_api_call_returning_station_tick_weather_history(self, uri, params=None, headers=None):
return 200, json.loads(STATION_TICK_WEATHER_HISTORY_JSON)
def mock_api_call_returning_station_hour_weather_history(self, uri, params=None, headers=None):
return 200, json.loads(STATION_WEATHER_HISTORY_JSON)
def mock_call_api_returning_station_day_weather_history(self, uri, params=None, headers=None):
return 200, json.loads(STATION_WEATHER_HISTORY_JSON)
def mock_call_api_returning_station_history_with_no_items(self, uri, params=None, headers=None):
return 200, json.loads(STATION_HISTORY_NO_ITEMS_JSON)
def mock_api_call_returning_weather_at_places_in_bbox(self, uri, params=None, headers=None):
return 200, json.loads(WEATHER_AT_PLACES_IN_BBOX_JSON)
def mock_api_call_returning_weather_history_at_coords(self, uri, params=None, headers=None):
return 200, json.loads(CITY_WEATHER_HISTORY_JSON)
def mock_api_call_returning_onecall_data(self, uri, params=None, headers=None):
return 200, json.loads(ONE_CALL_JSON)
def mock_api_call_returning_onecall_history_data(self, uri, params=None, headers=None):
return 200, json.loads(ONE_CALL_HISTORY_JSON)
def mock__retrieve_station_history(self, station_ID, limit, interval):
return None
# -- TESTS --
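    # Each test saves HttpClient.get_json, patches it with one of the mocks
    # above, runs the call under test, then restores the original function
    # before asserting on the result.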
def test_get_weather_api_version(self):
result = self.__test_instance.weather_api_version()
self.assertIsInstance(result, tuple)
self.assertEqual(result, WEATHER_API_VERSION)
def test_repr(self):
print(self.__test_instance)
def test_instantiation_with_wrong_params(self):
with self.assertRaises(AssertionError):
WeatherManager(None, dict())
with self.assertRaises(AssertionError):
WeatherManager('apikey', None)
def test_weather_at_place(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_single_obs
result = self.__test_instance.weather_at_place("London,uk")
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, Observation))
self.assertTrue(result.reception_time() is not None)
loc = result.location
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = result.weather
self.assertTrue(weat is not None)
def test_weather_at_place_fails_with_wrong_parameters(self):
self.assertRaises(AssertionError, WeatherManager.weather_at_place, self.__test_instance, 3)
def test_weather_at_coords(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_single_obs
result = self.__test_instance.weather_at_coords(57.0, -2.15)
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, Observation))
self.assertTrue(result.reception_time() is not None)
loc = result.location
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = result.weather
self.assertTrue(weat is not None)
def test_weather_at_zip_code(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_single_obs
result = self.__test_instance.weather_at_zip_code("2000", "AU")
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, Observation))
self.assertTrue(result.reception_time() is not None)
loc = result.location
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = result.weather
self.assertTrue(weat is not None)
def test_weather_at_coords_fails_when_coordinates_out_of_bounds(self):
"""
Test failure when providing: lon < -180, lon > 180, lat < -90, lat > 90
"""
self.assertRaises(ValueError, WeatherManager.weather_at_coords, self.__test_instance, 43.7, -200.0)
self.assertRaises(ValueError, WeatherManager.weather_at_coords, self.__test_instance, 43.7, 200.0)
self.assertRaises(ValueError, WeatherManager.weather_at_coords, self.__test_instance, -200, 2.5)
self.assertRaises(ValueError, WeatherManager.weather_at_coords, self.__test_instance, 200, 2.5)
def test_weather_at_id(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_single_obs
result = self.__test_instance.weather_at_id(5128581) # New York city, US
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, Observation))
self.assertTrue(result.reception_time() is not None)
loc = result.location
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = result.weather
self.assertTrue(weat is not None)
def test_weather_at_id_fails_when_id_negative(self):
self.assertRaises(ValueError, WeatherManager.weather_at_id, self.__test_instance, -156667)
def test_weather_at_ids(self):
ref_to_original_call_API = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_multiple_obs
result = self.__test_instance.weather_at_ids([5128581, 15647, 78654])
HttpClient.get_json = ref_to_original_call_API
self.assertTrue(isinstance(result, list))
for obs in result:
self.assertTrue(obs is not None)
self.assertTrue(isinstance(obs, Observation))
weat = obs.weather
self.assertTrue(weat is not None)
def test_weather_at_ids_fails_when_wrong_parameters(self):
self.assertRaises(AssertionError, WeatherManager.weather_at_ids, self.__test_instance, "test")
self.assertRaises(ValueError, WeatherManager.weather_at_ids, self.__test_instance, [-1, 2, 3])
def test_weather_at_places_without_limits(self):
original_func = HttpClient.get_json
        HttpClient.get_json = \
self.mock_api_call_returning_multiple_obs
result = \
self.__test_instance.weather_at_places("London", "accurate")
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, list))
self.assertEqual(2, len(result))
for item in result:
self.assertTrue(item is not None)
self.assertTrue(item.reception_time())
loc = item.location
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = item.weather
self.assertTrue(weat is not None)
def test_weather_at_places_with_limits(self):
original_func = HttpClient.get_json
        HttpClient.get_json = \
self.mock_api_call_returning_multiple_obs
result = \
self.__test_instance.weather_at_places("London", "accurate", limit=2)
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, list))
self.assertEqual(2, len(result))
for item in result:
self.assertTrue(item is not None)
self.assertTrue(item.reception_time())
loc = item.location
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = item.weather
self.assertTrue(weat is not None)
def test_weather_at_places_fails_with_wrong_params(self):
self.assertRaises(ValueError, WeatherManager.weather_at_places, self.__test_instance, "London", "x")
self.assertRaises(ValueError, WeatherManager.weather_at_places, self.__test_instance, "London", "accurate", -5)
def test_weather_around_coords_without_limits(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_multiple_obs
result = self.__test_instance.weather_around_coords(57.0, -2.15)
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, list))
for item in result:
self.assertTrue(item is not None)
self.assertTrue(item.reception_time() is not None)
loc = item.location
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = item.weather
self.assertTrue(weat is not None)
def test_weather_around_coords_with_limits(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_multiple_obs
result = self.__test_instance.weather_around_coords(57.0, -2.15, limit=2)
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, list))
for item in result:
self.assertTrue(item is not None)
self.assertTrue(item.reception_time() is not None)
loc = item.location
self.assertTrue(loc is not None)
self.assertTrue(all(v is not None for v in loc.__dict__.values()))
weat = item.weather
self.assertTrue(weat is not None)
def test_weather_around_coords_fails_when_coordinates_out_of_bounds(self):
"""
Test failure when providing: lon < -180, lon > 180, lat < -90, lat > 90
"""
self.assertRaises(ValueError, WeatherManager.weather_around_coords, self.__test_instance, 43.7, -200.0)
self.assertRaises(ValueError, WeatherManager.weather_around_coords, self.__test_instance, 43.7, 200.0)
self.assertRaises(ValueError, WeatherManager.weather_around_coords, self.__test_instance, -200, 2.5)
self.assertRaises(ValueError, WeatherManager.weather_around_coords, self.__test_instance, 200, 2.5)
def test_weather_around_coords_fails_with_wrong_params(self):
self.assertRaises(ValueError, WeatherManager.weather_around_coords, self.__test_instance, 43.7, 20.0, -3)
def test_forecast_at_place_fails_with_wrong_params(self):
self.assertRaises(AssertionError, WeatherManager.forecast_at_place,
self.__test_instance, None, "daily", 3)
self.assertRaises(AssertionError, WeatherManager.forecast_at_place,
self.__test_instance, "London,uk", None, -3)
self.assertRaises(ValueError, WeatherManager.forecast_at_place,
self.__test_instance, "London,uk", "wrong", 3)
self.assertRaises(ValueError, WeatherManager.forecast_at_place,
self.__test_instance, "London,uk", "daily", -3)
def test_forecast_at_place_on_3h(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_3h_forecast
result = self.__test_instance.forecast_at_place("London,uk", "3h")
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, Forecaster))
forecast = result.forecast
self.assertTrue(isinstance(forecast, Forecast))
self.assertTrue(forecast.interval is not None)
self.assertTrue(forecast.reception_time() is not None)
self.assertTrue(isinstance(forecast.location, Location))
self.assertEqual(1, len(forecast))
for weather in forecast:
self.assertTrue(isinstance(weather, Weather))
def test_forecast_at_place_on_3h_when_forecast_not_found(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_empty_3h_forecast
result = self.__test_instance.forecast_at_place("London,uk", "3h")
HttpClient.get_json = original_func
self.assertIsNone(result)
def test_forecast_at_coords_failing(self):
self.assertRaises(ValueError, WeatherManager.forecast_at_coords,
self.__test_instance, -100.0, 0.0, '3h', None)
self.assertRaises(ValueError, WeatherManager.forecast_at_coords,
self.__test_instance, 100.0, 0.0, '3h', None)
self.assertRaises(ValueError, WeatherManager.forecast_at_coords,
self.__test_instance, 0.0, -200.0, '3h', None)
self.assertRaises(ValueError, WeatherManager.forecast_at_coords,
self.__test_instance, 0.0, 200.0, '3h', None)
self.assertRaises(AssertionError, WeatherManager.forecast_at_coords,
self.__test_instance, 0.0, 60.0, None, None)
self.assertRaises(ValueError, WeatherManager.forecast_at_coords,
self.__test_instance, 0.0, 60.0, 'unsupported', None)
self.assertRaises(ValueError, WeatherManager.forecast_at_coords,
self.__test_instance, 0.0, 60.0, '3h', -4)
def test_forecast_at_coords_on_3h(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_3h_forecast_at_coords
result = \
self.__test_instance\
.forecast_at_coords(51.50853, -0.12574, "3h")
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, Forecaster))
forecast = result.forecast
self.assertTrue(isinstance(forecast, Forecast))
self.assertTrue(forecast.interval is not None)
self.assertTrue(forecast.reception_time() is not None)
self.assertTrue(isinstance(forecast.location, Location))
self.assertEqual(1, len(forecast))
for weather in forecast:
self.assertTrue(isinstance(weather, Weather))
def test_forecast_at_coords_on_3h_when_forecast_not_found(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_empty_3h_forecast
result = self.__test_instance.forecast_at_coords(51.50853, -0.12574, '3h')
HttpClient.get_json = original_func
self.assertIsNone(result)
def test_forecast_at_id_on_3h(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_3h_forecast_at_id
result = self.__test_instance.forecast_at_id(2643743, '3h')
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, Forecaster))
f = result.forecast
self.assertTrue(isinstance(f, Forecast))
self.assertTrue(f.interval is not None)
self.assertTrue(f.reception_time() is not None)
self.assertTrue(isinstance(f.location, Location))
self.assertEqual(1, len(f))
for weather in f:
self.assertTrue(isinstance(weather, Weather))
def test_forecast_at_id_on_3h_when_forecast_not_found(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_empty_3h_forecast
result = self.__test_instance.forecast_at_id(2643743, '3h')
HttpClient.get_json = original_func
self.assertIsNone(result)
def test_forecast_at_id_fails_with_wrong_params(self):
self.assertRaises(ValueError, WeatherManager.forecast_at_id, self.__test_instance, -1234, '3h', None)
self.assertRaises(AssertionError, WeatherManager.forecast_at_id, self.__test_instance, 123, None, None)
self.assertRaises(ValueError, WeatherManager.forecast_at_id, self.__test_instance, 123, 'unsupported', None)
self.assertRaises(ValueError, WeatherManager.forecast_at_id, self.__test_instance, 123, '3h', -8)
def test_forecast_at_place_daily(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_daily_forecast
result = self.__test_instance.forecast_at_place("London,uk", "daily", 2)
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, Forecaster))
forecast = result.forecast
self.assertTrue(isinstance(forecast, Forecast))
self.assertTrue(forecast.interval is not None)
self.assertTrue(forecast.reception_time() is not None)
self.assertTrue(isinstance(forecast.location, Location))
self.assertEqual(1, len(forecast))
for weather in forecast:
self.assertTrue(isinstance(weather, Weather))
def test_forecast_at_place_daily_when_forecast_not_found(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_empty_daily_forecast
result = self.__test_instance.forecast_at_place('London,uk', "daily")
HttpClient.get_json = original_func
self.assertIsNone(result)
def test_forecast_at_coords_daily(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_daily_forecast_at_coords
result = \
self.__test_instance.forecast_at_coords(51.50853, -0.12574, 'daily', 2)
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, Forecaster))
forecast = result.forecast
self.assertTrue(isinstance(forecast, Forecast))
self.assertTrue(forecast.interval is not None)
self.assertTrue(forecast.reception_time() is not None)
self.assertTrue(isinstance(forecast.location, Location))
self.assertEqual(1, len(forecast))
for weather in forecast:
self.assertTrue(isinstance(weather, Weather))
def test_forecast_at_coords_daily_fails_with_wrong_parameters(self):
self.assertRaises(ValueError, WeatherManager.forecast_at_coords,
self.__test_instance, -100.0, 0.0, 'daily')
self.assertRaises(ValueError, WeatherManager.forecast_at_coords,
self.__test_instance, 100.0, 0.0, 'daily')
self.assertRaises(ValueError, WeatherManager.forecast_at_coords,
self.__test_instance, 0.0, -200.0, 'daily')
self.assertRaises(ValueError, WeatherManager.forecast_at_coords,
self.__test_instance, 0.0, 200.0, 'daily')
self.assertRaises(AssertionError, WeatherManager.forecast_at_coords,
self.__test_instance, 0.0, 60.0, None, 2)
self.assertRaises(ValueError, WeatherManager.forecast_at_coords,
self.__test_instance, 0.0, 60.0, 'unsupported')
self.assertRaises(ValueError, WeatherManager.forecast_at_coords,
self.__test_instance, 0.0, 60.0, 'daily', -5)
    def test_forecast_at_coords_daily_when_forecast_not_found(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_empty_daily_forecast
result = self.__test_instance.forecast_at_coords(51.50853, -0.12574, 'daily')
HttpClient.get_json = original_func
self.assertIsNone(result)
    def test_forecast_at_id_daily(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_daily_forecast_at_id
result = \
self.__test_instance.forecast_at_id(2643743, 'daily', 2)
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, Forecaster))
forecast = result.forecast
self.assertTrue(isinstance(forecast, Forecast))
self.assertTrue(forecast.interval is not None)
self.assertTrue(forecast.reception_time() is not None)
self.assertTrue(isinstance(forecast.location, Location))
self.assertEqual(1, len(forecast))
for weather in forecast:
self.assertTrue(isinstance(weather, Weather))
def test_forecast_at_id_daily_when_forecast_not_found(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_empty_daily_forecast
result = self.__test_instance.forecast_at_id(123456, 'daily')
HttpClient.get_json = original_func
self.assertIsNone(result)
def test_weather_at_places_in_bbox_fails_with_wrong_params(self):
self.assertRaises(AssertionError, WeatherManager.weather_at_places_in_bbox,
self.__test_instance, 12, 32, 15, 37, 'zoom')
self.assertRaises(ValueError, WeatherManager.weather_at_places_in_bbox,
self.__test_instance, 12, 32, 15, 37, -30)
self.assertRaises(AssertionError, WeatherManager.weather_at_places_in_bbox,
self.__test_instance, 12, 32, 15, 37, 10, 'cluster')
def test_weather_at_places_in_bbox(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_weather_at_places_in_bbox
results = self.__test_instance\
.weather_at_places_in_bbox(12,32,15,37,10)
HttpClient.get_json = original_func
self.assertTrue(isinstance(results, list))
for result in results:
self.assertTrue(isinstance(result, Observation))
self.assertTrue(isinstance(result.weather, Weather))
self.assertTrue(isinstance(result.location, Location))
self.assertTrue(result.reception_time() is not None)
def test_station_tick_history_without_limits(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_station_tick_weather_history
result = self.__test_instance.station_tick_history(1234)
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, Historian))
station_history = result.station_history
self.assertTrue(isinstance(station_history, StationHistory))
self.assertTrue(isinstance(station_history.measurements, dict))
def test_station_tick_history_with_limits(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_station_tick_weather_history
result = self.__test_instance.station_tick_history(1234, limit=4)
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, Historian))
station_history = result.station_history
self.assertTrue(isinstance(station_history, StationHistory))
self.assertTrue(isinstance(station_history.measurements, dict))
def test_station_tick_history_fails_with_wrong_params(self):
self.assertRaises(ValueError, WeatherManager.station_tick_history,
self.__test_instance, 1234, -3)
def test_station_tick_history_when_forecast_not_found(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_call_api_returning_station_history_with_no_items
result = self.__test_instance.station_tick_history(1234, limit=4)
HttpClient.get_json = original_func
self.assertIsNone(result)
def test_station_hour_history_without_limits(self):
original_call = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_station_hour_weather_history
result = self.__test_instance.station_hour_history(1234)
HttpClient.get_json = original_call
self.assertTrue(isinstance(result, Historian))
station_history = result.station_history
self.assertTrue(isinstance(station_history, StationHistory))
self.assertTrue(isinstance(station_history.measurements, dict))
def test_station_hour_history_with_limits(self):
original_call = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_station_hour_weather_history
result = self.__test_instance.station_hour_history(1234, limit=4)
HttpClient.get_json = original_call
self.assertTrue(isinstance(result, Historian))
station_history = result.station_history
self.assertTrue(isinstance(station_history, StationHistory))
self.assertTrue(isinstance(station_history.measurements, dict))
def test_station_hour_history_fails_with_wrong_params(self):
self.assertRaises(ValueError, WeatherManager.station_hour_history,
self.__test_instance, 1234, -3)
def test_station_hour_history_when_forecast_not_found(self):
original_call = HttpClient.get_json
HttpClient.get_json = \
self.mock_call_api_returning_station_history_with_no_items
result = self.__test_instance.station_hour_history(1234, limit=4)
HttpClient.get_json = original_call
self.assertIsNone(result)
def test_station_day_history_with_limits(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_call_api_returning_station_day_weather_history
result = self.__test_instance.station_day_history(1234, limit=4)
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, Historian))
station_history = result.station_history
self.assertTrue(isinstance(station_history, StationHistory))
self.assertTrue(isinstance(station_history.measurements, dict))
def test_station_day_history_without_limits(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_call_api_returning_station_day_weather_history
result = self.__test_instance.station_day_history(1234)
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, Historian))
station_history = result.station_history
self.assertTrue(isinstance(station_history, StationHistory))
self.assertTrue(isinstance(station_history.measurements, dict))
def test_station_day_history_returning_none(self):
original_http_get = HttpClient.get_json
HttpClient.get_json = \
self.mock_call_api_returning_station_day_weather_history
original_retrieve_station_history = self.__test_instance._retrieve_station_history
self.__test_instance._retrieve_station_history = self.mock__retrieve_station_history
result = self.__test_instance.station_day_history(1234, limit=4)
HttpClient.get_json = original_http_get
self.__test_instance._retrieve_station_history = original_retrieve_station_history
self.assertIsNone(result)
def test_station_day_history_fails_with_wrong_params(self):
self.assertRaises(ValueError, WeatherManager.station_day_history, self.__test_instance, 1234, -3)
def test_one_call(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_onecall_data
result = self.__test_instance.one_call(46.23, 12.7)
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, OneCall))
self.assertTrue(isinstance(result.current, Weather))
self.assertTrue(isinstance(result.forecast_hourly, list))
self.assertTrue(all(isinstance(v, Weather) for v in result.forecast_hourly))
if result.forecast_daily is not None:
self.assertTrue(all(isinstance(v, Weather) for v in result.forecast_daily))
def test_one_call_fails(self):
self.assertRaises(AssertionError, WeatherManager.one_call, self.__test_instance, None, 12.7)
self.assertRaises(AssertionError, WeatherManager.one_call, self.__test_instance, 46.23, 'test')
def test_one_call_history_without_time_range(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_onecall_history_data
result = self.__test_instance.one_call_history(46.23, 12.7)
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, OneCall))
self.assertTrue(isinstance(result.current, Weather))
self.assertTrue(isinstance(result.forecast_hourly, list))
self.assertTrue(all(isinstance(v, Weather) for v in result.forecast_hourly))
if result.forecast_daily is not None:
self.assertTrue(all(isinstance(v, Weather) for v in result.forecast_daily))
def test_one_call_history_with_time_range(self):
original_func = HttpClient.get_json
HttpClient.get_json = \
self.mock_api_call_returning_onecall_history_data
result = self.__test_instance.one_call_history(46.23, 12.7, dt=1577890800)
HttpClient.get_json = original_func
self.assertTrue(isinstance(result, OneCall))
self.assertTrue(isinstance(result.current, Weather))
self.assertTrue(isinstance(result.forecast_hourly, list))
self.assertTrue(all(isinstance(v, Weather) for v in result.forecast_hourly))
if result.forecast_daily is not None:
self.assertTrue(all(isinstance(v, Weather) for v in result.forecast_daily))
def test_one_call_history_fails(self):
self.assertRaises(AssertionError, WeatherManager.one_call_history, self.__test_instance, None, 12.7, 1234567)
self.assertRaises(AssertionError, WeatherManager.one_call_history, self.__test_instance, 46.23, 'test', 1234567)
self.assertRaises(ValueError, WeatherManager.one_call_history, self.__test_instance, 46.23, 12.7, 'test')
self.assertRaises(ValueError, WeatherManager.one_call_history, self.__test_instance, 46.23, 12.7, -987)
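# The tests above all monkeypatch HttpClient.get_json by hand: save the original,
# swap in a mock, run the call, then restore. A minimal sketch of a reusable helper
# for that pattern -- hypothetical, not part of the original test suite:
from contextlib import contextmanager
@contextmanager
def patched_get_json(mock_func):
    # Temporarily replace HttpClient.get_json, always restoring the original,
    # even if the body of the `with` block raises.
    original = HttpClient.get_json
    HttpClient.get_json = mock_func
    try:
        yield
    finally:
        HttpClient.get_json = original
# Usage sketch: `with patched_get_json(self.mock_api_call_returning_multiple_obs): ...`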
|
|
"""Unit tests for reviewboard.reviews.ui.base.FileAttachmentReviewUI."""
from __future__ import unicode_literals
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.reviews.ui.base import (FileAttachmentReviewUI,
register_ui,
unregister_ui)
from reviewboard.testing import TestCase
class MyReviewUI(FileAttachmentReviewUI):
"""A basic file attachment Review UI used for testing."""
supported_mimetypes = ['application/rbtest']
supports_diffing = True
class FileAttachmentReviewUITests(SpyAgency, TestCase):
"""Unit tests for reviewboard.reviews.ui.base.FileAttachmentReviewUI."""
fixtures = ['test_users']
@classmethod
def setUpClass(cls):
super(FileAttachmentReviewUITests, cls).setUpClass()
register_ui(MyReviewUI)
@classmethod
def tearDownClass(cls):
super(FileAttachmentReviewUITests, cls).tearDownClass()
unregister_ui(MyReviewUI)
def setUp(self):
super(FileAttachmentReviewUITests, self).setUp()
self.review_request = self.create_review_request()
    def test_for_type(self):
        """Testing FileAttachmentReviewUI.for_type with match"""
        # Assumed minimal body, mirroring the pattern used by the other tests in
        # this suite: a matching mimetype should hand back a MyReviewUI instance.
        attachment = self.create_file_attachment(self.review_request,
                                                 mimetype='application/rbtest')
        review_ui = FileAttachmentReviewUI.for_type(attachment)
        self.assertIsInstance(review_ui, MyReviewUI)
def test_for_type_with_exception(self):
"""Testing FileAttachmentReviewUI.for_type sandboxes ReviewUI
instantiation
"""
class BrokenReviewUI(FileAttachmentReviewUI):
supported_mimetypes = ['image/broken']
def __init__(self, *args, **kwargs):
raise Exception('Oh no')
self.spy_on(BrokenReviewUI.__init__,
owner=BrokenReviewUI)
register_ui(BrokenReviewUI)
try:
attachment = self.create_file_attachment(self.review_request,
mimetype='image/broken')
review_ui = FileAttachmentReviewUI.for_type(attachment)
self.assertIsNone(review_ui)
self.assertTrue(BrokenReviewUI.__init__.called_with(
self.review_request,
attachment))
finally:
unregister_ui(BrokenReviewUI)
def test_build_render_context_with_inline_true(self):
"""Testing FileAttachmentReviewUI.build_render_context with inline=True
"""
self.create_file_attachment(
self.review_request,
mimetype='application/rbtest',
caption='My Attachment 1')
attachment = self.create_file_attachment(
self.review_request,
mimetype='application/rbtest',
caption='My Attachment 2')
self.create_file_attachment(
self.review_request,
mimetype='application/rbtest',
caption='My Attachment 3')
review_ui = attachment.review_ui
request = self.create_http_request(path='/r/1/file/2/')
self.assertIsInstance(review_ui, MyReviewUI)
context = review_ui.build_render_context(request=request, inline=True)
self.assertEqual(context['base_template'],
'reviews/ui/base_inline.html')
self.assertEqual(context['caption'], 'My Attachment 2')
self.assertNotIn('prev_file_attachment', context)
self.assertNotIn('next_file_attachment', context)
def test_build_render_context_with_inline_false(self):
"""Testing FileAttachmentReviewUI.build_render_context with
inline=False
"""
attachment1 = self.create_file_attachment(
self.review_request,
mimetype='application/rbtest',
caption='My Attachment 1')
attachment2 = self.create_file_attachment(
self.review_request,
mimetype='application/rbtest',
caption='My Attachment 2')
attachment3 = self.create_file_attachment(
self.review_request,
mimetype='application/rbtest',
caption='My Attachment 3')
review_ui = attachment2.review_ui
request = self.create_http_request(path='/r/1/file/2/')
self.assertIsInstance(review_ui, MyReviewUI)
context = review_ui.build_render_context(request=request, inline=False)
self.assertEqual(context['base_template'], 'reviews/ui/base.html')
self.assertEqual(context['caption'], 'My Attachment 2')
self.assertEqual(context['social_page_title'],
'Attachment for Review Request #1: My Attachment 2')
self.assertEqual(context['prev_file_attachment'], attachment1)
self.assertEqual(context['next_file_attachment'], attachment3)
self.assertEqual(
context['tabs'],
[
{
'url': '/r/1/',
'text': 'Reviews',
},
{
'url': '/r/1/file/2/',
'text': 'File',
},
])
def test_get_caption(self):
"""Testing FileAttachmentReviewUI.get_caption"""
attachment = self.create_file_attachment(
self.review_request,
mimetype='application/rbtest',
caption='My Published Caption',
draft_caption='My Draft Caption')
review_ui = attachment.review_ui
self.assertIsInstance(review_ui, MyReviewUI)
self.assertEqual(review_ui.get_caption(), 'My Published Caption')
def test_get_caption_with_draft(self):
"""Testing FileAttachmentReviewUI.get_caption with draft"""
draft = self.create_review_request_draft(self.review_request)
attachment = self.create_file_attachment(
self.review_request,
mimetype='application/rbtest',
caption='My Published Caption',
draft_caption='My Draft Caption',
draft=True)
review_ui = attachment.review_ui
self.assertIsInstance(review_ui, MyReviewUI)
self.assertEqual(review_ui.get_caption(draft), 'My Draft Caption')
def test_get_comments(self):
"""Testing FileAttachmentReviewUI.get_comments"""
attachment1 = self.create_file_attachment(
self.review_request,
mimetype='application/rbtest')
attachment2 = self.create_file_attachment(
self.review_request,
mimetype='application/rbtest')
review1 = self.create_review(self.review_request)
review2 = self.create_review(self.review_request)
# These will show up.
comment1 = self.create_file_attachment_comment(
review1,
attachment1,
text='Comment 1')
comment2 = self.create_file_attachment_comment(
review1,
attachment1,
text='Comment 2')
comment3 = self.create_file_attachment_comment(
review2,
attachment1,
text='Comment 3')
# These will not.
self.create_file_attachment_comment(
review2,
attachment2,
text='Comment 4')
self.create_file_attachment_comment(
review2,
attachment2,
diff_against_file_attachment=attachment1,
text='Comment 5')
review_ui = attachment1.review_ui
self.assertIsInstance(review_ui, MyReviewUI)
comments = review_ui.get_comments()
self.assertEqual(list(comments), [comment1, comment2, comment3])
def test_get_comments_with_diff(self):
"""Testing FileAttachmentReviewUI.get_comments with diff"""
attachment1 = self.create_file_attachment(
self.review_request,
mimetype='application/rbtest')
attachment2 = self.create_file_attachment(
self.review_request,
mimetype='application/rbtest')
review1 = self.create_review(self.review_request)
review2 = self.create_review(self.review_request)
# These will show up.
comment1 = self.create_file_attachment_comment(
review1,
attachment2,
diff_against_file_attachment=attachment1,
text='Comment 1')
comment2 = self.create_file_attachment_comment(
review1,
attachment2,
diff_against_file_attachment=attachment1,
text='Comment 2')
comment3 = self.create_file_attachment_comment(
review2,
attachment2,
diff_against_file_attachment=attachment1,
text='Comment 3')
# These will not.
self.create_file_attachment_comment(
review2,
attachment1,
text='Comment 4')
self.create_file_attachment_comment(
review2,
attachment2,
text='Comment 5')
review_ui = attachment2.review_ui
self.assertIsInstance(review_ui, MyReviewUI)
review_ui.set_diff_against(attachment1)
comments = review_ui.get_comments()
self.assertEqual(list(comments), [comment1, comment2, comment3])
def test_get_comment_link_text(self):
"""Testing FileAttachmentReviewUI.get_comment_link_text"""
attachment = self.create_file_attachment(
self.review_request,
mimetype='application/rbtest',
caption='Test Caption')
review_ui = attachment.review_ui
self.assertIsInstance(review_ui, MyReviewUI)
review = self.create_review(self.review_request)
comment = self.create_file_attachment_comment(review, attachment)
self.assertEqual(review_ui.get_comment_link_text(comment),
'Test Caption')
def test_get_comment_link_url(self):
"""Testing FileAttachmentReviewUI.get_comment_link_url"""
attachment = self.create_file_attachment(self.review_request,
mimetype='application/rbtest')
review_ui = attachment.review_ui
self.assertIsInstance(review_ui, MyReviewUI)
review = self.create_review(self.review_request)
comment = self.create_file_attachment_comment(review, attachment)
self.assertEqual(review_ui.get_comment_link_url(comment),
'/r/1/file/1/')
@add_fixtures(['test_site'])
def test_get_comment_link_url_with_local_site(self):
"""Testing FileAttachmentReviewUI.get_comment_link_url with LocalSite
"""
review_request = self.create_review_request(with_local_site=True)
attachment = self.create_file_attachment(review_request,
mimetype='application/rbtest')
review_ui = attachment.review_ui
self.assertIsInstance(review_ui, MyReviewUI)
review = self.create_review(review_request)
comment = self.create_file_attachment_comment(review, attachment)
self.assertEqual(review_ui.get_comment_link_url(comment),
'/s/local-site-1/r/1001/file/1/')
def test_get_js_model_data(self):
"""Testing FileAttachmentReviewUI.get_js_model_data"""
attachment = self.create_file_attachment(
self.review_request,
mimetype='application/rbtest',
orig_filename='filename.txt')
review_ui = attachment.review_ui
self.assertIsInstance(review_ui, MyReviewUI)
self.assertEqual(
review_ui.get_js_model_data(),
{
'fileAttachmentID': 1,
'fileRevision': 0,
'filename': 'filename.txt',
})
def test_get_js_model_data_with_history(self):
"""Testing FileAttachmentReviewUI.get_js_model_data with
FileAttachmentHistory
"""
attachment_history = self.create_file_attachment_history(
self.review_request)
attachment1 = self.create_file_attachment(
self.review_request,
attachment_history=attachment_history,
attachment_revision=0,
mimetype='application/rbtest',
orig_filename='filename.txt')
attachment2 = self.create_file_attachment(
self.review_request,
attachment_history=attachment_history,
attachment_revision=1,
mimetype='application/rbtest',
orig_filename='filename.txt')
review_ui = attachment2.review_ui
self.assertIsInstance(review_ui, MyReviewUI)
self.assertEqual(
review_ui.get_js_model_data(),
{
'attachmentRevisionIDs': [attachment1.pk, attachment2.pk],
'fileAttachmentID': attachment2.pk,
'fileRevision': 1,
'filename': 'filename.txt',
'numRevisions': 2,
})
def test_get_js_model_data_with_diff(self):
"""Testing FileAttachmentReviewUI.get_js_model_data with diff"""
attachment_history = self.create_file_attachment_history(
self.review_request)
attachment1 = self.create_file_attachment(
self.review_request,
attachment_history=attachment_history,
attachment_revision=0,
mimetype='application/rbtest',
orig_filename='filename.txt',
caption='My attachment 1')
attachment2 = self.create_file_attachment(
self.review_request,
attachment_history=attachment_history,
attachment_revision=1,
mimetype='application/rbtest',
orig_filename='filename.txt',
caption='My attachment 2')
review_ui = attachment2.review_ui
review_ui.set_diff_against(attachment1)
self.assertIsInstance(review_ui, MyReviewUI)
self.assertEqual(
review_ui.get_js_model_data(),
{
'attachmentRevisionIDs': [attachment1.pk, attachment2.pk],
'diffAgainstFileAttachmentID': attachment1.pk,
'diffCaption': 'My attachment 1',
'diffRevision': 0,
'fileAttachmentID': attachment2.pk,
'fileRevision': 1,
'filename': 'filename.txt',
'numRevisions': 2,
})
def test_get_js_model_data_with_diff_type_mismatch(self):
"""Testing FileAttachmentReviewUI.get_js_model_data with diff type
mismatch
"""
attachment_history = self.create_file_attachment_history(
self.review_request)
attachment1 = self.create_file_attachment(
self.review_request,
attachment_history=attachment_history,
attachment_revision=0,
mimetype='image/png',
orig_filename='filename.png',
caption='My attachment 1')
attachment2 = self.create_file_attachment(
self.review_request,
attachment_history=attachment_history,
attachment_revision=1,
mimetype='application/rbtest',
orig_filename='filename.txt',
caption='My attachment 2')
review_ui = attachment2.review_ui
review_ui.set_diff_against(attachment1)
self.assertIsInstance(review_ui, MyReviewUI)
self.assertEqual(
review_ui.get_js_model_data(),
{
'attachmentRevisionIDs': [attachment1.pk, attachment2.pk],
'diffAgainstFileAttachmentID': attachment1.pk,
'diffCaption': 'My attachment 1',
'diffRevision': 0,
'diffTypeMismatch': True,
'fileAttachmentID': attachment2.pk,
'fileRevision': 1,
'filename': 'filename.txt',
'numRevisions': 2,
})
def test_serialize_comment(self):
"""Testing FileAttachmentReviewUI.serialize_comment"""
attachment = self.create_file_attachment(self.review_request,
mimetype='application/rbtest')
review_ui = attachment.review_ui
review_ui.request = self.create_http_request()
self.assertIsInstance(review_ui, MyReviewUI)
review = self.create_review(self.review_request)
comment = self.create_file_attachment_comment(
review,
attachment,
text='My **test** comment',
rich_text=True)
self.assertEqual(
review_ui.serialize_comment(comment),
{
'comment_id': 1,
'html': '<p>My <strong>test</strong> comment</p>',
'issue_opened': False,
'issue_status': '',
'localdraft': False,
'review_id': 1,
'review_request_id': 1,
'rich_text': True,
'text': 'My **test** comment',
'url': '/r/1/#fcomment1',
'user': {
'name': 'dopey',
'username': 'dopey',
},
})
|
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test a corner-case at the level of the Cython API."""
import threading
import unittest
from grpc._cython import cygrpc
_INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
_EMPTY_FLAGS = 0
_EMPTY_METADATA = cygrpc.Metadata(())
class _ServerDriver(object):
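    # Polls the server completion queue on a background thread, recording every
    # event and stopping once the shutdown tag is observed.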
def __init__(self, completion_queue, shutdown_tag):
self._condition = threading.Condition()
self._completion_queue = completion_queue
self._shutdown_tag = shutdown_tag
self._events = []
self._saw_shutdown_tag = False
def start(self):
def in_thread():
while True:
event = self._completion_queue.poll()
with self._condition:
self._events.append(event)
self._condition.notify()
if event.tag is self._shutdown_tag:
self._saw_shutdown_tag = True
break
thread = threading.Thread(target=in_thread)
thread.start()
def done(self):
with self._condition:
return self._saw_shutdown_tag
def first_event(self):
with self._condition:
while not self._events:
self._condition.wait()
return self._events[0]
def events(self):
with self._condition:
while not self._saw_shutdown_tag:
self._condition.wait()
return tuple(self._events)
class _QueueDriver(object):
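    # Polls a completion queue on a background thread, collecting events until
    # every tag in `due` has been seen.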
def __init__(self, condition, completion_queue, due):
self._condition = condition
self._completion_queue = completion_queue
self._due = due
self._events = []
self._returned = False
def start(self):
def in_thread():
while True:
event = self._completion_queue.poll()
with self._condition:
self._events.append(event)
self._due.remove(event.tag)
self._condition.notify_all()
if not self._due:
self._returned = True
return
thread = threading.Thread(target=in_thread)
thread.start()
def done(self):
with self._condition:
return self._returned
def event_with_tag(self, tag):
with self._condition:
while True:
for event in self._events:
if event.tag is tag:
return event
self._condition.wait()
def events(self):
with self._condition:
while not self._returned:
self._condition.wait()
return tuple(self._events)
class ReadSomeButNotAllResponsesTest(unittest.TestCase):
def testReadSomeButNotAllResponses(self):
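        # Scenario: the server sends two response messages, but the client reads
        # only the first one and then cancels the call; both sides are then shut
        # down and the batch/call results are checked.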
server_completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server()
server.register_completion_queue(server_completion_queue)
port = server.add_http2_port(b'[::]:0')
server.start()
channel = cygrpc.Channel('localhost:{}'.format(port).encode())
server_shutdown_tag = 'server_shutdown_tag'
server_driver = _ServerDriver(server_completion_queue, server_shutdown_tag)
server_driver.start()
client_condition = threading.Condition()
client_due = set()
client_completion_queue = cygrpc.CompletionQueue()
client_driver = _QueueDriver(
client_condition, client_completion_queue, client_due)
client_driver.start()
server_call_condition = threading.Condition()
server_send_initial_metadata_tag = 'server_send_initial_metadata_tag'
server_send_first_message_tag = 'server_send_first_message_tag'
server_send_second_message_tag = 'server_send_second_message_tag'
server_complete_rpc_tag = 'server_complete_rpc_tag'
server_call_due = set((
server_send_initial_metadata_tag,
server_send_first_message_tag,
server_send_second_message_tag,
server_complete_rpc_tag,
))
server_call_completion_queue = cygrpc.CompletionQueue()
server_call_driver = _QueueDriver(
server_call_condition, server_call_completion_queue, server_call_due)
server_call_driver.start()
server_rpc_tag = 'server_rpc_tag'
request_call_result = server.request_call(
server_call_completion_queue, server_completion_queue, server_rpc_tag)
client_call = channel.create_call(
None, _EMPTY_FLAGS, client_completion_queue, b'/twinkies', None,
_INFINITE_FUTURE)
client_receive_initial_metadata_tag = 'client_receive_initial_metadata_tag'
client_complete_rpc_tag = 'client_complete_rpc_tag'
with client_condition:
client_receive_initial_metadata_start_batch_result = (
client_call.start_client_batch(cygrpc.Operations([
cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
]), client_receive_initial_metadata_tag))
client_due.add(client_receive_initial_metadata_tag)
client_complete_rpc_start_batch_result = (
client_call.start_client_batch(cygrpc.Operations([
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
]), client_complete_rpc_tag))
client_due.add(client_complete_rpc_tag)
server_rpc_event = server_driver.first_event()
with server_call_condition:
server_send_initial_metadata_start_batch_result = (
server_rpc_event.operation_call.start_server_batch([
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS),
], server_send_initial_metadata_tag))
server_send_first_message_start_batch_result = (
server_rpc_event.operation_call.start_server_batch([
cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS),
], server_send_first_message_tag))
server_send_initial_metadata_event = server_call_driver.event_with_tag(
server_send_initial_metadata_tag)
server_send_first_message_event = server_call_driver.event_with_tag(
server_send_first_message_tag)
with server_call_condition:
server_send_second_message_start_batch_result = (
server_rpc_event.operation_call.start_server_batch([
cygrpc.operation_send_message(b'\x07', _EMPTY_FLAGS),
], server_send_second_message_tag))
server_complete_rpc_start_batch_result = (
server_rpc_event.operation_call.start_server_batch([
cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
cygrpc.Metadata(()), cygrpc.StatusCode.ok, b'test details',
_EMPTY_FLAGS),
], server_complete_rpc_tag))
server_send_second_message_event = server_call_driver.event_with_tag(
server_send_second_message_tag)
server_complete_rpc_event = server_call_driver.event_with_tag(
server_complete_rpc_tag)
server_call_driver.events()
with client_condition:
client_receive_first_message_tag = 'client_receive_first_message_tag'
client_receive_first_message_start_batch_result = (
client_call.start_client_batch(cygrpc.Operations([
cygrpc.operation_receive_message(_EMPTY_FLAGS),
]), client_receive_first_message_tag))
client_due.add(client_receive_first_message_tag)
client_receive_first_message_event = client_driver.event_with_tag(
client_receive_first_message_tag)
client_call_cancel_result = client_call.cancel()
client_driver.events()
server.shutdown(server_completion_queue, server_shutdown_tag)
server.cancel_all_calls()
server_driver.events()
self.assertEqual(cygrpc.CallError.ok, request_call_result)
self.assertEqual(
cygrpc.CallError.ok, server_send_initial_metadata_start_batch_result)
self.assertEqual(
cygrpc.CallError.ok, client_receive_initial_metadata_start_batch_result)
self.assertEqual(
cygrpc.CallError.ok, client_complete_rpc_start_batch_result)
self.assertEqual(cygrpc.CallError.ok, client_call_cancel_result)
self.assertIs(server_rpc_tag, server_rpc_event.tag)
self.assertEqual(
cygrpc.CompletionType.operation_complete, server_rpc_event.type)
self.assertIsInstance(server_rpc_event.operation_call, cygrpc.Call)
self.assertEqual(0, len(server_rpc_event.batch_operations))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
|
""" from a gdsfactory component write Sparameters from an FDTD Lumerical simulation
"""
import json
from collections import namedtuple
import numpy as np
import pp
from pp.layers import layer2material, layer2nm
from pp.config import materials
def write(
component,
session=None,
run=True,
overwrite=False,
dirpath=pp.CONFIG["sp"],
height_nm=220,
**settings,
):
"""
writes Sparameters from a gdsfactory component using Lumerical FDTD
Args:
component: gdsfactory Component
session: you can pass a session=lumapi.FDTD() for debugging
run: True-> runs Lumerical , False -> only draws simulation
overwrite: run even if simulation results already exists
dirpath: where to store the simulations
        height_nm: nominal layer thickness in nm (used to build the Sparameters file path)
layer2nm: dict of {(1, 0): 220}
layer2material: dict of {(1, 0): "si"}
remove_layers: list of tuples (layers to remove)
background_material: for the background
port_width: port width (m)
port_height: port height (m)
port_extension_um: port extension (um)
mesh_accuracy: 2 (1: coarse, 2: fine, 3: superfine)
zmargin: for the FDTD region 1e-6 (m)
ymargin: for the FDTD region 2e-6 (m)
wavelength_start: 1.2e-6 (m)
wavelength_stop: 1.6e-6 (m)
wavelength_points: 500
    Returns:
        results: dict with wavelength_nm plus Sparameter angle/magnitude arrays
            (S11a, S11m, ...). If a simulation result already exists and
            overwrite is False, the cached Sparameters are returned directly.
"""
if hasattr(component, "simulation_settings"):
settings.update(component.simulation_settings)
sim_settings = s = dict(
layer2nm=layer2nm,
layer2material=layer2material,
remove_layers=[pp.LAYER.WGCLAD],
background_material="sio2",
port_width=3e-6,
port_height=1.5e-6,
port_extension_um=1,
mesh_accuracy=2,
zmargin=1e-6,
ymargin=2e-6,
wavelength_start=1.2e-6,
wavelength_stop=1.6e-6,
wavelength_points=500,
)
for setting in settings.keys():
assert (
setting in s
        ), f"`{setting}` is not a valid setting; valid settings: {list(s.keys())}"
s.update(**settings)
ss = namedtuple("sim_settings", s.keys())(*s.values())
assert ss.port_width < 5e-6
assert ss.port_height < 5e-6
assert ss.zmargin < 5e-6
assert ss.ymargin < 5e-6
ports = component.ports
component.remove_layers(ss.remove_layers)
component._bb_valid = False
c = pp.extend_ports(component, length=ss.port_extension_um)
gdspath = pp.write_gds(c)
filepath = component.get_sparameters_path(dirpath=dirpath, height_nm=height_nm)
filepath_json = filepath.with_suffix(".json")
filepath_sim_settings = filepath.with_suffix(".settings.json")
filepath_fsp = filepath.with_suffix(".fsp")
if run and filepath_json.exists() and not overwrite:
return json.loads(open(filepath_json).read())
if not run and session is None:
print(
"""
            you need to pass the `run=True` flag to run the simulation
To debug, you can create a lumerical FDTD session and pass it to the simulator
```
import lumapi
s = lumapi.FDTD()
import pp
c = pp.c.waveguide() # or whatever you want to simulate
pp.sp.write(component=c, run=False, session=s)
```
"""
)
pe = ss.port_extension_um * 1e-6 / 2
x_min = c.xmin * 1e-6 + pe
x_max = c.xmax * 1e-6 - pe
y_min = c.ymin * 1e-6 - ss.ymargin
y_max = c.ymax * 1e-6 + ss.ymargin
port_orientations = [p.orientation for p in ports.values()]
if 90 in port_orientations and len(ports) > 2:
y_max = c.ymax * 1e-6 - pe
x_max = c.xmax * 1e-6
elif 90 in port_orientations:
y_max = c.ymax * 1e-6 - pe
x_max = c.xmax * 1e-6 + ss.ymargin
z = 0
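    # FDTD region thickness: zmargin of cladding above and below the thickest layer.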
z_span = 2 * ss.zmargin + max(ss.layer2nm.values()) * 1e-9
import lumapi
s = session or lumapi.FDTD(hide=False)
s.newproject()
s.selectall()
s.deleteall()
s.addrect(
x_min=x_min,
x_max=x_max,
y_min=y_min,
y_max=y_max,
z=z,
z_span=z_span,
index=1.5,
name="clad",
)
material = ss.background_material
if material not in materials:
raise ValueError(f"{material} not in {list(materials.keys())}")
material = materials[material]
s.setnamed("clad", "material", material)
s.addfdtd(
dimension="3D",
x_min=x_min,
x_max=x_max,
y_min=y_min,
y_max=y_max,
z=z,
z_span=z_span,
mesh_accuracy=ss.mesh_accuracy,
use_early_shutoff=True,
)
layers = component.get_layers()
for layer, nm in ss.layer2nm.items():
if layer not in layers:
continue
assert layer in ss.layer2material, f"{layer} not in {ss.layer2material.keys()}"
material = ss.layer2material[layer]
if material not in materials:
raise ValueError(f"{material} not in {list(materials.keys())}")
material = materials[material]
s.gdsimport(str(gdspath), c.name, f"{layer[0]}:{layer[1]}")
silicon = f"GDS_LAYER_{layer[0]}:{layer[1]}"
s.setnamed(silicon, "z span", nm * 1e-9)
s.setnamed(silicon, "material", material)
for i, port in enumerate(ports.values()):
s.addport()
p = f"FDTD::ports::port {i+1}"
s.setnamed(p, "x", port.x * 1e-6)
s.setnamed(p, "y", port.y * 1e-6)
s.setnamed(p, "z span", ss.port_height)
deg = int(port.orientation)
# assert port.orientation in [0, 90, 180, 270], f"{port.orientation} needs to be [0, 90, 180, 270]"
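        # Map the port orientation to an injection axis and direction: ports facing
        # ~0/90 degrees inject backward along x/y, ports facing ~180/270 degrees forward.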
if -45 <= deg <= 45:
direction = "Backward"
injection_axis = "x-axis"
dxp = 0
dyp = ss.port_width
elif 45 < deg < 90 + 45:
direction = "Backward"
injection_axis = "y-axis"
dxp = ss.port_width
dyp = 0
elif 90 + 45 < deg < 180 + 45:
direction = "Forward"
injection_axis = "x-axis"
dxp = 0
dyp = ss.port_width
        elif 180 + 45 < deg < 360 - 45:
direction = "Forward"
injection_axis = "y-axis"
dxp = ss.port_width
dyp = 0
else:
raise ValueError(
f"port {port.name} with orientation {port.orientation} is not a valid"
" number "
)
s.setnamed(p, "direction", direction)
s.setnamed(p, "injection axis", injection_axis)
s.setnamed(p, "y span", dyp)
s.setnamed(p, "x span", dxp)
# s.setnamed(p, "theta", deg)
s.setnamed(p, "name", port.name)
s.setglobalsource("wavelength start", ss.wavelength_start)
s.setglobalsource("wavelength stop", ss.wavelength_stop)
s.setnamed("FDTD::ports", "monitor frequency points", ss.wavelength_points)
if run:
s.save(str(filepath_fsp))
s.deletesweep("s-parameter sweep")
s.addsweep(3)
s.setsweep("s-parameter sweep", "Excite all ports", 0)
s.setsweep("S sweep", "auto symmetry", True)
s.runsweep("s-parameter sweep")
# collect results
# S_matrix = s.getsweepresult("s-parameter sweep", "S matrix")
sp = s.getsweepresult("s-parameter sweep", "S parameters")
        # export S-parameter data to a .dat file that can be loaded in INTERCONNECT
s.exportsweep("s-parameter sweep", str(filepath))
print(f"wrote sparameters to {filepath}")
keys = [key for key in sp.keys() if key.startswith("S")]
ra = {f"{key}a": list(np.unwrap(np.angle(sp[key].flatten()))) for key in keys}
rm = {f"{key}m": list(np.abs(sp[key].flatten())) for key in keys}
results = {"wavelength_nm": list(sp["lambda"].flatten() * 1e9)}
results.update(ra)
results.update(rm)
with open(filepath_json, "w") as f:
json.dump(results, f)
with open(filepath_sim_settings, "w") as f:
s = sim_settings
s["layer2nm"] = [f"{k[0]}_{k[1]}_{v}" for k, v in s["layer2nm"].items()]
s["layer2material"] = [
f"{k[0]}_{k[1]}_{v}" for k, v in s["layer2material"].items()
]
json.dump(s, f)
return results
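# A minimal sketch (hypothetical, not part of this module) showing how the returned
# results dict can be consumed, e.g. plotting the S21 magnitude in dB. It assumes
# matplotlib is installed and that the sweep produced an "S21m" key.
def plot_s21_db(results):
    import matplotlib.pyplot as plt
    wavelength_nm = np.array(results["wavelength_nm"])
    s21_db = 20 * np.log10(np.array(results["S21m"]))
    plt.plot(wavelength_nm, s21_db)
    plt.xlabel("wavelength (nm)")
    plt.ylabel("|S21| (dB)")
    plt.show()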
def write_coupler_ring():
    """Write Sparameters for a sweep of coupler_ring geometries."""
    return [
        write(
            component=pp.c.coupler_ring(
                wg_width=wg_width, length_x=length_x, bend_radius=bend_radius, gap=gap
            )
        )
        for wg_width in [0.5]
        for length_x in [0.1, 1, 2, 3, 4]
        for gap in [0.15, 0.2]
        for bend_radius in [5, 10]
    ]
if __name__ == "__main__":
c = pp.c.coupler_ring(length_x=3)
r = write(component=c)
print(r)
# print(r.keys())
# print(c.ports.keys())
|
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import time
from datetime import datetime
from threading import Event
# ============= enthought library imports =======================
from apptools.preferences.preference_binding import bind_preference
from traits.api import Any, List, CInt, Int, Bool, Enum, Str, Instance
from pychron.envisage.consoleable import Consoleable
from pychron.pychron_constants import AR_AR, SIGNAL, BASELINE, WHIFF, SNIFF
class DataCollector(Consoleable):
"""
Base class for ``Collector`` objects. Provides logic for iterative measurement.
"""
measurement_script = Any
automated_run = Instance(
"pychron.experiment.automated_run.automated_run.AutomatedRun"
)
measurement_result = Str
detectors = List
check_conditionals = Bool(True)
ncounts = CInt
is_baseline = Bool(False)
for_peak_hop = Bool(False)
fits = List
series_idx = Int
fit_series_idx = Int
canceled = False
terminated = False
_truncate_signal = False
starttime = None
starttime_abs = None
_alive = False
_evt = None
_warned_no_fit = None
_warned_no_det = None
collection_kind = Enum((SNIFF, WHIFF, BASELINE, SIGNAL))
refresh_age = False
_data = None
_temp_conds = None
_result = None
_queue = None
err_message = Str
no_intensity_threshold = 100
not_intensity_count = 0
trigger = None
plot_panel_update_period = Int(1)
def __init__(self, *args, **kw):
super(DataCollector, self).__init__(*args, **kw)
bind_preference(
self,
"plot_panel_update_period",
"pychron.experiment.plot_panel_update_period",
)
# def wait(self):
# st = time.time()
# self.debug('wait started')
# while 1:
# if self._evt and self._evt.set():
# break
# self.debug('wait complete {:0.1f}s'.format(time.time() - st))
def set_truncated(self):
self._truncate_signal = True
def stop(self):
self._alive = False
if self._evt:
self._evt.set()
def set_starttime(self, s):
self.starttime = s
if s is not None:
# convert s (result of time.time()) to a datetime object
self.starttime_abs = datetime.fromtimestamp(s)
def measure(self):
if self.canceled:
return
self.measurement_result = ""
self.terminated = False
self._truncate_signal = False
self._warned_no_fit = []
self._warned_no_det = []
if self.starttime is None:
self.starttime = time.time()
self.starttime_abs = datetime.now()
et = self.ncounts * self.period_ms * 0.001
self._alive = True
self._measure()
tt = time.time() - self.starttime
self.debug("estimated time: {:0.3f} actual time: :{:0.3f}".format(et, tt))
# def plot_data(self, *args, **kw):
# from pychron.core.ui.gui import invoke_in_main_thread
# invoke_in_main_thread(self._plot_data, *args, **kw)
def set_temporary_conditionals(self, cd):
self._temp_conds = cd
def clear_temporary_conditionals(self):
self._temp_conds = None
# private
def _measure(self):
self.debug("starting measurement")
self._evt = evt = Event()
# self._queue = q = Queue()
# def writefunc():
# writer = self.data_writer
# while not q.empty() or not evt.wait(10):
# dets = self.detectors
# while not q.empty():
# x, keys, signals = q.get()
# writer(dets, x, keys, signals)
#
# # only write to file every 10 seconds and not on main thread
# t = Thread(target=writefunc)
# # t.setDaemon(True)
# t.start()
self.debug("measurement period (ms) = {}".format(self.period_ms))
period = self.period_ms * 0.001
i = 1
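        # Main acquisition loop: each pass checks conditionals, triggers the
        # spectrometer, waits one measurement period, then records and plots the
        # newly acquired data.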
while not evt.is_set():
result = self._check_iteration(i)
if not result:
if not self._pre_trigger_hook():
break
if self.trigger:
self.trigger()
evt.wait(period)
self.automated_run.plot_panel.counts = i
inc = self._iter_hook(i)
if inc is None:
break
self._post_iter_hook(i)
if inc:
i += 1
else:
if result == "cancel":
self.canceled = True
elif result == "terminate":
self.terminated = True
break
evt.set()
# self.debug('waiting for write to finish')
# t.join()
self.debug("measurement finished")
def _pre_trigger_hook(self):
return True
def _post_iter_hook(self, i):
if self.experiment_type == AR_AR and self.refresh_age and not i % 5:
self.isotope_group.calculate_age(force=True)
def _iter_hook(self, i):
return self._iteration(i)
def _iteration(self, i, detectors=None):
try:
data = self._get_data(detectors)
if not data:
return
k, s, t, inc = data
except (AttributeError, TypeError, ValueError) as e:
self.debug("failed getting data {}".format(e))
return
if k is not None and s is not None:
x = self._get_time(t)
self._save_data(x, k, s)
self._plot_data(i, x, k, s)
return inc
def _get_time(self, t):
if t is None:
t = time.time()
r = t - self.starttime
else:
            # t is provided by the spectrometer as an absolute datetime object,
            # so compute the elapsed time relative to self.starttime_abs
r = t - self.starttime_abs
# convert to seconds
r = r.total_seconds()
return r
def _get_data(self, detectors=None):
try:
data = next(self.data_generator)
except StopIteration:
self.debug("data generator stopped")
return
if data:
keys, signals, ct, inc = data
if detectors:
# data = list(zip(*(d for d in zip(*data) if d[0] in detectors)))
nkeys, nsignals = [], []
for k, s in zip(keys, signals):
if k in detectors:
nkeys.append(k)
nsignals.append(s)
data = (nkeys, nsignals, ct, inc)
self._data = (nkeys, nsignals)
else:
self._data = (keys, signals)
return data
def _save_data(self, x, keys, signals):
# self._queue.put((x, keys, signals))
self.data_writer(self.detectors, x, keys, signals)
# update arar_age
if self.is_baseline and self.for_peak_hop:
self._update_baseline_peak_hop(x, keys, signals)
else:
self._update_isotopes(x, keys, signals)
def _update_baseline_peak_hop(self, x, keys, signals):
ig = self.isotope_group
for iso in ig.itervalues():
signal = self._get_signal(keys, signals, iso.detector)
if signal is not None:
if not ig.append_data(iso.name, iso.detector, x, signal, "baseline"):
self.debug(
"baselines - failed appending data for {}. "
"not a current isotope {}".format(iso, ig.isotope_keys)
)
def _update_isotopes(self, x, keys, signals):
a = self.isotope_group
kind = self.collection_kind
for dn in keys:
dn = self._get_detector(dn)
if dn:
iso = dn.isotope
signal = self._get_signal(keys, signals, dn.name)
if signal is not None:
if not a.append_data(iso, dn.name, x, signal, kind):
self.debug(
"{} - failed appending data for {}. not a current isotope {}".format(
kind, iso, a.isotope_keys
)
)
def _get_signal(self, keys, signals, det):
try:
return signals[keys.index(det)]
except ValueError:
if det not in self._warned_no_det:
self.warning("Detector {} is not available".format(det))
self._warned_no_det.append(det)
self.canceled = True
self.stop()
def _get_detector(self, d):
if isinstance(d, str):
d = next((di for di in self.detectors if di.name == d), None)
return d
def _plot_data(self, cnt, x, keys, signals):
for dn, signal in zip(keys, signals):
det = self._get_detector(dn)
if det:
self._set_plot_data(cnt, det, x, signal)
if not cnt % self.plot_panel_update_period:
self.plot_panel.update()
def _set_plot_data(self, cnt, det, x, signal):
iso = det.isotope
detname = det.name
ypadding = det.ypadding
if self.collection_kind == SNIFF:
gs = [
(self.plot_panel.sniff_graph, iso, None, 0, 0),
(self.plot_panel.isotope_graph, iso, None, 0, 0),
]
elif self.collection_kind == BASELINE:
iso = self.isotope_group.get_isotope(detector=detname, kind="baseline")
if iso is not None:
fit = iso.get_fit(cnt)
else:
fit = "average"
gs = [(self.plot_panel.baseline_graph, detname, fit, 0, 0)]
else:
title = self.isotope_group.get_isotope_title(name=iso, detector=detname)
iso = self.isotope_group.get_isotope(name=iso, detector=detname)
fit = iso.get_fit(cnt)
gs = [
(
self.plot_panel.isotope_graph,
title,
fit,
self.series_idx,
self.fit_series_idx,
)
]
for g, name, fit, series, fit_series in gs:
pid = g.get_plotid_by_ytitle(name)
if pid is None:
self.critical(
"failed to locate {}, ytitles={}".format(name, g.get_plot_ytitles())
)
continue
g.add_datum(
(x, signal),
series=series,
plotid=pid,
update_y_limits=True,
ypadding=ypadding,
)
if fit:
g.set_fit(fit, plotid=pid, series=fit_series)
# ===============================================================================
#
# ===============================================================================
# ===============================================================================
# checks
# ===============================================================================
# def _check_modification_conditionals(self, cnt):
# tripped = self._check_conditionals(self.modification_conditionals, cnt)
# if tripped:
# queue = self.automated_run.experiment_executor.experiment_queue
# tripped.do_modifications(queue, self.automated_run)
# if tripped.use_truncation:
# return self._set_run_truncated()
def _check_conditionals(self, conditionals, cnt):
self.err_message = ""
for ti in conditionals:
if ti.check(self.automated_run, self._data, cnt):
m = "Conditional tripped: {}".format(ti.to_string())
self.info(m)
self.err_message = m
return ti
def _equilibration_func(self, tr):
if tr.use_truncation:
self.measurement_script.abbreviated_count_ratio = tr.abbreviated_count_ratio
return self._set_truncated()
elif tr.use_termination:
return "terminate"
def _modification_func(self, tr):
run = self.automated_run
ex = run.experiment_executor
queue = ex.experiment_queue
tr.do_modifications(run, ex, queue)
self.measurement_script.abbreviated_count_ratio = tr.abbreviated_count_ratio
if tr.use_truncation:
return self._set_truncated()
elif tr.use_termination:
return "terminate"
def _truncation_func(self, tr):
self.measurement_script.abbreviated_count_ratio = tr.abbreviated_count_ratio
return self._set_truncated()
def _action_func(self, tr):
tr.perform(self.measurement_script)
if not tr.resume:
return "break"
def _set_truncated(self):
self.state = "truncated"
self.automated_run.truncated = True
self.automated_run.spec.state = "truncated"
return "break"
def _check_iteration(self, i):
if self._temp_conds:
ti = self._check_conditionals(self._temp_conds, i)
if ti:
self.measurement_result = ti.action
return "break"
j = i - 1
user_counts = 0 if self.plot_panel is None else self.plot_panel.ncounts
script_counts = (
0 if self.measurement_script is None else self.measurement_script.ncounts
)
original_counts = self.ncounts
count_args = (j, original_counts)
# self.debug('user_counts={}, script_counts={}, original_counts={}'.format(user_counts,
# script_counts,
# original_counts))
if not self._alive:
self.info("measurement iteration executed {}/{} counts".format(*count_args))
return "cancel"
if user_counts != original_counts:
if i > user_counts:
self.info(
"user termination. measurement iteration executed {}/{} counts".format(
*count_args
)
)
self.plot_panel.total_counts -= original_counts - i
return self._set_truncated()
elif script_counts != original_counts:
if i > script_counts:
self.info(
"script termination. measurement iteration executed {}/{} counts".format(
*count_args
)
)
return self._set_truncated()
elif i > original_counts:
return "break"
if self._truncate_signal:
self.info("measurement iteration executed {}/{} counts".format(*count_args))
self._truncate_signal = False
return self._set_truncated()
if self.check_conditionals:
for tag, func, conditionals in (
(
"modification",
self._modification_func,
self.modification_conditionals,
),
("truncation", self._truncation_func, self.truncation_conditionals),
("action", self._action_func, self.action_conditionals),
("termination", lambda x: "terminate", self.termination_conditionals),
("cancelation", lambda x: "cancel", self.cancelation_conditionals),
(
"equilibration",
self._equilibration_func,
self.equilibration_conditionals,
),
):
if tag == "equilibration" and self.collection_kind != SNIFF:
continue
tripped = self._check_conditionals(conditionals, i)
if tripped:
self.info(
"{} conditional {}. measurement iteration executed {}/{} counts".format(
tag, tripped.message, j, original_counts
),
color="red",
)
self.automated_run.show_conditionals(tripped=tripped)
return func(tripped)
@property
def isotope_group(self):
if self.automated_run:
return self.automated_run.isotope_group
@property
def plot_panel(self):
if self.automated_run:
return self.automated_run.plot_panel
@property
def modification_conditionals(self):
if self.automated_run:
return self.automated_run.modification_conditionals
@property
def truncation_conditionals(self):
if self.automated_run:
return self.automated_run.truncation_conditionals
@property
def termination_conditionals(self):
if self.automated_run:
return self.automated_run.termination_conditionals
@property
def action_conditionals(self):
if self.automated_run:
return self.automated_run.action_conditionals
@property
def cancelation_conditionals(self):
if self.automated_run:
return self.automated_run.cancelation_conditionals
@property
def equilibration_conditionals(self):
if self.automated_run:
return self.automated_run.equilibration_conditionals
# ============= EOF =============================================
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Simple speech recognition to spot a limited number of keywords.
This is a self-contained example script that will train a very basic audio
recognition model in TensorFlow. It downloads the necessary training data and
runs with reasonable defaults to train within a few hours, even using only a CPU.
For more information, please see
https://www.tensorflow.org/tutorials/audio_recognition.
It is intended as an introduction to using neural networks for audio
recognition, and is not a full speech recognition system. For more advanced
speech systems, I recommend looking into Kaldi. This network uses a keyword
detection style to spot discrete words from a small vocabulary, consisting of
"yes", "no", "up", "down", "left", "right", "on", "off", "stop", and "go".
To run the training process, use:
bazel run tensorflow/examples/speech_commands:train
This will write out checkpoints to /tmp/speech_commands_train/, and will
download over 1GB of open source training data, so you'll need enough free space
and a good internet connection. The default data is a collection of thousands of
one-second .wav files, each containing one spoken word. This data set is
collected from https://aiyprojects.withgoogle.com/open_speech_recording; please
consider contributing to help improve this and other models!
As training progresses, it will print out its accuracy metrics, which should
rise above 90% by the end. Once it's complete, you can run the freeze script to
get a binary GraphDef that you can easily deploy on mobile applications.
If you want to train on your own data, you'll need to create .wavs with your
recordings, all at a consistent length, and then arrange them into subfolders
organized by label. For example, here's a possible file structure:
my_wavs >
up >
audio_0.wav
audio_1.wav
down >
audio_2.wav
audio_3.wav
other>
audio_4.wav
audio_5.wav
You'll also need to tell the script what labels to look for, using the
`--wanted_words` argument. In this case, 'up,down' might be what you want, and
the audio in the 'other' folder would be used to train an 'unknown' category.
To pull this all together, you'd run:
bazel run tensorflow/examples/speech_commands:train -- \
--data_dir=my_wavs --wanted_words=up,down
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import input_data
import models
from tensorflow.python.platform import gfile
FLAGS = None
def main(_):
# We want to see all the logging messages for this tutorial.
tf.logging.set_verbosity(tf.logging.INFO)
# Start a new TensorFlow session.
sess = tf.InteractiveSession()
# Begin by making sure we have the training data we need. If you already have
# training data of your own, use `--data_url= ` on the command line to avoid
# downloading.
model_settings = models.prepare_model_settings(
len(input_data.prepare_words_list(FLAGS.wanted_words.split(','))),
FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)
audio_processor = input_data.AudioProcessor(
FLAGS.data_url, FLAGS.data_dir, FLAGS.silence_percentage,
FLAGS.unknown_percentage,
FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
FLAGS.testing_percentage, model_settings)
fingerprint_size = model_settings['fingerprint_size']
label_count = model_settings['label_count']
time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000)
# Figure out the learning rates for each training phase. Since it's often
# effective to have high learning rates at the start of training, followed by
# lower levels towards the end, the number of steps and learning rates can be
# specified as comma-separated lists to define the rate at each stage. For
# example --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001
# will run 13,000 training loops in total, with a rate of 0.001 for the first
# 10,000, and 0.0001 for the final 3,000.
training_steps_list = list(map(int, FLAGS.how_many_training_steps.split(',')))
learning_rates_list = list(map(float, FLAGS.learning_rate.split(',')))
if len(training_steps_list) != len(learning_rates_list):
raise Exception(
'--how_many_training_steps and --learning_rate must be equal length '
'lists, but are %d and %d long instead' % (len(training_steps_list),
len(learning_rates_list)))
fingerprint_input = tf.placeholder(
tf.float32, [None, fingerprint_size], name='fingerprint_input')
logits, dropout_prob = models.create_model(
fingerprint_input,
model_settings,
FLAGS.model_architecture,
is_training=True)
# Define loss and optimizer
ground_truth_input = tf.placeholder(
tf.int64, [None], name='groundtruth_input')
# Optionally we can add runtime checks to spot when NaNs or other symptoms of
# numerical errors start occurring during training.
control_dependencies = []
if FLAGS.check_nans:
checks = tf.add_check_numerics_ops()
control_dependencies = [checks]
# Create the back propagation and training evaluation machinery in the graph.
with tf.name_scope('cross_entropy'):
cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy(
labels=ground_truth_input, logits=logits)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'), tf.control_dependencies(control_dependencies):
learning_rate_input = tf.placeholder(
tf.float32, [], name='learning_rate_input')
train_step = tf.train.GradientDescentOptimizer(
learning_rate_input).minimize(cross_entropy_mean)
predicted_indices = tf.argmax(logits, 1)
correct_prediction = tf.equal(predicted_indices, ground_truth_input)
confusion_matrix = tf.confusion_matrix(
ground_truth_input, predicted_indices, num_classes=label_count)
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
global_step = tf.train.get_or_create_global_step()
increment_global_step = tf.assign(global_step, global_step + 1)
saver = tf.train.Saver(tf.global_variables())
# Merge all the summaries and write them out to /tmp/retrain_logs (by default)
merged_summaries = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')
tf.global_variables_initializer().run()
start_step = 1
if FLAGS.start_checkpoint:
models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
start_step = global_step.eval(session=sess)
tf.logging.info('Training from step: %d ', start_step)
# Save graph.pbtxt.
tf.train.write_graph(sess.graph_def, FLAGS.train_dir,
FLAGS.model_architecture + '.pbtxt')
# Save list of words.
with gfile.GFile(
os.path.join(FLAGS.train_dir, FLAGS.model_architecture + '_labels.txt'),
'w') as f:
f.write('\n'.join(audio_processor.words_list))
# Training loop.
training_steps_max = np.sum(training_steps_list)
for training_step in xrange(start_step, training_steps_max + 1):
# Figure out what the current learning rate is.
training_steps_sum = 0
for i in range(len(training_steps_list)):
training_steps_sum += training_steps_list[i]
if training_step <= training_steps_sum:
learning_rate_value = learning_rates_list[i]
break
# Pull the audio samples we'll use for training.
train_fingerprints, train_ground_truth = audio_processor.get_data(
FLAGS.batch_size, 0, model_settings, FLAGS.background_frequency,
FLAGS.background_volume, time_shift_samples, 'training', sess)
# Run the graph with this batch of training data.
train_summary, train_accuracy, cross_entropy_value, _, _ = sess.run(
[
merged_summaries, evaluation_step, cross_entropy_mean, train_step,
increment_global_step
],
feed_dict={
fingerprint_input: train_fingerprints,
ground_truth_input: train_ground_truth,
learning_rate_input: learning_rate_value,
dropout_prob: 0.5
})
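    # Note: dropout_prob is fed as 0.5 here so dropout is active while
    # training; the validation and test evaluations below feed 1.0, which
    # effectively disables dropout.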
train_writer.add_summary(train_summary, training_step)
tf.logging.info('Step #%d: rate %f, accuracy %.1f%%, cross entropy %f' %
(training_step, learning_rate_value, train_accuracy * 100,
cross_entropy_value))
is_last_step = (training_step == training_steps_max)
if (training_step % FLAGS.eval_step_interval) == 0 or is_last_step:
set_size = audio_processor.set_size('validation')
total_accuracy = 0
total_conf_matrix = None
for i in xrange(0, set_size, FLAGS.batch_size):
validation_fingerprints, validation_ground_truth = (
audio_processor.get_data(FLAGS.batch_size, i, model_settings, 0.0,
0.0, 0, 'validation', sess))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy, conf_matrix = sess.run(
[merged_summaries, evaluation_step, confusion_matrix],
feed_dict={
fingerprint_input: validation_fingerprints,
ground_truth_input: validation_ground_truth,
dropout_prob: 1.0
})
validation_writer.add_summary(validation_summary, training_step)
batch_size = min(FLAGS.batch_size, set_size - i)
total_accuracy += (validation_accuracy * batch_size) / set_size
if total_conf_matrix is None:
total_conf_matrix = conf_matrix
else:
total_conf_matrix += conf_matrix
tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
tf.logging.info('Step %d: Validation accuracy = %.1f%% (N=%d)' %
(training_step, total_accuracy * 100, set_size))
# Save the model checkpoint periodically.
if (training_step % FLAGS.save_step_interval == 0 or
training_step == training_steps_max):
checkpoint_path = os.path.join(FLAGS.train_dir,
FLAGS.model_architecture + '.ckpt')
tf.logging.info('Saving to "%s-%d"', checkpoint_path, training_step)
saver.save(sess, checkpoint_path, global_step=training_step)
set_size = audio_processor.set_size('testing')
tf.logging.info('set_size=%d', set_size)
total_accuracy = 0
total_conf_matrix = None
for i in xrange(0, set_size, FLAGS.batch_size):
test_fingerprints, test_ground_truth = audio_processor.get_data(
FLAGS.batch_size, i, model_settings, 0.0, 0.0, 0, 'testing', sess)
test_accuracy, conf_matrix = sess.run(
[evaluation_step, confusion_matrix],
feed_dict={
fingerprint_input: test_fingerprints,
ground_truth_input: test_ground_truth,
dropout_prob: 1.0
})
batch_size = min(FLAGS.batch_size, set_size - i)
total_accuracy += (test_accuracy * batch_size) / set_size
if total_conf_matrix is None:
total_conf_matrix = conf_matrix
else:
total_conf_matrix += conf_matrix
tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
tf.logging.info('Final test accuracy = %.1f%% (N=%d)' % (total_accuracy * 100,
set_size))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_url',
type=str,
# pylint: disable=line-too-long
default='http://download.tensorflow.org/data/speech_commands_v0.01.tar.gz',
# pylint: enable=line-too-long
help='Location of speech training data archive on the web.')
parser.add_argument(
'--data_dir',
type=str,
default='/tmp/speech_dataset/',
help="""\
Where to download the speech training data to.
""")
parser.add_argument(
'--background_volume',
type=float,
default=0.1,
help="""\
How loud the background noise should be, between 0 and 1.
""")
parser.add_argument(
'--background_frequency',
type=float,
default=0.8,
help="""\
How many of the training samples have background noise mixed in.
""")
parser.add_argument(
'--silence_percentage',
type=float,
default=10.0,
help="""\
How much of the training data should be silence.
""")
parser.add_argument(
'--unknown_percentage',
type=float,
default=10.0,
help="""\
How much of the training data should be unknown words.
""")
parser.add_argument(
'--time_shift_ms',
type=float,
default=100.0,
help="""\
Range to randomly shift the training audio by in time.
""")
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a test set.')
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a validation set.')
parser.add_argument(
'--sample_rate',
type=int,
default=16000,
help='Expected sample rate of the wavs',)
parser.add_argument(
'--clip_duration_ms',
type=int,
default=1000,
help='Expected duration in milliseconds of the wavs',)
parser.add_argument(
'--window_size_ms',
type=float,
default=30.0,
help='How long each spectrogram timeslice is',)
parser.add_argument(
'--window_stride_ms',
type=float,
default=10.0,
      help='How far to move in time between spectrogram timeslices',)
parser.add_argument(
'--dct_coefficient_count',
type=int,
default=40,
help='How many bins to use for the MFCC fingerprint',)
parser.add_argument(
'--how_many_training_steps',
type=str,
default='15000,3000',
help='How many training loops to run',)
parser.add_argument(
'--eval_step_interval',
type=int,
default=400,
help='How often to evaluate the training results.')
parser.add_argument(
'--learning_rate',
type=str,
default='0.001,0.0001',
help='How large a learning rate to use when training.')
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='How many items to train with at once',)
parser.add_argument(
'--summaries_dir',
type=str,
default='/tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.')
parser.add_argument(
'--wanted_words',
type=str,
default='yes,no,up,down,left,right,on,off,stop,go',
help='Words to use (others will be added to an unknown label)',)
parser.add_argument(
'--train_dir',
type=str,
default='/tmp/speech_commands_train',
help='Directory to write event logs and checkpoint.')
parser.add_argument(
'--save_step_interval',
type=int,
default=100,
help='Save model checkpoint every save_steps.')
parser.add_argument(
'--start_checkpoint',
type=str,
default='',
help='If specified, restore this pretrained model before any training.')
parser.add_argument(
'--model_architecture',
type=str,
default='conv',
help='What model architecture to use')
parser.add_argument(
'--check_nans',
type=bool,
default=False,
help='Whether to check for invalid numbers during processing')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
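# Once training finishes, the checkpoints written to --train_dir can be turned
# into a deployable binary GraphDef with the companion freeze script, as noted
# in the module docstring above.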
|
|
# -*- coding: utf8 -*-
from __future__ import print_function
import re
import eatiht
import requests
from breadability.readable import Article
from requests import Request, Session
from requests.adapters import HTTPAdapter
from six.moves import urllib
from sumy.models.dom import ObjectDocumentModel, Paragraph, Sentence
from sumy.nlp.stemmers import Stemmer
from sumy.parsers.parser import DocumentParser
from sumy.summarizers.text_rank import TextRankSummarizer as Summarizer
from sumy.utils import cached_property, get_stop_words
from .webapi import HTTPURLEndpoints
try:
from http.cookiejar import CookieJar
except ImportError:
from cookielib import CookieJar
class Client:
def __init__(self, key=None):
self.__key = key
def __repr__(self):
classname = self.__class__.__name__
return '%s(key=%r)' % (classname, 'KEY' if self.key else None)
def is_valid(self):
if not self.key:
return False
assert isinstance(self.key, str)
assert len(self.key) == 40
return True
@property
def key(self):
return self.__key
class AlchemyAPI:
def __init__(self, client, base='http://access.alchemyapi.com/calls'):
self.base = base
self.client = client
self.session = requests.session()
self.endpoint = HTTPURLEndpoints.build_endpoints()
def text(self, flavor, data, options={}):
"""
Extracts the cleaned text (removes ads, navigation, etc.)
for text, a URL or HTML.
For an overview, please refer to:
http://www.alchemyapi.com/products/features/text-extraction/
For the docs, please refer to:
http://www.alchemyapi.com/api/text-extraction/
Input:
======
:param flavor: str
Which version of the call ['text', 'url', 'html']
:param data:
The data to analyze, either the text, the url or html code.
:param options:
            Various parameters that can be used to adjust how the API works.
Available Options:
==================
:option useMetadata: utilize meta description data
0: disabled
1: enabled (default)
:option extractLinks: include links
0: disabled (default)
1: enabled
Output:
=======
:return response: JSON
The response, already converted from JSON to a Python object
"""
# Make sure this request supports this flavor
if flavor not in self.endpoint['text']:
return {'status': 'ERROR',
'statusInfo':
'clean text extraction for ' + flavor + ' not available'}
# add the data to the options and analyze
options[flavor] = data
return self.connect(self.endpoint['text'][flavor], options)
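    # A minimal usage sketch (the key and URL below are hypothetical):
    #
    #     client = Client('0123456789abcdef0123456789abcdef01234567')
    #     api = AlchemyAPI(client)
    #     result = api.text('url', 'http://example.com/article', {'useMetadata': 1})
    #     if result.get('status') != 'ERROR':
    #         print(result.get('text'))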
def connect(self, endpoint, params, post_data=bytearray()):
"""
HTTP Request wrapper that is called by the endpoint functions.
This function is not intended to be called through an
external interface.
It makes the call, then converts the
returned JSON string into a Python object.
        INPUT:
        endpoint -> the API endpoint path (appended to self.base)
        params -> dict of query parameters; the API key and output mode
            are added automatically
        post_data -> optional raw body for the POST request
        OUTPUT:
        The response, already converted from JSON to a Python object.
"""
# Add the API Key and set the output mode to JSON
params['apikey'] = self.client.key
params['outputMode'] = 'json'
        # Insert the base url and the URL-encoded query parameters
        post_url = self.base + endpoint + '?' + urllib.parse.urlencode(params)
results = ""
try:
results = self.session.post(url=post_url, data=post_data)
except Exception as e:
print(e)
return {'status': 'ERROR', 'statusInfo': 'network-error'}
try:
return results.json()
except Exception as e:
if results != "":
print(results)
print(e)
return {'status': 'ERROR', 'statusInfo': 'parse-error'}
class HtmlParser(DocumentParser):
"""Parser of text from HTML format into DOM."""
    SIGNIFICANT_TAGS = ('h1', 'h2', 'h3', 'b',
                        'strong', 'big', 'dfn', 'em', 'p')
@classmethod
    def from_string(cls, string, url, tokenizer):
return cls(string, tokenizer, url)
@classmethod
def from_file(cls, file_path, url, tokenizer):
with open(file_path, 'rb') as file:
return cls(file.read(), tokenizer, url)
@classmethod
def from_url(cls, url, tokenizer):
headers = {
'User-Agent': ' '.join([
'Mozilla/5.0 (X11; Linux x86_64)',
'AppleWebKit/537.11 (KHTML, like Gecko)',
'Chrome/23.0.1271.64 Safari/537.11'
]),
'Accept': ','.join([
'text/html', 'application/xhtml+xml', 'application/xml;q=0.9',
'*/*;q=0.8'
]),
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'
}
session = Session()
session.mount('http://', HTTPAdapter(max_retries=2))
session.mount('https://', HTTPAdapter(max_retries=2))
cookies = CookieJar()
request = Request(method='GET',
url=url,
headers=headers,
cookies=cookies)
prepare = session.prepare_request(request)
response = session.send(prepare, verify=True)
if response.status_code != requests.codes.ok:
response.raise_for_status()
return cls(response.text, tokenizer, url)
def __init__(self, html_content, tokenizer, url=None):
super(HtmlParser, self).__init__(tokenizer)
self._article = Article(html_content, url)
@cached_property
def significant_words(self):
words = []
for paragraph in self._article.main_text:
for (text, annotations) in paragraph:
if not self._contains_any(annotations, *self.SIGNIFICANT_TAGS):
continue
words.extend(self.tokenize_words(text))
if words:
return tuple(words)
else:
return self.SIGNIFICANT_WORDS
@cached_property
def stigma_words(self):
words = []
for paragraph in self._article.main_text:
for (text, annotations) in paragraph:
if self._contains_any(annotations, 'a', 'strike', 's'):
words.extend(self.tokenize_words(text))
if words:
return tuple(words)
else:
return self.STIGMA_WORDS
def _contains_any(self, sequence, *args):
if sequence is None:
return False
for item in args:
if item in sequence:
return True
return False
@cached_property
def document(self):
# a abbr acronym b big blink blockquote cite code
# dd del dfn dir dl dt em h h1 h2 h3 h4
# h5 h6 i ins kbd li marquee menu ol pre q
# s samp strike strong sub sup tt u ul var
headers = 'h1', 'h2', 'h3'
annotated_text = self._article.main_text
paragraphs = []
for paragraph in annotated_text:
sentences, current_text = [], ''
for (text, annotations) in paragraph:
if annotations and any(h_tag in annotations
for h_tag in headers):
sentences.append(Sentence(text,
self._tokenizer,
is_heading=True))
elif not (annotations and 'pre' in annotations):
# skip <pre> nodes
current_text += ' ' + text
new_sentences = self.tokenize_sentences(current_text)
sentences.extend(Sentence(s, self._tokenizer)
for s in new_sentences)
paragraphs.append(Paragraph(sentences))
return ObjectDocumentModel(paragraphs)
def summarizer(parser, sentences, language='english'):
"""
:params parser: Parser for selected document type
:params sentences: Maximum sentences for summarizer.
:returns summary: Summarized page.
"""
stemmer = Stemmer(language)
summarizer = Summarizer(stemmer)
summarizer.stop_words = get_stop_words(language)
output = [str(sentence)
for sentence in summarizer(parser.document, sentences)]
return ' '.join(output)
def alt_extract(url):
client = Client('')
api = AlchemyAPI(client)
req = api.text('url', url)
if not req['status'] == 'ERROR':
clean = req.get('text')
else:
clean = eatiht.extract(url)
return '\n\n'.join(
[re.sub(r'\s+', ' ', i.strip()) for i in clean.split('\n')])
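def _example_summarize(url, max_sentences=5):
    """A minimal usage sketch, not called anywhere in this module: fetch a
    page with HtmlParser and return a TextRank summary. The Tokenizer import
    and the argument values are illustrative assumptions.
    """
    from sumy.nlp.tokenizers import Tokenizer
    parser = HtmlParser.from_url(url, Tokenizer('english'))
    return summarizer(parser, max_sentences)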
|
|
import warnings
from functools import reduce
import numpy as np
import pandas as pd
from geopandas import GeoDataFrame, GeoSeries
from geopandas.array import _check_crs, _crs_mismatch_warn
def _ensure_geometry_column(df):
"""
Helper function to ensure the geometry column is called 'geometry'.
If another column with that name exists, it will be dropped.
"""
if not df._geometry_column_name == "geometry":
if "geometry" in df.columns:
df.drop("geometry", axis=1, inplace=True)
df.rename(
columns={df._geometry_column_name: "geometry"}, copy=False, inplace=True
)
df.set_geometry("geometry", inplace=True)
def _overlay_intersection(df1, df2):
"""
Overlay Intersection operation used in overlay function
"""
# Spatial Index to create intersections
idx1, idx2 = df2.sindex.query_bulk(df1.geometry, predicate="intersects", sort=True)
# Create pairs of geometries in both dataframes to be intersected
if idx1.size > 0 and idx2.size > 0:
left = df1.geometry.take(idx1)
left.reset_index(drop=True, inplace=True)
right = df2.geometry.take(idx2)
right.reset_index(drop=True, inplace=True)
intersections = left.intersection(right)
poly_ix = intersections.type.isin(["Polygon", "MultiPolygon"])
intersections.loc[poly_ix] = intersections[poly_ix].buffer(0)
# only keep actual intersecting geometries
pairs_intersect = pd.DataFrame({"__idx1": idx1, "__idx2": idx2})
geom_intersect = intersections
# merge data for intersecting geometries
df1 = df1.reset_index(drop=True)
df2 = df2.reset_index(drop=True)
dfinter = pairs_intersect.merge(
df1.drop(df1._geometry_column_name, axis=1),
left_on="__idx1",
right_index=True,
)
dfinter = dfinter.merge(
df2.drop(df2._geometry_column_name, axis=1),
left_on="__idx2",
right_index=True,
suffixes=("_1", "_2"),
)
return GeoDataFrame(dfinter, geometry=geom_intersect, crs=df1.crs)
else:
result = df1.iloc[:0].merge(
df2.iloc[:0].drop(df2.geometry.name, axis=1),
left_index=True,
right_index=True,
suffixes=("_1", "_2"),
)
result["__idx1"] = None
result["__idx2"] = None
return result[
result.columns.drop(df1.geometry.name).tolist() + [df1.geometry.name]
]
def _overlay_difference(df1, df2):
"""
Overlay Difference operation used in overlay function
"""
# spatial index query to find intersections
idx1, idx2 = df2.sindex.query_bulk(df1.geometry, predicate="intersects", sort=True)
idx1_unique, idx1_unique_indices = np.unique(idx1, return_index=True)
idx2_split = np.split(idx2, idx1_unique_indices[1:])
sidx = [
idx2_split.pop(0) if idx in idx1_unique else []
for idx in range(df1.geometry.size)
]
# Create differences
new_g = []
for geom, neighbours in zip(df1.geometry, sidx):
new = reduce(
lambda x, y: x.difference(y), [geom] + list(df2.geometry.iloc[neighbours])
)
new_g.append(new)
differences = GeoSeries(new_g, index=df1.index, crs=df1.crs)
poly_ix = differences.type.isin(["Polygon", "MultiPolygon"])
differences.loc[poly_ix] = differences[poly_ix].buffer(0)
geom_diff = differences[~differences.is_empty].copy()
dfdiff = df1[~differences.is_empty].copy()
dfdiff[dfdiff._geometry_column_name] = geom_diff
return dfdiff
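# Note: in _overlay_difference above, reduce() subtracts every intersecting
# df2 geometry from each df1 geometry in turn, so what survives is the part of
# the df1 geometry not covered by df2; rows whose result is empty are dropped.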
def _overlay_symmetric_diff(df1, df2):
"""
Overlay Symmetric Difference operation used in overlay function
"""
dfdiff1 = _overlay_difference(df1, df2)
dfdiff2 = _overlay_difference(df2, df1)
dfdiff1["__idx1"] = range(len(dfdiff1))
dfdiff2["__idx2"] = range(len(dfdiff2))
dfdiff1["__idx2"] = np.nan
dfdiff2["__idx1"] = np.nan
# ensure geometry name (otherwise merge goes wrong)
_ensure_geometry_column(dfdiff1)
_ensure_geometry_column(dfdiff2)
# combine both 'difference' dataframes
dfsym = dfdiff1.merge(
dfdiff2, on=["__idx1", "__idx2"], how="outer", suffixes=("_1", "_2")
)
geometry = dfsym.geometry_1.copy()
geometry.name = "geometry"
# https://github.com/pandas-dev/pandas/issues/26468 use loc for now
geometry.loc[dfsym.geometry_1.isnull()] = dfsym.loc[
dfsym.geometry_1.isnull(), "geometry_2"
]
dfsym.drop(["geometry_1", "geometry_2"], axis=1, inplace=True)
dfsym.reset_index(drop=True, inplace=True)
dfsym = GeoDataFrame(dfsym, geometry=geometry, crs=df1.crs)
return dfsym
def _overlay_union(df1, df2):
"""
Overlay Union operation used in overlay function
"""
dfinter = _overlay_intersection(df1, df2)
dfsym = _overlay_symmetric_diff(df1, df2)
dfunion = pd.concat([dfinter, dfsym], ignore_index=True, sort=False)
# keep geometry column last
columns = list(dfunion.columns)
columns.remove("geometry")
columns.append("geometry")
return dfunion.reindex(columns=columns)
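# Note: the union result is simply the pairwise intersections concatenated
# with the symmetric difference (the parts unique to each frame); the
# 'identity' mode below keeps only the rows that originate from df1
# (__idx1 not null).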
def overlay(df1, df2, how="intersection", keep_geom_type=None, make_valid=True):
"""Perform spatial overlay between two GeoDataFrames.
Currently only supports data GeoDataFrames with uniform geometry types,
i.e. containing only (Multi)Polygons, or only (Multi)Points, or a
combination of (Multi)LineString and LinearRing shapes.
Implements several methods that are all effectively subsets of the union.
See the User Guide page :doc:`../../user_guide/set_operations` for details.
Parameters
----------
df1 : GeoDataFrame
df2 : GeoDataFrame
how : string
Method of spatial overlay: 'intersection', 'union',
'identity', 'symmetric_difference' or 'difference'.
keep_geom_type : bool
If True, return only geometries of the same geometry type as df1 has,
if False, return all resulting geometries. Default is None,
which will set keep_geom_type to True but warn upon dropping
geometries.
make_valid : bool, default True
If True, any invalid input geometries are corrected with a call to `buffer(0)`,
if False, a `ValueError` is raised if any input geometries are invalid.
Returns
-------
df : GeoDataFrame
GeoDataFrame with new set of polygons and attributes
resulting from the overlay
Examples
--------
>>> from shapely.geometry import Polygon
>>> polys1 = geopandas.GeoSeries([Polygon([(0,0), (2,0), (2,2), (0,2)]),
... Polygon([(2,2), (4,2), (4,4), (2,4)])])
>>> polys2 = geopandas.GeoSeries([Polygon([(1,1), (3,1), (3,3), (1,3)]),
... Polygon([(3,3), (5,3), (5,5), (3,5)])])
>>> df1 = geopandas.GeoDataFrame({'geometry': polys1, 'df1_data':[1,2]})
>>> df2 = geopandas.GeoDataFrame({'geometry': polys2, 'df2_data':[1,2]})
>>> geopandas.overlay(df1, df2, how='union')
df1_data df2_data geometry
0 1.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....
1 2.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....
2 2.0 2.0 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....
3 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....
4 2.0 NaN MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000...
5 NaN 1.0 MULTIPOLYGON (((2.00000 2.00000, 3.00000 2.000...
6 NaN 2.0 POLYGON ((3.00000 5.00000, 5.00000 5.00000, 5....
>>> geopandas.overlay(df1, df2, how='intersection')
df1_data df2_data geometry
0 1 1 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....
1 2 1 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....
2 2 2 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....
>>> geopandas.overlay(df1, df2, how='symmetric_difference')
df1_data df2_data geometry
0 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....
1 2.0 NaN MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000...
2 NaN 1.0 MULTIPOLYGON (((2.00000 2.00000, 3.00000 2.000...
3 NaN 2.0 POLYGON ((3.00000 5.00000, 5.00000 5.00000, 5....
>>> geopandas.overlay(df1, df2, how='difference')
geometry df1_data
0 POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0.... 1
1 MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000... 2
>>> geopandas.overlay(df1, df2, how='identity')
df1_data df2_data geometry
0 1.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....
1 2.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....
2 2.0 2.0 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....
3 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....
4 2.0 NaN MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000...
See also
--------
sjoin : spatial join
GeoDataFrame.overlay : equivalent method
Notes
------
Every operation in GeoPandas is planar, i.e. the potential third
dimension is not taken into account.
"""
# Allowed operations
allowed_hows = [
"intersection",
"union",
"identity",
"symmetric_difference",
"difference", # aka erase
]
# Error Messages
if how not in allowed_hows:
raise ValueError(
"`how` was '{0}' but is expected to be in {1}".format(how, allowed_hows)
)
if isinstance(df1, GeoSeries) or isinstance(df2, GeoSeries):
raise NotImplementedError(
"overlay currently only implemented for " "GeoDataFrames"
)
if not _check_crs(df1, df2):
_crs_mismatch_warn(df1, df2, stacklevel=3)
if keep_geom_type is None:
keep_geom_type = True
keep_geom_type_warning = True
else:
keep_geom_type_warning = False
polys = ["Polygon", "MultiPolygon"]
lines = ["LineString", "MultiLineString", "LinearRing"]
points = ["Point", "MultiPoint"]
for i, df in enumerate([df1, df2]):
poly_check = df.geom_type.isin(polys).any()
lines_check = df.geom_type.isin(lines).any()
points_check = df.geom_type.isin(points).any()
if sum([poly_check, lines_check, points_check]) > 1:
raise NotImplementedError(
"df{} contains mixed geometry types.".format(i + 1)
)
if how == "intersection":
box_gdf1 = df1.total_bounds
box_gdf2 = df2.total_bounds
if not (
((box_gdf1[0] <= box_gdf2[2]) and (box_gdf2[0] <= box_gdf1[2]))
and ((box_gdf1[1] <= box_gdf2[3]) and (box_gdf2[1] <= box_gdf1[3]))
):
result = df1.iloc[:0].merge(
df2.iloc[:0].drop(df2.geometry.name, axis=1),
left_index=True,
right_index=True,
suffixes=("_1", "_2"),
)
return result[
result.columns.drop(df1.geometry.name).tolist() + [df1.geometry.name]
]
# Computations
def _make_valid(df):
df = df.copy()
if df.geom_type.isin(polys).all():
mask = ~df.geometry.is_valid
col = df._geometry_column_name
if make_valid:
df.loc[mask, col] = df.loc[mask, col].buffer(0)
elif mask.any():
raise ValueError(
"You have passed make_valid=False along with "
f"{mask.sum()} invalid input geometries. "
"Use make_valid=True or make sure that all geometries "
"are valid before using overlay."
)
return df
df1 = _make_valid(df1)
df2 = _make_valid(df2)
with warnings.catch_warnings(): # CRS checked above, suppress array-level warning
warnings.filterwarnings("ignore", message="CRS mismatch between the CRS")
if how == "difference":
result = _overlay_difference(df1, df2)
elif how == "intersection":
result = _overlay_intersection(df1, df2)
elif how == "symmetric_difference":
result = _overlay_symmetric_diff(df1, df2)
elif how == "union":
result = _overlay_union(df1, df2)
elif how == "identity":
dfunion = _overlay_union(df1, df2)
result = dfunion[dfunion["__idx1"].notnull()].copy()
if how in ["intersection", "symmetric_difference", "union", "identity"]:
result.drop(["__idx1", "__idx2"], axis=1, inplace=True)
if keep_geom_type:
geom_type = df1.geom_type.iloc[0]
# First we filter the geometry types inside GeometryCollections objects
# (e.g. GeometryCollection([polygon, point]) -> polygon)
# we do this separately on only the relevant rows, as this is an expensive
# operation (an expensive no-op for geometry types other than collections)
is_collection = result.geom_type == "GeometryCollection"
if is_collection.any():
geom_col = result._geometry_column_name
collections = result[[geom_col]][is_collection]
exploded = collections.reset_index(drop=True).explode(index_parts=True)
exploded = exploded.reset_index(level=0)
orig_num_geoms_exploded = exploded.shape[0]
if geom_type in polys:
exploded.loc[~exploded.geom_type.isin(polys), geom_col] = None
elif geom_type in lines:
exploded.loc[~exploded.geom_type.isin(lines), geom_col] = None
elif geom_type in points:
exploded.loc[~exploded.geom_type.isin(points), geom_col] = None
else:
raise TypeError(
"`keep_geom_type` does not support {}.".format(geom_type)
)
num_dropped_collection = (
orig_num_geoms_exploded - exploded.geometry.isna().sum()
)
# level_0 created with above reset_index operation
# and represents the original geometry collections
# TODO avoiding dissolve to call unary_union in this case could further
# improve performance (we only need to collect geometries in their
# respective Multi version)
dissolved = exploded.dissolve(by="level_0")
result.loc[is_collection, geom_col] = dissolved[geom_col].values
else:
num_dropped_collection = 0
        # Now we filter all geometries (in theory we don't need to do this
        # again for the rows handled above for GeometryCollections, but
        # filtering them out is probably more expensive than simply including
        # them, since this typically concerns only a few rows)
orig_num_geoms = result.shape[0]
if geom_type in polys:
result = result.loc[result.geom_type.isin(polys)]
elif geom_type in lines:
result = result.loc[result.geom_type.isin(lines)]
elif geom_type in points:
result = result.loc[result.geom_type.isin(points)]
else:
raise TypeError("`keep_geom_type` does not support {}.".format(geom_type))
num_dropped = orig_num_geoms - result.shape[0]
if (num_dropped > 0 or num_dropped_collection > 0) and keep_geom_type_warning:
warnings.warn(
"`keep_geom_type=True` in overlay resulted in {} dropped "
"geometries of different geometry types than df1 has. "
"Set `keep_geom_type=False` to retain all "
"geometries".format(num_dropped + num_dropped_collection),
UserWarning,
stacklevel=2,
)
result.reset_index(drop=True, inplace=True)
return result
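def _example_overlay():
    """A minimal usage sketch mirroring the docstring examples above; not part
    of the public API. shapely is assumed to be available (it is a geopandas
    dependency).
    """
    from shapely.geometry import Polygon
    polys1 = GeoSeries([Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
                        Polygon([(2, 2), (4, 2), (4, 4), (2, 4)])])
    polys2 = GeoSeries([Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
                        Polygon([(3, 3), (5, 3), (5, 5), (3, 5)])])
    df1 = GeoDataFrame({'geometry': polys1, 'df1_data': [1, 2]})
    df2 = GeoDataFrame({'geometry': polys2, 'df2_data': [1, 2]})
    # 'union' returns the intersections plus the parts unique to each frame.
    return overlay(df1, df2, how='union')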
|
|
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import model_base
from neutron.db import models_v2
from neutron.db import servicetype_db as st_db
import sqlalchemy as sa
from sqlalchemy.ext import orderinglist
from sqlalchemy import orm
from neutron_lbaas.services.loadbalancer import constants as lb_const
class SessionPersistenceV2(model_base.BASEV2):
__tablename__ = "lbaas_sessionpersistences"
pool_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_pools.id"),
primary_key=True,
nullable=False)
type = sa.Column(sa.Enum(*lb_const.SUPPORTED_SP_TYPES,
name="lbaas_sesssionpersistences_typev2"),
nullable=False)
cookie_name = sa.Column(sa.String(1024), nullable=True)
class LoadBalancerStatistics(model_base.BASEV2):
"""Represents load balancer statistics."""
NAME = 'loadbalancer_stats'
__tablename__ = "lbaas_loadbalancer_statistics"
loadbalancer_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_loadbalancers.id"),
primary_key=True,
nullable=False)
bytes_in = sa.Column(sa.BigInteger, nullable=False)
bytes_out = sa.Column(sa.BigInteger, nullable=False)
active_connections = sa.Column(sa.BigInteger, nullable=False)
total_connections = sa.Column(sa.BigInteger, nullable=False)
@orm.validates('bytes_in', 'bytes_out',
'active_connections', 'total_connections')
def validate_non_negative_int(self, key, value):
if value < 0:
data = {'key': key, 'value': value}
raise ValueError(_('The %(key)s field can not have '
'negative value. '
'Current value is %(value)d.') % data)
return value
class MemberV2(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron load balancer member."""
NAME = 'member'
__tablename__ = "lbaas_members"
__table_args__ = (
sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port',
name='uniq_pool_address_port_v2'),
)
pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"),
nullable=False)
address = sa.Column(sa.String(64), nullable=False)
protocol_port = sa.Column(sa.Integer, nullable=False)
weight = sa.Column(sa.Integer, nullable=True)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
subnet_id = sa.Column(sa.String(36), nullable=True)
provisioning_status = sa.Column(sa.String(16), nullable=False)
operating_status = sa.Column(sa.String(16), nullable=False)
@property
def root_loadbalancer(self):
return self.pool.listener.loadbalancer
class HealthMonitorV2(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron load balancer healthmonitor."""
NAME = 'healthmonitor'
__tablename__ = "lbaas_healthmonitors"
type = sa.Column(sa.Enum(*lb_const.SUPPORTED_HEALTH_MONITOR_TYPES,
name="healthmonitors_typev2"),
nullable=False)
delay = sa.Column(sa.Integer, nullable=False)
timeout = sa.Column(sa.Integer, nullable=False)
max_retries = sa.Column(sa.Integer, nullable=False)
http_method = sa.Column(sa.String(16), nullable=True)
url_path = sa.Column(sa.String(255), nullable=True)
expected_codes = sa.Column(sa.String(64), nullable=True)
provisioning_status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
@property
def root_loadbalancer(self):
return self.pool.listener.loadbalancer
class PoolV2(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron load balancer pool."""
NAME = 'pool'
__tablename__ = "lbaas_pools"
name = sa.Column(sa.String(255), nullable=True)
description = sa.Column(sa.String(255), nullable=True)
healthmonitor_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_healthmonitors.id"),
unique=True,
nullable=True)
protocol = sa.Column(sa.Enum(*lb_const.POOL_SUPPORTED_PROTOCOLS,
name="pool_protocolsv2"),
nullable=False)
lb_algorithm = sa.Column(sa.Enum(*lb_const.SUPPORTED_LB_ALGORITHMS,
name="lb_algorithmsv2"),
nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
provisioning_status = sa.Column(sa.String(16), nullable=False)
operating_status = sa.Column(sa.String(16), nullable=False)
members = orm.relationship(MemberV2,
backref=orm.backref("pool", uselist=False),
cascade="all, delete-orphan",
lazy='joined')
healthmonitor = orm.relationship(
HealthMonitorV2,
backref=orm.backref("pool", uselist=False),
lazy='joined')
session_persistence = orm.relationship(
SessionPersistenceV2,
uselist=False,
backref=orm.backref("pool", uselist=False),
cascade="all, delete-orphan",
lazy='joined')
@property
def root_loadbalancer(self):
return self.listener.loadbalancer
class LoadBalancer(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron load balancer."""
NAME = 'loadbalancer'
__tablename__ = "lbaas_loadbalancers"
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
vip_subnet_id = sa.Column(sa.String(36), nullable=False)
vip_port_id = sa.Column(sa.String(36), sa.ForeignKey(
'ports.id', name='fk_lbaas_loadbalancers_ports_id'))
vip_address = sa.Column(sa.String(36))
provisioning_status = sa.Column(sa.String(16), nullable=False)
operating_status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
vip_port = orm.relationship(models_v2.Port)
stats = orm.relationship(
LoadBalancerStatistics,
uselist=False,
backref=orm.backref("loadbalancer", uselist=False),
cascade="all, delete-orphan",
lazy='joined')
provider = orm.relationship(
st_db.ProviderResourceAssociation,
uselist=False,
lazy="joined",
primaryjoin="LoadBalancer.id==ProviderResourceAssociation.resource_id",
foreign_keys=[st_db.ProviderResourceAssociation.resource_id],
# this is only for old API backwards compatibility because when a load
# balancer is deleted the pool ID should be the same as the load
# balancer ID and should not be cleared out in this table
viewonly=True
)
@property
def root_loadbalancer(self):
return self
class SNI(model_base.BASEV2):
"""Many-to-many association between Listener and TLS container ids
Making the SNI certificates list, ordered using the position
"""
NAME = 'sni'
__tablename__ = "lbaas_sni"
listener_id = sa.Column(sa.String(36),
sa.ForeignKey("lbaas_listeners.id"),
primary_key=True,
nullable=False)
tls_container_id = sa.Column(sa.String(36),
primary_key=True,
nullable=False)
position = sa.Column(sa.Integer)
@property
def root_loadbalancer(self):
return self.listener.loadbalancer
class Listener(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 neutron listener."""
NAME = 'listener'
__tablename__ = "lbaas_listeners"
__table_args__ = (
sa.schema.UniqueConstraint('loadbalancer_id', 'protocol_port',
name='uniq_loadbalancer_listener_port'),
)
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
default_pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"),
unique=True)
loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey(
"lbaas_loadbalancers.id"))
protocol = sa.Column(sa.Enum(*lb_const.LISTENER_SUPPORTED_PROTOCOLS,
name="listener_protocolsv2"),
nullable=False)
default_tls_container_id = sa.Column(sa.String(36),
default=None, nullable=True)
sni_containers = orm.relationship(
SNI,
backref=orm.backref("listener", uselist=False),
uselist=True,
lazy="joined",
primaryjoin="Listener.id==SNI.listener_id",
order_by='SNI.position',
collection_class=orderinglist.ordering_list(
'position'),
foreign_keys=[SNI.listener_id],
cascade="all, delete-orphan"
)
protocol_port = sa.Column(sa.Integer, nullable=False)
connection_limit = sa.Column(sa.Integer)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
provisioning_status = sa.Column(sa.String(16), nullable=False)
operating_status = sa.Column(sa.String(16), nullable=False)
default_pool = orm.relationship(
PoolV2, backref=orm.backref("listener", uselist=False), lazy='joined')
loadbalancer = orm.relationship(
LoadBalancer, backref=orm.backref("listeners"), lazy='joined')
@property
def root_loadbalancer(self):
return self.loadbalancer
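# Object-graph summary (derived from the relationships above): a LoadBalancer
# has many Listeners; a Listener may point at a default PoolV2; a PoolV2 owns
# its MemberV2 rows plus optional HealthMonitorV2 and SessionPersistenceV2
# records. The root_loadbalancer properties walk this chain back up, e.g.
# member.pool.listener.loadbalancer.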
|
|
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from thrift.protocol.TCompactProtocol import TCompactProtocol
from thrift.protocol.TJSONProtocol import TJSONProtocol
from thrift.transport import TTransport
from thrift.protocol.TBase import TBase
from unimodel.model import Unimodel, Field
from unimodel.backends.base import Serializer
from unimodel import types
from unimodel.util import get_backend_type
from contextlib import contextmanager
import json
class ThriftSpecFactory(object):
def __init__(self, model_registry=None):
self.model_registry = model_registry
if self.model_registry is None:
from unimodel.model import ModelRegistry
self.model_registry = ModelRegistry()
self._spec_cache = {}
self.tuple_type_cache = {}
def get_spec(self, struct_class):
if struct_class not in self._spec_cache:
self._spec_cache[
struct_class] = self.get_spec_for_struct(struct_class)
return self._spec_cache[struct_class]
def get_spec_for_struct(self, struct_class):
field_list = sorted(
struct_class.get_field_definitions(),
key=lambda x: x.field_id)
thrift_spec = [None]
        # save the spec to the cache so recursive data structures work.
self._spec_cache[struct_class] = thrift_spec
for f in field_list:
thrift_spec.append(self.get_spec_for_field(f))
return thrift_spec
def get_tuple_type_parameter(self, field_type):
# tuple_id =
# (implementation_class, self.get_spec(implementation_class))
ta = ThriftTupleAdapter(Field(field_type), None)
return (ta.tuple_struct_class, self.get_spec(ta.tuple_struct_class))
def get_spec_type_parameter(self, field_type):
""" Returns value 3 of the element
in thrift_spec which defines this field. """
# tuples are encoded as structs
if isinstance(field_type, types.Tuple):
return self.get_tuple_type_parameter(field_type)
# structs are a special case
if isinstance(field_type, types.Struct):
interface_class = field_type.get_python_type()
implementation_class = self.model_registry.lookup(interface_class)
return (implementation_class, self.get_spec(implementation_class))
# If there are no type parameters, return None
if not field_type.type_parameters:
return None
# lists, sets, maps
spec_list = []
for t in field_type.type_parameters:
# for each type_parameter, first add the type's id
spec_list.append(get_backend_type("thrift", t.type_id))
# then the type's parameters
spec_list.append(self.get_spec_type_parameter(t))
return spec_list
def get_spec_for_field(self, field):
return (
field.field_id,
get_backend_type("thrift", field.field_type.type_id),
field.field_name,
self.get_spec_type_parameter(field.field_type),
field.default,)
class ThriftTupleAdapter(object):
def __init__(self, field_definition, field_value):
self.field_definition = field_definition
self.field_value = field_value
# This is probably very inefficient,
# maybe we can optimize it someday
self.tuple_struct_class = self.get_tuple_struct_class()
def get_tuple_struct_name(self):
return "%s_tuple" % (self.field_definition.field_name)
def get_tuple_struct_class(self):
field_dict = {}
for ix in xrange(0, len(self.field_definition.field_type.type_parameters)):
field_name = "tuple_%s" % ix
type_parameter = self.field_definition.field_type.type_parameters[ix]
field_dict[field_name] = Field(type_parameter)
return type(
self.get_tuple_struct_name(),
(Unimodel,),
field_dict)
def write(self, protocol):
obj = self.tuple_struct_class()
for ix in xrange(0, len(self.field_value)):
obj["tuple_%s" % ix] = self.field_value[ix]
return obj.write(protocol)
@classmethod
def to_tuple(cls, tuple_struct_instance):
elements = []
for f in sorted(tuple_struct_instance.get_field_definitions(), key=lambda x: x.field_id):
elements.append(tuple_struct_instance[f.field_name])
return tuple(elements)
class ThriftValueConverter(object):
def to_internal(self, field_definition, field_value):
if isinstance(field_definition.field_type, types.UTF8):
# TODO: not python3 friendly
if type(field_value) == unicode:
pass
else:
field_value = field_value.decode('utf-8')
if isinstance(field_definition.field_type, types.BigInt):
if field_value is None:
field_value = 0
field_value = long(field_value)
if isinstance(field_definition.field_type, types.JSONData):
field_value = json.loads(field_value)
if isinstance(field_definition.field_type, types.Tuple):
field_value = ThriftTupleAdapter.to_tuple(field_value)
return field_value
def from_internal(self, field_definition, field_value):
if isinstance(field_definition.field_type, types.UTF8):
field_value = field_value.encode('utf-8')
if isinstance(field_definition.field_type, types.BigInt):
field_value = None if field_value is None else str(field_value)
if isinstance(field_definition.field_type, types.JSONData):
field_value = json.dumps(field_value)
if isinstance(field_definition.field_type, types.Tuple):
field_value = ThriftTupleAdapter(field_definition, field_value)
return field_value
def make_protocol_factory(protocol_class):
conv = ThriftValueConverter()
@contextmanager
def converter(obj):
old_value_converter = getattr(obj, '_value_converter', None)
try:
obj._set_value_converter(conv)
yield
finally:
obj._set_value_converter(old_value_converter)
# invoke converter when reading / writing fields
class Protocol(protocol_class):
def writeStruct(self, obj, thrift_spec):
with converter(obj):
return protocol_class.writeStruct(self, obj, thrift_spec)
def readStruct(self, obj, thrift_spec):
with converter(obj):
return protocol_class.readStruct(self, obj, thrift_spec)
class ProtocolFactory(object):
def getProtocol(self, trans):
return Protocol(trans)
return ProtocolFactory()
class ThriftProtocol(object):
factories = [
('binary', make_protocol_factory(TBinaryProtocol)),
('json', make_protocol_factory(TJSONProtocol)),
('compact', make_protocol_factory(TCompactProtocol))
]
@classmethod
def iter(cls):
current = 0
while current < len(cls.factories):
yield cls.factories[current]
current += 1
@classmethod
def lookup_by_id(cls, protocol_id):
return (protocol_id, ) + cls.factories[protocol_id]
@classmethod
def lookup_by_name(cls, protocol_name):
for i in xrange(0, len(cls.factories)):
if cls.factories[i][0] == protocol_name:
return (i, ) + cls.factories[i]
return None
def __init__(self, protocol_name_or_id):
if isinstance(protocol_name_or_id, int):
protocol = self.lookup_by_id(protocol_name_or_id)
else:
protocol = self.lookup_by_name(protocol_name_or_id)
self.id, self.name, self.factory = protocol
default_protocol_factory = ThriftProtocol('binary').factory
class ThriftSerializer(Serializer):
def __init__(
self,
protocol_factory=default_protocol_factory,
**kwargs):
super(ThriftSerializer, self).__init__(**kwargs)
self.protocol_factory = protocol_factory
self.spec_factory = ThriftSpecFactory(self.model_registry)
def serialize(self, obj):
transport = TTransport.TMemoryBuffer()
protocol = self.protocol_factory.getProtocol(transport)
setattr(protocol, "serializer", self)
self.write_to_stream(obj, protocol)
transport._buffer.seek(0)
return transport._buffer.getvalue()
def deserialize(self, cls, stream):
obj = self.model_registry.lookup(cls)()
transport = TTransport.TMemoryBuffer()
transport._buffer.write(stream)
transport._buffer.seek(0)
protocol = self.protocol_factory.getProtocol(transport)
setattr(protocol, "serializer", self)
self.read_from_stream(obj, protocol)
return obj
def write_to_stream(self, obj, protocol):
return protocol.writeStruct(
obj,
self.spec_factory.get_spec_for_struct(
obj.__class__))
def read_from_stream(self, obj, protocol):
protocol.readStruct(
obj,
self.spec_factory.get_spec_for_struct(
obj.__class__))
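# A minimal usage sketch (the Person model below is hypothetical and not part
# of this module; item assignment on model instances mirrors the usage in
# ThriftTupleAdapter.write above):
#
#     class Person(Unimodel):
#         name = Field(types.UTF8())
#
#     person = Person()
#     person["name"] = u"Ada"
#     serializer = ThriftSerializer(
#         protocol_factory=ThriftProtocol('json').factory)
#     payload = serializer.serialize(person)
#     restored = serializer.deserialize(Person, payload)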
|
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute API that proxies via Cells Service."""
from nova import availability_zones
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.cells import utils as cells_utils
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import vm_states
from nova import exception
from nova.openstack.common import excutils
check_instance_state = compute_api.check_instance_state
wrap_check_policy = compute_api.wrap_check_policy
check_policy = compute_api.check_policy
check_instance_lock = compute_api.check_instance_lock
check_instance_cell = compute_api.check_instance_cell
class ComputeRPCAPIRedirect(object):
    # NOTE(comstud): This is a list of methods where the cells_rpcapi
# and the compute_rpcapi methods have the same signatures. This
# is for transitioning to a common interface where we can just
# swap out the compute_rpcapi class with the cells_rpcapi class.
cells_compatible = ['start_instance', 'stop_instance',
'reboot_instance', 'suspend_instance',
'resume_instance']
def __init__(self, cells_rpcapi):
self.cells_rpcapi = cells_rpcapi
def __getattr__(self, key):
if key in self.cells_compatible:
return getattr(self.cells_rpcapi, key)
def _noop_rpc_wrapper(*args, **kwargs):
return None
return _noop_rpc_wrapper
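# Note: ComputeRPCAPIRedirect turns any compute RPC method that is not listed
# in cells_compatible into a silent no-op, so the API cell never casts or
# calls directly to a compute manager; the child cell does the real work.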
class SchedulerRPCAPIRedirect(object):
def __init__(self, cells_rpcapi_obj):
self.cells_rpcapi = cells_rpcapi_obj
def __getattr__(self, key):
def _noop_rpc_wrapper(*args, **kwargs):
return None
return _noop_rpc_wrapper
class ConductorTaskRPCAPIRedirect(object):
def __init__(self, cells_rpcapi_obj):
self.cells_rpcapi = cells_rpcapi_obj
def __getattr__(self, key):
def _noop_rpc_wrapper(*args, **kwargs):
return None
return _noop_rpc_wrapper
def build_instances(self, context, **kwargs):
self.cells_rpcapi.build_instances(context, **kwargs)
class ComputeRPCProxyAPI(compute_rpcapi.ComputeAPI):
"""Class used to substitute Compute RPC API that will proxy
via the cells manager to a compute manager in a child cell.
"""
def __init__(self, *args, **kwargs):
super(ComputeRPCProxyAPI, self).__init__(*args, **kwargs)
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def cast(self, ctxt, msg, topic=None, version=None):
self._set_version(msg, version)
topic = self._get_topic(topic)
self.cells_rpcapi.proxy_rpc_to_manager(ctxt, msg, topic)
def call(self, ctxt, msg, topic=None, version=None, timeout=None):
self._set_version(msg, version)
topic = self._get_topic(topic)
return self.cells_rpcapi.proxy_rpc_to_manager(ctxt, msg, topic,
call=True,
timeout=timeout)
class ComputeCellsAPI(compute_api.API):
def __init__(self, *args, **kwargs):
super(ComputeCellsAPI, self).__init__(*args, **kwargs)
self.cells_rpcapi = cells_rpcapi.CellsAPI()
# Avoid casts/calls directly to compute
self.compute_rpcapi = ComputeRPCAPIRedirect(self.cells_rpcapi)
# Redirect scheduler run_instance to cells.
self.scheduler_rpcapi = SchedulerRPCAPIRedirect(self.cells_rpcapi)
# Redirect conductor build_instances to cells
self._compute_task_api = ConductorTaskRPCAPIRedirect(self.cells_rpcapi)
self._cell_type = 'api'
def _cast_to_cells(self, context, instance, method, *args, **kwargs):
instance_uuid = instance['uuid']
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
self.cells_rpcapi.cast_compute_api_method(context, cell_name,
method, instance_uuid, *args, **kwargs)
def _call_to_cells(self, context, instance, method, *args, **kwargs):
instance_uuid = instance['uuid']
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
return self.cells_rpcapi.call_compute_api_method(context, cell_name,
method, instance_uuid, *args, **kwargs)
def _check_requested_networks(self, context, requested_networks):
"""Override compute API's checking of this. It'll happen in
child cell
"""
return
def _validate_image_href(self, context, image_href):
"""Override compute API's checking of this. It'll happen in
child cell
"""
return
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None, image_id=None):
"""Backup the given instance."""
image_meta = super(ComputeCellsAPI, self).backup(context,
instance, name, backup_type, rotation,
extra_properties=extra_properties, image_id=image_id)
image_id = image_meta['id']
self._cast_to_cells(context, instance, 'backup', name,
backup_type=backup_type, rotation=rotation,
extra_properties=extra_properties, image_id=image_id)
return image_meta
def snapshot(self, context, instance, name, extra_properties=None,
image_id=None):
"""Snapshot the given instance."""
image_meta = super(ComputeCellsAPI, self).snapshot(context,
instance, name, extra_properties=extra_properties,
image_id=image_id)
image_id = image_meta['id']
self._cast_to_cells(context, instance, 'snapshot',
name, extra_properties=extra_properties, image_id=image_id)
return image_meta
def create(self, *args, **kwargs):
"""We can use the base functionality, but I left this here just
for completeness.
"""
return super(ComputeCellsAPI, self).create(*args, **kwargs)
def update_state(self, context, instance, new_state):
"""Updates the state of a compute instance.
For example to 'active' or 'error'.
Also sets 'task_state' to None.
Used by admin_actions api
:param context: The security context
:param instance: The instance to update
:param new_state: A member of vm_state to change
the instance's state to,
eg. 'active'
"""
self.update(context, instance,
pass_on_state_change=True,
vm_state=new_state,
task_state=None)
def update(self, context, instance, pass_on_state_change=False, **kwargs):
"""
Update an instance.
:param pass_on_state_change: if True, the state change will be passed
on to child cells
"""
cell_name = instance['cell_name']
if cell_name and self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method='update')
rv = super(ComputeCellsAPI, self).update(context,
instance, **kwargs)
kwargs_copy = kwargs.copy()
if not pass_on_state_change:
# We need to skip vm_state/task_state updates... those will
# happen via a _cast_to_cells when running a different
# compute api method
kwargs_copy.pop('vm_state', None)
kwargs_copy.pop('task_state', None)
if kwargs_copy:
try:
self._cast_to_cells(context, instance, 'update',
**kwargs_copy)
except exception.InstanceUnknownCell:
pass
return rv
def _local_delete(self, context, instance, bdms):
# This will get called for every delete in the API cell
# because _delete() in compute/api.py will not find a
# service when checking if it's up.
# We only need to take action if there's no cell_name. Our
# overrides of delete() and soft_delete() will take care of
# the rest.
cell_name = instance['cell_name']
if not cell_name:
return super(ComputeCellsAPI, self)._local_delete(context,
instance, bdms)
def soft_delete(self, context, instance):
self._handle_cell_delete(context, instance,
super(ComputeCellsAPI, self).soft_delete, 'soft_delete')
def delete(self, context, instance):
self._handle_cell_delete(context, instance,
super(ComputeCellsAPI, self).delete, 'delete')
def _handle_cell_delete(self, context, instance, method, method_name):
"""Terminate an instance."""
# We can't use the decorator because we have special logic in the
# case where we don't know the cell_name...
cell_name = instance['cell_name']
if cell_name and self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method=method_name)
method(context, instance)
try:
self._cast_to_cells(context, instance, method_name)
except exception.InstanceUnknownCell:
# If there's no cell, there's also no host... which means
# the instance was destroyed from the DB here. Let's just
# broadcast a message down to all cells and hope this ends
# up resolving itself... Worst case, the instance will
# show back up again here.
delete_type = 'soft' if method_name == 'soft_delete' else 'hard'
self.cells_rpcapi.instance_delete_everywhere(context,
instance, delete_type)
@check_instance_cell
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
super(ComputeCellsAPI, self).restore(context, instance)
self._cast_to_cells(context, instance, 'restore')
@check_instance_cell
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
super(ComputeCellsAPI, self).force_delete(context, instance)
self._cast_to_cells(context, instance, 'force_delete')
@check_instance_cell
def rebuild(self, context, instance, *args, **kwargs):
"""Rebuild the given instance with the provided attributes."""
super(ComputeCellsAPI, self).rebuild(context, instance, *args,
**kwargs)
self._cast_to_cells(context, instance, 'rebuild', *args, **kwargs)
@check_instance_cell
def evacuate(self, context, instance, *args, **kwargs):
"""Evacuate the given instance with the provided attributes."""
super(ComputeCellsAPI, self).evacuate(context, instance, *args,
**kwargs)
self._cast_to_cells(context, instance, 'evacuate', *args, **kwargs)
@check_instance_state(vm_state=[vm_states.RESIZED])
@check_instance_cell
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
super(ComputeCellsAPI, self).revert_resize(context, instance)
self._cast_to_cells(context, instance, 'revert_resize')
@check_instance_state(vm_state=[vm_states.RESIZED])
@check_instance_cell
def confirm_resize(self, context, instance):
"""Confirms a migration/resize and deletes the 'old' instance."""
super(ComputeCellsAPI, self).confirm_resize(context, instance)
self._cast_to_cells(context, instance, 'confirm_resize')
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
@check_instance_cell
def resize(self, context, instance, flavor_id=None, *args, **kwargs):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
super(ComputeCellsAPI, self).resize(context, instance,
flavor_id=flavor_id, *args,
**kwargs)
# NOTE(johannes): If we get to this point, then we know the
# specified flavor_id is valid and exists. We'll need to load
# it again, but that should be safe.
old_instance_type = flavors.extract_flavor(instance)
if not flavor_id:
new_instance_type = old_instance_type
else:
new_instance_type = flavors.get_flavor_by_flavor_id(
flavor_id, read_deleted="no")
# NOTE(johannes): Later, when the resize is confirmed or reverted,
# the superclass implementations of those methods will need access
# to a local migration record for quota reasons. We don't need
# source and/or destination information, just the old and new
# flavors. Status is set to 'finished' since nothing else
# will update the status along the way.
self.db.migration_create(context.elevated(),
{'instance_uuid': instance['uuid'],
'old_instance_type_id': old_instance_type['id'],
'new_instance_type_id': new_instance_type['id'],
'status': 'finished'})
# FIXME(comstud): pass new instance_type object down to a method
# that'll unfold it
self._cast_to_cells(context, instance, 'resize', flavor_id=flavor_id,
*args, **kwargs)
@check_instance_cell
def add_fixed_ip(self, context, instance, *args, **kwargs):
"""Add fixed_ip from specified network to given instance."""
super(ComputeCellsAPI, self).add_fixed_ip(context, instance,
*args, **kwargs)
self._cast_to_cells(context, instance, 'add_fixed_ip',
*args, **kwargs)
@check_instance_cell
def remove_fixed_ip(self, context, instance, *args, **kwargs):
"""Remove fixed_ip from specified network to given instance."""
super(ComputeCellsAPI, self).remove_fixed_ip(context, instance,
*args, **kwargs)
self._cast_to_cells(context, instance, 'remove_fixed_ip',
*args, **kwargs)
@check_instance_cell
def pause(self, context, instance):
"""Pause the given instance."""
super(ComputeCellsAPI, self).pause(context, instance)
self._cast_to_cells(context, instance, 'pause')
@check_instance_cell
def unpause(self, context, instance):
"""Unpause the given instance."""
super(ComputeCellsAPI, self).unpause(context, instance)
self._cast_to_cells(context, instance, 'unpause')
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
# FIXME(comstud): Cache this?
# Also: only calling super() to get state/policy checking
super(ComputeCellsAPI, self).get_diagnostics(context, instance)
return self._call_to_cells(context, instance, 'get_diagnostics')
@check_instance_cell
def rescue(self, context, instance, rescue_password=None):
"""Rescue the given instance."""
super(ComputeCellsAPI, self).rescue(context, instance,
rescue_password=rescue_password)
self._cast_to_cells(context, instance, 'rescue',
rescue_password=rescue_password)
@check_instance_cell
def unrescue(self, context, instance):
"""Unrescue the given instance."""
super(ComputeCellsAPI, self).unrescue(context, instance)
self._cast_to_cells(context, instance, 'unrescue')
@wrap_check_policy
@check_instance_cell
def shelve(self, context, instance):
"""Shelve the given instance."""
self._cast_to_cells(context, instance, 'shelve')
@wrap_check_policy
@check_instance_cell
def shelve_offload(self, context, instance):
"""Offload the shelved instance."""
super(ComputeCellsAPI, self).shelve_offload(context, instance)
self._cast_to_cells(context, instance, 'shelve_offload')
@wrap_check_policy
@check_instance_cell
def unshelve(self, context, instance):
"""Unshelve the given instance."""
super(ComputeCellsAPI, self).unshelve(context, instance)
self._cast_to_cells(context, instance, 'unshelve')
@check_instance_cell
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
super(ComputeCellsAPI, self).set_admin_password(context, instance,
password=password)
self._cast_to_cells(context, instance, 'set_admin_password',
password=password)
@check_instance_cell
def inject_file(self, context, instance, *args, **kwargs):
"""Write a file to the given instance."""
super(ComputeCellsAPI, self).inject_file(context, instance, *args,
**kwargs)
self._cast_to_cells(context, instance, 'inject_file', *args, **kwargs)
@wrap_check_policy
@check_instance_cell
def get_vnc_console(self, context, instance, console_type):
"""Get a url to a VNC Console."""
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self._call_to_cells(context, instance,
'get_vnc_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance_uuid=instance['uuid'])
return {'url': connect_info['access_url']}
@wrap_check_policy
@check_instance_cell
def get_spice_console(self, context, instance, console_type):
"""Get a url to a SPICE Console."""
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self._call_to_cells(context, instance,
'get_spice_connect_info', console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type, connect_info['host'],
connect_info['port'], connect_info['internal_access_path'],
instance_uuid=instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_cell
def get_console_output(self, context, instance, *args, **kwargs):
"""Get console output for an an instance."""
# NOTE(comstud): Calling super() just to get policy check
super(ComputeCellsAPI, self).get_console_output(context, instance,
*args, **kwargs)
return self._call_to_cells(context, instance, 'get_console_output',
*args, **kwargs)
def lock(self, context, instance):
"""Lock the given instance."""
super(ComputeCellsAPI, self).lock(context, instance)
self._cast_to_cells(context, instance, 'lock')
def unlock(self, context, instance):
"""Unlock the given instance."""
super(ComputeCellsAPI, self).unlock(context, instance)
self._cast_to_cells(context, instance, 'unlock')
@check_instance_cell
def reset_network(self, context, instance):
"""Reset networking on the instance."""
super(ComputeCellsAPI, self).reset_network(context, instance)
self._cast_to_cells(context, instance, 'reset_network')
@check_instance_cell
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
super(ComputeCellsAPI, self).inject_network_info(context, instance)
self._cast_to_cells(context, instance, 'inject_network_info')
@wrap_check_policy
@check_instance_cell
def attach_volume(self, context, instance, volume_id, device=None):
"""Attach an existing volume to an existing instance."""
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
except Exception:
with excutils.save_and_reraise_exception():
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance['uuid'], device)
return self._call_to_cells(context, instance, 'attach_volume',
volume_id, device)
@check_instance_cell
def _detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
self.volume_api.check_detach(context, volume)
self._cast_to_cells(context, instance, 'detach_volume',
volume)
@wrap_check_policy
@check_instance_cell
def associate_floating_ip(self, context, instance, address):
"""Makes calls to network_api to associate_floating_ip.
:param address: a string floating IP address
"""
self._cast_to_cells(context, instance, 'associate_floating_ip',
address)
@check_instance_cell
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
super(ComputeCellsAPI, self).delete_instance_metadata(context,
instance, key)
self._cast_to_cells(context, instance, 'delete_instance_metadata',
key)
@wrap_check_policy
@check_instance_cell
def update_instance_metadata(self, context, instance,
metadata, delete=False):
rv = super(ComputeCellsAPI, self).update_instance_metadata(context,
instance, metadata, delete=delete)
try:
self._cast_to_cells(context, instance,
'update_instance_metadata',
metadata, delete=delete)
except exception.InstanceUnknownCell:
pass
return rv
@check_instance_cell
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
super(ComputeCellsAPI, self).live_migrate(context,
instance, block_migration, disk_over_commit, host_name)
self._cast_to_cells(context, instance, 'live_migrate',
block_migration, disk_over_commit, host_name)
def get_migrations(self, context, filters):
return self.cells_rpcapi.get_migrations(context, filters)
class HostAPI(compute_api.HostAPI):
"""HostAPI() class for cells.
Implements host management related operations. Works by setting the
RPC API used by the base class to proxy via the cells manager to the
compute manager in the correct cell. Hosts specified with cells must
be in the format 'path!to!cell@host'.
DB methods in the base class are also overridden to proxy via the
cells manager.
"""
def __init__(self):
super(HostAPI, self).__init__(rpcapi=ComputeRPCProxyAPI())
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Cannot check this in API cell. This will be checked in the
target child cell.
"""
pass
def service_get_all(self, context, filters=None, set_zones=False):
if filters is None:
filters = {}
if 'availability_zone' in filters:
zone_filter = filters.pop('availability_zone')
set_zones = True
else:
zone_filter = None
services = self.cells_rpcapi.service_get_all(context,
filters=filters)
if set_zones:
services = availability_zones.set_availability_zones(context,
services)
if zone_filter is not None:
services = [s for s in services
if s['availability_zone'] == zone_filter]
return services
def service_get_by_compute_host(self, context, host_name):
return self.cells_rpcapi.service_get_by_compute_host(context,
host_name)
def service_update(self, context, host_name, binary, params_to_update):
"""
Used to enable/disable a service. For compute services, setting
'disabled' stops new builds from arriving on that host.
:param host_name: the name of the host machine that the service is
running on
:param binary: The name of the executable that the service runs as
:param params_to_update: e.g. {'disabled': True}
"""
return self.cells_rpcapi.service_update(
context, host_name, binary, params_to_update)
def instance_get_all_by_host(self, context, host_name):
"""Get all instances by host. Host might have a cell prepended
to it, so we'll need to strip it out. We don't need to proxy
this call to cells, as we have instance information here in
the API cell.
"""
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
instances = super(HostAPI, self).instance_get_all_by_host(context,
host_name)
if cell_name:
instances = [i for i in instances
if i['cell_name'] == cell_name]
return instances
def task_log_get_all(self, context, task_name, beginning, ending,
host=None, state=None):
"""Return the task logs within a given range from cells,
optionally filtering by the host and/or state. For cells, the
host should be a path like 'path!to!cell@host'. If no @host
is given, only task logs from a particular cell will be returned.
"""
return self.cells_rpcapi.task_log_get_all(context,
task_name,
beginning,
ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Get a compute node from a particular cell by its integer ID.
compute_id should be in the format of 'path!to!cell@ID'.
"""
return self.cells_rpcapi.compute_node_get(context, compute_id)
def compute_node_get_all(self, context):
return self.cells_rpcapi.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.cells_rpcapi.compute_node_get_all(context,
hypervisor_match=hypervisor_match)
def compute_node_statistics(self, context):
return self.cells_rpcapi.compute_node_stats(context)
class InstanceActionAPI(compute_api.InstanceActionAPI):
"""InstanceActionAPI() class for cells."""
def __init__(self):
super(InstanceActionAPI, self).__init__()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def actions_get(self, context, instance):
return self.cells_rpcapi.actions_get(context, instance)
def action_get_by_request_id(self, context, instance, request_id):
return self.cells_rpcapi.action_get_by_request_id(context, instance,
request_id)
def action_events_get(self, context, instance, action_id):
return self.cells_rpcapi.action_events_get(context, instance,
action_id)
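# A minimal usage sketch (not part of the original module; `ctxt` and the
# `instance` dict are assumed to come from the normal nova request flow):
#
#   compute_cells_api = ComputeCellsAPI()
#   # Updates the instance in the API cell DB, then casts 'pause' down to the
#   # compute manager in instance['cell_name'] via the cells RPC API.
#   compute_cells_api.pause(ctxt, instance)
#
#   host_api = HostAPI()
#   # Host names are cell-qualified, e.g. 'parent!child@compute-node-1'.
#   host_api.service_get_by_compute_host(ctxt, 'parent!child@compute-node-1')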
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Virtual batch normalization.
This technique was first introduced in `Improved Techniques for Training GANs`
(Salimans et al, https://arxiv.org/abs/1606.03498). Instead of using batch
normalization on a minibatch, it fixes a reference subset of the data to use for
calculating normalization statistics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
__all__ = [
'VBN',
]
def _static_or_dynamic_batch_size(tensor, batch_axis):
"""Returns the static or dynamic batch size."""
batch_size = array_ops.shape(tensor)[batch_axis]
static_batch_size = tensor_util.constant_value(batch_size)
return static_batch_size or batch_size
def _statistics(x, axes):
"""Calculate the mean and mean square of `x`.
Modified from the implementation of `tf.nn.moments`.
Args:
x: A `Tensor`.
axes: Array of ints. Axes along which to compute mean and
variance.
Returns:
Two `Tensor` objects: `mean` and `square mean`.
"""
# The dynamic range of fp16 is too limited to support the collection of
# sufficient statistics. As a workaround we simply perform the operations
# on 32-bit floats before converting the mean and variance back to fp16
y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x
# Compute true mean while keeping the dims for proper broadcasting.
shift = array_ops.stop_gradient(math_ops.reduce_mean(y, axes, keepdims=True))
shifted_mean = math_ops.reduce_mean(y - shift, axes, keepdims=True)
mean = shifted_mean + shift
mean_squared = math_ops.reduce_mean(math_ops.square(y), axes, keepdims=True)
mean = array_ops.squeeze(mean, axes)
mean_squared = array_ops.squeeze(mean_squared, axes)
if x.dtype == dtypes.float16:
return (math_ops.cast(mean, dtypes.float16),
math_ops.cast(mean_squared, dtypes.float16))
else:
return (mean, mean_squared)
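# Illustrative arithmetic (not executed): for reference statistics
# (m_ref, ms_ref) computed over N reference examples and a new example x,
# `_virtual_statistics` below combines them linearly as
#   vb_mean    = N/(N+1) * m_ref  + 1/(N+1) * mean(x)
#   vb_mean_sq = N/(N+1) * ms_ref + 1/(N+1) * mean(x**2)
# and the variance is recovered afterwards as vb_mean_sq - vb_mean**2,
# which is why the mean square is tracked instead of the variance.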
def _validate_init_input_and_get_axis(reference_batch, axis):
"""Validate input and return the used axis value."""
if reference_batch.shape.ndims is None:
raise ValueError('`reference_batch` has unknown dimensions.')
ndims = reference_batch.shape.ndims
if axis < 0:
used_axis = ndims + axis
else:
used_axis = axis
if used_axis < 0 or used_axis >= ndims:
raise ValueError('Value of `axis` argument ' + str(used_axis) +
' is out of range for input with rank ' + str(ndims))
return used_axis
def _validate_call_input(tensor_list, batch_dim):
"""Verifies that tensor shapes are compatible, except for `batch_dim`."""
def _get_shape(tensor):
shape = tensor.shape.as_list()
del shape[batch_dim]
return shape
base_shape = tensor_shape.TensorShape(_get_shape(tensor_list[0]))
for tensor in tensor_list:
base_shape.assert_is_compatible_with(_get_shape(tensor))
class VBN(object):
"""A class to perform virtual batch normalization.
This technique was first introduced in `Improved Techniques for Training GANs`
(Salimans et al, https://arxiv.org/abs/1606.03498). Instead of using batch
normalization on a minibatch, it fixes a reference subset of the data to use
for calculating normalization statistics.
To do this, we calculate the reference batch mean and mean square, and modify
those statistics for each example. We use mean square instead of variance,
since it is linear.
Note that if `center` or `scale` variables are created, they are shared
between all calls to this object.
The `__init__` API is intended to mimic `tf.layers.batch_normalization` as
closely as possible.
"""
def __init__(self,
reference_batch,
axis=-1,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
trainable=True,
name=None,
batch_axis=0):
"""Initialize virtual batch normalization object.
We precompute the 'mean' and 'mean squared' of the reference batch, so that
`__call__` is efficient. This means that the axis must be supplied when the
object is created, not when it is called.
We precompute 'square mean' instead of 'variance', because the square mean
can be easily adjusted on a per-example basis.
Args:
reference_batch: A minibatch tensor. This will form the reference data
from which the normalization statistics are calculated. See
https://arxiv.org/abs/1606.03498 for more details.
axis: Integer, the axis that should be normalized (typically the features
axis). For instance, after a `Convolution2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False,
`beta` is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can
be disabled since the scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: String, the name of the ops.
batch_axis: The axis of the batch dimension. This dimension is treated
differently in `virtual batch normalization` vs `batch normalization`.
Raises:
ValueError: If `reference_batch` has unknown dimensions at graph
construction.
ValueError: If `batch_axis` is the same as `axis`.
"""
axis = _validate_init_input_and_get_axis(reference_batch, axis)
self._epsilon = epsilon
self._beta = 0
self._gamma = 1
self._batch_axis = _validate_init_input_and_get_axis(
reference_batch, batch_axis)
if axis == self._batch_axis:
raise ValueError('`axis` and `batch_axis` cannot be the same.')
with variable_scope.variable_scope(name, 'VBN',
values=[reference_batch]) as self._vs:
self._reference_batch = reference_batch
# Calculate important shapes:
# 1) Reduction axes for the reference batch
# 2) Broadcast shape, if necessary
# 3) Reduction axes for the virtual batchnormed batch
# 4) Shape for optional parameters
input_shape = self._reference_batch.shape
ndims = input_shape.ndims
reduction_axes = list(range(ndims))
del reduction_axes[axis]
self._broadcast_shape = [1] * len(input_shape)
self._broadcast_shape[axis] = input_shape.dims[axis]
self._example_reduction_axes = list(range(ndims))
del self._example_reduction_axes[max(axis, self._batch_axis)]
del self._example_reduction_axes[min(axis, self._batch_axis)]
params_shape = self._reference_batch.shape[axis]
# Determines whether broadcasting is needed. This is slightly different
# than in the `nn.batch_normalization` case, due to `batch_dim`.
self._needs_broadcasting = (
sorted(self._example_reduction_axes) != list(range(ndims))[:-2])
# Calculate the sufficient statistics for the reference batch in a way
# that can be easily modified by additional examples.
self._ref_mean, self._ref_mean_squares = _statistics(
self._reference_batch, reduction_axes)
self._ref_variance = (self._ref_mean_squares -
math_ops.square(self._ref_mean))
# Virtual batch normalization uses a weighted average between example
# statistics and the reference batch statistics.
ref_batch_size = _static_or_dynamic_batch_size(
self._reference_batch, self._batch_axis)
self._example_weight = 1. / (
math_ops.cast(ref_batch_size, dtypes.float32) + 1.)
self._ref_weight = 1. - self._example_weight
# Make the variables, if necessary.
if center:
self._beta = variable_scope.get_variable(
name='beta',
shape=(params_shape,),
initializer=beta_initializer,
regularizer=beta_regularizer,
trainable=trainable)
if scale:
self._gamma = variable_scope.get_variable(
name='gamma',
shape=(params_shape,),
initializer=gamma_initializer,
regularizer=gamma_regularizer,
trainable=trainable)
def _virtual_statistics(self, inputs, reduction_axes):
"""Compute the statistics needed for virtual batch normalization."""
cur_mean, cur_mean_sq = _statistics(inputs, reduction_axes)
vb_mean = (self._example_weight * cur_mean +
self._ref_weight * self._ref_mean)
vb_mean_sq = (self._example_weight * cur_mean_sq +
self._ref_weight * self._ref_mean_squares)
return (vb_mean, vb_mean_sq)
def _broadcast(self, v, broadcast_shape=None):
# The exact broadcast shape depends on the current batch, not the reference
# batch, unless we're calculating the batch normalization of the reference
# batch.
b_shape = broadcast_shape or self._broadcast_shape
if self._needs_broadcasting and v is not None:
return array_ops.reshape(v, b_shape)
return v
def reference_batch_normalization(self):
"""Return the reference batch, but batch normalized."""
with ops.name_scope(self._vs.name):
return nn.batch_normalization(self._reference_batch,
self._broadcast(self._ref_mean),
self._broadcast(self._ref_variance),
self._broadcast(self._beta),
self._broadcast(self._gamma),
self._epsilon)
def __call__(self, inputs):
"""Run virtual batch normalization on inputs.
Args:
inputs: Tensor input.
Returns:
A virtual batch normalized version of `inputs`.
Raises:
ValueError: If `inputs` shape isn't compatible with the reference batch.
"""
_validate_call_input([inputs, self._reference_batch], self._batch_axis)
with ops.name_scope(self._vs.name, values=[inputs, self._reference_batch]):
# Calculate the statistics on the current input on a per-example basis.
vb_mean, vb_mean_sq = self._virtual_statistics(
inputs, self._example_reduction_axes)
vb_variance = vb_mean_sq - math_ops.square(vb_mean)
# The exact broadcast shape of the input statistic Tensors depends on the
# current batch, not the reference batch. The parameter broadcast shape
# is independent of the shape of the input statistic Tensor dimensions.
b_shape = self._broadcast_shape[:] # deep copy
b_shape[self._batch_axis] = _static_or_dynamic_batch_size(
inputs, self._batch_axis)
return nn.batch_normalization(
inputs,
self._broadcast(vb_mean, b_shape),
self._broadcast(vb_variance, b_shape),
self._broadcast(self._beta, self._broadcast_shape),
self._broadcast(self._gamma, self._broadcast_shape),
self._epsilon)
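# A minimal usage sketch (assumes TensorFlow 1.x graph mode; the tensors below
# are illustrative and not part of this module):
#
#   reference = tf.random_normal([64, 32])       # fixed reference batch
#   vbn = VBN(reference, axis=-1)
#   normalized_reference = vbn.reference_batch_normalization()
#   new_batch = tf.random_normal([16, 32])
#   normalized_new = vbn(new_batch)              # uses reference statistics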
|
|
#----------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#----------------------------------------------------------------
#authors :
#---------
# Piumi Francois ([email protected]) software conception and development (engineer in bioinformatics)
# Jouneau Luc ([email protected]) software conception and development (engineer in bioinformatics)
# Gasselin Maxime ([email protected]) software user and data analysis (PhD student in Epigenetics)
# Perrier Jean-Philippe ([email protected]) software user and data analysis (PhD student in Epigenetics)
# Al Adhami Hala ([email protected]) software user and data analysis (postdoctoral researcher in Epigenetics)
# Jammes Helene ([email protected]) software user and data analysis (research group leader in Epigenetics)
# Kiefer Helene ([email protected]) software user and data analysis (principal investigator in Epigenetics)
#
import os
import sys
from sys import argv
import re
from bx.intervals.intersection import IntervalTree,Interval
from gene import Gene
"""
With the bx module, interval boundaries are not contained in the interval.
Example :
ivt.insert_interval( Interval(10,20) )
ivt.insert_interval( Interval(20,30) )
print "result={0}".format(ivt.find(20,20))
result=[]
Therefore, whenever we use the find method, we widen the target boundaries :
start boundary -1
end boundary +1
such as :
interval_tree.find(target_start-1,target_end+1)
"""
class Join_with_gtf :
debug=False
def __init__(self,theme) :
self.theme=theme
def verify_key_columns(self,file,header_elmts,keys) :
if type(keys[0]).__name__=="str" :
#Target keys are specified as column names : replace the names with the corresponding column numbers
for i in range(0,len(header_elmts)) :
for j in range(0,len(keys)) :
if keys[j] == header_elmts[i] :
keys[j] = i+1
#Verify that all joining column names have been found
for j in range(0,len(keys)) :
if type(keys[j]).__name__ == "str" :
#Column name not found
sys.exit(("Could not find joining column '{0}' "+ \
"in first line of file '{1}'. Exiting.").format(keys[j],file))
def log_already_completed(self,message,file_size,position) :
achieved=100*position/file_size
sys.stdout.write("\t{0} ... ({1}% completed)\r".format(
message,achieved
))
sys.stdout.flush()
##############################################################################################################
# read_reference
##############################################################################################################
def read_reference(self,file_ref):
print "Read reference ..."
#get file size
file_size=os.stat(file_ref).st_size
#Parse GTF file
in_reference = open(file_ref)
genes={}
no_line = 0
current_position=0
for gtf_line in in_reference:
no_line = no_line + 1
current_position+=len(gtf_line)
#if (no_line< 1e5 and no_line % 1000 == 0) or (no_line<1e6 and no_line % 1e4 ==0) or (no_line>1e6 and no_line % 1e5 ==0) :
if no_line % 1e5 ==0 :
self.log_already_completed("{0} lines read from reference".format(no_line),file_size,current_position)
if re.match("^#.*$",gtf_line):
continue
gtf_line = gtf_line.rstrip('\r\n')
elmts = gtf_line.split('\t')
gene_chr=elmts[0]
gene_chr=gene_chr.lower().replace("chr","")
start=int(elmts[3])
end=int(elmts[4])
if Join_with_gtf.debug and gene_chr != '1' :
break
feature=elmts[2]
annot=elmts[8]
me=re.match('^gene_id "([^"]+)".*$',annot)
if me :
gene_id=me.group(1)
else :
#Feature not related to a gene_id
gene_id=""
#sys.exit("Unable to find gene_id value on line #{0} of file '{1}'. Exiting".format(no_line,file_ref))
if feature == "gene":
gene_start=start
gene_end=end
strand=elmts[6]
if strand == "-" : strand=-1
elif strand == '+' : strand=1
else: sys.exit("Unexpected strand value on line #{0} of file '{1}' : '{2}'. Exiting".format(no_line,file_ref,strand))
if gene_id not in genes :
gene=Gene(gene_id,gene_chr,gene_start,gene_end,strand)
genes[gene_id]=gene
else :
gene=genes[gene_id]
gene.set_location(gene_chr,gene_start,gene_end)
gene.set_strand(strand)
#gene start and end are defined on this line, therefore we can compute :
#promoter, tss and tts
self.features_found["promoter"]=1
self.features_found["tss"]=1
self.features_found["tts"]=1
self.features_found["gene"]=1
gene.gene_model_has_been_defined()
#elif feature not in("CDS","UTR","transcript") :
else :
if gene_id not in genes :
gene=Gene(gene_id,gene_chr)
genes[gene_id]=gene
else :
gene=genes[gene_id]
if feature == "start_codon" :
self.features_found["utr5"]=1
elif feature == "stop_codon" :
self.features_found["utr3"]=1
elif feature == "exon" :
self.features_found["exon"]=1
self.features_found["intron"]=1
else :
self.features_found[feature.lower()]=1
gene.add_feature(feature,start,end)
in_reference.close()
print "\n\t{0} lines read from reference in total.".format(no_line)
#Check that all features listed in the configuration file have been found at least once
for feature in self.features_found :
if self.features_found[feature.lower()] == 0 :
sys.stderr.write(("Warning : feature named '{0}' found in 'feature_priorities' parameter. "+
"This feature has never been found in reference file '{1}'.\n").format(
feature, file_ref
))
#Complete feature_priorities with the features found in the gtf file but not requested by the user.
#Otherwise, when we try to order the features overlapping a given region with
#sorted(overlaps, key=lambda ovlp: self.feature_priorities[ ovlp.value[0] ])
#it would raise an exception.
for feature in self.features_found :
if feature.lower() not in self.feature_priorities :
self.feature_priorities[feature.lower()]=None
#define downstream/upstream boundaries
promoter_downstream= self.theme.get_parameter("promoter_downstream")
promoter_upstream= self.theme.get_parameter("promoter_upstream")
tss_downstream= self.theme.get_parameter("tss_downstream")
tss_upstream= self.theme.get_parameter("tss_upstream")
tts_downstream= self.theme.get_parameter("tts_downstream")
tts_upstream= self.theme.get_parameter("tts_upstream")
#print "promoter_upstream={0}".format(promoter_upstream)
#print "promoter_downstream={0}".format(promoter_downstream)
#print "tss_upstream={0}".format(tss_upstream)
#print "tss_downstream={0}".format(tss_downstream)
#print "tts_upstream={0}".format(tts_upstream)
#print "tts_downstream={0}".format(tts_downstream)
#Initialize dictionaries
features={}
gene_boundaries={}
#Build gene model
print "Build gene model ..."
no_gene=0
for gene_id in genes :
gene=genes[gene_id]
(gene_chr,gene_start,gene_end)=gene.get_coordinates()
no_gene+=1
if no_gene % 1000 == 0 :
self.log_already_completed("{0} genes treated".format(no_gene),len(genes),no_gene)
if gene_chr not in features :
features[gene_chr]=IntervalTree()
gene_boundaries[gene_chr]=IntervalTree()
if gene.gene_model_is_defined() :
if gene_chr not in gene_boundaries :
gene_boundaries[gene_chr]=IntervalTree()
gene_boundaries[gene_chr].insert_interval( Interval(gene_start,gene_end, value=["gene",gene_id] ) )
#Promoter
if gene.strand == 1 :
(start,end)=gene.get_promoter(promoter_upstream,promoter_downstream)
else :
(start,end)=gene.get_promoter(promoter_downstream,promoter_upstream)
features[gene_chr].insert_interval( Interval(start,end, value=["promoter",gene_id] ) )
#5' UTR
(start,end)=gene.get_utr5()
if start is not None:
features[gene_chr].insert_interval( Interval(start,end, value=["utr5",gene_id] ) )
#TSS
if gene.strand == 1 :
(start,end)=gene.get_tss(tss_upstream,tss_downstream)
else :
(start,end)=gene.get_tss(tss_downstream,tss_upstream)
features[gene_chr].insert_interval( Interval(start,end, value=["tss",gene_id] ) )
#Intron / Exon
(intron_coords,exon_coords)=gene.get_introns_exons()
#Debug
#if gene.gene_id == "ENSBTAG00000000010" :
# print "gene_id '{0} / intron={1} / exon={2}".format(gene.gene_id,intron_coords,exon_coords)
for exon_coord in exon_coords :
(start,end)=exon_coord
features[gene_chr].insert_interval( Interval(start,end, value=["exon",gene_id] ) )
for intron_coord in intron_coords :
(start,end)=intron_coord
features[gene_chr].insert_interval( Interval(start,end, value=["intron",gene_id] ) )
#TTS
if gene.strand == 1 :
(start,end)=gene.get_tts(tts_upstream,tts_downstream)
else :
(start,end)=gene.get_tts(tts_downstream,tts_upstream)
features[gene_chr].insert_interval( Interval(start,end, value=["tts",gene_id] ) )
#3' UTR
(start,end)=gene.get_utr3()
if start is not None:
features[gene_chr].insert_interval( Interval(start,end, value=["utr3",gene_id] ) )
#Other features
for feature in gene.get_other_features() :
(start,end,feature)=feature
features[gene_chr].insert_interval( Interval(start,end, value=[feature,gene_id] ) )
print "\n\t{0} genes treated in total.".format(no_gene)
return (features,gene_boundaries)
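#Illustrative shape of the returned structures (hypothetical gene id) :
# features["1"]        : IntervalTree of Interval(start,end, value=[feature,"ENSBTAG..."])
# gene_boundaries["1"] : IntervalTree of Interval(gene_start,gene_end, value=["gene","ENSBTAG..."])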
##############################################################################################################
# run_annotation
##############################################################################################################
def run_annotation(self,file_in,file_out) :
theme=self.theme.get_name()
file_ref=self.theme.get_parameter("reference_file")
target_keys=self.theme.get_parameter("target_keys")
keep_scaffolds=self.theme.get_parameter("keep_scaffolds").lower()
if keep_scaffolds[0] != "n" :
keep_scaffolds=True
else :
keep_scaffolds=False
nb_max_results=self.theme.get_parameter("nb_max_results")
max_dist_nearest_gene=self.theme.get_parameter("max_dist_nearest_gene")
self.feature_priorities={}
self.features_found={}
no_priority=0
for gene_feature in self.theme.get_parameter("feature_priorities").split(","):
gene_feature=gene_feature.lower()
no_priority+=1
self.feature_priorities[gene_feature]=no_priority
self.features_found[gene_feature]=0
#the feature "gene" is always available as a fallback if no other feature is found
no_priority+=1
self.feature_priorities["gene"]=no_priority
#Read input file
target_is_region=False
if len(target_keys)!=2 :
target_is_region=True
no_scaffolds_filtered=0
#get file size
file_size=os.stat(file_in).st_size
try:
try:
in_file=open(file_in,"rt")
except IOError as exc:
sys.exit("Cannot open input file '{0}' : {1}".format(file_in,exc))
try:
out_file=open(file_out,"wt")
except IOError as exc:
sys.exit("Cannot open output file '{0}' : {1}".format(file_out,exc))
no_line=0
current_position=0
for line in in_file.readlines():
line=line.rstrip("\r\n")
current_position+=len(line)
no_line+=1
#if (no_line< 1e5 and no_line % 1000 == 0) or (no_line<1e6 and no_line % 1e4 ==0) or (no_line>1e6 and no_line % 1e5 ==0) :
if (no_line<1e6 and no_line % 1e4 ==0) or (no_line>1e6 and no_line % 1e5 ==0) :
self.log_already_completed("{0} lines read from target".format(no_line),file_size,current_position)
elmts = line.split("\t")
if no_line == 1 :
out_file.write(line)
empty_reference=""
if nb_max_results != 1 :
out_file.write("\t# overlap")
empty_reference+="\t"
out_file.write("\tGene ID\tDistance from target\tGene feature")
empty_reference+="\t\t"
self.verify_key_columns(file_in,elmts,target_keys)
#Everything is OK : read reference file now
try:
(features,gene_boundaries)=self.read_reference(file_ref)
except IOError as exc:
sys.exit("Cannot open reference file '{0}' : {1}".format(file_ref,exc))
out_file.write("\n")
nb_fields=len(elmts)
continue
if no_line == 2 :
print "Read target file"
#Verify this line has the same number of fields as in header
if len(elmts) != nb_fields:
sys.exit("Line #"+str(no_line)+"' in '"+file_in+"' contains "+str(len(elmts))+" fields "+
"while header line contains "+str(nb_fields)+" fields. Exiting."
)
if len(target_keys) == 1 :
localisation=elmts[target_keys[0]-1]
if localisation == "" :
#No localisation => no overlap possible
target_chr=""
target_start=""
target_end=""
else :
me=re.match("^(.*):([0-9]+)-([0-9]+)$",localisation)
if me is None :
me=re.match("^(.*):([0-9]+)$",localisation)
if me is None :
sys.exit("Could not interpret localisation '{0}' on line #{1} f target file {2}. Exiting.".format( \
elmts[target_keys[0]-1],no_line,file_in) \
)
target_chr=me.group(1)
target_start=int(me.group(2))
target_end=target_start
else :
target_chr=me.group(1)
target_start=int(me.group(2))
target_end=int(me.group(3))
else :
target_chr = elmts[target_keys[0]-1]
target_start = int(elmts[target_keys[1]-1])
if target_is_region :
target_end = int(elmts[target_keys[2]-1])
else :
target_end = target_start + 1
target_chr = target_chr.lower().replace("chr","")
if not re.match("^([0-9]+|[a-z]|mt)$",target_chr) and not keep_scaffolds :
no_scaffolds_filtered+=1
continue
if target_start != "" and target_end != "" :
if target_end < target_start:
sys.exit("End of region before start of region in '{0}' on line #{1}.".format(file_in,no_line))
target_length=(target_end-target_start+1)
#Find overlaps with gene features
overlaps=[]
if target_chr in features :
overlaps.extend(features[target_chr].find(target_start-1,target_end+1))
#Check whether the target falls inside a gene : may happen if the gene has no definition other than gene start and end (no exon features)
overlaps.extend(gene_boundaries[target_chr].find(target_start-1,target_end+1))
at_least_one_result_found=False
if len(overlaps)!=0 :
no_overlap=1
treated={}
for overlap in sorted(overlaps, key=lambda ovlp: self.feature_priorities[ ovlp.value[0].lower() ]) :
gene_feature=overlap.value[0]
if gene_feature == "gene" and len(overlaps) != 1 :
continue
if self.feature_priorities[gene_feature.lower()] is None :
#Not requested by user
continue
#At this step, we will output at least one result
at_least_one_result_found=True
gene_id=overlap.value[1]
#output only once for each {gene_id;gene_feature} pair
if "{0}\t{1}".format(gene_id,gene_feature) in treated :
continue
treated["{0}\t{1}".format(gene_id,gene_feature)]=1
if nb_max_results!=1 :
out_file.write("{0}\t{1}\t{2}\t{3}\t{4}\n".format(
line,
no_overlap,gene_id,0,gene_feature
))
else :
out_file.write("{0}\t{1}\t{2}\t{3}\n".format(
line,
gene_id,0,gene_feature
))
no_overlap+=1
if nb_max_results != -1 and no_overlap > nb_max_results :
break
if not at_least_one_result_found :
gene_id=min_dist=gene_feature=""
no_overlap=1
if target_chr in gene_boundaries :
#Look for nearest gene
next_upstream_gene=gene_boundaries[target_chr].upstream_of_interval(
Interval(target_start,target_end),max_dist=max_dist_nearest_gene
)
if len(next_upstream_gene) != 0 :
dist_upstream=target_start - next_upstream_gene[0].end
assert dist_upstream > 0, \
"Negative distance found between region {0}:{1}-{2} and next upstream gene '{3}' {0}:{4}-{5}.".format(
target_chr,target_start,target_end,
next_upstream_gene[0].value[1], next_upstream_gene[0].start, next_upstream_gene[0].end
)
else :
dist_upstream=None
next_downstream_gene=gene_boundaries[target_chr].downstream_of_interval(
Interval(target_start,target_end),max_dist=max_dist_nearest_gene
)
if len(next_downstream_gene) != 0 :
dist_downstream=next_downstream_gene[0].start - target_end
assert dist_downstream > 0, \
"Negative distance found between region {0}:{1}-{2} and next downstream gene '{3}' {0}:{4}-{5}.".format(
target_chr,target_start,target_end,
next_downstream_gene[0].value[1], next_downstream_gene[0].start, next_downstream_gene[0].end
)
else :
dist_downstream=None
if dist_upstream is not None and dist_downstream is not None :
if dist_upstream<dist_downstream :
gene_id=next_upstream_gene[0].value[1]
min_dist="+{0}".format(dist_upstream)
else :
gene_id=next_downstream_gene[0].value[1]
min_dist="-{0}".format(dist_downstream)
elif dist_upstream is not None :
gene_id=next_upstream_gene[0].value[1]
min_dist="+{0}".format(dist_upstream)
elif dist_downstream is not None :
gene_id=next_downstream_gene[0].value[1]
min_dist="-{0}".format(dist_downstream)
else :
no_overlap=""
#else :
#print "No chr '{0}' in gene_boundaries dictionnary.".format(target_chr)
if nb_max_results != 1 :
out_file.write("{0}\t{1}\t{2}\t{3}\t{4}\n".format(
line,
no_overlap,gene_id,min_dist,gene_feature
))
else :
out_file.write("{0}\t{1}\t{2}\t{3}\n".format(
line,
gene_id,min_dist,gene_feature
))
in_file.close()
out_file.close()
print "\n\t{0} lines read from target in total.".format(no_line)
if no_scaffolds_filtered != 0 :
print "\t{0} lines not kept because keep_scaffolds is set to 'No'.".format(no_scaffolds_filtered)
except IOError as exc:
sys.exit("I/O error occured during annotation treatment : {1}".format(file_in,exc))
#if __name__ == "__main__":
# join=Join_with_gtf(None)
# join.read_reference("reference/Bos_taurus.UMD3.1.81.gtf",[1,4,5])
|