repo_name (stringlengths 5 to 100) | path (stringlengths 4 to 299) | copies (stringclasses, 990 values) | size (stringlengths 4 to 7) | content (stringlengths 666 to 1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17 to 100) | line_max (int64, 7 to 1k) | alpha_frac (float64, 0.25 to 0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
yordan-desta/QgisIns | tests/src/python/test_qgspallabeling_base.py | 2 | 17646 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsPalLabeling: base suite setup
From build dir, run: ctest -R PyQgsPalLabelingBase -V
See <qgis-src-dir>/tests/testdata/labeling/README.rst for description.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Larry Shaffer'
__date__ = '07/09/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import sys
import datetime
import glob
import shutil
import StringIO
import tempfile
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import (
QgsCoordinateReferenceSystem,
QgsDataSourceURI,
QgsMapLayerRegistry,
QgsMapRenderer,
QgsMapSettings,
QgsPalLabeling,
QgsPalLayerSettings,
QgsProviderRegistry,
QgsVectorLayer,
QgsRenderChecker
)
from utilities import (
getQgisTestApp,
TestCase,
unittest,
unitTestDataPath,
getTempfilePath,
renderMapToImage,
loadTestFonts,
getTestFont,
openInBrowserTab
)
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
FONTSLOADED = loadTestFonts()
PALREPORT = 'PAL_REPORT' in os.environ
PALREPORTS = {}
# noinspection PyPep8Naming,PyShadowingNames
class TestQgsPalLabeling(TestCase):
_TestDataDir = unitTestDataPath()
_PalDataDir = os.path.join(_TestDataDir, 'labeling')
_PalFeaturesDb = os.path.join(_PalDataDir, 'pal_features_v3.sqlite')
_TestFont = getTestFont() # Roman at 12 pt
""":type: QFont"""
_MapRegistry = None
""":type: QgsMapLayerRegistry"""
_MapRenderer = None
""":type: QgsMapRenderer"""
_MapSettings = None
""":type: QgsMapSettings"""
_Canvas = None
""":type: QgsMapCanvas"""
_Pal = None
""":type: QgsPalLabeling"""
_PalEngine = None
""":type: QgsLabelingEngineInterface"""
_BaseSetup = False
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# qgis instances
cls._QgisApp, cls._Canvas, cls._Iface, cls._Parent = \
QGISAPP, CANVAS, IFACE, PARENT
# verify that spatialite provider is available
msg = '\nSpatialite provider not found, SKIPPING TEST SUITE'
# noinspection PyArgumentList
res = 'spatialite' in QgsProviderRegistry.instance().providerList()
assert res, msg
cls._TestFunction = ''
cls._TestGroup = ''
cls._TestGroupPrefix = ''
cls._TestGroupAbbr = ''
cls._TestGroupCanvasAbbr = ''
cls._TestImage = ''
cls._TestMapSettings = None
cls._Mismatch = 0
cls._Mismatches = dict()
cls._ColorTol = 0
cls._ColorTols = dict()
# initialize class MapRegistry, Canvas, MapRenderer, Map and PAL
# noinspection PyArgumentList
cls._MapRegistry = QgsMapLayerRegistry.instance()
cls._MapRenderer = cls._Canvas.mapRenderer()
cls._MapSettings = cls.getBaseMapSettings()
osize = cls._MapSettings.outputSize()
cls._Canvas.resize(QSize(osize.width(), osize.height())) # necessary?
# set color to match render test comparisons background
cls._Canvas.setCanvasColor(cls._MapSettings.backgroundColor())
cls.setDefaultEngineSettings()
msg = ('\nCould not initialize PAL labeling engine, '
'SKIPPING TEST SUITE')
assert cls._PalEngine, msg
cls._BaseSetup = True
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def setUp(self):
"""Run before each test."""
TestQgsPalLabeling.setDefaultEngineSettings()
self.lyr = self.defaultLayerSettings()
@classmethod
def setDefaultEngineSettings(cls):
"""Restore default settings for pal labelling"""
cls._Pal = QgsPalLabeling()
cls._MapRenderer.setLabelingEngine(cls._Pal)
cls._PalEngine = cls._MapRenderer.labelingEngine()
@classmethod
def removeAllLayers(cls):
cls._MapRegistry.removeAllMapLayers()
cls._MapSettings.setLayers([])
@classmethod
def removeMapLayer(cls, layer):
if layer is None:
return
lyr_id = layer.id()
cls._MapRegistry.removeMapLayer(lyr_id)
ms_layers = cls._MapSettings.layers()
""":type: QStringList"""
if ms_layers.contains(lyr_id):
ms_layers.removeAt(ms_layers.indexOf(lyr_id))
cls._MapSettings.setLayers(ms_layers)
@classmethod
def getTestFont(cls):
return QFont(cls._TestFont)
@classmethod
def loadFeatureLayer(cls, table, chk=False):
if chk and cls._MapRegistry.mapLayersByName(table):
return
uri = QgsDataSourceURI()
uri.setDatabase(cls._PalFeaturesDb)
uri.setDataSource('', table, 'geometry')
vlayer = QgsVectorLayer(uri.uri(), table, 'spatialite')
# .qml should contain only style for symbology
vlayer.loadNamedStyle(os.path.join(cls._PalDataDir,
'{0}.qml'.format(table)))
# qDebug('render_lyr = {0}'.format(repr(vlayer)))
cls._MapRegistry.addMapLayer(vlayer)
# place new layer on top of render stack
render_lyrs = [vlayer.id()]
render_lyrs.extend(cls._MapSettings.layers())
# qDebug('render_lyrs = {0}'.format(repr(render_lyrs)))
cls._MapSettings.setLayers(render_lyrs)
# zoom to aoi
cls._MapSettings.setExtent(cls.aoiExtent())
cls._Canvas.zoomToFullExtent()
return vlayer
@classmethod
def aoiExtent(cls):
"""Area of interest extent, which matches output aspect ratio"""
uri = QgsDataSourceURI()
uri.setDatabase(cls._PalFeaturesDb)
uri.setDataSource('', 'aoi', 'geometry')
aoilayer = QgsVectorLayer(uri.uri(), 'aoi', 'spatialite')
return aoilayer.extent()
@classmethod
def getBaseMapSettings(cls):
"""
:rtype: QgsMapSettings
"""
ms = QgsMapSettings()
crs = QgsCoordinateReferenceSystem()
""":type: QgsCoordinateReferenceSystem"""
# default for labeling test data: WGS 84 / UTM zone 13N
crs.createFromSrid(32613)
ms.setBackgroundColor(QColor(152, 219, 249))
ms.setOutputSize(QSize(420, 280))
ms.setOutputDpi(72)
ms.setFlag(QgsMapSettings.Antialiasing, True)
ms.setFlag(QgsMapSettings.UseAdvancedEffects, False)
ms.setFlag(QgsMapSettings.ForceVectorOutput, False) # no caching?
ms.setDestinationCrs(crs)
ms.setCrsTransformEnabled(False)
ms.setMapUnits(crs.mapUnits()) # meters
ms.setExtent(cls.aoiExtent())
return ms
def cloneMapSettings(self, oms):
"""
:param QgsMapSettings oms: Other QgsMapSettings
:rtype: QgsMapSettings
"""
ms = QgsMapSettings()
ms.setBackgroundColor(oms.backgroundColor())
ms.setOutputSize(oms.outputSize())
ms.setOutputDpi(oms.outputDpi())
ms.setFlags(oms.flags())
ms.setDestinationCrs(oms.destinationCrs())
ms.setCrsTransformEnabled(oms.hasCrsTransformEnabled())
ms.setMapUnits(oms.mapUnits())
ms.setExtent(oms.extent())
ms.setOutputImageFormat(oms.outputImageFormat())
ms.setLayers(oms.layers())
return ms
def configTest(self, prefix, abbr):
"""Call in setUp() function of test subclass"""
self._TestGroupPrefix = prefix
self._TestGroupAbbr = abbr
# insert test's Class.function marker into debug output stream
# this helps visually track down the start of a test's debug output
testid = self.id().split('.')
self._TestGroup = testid[1]
self._TestFunction = testid[2]
testheader = '\n#####_____ {0}.{1} _____#####\n'.\
format(self._TestGroup, self._TestFunction)
qDebug(testheader)
# define the shorthand name of the test (to minimize file name length)
self._Test = '{0}_{1}'.format(self._TestGroupAbbr,
self._TestFunction.replace('test_', ''))
def defaultLayerSettings(self):
lyr = QgsPalLayerSettings()
lyr.enabled = True
lyr.fieldName = 'text' # default in test data sources
font = self.getTestFont()
font.setPointSize(32)
lyr.textFont = font
lyr.textNamedStyle = 'Roman'
return lyr
@staticmethod
def settingsDict(lyr):
"""Return a dict of layer-level labeling settings
.. note:: QgsPalLayerSettings is not a QObject, so we can not collect
current object properties, and the public properties of the C++ obj
can't be listed with __dict__ or vars(). So, we sniff them out relative
to their naming convention (camelCase), as reported by dir().
"""
res = {}
for attr in dir(lyr):
if attr[0].islower() and not attr.startswith("__"):
value = getattr(lyr, attr)
if not callable(value):
res[attr] = value
return res
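# Illustrative sketch (not part of the original test suite): the dir()-based
# sniffing in settingsDict() above keeps any non-callable, lowercase-named
# public attribute. A plain Python object with hypothetical attribute names
# would be reduced the same way, e.g.:
#
#   class FakeSettings(object):
#       fieldName = 'text'
#       bufferSize = 1.0
#       def writeToLayer(self, layer): pass
#
#   # settingsDict(FakeSettings()) -> {'fieldName': 'text', 'bufferSize': 1.0}
#   # (writeToLayer is skipped because it is callable)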
def controlImagePath(self, grpprefix=''):
if not grpprefix:
grpprefix = self._TestGroupPrefix
return os.path.join(self._TestDataDir, 'control_images',
'expected_' + grpprefix,
self._Test, self._Test + '.png')
def saveControlImage(self, tmpimg=''):
# don't save control images for RenderVsOtherOutput (Vs) tests, since
# those control images belong to a different test result
if ('PAL_CONTROL_IMAGE' not in os.environ
or 'Vs' in self._TestGroup):
return
imgpath = self.controlImagePath()
testdir = os.path.dirname(imgpath)
if not os.path.exists(testdir):
os.makedirs(testdir)
imgbasepath = \
os.path.join(testdir,
os.path.splitext(os.path.basename(imgpath))[0])
# remove any existing control images
for f in glob.glob(imgbasepath + '.*'):
if os.path.exists(f):
os.remove(f)
qDebug('Control image for {0}.{1}'.format(self._TestGroup,
self._TestFunction))
if not tmpimg:
# TODO: this can be deprecated, when per-base-test-class rendering
# in checkTest() is verified OK for all classes
qDebug('Rendering control to: {0}'.format(imgpath))
ms = self._MapSettings # class settings
""":type: QgsMapSettings"""
settings_type = 'Class'
if self._TestMapSettings is not None:
ms = self._TestMapSettings # per test settings
settings_type = 'Test'
qDebug('MapSettings type: {0}'.format(settings_type))
img = renderMapToImage(ms, parallel=False)
""":type: QImage"""
tmpimg = getTempfilePath('png')
if not img.save(tmpimg, 'png'):
os.unlink(tmpimg)
raise OSError('Control not created for: {0}'.format(imgpath))
if tmpimg and os.path.exists(tmpimg):
qDebug('Copying control to: {0}'.format(imgpath))
shutil.copyfile(tmpimg, imgpath)
else:
raise OSError('Control not copied to: {0}'.format(imgpath))
def renderCheck(self, mismatch=0, colortol=0, imgpath='', grpprefix=''):
"""Check rendered map canvas or existing image against control image
:mismatch: number of pixels different from control, and still valid
:colortol: maximum difference for each color component including alpha
:imgpath: existing image; if present, skips rendering canvas
:grpprefix: compare test image/rendering against different test group
"""
if not grpprefix:
grpprefix = self._TestGroupPrefix
ctl_path = self.controlImagePath(grpprefix)
if not os.path.exists(ctl_path):
raise OSError('Missing control image: {0}'.format(ctl_path))
chk = QgsRenderChecker()
chk.setControlPathPrefix('expected_' + grpprefix)
chk.setControlName(self._Test)
chk.setColorTolerance(colortol)
ms = self._MapSettings # class settings
if self._TestMapSettings is not None:
ms = self._TestMapSettings # per test settings
chk.setMapSettings(ms)
# noinspection PyUnusedLocal
res = False
if imgpath:
res = chk.compareImages(self._Test, mismatch, str(imgpath))
else:
res = chk.runTest(self._Test, mismatch)
if PALREPORT and not res: # don't report ok checks
testname = self._TestGroup + ' . ' + self._Test
PALREPORTS[testname] = str(chk.report().toLocal8Bit())
msg = '\nRender check failed for "{0}"'.format(self._Test)
return res, msg
def checkTest(self, **kwargs):
"""Intended to be overridden in subclasses"""
pass
class TestPALConfig(TestQgsPalLabeling):
@classmethod
def setUpClass(cls):
TestQgsPalLabeling.setUpClass()
cls.layer = TestQgsPalLabeling.loadFeatureLayer('point')
def setUp(self):
"""Run before each test."""
self.configTest('pal_base', 'base')
def tearDown(self):
"""Run after each test."""
pass
def test_default_pal_disabled(self):
# Verify PAL labeling is disabled for layer by default
palset = self.layer.customProperty('labeling', '').toString()
msg = '\nExpected: Empty string\nGot: {0}'.format(palset)
self.assertEqual(palset, '', msg)
def test_settings_enable_pal(self):
# Verify default PAL settings enable PAL labeling for layer
lyr = QgsPalLayerSettings()
lyr.writeToLayer(self.layer)
palset = self.layer.customProperty('labeling', '').toString()
msg = "\nExpected: 'pal'\nGot: {0}".format(palset)
self.assertEqual(palset, 'pal', msg)
def test_layer_pal_activated(self):
# Verify, via engine, that PAL labeling can be activated for layer
lyr = self.defaultLayerSettings()
lyr.writeToLayer(self.layer)
msg = '\nLayer labeling not activated, as reported by labelingEngine'
self.assertTrue(self._PalEngine.willUseLayer(self.layer), msg)
def test_write_read_settings(self):
# Verify written PAL settings are same when read from layer
# load and write default test settings
lyr1 = self.defaultLayerSettings()
lyr1dict = self.settingsDict(lyr1)
# print lyr1dict
lyr1.writeToLayer(self.layer)
# read settings
lyr2 = QgsPalLayerSettings()
lyr2.readFromLayer(self.layer)
lyr2dict = self.settingsDict(lyr2)
# print lyr2dict
msg = '\nLayer settings read not same as settings written'
self.assertDictEqual(lyr1dict, lyr2dict, msg)
def test_default_partials_labels_enabled(self):
# Verify ShowingPartialsLabels is enabled for PAL by default
pal = QgsPalLabeling()
self.assertTrue(pal.isShowingPartialsLabels())
def test_partials_labels_activate(self):
pal = QgsPalLabeling()
# Enable partials labels
pal.setShowingPartialsLabels(True)
self.assertTrue(pal.isShowingPartialsLabels())
def test_partials_labels_deactivate(self):
pal = QgsPalLabeling()
# Disable partials labels
pal.setShowingPartialsLabels(False)
self.assertFalse(pal.isShowingPartialsLabels())
# noinspection PyPep8Naming,PyShadowingNames
def runSuite(module, tests):
"""This allows for a list of test names to be selectively run.
Also, ensures unittest verbose output comes at end, after debug output"""
loader = unittest.defaultTestLoader
if 'PAL_SUITE' in os.environ:
if tests:
suite = loader.loadTestsFromNames(tests, module)
else:
raise Exception(
"\n\n####__ 'PAL_SUITE' set, but no tests specified __####\n")
else:
suite = loader.loadTestsFromModule(module)
verb = 2 if 'PAL_VERBOSE' in os.environ else 0
out = StringIO.StringIO()
res = unittest.TextTestRunner(stream=out, verbosity=verb).run(suite)
if verb:
print '\nIndividual test summary:'
print '\n' + out.getvalue()
out.close()
if PALREPORTS:
teststamp = 'PAL Test Report: ' + \
datetime.datetime.now().strftime('%Y-%m-%d %X')
report = '<html><head><title>{0}</title></head><body>'.format(teststamp)
report += '\n<h2>Failed Tests: {0}</h2>'.format(len(PALREPORTS))
for k, v in PALREPORTS.iteritems():
report += '\n<h3>{0}</h3>\n{1}'.format(k, v)
report += '</body></html>'
tmp = tempfile.NamedTemporaryFile(suffix=".html", delete=False)
tmp.write(report)
tmp.close()
openInBrowserTab('file://' + tmp.name)
return res
if __name__ == '__main__':
# NOTE: unless PAL_SUITE env var is set all test class methods will be run
# ex: 'TestGroup(Point|Line|Curved|Polygon|Feature).test_method'
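# Illustrative usage (assumption, not from the original file): with the
# environment variables honored by runSuite() above, a selective run from a
# shell could look like
#   PAL_SUITE=1 PAL_VERBOSE=1 python test_qgspallabeling_base.py
# which executes only the names listed in `suite` below and prints the
# per-test summary collected in the StringIO stream at the end.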
suite = [
'TestPALConfig.test_write_read_settings'
]
res = runSuite(sys.modules[__name__], suite)
sys.exit(not res.wasSuccessful())
| gpl-2.0 | -1,152,539,646,844,711,000 | 34.793103 | 80 | 0.620197 | false |
rljacobson/Guru | guru/WorksheetController.py | 1 | 43950 | #Needs to:
# Manage the notebook/worksheet object.
# Communicate to the javascript in the WebView object.
# Open and save .sws files.
# Delete the internal notebook object on disk when done.
import os
try:
# simplejson is faster, so try to import it first
import simplejson as json
except ImportError:
import json
from PySide.QtCore import (QObject, SIGNAL, Slot, Signal)
from sagenb.notebook.notebook import Notebook
from sagenb.notebook.misc import encode_response
from sagenb.misc.misc import (unicode_str, walltime)
from guru.globals import GURU_PORT, GURU_USERNAME, guru_notebook
from guru.ServerConfigurations import ServerConfigurations
import guru.SageProcessManager as SageProcessManager
worksheet_commands = {}
dirty_commands = [
"eval",
"introspect"
]
class WorksheetController(QObject):
#Class variables
notebook = guru_notebook
worksheet_count = 0 #Reference counting.
def __init__(self, webViewController):
super(WorksheetController, self).__init__()
WorksheetController.worksheet_count += 1
self.thingy = 0
self.webview_controller = webViewController
#Set up the Python-javascript bridge.
self.webFrame = self.webview_controller.webView().page().mainFrame()
self.connect(self.webFrame, SIGNAL("javaScriptWindowObjectCleared()"), self.addJavascriptBridge)
self.request_values = None
self.isDirty = False
#Sanity check.
if guru_notebook is None:
raise RuntimeError
self.notebook_username = GURU_USERNAME
self._worksheet = None
self.init_updates()
self.server_configuration = None
#When we use a Notebook Server, we want to be able to access resources on the server
#from the webview. In order to do so, we need to set a session cookie. We set this
#cookie on loadStarted()
self.connect(self.webFrame, SIGNAL('loadFinished(bool)'), self.setSessionCookie)
@staticmethod
def withNewWorksheet(webViewController, server=None):
# server is a Sage server configuration that will determine the Sage process this
# worksheet will use.
if not server:
server = ServerConfigurations.getDefault()
wsc = WorksheetController(webViewController)
wsc.server_configuration = server
ws = guru_notebook.create_new_worksheet('Untitled', wsc.notebook_username)
SageProcessManager.setWorksheetProcessServer(ws, server)
wsc.setWorksheet(ws)
return wsc
@staticmethod
def withWorksheetFile(webViewController, filename, server=None):
if not server:
server = ServerConfigurations.getDefault()
wsc = WorksheetController(webViewController)
wsc.server_configuration = server
ws = wsc.notebook.import_worksheet(filename, wsc.notebook_username)
SageProcessManager.setWorksheetProcessServer(ws, server)
wsc.setWorksheet(ws)
return wsc
def useServerConfiguration(self, server_config):
self.server_configuration = server_config
new_worksheet = SageProcessManager.setWorksheetProcessServer(self._worksheet, server_config)
#If we are switching TO a Notebook Server, we want to be able to access resources on the
#server from the browser page.
self.setSessionCookie()
#There are some instances when we have to swap out worksheets, i.e., when switching FROM a
#notebook server.
if self._worksheet is not new_worksheet:
self._worksheet._notebook.delete_worksheet(self._worksheet.filename())
self._worksheet = new_worksheet
#Open the worksheet in the webView
self.webFrame.setUrl(self.worksheetUrl())
def setWorksheet(self, worksheet):
# This "opens" the worksheet in the webview as if it were a new file.
#Check that the worksheet we were given has the notebook setup correctly.
if (not hasattr(worksheet, "_notebook")) or (worksheet._notebook is None):
worksheet._notebook = guru_notebook
self._worksheet = worksheet
#Handle the dirty status of the worksheet.
self.isDirty = False
#Open the worksheet in the webView
self.webFrame.setUrl(self.worksheetUrl())
def addJavascriptBridge(self):
#This method is called whenever new content is loaded into the webFrame.
#Each time this happens, we need to reconnect the Python-javascript bridge.
self.webFrame.addToJavaScriptWindowObject("Guru", self)
def setSessionCookie(self):
cookie_string = SageProcessManager.getSessionCookie(self.server_configuration)
if cookie_string:
javascript = "document.cookie='%s';" % cookie_string
print javascript
self.webFrame.evaluateJavaScript(javascript)
@Slot(str)
def asyncRequest(self, token):
#This is the counterpart to sagenb.async_request() in sagenb.js.
#The original sagenb.async_request() made an ajax request. We can
#significantly improve UI performance by calling this python method
#directly and bypassing the Flask server.
# Sometimes the worksheet is deleted before the webview is GCed.
if self._worksheet is None:
return
if True:
#Handle the command ourselves.
javascript = "Guru.requests['%s']['url'];" % token
url = self.webFrame.evaluateJavaScript(javascript)
javascript = "encode_response(Guru.requests['%s']['postvars']);" % token
postvars = self.webFrame.evaluateJavaScript(javascript)
if postvars:
self.request_values = json.loads(postvars)
#The url encodes the command. They look like:
# url = "/home/admin/0/worksheet_properties"
print "URL: %s" % url
command = url.split("/")[-1]
# Check and see if the operation will make the worksheet dirty. If so, emit a "dirty" signal.
if command in dirty_commands:
self.isDirty = True
self.emit(SIGNAL("dirty(bool)"), True)
if self.server_configuration["type"] == "local":
result = worksheet_commands[command](self, self._worksheet)
elif self.server_configuration["type"] == "notebook server":
try:
result = SageProcessManager.remoteCommand(self._worksheet, (command, self.request_values))
except Exception as e:
#This signal may be emitted over and over, so the owner of this WorksheetController
#needs to take care of handling only the first error signal (or whatever).
#self.emit(SIGNAL("remoteCommandError(str)"), "Error executing remote command:\n%s"%e.message)
return
elif self.server_configuration["type"] == "cell server":
pass
self.sendResultToPage(result, token)
else:
#Let the Sage Notebook Server handle the request as usual.
javascript = "sagenb.guru_async_request_fall_through('%s');" % token
self.webFrame.evaluateJavaScript(javascript)
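# Rough sketch of the dispatch performed in asyncRequest() above (example
# values are assumptions, not taken from a real session): a request URL such
# as
#   "/home/admin/0/worksheet_properties"
# is reduced with url.split("/")[-1] to the command name
# "worksheet_properties", which is then looked up in the module-level
# worksheet_commands dict and, for a local Sage process, called as
#   worksheet_commands["worksheet_properties"](self, self._worksheet)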
def sendResultToPage(self, result, token):
#Because we encode result in a javascript string literal, we need
#to format the string as follows.
result_string = repr(result)
if result_string.startswith("u'"):
result_string = result_string[2:-1]
else:
result_string = result_string[1:-1]
#Now give the result back to the page.
self.webview_controller.putAjaxConsole("result: " + result + "\n")
javascript = "Guru.requests['%s']['callback']('success', '%s');" % (token, result_string)
self.webFrame.evaluateJavaScript(javascript)
javascript = "delete Guru.requests['%s'];" % token
self.webFrame.evaluateJavaScript(javascript)
@Slot(str)
def putAjaxConsole(self, text):
self.webview_controller.putAjaxConsole(text + "\n")
def cleanup(self):
#This method is called when this WorksheetController instance is about
#to be eligible for garbage collection.
WorksheetController.worksheet_count -= 1
if self._worksheet is not None:
SageProcessManager.stopSageProcess(self._worksheet, create_new=False)
#Now remove the worksheet.
self._worksheet._notebook.delete_worksheet(self._worksheet.filename())
self._worksheet = None
def saveWorksheet(self, file_name):
#Write out the worksheet to filename, overwriting if necessary.
if os.path.exists(file_name):
os.remove(file_name) #This may be unnecessary.
if self.server_configuration["type"] == "local":
self.worksheet_download(self._worksheet, file_name)
elif self.server_configuration["type"] == "notebook server":
SageProcessManager.saveRemoteWorksheet(self._worksheet, file_name)
elif self.server_configuration["type"] == "cell server":
pass
#The worksheet is no longer dirty.
self.isDirty = False
self.emit(SIGNAL("dirty(bool)"), False)
def worksheetUrl(self):
if self._worksheet is None:
return ''
#There is probably a better way to do this.
url_vars = {'port' : GURU_PORT, 'name': self._worksheet.filename()}
url = "http://localhost:%(port)s/home/%(name)s/" % url_vars
return url
def getTitle(self):
return self._worksheet.name()
########### FILE MENU WORKSHEET COMMANDS ###########
def evaluateAll(self):
javascript = "sagenb.worksheetapp.worksheet.evaluate_all()"
self.webFrame.evaluateJavaScript(javascript)
def interrupt(self):
javascript = "sagenb.worksheetapp.worksheet.interrupt()"
self.webFrame.evaluateJavaScript(javascript)
def hideAllOutput(self):
javascript = "sagenb.worksheetapp.worksheet.hide_all_output()"
self.webFrame.evaluateJavaScript(javascript)
def showAllOutput(self):
javascript = "sagenb.worksheetapp.worksheet.show_all_output()"
self.webFrame.evaluateJavaScript(javascript)
def deleteAllOutput(self):
javascript = "sagenb.worksheetapp.worksheet.delete_all_output()"
self.webFrame.evaluateJavaScript(javascript)
def restartWorksheet(self):
javascript = "sagenb.worksheetapp.worksheet.restart_sage()"
self.webFrame.evaluateJavaScript(javascript)
def typesetOutput(self, enabled):
#set_pretty_print takes a lowercase string.
if enabled:
self._worksheet.set_pretty_print('true')
else:
self._worksheet.set_pretty_print('false')
########### FLASK SERVER WORKSHEET COMMANDS ###########
def worksheet_command(target):
#This decorator registers the command as a command that the worksheet controller
#knows how to handle.
def decorator(f):
#Register the worksheet command.
worksheet_commands[target] = f
#We will need to take care of commands with multiple arguments.
def wrapper(*args, **kwds):
return f(*args, **kwds)
return wrapper
return decorator
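# Minimal illustration of the registration mechanism above (the command name
# is hypothetical, not part of Guru): decorating a method registers it under
# its URL fragment, so
#
#   @worksheet_command('ping')
#   def worksheet_ping(self, worksheet):
#       return 'pong'
#
#   # worksheet_commands['ping'] is now worksheet_ping, and asyncRequest()
#   # can dispatch a ".../ping" URL to it without going through Flask.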
def get_cell_id(self):
"""
Returns the cell ID from the request.
We cast the incoming cell ID to an integer, if it's possible.
Otherwise, we treat it as a string.
"""
try:
return int(self.request_values['id'])
except ValueError:
return self.request_values['id']
@worksheet_command('rename')
def worksheet_rename(self, worksheet):
worksheet.set_name(self.request_values['name'])
return 'done'
@worksheet_command('alive')
def worksheet_alive(self, worksheet):
return str(worksheet.state_number())
@worksheet_command('system/<system>')
def worksheet_system(self, worksheet, system):
worksheet.set_system(system)
return 'success'
@worksheet_command('pretty_print/<enable>')
def worksheet_pretty_print(self, worksheet, enable):
worksheet.set_pretty_print(enable)
return 'success'
@worksheet_command('conf')
def worksheet_conf(self, worksheet):
return str(worksheet.conf())
########################################################
# Save a worksheet
########################################################
@worksheet_command('save')
def worksheet_save(self, worksheet):
"""
Save the contents of a worksheet after editing it in plain-text
edit mode.
"""
if 'button_save' in self.request_values:
E = self.request_values['textfield']
worksheet.edit_save(E)
worksheet.record_edit(self.notebook_username)
return redirect(url_for_worksheet(worksheet))
@worksheet_command('save_snapshot')
def worksheet_save_snapshot(self, worksheet):
"""Save a snapshot of a worksheet."""
worksheet.save_snapshot(self.notebook_username)
return 'saved'
@worksheet_command('save_and_quit')
def worksheet_save_and_quit(self, worksheet):
"""Save a snapshot of a worksheet then quit it. """
worksheet.save_snapshot(self.notebook_username)
worksheet.quit()
return 'saved'
#XXX: Redundant due to the above?
@worksheet_command('save_and_close')
def worksheet_save_and_close(self, worksheet):
"""Save a snapshot of a worksheet then quit it. """
worksheet.save_snapshot(self.notebook_username)
worksheet.quit()
return 'saved'
@worksheet_command('discard_and_quit')
def worksheet_discard_and_quit(self, worksheet):
"""Quit the worksheet, discarding any changes."""
worksheet.revert_to_last_saved_state()
worksheet.quit()
return 'saved' #XXX: Should this really be saved?
@worksheet_command('revert_to_last_saved_state')
def worksheet_revert_to_last_saved_state(self, worksheet):
worksheet.revert_to_last_saved_state()
return 'reverted'
########################################################
# Worksheet properties
########################################################
@worksheet_command('worksheet_properties')
def worksheet_properties(self, worksheet):
"""
Send worksheet properties as a JSON object
"""
r = worksheet.basic()
if worksheet.has_published_version():
hostname = request.headers.get('host', self.notebook.interface + ':' + str(self.notebook.port))
r['published_url'] = 'http%s://%s/home/%s' % ('' if not self.notebook.secure else 's',
hostname,
worksheet.published_version().filename())
return encode_response(r)
########################################################
# Used in refreshing the cell list
########################################################
@worksheet_command('cell_properties')
def worksheet_cell_properties(self, worksheet):
"""
Return the cell with the given id as a JSON object
"""
id = self.get_cell_id()
return encode_response(worksheet.get_cell_with_id(id).basic())
@worksheet_command('cell_list')
def worksheet_cell_list(self, worksheet):
"""
Return a list of cells in JSON format.
"""
r = {}
r['state_number'] = worksheet.state_number()
r['cell_list'] = [c.basic() for c in worksheet.cell_list()]
return encode_response(r)
########################################################
# Set output type of a cell
########################################################
@worksheet_command('set_cell_output_type')
def worksheet_set_cell_output_type(self, worksheet):
"""
Set the output type of the cell.
This controls how the cell's output is rendered, for example allowing
wrapping for output that is very long.
"""
id = self.get_cell_id()
type = self.request_values['type']
worksheet.get_cell_with_id(id).set_cell_output_type(type)
return ''
########################################################
#Cell creation
########################################################
@worksheet_command('new_cell_before')
def worksheet_new_cell_before(self, worksheet):
"""Add a new cell before a given cell."""
r = {}
r['id'] = id = self.get_cell_id()
input = unicode_str(self.request_values.get('input', ''))
cell = worksheet.new_cell_before(id, input=input)
worksheet.increase_state_number()
r['new_id'] = cell.id()
#r['new_html'] = cell.html(div_wrap=False)
return encode_response(r)
@worksheet_command('new_text_cell_before')
def worksheet_new_text_cell_before(self, worksheet):
"""Add a new text cell before a given cell."""
r = {}
r['id'] = id = self.get_cell_id()
input = unicode_str(self.request_values.get('input', ''))
cell = worksheet.new_text_cell_before(id, input=input)
worksheet.increase_state_number()
r['new_id'] = cell.id()
#r['new_html'] = cell.html(editing=True)
# XXX: Does editing correspond to TinyMCE? If so, we should try
# to centralize that code.
return encode_response(r)
@worksheet_command('new_cell_after')
def worksheet_new_cell_after(self, worksheet):
"""Add a new cell after a given cell."""
r = {}
r['id'] = id = self.get_cell_id()
input = unicode_str(self.request_values.get('input', ''))
cell = worksheet.new_cell_after(id, input=input)
worksheet.increase_state_number()
r['new_id'] = cell.id()
#r['new_html'] = cell.html(div_wrap=True)
return encode_response(r)
@worksheet_command('new_text_cell_after')
def worksheet_new_text_cell_after(self, worksheet):
"""Add a new text cell after a given cell."""
r = {}
r['id'] = id = self.get_cell_id()
input = unicode_str(self.request_values.get('input', ''))
cell = worksheet.new_text_cell_after(id, input=input)
worksheet.increase_state_number()
r['new_id'] = cell.id()
#r['new_html'] = cell.html(editing=True)
# XXX: Does editing correspond to TinyMCE? If so, we should try
# to centralize that code.
return encode_response(r)
########################################################
# Cell deletion
########################################################
@worksheet_command('delete_cell')
def worksheet_delete_cell(self, worksheet):
"""
Deletes a worksheet cell, unless there's only one compute cell
left. This allows functions which evaluate relative to existing
cells, e.g., inserting a new cell, to continue to work.
"""
r = {}
r['id'] = id = self.get_cell_id()
if len(worksheet.compute_cell_id_list()) <= 1:
r['command'] = 'ignore'
else:
prev_id = worksheet.delete_cell_with_id(id)
r['command'] = 'delete'
r['prev_id'] = prev_id
r['cell_id_list'] = worksheet.cell_id_list()
return encode_response(r)
@worksheet_command('delete_cell_output')
def worksheet_delete_cell_output(self, worksheet):
"""Delete's a cell's output."""
r = {}
r['id'] = id = self.get_cell_id()
worksheet.get_cell_with_id(id).delete_output()
r['command'] = 'delete_output'
return encode_response(r)
########################################################
# Evaluation and cell update
########################################################
@worksheet_command('eval')
def worksheet_eval(self, worksheet):
"""
Evaluate a worksheet cell.
If the request is not authorized (the requester did not enter the
correct password for the given worksheet), then the request to
evaluate or introspect the cell is ignored.
If the cell contains either 1 or 2 question marks at the end (not
on a comment line), then this is interpreted as a request for
either introspection to the documentation of the function, or the
documentation of the function and the source code of the function
respectively.
"""
r = {}
r['id'] = id = self.get_cell_id()
cell = worksheet.get_cell_with_id(id)
public = worksheet.tags().get('_pub_', [False])[0] #this is set in pub_worksheet
if public and not cell.is_interactive_cell():
r['command'] = 'error'
r['message'] = 'Cannot evaluate non-interactive public cell with ID %r.' % id
return encode_response(r)
worksheet.increase_state_number()
if public:
# Make public input cells read-only.
input_text = cell.input_text()
else:
input_text = unicode_str(self.request_values.get('input', '')).replace('\r\n', '\n') #DOS
# Handle an updated / recomputed interact. TODO: JSON encode
# the update data.
if 'interact' in self.request_values:
r['interact'] = 1
input_text = INTERACT_UPDATE_PREFIX
variable = self.request_values.get('variable', '')
if variable!='':
adapt_number = int(self.request_values.get('adapt_number', -1))
value = self.request_values.get('value', '')
input_text += "\n_interact_.update('%s', '%s', %s, _interact_.standard_b64decode('%s'), globals())" % (id, variable, adapt_number, value)
if int(self.request_values.get('recompute', 0)):
input_text += "\n_interact_.recompute('%s')" % id
cell.set_input_text(input_text)
if int(self.request_values.get('save_only', '0')):
self.notebook_updates()
return encode_response(r)
elif int(self.request_values.get('text_only', '0')):
self.notebook_updates()
r['cell_html'] = cell.html()
return encode_response(r)
cell.evaluate(username=self.notebook_username)
new_cell = int(self.request_values.get('newcell', 0)) #whether to insert a new cell or not
if new_cell:
new_cell = worksheet.new_cell_after(id)
r['command'] = 'insert_cell'
r['new_cell_id'] = new_cell.id()
r['new_cell_html'] = new_cell.html(div_wrap=False)
else:
r['next_id'] = cell.next_compute_id()
self.notebook_updates()
return encode_response(r)
@worksheet_command('cell_update')
def worksheet_cell_update(self, worksheet):
import time
r = {}
r['id'] = id = self.get_cell_id()
# update the computation one "step".
worksheet.check_comp()
# now get latest status on our cell
r['status'], cell = worksheet.check_cell(id)
if r['status'] == 'd':
r['new_input'] = cell.changed_input_text()
r['output_html'] = cell.output_html()
# Update the log.
t = time.strftime('%Y-%m-%d at %H:%M',
time.localtime(time.time()))
H = "Worksheet '%s' (%s)\n" % (worksheet.name(), t)
H += cell.edit_text(ncols=self.notebook.HISTORY_NCOLS, prompts=False,
max_out=self.notebook.HISTORY_MAX_OUTPUT)
self.notebook.add_to_user_history(H, self.notebook_username)
else:
r['new_input'] = ''
r['output_html'] = ''
r['interrupted'] = cell.interrupted()
if 'Unhandled SIGSEGV' in cell.output_text(raw=True).split('\n'):
r['interrupted'] = 'restart'
print 'Segmentation fault detected in output!'
r['output'] = cell.output_text(html=True)
r['output_wrapped'] = cell.output_text(self.notebook.conf()['word_wrap_cols'])
r['introspect_output'] = cell.introspect_output()
# Compute 'em, if we got 'em.
worksheet.start_next_comp()
return encode_response(r)
########################################################
# Cell introspection
########################################################
@worksheet_command('introspect')
def worksheet_introspect(self, worksheet):
"""
Cell introspection. This is called when the user presses the tab
key in the browser in order to introspect.
"""
r = {}
r['id'] = id = self.get_cell_id()
if worksheet.tags().get('_pub_', [False])[0]: #tags set in pub_worksheet
r['command'] = 'error'
r['message'] = 'Cannot evaluate public cell introspection.'
return encode_response(r)
before_cursor = self.request_values.get('before_cursor', '')
after_cursor = self.request_values.get('after_cursor', '')
cell = worksheet.get_cell_with_id(id)
cell.evaluate(introspect=[before_cursor, after_cursor])
r['command'] = 'introspect'
return encode_response(r)
########################################################
# Edit the entire worksheet
########################################################
@worksheet_command('edit')
def worksheet_edit(self, worksheet):
"""
Return a window that allows the user to edit the text of the
worksheet with the given filename.
"""
return render_template(os.path.join("html", "worksheet_edit.html"),
worksheet = worksheet,
username = self.notebook_username)
########################################################
# Plain text log view of worksheet
########################################################
@worksheet_command('text')
def worksheet_text(self, worksheet):
"""
Return a window that allows the user to edit the text of the
worksheet with the given filename.
"""
from cgi import escape
plain_text = worksheet.plain_text(prompts=True, banner=False)
plain_text = escape(plain_text).strip()
return render_template(os.path.join("html", "worksheet_text.html"),
username = self.notebook_username,
plain_text = plain_text)
########################################################
# Copy a worksheet
########################################################
@worksheet_command('copy')
def worksheet_copy(self, worksheet):
copy = self.notebook.copy_worksheet(worksheet, self.notebook_username)
if 'no_load' in self.request_values:
return ''
else:
return redirect(url_for_worksheet(copy))
########################################################
# Get a copy of a published worksheet and start editing it
########################################################
@worksheet_command('edit_published_page')
def worksheet_edit_published_page(self, worksheet):
## if user_type(self.username) == 'guest':
## return current_app.message('You must <a href="/">login first</a> in order to edit this worksheet.')
ws = worksheet.worksheet_that_was_published()
if ws.owner() == self.notebook_username:
W = ws
else:
W = self.notebook.copy_worksheet(worksheet, self.notebook_username)
W.set_name(worksheet.name())
return redirect(url_for_worksheet(W))
########################################################
# Collaborate with others
########################################################
@worksheet_command('invite_collab')
def worksheet_invite_collab(self, worksheet):
owner = worksheet.owner()
id_number = worksheet.id_number()
old_collaborators = set(worksheet.collaborators())
collaborators = set([u.strip() for u in self.request_values.get('collaborators', '').split(',') if u!=owner])
if len(collaborators-old_collaborators)>500:
# to prevent abuse, you can't add more than 500 collaborators at a time
return current_app.message(_("Error: can't add more than 500 collaborators at a time"), cont=url_for_worksheet(worksheet))
worksheet.set_collaborators(collaborators)
user_manager = self.notebook.user_manager()
# add worksheet to new collaborators
for u in collaborators-old_collaborators:
try:
user_manager.user(u).viewable_worksheets().add((owner, id_number))
except KeyError:
# user doesn't exist
pass
# remove worksheet from ex-collaborators
for u in old_collaborators-collaborators:
try:
user_manager.user(u).viewable_worksheets().discard((owner, id_number))
except KeyError:
# user doesn't exist
pass
return ''
########################################################
# Revisions
########################################################
# TODO take out or implement
@worksheet_command('revisions')
def worksheet_revisions(self, worksheet):
"""
Show a list of revisions of this worksheet.
"""
if 'action' not in self.request_values:
if 'rev' in self.request_values:
return self.notebook.html_specific_revision(self.notebook_username, worksheet,
self.request_values['rev'])
else:
return self.notebook.html_worksheet_revision_list(self.notebook_username, worksheet)
else:
rev = self.request_values['rev']
action = self.request_values['action']
if action == 'revert':
import bz2
worksheet.save_snapshot(self.notebook_username)
#XXX: Requires access to filesystem
txt = bz2.decompress(open(worksheet.get_snapshot_text_filename(rev)).read())
worksheet.delete_cells_directory()
worksheet.edit_save(txt)
return redirect(url_for_worksheet(worksheet))
elif action == 'publish':
import bz2
W = self.notebook.publish_worksheet(worksheet, self.notebook_username)
txt = bz2.decompress(open(worksheet.get_snapshot_text_filename(rev)).read())
W.delete_cells_directory()
W.edit_save(txt)
return redirect(url_for_worksheet(W))
else:
return current_app.message(_('Error'))
########################################################
# Cell directories
########################################################
@worksheet_command('cells/<path:filename>')
def worksheet_cells(self, worksheet, filename):
#XXX: This requires that the worker filesystem be accessible from
#the server.
from flask.helpers import send_from_directory
return send_from_directory(worksheet.cells_directory(), filename)
##############################################
# Data
##############################################
@worksheet_command('data/<path:filename>')
def worksheed_data_folder(self, worksheet, filename):
dir = os.path.abspath(worksheet.data_directory())
if not os.path.exists(dir):
return make_response(_('No data file'), 404)
else:
from flask.helpers import send_from_directory
return send_from_directory(worksheet.data_directory(), filename)
@worksheet_command('delete_datafile')
def worksheet_delete_datafile(self, worksheet):
dir = os.path.abspath(worksheet.data_directory())
filename = self.request_values['name']
path = os.path.join(dir, filename)
os.unlink(path)
return ''
@worksheet_command('edit_datafile/<path:filename>')
def worksheet_edit_datafile(self, worksheet, filename):
ext = os.path.splitext(filename)[1].lower()
file_is_image, file_is_text = False, False
text_file_content = ""
path = "/home/%s/data/%s" % (worksheet.filename(), filename)
if ext in ['.png', '.jpg', '.gif']:
file_is_image = True
if ext in ['.txt', '.tex', '.sage', '.spyx', '.py', '.f', '.f90', '.c']:
file_is_text = True
text_file_content = open(os.path.join(worksheet.data_directory(), filename)).read()
return render_template(os.path.join("html", "datafile_edit.html"),
worksheet = worksheet,
username = self.notebook_username,
filename_ = filename,
file_is_image = file_is_image,
file_is_text = file_is_text,
text_file_content = text_file_content,
path = path)
@worksheet_command('save_datafile')
def worksheet_save_datafile(self, worksheet):
filename = self.request_values['filename']
if 'button_save' in self.request_values:
text_field = self.request_values['textfield']
dest = os.path.join(worksheet.data_directory(), filename) #XXX: Requires access to filesystem
if os.path.exists(dest):
os.unlink(dest)
open(dest, 'w').write(text_field)
print 'saving datafile, redirect'
return redirect(url_for_worksheet(worksheet))
# @worksheet_command('link_datafile')
# def worksheet_link_datafile(self, worksheet):
# target_worksheet_filename = self.request_values['target']
# data_filename = self.request_values['filename']
# src = os.path.abspath(os.path.join(
# worksheet.data_directory(), data_filename))
# target_ws = self.notebook.get_worksheet_with_filename(target_worksheet_filename)
# target = os.path.abspath(os.path.join(
# target_ws.data_directory(), data_filename))
# if target_ws.owner() != self.notebook_username and not target_ws.is_collaborator(self.notebook_username):
# return current_app.message(_("illegal link attempt!"), worksheet_datafile.url_for(worksheet, name=data_filename))
# if os.path.exists(target):
# return current_app.message(_("The data filename already exists in other worksheet\nDelete the file in the other worksheet before creating a link."), worksheet_datafile.url_for(worksheet, name=data_filename))
# os.link(src,target)
# return redirect(worksheet_datafile.url_for(worksheet, name=data_filename))
# #return redirect(url_for_worksheet(target_ws) + '/datafile?name=%s'%data_filename) #XXX: Can we not hardcode this?
@worksheet_command('upload_datafile')
def worksheet_upload_datafile(self, worksheet):
from werkzeug.utils import secure_filename
file = request.files['file']
name = self.request_values.get('name', '').strip() or file.filename
name = secure_filename(name)
#XXX: disk access
dest = os.path.join(worksheet.data_directory(), name)
if os.path.exists(dest):
if not os.path.isfile(dest):
return _('Suspicious filename encountered uploading file.')
os.unlink(dest)
file.save(dest)
return ''
@worksheet_command('datafile_from_url')
def worksheet_datafile_from_url(self, worksheet):
from werkzeug.utils import secure_filename
name = self.request_values.get('name', '').strip()
url = self.request_values.get('url', '').strip()
if url and not name:
name = url.split('/')[-1]
name = secure_filename(name)
import urllib2
from urlparse import urlparse
# we normalize the url by parsing it first
parsedurl = urlparse(url)
if parsedurl[0] not in ('http', 'https', 'ftp'):
return _('URL must start with http, https, or ftp.')
download = urllib2.urlopen(parsedurl.geturl())
dest = os.path.join(worksheet.data_directory(), name)
if os.path.exists(dest):
if not os.path.isfile(dest):
return _('Suspicious filename encountered uploading file.')
os.unlink(dest)
import re
matches = re.match("file://(?:localhost)?(/.+)", url)
if matches:
f = file(dest, 'wb')
f.write(open(matches.group(1)).read())
f.close()
return ''
with open(dest, 'w') as f:
f.write(download.read())
return ''
@worksheet_command('new_datafile')
def worksheet_new_datafile(self, worksheet):
from werkzeug.utils import secure_filename
name = self.request_values.get('new', '').strip()
name = secure_filename(name)
#XXX: disk access
dest = os.path.join(worksheet.data_directory(), name)
if os.path.exists(dest):
if not os.path.isfile(dest):
return _('Suspicious filename encountered uploading file.')
os.unlink(dest)
open(dest, 'w').close()
return ''
################################
#Publishing
################################
@worksheet_command('publish')
def worksheet_publish(self, worksheet):
"""
This provides a frontend to the management of worksheet
publication. This management functionality includes
initialization of publication, re-publication, automated
publication when a worksheet is saved, and ending of publication.
"""
if 'publish_on' in self.request_values:
self.notebook.publish_worksheet(worksheet, self.notebook_username)
if 'publish_off' in self.request_values and worksheet.has_published_version():
self.notebook.delete_worksheet(worksheet.published_version().filename())
if 'auto_on' in self.request_values:
worksheet.set_auto_publish(True)
if 'auto_off' in self.request_values:
worksheet.set_auto_publish(False)
if 'is_auto' in self.request_values:
return str(worksheet.is_auto_publish())
if 'republish' in self.request_values:
self.notebook.publish_worksheet(worksheet, self.notebook_username)
return ''
############################################
# Ratings
############################################
# @worksheet_command('rating_info')
# def worksheet_rating_info(worksheet):
# return worksheet.html_ratings_info()
# @worksheet_command('rate')
# def worksheet_rate(worksheet):
# ## if user_type(self.username) == "guest":
# ## return HTMLResponse(stream = message(
# ## 'You must <a href="/">login first</a> in order to rate this worksheet.', ret))
# rating = int(self.request_values['rating'])
# if rating < 0 or rating >= 5:
# return current_app.messge("Gees -- You can't fool the rating system that easily!",
# url_for_worksheet(worksheet))
# comment = self.request_values['comment']
# worksheet.rate(rating, comment, self.notebook_username)
# s = """
# Thank you for rating the worksheet <b><i>%s</i></b>!
# You can <a href="rating_info">see all ratings of this worksheet.</a>
# """%(worksheet.name())
# #XXX: Hardcoded url
# return current_app.message(s.strip(), '/pub/', title=u'Rating Accepted')
########################################################
# Downloading, moving around, renaming, etc.
########################################################
@worksheet_command('download/<path:title>')
def worksheet_download(self, worksheet, filename):
try:
#XXX: Accessing the hard disk.
self.notebook.export_worksheet(worksheet.filename(), filename)
except KeyError:
print 'No such worksheet.'
@worksheet_command('restart_sage')
def worksheet_restart_sage(self, worksheet):
#XXX: TODO -- this must not block long (!)
worksheet.restart_sage()
return 'done'
@worksheet_command('quit_sage')
def worksheet_quit_sage(self, worksheet):
#XXX: TODO -- this must not block long (!)
worksheet.quit()
return 'done'
@worksheet_command('interrupt')
def worksheet_interrupt(self, worksheet):
#XXX: TODO -- this must not block long (!)
worksheet.sage().interrupt()
return 'failed' if worksheet.sage().is_computing() else 'success'
@worksheet_command('hide_all')
def worksheet_hide_all(self, worksheet):
worksheet.hide_all()
return 'success'
@worksheet_command('show_all')
def worksheet_show_all(self, worksheet):
worksheet.show_all()
return 'success'
@worksheet_command('delete_all_output')
def worksheet_delete_all_output(self, worksheet):
try:
worksheet.delete_all_output(self.notebook_username)
except ValueError:
return 'fail'
else:
return 'success'
@worksheet_command('print')
def worksheet_print(self, worksheet):
#XXX: We might want to separate the printing template from the
#regular html template.
return self.notebook.html(worksheet.filename(), do_print=True)
#######################################################
# Jmol Popup
#######################################################
#@ws.route('/home/<username>/<id>/jmol_popup.html', methods=['GET'])
#@login_required
def jmol_popup(username, id):
return render_template(os.path.join('html', 'jmol_popup.html'))
############################
# Notebook autosave.
############################
# save if make a change to notebook and at least some seconds have elapsed since last save.
def init_updates(self):
self.save_interval = self.notebook.conf()['save_interval']
self.idle_interval = self.notebook.conf()['idle_check_interval']
self.last_save_time = walltime()
self.last_idle_time = walltime()
def notebook_save_check(self):
t = walltime()
if t > self.last_save_time + self.save_interval:
with global_lock:
# if someone got the lock before we did, they might have saved,
# so we check against the last_save_time again
# we don't put the global_lock around the outer loop since we don't need
# it unless we are actually thinking about saving.
if t > self.last_save_time + self.save_interval:
self.notebook.save()
self.last_save_time = t
def notebook_idle_check(self):
t = walltime()
if t > self.last_idle_time + self.idle_interval:
if t > self.last_idle_time + self.idle_interval:
self.notebook.update_worksheet_processes()
self.notebook.quit_idle_worksheet_processes()
self.last_idle_time = t
def notebook_updates(self):
self.notebook_save_check()
#Guru does not quit idle worksheet processes.
#self.notebook_idle_check() | mit | 3,556,050,928,706,078,700 | 38.453321 | 221 | 0.574881 | false |
marguerite/susews-planet | libs/jinja/jinja2/ext.py | 5 | 21054 | # -*- coding: utf-8 -*-
"""
jinja2.ext
~~~~~~~~~~
Jinja extensions allow adding custom tags similar to the way Django custom
tags work. By default two example extensions exist: an i18n and a cache
extension.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from collections import deque
from jinja2 import nodes
from jinja2.defaults import *
from jinja2.environment import get_spontaneous_environment
from jinja2.runtime import Undefined, concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup, next
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
rv.identifier = rv.__module__ + '.' + rv.__name__
return rv
class Extension(object):
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
this is that an extension can be bound to another environment (for
overlays) by creating a copy and reassigning the `environment` attribute.
As extensions are created by the environment they cannot accept any
arguments for configuration. One may want to work around that by using
a factory function, but that is not possible as extensions are identified
by their import name. The correct way to configure the extension is
storing the configuration values on the environment. Because this way the
environment ends up acting as central configuration storage the
attributes may clash which is why extensions have to ensure that the names
they choose for configuration are not too generic. ``prefix`` for example
is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
name as it includes the name of the extension (fragment cache).
"""
__metaclass__ = ExtensionRegistry
#: if this extension parses this is the list of tags it's listening to.
tags = set()
def __init__(self, environment):
self.environment = environment
def bind(self, environment):
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
def preprocess(self, source, name, filename=None):
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
def filter_stream(self, stream):
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
In the `ext` folder of the Jinja2 source distribution there is a file
called `inlinegettext.py` which implements a filter that utilizes this
method.
"""
return stream
def parse(self, parser):
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
list of multiple nodes.
"""
raise NotImplementedError()
def attr(self, name, lineno=None):
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code::
self.attr('_my_attribute', lineno=lineno)
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(self, name, args=None, kwargs=None, dyn_args=None,
dyn_kwargs=None, lineno=None):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
dyn_args, dyn_kwargs, lineno=lineno)
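# A minimal sketch of a custom extension built on the hooks above (the tag
# name and node construction are illustrative assumptions, not part of
# Jinja2 itself):
#
#   class UpperExtension(Extension):
#       tags = set(['upper'])
#
#       def parse(self, parser):
#           lineno = next(parser.stream).lineno
#           body = parser.parse_statements(['name:endupper'], drop_needle=True)
#           # wrap the block's output in a call back into the extension
#           return nodes.CallBlock(self.call_method('_render_upper'),
#                                  [], [], body).set_lineno(lineno)
#
#       def _render_upper(self, caller):
#           return caller().upper()
#
# An environment would enable it with Environment(extensions=[UpperExtension])
# and templates could then write {% upper %}...{% endupper %}.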
@contextfunction
def _gettext_alias(context, string):
return context.resolve('gettext')(string)
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja2."""
tags = set(['trans'])
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
# %}{{ count }} fooss{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment):
Extension.__init__(self, environment)
environment.globals['_'] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract
)
def _install(self, translations):
gettext = getattr(translations, 'ugettext', None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, 'ungettext', None)
if ngettext is None:
ngettext = translations.ngettext
self.environment.globals.update(gettext=gettext, ngettext=ngettext)
def _install_null(self):
self.environment.globals.update(
gettext=lambda x: x,
ngettext=lambda s, p, n: (n != 1 and (p,) or (s,))[0]
)
def _uninstall(self, translations):
for key in 'gettext', 'ngettext':
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
if isinstance(source, basestring):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
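# Hedged usage sketch (environment setup and template string are made up, not
# taken from this file): the helpers registered in __init__ above become
# environment methods, e.g.
#   env = Environment(extensions=['jinja2.ext.i18n'])
#   env.install_null_translations()
#   # env.extract_translations('{{ _("Hello") }}') yields tuples of
#   # (lineno, function_name, message), e.g. (1, '_', 'Hello')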
def parse(self, parser):
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later state.
plural_expr = None
variables = {}
while parser.stream.current.type != 'block_end':
if variables:
parser.stream.expect('comma')
# skip colon for python compatibility
if parser.stream.skip_if('colon'):
break
name = parser.stream.expect('name')
if name.value in variables:
parser.fail('translatable variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
# expressions
if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
else:
variables[name.value] = var = nodes.Name(name.value, 'load')
if plural_expr is None:
plural_expr = var
parser.stream.expect('block_end')
plural = plural_names = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], 'load')
# if we have a pluralize block, we parse that too
if parser.stream.current.test('name:pluralize'):
have_plural = True
next(parser.stream)
if parser.stream.current.type != 'block_end':
name = parser.stream.expect('name')
if name.value not in variables:
parser.fail('unknown variable %r for pluralization' %
name.value, name.lineno,
exc=TemplateAssertionError)
plural_expr = variables[name.value]
parser.stream.expect('block_end')
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for var in referenced:
if var not in variables:
variables[var] = nodes.Name(var, 'load')
# no variables referenced? no need to escape
if not referenced:
singular = singular.replace('%%', '%')
if plural:
plural = plural.replace('%%', '%')
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail('pluralize without variables', lineno)
if variables:
variables = nodes.Dict([nodes.Pair(nodes.Const(x, lineno=lineno), y)
for x, y in variables.items()])
else:
variables = None
node = self._make_node(singular, plural, variables, plural_expr)
node.set_lineno(lineno)
return node
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf)
def _make_node(self, singular, plural, variables, plural_expr):
"""Generates a useful node from the data provided."""
# singular only:
if plural_expr is None:
gettext = nodes.Name('gettext', 'load')
node = nodes.Call(gettext, [nodes.Const(singular)],
[], None, None)
# singular and plural
else:
ngettext = nodes.Name('ngettext', 'load')
node = nodes.Call(ngettext, [
nodes.Const(singular),
nodes.Const(plural),
plural_expr
], [], None, None)
# mark the return value as safe if we are in an
# environment with autoescaping turned on
if self.environment.autoescape:
node = nodes.MarkSafe(node)
if variables:
node = nodes.Mod(node, variables)
return nodes.Output([node])
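def _i18n_usage_example():
    """Hedged example (assumes the public jinja2.Environment API): enable the
    i18n extension and install the no-op translations registered by extend()
    in the constructor above."""
    from jinja2 import Environment
    env = Environment(extensions=['jinja2.ext.i18n'])
    env.install_null_translations()
    return env.from_string('{% trans %}Hello World{% endtrans %}').render()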
class ExprStmtExtension(Extension):
"""Adds a `do` tag to Jinja2 that works like the print statement just
that it doesn't print the return value.
"""
tags = set(['do'])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
tags = set(['break', 'continue'])
def parse(self, parser):
token = next(parser.stream)
if token.value == 'break':
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
class WithExtension(Extension):
"""Adds support for a django-like with block."""
tags = set(['with'])
def parse(self, parser):
node = nodes.Scope(lineno=next(parser.stream).lineno)
assignments = []
while parser.stream.current.type != 'block_end':
lineno = parser.stream.current.lineno
if assignments:
parser.stream.expect('comma')
target = parser.parse_assign_target()
parser.stream.expect('assign')
expr = parser.parse_expression()
assignments.append(nodes.Assign(target, expr, lineno=lineno))
node.body = assignments + \
list(parser.parse_statements(('name:endwith',),
drop_needle=True))
return node
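def _builtin_extensions_example():
    """Hedged example (assumes the public jinja2.Environment API): the do,
    loopcontrols and with_ extensions defined above are normally enabled by
    their import paths."""
    from jinja2 import Environment
    env = Environment(extensions=['jinja2.ext.do', 'jinja2.ext.loopcontrols',
                                  'jinja2.ext.with_'])
    tmpl = env.from_string('{% for x in range(5) %}{% if x == 2 %}'
                           '{% break %}{% endif %}{{ x }}{% endfor %}')
    return tmpl.render()  # expected: '01'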
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
allows Babel to figure out what you really meant if you are using
gettext functions that allow keyword arguments for placeholder expansion.
If you don't want that behavior set the `babel_style` parameter to `False`
which causes only strings to be returned and parameters are always stored
in tuples. As a consequence invalid gettext calls (calls without a single
string parameter or string parameters after non-string parameters) are
skipped.
This example explains the behavior:
>>> from jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a ``(lineno, function,
message)`` tuple, where:
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
* ``message`` is the string itself (a ``unicode`` object, or a tuple
of ``unicode`` objects for functions with multiple string arguments).
    Because this extraction function operates on the AST, it is unable
    to extract any comments.  For comment support you have to use the babel
extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
if not isinstance(node.node, nodes.Name) or \
node.node.name not in gettext_functions:
continue
strings = []
for arg in node.args:
if isinstance(arg, nodes.Const) and \
isinstance(arg.value, basestring):
strings.append(arg.value)
else:
strings.append(None)
for arg in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
if node.dyn_kwargs is not None:
strings.append(None)
if not babel_style:
strings = tuple(x for x in strings if x is not None)
if not strings:
continue
else:
if len(strings) == 1:
strings = strings[0]
else:
strings = tuple(strings)
yield node.lineno, node.node.name, strings
class _CommentFinder(object):
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(self, tokens, comment_tags):
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset):
try:
for _, token_type, token_value in \
reversed(self.tokens[self.offset:offset]):
if token_type in ('comment', 'linecomment'):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
       try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions = set()
for extension in options.get('extensions', '').split(','):
extension = extension.strip()
if not extension:
continue
extensions.add(import_string(extension))
if InternationalizationExtension not in extensions:
extensions.add(InternationalizationExtension)
environment = get_spontaneous_environment(
options.get('block_start_string', BLOCK_START_STRING),
options.get('block_end_string', BLOCK_END_STRING),
options.get('variable_start_string', VARIABLE_START_STRING),
options.get('variable_end_string', VARIABLE_END_STRING),
options.get('comment_start_string', COMMENT_START_STRING),
options.get('comment_end_string', COMMENT_END_STRING),
options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
str(options.get('trim_blocks', TRIM_BLOCKS)).lower() in \
('1', 'on', 'yes', 'true'),
NEWLINE_SEQUENCE, frozenset(extensions),
# fill with defaults so that environments are shared
# with other spontaneus environments. The rest of the
# arguments are optimizer, undefined, finalize, autoescape,
# loader, cache size, auto reloading setting and the
# bytecode cache
True, Undefined, None, False, None, 0, False, None
)
source = fileobj.read().decode(options.get('encoding', 'utf-8'))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError, e:
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno)
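def _babel_extract_example():
    """Hedged example (hypothetical in-memory template): calling babel_extract
    directly, the way Babel's extraction front end does through the "jinja2"
    entry point.  Returns (lineno, funcname, message, comments) tuples."""
    from StringIO import StringIO
    fileobj = StringIO('{# NOTE: greeting shown on login #}\n{{ _("Hello") }}')
    return list(babel_extract(fileobj, ('gettext', 'ngettext', '_'),
                              ['NOTE'], {}))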
#: nicer import names
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
| gpl-2.0 | -3,613,898,787,326,348,300 | 38.649718 | 80 | 0.61176 | false |
en0/Supybot_sasl | plugins/Owner/test.py | 6 | 4002 | ###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import supybot.conf as conf
import supybot.plugin as plugin
class OwnerTestCase(PluginTestCase):
# Defaults, but hey, I'm cool.
plugins = ('Owner', 'Config', 'Misc', 'Admin')
def testHelpLog(self):
self.assertHelp('help log')
def testSrcAmbiguity(self):
self.assertError('capability add foo bar')
def testIrcquote(self):
self.assertResponse('ircquote PRIVMSG %s :foo' % self.irc.nick, 'foo')
def testFlush(self):
self.assertNotError('flush')
def testUpkeep(self):
self.assertNotError('upkeep')
def testLoad(self):
self.assertError('load Owner')
self.assertError('load owner')
self.assertNotError('load Channel')
self.assertNotError('list Owner')
def testReload(self):
self.assertError('reload Channel')
self.assertNotError('load Channel')
self.assertNotError('reload Channel')
self.assertNotError('reload Channel')
def testUnload(self):
self.assertError('unload Foobar')
self.assertNotError('load Channel')
self.assertNotError('unload Channel')
self.assertError('unload Channel')
self.assertNotError('load Channel')
self.assertNotError('unload CHANNEL')
def testDisable(self):
self.assertError('disable enable')
self.assertError('disable identify')
def testEnable(self):
self.assertError('enable enable')
def testEnableIsCaseInsensitive(self):
self.assertNotError('disable Foo')
self.assertNotError('enable foo')
def testRename(self):
self.assertError('rename Admin ignore IGNORE')
self.assertError('rename Admin ignore ig-nore')
self.assertNotError('rename Admin "capability remove" rmcap')
self.assertNotRegexp('list Admin', 'capability remove')
self.assertRegexp('list Admin', 'rmcap')
self.assertNotError('reload Admin')
self.assertNotRegexp('list Admin', 'capability remove')
self.assertRegexp('list Admin', 'rmcap')
self.assertNotError('unrename Admin')
self.assertRegexp('list Admin', 'capability remove')
self.assertNotRegexp('list Admin', 'rmcap')
def testDefaultPluginErrorsWhenCommandNotInPlugin(self):
self.assertError('defaultplugin foobar owner')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause | -6,746,866,300,462,572,000 | 38.623762 | 79 | 0.710895 | false |
mirrorcoloured/slcypi | HAT_Python3/Adafruit_I2C.py | 1 | 5520 | #!/usr/bin/python
import re
import smbus
# ===========================================================================
# Adafruit_I2C Class
# ===========================================================================
class Adafruit_I2C(object):
@staticmethod
def getPiRevision():
"Gets the version number of the Raspberry Pi board"
# Revision list available at: http://elinux.org/RPi_HardwareHistory#Board_Revision_History
try:
with open('/proc/cpuinfo', 'r') as infile:
for line in infile:
# Match a line of the form "Revision : 0002" while ignoring extra
          # info in front of the revision (like 1000 when the Pi was over-volted).
match = re.match('Revision\s+:\s+.*(\w{4})$', line)
if match and match.group(1) in ['0000', '0002', '0003']:
# Return revision 1 if revision ends with 0000, 0002 or 0003.
return 1
elif match:
# Assume revision 2 if revision ends with any other 4 chars.
return 2
# Couldn't find the revision, assume revision 0 like older code for compatibility.
return 0
except:
return 0
@staticmethod
def getPiI2CBusNumber():
# Gets the I2C bus number /dev/i2c#
return 1 if Adafruit_I2C.getPiRevision() > 1 else 0
def __init__(self, address, busnum=-1, debug=False):
self.address = address
# By default, the correct I2C bus is auto-detected using /proc/cpuinfo
# Alternatively, you can hard-code the bus version below:
# self.bus = smbus.SMBus(0); # Force I2C0 (early 256MB Pi's)
# self.bus = smbus.SMBus(1); # Force I2C1 (512MB Pi's)
self.bus = smbus.SMBus(busnum if busnum >= 0 else Adafruit_I2C.getPiI2CBusNumber())
self.debug = debug
def reverseByteOrder(self, data):
"Reverses the byte order of an int (16-bit) or long (32-bit) value"
# Courtesy Vishal Sapre
byteCount = len(hex(data)[2:].replace('L','')[::2])
val = 0
for i in range(byteCount):
val = (val << 8) | (data & 0xff)
data >>= 8
return val
def errMsg(self):
print("Error accessing 0x%02X: Check your I2C address" % self.address)
return -1
def write8(self, reg, value):
"Writes an 8-bit value to the specified register/address"
try:
self.bus.write_byte_data(self.address, reg, value)
if self.debug:
print("I2C: Wrote 0x%02X to register 0x%02X" % (value, reg))
    except IOError as err:
return self.errMsg()
def write16(self, reg, value):
"Writes a 16-bit value to the specified register/address pair"
try:
self.bus.write_word_data(self.address, reg, value)
if self.debug:
print ("I2C: Wrote 0x%02X to register pair 0x%02X,0x%02X" % (value, reg, reg+1))
    except IOError as err:
return self.errMsg()
def writeRaw8(self, value):
"Writes an 8-bit value on the bus"
try:
self.bus.write_byte(self.address, value)
if self.debug:
print("I2C: Wrote 0x%02X" % value)
    except IOError as err:
return self.errMsg()
def writeList(self, reg, list):
"Writes an array of bytes using I2C format"
try:
if self.debug:
print("I2C: Writing list to register 0x%02X:" % reg)
print(list)
self.bus.write_i2c_block_data(self.address, reg, list)
    except IOError as err:
return self.errMsg()
def readList(self, reg, length):
"Read a list of bytes from the I2C device"
try:
results = self.bus.read_i2c_block_data(self.address, reg, length)
if self.debug:
print ("I2C: Device 0x%02X returned the following from reg 0x%02X" %
(self.address, reg))
print(results)
return results
    except IOError as err:
return self.errMsg()
def readU8(self, reg):
"Read an unsigned byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
if self.debug:
print ("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
(self.address, result & 0xFF, reg))
return result
    except IOError as err:
return self.errMsg()
def readS8(self, reg):
"Reads a signed byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
if result > 127: result -= 256
if self.debug:
print ("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
(self.address, result & 0xFF, reg))
return result
    except IOError as err:
return self.errMsg()
def readU16(self, reg, little_endian=True):
"Reads an unsigned 16-bit value from the I2C device"
try:
result = self.bus.read_word_data(self.address,reg)
# Swap bytes if using big endian because read_word_data assumes little
# endian on ARM (little endian) systems.
if not little_endian:
result = ((result << 8) & 0xFF00) + (result >> 8)
if (self.debug):
print("I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" %
(self.address, result & 0xFFFF, reg))
return result
    except IOError as err:
return self.errMsg()
def readS16(self, reg, little_endian=True):
"Reads a signed 16-bit value from the I2C device"
try:
result = self.readU16(reg,little_endian)
if result > 32767: result -= 65536
return result
    except IOError as err:
return self.errMsg()
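# Illustrative sketch only: the address (0x48) and register map (0x00/0x01)
# below are hypothetical and depend entirely on the attached I2C device.
def example_usage():
  "Requires a Raspberry Pi with smbus and a real I2C device wired up"
  dev = Adafruit_I2C(0x48, debug=True)
  dev.write8(0x01, 0x60)      # write 0x60 to register 0x01
  return dev.readU16(0x00)    # read an unsigned 16-bit value from register 0x00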
if __name__ == '__main__':
try:
bus = Adafruit_I2C(address=0)
print("Default I2C bus is accessible")
except:
print("Error accessing default I2C bus")
| mit | -7,110,954,138,778,199,000 | 33.285714 | 94 | 0.608333 | false |
hsuchie4/TACTIC | src/pyasm/prod/command/code_update.py | 6 | 4127 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['BaseCodeUpdate', 'AssetCodeUpdate', 'TextureCodeUpdate',
'TemplateCodeUpdate']
import os, shutil
from pyasm.command import *
from pyasm.search import *
from pyasm.common import Environment
from pyasm.checkin import FileCheckin
from pyasm.biz import Pipeline, Snapshot, Template, File
from pyasm.prod.biz import *
class BaseCodeUpdate(DatabaseAction):
'''Provides the next code following a naming convention'''
'''This class supports the naming convention:
< asset_category >< unique_code >
'''
def get_naming(my):
raise CommandException("override this to return a naming scheme")
def execute(my):
if not my.sobject.is_insert():
return
# use naming to figure out the code for this asset
naming = my.get_naming()
code = naming.get_next_code(my.sobject)
my.sobject.set_value("code", code)
class TemplateCodeUpdate(BaseCodeUpdate):
def get_naming(my):
return TemplateCodeNaming()
class AssetCodeUpdate(BaseCodeUpdate):
SEARCH_TYPE = "prod/asset"
def get_naming(my):
return AssetCodeNaming()
def get_default_code(my):
return "prod-asset_default"
def execute(my):
if not my.sobject.is_insert():
return
if ProdSetting.get_value_by_key("use_name_as_asset_code") == "true":
name = my.sobject.get_value("name")
my.sobject.set_value("code", name)
else:
super(AssetCodeUpdate,my).execute()
class TextureCodeUpdate(BaseCodeUpdate):
'''Provides the next asset code following a naming convention'''
'''This class supports the naming convention:
<asset_code>_###_<asset_context>
'''
def get_naming(my):
return TextureCodeNaming()
"""
def execute(my):
# register the trigger on renaming files
#print "registering"
#Trigger.register(my,"UploadAction")
pass
def handle_rename_files(my):
'''called by trigger'''
return
files = my.command.files
sobject = my.command.sobject
asset_code = sobject.get_value("asset_code")
code = sobject.get_value("code")
# if there is no code yet, then the extract the code from the filename
is_code_new = False
if code == "":
main = files[0]
dirname = os.path.dirname(main)
filename = os.path.basename(main)
code, ext = os.path.splitext(filename)
# if it already starts with the asset code, then remove the
# asset_code
if code.startswith("%s_" % asset_code):
code = code.replace("%s_" % asset_code, "")
sobject.set_value("code", code)
is_code_new = True
base = "%s_%s" % (asset_code, code)
# prepend the sobject code to every file
new_paths = []
for file_path in files:
if is_code_new:
# move the file to the new name
filename = os.path.basename(file_path)
dirname = os.path.dirname(file_path)
if not filename.startswith("%s_" % asset_code):
filename = "%s_%s" % (asset_code, filename)
new_path = "%s/%s" % (dirname,filename)
shutil.move(file_path,new_path)
new_paths.append(new_path)
else:
# the file must start with base
if not os.path.basename(file_path).startswith(base):
raise CommandException("File '%s' does not belong to texture '%s'" % (file_path, base) )
new_paths.append(file_path)
# remap to the new paths
my.command.files = new_paths
"""
| epl-1.0 | -4,961,217,542,410,323,000 | 26.885135 | 109 | 0.579355 | false |
mrquim/repository.mrquim | repo/plugin.video.salts/js2py/translators/jsregexps.py | 7 | 6896 | from salts_lib.pyjsparser.pyjsparserdata import *
REGEXP_SPECIAL_SINGLE = {'\\', '^', '$', '*', '+', '?', '.'}
NOT_PATTERN_CHARS = {'^', '$', '\\', '.', '*', '+', '?', '(', ')', '[', ']', '|'} # what about '{', '}', ???
CHAR_CLASS_ESCAPE = {'d', 'D', 's', 'S', 'w', 'W'}
CONTROL_ESCAPE_CHARS = {'f', 'n', 'r', 't', 'v'}
CONTROL_LETTERS = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'}
def SpecialChar(char):
return {'type': 'SpecialChar',
'content': char}
def isPatternCharacter(char):
return char not in NOT_PATTERN_CHARS
class JsRegExpParser:
def __init__(self, source, flags):
self.source = source
self.flags = flags
self.index = 0
self.length = len(source)
self.lineNumber = 0
self.lineStart = 0
def parsePattern(self):
        '''Perform string escape - for regexp literals'''
return {'type': 'Pattern',
'contents': self.parseDisjunction()}
def parseDisjunction(self):
alternatives = []
while True:
alternatives.append(self.parseAlternative())
if not self.isEOF():
self.expect_character('|')
else:
break
return {'type': 'Disjunction',
'contents': alternatives}
def isEOF(self):
if self.index>=self.length:
return True
return False
def expect_character(self, character):
if self.source[self.index]!=character:
self.throwUnexpected(character)
self.index += 1
def parseAlternative(self):
contents = []
while not self.isEOF() and self.source[self.index]!='|':
contents.append(self.parseTerm())
return {'type': 'Alternative',
'contents': contents}
def follows(self, chars):
for i, c in enumerate(chars):
if self.index+i>=self.length or self.source[self.index+i] != c:
return False
return True
def parseTerm(self):
assertion = self.parseAssertion()
if assertion:
return assertion
else:
return {'type': 'Term',
'contents': self.parseAtom()} # quantifier will go inside atom!
def parseAssertion(self):
if self.follows('$'):
content = SpecialChar('$')
self.index += 1
elif self.follows('^'):
content = SpecialChar('^')
self.index += 1
elif self.follows('\\b'):
content = SpecialChar('\\b')
self.index += 2
elif self.follows('\\B'):
content = SpecialChar('\\B')
self.index += 2
elif self.follows('(?='):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
content = {'type': 'Lookached',
'contents': dis,
'negated': False}
elif self.follows('(?!'):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
content = {'type': 'Lookached',
'contents': dis,
'negated': True}
else:
return None
return {'type': 'Assertion',
'content': content}
def parseAtom(self):
if self.follows('.'):
content = SpecialChar('.')
self.index += 1
elif self.follows('\\'):
self.index += 1
content = self.parseAtomEscape()
elif self.follows('['):
content = self.parseCharacterClass()
elif self.follows('(?:'):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
content = 'idk'
elif self.follows('('):
self.index += 1
dis = self.parseDisjunction()
self.expect_character(')')
content = 'idk'
elif isPatternCharacter(self.source[self.index]):
content = self.source[self.index]
self.index += 1
else:
return None
quantifier = self.parseQuantifier()
return {'type': 'Atom',
'content': content,
'quantifier': quantifier}
def parseQuantifier(self):
prefix = self.parseQuantifierPrefix()
if not prefix:
return None
greedy = True
if self.follows('?'):
self.index += 1
greedy = False
return {'type': 'Quantifier',
'contents': prefix,
'greedy': greedy}
def parseQuantifierPrefix(self):
if self.isEOF():
return None
if self.follows('+'):
content = '+'
self.index += 1
elif self.follows('?'):
content = '?'
self.index += 1
elif self.follows('*'):
content = '*'
self.index += 1
elif self.follows('{'): # try matching otherwise return None and restore the state
i = self.index
self.index += 1
digs1 = self.scanDecimalDigs()
# if no minimal number of digs provided then return no quantifier
if not digs1:
self.index = i
return None
# scan char limit if provided
if self.follows(','):
self.index += 1
digs2 = self.scanDecimalDigs()
else:
digs2 = ''
# must be valid!
if not self.follows('}'):
self.index = i
return None
else:
self.expect_character('}')
content = int(digs1), int(digs2) if digs2 else None
else:
return None
return content
def parseAtomEscape(self):
ch = self.source[self.index]
if isDecimalDigit(ch) and ch!=0:
digs = self.scanDecimalDigs()
elif ch in CHAR_CLASS_ESCAPE:
self.index += 1
return SpecialChar('\\' + ch)
else:
return self.parseCharacterEscape()
def parseCharacterEscape(self):
ch = self.source[self.index]
if ch in CONTROL_ESCAPE_CHARS:
return SpecialChar('\\' + ch)
if ch=='c':
'ok, fuck this shit.'
def scanDecimalDigs(self):
s = self.index
while not self.isEOF() and isDecimalDigit(self.source[self.index]):
self.index += 1
return self.source[s:self.index]
a = JsRegExpParser('a(?=x)', '')
print(a.parsePattern()) | gpl-2.0 | 643,782,974,549,146,200 | 30.493151 | 118 | 0.480133 | false |
Kunal57/MIT_6.00.1x | pset5/ps6.py | 1 | 8731 | import string
### DO NOT MODIFY THIS FUNCTION ###
def load_words(file_name):
'''
file_name (string): the name of the file containing
the list of words to load
Returns: a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
'''
print('Loading word list from file...')
# inFile: file
in_file = open(file_name, 'r')
# line: string
line = in_file.readline()
# word_list: list of strings
word_list = line.split()
print(' ', len(word_list), 'words loaded.')
in_file.close()
return word_list
### DO NOT MODIFY THIS FUNCTION ###
def is_word(word_list, word):
'''
Determines if word is a valid word, ignoring
capitalization and punctuation
word_list (list): list of words in the dictionary.
word (string): a possible word.
Returns: True if word is in word_list, False otherwise
Example:
>>> is_word(word_list, 'bat') returns
True
>>> is_word(word_list, 'asdf') returns
False
'''
word = word.lower()
word = word.strip(" !@#$%^&*()-_+={}[]|\:;'<>?,./\"")
return word in word_list
### DO NOT MODIFY THIS FUNCTION ###
def get_story_string():
"""
Returns: a joke in encrypted text.
"""
f = open("story.txt", "r")
story = str(f.read())
f.close()
return story
WORDLIST_FILENAME = 'words.txt'
class Message(object):
### DO NOT MODIFY THIS METHOD ###
def __init__(self, text):
'''
Initializes a Message object
text (string): the message's text
a Message object has two attributes:
self.message_text (string, determined by input text)
self.valid_words (list, determined using helper function load_words
'''
self.message_text = text
self.valid_words = load_words(WORDLIST_FILENAME)
### DO NOT MODIFY THIS METHOD ###
def get_message_text(self):
'''
Used to safely access self.message_text outside of the class
Returns: self.message_text
'''
return self.message_text
### DO NOT MODIFY THIS METHOD ###
def get_valid_words(self):
'''
Used to safely access a copy of self.valid_words outside of the class
Returns: a COPY of self.valid_words
'''
return self.valid_words[:]
def build_shift_dict(self, shift):
'''
Creates a dictionary that can be used to apply a cipher to a letter.
The dictionary maps every uppercase and lowercase letter to a
character shifted down the alphabet by the input shift. The dictionary
should have 52 keys of all the uppercase letters and all the lowercase
letters only.
shift (integer): the amount by which to shift every letter of the
alphabet. 0 <= shift < 26
Returns: a dictionary mapping a letter (string) to
another letter (string).
'''
alphabetDict = {}
lowerAlphabet = "abcdefghijklmnopqrstuvwxyz"
upperAlphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
for x in range(len(lowerAlphabet)):
letterIndex = (x + shift) % 26
alphabetDict[lowerAlphabet[x]] = lowerAlphabet[letterIndex]
alphabetDict[upperAlphabet[x]] = upperAlphabet[letterIndex]
return alphabetDict
def apply_shift(self, shift):
'''
Applies the Caesar Cipher to self.message_text with the input shift.
Creates a new string that is self.message_text shifted down the
alphabet by some number of characters determined by the input shift
shift (integer): the shift with which to encrypt the message.
0 <= shift < 26
Returns: the message text (string) in which every character is shifted
down the alphabet by the input shift
'''
encryptedMessage = ""
alphabetDict = self.build_shift_dict(shift)
for letter in self.message_text:
if letter.isalpha():
encryptedMessage += alphabetDict.get(letter)
else:
encryptedMessage += letter
return encryptedMessage
class PlaintextMessage(Message):
def __init__(self, text, shift):
'''
Initializes a PlaintextMessage object
text (string): the message's text
shift (integer): the shift associated with this message
A PlaintextMessage object inherits from Message and has five attributes:
self.message_text (string, determined by input text)
self.valid_words (list, determined using helper function load_words)
self.shift (integer, determined by input shift)
self.encrypting_dict (dictionary, built using shift)
self.message_text_encrypted (string, created using shift)
Hint: consider using the parent class constructor so less
code is repeated
'''
Message.__init__(self, text)
self.shift = shift
self.encrypting_dict = Message.build_shift_dict(self, shift)
self.message_text_encrypted = Message.apply_shift(self, shift)
def get_shift(self):
'''
Used to safely access self.shift outside of the class
Returns: self.shift
'''
return self.shift
def get_encrypting_dict(self):
'''
Used to safely access a copy self.encrypting_dict outside of the class
Returns: a COPY of self.encrypting_dict
'''
return self.encrypting_dict.copy()
def get_message_text_encrypted(self):
'''
Used to safely access self.message_text_encrypted outside of the class
Returns: self.message_text_encrypted
'''
return self.message_text_encrypted
def change_shift(self, shift):
'''
Changes self.shift of the PlaintextMessage and updates other
attributes determined by shift (ie. self.encrypting_dict and
message_text_encrypted).
shift (integer): the new shift that should be associated with this message.
0 <= shift < 26
Returns: nothing
'''
self.shift = shift
self.encrypting_dict = Message.build_shift_dict(self, shift)
self.message_text_encrypted = Message.apply_shift(self, shift)
class CiphertextMessage(Message):
def __init__(self, text):
'''
Initializes a CiphertextMessage object
text (string): the message's text
a CiphertextMessage object has two attributes:
self.message_text (string, determined by input text)
self.valid_words (list, determined using helper function load_words)
'''
Message.__init__(self, text)
def decrypt_message(self):
'''
Decrypt self.message_text by trying every possible shift value
and find the "best" one. We will define "best" as the shift that
creates the maximum number of real words when we use apply_shift(shift)
on the message text. If s is the original shift value used to encrypt
the message, then we would expect 26 - s to be the best shift value
for decrypting it.
Note: if multiple shifts are equally good such that they all create
the maximum number of you may choose any of those shifts (and their
corresponding decrypted messages) to return
Returns: a tuple of the best shift value used to decrypt the message
and the decrypted message text using that shift value
'''
        best_shift = 0
        best_count = 0
        decryptedMessage = ""
        for x in range(26):
            count = 0
            message = Message.apply_shift(self, x)
            for word in message.split(" "):
                if is_word(self.valid_words, word):
                    count += 1
            # track the best word count separately from the shift that produced it
            if count > best_count:
                best_count = count
                best_shift = x
                decryptedMessage = message
        return (best_shift, decryptedMessage)
#Example test case (PlaintextMessage)
plaintext = PlaintextMessage('hello', 2)
print('Expected Output: jgnnq')
print('Actual Output:', plaintext.get_message_text_encrypted())
#Example test case (CiphertextMessage)
ciphertext = CiphertextMessage('jgnnq')
print('Expected Output:', (24, 'hello'))
print('Actual Output:', ciphertext.decrypt_message())
def decrypt_story():
cipherMessage = CiphertextMessage(get_story_string())
return cipherMessage.decrypt_message()
print(decrypt_story()) | mit | 1,933,617,698,280,548,400 | 32.113281 | 83 | 0.608178 | false |
pblottiere/QGIS | tests/src/python/test_qgsrastercolorrampshader.py | 19 | 1826 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsColorRampShader.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '17/08/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA
from qgis.PyQt.QtGui import QColor
from qgis.core import (QgsColorRampShader, QgsGradientColorRamp, QgsGradientStop)
from qgis.testing import unittest
class TestQgsRasterColorRampShader(unittest.TestCase):
def testNan(self):
shader = QgsColorRampShader()
item1 = QgsColorRampShader.ColorRampItem(1, QColor(0, 0, 0))
item2 = QgsColorRampShader.ColorRampItem(2, QColor(255, 255, 255))
shader.setColorRampItemList([item1, item2])
self.assertFalse(shader.shade(float('NaN'))[0])
self.assertFalse(shader.shade(float("inf"))[0])
def testCreateColorRamp(self):
shader = QgsColorRampShader(1, 3)
item1 = QgsColorRampShader.ColorRampItem(1, QColor(255, 0, 0))
item2 = QgsColorRampShader.ColorRampItem(2, QColor(255, 255, 0))
item3 = QgsColorRampShader.ColorRampItem(3, QColor(255, 255, 255))
shader.setColorRampItemList([item1, item2, item3])
shaderRamp = shader.createColorRamp()
gradientRamp = QgsGradientColorRamp(QColor(255, 0, 0), QColor(255, 255, 255), False, [QgsGradientStop(0.5, QColor(255, 255, 0))])
self.assertEqual(shaderRamp.color1(), gradientRamp.color1())
self.assertEqual(shaderRamp.color2(), gradientRamp.color2())
self.assertEqual(shaderRamp.stops(), gradientRamp.stops())
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -2,245,814,860,994,307,600 | 34.803922 | 137 | 0.697152 | false |
anlar/prismriver | prismriver/cli.py | 2 | 3443 | import json
from prismriver import util, main
class SongJsonEncoder(json.JSONEncoder):
def default(self, o):
return o.__dict__
def format_output(songs, output_format, txt_template=None):
if output_format == 'txt':
formatted_songs = []
for song in songs:
lyrics_txt = ''
if song.lyrics:
index = 0
for lyric in song.lyrics:
lyrics_txt += lyric
if index < len(song.lyrics) - 1:
lyrics_txt += '\n\n<<< --- --- --- >>>\n\n'
index += 1
result = txt_template
result = result.replace('%TITLE%', song.title)
result = result.replace('%ARTIST%', song.artist)
result = result.replace('%PLUGIN_ID%', song.plugin_name)
result = result.replace('%PLUGIN_NAME%', song.plugin_name)
result = result.replace('%LYRICS%', lyrics_txt)
formatted_songs.append(result)
result = ''
index = 0
for formatted_song in formatted_songs:
result += formatted_song
if index < len(formatted_songs) - 1:
result += '\n\n<<< --- --- --- --- --- >>>\n\n'
index += 1
return result
elif output_format == 'json':
return json.dumps(songs, cls=SongJsonEncoder, sort_keys=True, indent=4, ensure_ascii=False)
elif output_format == 'json_ascii':
return json.dumps(songs, cls=SongJsonEncoder, sort_keys=True, indent=4)
else:
pass
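# Illustrative sketch (hypothetical stand-in for the Song objects returned by
# prismriver.main.search; only the attributes read by format_output are set):
def _format_output_example():
    class FakeSong(object):
        title = 'Example Title'
        artist = 'Example Artist'
        plugin_name = 'ExamplePlugin'
        lyrics = ['First version of the lyrics...', 'Alternate version...']

    template = '%ARTIST% - %TITLE%\nSource: %PLUGIN_NAME%\n\n%LYRICS%'
    return format_output([FakeSong()], 'txt', template)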
def list_plugins():
plugins = main.get_plugins()
plugins.sort(key=lambda x: x.plugin_name.lower())
for plugin in plugins:
print('{:<20} [id: {}]'.format(plugin.plugin_name, plugin.ID))
def run():
parser = util.init_args_parser()
parser.add_argument('--list', action='store_true', help='list available search plugins')
parser.add_argument('--song', action='store_true',
help='search for song information by artist and title (default action)')
parser.add_argument('--cleanup', action='store_true', help='remove outdated files from cache')
parser.add_argument("-f", "--format", type=str, default='txt',
help="lyrics output format (txt (default), json, json_ascii)")
parser.add_argument("-o", "--output", type=str, default='%ARTIST% - %TITLE%\nSource: %PLUGIN_NAME%\n\n%LYRICS%',
help="output template for txt format. Available parameters: "
"%%TITLE%% - song title, "
"%%ARTIST%% - song artist, "
"%%LYRICS%% - song lyrics, "
"%%PLUGIN_ID%% - plugin id, "
"%%PLUGIN_NAME%% - plugin name "
"(default value: %%ARTIST%% - %%TITLE%%\\nSource: %%PLUGIN_NAME%%\\n\\n%%LYRICS%%)"
)
params = parser.parse_args()
util.init_logging(params.quiet, params.verbose, params.log)
util.log_debug_info(params)
config = util.init_search_config(params)
util.log_config_info(config)
if params.list:
list_plugins()
elif params.cleanup:
main.cleanup_cache(config)
else:
result = main.search(params.artist, params.title, config)
if result:
print(format_output(result, params.format, params.output))
| mit | 507,045,041,935,330,300 | 34.132653 | 116 | 0.540227 | false |
mir-group/flare | flare/mgp/map3b.py | 1 | 11075 | import numpy as np
from numba import njit
from math import floor, ceil
from typing import List
from flare.struc import Structure
from flare.utils.element_coder import Z_to_element
from flare.mgp.mapxb import MapXbody, SingleMapXbody
from flare.mgp.grid_kernels import grid_kernel, self_kernel
from flare.kernels.utils import from_mask_to_args
class Map3body(MapXbody):
def __init__(self, **kwargs):
self.kernel_name = "threebody"
self.singlexbody = SingleMap3body
self.bodies = 3
self.pred_perm = [[0, 1, 2], [1, 0, 2]]
self.spc_perm = [[0, 1, 2], [0, 2, 1]]
self.num_lmp_maps = 0
super().__init__(**kwargs)
def build_bond_struc(self, species_list):
"""
build a bond structure, used in grid generating
"""
# 2 body (2 atoms (1 bond) config)
self.spc = []
N_spc = len(species_list)
self.num_lmp_maps = N_spc ** 3
for spc1 in species_list:
for spc2 in species_list:
for spc3 in species_list:
species = [spc1, spc2, spc3]
self.spc.append(species)
def get_arrays(self, atom_env):
spcs, comp_r, comp_xyz = get_triplets(
atom_env.ctype,
atom_env.etypes,
atom_env.bond_array_3,
atom_env.cross_bond_inds,
atom_env.cross_bond_dists,
atom_env.triplet_counts,
)
return spcs, comp_r, comp_xyz
def find_map_index(self, spc):
return self.spc.index(spc)
class SingleMap3body(SingleMapXbody):
def __init__(self, **kwargs):
"""
Build 3-body MGP
"""
self.bodies = 3
self.grid_dim = 3
self.kernel_name = "threebody"
self.pred_perm = [[0, 1, 2], [1, 0, 2]]
super().__init__(**kwargs)
# initialize bounds
self.set_bounds(None, None)
spc = self.species
self.species_code = "_".join([Z_to_element(spc) for spc in self.species])
self.kv3name = f"kv3_{self.species_code}"
def set_bounds(self, lower_bound, upper_bound):
if self.auto_lower:
if isinstance(lower_bound, float):
self.bounds[0] = np.ones(3) * lower_bound
else:
self.bounds[0] = lower_bound
if self.auto_upper:
if isinstance(upper_bound, float):
self.bounds[1] = np.ones(3) * upper_bound
else:
self.bounds[1] = upper_bound
def construct_grids(self):
"""
Return:
An array of shape (n_grid, 3)
"""
# build grids in each dimension
triplets = []
for d in range(3):
bonds = np.linspace(
self.bounds[0][d], self.bounds[1][d], self.grid_num[d], dtype=np.float64
)
triplets.append(bonds)
# concatenate into one array: n_grid x 3
mesh = np.meshgrid(*triplets, indexing="ij")
del triplets
mesh_list = []
n_grid = np.prod(self.grid_num)
for d in range(3):
mesh_list.append(np.reshape(mesh[d], n_grid))
mesh_list = np.array(mesh_list).T
return mesh_list
def grid_cutoff(self, triplets, r_cut, coords, derivative, cutoff_func):
return bonds_cutoff(triplets, r_cut, coords, derivative, cutoff_func)
def get_grid_kernel(self, kern_type, data, kernel_info, *grid_arrays):
c2 = self.species[0]
etypes2 = np.array(self.species[1:])
_, cutoffs, hyps, hyps_mask = kernel_info
hyps, r_cut = get_hyps_for_kern(hyps, cutoffs, hyps_mask, c2, etypes2)
return grid_kernel(
data,
self.bodies,
kern_type,
get_bonds_for_kern,
bonds_cutoff,
c2,
etypes2,
hyps,
r_cut,
*grid_arrays,
)
def get_self_kernel(self, kernel_info, *grid_arrays):
c2 = self.species[0]
etypes2 = np.array(self.species[1:])
_, cutoffs, hyps, hyps_mask = kernel_info
hyps, r_cut = get_hyps_for_kern(hyps, cutoffs, hyps_mask, c2, etypes2)
return self_kernel(
self.bodies, get_permutations, c2, etypes2, hyps, r_cut, *grid_arrays
)
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def bonds_cutoff(triplets, r_cut, coords, derivative, cutoff_func):
dfj_list = np.zeros((len(triplets), 3), dtype=np.float64)
if derivative:
for d in range(3):
inds = np.arange(3) * 3 + d
f0, df0 = cutoff_func(r_cut, triplets, coords[:, inds])
dfj = (
df0[:, 0] * f0[:, 1] * f0[:, 2]
+ f0[:, 0] * df0[:, 1] * f0[:, 2]
+ f0[:, 0] * f0[:, 1] * df0[:, 2]
)
dfj_list[:, d] = dfj
else:
f0, _ = cutoff_func(r_cut, triplets, 0) # (n_grid, 3)
fj = f0[:, 0] * f0[:, 1] * f0[:, 2] # (n_grid,)
fj = np.expand_dims(fj, axis=1)
return fj, dfj_list
# TODO: move this func to Parameters class
def get_hyps_for_kern(hyps, cutoffs, hyps_mask, c2, etypes2):
"""
Args:
data: a single env of a list of envs
"""
args = from_mask_to_args(hyps, cutoffs, hyps_mask)
if len(args) == 2:
hyps, cutoffs = args
r_cut = cutoffs[1]
else:
(
cutoff_2b,
cutoff_3b,
cutoff_mb,
nspec,
spec_mask,
nbond,
bond_mask,
ntriplet,
triplet_mask,
ncut3b,
cut3b_mask,
nmb,
mb_mask,
sig2,
ls2,
sig3,
ls3,
sigm,
lsm,
) = args
bc1 = spec_mask[c2]
bc2 = spec_mask[etypes2[0]]
bc3 = spec_mask[etypes2[1]]
ttype = triplet_mask[nspec * nspec * bc1 + nspec * bc2 + bc3]
ls = ls3[ttype]
sig = sig3[ttype]
r_cut = cutoff_3b
hyps = [sig, ls]
return hyps, r_cut
@njit
def get_triplets(
ctype, etypes, bond_array, cross_bond_inds, cross_bond_dists, triplets
):
exist_species = []
tris = []
tri_dir = []
for m in range(bond_array.shape[0]):
r1 = bond_array[m, 0]
c1 = bond_array[m, 1:]
spc1 = etypes[m]
for n in range(triplets[m]):
ind1 = cross_bond_inds[m, m + n + 1]
r2 = bond_array[ind1, 0]
c2 = bond_array[ind1, 1:]
spc2 = etypes[ind1]
c12 = np.sum(c1 * c2)
r12 = np.sqrt(r1 ** 2 + r2 ** 2 - 2 * r1 * r2 * c12)
if spc1 <= spc2:
spcs = [ctype, spc1, spc2]
triplet = np.array([r1, r2, r12])
coord = [c1, c2, np.zeros(3)]
else:
spcs = [ctype, spc2, spc1]
triplet = np.array([r2, r1, r12])
coord = [c2, c1, np.zeros(3)]
if spcs not in exist_species:
exist_species.append(spcs)
tris.append([triplet])
tri_dir.append([coord])
else:
k = exist_species.index(spcs)
tris[k].append(triplet)
tri_dir[k].append(coord)
return exist_species, tris, tri_dir
@njit
def get_permutations(c1, etypes1, c2, etypes2):
ei1 = etypes1[0]
ei2 = etypes1[1]
ej1 = etypes2[0]
ej2 = etypes2[1]
perms = []
if c1 == c2:
if (ei1 == ej1) and (ei2 == ej2):
perms.append([0, 1, 2])
if (ei1 == ej2) and (ei2 == ej1):
perms.append([1, 0, 2])
if c1 == ej1:
if (ei1 == ej2) and (ei2 == c2):
perms.append([1, 2, 0])
if (ei1 == c2) and (ei2 == ej2):
perms.append([0, 2, 1])
if c1 == ej2:
if (ei1 == ej1) and (ei2 == c2):
perms.append([2, 1, 0])
if (ei1 == c2) and (ei2 == ej1):
perms.append([2, 0, 1])
return perms
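def _permutation_example():
    """Hedged example (assumes atomic numbers 8 = O, 1 = H): for a water-like
    triplet compared against itself the two hydrogens are interchangeable, so
    both orderings of the first two bond entries are returned."""
    perms = get_permutations(8, np.array([1, 1]), 8, np.array([1, 1]))
    # expected: [[0, 1, 2], [1, 0, 2]]
    return perms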
def get_bonds_for_kern(env, c2, etypes2):
return get_triplets_for_kern_jit(
env.bond_array_3,
env.ctype,
env.etypes,
env.cross_bond_inds,
env.cross_bond_dists,
env.triplet_counts,
c2,
etypes2,
)
@njit
def get_triplets_for_kern_jit(
bond_array_1,
c1,
etypes1,
cross_bond_inds_1,
cross_bond_dists_1,
triplets_1,
c2,
etypes2,
):
# triplet_list = np.empty((0, 6), dtype=np.float64)
triplet_list = []
ej1 = etypes2[0]
ej2 = etypes2[1]
all_spec = [c2, ej1, ej2]
if c1 in all_spec:
c1_ind = all_spec.index(c1)
ind_list = [0, 1, 2]
ind_list.remove(c1_ind)
all_spec.remove(c1)
for m in range(bond_array_1.shape[0]):
two_inds = [ind_list[0], ind_list[1]]
ri1 = bond_array_1[m, 0]
ci1 = bond_array_1[m, 1:]
ei1 = etypes1[m]
two_spec = [all_spec[0], all_spec[1]]
if ei1 in two_spec:
ei1_ind = ind_list[0] if ei1 == two_spec[0] else ind_list[1]
two_spec.remove(ei1)
two_inds.remove(ei1_ind)
one_spec = two_spec[0]
ei2_ind = two_inds[0]
for n in range(triplets_1[m]):
ind1 = cross_bond_inds_1[m, m + n + 1]
ei2 = etypes1[ind1]
if ei2 == one_spec:
ri2 = bond_array_1[ind1, 0]
ci2 = bond_array_1[ind1, 1:]
ri3 = cross_bond_dists_1[m, m + n + 1]
ci3 = np.zeros(3)
perms = get_permutations(c1, np.array([ei1, ei2]), c2, etypes2)
tri = np.array([ri1, ri2, ri3])
crd1 = np.array([ci1[0], ci2[0], ci3[0]])
crd2 = np.array([ci1[1], ci2[1], ci3[1]])
crd3 = np.array([ci1[2], ci2[2], ci3[2]])
# append permutations
nperm = len(perms)
for iperm in range(nperm):
perm = perms[iperm]
tricrd = np.take(tri, perm)
crd1_p = np.take(crd1, perm)
crd2_p = np.take(crd2, perm)
crd3_p = np.take(crd3, perm)
crd_p = np.vstack((crd1_p, crd2_p, crd3_p))
tricrd = np.hstack(
(tricrd, crd_p[:, 0], crd_p[:, 1], crd_p[:, 2])
)
triplet_list.append(tricrd)
# tricrd = np.expand_dims(tricrd, axis=0)
# triplet_list = np.vstack((triplet_list, tricrd))
return triplet_list
| mit | 4,418,217,167,793,085,400 | 27.916449 | 88 | 0.469977 | false |
thaim/ansible | test/units/module_utils/facts/test_ansible_collector.py | 9 | 18914 | # -*- coding: utf-8 -*-
#
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
# for testing
from units.compat import unittest
from units.compat.mock import Mock, patch
from ansible.module_utils.facts import collector
from ansible.module_utils.facts import ansible_collector
from ansible.module_utils.facts import namespace
from ansible.module_utils.facts.other.facter import FacterFactCollector
from ansible.module_utils.facts.other.ohai import OhaiFactCollector
from ansible.module_utils.facts.system.apparmor import ApparmorFactCollector
from ansible.module_utils.facts.system.caps import SystemCapabilitiesFactCollector
from ansible.module_utils.facts.system.date_time import DateTimeFactCollector
from ansible.module_utils.facts.system.env import EnvFactCollector
from ansible.module_utils.facts.system.distribution import DistributionFactCollector
from ansible.module_utils.facts.system.dns import DnsFactCollector
from ansible.module_utils.facts.system.fips import FipsFactCollector
from ansible.module_utils.facts.system.local import LocalFactCollector
from ansible.module_utils.facts.system.lsb import LSBFactCollector
from ansible.module_utils.facts.system.pkg_mgr import PkgMgrFactCollector, OpenBSDPkgMgrFactCollector
from ansible.module_utils.facts.system.platform import PlatformFactCollector
from ansible.module_utils.facts.system.python import PythonFactCollector
from ansible.module_utils.facts.system.selinux import SelinuxFactCollector
from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
from ansible.module_utils.facts.system.user import UserFactCollector
# from ansible.module_utils.facts.hardware.base import HardwareCollector
from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.virtual.base import VirtualCollector
ALL_COLLECTOR_CLASSES = \
[PlatformFactCollector,
DistributionFactCollector,
SelinuxFactCollector,
ApparmorFactCollector,
SystemCapabilitiesFactCollector,
FipsFactCollector,
PkgMgrFactCollector,
OpenBSDPkgMgrFactCollector,
ServiceMgrFactCollector,
LSBFactCollector,
DateTimeFactCollector,
UserFactCollector,
LocalFactCollector,
EnvFactCollector,
DnsFactCollector,
PythonFactCollector,
     # FIXME: re-enable when Hardware() doesn't munge self.facts
# HardwareCollector
NetworkCollector,
VirtualCollector,
OhaiFactCollector,
FacterFactCollector]
def mock_module(gather_subset=None,
filter=None):
if gather_subset is None:
gather_subset = ['all', '!facter', '!ohai']
if filter is None:
filter = '*'
mock_module = Mock()
mock_module.params = {'gather_subset': gather_subset,
'gather_timeout': 5,
'filter': filter}
mock_module.get_bin_path = Mock(return_value=None)
return mock_module
def _collectors(module,
all_collector_classes=None,
minimal_gather_subset=None):
gather_subset = module.params.get('gather_subset')
if all_collector_classes is None:
all_collector_classes = ALL_COLLECTOR_CLASSES
if minimal_gather_subset is None:
minimal_gather_subset = frozenset([])
collector_classes = \
collector.collector_classes_from_gather_subset(all_collector_classes=all_collector_classes,
minimal_gather_subset=minimal_gather_subset,
gather_subset=gather_subset)
collectors = []
for collector_class in collector_classes:
collector_obj = collector_class()
collectors.append(collector_obj)
    # Add a collector that knows what gather_subset we used so it can provide a fact
collector_meta_data_collector = \
ansible_collector.CollectorMetaDataCollector(gather_subset=gather_subset,
module_setup=True)
collectors.append(collector_meta_data_collector)
return collectors
ns = namespace.PrefixFactNamespace('ansible_facts', 'ansible_')
# FIXME: this is brute force, but hopefully enough to get some refactoring to make facts testable
class TestInPlace(unittest.TestCase):
def _mock_module(self, gather_subset=None):
return mock_module(gather_subset=gather_subset)
def _collectors(self, module,
all_collector_classes=None,
minimal_gather_subset=None):
return _collectors(module=module,
all_collector_classes=all_collector_classes,
minimal_gather_subset=minimal_gather_subset)
def test(self):
gather_subset = ['all']
mock_module = self._mock_module(gather_subset=gather_subset)
all_collector_classes = [EnvFactCollector]
collectors = self._collectors(mock_module,
all_collector_classes=all_collector_classes)
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=collectors,
namespace=ns)
res = fact_collector.collect(module=mock_module)
self.assertIsInstance(res, dict)
self.assertIn('env', res)
self.assertIn('gather_subset', res)
self.assertEqual(res['gather_subset'], ['all'])
def test1(self):
gather_subset = ['all']
mock_module = self._mock_module(gather_subset=gather_subset)
collectors = self._collectors(mock_module)
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=collectors,
namespace=ns)
res = fact_collector.collect(module=mock_module)
self.assertIsInstance(res, dict)
# just assert it's not almost empty
# with run_command and get_file_content mock, many facts are empty, like network
self.assertGreater(len(res), 20)
def test_empty_all_collector_classes(self):
mock_module = self._mock_module()
all_collector_classes = []
collectors = self._collectors(mock_module,
all_collector_classes=all_collector_classes)
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=collectors,
namespace=ns)
res = fact_collector.collect()
self.assertIsInstance(res, dict)
# just assert it's not almost empty
self.assertLess(len(res), 3)
# def test_facts_class(self):
# mock_module = self._mock_module()
# Facts(mock_module)
# def test_facts_class_load_on_init_false(self):
# mock_module = self._mock_module()
# Facts(mock_module, load_on_init=False)
# # FIXME: assert something
class TestCollectedFacts(unittest.TestCase):
gather_subset = ['all', '!facter', '!ohai']
min_fact_count = 30
max_fact_count = 1000
# TODO: add ansible_cmdline, ansible_*_pubkey* back when TempFactCollector goes away
expected_facts = ['date_time',
'user_id', 'distribution',
'gather_subset', 'module_setup',
'env']
not_expected_facts = ['facter', 'ohai']
collected_facts = {}
def _mock_module(self, gather_subset=None):
return mock_module(gather_subset=self.gather_subset)
@patch('platform.system', return_value='Linux')
@patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value='systemd')
def setUp(self, mock_gfc, mock_ps):
mock_module = self._mock_module()
collectors = self._collectors(mock_module)
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=collectors,
namespace=ns)
self.facts = fact_collector.collect(module=mock_module,
collected_facts=self.collected_facts)
def _collectors(self, module,
all_collector_classes=None,
minimal_gather_subset=None):
return _collectors(module=module,
all_collector_classes=all_collector_classes,
minimal_gather_subset=minimal_gather_subset)
def test_basics(self):
self._assert_basics(self.facts)
def test_expected_facts(self):
self._assert_expected_facts(self.facts)
def test_not_expected_facts(self):
self._assert_not_expected_facts(self.facts)
def _assert_basics(self, facts):
self.assertIsInstance(facts, dict)
# just assert it's not almost empty
self.assertGreaterEqual(len(facts), self.min_fact_count)
# and that is not huge number of keys
self.assertLess(len(facts), self.max_fact_count)
# everything starts with ansible_ namespace
def _assert_ansible_namespace(self, facts):
# FIXME: kluge for non-namespace fact
facts.pop('module_setup', None)
facts.pop('gather_subset', None)
for fact_key in facts:
self.assertTrue(fact_key.startswith('ansible_'),
'The fact name "%s" does not startwith "ansible_"' % fact_key)
def _assert_expected_facts(self, facts):
facts_keys = sorted(facts.keys())
for expected_fact in self.expected_facts:
self.assertIn(expected_fact, facts_keys)
def _assert_not_expected_facts(self, facts):
facts_keys = sorted(facts.keys())
for not_expected_fact in self.not_expected_facts:
self.assertNotIn(not_expected_fact, facts_keys)
class ProvidesOtherFactCollector(collector.BaseFactCollector):
name = 'provides_something'
_fact_ids = set(['needed_fact'])
def collect(self, module=None, collected_facts=None):
return {'needed_fact': 'THE_NEEDED_FACT_VALUE'}
class RequiresOtherFactCollector(collector.BaseFactCollector):
name = 'requires_something'
def collect(self, module=None, collected_facts=None):
collected_facts = collected_facts or {}
fact_dict = {}
fact_dict['needed_fact'] = collected_facts['needed_fact']
fact_dict['compound_fact'] = "compound-%s" % collected_facts['needed_fact']
return fact_dict
class ConCatFactCollector(collector.BaseFactCollector):
name = 'concat_collected'
def collect(self, module=None, collected_facts=None):
collected_facts = collected_facts or {}
fact_dict = {}
con_cat_list = []
for key, value in collected_facts.items():
con_cat_list.append(value)
fact_dict['concat_fact'] = '-'.join(con_cat_list)
return fact_dict
class TestCollectorDepsWithFilter(unittest.TestCase):
gather_subset = ['all', '!facter', '!ohai']
def _mock_module(self, gather_subset=None, filter=None):
return mock_module(gather_subset=self.gather_subset,
filter=filter)
def setUp(self):
self.mock_module = self._mock_module()
        self.collectors = self._collectors(self.mock_module)
def _collectors(self, module,
all_collector_classes=None,
minimal_gather_subset=None):
return [ProvidesOtherFactCollector(),
RequiresOtherFactCollector()]
def test_no_filter(self):
_mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'])
facts_dict = self._collect(_mock_module)
expected = {'needed_fact': 'THE_NEEDED_FACT_VALUE',
'compound_fact': 'compound-THE_NEEDED_FACT_VALUE'}
self.assertEquals(expected, facts_dict)
def test_with_filter_on_compound_fact(self):
_mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'],
filter='compound_fact')
facts_dict = self._collect(_mock_module)
expected = {'compound_fact': 'compound-THE_NEEDED_FACT_VALUE'}
self.assertEquals(expected, facts_dict)
def test_with_filter_on_needed_fact(self):
_mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'],
filter='needed_fact')
facts_dict = self._collect(_mock_module)
expected = {'needed_fact': 'THE_NEEDED_FACT_VALUE'}
self.assertEquals(expected, facts_dict)
def test_with_filter_on_compound_gather_compound(self):
_mock_module = mock_module(gather_subset=['!all', '!any', 'compound_fact'],
filter='compound_fact')
facts_dict = self._collect(_mock_module)
expected = {'compound_fact': 'compound-THE_NEEDED_FACT_VALUE'}
self.assertEquals(expected, facts_dict)
def test_with_filter_no_match(self):
_mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'],
filter='ansible_this_doesnt_exist')
facts_dict = self._collect(_mock_module)
expected = {}
self.assertEquals(expected, facts_dict)
def test_concat_collector(self):
_mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'])
_collectors = self._collectors(_mock_module)
_collectors.append(ConCatFactCollector())
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=_collectors,
namespace=ns,
filter_spec=_mock_module.params['filter'])
collected_facts = {}
facts_dict = fact_collector.collect(module=_mock_module,
collected_facts=collected_facts)
self.assertIn('concat_fact', facts_dict)
self.assertTrue('THE_NEEDED_FACT_VALUE' in facts_dict['concat_fact'])
def test_concat_collector_with_filter_on_concat(self):
_mock_module = mock_module(gather_subset=['all', '!facter', '!ohai'],
filter='concat_fact')
_collectors = self._collectors(_mock_module)
_collectors.append(ConCatFactCollector())
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=_collectors,
namespace=ns,
filter_spec=_mock_module.params['filter'])
collected_facts = {}
facts_dict = fact_collector.collect(module=_mock_module,
collected_facts=collected_facts)
self.assertIn('concat_fact', facts_dict)
self.assertTrue('THE_NEEDED_FACT_VALUE' in facts_dict['concat_fact'])
self.assertTrue('compound' in facts_dict['concat_fact'])
def _collect(self, _mock_module, collected_facts=None):
_collectors = self._collectors(_mock_module)
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=_collectors,
namespace=ns,
filter_spec=_mock_module.params['filter'])
facts_dict = fact_collector.collect(module=_mock_module,
collected_facts=collected_facts)
return facts_dict
class ExceptionThrowingCollector(collector.BaseFactCollector):
def collect(self, module=None, collected_facts=None):
raise Exception('A collector failed')
class TestExceptionCollectedFacts(TestCollectedFacts):
def _collectors(self, module,
all_collector_classes=None,
minimal_gather_subset=None):
collectors = _collectors(module=module,
all_collector_classes=all_collector_classes,
minimal_gather_subset=minimal_gather_subset)
c = [ExceptionThrowingCollector()] + collectors
return c
class TestOnlyExceptionCollector(TestCollectedFacts):
expected_facts = []
min_fact_count = 0
def _collectors(self, module,
all_collector_classes=None,
minimal_gather_subset=None):
return [ExceptionThrowingCollector()]
class TestMinimalCollectedFacts(TestCollectedFacts):
gather_subset = ['!all']
min_fact_count = 1
max_fact_count = 10
expected_facts = ['gather_subset',
'module_setup']
not_expected_facts = ['lsb']
class TestFacterCollectedFacts(TestCollectedFacts):
gather_subset = ['!all', 'facter']
min_fact_count = 1
max_fact_count = 10
expected_facts = ['gather_subset',
'module_setup']
not_expected_facts = ['lsb']
class TestOhaiCollectedFacts(TestCollectedFacts):
gather_subset = ['!all', 'ohai']
min_fact_count = 1
max_fact_count = 10
expected_facts = ['gather_subset',
'module_setup']
not_expected_facts = ['lsb']
class TestPkgMgrFacts(TestCollectedFacts):
gather_subset = ['pkg_mgr']
min_fact_count = 1
max_fact_count = 20
expected_facts = ['gather_subset',
'module_setup',
'pkg_mgr']
collected_facts = {
"ansible_distribution": "Fedora",
"ansible_distribution_major_version": "28",
"ansible_os_family": "RedHat"
}
class TestOpenBSDPkgMgrFacts(TestPkgMgrFacts):
def test_is_openbsd_pkg(self):
self.assertIn('pkg_mgr', self.facts)
self.assertEqual(self.facts['pkg_mgr'], 'openbsd_pkg')
def setUp(self):
self.patcher = patch('platform.system')
mock_platform = self.patcher.start()
mock_platform.return_value = 'OpenBSD'
mock_module = self._mock_module()
collectors = self._collectors(mock_module)
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=collectors,
namespace=ns)
self.facts = fact_collector.collect(module=mock_module)
def tearDown(self):
self.patcher.stop()
| mit | -7,210,130,124,361,021,000 | 36.527778 | 101 | 0.626943 | false |
grandmasterchef/WhatManager2 | home/migrations/0001_initial.py | 4 | 6525 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import home.info_holder
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DownloadLocation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)),
('zone', models.CharField(max_length=16)),
('path', models.TextField()),
('preferred', models.BooleanField(default=False)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LogEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)),
('datetime', models.DateTimeField(auto_now_add=True)),
('type', models.TextField()),
('message', models.TextField()),
('traceback', models.TextField(null=True)),
('user', models.ForeignKey(related_name=b'wm_logentry',
to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'permissions': (('view_logentry', 'Can view the logs.'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ReplicaSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)),
('zone', models.CharField(max_length=16)),
('name', models.TextField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TransInstance',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)),
('name', models.TextField()),
('host', models.TextField()),
('port', models.IntegerField()),
('peer_port', models.IntegerField()),
('username', models.TextField()),
('password', models.TextField()),
('replica_set', models.ForeignKey(to='home.ReplicaSet')),
],
options={
'permissions': (
('view_transinstance_stats', 'Can view current Transmission stats.'),
('run_checks', 'Can run the validity checks.')
),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WhatFulltext',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)),
('info', models.TextField()),
('more_info', models.TextField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WhatLoginCache',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)),
('cookies', models.TextField()),
('authkey', models.TextField()),
('passkey', models.TextField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WhatTorrent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)),
('info_hash', models.CharField(max_length=40, db_index=True)),
('torrent_file', models.TextField()),
('torrent_file_name', models.TextField()),
('retrieved', models.DateTimeField()),
('info', models.TextField()),
('tags', models.TextField()),
('what_group_id', models.IntegerField()),
('added_by', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'permissions': (
('view_whattorrent', 'Can view torrents.'),
('download_whattorrent', 'Can download and play torrents.')
),
},
bases=(models.Model, home.info_holder.InfoHolder),
),
migrations.CreateModel(
name='TransTorrent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)),
('info_hash', models.CharField(max_length=40)),
('torrent_id', models.IntegerField(null=True)),
('torrent_name', models.TextField(null=True)),
('torrent_size', models.BigIntegerField(null=True)),
('torrent_uploaded', models.BigIntegerField(null=True)),
('torrent_done', models.FloatField(null=True)),
('torrent_date_added', models.DateTimeField(null=True)),
('torrent_error', models.IntegerField(null=True)),
('torrent_error_string', models.TextField(null=True)),
('instance', models.ForeignKey(to='home.TransInstance')),
('location', models.ForeignKey(to='home.DownloadLocation')),
('what_torrent', models.ForeignKey(to='home.WhatTorrent')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.RunSQL(
'ALTER TABLE `home_whatfulltext` ENGINE = MYISAM',
'ALTER TABLE `home_whatfulltext` ENGINE = INNODB',
),
migrations.RunSQL(
'ALTER TABLE `home_whatfulltext` ADD FULLTEXT `info_fts` (`info`)',
'ALTER TABLE `home_whatfulltext` DROP INDEX `info_fts`',
),
]
| mit | 1,549,478,202,849,230,300 | 40.56051 | 94 | 0.479234 | false |
rec/echomesh | code/python/external/parsedatetime/tests/TestSimpleOffsets.py | 1 | 7518 |
"""
Test parsing of 'simple' offsets
"""
import unittest, time, datetime
import parsedatetime as pdt
# a special compare function is used to allow us to ignore the seconds as
# the running of the test could cross a minute boundary
def _compareResults(result, check):
target, t_flag = result
value, v_flag = check
t_yr, t_mth, t_dy, t_hr, t_min, _, _, _, _ = target
v_yr, v_mth, v_dy, v_hr, v_min, _, _, _, _ = value
return ((t_yr == v_yr) and (t_mth == v_mth) and (t_dy == v_dy) and
(t_hr == v_hr) and (t_min == v_min)) and (t_flag == v_flag)
class test(unittest.TestCase):
def setUp(self):
self.cal = pdt.Calendar()
self.yr, self.mth, self.dy, self.hr, self.mn, self.sec, self.wd, self.yd, self.isdst = time.localtime()
def testMinutesFromNow(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(minutes=5)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('5 minutes from now', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 min from now', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5m from now', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('in 5 minutes', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('in 5 min', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 min from now', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 minutes', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 min', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5m', start), (target, 2)))
def testMinutesBeforeNow(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(minutes=-5)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('5 minutes before now', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 min before now', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5m before now', start), (target, 2)))
def testHoursFromNow(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(hours=5)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('5 hours from now', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hour from now', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hr from now', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('in 5 hours', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('in 5 hour', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hr from now', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hours', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hr', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5h', start), (target, 2)))
def testHoursBeforeNow(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(hours=-5)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('5 hours before now', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hr before now', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5h before now', start), (target, 2)))
def testOffsetAfterNoon(self):
s = datetime.datetime(self.yr, self.mth, self.dy, 10, 0, 0)
t = datetime.datetime(self.yr, self.mth, self.dy, 12, 0, 0) + datetime.timedelta(hours=5)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('5 hours after 12pm', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hours after 12 pm', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hours after 12:00pm', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hours after 12:00 pm', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hours after noon', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hours from noon', start), (target, 2)))
def testOffsetBeforeNoon(self):
s = datetime.datetime.now()
t = datetime.datetime(self.yr, self.mth, self.dy, 12, 0, 0) + datetime.timedelta(hours=-5)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('5 hours before noon', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hours before 12pm', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hours before 12 pm', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hours before 12:00pm', start), (target, 2)))
self.assertTrue(_compareResults(self.cal.parse('5 hours before 12:00 pm', start), (target, 2)))
def testWeekFromNow(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(weeks=1)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('in 1 week', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('1 week from now', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('in 7 days', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('7 days from now', start), (target, 1)))
#self.assertTrue(_compareResults(self.cal.parse('next week', start), (target, 1)))
def testWeekBeforeNow(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(weeks=-1)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('1 week before now', start), (target, 0)))
self.assertTrue(_compareResults(self.cal.parse('7 days before now', start), (target, 0)))
#self.assertTrue(_compareResults(self.cal.parse('last week', start), (target, 0)))
def testSpecials(self):
s = datetime.datetime.now()
t = datetime.datetime(self.yr, self.mth, self.dy, 9, 0, 0) + datetime.timedelta(days=1)
start = s.timetuple()
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('tomorrow', start), (target, 1)))
self.assertTrue(_compareResults(self.cal.parse('next day', start), (target, 1)))
t = datetime.datetime(self.yr, self.mth, self.dy, 9, 0, 0) + datetime.timedelta(days=-1)
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('yesterday', start), (target, 1)))
t = datetime.datetime(self.yr, self.mth, self.dy, 9, 0, 0)
target = t.timetuple()
self.assertTrue(_compareResults(self.cal.parse('today', start), (target, 1)))
if __name__ == "__main__":
unittest.main()
| mit | -4,099,626,455,720,227,300 | 44.841463 | 111 | 0.61519 | false |
kobronson/cs-voltdb | tests/scripts/exp_test.py | 1 | 22827 | #!/usr/bin/env python2.6
# This file is part of VoltDB.
# Copyright (C) 2008-2013 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import filecmp
import fnmatch
import getpass
import os.path
import shutil
import socket
import subprocess
import sys
import time
import urllib
from collections import defaultdict
from optparse import OptionParser
from subprocess import call # invoke unix/linux cmds
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
# add the path to the volt python client, just based on knowing
# where we are now
sys.path.append('../../lib/python')
try:
from voltdbclient import *
except ImportError:
sys.path.append('./lib/python')
from voltdbclient import *
from Query import VoltQueryClient
from XMLUtils import prettify # To create a human readable xml file
hostname = socket.gethostname()
pkgName = {'comm': 'LINUX-voltdb',
'voltkv': 'LINUX-voltdb-voltkv',
'voltcache': 'LINUX-voltdb-voltcache',
'pro': 'LINUX-voltdb-ent'}
pkgDict = {'comm': 'Community',
'pro': 'Enterprise',
'voltkv': 'Voltkv',
'voltcache': 'Voltcache',
'all': "Community, Pro, Voltkv, Voltcache"}
suiteDict = {'helloworld': 'HelloWorld',
'voltcache': 'Voltcache',
'voltkv': 'Voltkv',
'voter': 'Voter',
'all': 'HelloWorld, Voter, Voltcache, Voltkv'}
tail = "tar.gz"
# http://volt0/kits/candidate/LINUX-voltdb-2.8.1.tar.gz
# http://volt0/kits/candidate/LINUX-voltdb-ent-2.8.1.tar.gz
root = "http://volt0/kits/branch/"
testname = os.path.basename(os.path.abspath(__file__)).replace(".py", "")
#logDir = destDir + getpass.getuser() + "_" + testname + "_log/"
elem2Test = {'helloworld':'./run.sh', 'voltcache':'./run.sh', 'voltkv':'./run.sh', 'voter':'./run.sh'}
defaultHost = "localhost"
defaultPort = 21212
sectionBreak="====================================================="
# To parse the output of './examples/voter/run.sh client' and get a specific portion
# of the output. A sample value would be like the one below:
'''
Voting Results
--------------------------------------------------------------------------------
A total of 8166781 votes were received...
- 7,816,923 Accepted
- 79,031 Rejected (Invalid Contestant)
- 12 Rejected (Maximum Vote Count Reached)
- 0 Failed (Transaction Error)
Contestant Name Votes Received
Edwina Burnam 2,156,993
Jessie Eichman 1,652,654
Alana Bregman 1,189,909
Kelly Clauss 1,084,995
Jessie Alloway 1,060,892
Tabatha Gehling 939,604
The Winner is: Edwina Burnam
'''
def findSectionInFile(srce, start, end):
flag = 0
status = False
ins = open(srce, "r" )
str = ""
for line in ins:
if(flag == 0 and line.find(start) > -1):
flag = 1
if(flag == 1):
str += line
if(flag == 1 and line.find(end) > -1):
flag = 0
status = True
break
return (status, str)
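# A minimal usage sketch for findSectionInFile (illustrative only; the log path below is
# hypothetical, while the start/end markers are taken from the sample output shown above):
#   found, section = findSectionInFile("/tmp/voter_client.log",
#                                      "Voting Results",
#                                      "The Winner is: Edwina Burnam")
#   if found:
#       print section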
# To read a source file 'srce' into an array
def readFileIntoArray(srce):
content = []
if(os.path.getsize(srce) > 0):
with open(srce) as f:
content = f.readlines()
return content
# The release number can be optionally passed in from cmdline with -r switch
# If it's omitted at cmdline, then this function is called to get the release
# number from 'version.txt'
def getReleaseNum():
path = os.path.dirname(os.path.abspath(__file__))
root = path.replace("tests/scripts", "")
verFile = root + "version.txt"
ver = readFileIntoArray(verFile)[0].rstrip()
return ver
# Always create a fresh new subdir
def createAFreshDir(dir):
ret = 0
if os.path.exists(dir):
shutil.rmtree(dir)
if not os.path.exists(dir):
os.makedirs(dir)
if not os.path.exists(dir):
ret = -1
return ret
# To get a VoltDB tar ball file and untar it in a designated place
def installVoltDB(pkg, release):
info = {}
info["ok"] = False
thispkg = pkgName[pkg] + '-' + release + "." + tail
srce = root + thispkg
dest = os.path.join('/tmp', thispkg)
cmd = "wget " + srce + " -O " + dest + " 2>/dev/null"
print sectionBreak
print "Getting " + srce
print "to " +dest
ret = call(cmd, shell=True)
if ret != 0 or not os.path.exists(dest):
info["err"] = "Cannot download '%s'" % srce
return info
fsize = os.path.getsize(dest)
if fsize == 0:
info["err"] = "The pkg '%s' is blank!" % dest
return info
ret = createAFreshDir(workDir)
if ret != 0:
info["err"] = "Cannot create the working directory: '%s'" % workDir
return info
cmd = "tar zxf " + dest + " -C " + workDir + " 2>/dev/null"
ret = call(cmd, shell=True)
if ret == 0:
# info["dest"] = dest
info["srce"] = srce
info["pkgname"] = thispkg
info["workDir"] = workDir
info["ok"] = True
else:
info["err"] = "VoltDB pkg '%s' installation FAILED at location '%s'" \
% (dest, workDir)
if "ent" in thispkg:
print "hear me"
info["license"] = getEnterpriseLicense(workDir, release)
return info
# end of installVoltDB(pkg, release):
def getEnterpriseLicense(workDir, release):
print workDir
print release
url = root + "license.xml"
filename = os.path.join(workDir, "voltdb-ent-" + release, "voltdb","license.xml")
urllib.urlretrieve(url,filename)
print "Retrieved to " + filename
return True
# Sample key/val pairs for testSuiteList are:
# key: voltcache, val: /tmp/<user_name>_exp_test/voltdb-2.8.1/examples/voltcache
# key: voter, val: /tmp/<user_name>_exp_test/voltdb-2.8.1/examples/voter
# key: voltkv, val: /tmp/<user_name>_exp_test/voltdb-2.8.1/examples/voltkv
# key: helloworld', val: /tmp/<user_name>_exp_test/voltdb-2.8.1/doc/tutorials/helloworld
def setTestSuite(dname, suite):
testSuiteList = {}
for dirname, dirnames, filenames in os.walk(dname):
for subdirname in dirnames:
if subdirname in elem2Test.keys():
path = os.path.join(dirname, subdirname)
run_sh = path + "/" + elem2Test[subdirname]
if(os.access(run_sh, os.X_OK)):
if(suite != "all"):
if(path.find(suite) > -1):
testSuiteList[suite] = path
else:
if(path.find(subdirname) > -1):
testSuiteList[subdirname] = path
return testSuiteList
# Not used yet.
# It would be necessary if we wanted to run certain queries before shutting down VoltDB.
def stopPS(ps):
print "Going to kill this process: '%d'" % ps.pid
killer = subprocess.Popen("kill -9 %d" % (ps.pid), shell = True)
killer.communicate()
if killer.returncode != 0:
# print >> sys.stderr, "Failed to kill the server process %d" % (server.pid)
print "Failed to kill the server process %d" % (ps.pid)
# To return a voltDB client
def getClient():
host = defaultHost
port = defaultPort
client = None
for i in xrange(10):
try:
client = VoltQueryClient(host, port)
client.set_quiet(True)
client.set_timeout(5.0) # 5 seconds
break
except socket.error:
time.sleep(1)
if client == None:
print >> sys.stderr, "Unable to connect/create client"
sys.stderr.flush()
exit(1)
return client
# Not used yet.
# Currently, both startService() and stopService() are implemented in
# execThisService(). However, if we wanted to run certain queries before
# shutdown VoltDB, we have to separate startService() and stopService(),
# so that we can add more implementations in between.
def startService(service, logS, logC):
cmd = service + " > " + logS + " 2>&1"
service_ps = subprocess.Popen(cmd, shell=True)
time.sleep(2)
client = getClient()
cmd = service + " client > " + logC + " 2>&1"
ret = call(cmd, shell=True)
print "returning results from service execution: '%s'" % ret
time.sleep(1)
return (service_ps, client)
# Not used yet. Refer to the comments for startService()
def stopService(ps, serviceHandle):
serviceHandle.onecmd("shutdown")
ps.communicate()
# To execute 'run.sh' and save the output in logS
# To execute 'run.sh client' and save the output in logC
def execThisService(service, logS, logC):
cmd = service + " > " + logS + " 2>&1"
print " Server - Exec CMD: '%s'" % cmd
service_ps = subprocess.Popen(cmd, shell=True)
time.sleep(2)
client = getClient()
cmd = service + " client > " + logC + " 2>&1"
print " Client - Exec CMD: '%s'" % cmd
ret = call(cmd, shell=True)
print " Returning results from service execution: '%s'" % ret
client.onecmd("shutdown")
service_ps.communicate()
# Further assertion is required
# We want to make sure that logFileC contains several key strings
# which are defined in 'staticKeyStr'
def assertVoltkv_Voltcache(mod, logC):
staticKeyStr = {
"Command Line Configuration":1,
"Setup & Initialization":1,
"Starting Benchmark":1,
"KV Store Results":1,
"Client Workload Statistics":1,
"System Server Statistics":1,
}
dynamicKeyStr = {}
with open(logC) as f:
content = f.readlines()
cnt = 0
for line in content:
x = line.strip()
dynamicKeyStr[x] = 1
if x in staticKeyStr.keys():
cnt += 1
result = False
msg = None
keys = {}
if(cnt == len(staticKeyStr)):
msg = "The client output has all the expected key words"
keys = staticKeyStr
result = True
else:
msg = "The client output does not have all the expected key words"
for key in staticKeyStr:
if key not in dynamicKeyStr.keys():
keys[key] = key
return (result, msg, keys)
# We want to make sure that logFileC contains this KEY string:
# The Winner is: Edwina Burnam
def assertVoter(mod, logC):
result = False
aStr = "Voting Results"
expected = "The Winner is: Edwina Burnam"
# The 'section' returned by findSectionInFile is not used here,
# However, this piece of info could be used by something else
# which calls findSectionInFile().
(result, section) = findSectionInFile(logC, aStr, expected)
if(result == False):
expected = "ERROR: The Winner is NOT Edwina Burnam!"
# Again, 'section' is not used in this implementation
# section += "\n\n" + expected
# It could return 'section' in some other implementation
# that calls findSectionInFile()
return (result, expected)
# To make sure that we see the key string 'Hola, Mundo!'
def assertHelloWorld(modulename, logC):
expected = "Hola, Mundo!"
buf = readFileIntoArray(logC)
for line in buf:
if(expected == line.rstrip()):
msg = expected
result = True
break
else:
msg = "Expected '%s' for module '%s'. Actually returned: '%s'" % (expected, modulename, actual)
result = False
return (result, msg)
# To make sure the content of logC which is the output of 'run.sh client'
# is identical to the static baseline file.
# If True, the test is PASSED
# If False, then we need to parse the LogC more carefully before we declare
# this test is FAILED
def assertClient(e, logC):
baselineD = origDir + "/plannertester/baseline/"
baselineF = baselineD + e + "/client_output.txt"
# print "baselineD = '%s', baselineF = '%s'" % (baselineD, baselineF)
ret = False
msg = None
if(os.path.exists(baselineF)):
ret = filecmp.cmp(baselineF, logC)
if(ret == True):
msg = "The client output matches the baseline:"
else:
msg = "Warning!! The client output does NOT match the baseline:"
msg += "\nBaseline: %s" % baselineF
msg += "\nThe client output: %s" % logC
else:
msg = "Warning!! Cannot find the baseline file:\n%s" % baselineF
return (ret, msg)
def startTest(testSuiteList):
statusBySuite = {}
msgBySuite = {}
keyWordsBySuite = {}
msg = ""
result = False
# testSuiteList is a dictionary whose keys are test suite names, e.g. helloworld,
# voter, voltkv, & voltcache and the corresponding values are paths where the
# executable run.sh is in. Note that all run.sh can only be invoked as './run.sh
# by design.
for (suiteName, path) in testSuiteList.iteritems():
keyStrSet = None
if suiteName in elem2Test.keys():
# Could be an overkill
os.chdir(path)
currDir = os.getcwd()
service = elem2Test[suiteName]
print ">>> Test: %s\n Current Directory: '%s'" % (suiteName, currDir)
logFileS = os.path.join(logDir, suiteName + "_server")
logFileC = os.path.join(logDir,suiteName + "_client")
print " Log File for VoltDB Server: '%s'" % logFileS
print " Log File for VoltDB Client: '%s'" % logFileC
execThisService(service, logFileS, logFileC)
if(suiteName == "helloworld"):
(result, msg) = assertHelloWorld(suiteName, logFileC)
elif(suiteName == "voter"):
(result, msg) = assertVoter(suiteName, logFileC)
elif(suiteName == "voltkv" or suiteName == "voltcache"):
(result, msg, keyStrSet) = assertVoltkv_Voltcache(suiteName, logFileC)
else:
# Should never fall into this block
msg = "Unknown Suite Name: '%s'. To be implemented. Exit with an error..." % suiteName
print "==-->> %s" % msg
exit(1)
statusBySuite[suiteName] = result
msgBySuite[suiteName] = msg
keyWordsBySuite[suiteName] = keyStrSet
os.chdir(origDir)
# end of for e in testSuiteList:
return (statusBySuite, msgBySuite, keyWordsBySuite)
# end of startTest(testSuiteList):
# status, msg, & keyStrings are all 2-D dictionaries, which have the same keys with
# different values.
# First level keys: module name, e.g. comm, pro, voltkv, voltcache
# Second level keys: suite name, e.g. helloworld, voter, voltkv, voltcache
# Values for status: True or False, which is the testing status for this suite in this package
# Values for msg: A descriptive testing message for this suite in this package
# Values for keyStrings: Only applied for package 'voltkv' & 'voltcache'. If a test suite is
#                        failed for either package 'voltkv' or 'voltcache', the final report
#                        will display a list of missing strings that are expected in the log files
#                        for the client.
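# For illustration only (hypothetical values) of the structures described above:
#   status     = {'comm': {'voter': True,  'helloworld': False}}
#   msg        = {'comm': {'voter': 'The client output matches the baseline:',
#                          'helloworld': "Expected 'Hola, Mundo!' for module 'helloworld'..."}}
#   keyStrings = {'comm': {'voter': None, 'helloworld': None}}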
def create_rpt(info, status, msg, keyStrings, elapsed, rptf):
testtime = "%.2f" % elapsed
testsuites = Element('testsuites', {'time':testtime})
for (mod, suiteNameDict) in status.iteritems():
testsuite = SubElement(testsuites, 'testsuite',
{'package':info["pkgname"],'URL':info["srce"],
'hostname':hostname, 'name':pkgDict[mod]})
for (suitename, status4ThisSuite) in suiteNameDict.iteritems():
failureCnt = "0"
errCnt = "0"
if(status4ThisSuite == False):
failureCnt = "1"
else:
failureCnt = "0"
print "==-->>Package Name: '%s', Suite Name: '%s', Status = '%s'" \
% (mod, suitename, status4ThisSuite)
if(info["ok"] == False):
errCnt = "1"
else:
errCnt = "0"
testcase = SubElement(testsuite, 'testcase',
{'errors':errCnt,'failures':failureCnt, 'name':suitename})
if(failureCnt == "1"):
failure = SubElement(testcase, 'failure',
{'Message':msg[mod][suitename]})
misStr = None
if(keyStrings[mod][suitename] != None):
for j in keyStrings[mod][suitename]:
if(misStr == None):
misStr = j
else:
misStr += ", " + j
missing = SubElement(failure, 'Missing',
{'MissingString':misStr})
else:
failure = SubElement(testcase, 'info',
{'Message':msg[mod][suitename]})
if(errCnt == "1"):
error = SubElement(testcase, 'error',
{'Error':info["err"]})
fo = open(rptf, "wb")
fo.write(prettify(testsuites))
fo.close()
if not os.path.exists(rptf):
        rptf = None  # report file was not created; signal failure to the caller
return rptf
if __name__ == "__main__":
start = time.time()
usage = "Usage: %prog [options]"
parser = OptionParser(usage="%prog [-b <branch name>] [-r <release #>] [-p <comm|pro|voltkv|voltcache|all> <-s all|helloworld|voter|voltkv|voltcache>]", version="%prog 1.0")
parser.add_option("-r", "--release", dest="release",
help="VoltDB release number. If omitted, will be read from version.txt.")
parser.add_option("-p", "--package", dest="pkg",
help="VoltDB package type: comm, pro, voltkv or voltcache. Default is comm. If not set, then this framework will take all packages.")
parser.add_option("-s", "--suite", dest="suite",
help="Test suite name, if not set, then this framework will take all suites. If an incorrect suite name is passed in, then the test suite name is set to 'all' as a default value.")
parser.add_option("-x","--reportxml", dest="reportfile", default="exp_test.xml",
help="Report file location")
parser.add_option("-b","--branch", dest="branch", default="master",
help="Branch name to test")
parser.add_option("-o","--output", dest="destDir", default=os.getcwd(),
help="Output Directory")
parser.set_defaults(pkg="all")
parser.set_defaults(suite="all")
(options, args) = parser.parse_args()
destDir = options.destDir
logDir = os.path.join(destDir,getpass.getuser() + "_" + testname + '_log')
workDir = os.path.join(destDir,getpass.getuser() + "_" + testname)
if not os.path.exists(logDir):
os.makedirs(logDir)
suite = options.suite
if suite not in elem2Test.keys() and suite != "all":
print "Warning: unknown suite name - '%s'" % suite
suite = "all"
print "Info: So we're going to cover all test suites '%s' in this run" % suiteDict[suite]
origDir = os.getcwd()
releaseNum = options.release
if(releaseNum == None):
releaseNum = getReleaseNum()
branchName = options.branch
root = root.replace("branch", branchName)
list = None
if(options.pkg in pkgDict):
print sectionBreak
print "Testing Branch in this RUN: %s" % branchName
print "Testing Version in this RUN: %s" % releaseNum
print "--------------------------------------"
if(options.pkg == "all"):
list = pkgName.keys()
print "Testing all packages in this RUN:"
print "---------------------------------"
for item in pkgName:
pkgFullName = pkgName[item] + '-' + releaseNum + "." + tail
print "%s - %s" % (pkgDict[item], pkgFullName)
else:
list = [options.pkg]
pkgFullName = pkgName[options.pkg] + '-' + releaseNum + "." + tail
print "Testing this package only in this RUN:"
print "--------------------------------------"
print "%s - %s" % (pkgDict[options.pkg], pkgFullName)
else:
print "Unknown package name passed in from cmdline: %s" % options.pkg
print "Select from: " + ', '.join((pkgDict.keys()))
exit(1)
tf = msg = keys = None
tfD = defaultdict(dict)
msgD = defaultdict(dict)
keysD = defaultdict(dict)
for p in list:
ret = installVoltDB(p, releaseNum)
if not ret["ok"]:
print "Error!! %s" % ret["err"]
exit(1)
testSuiteList = setTestSuite(ret["workDir"], suite)
(tf, msg, keys) = startTest(testSuiteList)
tfD[p] = tf
msgD[p] = msg
keysD[p] = keys
status = True
# tfD is a 2-D dictionary.
# First level keys: module name, e.g. comm, pro, voltkv, voltcache
# Second level keys: suite name, e.g. helloworld, voter, voltkv, voltcache
# Values: True or False, which is the testing status for this suite in this package
for (module, suiteNameDict) in tfD.iteritems():
for (suitename, status4ThisSuite) in suiteNameDict.iteritems():
if not status4ThisSuite: # status4ThisSuite == tfD[module][suitename]:
status = False
print >> sys.stderr, "The test suite '%s' in '%s' package is FAILED \
\n'%s'\n%s" \
% (suitename, module, msgD[module][suitename], sectionBreak)
elapsed = (time.time() - start)
reportXML = create_rpt(ret, tfD, msgD, keysD, elapsed, options.reportfile)
print "Refer to the final report '%s' for details." % reportXML
print "Total time consumed in this run: '%.2f'" % elapsed
if(status == False):
print "\nAt least one test suite is Failed!!\n"
exit(1)
print "######################"
print "All tests are PASSED!!"
print "######################"
for p in msgD:
for suitename in msgD[p]:
print "%s - %s -> %s" % (pkgDict[p], suiteDict[suitename], msgD[p][suitename])
exit(0)
| agpl-3.0 | 1,400,331,217,541,593,000 | 37.108514 | 202 | 0.596399 | false |
MarioVilas/secondlife-experiments | SecondLifeCamper.py | 1 | 1408 | from win32con import *
from win32gui import *
from time import sleep
class SecondLifeCamper:
"Second Life Camper class"
hWnd = 0 # Second Life window handle
def __init__(self):
"Constructor"
self.getWindowHandle()
if self.hWnd == 0:
raise Exception, "Cannot find the Second Life window"
def getWindowHandle(self):
"Get the Second Life window handle"
def EnumWindowProc(hWnd, self):
if GetWindowText(hWnd) == "Second Life":
self.hWnd = hWnd
EnumWindows(EnumWindowProc, self)
return self.hWnd
def sendKey(self, keycode):
"Send a single keystroke"
SendMessage(self.hWnd, WM_KEYDOWN, keycode, 0)
sleep(0.5)
SendMessage(self.hWnd, WM_KEYUP, keycode, 0)
sleep(0.5)
def camp(self):
"Camping routine"
# Loop until the user hits Ctrl-C
while 1:
# Make a little pause so we don't use 100% CPU
sleep(10)
# Send a keystroke for "left arrow"
self.sendKey(VK_LEFT)
# Send a keystroke for "right arrow"
self.sendKey(VK_RIGHT)
# when run from the commandline as a script, do this
if __name__ == "__main__":
s = SecondLifeCamper()
print "Camping..."
s.camp()
| gpl-2.0 | 8,501,252,513,469,108,000 | 23.6 | 65 | 0.547585 | false |
kalaspuff/tomodachi | tomodachi/watcher.py | 1 | 6685 | import asyncio
import logging
import os
import sys
import zlib
from typing import Any, Callable, Dict, List, Optional
def crc(file_path: str) -> str:
prev = 0
for line in open(file_path, "rb"):
prev = zlib.crc32(line, prev)
return "%X" % (prev & 0xFFFFFFFF)
class Watcher(object):
def __init__(self, root: Optional[List] = None, configuration: Optional[Dict] = None) -> None:
self.watched_files: Dict[str, float] = {}
self.watched_files_crc: Dict[str, str] = {}
self.root: List[str] = []
self.ignored_dirs = ["__pycache__", ".git", ".svn", "__ignored__", "__temporary__", "__tmp__"]
self.watched_file_endings = [".py", ".pyi", ".json", ".yml", ".html", ".phtml"]
if not root:
directory = os.path.realpath(sys.argv[0].rsplit("/", 1)[0])
self.root = [os.path.dirname(directory) if os.path.isfile(directory) else directory]
else:
self.root = root
if configuration is not None:
ignored_dirs_list = configuration.get("options", {}).get("watcher", {}).get("ignored_dirs", [])
if ignored_dirs_list:
self.ignored_dirs.extend(ignored_dirs_list)
watched_file_endings_list = (
configuration.get("options", {}).get("watcher", {}).get("watched_file_endings", [])
)
if watched_file_endings_list:
self.watched_file_endings.extend(watched_file_endings_list)
self.update_watched_files()
def update_watched_files(self, reindex: bool = False) -> Dict:
watched_files: Dict[str, float] = {}
watched_files_crc: Dict[str, str] = {}
if not self.watched_files or reindex:
for r in self.root:
for root, dirs, files in os.walk(r, topdown=True):
dirs[:] = [d for d in dirs if d not in self.ignored_dirs]
for file in files:
file_path = os.path.join(root, file)
_dir = os.path.dirname(file_path)
if (
_dir not in self.ignored_dirs
and not any(
[
os.path.join(root, _dir).endswith("/{}".format(ignored_dir))
or "/{}/".format(ignored_dir) in os.path.join(root, _dir)
for ignored_dir in self.ignored_dirs
]
)
and any([file.endswith(ending) for ending in self.watched_file_endings])
and "/." not in file_path
):
watched_files[file_path] = os.path.getmtime(file_path)
watched_files_crc[file_path] = (
crc(file_path)
if watched_files[file_path] != self.watched_files.get(file_path)
else self.watched_files_crc.get(file_path, "")
)
else:
for file_path, mtime in self.watched_files.items():
try:
watched_files[file_path] = os.path.getmtime(file_path)
watched_files_crc[file_path] = (
crc(file_path)
if watched_files[file_path] != self.watched_files.get(file_path)
else self.watched_files_crc.get(file_path, "")
)
except FileNotFoundError:
pass
if self.watched_files and self.watched_files != watched_files and self.watched_files_crc == watched_files_crc:
self.watched_files = watched_files
if self.watched_files and self.watched_files != watched_files:
added = [
k[((len(self.root[0]) if k.startswith(self.root[0]) else -1) + 1) :]
for k in watched_files.keys()
if k not in self.watched_files.keys()
]
removed = [
k[((len(self.root[0]) if k.startswith(self.root[0]) else -1) + 1) :]
for k in self.watched_files.keys()
if k not in watched_files.keys()
]
updated = [
k[((len(self.root[0]) if k.startswith(self.root[0]) else -1) + 1) :]
for k in watched_files.keys()
if k in self.watched_files.keys() and self.watched_files[k] != watched_files[k]
]
self.watched_files = watched_files
self.watched_files_crc = watched_files_crc
return {"added": added, "removed": removed, "updated": updated}
self.watched_files = watched_files
self.watched_files_crc = watched_files_crc
return {}
async def watch(self, loop: asyncio.AbstractEventLoop = None, callback_func: Optional[Callable] = None) -> Any:
_loop: Any = asyncio.get_event_loop() if not loop else loop
async def _watch_loop() -> None:
loop_counter = 0
while True:
loop_counter = (loop_counter + 1) % 20
updated_files = self.update_watched_files(reindex=(loop_counter == 0))
if updated_files:
added = updated_files.get("added")
removed = updated_files.get("removed")
updated = updated_files.get("updated")
if removed:
if len(removed) > 2:
removed[2] = "..."
logging.getLogger("watcher.files").warning(
"Removed files: {}".format(", ".join([file for file in removed][0:3]))
)
if added:
if len(added) > 2:
added[2] = "..."
logging.getLogger("watcher.files").warning(
"New files: {}".format(", ".join([file for file in added][0:3]))
)
if updated:
if len(updated) > 2:
updated[2] = "..."
logging.getLogger("watcher.files").warning(
"Updated files: {}".format(", ".join([file for file in updated][0:3]))
)
if callback_func:
await callback_func(set([file for file in added] + [file for file in updated]))
await asyncio.sleep(0.5)
return _loop.create_task(_watch_loop())
| mit | 4,378,000,183,308,738,600 | 44.47619 | 118 | 0.477337 | false |
leiferikb/bitpop | src/tools/memory_inspector/memory_inspector/data/file_storage_unittest.py | 19 | 5178 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This unittest covers both file_storage and serialization modules."""
import os
import tempfile
import time
import unittest
from memory_inspector.core import memory_map
from memory_inspector.core import native_heap
from memory_inspector.core import stacktrace
from memory_inspector.core import symbol
from memory_inspector.data import file_storage
class FileStorageTest(unittest.TestCase):
def setUp(self):
self._storage_path = tempfile.mkdtemp()
self._storage = file_storage.Storage(self._storage_path)
def tearDown(self):
os.removedirs(self._storage_path)
def testSettings(self):
settings_1 = { 'foo' : 1, 'bar' : 2 }
settings_2 = { 'foo' : 1, 'bar' : 2 }
self._storage.StoreSettings('one', settings_1)
self._storage.StoreSettings('two', settings_2)
self._DeepCompare(settings_1, self._storage.LoadSettings('one'))
self._DeepCompare(settings_2, self._storage.LoadSettings('two'))
self._storage.StoreSettings('one', {})
self._storage.StoreSettings('two', {})
def testArchives(self):
self._storage.OpenArchive('foo', create=True)
self._storage.OpenArchive('bar', create=True)
self._storage.OpenArchive('baz', create=True)
self._storage.DeleteArchive('bar')
self.assertTrue('foo' in self._storage.ListArchives())
self.assertFalse('bar' in self._storage.ListArchives())
self.assertTrue('baz' in self._storage.ListArchives())
self._storage.DeleteArchive('foo')
self._storage.DeleteArchive('baz')
def testSnapshots(self):
archive = self._storage.OpenArchive('snapshots', create=True)
t1 = archive.StartNewSnapshot()
archive.StoreMemMaps(memory_map.Map())
time.sleep(0.01) # Max snapshot resolution is in the order of usecs.
t2 = archive.StartNewSnapshot()
archive.StoreMemMaps(memory_map.Map())
archive.StoreNativeHeap(native_heap.NativeHeap())
self.assertIn(t1, archive.ListSnapshots())
self.assertIn(t2, archive.ListSnapshots())
self.assertTrue(archive.HasMemMaps(t1))
self.assertFalse(archive.HasNativeHeap(t1))
self.assertTrue(archive.HasMemMaps(t2))
self.assertTrue(archive.HasNativeHeap(t2))
self._storage.DeleteArchive('snapshots')
def testMmap(self):
archive = self._storage.OpenArchive('mmap', create=True)
timestamp = archive.StartNewSnapshot()
mmap = memory_map.Map()
map_entry1 = memory_map.MapEntry(4096, 8191, 'rw--', '/foo', 0)
map_entry2 = memory_map.MapEntry(65536, 81919, 'rw--', '/bar', 4096)
map_entry2.resident_pages = [5]
mmap.Add(map_entry1)
mmap.Add(map_entry2)
archive.StoreMemMaps(mmap)
mmap_deser = archive.LoadMemMaps(timestamp)
self._DeepCompare(mmap, mmap_deser)
self._storage.DeleteArchive('mmap')
def testNativeHeap(self):
archive = self._storage.OpenArchive('nheap', create=True)
timestamp = archive.StartNewSnapshot()
nh = native_heap.NativeHeap()
for i in xrange(1, 4):
stack_trace = stacktrace.Stacktrace()
frame = nh.GetStackFrame(i * 10 + 1)
frame.SetExecFileInfo('foo.so', 1)
stack_trace.Add(frame)
frame = nh.GetStackFrame(i * 10 + 2)
frame.SetExecFileInfo('bar.so', 2)
stack_trace.Add(frame)
nh.Add(native_heap.Allocation(i * 2, i * 3, stack_trace))
archive.StoreNativeHeap(nh)
nh_deser = archive.LoadNativeHeap(timestamp)
self._DeepCompare(nh, nh_deser)
self._storage.DeleteArchive('nheap')
def testSymbols(self):
archive = self._storage.OpenArchive('symbols', create=True)
symbols = symbol.Symbols()
# Symbol db is global per archive, no need to StartNewSnapshot.
symbols.Add('foo.so', 1, symbol.Symbol('sym1', 'file1.c', 11))
symbols.Add('bar.so', 2, symbol.Symbol('sym2', 'file2.c', 12))
sym3 = symbol.Symbol('sym3', 'file2.c', 13)
sym3.AddSourceLineInfo('outer_file.c', 23)
symbols.Add('baz.so', 3, sym3)
archive.StoreSymbols(symbols)
symbols_deser = archive.LoadSymbols()
self._DeepCompare(symbols, symbols_deser)
self._storage.DeleteArchive('symbols')
def _DeepCompare(self, a, b, prefix=''):
"""Recursively compares two objects (original and deserialized)."""
self.assertEqual(a is None, b is None)
if a is None:
return
_BASICTYPES = (int, basestring, float)
if isinstance(a, _BASICTYPES) and isinstance(b, _BASICTYPES):
return self.assertEqual(a, b, prefix)
self.assertEqual(type(a), type(b), prefix + ' type (%s vs %s' % (
type(a), type(b)))
if isinstance(a, list):
self.assertEqual(len(a), len(b), prefix + ' len (%d vs %d)' % (
len(a), len(b)))
for i in range(len(a)):
self._DeepCompare(a[i], b[i], prefix + '[%d]' % i)
return
if isinstance(a, dict):
self.assertEqual(a.keys(), b.keys(), prefix + ' keys (%s vs %s)' % (
str(a.keys()), str(b.keys())))
for k in a.iterkeys():
self._DeepCompare(a[k], b[k], prefix + '.' + str(k))
return
    return self._DeepCompare(a.__dict__, b.__dict__, prefix)
 | gpl-3.0 | 6,880,012,481,837,985,000 | 36.528986 | 74 | 0.671881 | false |
foone/3dmmInternals | generate/find_classes.py | 1 | 6460 | import re,collections,itertools,json
import pefile,ms3dmm,os,pdb,struct
from build_func_list import lintJSON
def parseFixed(line,*segments):
i=0
for segment in segments:
if segment<0:
segment=-segment
elif segment>0:
yield line[i:i+segment]
else: # ==0
yield line[i:]
return
i+=segment
class EXE(object):
def __init__(self):
pe=self.pe = pefile.PE(ms3dmm.getAnyEXE())
self.base=pe.OPTIONAL_HEADER.ImageBase
self.data=pe.get_memory_mapped_image()
def probeVTable(self, address):
data,base=self.data,self.base
def probeForPiece():
for i in itertools.count():
o=address-base+(4*i)
yield struct.unpack('<L',data[o:o+4])[0]
return itertools.takewhile(lambda x:x!=0, probeForPiece())
class Source(object):
def __init__(self):
self.json_file = '../classes.json'
self.constructors={}
self.vtables=collections.defaultdict(list)
self.vtable_entries=collections.defaultdict(list)
self.class_ids={}
self.malloc_calls={}
self.classes=collections.defaultdict(lambda:collections.defaultdict(dict))
self.loadJSON()
def parse(self, filename):
for function in Function.parse('disassembly.txt'):
if function.isConstructor():
self.constructors[function.address]=function
for offset,line in function.getVTableLines():
self.vtables[function.address].append((line,offset))
self.malloc_calls[function.address]=list(function.findMallocCalls())
classid,line=function.getClassID()
if classid:
self.class_ids[function.address]=(line,classid)
def parseVTables(self, exe):
for function_address,lines in self.vtables.items():
for line,offset in lines:
address=int(offset,16)
self.vtable_entries[(function_address,offset)].extend(exe.probeVTable(address))
def findConstructorsForClassIDs(self):
for (function_address,offset),entries in self.vtable_entries.items():
klass = self.addClass(function_address, offset)
mallocs=self.malloc_calls.get(function_address,[])
if mallocs:
klass['malloc-sizes']=mallocs
klass['vtable']=entries
if len(entries)<5:
print 'TOO SMALL FOR BASETHING',function_address,offset
continue # BaseThing has 5 virtual methods, so if there are less than 5 this isn't a BaseThing subclass
id_method='%08X' % entries[1]
line,classid = self.class_ids.get(id_method,(None,None))
if line:
cid=int(classid,16)
cidstr=Source.cleanClassID(cid)
print 'POSSIBLE CLASS ID',function_address,offset,cidstr,len(entries),mallocs
klass['id']={'hex':classid,'string':cidstr,'id_method':entries[1]}
else:
print 'VTABLE, NO CLASSID',function_address,offset,id_method,len(entries),mallocs
def addClass(self,address,offset):
klass = self.classes[address]['find_classes'][offset]={}
return klass
def loadJSON(self):
try:
with open(self.json_file,'rb') as f:
self.classes.update(json.load(f)['classes'])
except IOError:
pass
def saveJSON(self):
with open(self.json_file,'wb') as f:
f.write(lintJSON({'classes':self.classes}))
@staticmethod
def cleanClassID(classid):
s=struct.pack('!L',classid)
return re.sub('[^A-Za-z0-9 ]','?',s.replace('\0',' '))
class Function(object):
CLASS_ID_PATTERN = re.compile(r"^MOV EAX,(?:OFFSET )?([0-9A-F]{3,})$")
VTABLE_LOAD_PATTERN = re.compile(r"^MOV DWORD PTR DS:\[[A-Z]{3}\],OFFSET ([0-9A-F]+)")
PUSH_CONSTANT_PATTERN = re.compile(r"^PUSH ([0-9A-F]+)$")
def __init__(self,lines=None):
if lines is None:
lines=[]
self.lines=lines
def add(self,line):
if line.isSpacer():
return # ignore this line rather than adding it to the function
self.lines.append(line)
def getClassID(self):
lines=self.lines
if len(lines)==2 and lines[1].command=='RETN':
m=Function.CLASS_ID_PATTERN.match(lines[0].command)
if m:
return m.group(1),lines[0]
return None,None
def isConstructor(self):
return any(Function.VTABLE_LOAD_PATTERN.match(line.command) for line in self.lines)
def getVTableLines(self):
for line in self.lines:
m=Function.VTABLE_LOAD_PATTERN.search(line.command)
if m:
yield m.group(1),line
def findMallocCalls(self):
prev=None
for line in self.lines:
if line.command=='CALL malloc':
if prev:
m=Function.PUSH_CONSTANT_PATTERN.search(prev.command)
if m:
yield int(m.group(1),16)
prev=line
@property
def address(self):
try:
return self.lines[0].address
except IndexError:
return None
def length(self):
return len(self.lines)
def __len__(self):
return len(self.lines)
def __iter__(self):
return self.lines.__iter__()
def __repr__(self):
lines=self.lines
if not lines:
return 'Function<Empty>'
else:
return 'Function<%08X>' % self.lines[0].addressValue
@staticmethod
def parse(filename):
buffer=Function()
for line in Disassembly.parse(filename):
if line.isSpacer() or line.startsNewFunction():
if buffer:
yield buffer
buffer=Function()
buffer.add(line)
if buffer:
yield buffer
class Disassembly(object):
ADDRESS_PATTERN = re.compile('^[0-9A-F]{8} ')
PARTS=('address','indicator','hex','command','comments')
def __init__(self,buffer):
self.line = line = '\n'.join(buffer)
self.address,self.indicator,self.hex,self.command,comments=parseFixed(line,8,-2,2,-2,13,-1,41,0)
if comments.startswith(';'):
self.comments=comments[1:]
else:
self.comments=''
self.command=self.command.strip()
def __str__(self):
return 'Disassembly({})'.format(', '.join('{}={}'.format(key,getattr(self,key)) for key in Disassembly.PARTS))
def isSpacer(self):
return self.command=='INT3'
def startsNewFunction(self):
return self.indicator=='/$'
@property
def addressValue(self):
return int(self.address,16)
@staticmethod
def parse(file):
with open(file,'rb') as f:
f.next() # skip header
f.next()
buffer=[]
for line in f:
if Disassembly.ADDRESS_PATTERN.match(line):
if buffer:
yield Disassembly(buffer)
buffer=[]
buffer.append(line.rstrip('\r\n'))
if buffer:
yield Disassembly(buffer)
if __name__=='__main__':
source = Source()
source.parse('disassembly.txt')
exe=EXE()
source.parseVTables(exe)
source.findConstructorsForClassIDs()
source.saveJSON()
"""
for function in Function.parse('disassembly.txt'):
if function.isConstructor():
print function,len(function),function.isClassID(),function.isConstructor()
for line in function:
print '\t%s' % line.line
pass
c=c+1
print c
""" | unlicense | 5,159,623,017,111,422,000 | 25.588477 | 112 | 0.687307 | false |
zdenop/lector | lector/utils/__init__.py | 1 | 4266 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Lector utils
Copyright (C) 2011-2013 Davide Setti, Zdenko Podobný
Website: http://code.google.com/p/lector
This program is released under the GNU GPLv2
"""
import sys
import os
from glob import glob
from subprocess import Popen, PIPE
from PyQt5.QtGui import QImage
# workaround to run textwidget outside of Lector
CMD_FOLDER = os.path.dirname(os.path.abspath(__file__))
CMD_FOLDER += "/../"
if CMD_FOLDER not in sys.path:
sys.path.insert(0, CMD_FOLDER)
from utils import settings
def pilImage2Qt(im):
if im.mode != 'RGB':
im = im.convert('RGB')
s = im.tostring("jpeg", "RGB")
qtimage = QImage()
qtimage.loadFromData(s)
# from PIL import ImageQt
# qtimage = ImageQt.ImageQt(im)
return qtimage
def extract_tesseract_languages_path(error_message):
"""
>>> extract_tesseract_languages_path("Unable to load unicharset file /usr/share/tesseract-ocr/tessdata/invalid.unicharset")
('/usr/share/tesseract-ocr/tessdata', '.unicharset')
"""
# problem if there is space in path
print("error_message", error_message)
if len(error_message) < 1:
return "", ""
invalid_path = error_message.split()[-1]
path, invalid_fn = os.path.split(invalid_path)
_, extension = os.path.splitext(invalid_fn)
return path, extension
def get_tesseract_languages():
"""
get list of lang
"""
tess_exec = settings.get('tesseract-ocr:executable')
if not tess_exec:
tess_exec = 'tesseract'
try:
poTess = Popen([tess_exec, '--list-langs'],
shell=False, stdout=PIPE, stderr=PIPE)
stdout_message, lTess = poTess.communicate()
        # we need to remove unneeded information, e.g. OpenCL performance
if isinstance(lTess, bytes):
lTess = str(lTess, 'utf-8')
out_split = lTess.split('\n')
langlist = list()
add_lang = False
        for row in out_split:
            # skip the "List of ..." header line itself and any blank rows
            if row.startswith('List of'):
                add_lang = True
                continue
            if add_lang and row.strip():
                langlist.append(row.strip())
if langlist:
return langlist
else:
return get_tesseract_languages_old()
except OSError as ex:
print("ex", ex)
return None
def get_tesseract_languages_old():
"""
make a list of installed language data files
"""
tess_exec = settings.get('tesseract-ocr:executable')
if not tess_exec:
tess_exec = 'tesseract'
try:
poTess = Popen([tess_exec, 'a', 'a', '-l', 'invalid'],
shell=False, stdout=PIPE, stderr=PIPE)
stdout_message, lTess = poTess.communicate()
tess_data_prefix, langdata_ext = \
extract_tesseract_languages_path(lTess)
except OSError as ex:
print("ex", ex)
return None
# env. setting can help to handle path with spaces
tess_data_prefix = settings.get('tesseract-ocr:TESSDATA_PREFIX:')
if not tess_data_prefix:
tess_data_prefix = os.getenv('TESSDATA_PREFIX')
tessdata_path = os.path.join(tess_data_prefix, "tessdata")
if not os.path.exists(tessdata_path):
print("Tesseract data path ('%s') do not exist!" % tessdata_path)
return None
langdata = glob(tess_data_prefix + os.path.sep + '*' + langdata_ext)
return [os.path.splitext(os.path.split(uc)[1])[0] for uc in langdata]
def get_spellchecker_languages(directory = None):
"""
Check if spellchecker is installed and provide list of languages
"""
try:
import enchant
if (directory):
enchant.set_param("enchant.myspell.dictionary.path", directory)
langs = enchant.list_languages()
return sorted(langs)
except:
print("can not start spellchecker!!!")
import traceback
traceback.print_exc()
return None
# for testing module functionality
def main():
""" Main loop to run test
"""
# langs = get_spellchecker_languages()
langs = get_tesseract_languages()
if langs:
print('Found languages:', langs)
else:
print('No lang found!')
return
if __name__ == '__main__':
main()
| gpl-2.0 | -483,635,185,359,244,200 | 27.433333 | 127 | 0.609144 | false |
bobbymckinney/hot_press_monitor | program_hotpress/EnhancedStatusBar.py | 12 | 10680 | # --------------------------------------------------------------------------- #
# ENHANCEDSTATUSBAR wxPython IMPLEMENTATION
# Python Code By:
#
# Andrea Gavana, @ 31 May 2005
# Nitro, @ 21 September 2005
# Latest Revision: 21 September 2005, 19.57.20 GMT+2
# Latest Revision before Latest Revision: 21 September 2005, 18.29.35 GMT+2
# Latest Revision before Latest Revision before Latest Revision: 31 May 2005, 23.17 CET
#
#
# TODO List/Caveats
#
# 1. Some Requests/Features To Add?
#
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# [email protected]
# [email protected]
#
# Or, Obviously, To The wxPython Mailing List!!!
#
#
# End Of Comments
# --------------------------------------------------------------------------- #
""" Description:
EnhancedStatusBar Is A Slight Modification (Actually A Subclassing) Of wx.StatusBar.
It Allows You To Add Almost Any Widget You Like To The wx.StatusBar Of Your Main
Frame Application And Also To Layout Them Properly.
What It Can Do:
1) Almost All The Functionalities Of wx.StatusBar Are Still Present;
2) You Can Add Different Kind Of Widgets Into Every Field Of The EnhancedStatusBar;
3) The AddWidget() Methods Accepts 2 Layout Inputs:
   - horizontalalignment: This Specifies The Horizontal Alignment For Your Widget,
     And Can Be ESB_EXACT_FIT, ESB_ALIGN_CENTER_HORIZONTAL, ESB_ALIGN_LEFT And
     ESB_ALIGN_RIGHT;
   - verticalalignment: This Specifies The Vertical Alignment For Your Widget,
     And Can Be ESB_EXACT_FIT, ESB_ALIGN_CENTER_VERTICAL, ESB_ALIGN_TOP And
     ESB_ALIGN_BOTTOM;
EnhancedStatusBar Is Freeware And Distributed Under The wxPython License.
Latest Revision: 21 September 2005, 19.57.20 GMT+2
Latest Revision before Latest Revision: 21 September 2005, 18.29.35 GMT+2
Latest Revision before Latest Revision before Latest Revision: 31 May 2005, 23.17 CET
"""
import wx
# Vertical Alignment Constants
ESB_ALIGN_CENTER_VERTICAL = 1
ESB_ALIGN_TOP = 2
ESB_ALIGN_BOTTOM = 3
# Horizontal Alignment Constants
ESB_ALIGN_CENTER_HORIZONTAL = 11
ESB_ALIGN_LEFT = 12
ESB_ALIGN_RIGHT = 13
# Exact Fit (Either Horizontal Or Vertical Or Both) Constant
ESB_EXACT_FIT = 20
# ---------------------------------------------------------------
# Class EnhancedStatusBar
# ---------------------------------------------------------------
# This Is The Main Class Implementation. See The Demo For Details
# ---------------------------------------------------------------
class EnhancedStatusBarItem(object):
def __init__(self, widget, pos, horizontalalignment=ESB_ALIGN_CENTER_HORIZONTAL, verticalalignment=ESB_ALIGN_CENTER_VERTICAL):
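        # Store every constructor argument (widget, pos and both alignment flags)
        # as an instance attribute in one step; note that locals() also includes
        # a reference to self itself.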
self.__dict__.update( locals() )
class EnhancedStatusBar(wx.StatusBar):
def __init__(self, parent, id=wx.ID_ANY, style=wx.ST_SIZEGRIP,
name="EnhancedStatusBar"):
"""Default Class Constructor.
EnhancedStatusBar.__init__(self, parent, id=wx.ID_ANY,
style=wx.ST_SIZEGRIP,
name="EnhancedStatusBar")
"""
wx.StatusBar.__init__(self, parent, id, style, name)
self._items = {}
self._curPos = 0
self._parent = parent
wx.EVT_SIZE(self, self.OnSize)
wx.CallAfter(self.OnSize, None)
def OnSize(self, event):
"""Handles The wx.EVT_SIZE Events For The StatusBar.
Actually, All The Calculations Linked To HorizontalAlignment And
VerticalAlignment Are Done In This Function."""
for pos, item in self._items.items():
widget, horizontalalignment, verticalalignment = item.widget, item.horizontalalignment, item.verticalalignment
rect = self.GetFieldRect(pos)
widgetpos = widget.GetPosition()
widgetsize = widget.GetSize()
rect = self.GetFieldRect(pos)
if horizontalalignment == ESB_EXACT_FIT:
if verticalalignment == ESB_EXACT_FIT:
widget.SetSize((rect.width-2, rect.height-2))
widget.SetPosition((rect.x-1, rect.y-1))
elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:
                if widgetsize[1] < rect.height - 1:
diffs = (rect.height - widgetsize[1])/2
widget.SetSize((rect.width-2, widgetsize[1]))
widget.SetPosition((rect.x-1, rect.y+diffs))
else:
widget.SetSize((rect.width-2, widgetsize[1]))
widget.SetPosition((rect.x-1, rect.y-1))
elif verticalalignment == ESB_ALIGN_TOP:
widget.SetSize((rect.width-2, widgetsize[1]))
widget.SetPosition((rect.x-1, rect.y))
elif verticalalignment == ESB_ALIGN_BOTTOM:
widget.SetSize((rect.width-2, widgetsize[1]))
widget.SetPosition((rect.x-1, rect.height-widgetsize[1]))
elif horizontalalignment == ESB_ALIGN_LEFT:
xpos = rect.x - 1
if verticalalignment == ESB_EXACT_FIT:
widget.SetSize((widgetsize[0], rect.height-2))
widget.SetPosition((xpos, rect.y-1))
elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:
if widgetsize[1] < rect.height - 1:
diffs = (rect.height - widgetsize[1])/2
widget.SetPosition((xpos, rect.y+diffs))
else:
widget.SetSize((widgetsize[0], rect.height-2))
widget.SetPosition((xpos, rect.y-1))
elif verticalalignment == ESB_ALIGN_TOP:
widget.SetPosition((xpos, rect.y))
elif verticalalignment == ESB_ALIGN_BOTTOM:
widget.SetPosition((xpos, rect.height-widgetsize[1]))
elif horizontalalignment == ESB_ALIGN_RIGHT:
xpos = rect.x + rect.width - widgetsize[0] - 1
if verticalalignment == ESB_EXACT_FIT:
widget.SetSize((widgetsize[0], rect.height-2))
widget.SetPosition((xpos, rect.y-1))
elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:
if widgetsize[1] < rect.height - 1:
diffs = (rect.height - widgetsize[1])/2
widget.SetPosition((xpos, rect.y+diffs))
else:
widget.SetSize((widgetsize[0], rect.height-2))
widget.SetPosition((xpos, rect.y-1))
elif verticalalignment == ESB_ALIGN_TOP:
widget.SetPosition((xpos, rect.y))
elif verticalalignment == ESB_ALIGN_BOTTOM:
widget.SetPosition((xpos, rect.height-widgetsize[1]))
elif horizontalalignment == ESB_ALIGN_CENTER_HORIZONTAL:
xpos = rect.x + (rect.width - widgetsize[0])/2 - 1
if verticalalignment == ESB_EXACT_FIT:
widget.SetSize((widgetsize[0], rect.height))
widget.SetPosition((xpos, rect.y))
elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:
if widgetsize[1] < rect.height - 1:
diffs = (rect.height - widgetsize[1])/2
widget.SetPosition((xpos, rect.y+diffs))
else:
widget.SetSize((widgetsize[0], rect.height-1))
widget.SetPosition((xpos, rect.y+1))
elif verticalalignment == ESB_ALIGN_TOP:
widget.SetPosition((xpos, rect.y))
elif verticalalignment == ESB_ALIGN_BOTTOM:
widget.SetPosition((xpos, rect.height-widgetsize[1]))
if event is not None:
event.Skip()
def AddWidget(self, widget, horizontalalignment=ESB_ALIGN_CENTER_HORIZONTAL,
verticalalignment=ESB_ALIGN_CENTER_VERTICAL, pos = -1):
"""Add A Widget To The EnhancedStatusBar.
Parameters:
- horizontalalignment: This Can Be One Of:
a) ESB_EXACT_FIT: The Widget Will Fit Horizontally The StatusBar Field Width;
b) ESB_ALIGN_CENTER_HORIZONTAL: The Widget Will Be Centered Horizontally In
The StatusBar Field;
c) ESB_ALIGN_LEFT: The Widget Will Be Left Aligned In The StatusBar Field;
d) ESB_ALIGN_RIGHT: The Widget Will Be Right Aligned In The StatusBar Field;
- verticalalignment:
a) ESB_EXACT_FIT: The Widget Will Fit Vertically The StatusBar Field Height;
b) ESB_ALIGN_CENTER_VERTICAL: The Widget Will Be Centered Vertically In
The StatusBar Field;
c) ESB_ALIGN_BOTTOM: The Widget Will Be Bottom Aligned In The StatusBar Field;
d) ESB_ALIGN_TOP: The Widget Will Be TOP Aligned In The StatusBar Field;
"""
if pos == -1:
pos = self._curPos
self._curPos += 1
if self.GetFieldsCount() <= pos:
raise "\nERROR: EnhancedStatusBar has a max of %d items, you tried to set item #%d" % (self.GetFieldsCount(), pos)
if horizontalalignment not in [ESB_ALIGN_CENTER_HORIZONTAL, ESB_EXACT_FIT,
ESB_ALIGN_LEFT, ESB_ALIGN_RIGHT]:
raise '\nERROR: Parameter "horizontalalignment" Should Be One Of '\
'"ESB_ALIGN_CENTER_HORIZONTAL", "ESB_ALIGN_LEFT", "ESB_ALIGN_RIGHT"' \
'"ESB_EXACT_FIT"'
if verticalalignment not in [ESB_ALIGN_CENTER_VERTICAL, ESB_EXACT_FIT,
ESB_ALIGN_TOP, ESB_ALIGN_BOTTOM]:
raise '\nERROR: Parameter "verticalalignment" Should Be One Of '\
'"ESB_ALIGN_CENTER_VERTICAL", "ESB_ALIGN_TOP", "ESB_ALIGN_BOTTOM"' \
'"ESB_EXACT_FIT"'
try:
self.RemoveChild(self._items[pos].widget)
self._items[pos].widget.Destroy()
except KeyError: pass
self._items[pos] = EnhancedStatusBarItem(widget, pos, horizontalalignment, verticalalignment)
wx.CallAfter(self.OnSize, None)
| gpl-3.0 | -77,100,964,562,159,200 | 41.591837 | 130 | 0.552809 | false |
arannasousa/pagseguro_xml | exemplos/testes_consultas.py | 1 | 2403 | # coding=utf-8
# ---------------------------------------------------------------
# Developer: Arannã Sousa Santos
# Month: 12
# Year: 2015
# Projeto: pagseguro_xml
# e-mail: [email protected]
# ---------------------------------------------------------------
import logging
from pagseguro_xml.consultas import ApiPagSeguroConsulta_v3, CONST_v3
logger = logging.basicConfig(level=logging.DEBUG)
PAGSEGURO_API_AMBIENTE = u'sandbox'
PAGSEGURO_API_EMAIL = u'[email protected]'
PAGSEGURO_API_TOKEN_PRODUCAO = u''
PAGSEGURO_API_TOKEN_SANDBOX = u''
api = ApiPagSeguroConsulta_v3(ambiente=CONST_v3.AMBIENTE.SANDBOX)
PAGSEGURO_API_TOKEN = PAGSEGURO_API_TOKEN_SANDBOX
CHAVE_TRANSACAO = u''  # first test
# CHAVE_TRANSACAO = u''  # second test
def exemploDetalhes():
    # the return value is either an error message or the class representing
    # the details [pagseguro_xml.consultas.classes.ClasseTransacaoDetalhes]
ok, retorno = api.detalhes_v3(PAGSEGURO_API_EMAIL, PAGSEGURO_API_TOKEN, CHAVE_TRANSACAO)
if ok:
print u'-' * 50
print retorno.xml
print u'-' * 50
for a in retorno.alertas:
print a
else:
print u'Motivo do erro:', retorno
def exemploHistorico():
from datetime import datetime
inicial = datetime(2015, 12, 9)
final = datetime(2015, 12, 12)
ok, retorno = api.historico_v2(PAGSEGURO_API_EMAIL, PAGSEGURO_API_TOKEN, inicial, final)
if ok:
print u'-' * 50
print retorno.xml
print u'-' * 50
for i, transacao in enumerate(retorno.transactions, start=1):
print transacao.xml
for a in retorno.alertas:
print a
else:
print u'Motivo do erro:', retorno
def exemploAbandonadas():
from datetime import datetime
inicial = datetime(2015, 12, 9)
final = datetime(2015, 12, 12)
ok, retorno = api.abandonadas_v2(PAGSEGURO_API_EMAIL, PAGSEGURO_API_TOKEN, inicial, final)
if ok:
print u'-' * 50
print retorno.xml
print u'-' * 50
for transacao in retorno.transactions:
print transacao.xml
for a in retorno.alertas:
print a
else:
print u'Motivo do erro:', retorno
print u'#' * 50
exemploDetalhes()
print u'*' * 50
exemploHistorico()
print u'*' * 50
exemploAbandonadas()
print u'#' * 50 | gpl-2.0 | -3,923,947,067,807,223,000 | 22.782178 | 94 | 0.601416 | false |
zhimin711/nova | nova/objects/compute_node.py | 2 | 19033 | # Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from oslo_utils import versionutils
import nova.conf
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.objects import pci_device_pool
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@base.NovaObjectRegistry.register
class ComputeNode(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added get_by_service_id()
# Version 1.2: String attributes updated to support unicode
# Version 1.3: Added stats field
# Version 1.4: Added host ip field
# Version 1.5: Added numa_topology field
# Version 1.6: Added supported_hv_specs
# Version 1.7: Added host field
# Version 1.8: Added get_by_host_and_nodename()
# Version 1.9: Added pci_device_pools
# Version 1.10: Added get_first_node_by_host_for_old_compat()
# Version 1.11: PciDevicePoolList version 1.1
# Version 1.12: HVSpec version 1.1
# Version 1.13: Changed service_id field to be nullable
# Version 1.14: Added cpu_allocation_ratio and ram_allocation_ratio
# Version 1.15: Added uuid
# Version 1.16: Added disk_allocation_ratio
VERSION = '1.16'
fields = {
'id': fields.IntegerField(read_only=True),
'uuid': fields.UUIDField(read_only=True),
'service_id': fields.IntegerField(nullable=True),
'host': fields.StringField(nullable=True),
'vcpus': fields.IntegerField(),
'memory_mb': fields.IntegerField(),
'local_gb': fields.IntegerField(),
'vcpus_used': fields.IntegerField(),
'memory_mb_used': fields.IntegerField(),
'local_gb_used': fields.IntegerField(),
'hypervisor_type': fields.StringField(),
'hypervisor_version': fields.IntegerField(),
'hypervisor_hostname': fields.StringField(nullable=True),
'free_ram_mb': fields.IntegerField(nullable=True),
'free_disk_gb': fields.IntegerField(nullable=True),
'current_workload': fields.IntegerField(nullable=True),
'running_vms': fields.IntegerField(nullable=True),
# TODO(melwitt): cpu_info is non-nullable in the schema but we must
# wait until version 2.0 of ComputeNode to change it to non-nullable
'cpu_info': fields.StringField(nullable=True),
'disk_available_least': fields.IntegerField(nullable=True),
'metrics': fields.StringField(nullable=True),
'stats': fields.DictOfNullableStringsField(nullable=True),
'host_ip': fields.IPAddressField(nullable=True),
# TODO(rlrossit): because of history, numa_topology is held here as a
# StringField, not a NUMATopology object. In version 2 of ComputeNode
# this will be converted over to a fields.ObjectField('NUMATopology')
'numa_topology': fields.StringField(nullable=True),
# NOTE(pmurray): the supported_hv_specs field maps to the
# supported_instances field in the database
'supported_hv_specs': fields.ListOfObjectsField('HVSpec'),
# NOTE(pmurray): the pci_device_pools field maps to the
# pci_stats field in the database
'pci_device_pools': fields.ObjectField('PciDevicePoolList',
nullable=True),
'cpu_allocation_ratio': fields.FloatField(),
'ram_allocation_ratio': fields.FloatField(),
'disk_allocation_ratio': fields.FloatField(),
}
def obj_make_compatible(self, primitive, target_version):
super(ComputeNode, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 16):
if 'disk_allocation_ratio' in primitive:
del primitive['disk_allocation_ratio']
if target_version < (1, 15):
if 'uuid' in primitive:
del primitive['uuid']
if target_version < (1, 14):
if 'ram_allocation_ratio' in primitive:
del primitive['ram_allocation_ratio']
if 'cpu_allocation_ratio' in primitive:
del primitive['cpu_allocation_ratio']
if target_version < (1, 13) and primitive.get('service_id') is None:
# service_id is non-nullable in versions before 1.13
try:
service = objects.Service.get_by_compute_host(
self._context, primitive['host'])
primitive['service_id'] = service.id
except (exception.ComputeHostNotFound, KeyError):
# NOTE(hanlind): In case anything goes wrong like service not
# found or host not being set, catch and set a fake value just
# to allow for older versions that demand a value to work.
# Setting to -1 will, if value is later used result in a
# ServiceNotFound, so should be safe.
primitive['service_id'] = -1
if target_version < (1, 7) and 'host' in primitive:
del primitive['host']
if target_version < (1, 5) and 'numa_topology' in primitive:
del primitive['numa_topology']
if target_version < (1, 4) and 'host_ip' in primitive:
del primitive['host_ip']
if target_version < (1, 3) and 'stats' in primitive:
# pre 1.3 version does not have a stats field
del primitive['stats']
@staticmethod
def _host_from_db_object(compute, db_compute):
if (('host' not in db_compute or db_compute['host'] is None)
and 'service_id' in db_compute
and db_compute['service_id'] is not None):
# FIXME(sbauza) : Unconverted compute record, provide compatibility
# This has to stay until we can be sure that any/all compute nodes
# in the database have been converted to use the host field
# Service field of ComputeNode could be deprecated in a next patch,
# so let's use directly the Service object
try:
service = objects.Service.get_by_id(
compute._context, db_compute['service_id'])
except exception.ServiceNotFound:
compute.host = None
return
try:
compute.host = service.host
except (AttributeError, exception.OrphanedObjectError):
# Host can be nullable in Service
compute.host = None
elif 'host' in db_compute and db_compute['host'] is not None:
# New-style DB having host as a field
compute.host = db_compute['host']
else:
# We assume it should not happen but in case, let's set it to None
compute.host = None
@staticmethod
def _from_db_object(context, compute, db_compute):
special_cases = set([
'stats',
'supported_hv_specs',
'host',
'pci_device_pools',
'uuid',
])
fields = set(compute.fields) - special_cases
for key in fields:
value = db_compute[key]
# NOTE(sbauza): Since all compute nodes don't possibly run the
# latest RT code updating allocation ratios, we need to provide
# a backwards compatible way of hydrating them.
# As we want to care about our operators and since we don't want to
# ask them to change their configuration files before upgrading, we
# prefer to hardcode the default values for the ratios here until
# the next release (Newton) where the opt default values will be
# restored for both cpu (16.0), ram (1.5) and disk (1.0)
# allocation ratios.
# TODO(sbauza): Remove that in the next major version bump where
            # we break compatibility with old Liberty computes
if (key == 'cpu_allocation_ratio' or key == 'ram_allocation_ratio'
or key == 'disk_allocation_ratio'):
if value == 0.0:
# Operator has not yet provided a new value for that ratio
# on the compute node
value = None
if value is None:
# ResourceTracker is not updating the value (old node)
# or the compute node is updated but the default value has
# not been changed
value = getattr(CONF, key)
if value == 0.0 and key == 'cpu_allocation_ratio':
# It's not specified either on the controller
value = 16.0
if value == 0.0 and key == 'ram_allocation_ratio':
# It's not specified either on the controller
value = 1.5
if value == 0.0 and key == 'disk_allocation_ratio':
# It's not specified either on the controller
value = 1.0
setattr(compute, key, value)
stats = db_compute['stats']
if stats:
compute.stats = jsonutils.loads(stats)
sup_insts = db_compute.get('supported_instances')
if sup_insts:
hv_specs = jsonutils.loads(sup_insts)
hv_specs = [objects.HVSpec.from_list(hv_spec)
for hv_spec in hv_specs]
compute.supported_hv_specs = hv_specs
pci_stats = db_compute.get('pci_stats')
if pci_stats is not None:
pci_stats = pci_device_pool.from_pci_stats(pci_stats)
compute.pci_device_pools = pci_stats
compute._context = context
# Make sure that we correctly set the host field depending on either
# host column is present in the table or not
compute._host_from_db_object(compute, db_compute)
# NOTE(danms): Remove this conditional load (and remove uuid from
# the list of special_cases above) once we're in Newton and have
# enforced that all UUIDs in the database are not NULL.
if db_compute.get('uuid'):
compute.uuid = db_compute['uuid']
compute.obj_reset_changes()
# NOTE(danms): This needs to come after obj_reset_changes() to make
# sure we only save the uuid, if we generate one.
# FIXME(danms): Remove this in Newton once we have enforced that
# all compute nodes have uuids set in the database.
if 'uuid' not in compute:
compute.uuid = uuidutils.generate_uuid()
LOG.debug('Generated UUID %(uuid)s for compute node %(id)i',
dict(uuid=compute.uuid, id=compute.id))
compute.save()
return compute
@base.remotable_classmethod
def get_by_id(cls, context, compute_id):
db_compute = db.compute_node_get(context, compute_id)
return cls._from_db_object(context, cls(), db_compute)
# NOTE(hanlind): This is deprecated and should be removed on the next
# major version bump
@base.remotable_classmethod
def get_by_service_id(cls, context, service_id):
db_computes = db.compute_nodes_get_by_service_id(context, service_id)
# NOTE(sbauza): Old version was returning an item, we need to keep this
# behaviour for backwards compatibility
db_compute = db_computes[0]
return cls._from_db_object(context, cls(), db_compute)
@base.remotable_classmethod
def get_by_host_and_nodename(cls, context, host, nodename):
db_compute = db.compute_node_get_by_host_and_nodename(
context, host, nodename)
return cls._from_db_object(context, cls(), db_compute)
# TODO(pkholkin): Remove this method in the next major version bump
@base.remotable_classmethod
def get_first_node_by_host_for_old_compat(cls, context, host,
use_slave=False):
computes = ComputeNodeList.get_all_by_host(context, host, use_slave)
# FIXME(sbauza): Some hypervisors (VMware, Ironic) can return multiple
# nodes per host, we should return all the nodes and modify the callers
# instead.
# Arbitrarily returning the first node.
return computes[0]
@staticmethod
def _convert_stats_to_db_format(updates):
stats = updates.pop('stats', None)
if stats is not None:
updates['stats'] = jsonutils.dumps(stats)
@staticmethod
def _convert_host_ip_to_db_format(updates):
host_ip = updates.pop('host_ip', None)
if host_ip:
updates['host_ip'] = str(host_ip)
@staticmethod
def _convert_supported_instances_to_db_format(updates):
hv_specs = updates.pop('supported_hv_specs', None)
if hv_specs is not None:
hv_specs = [hv_spec.to_list() for hv_spec in hv_specs]
updates['supported_instances'] = jsonutils.dumps(hv_specs)
@staticmethod
def _convert_pci_stats_to_db_format(updates):
if 'pci_device_pools' in updates:
pools = updates.pop('pci_device_pools')
if pools is not None:
pools = jsonutils.dumps(pools.obj_to_primitive())
updates['pci_stats'] = pools
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
if 'uuid' not in updates:
updates['uuid'] = uuidutils.generate_uuid()
self._convert_stats_to_db_format(updates)
self._convert_host_ip_to_db_format(updates)
self._convert_supported_instances_to_db_format(updates)
self._convert_pci_stats_to_db_format(updates)
db_compute = db.compute_node_create(self._context, updates)
self._from_db_object(self._context, self, db_compute)
@base.remotable
def save(self, prune_stats=False):
# NOTE(belliott) ignore prune_stats param, no longer relevant
updates = self.obj_get_changes()
updates.pop('id', None)
self._convert_stats_to_db_format(updates)
self._convert_host_ip_to_db_format(updates)
self._convert_supported_instances_to_db_format(updates)
self._convert_pci_stats_to_db_format(updates)
db_compute = db.compute_node_update(self._context, self.id, updates)
self._from_db_object(self._context, self, db_compute)
@base.remotable
def destroy(self):
db.compute_node_delete(self._context, self.id)
def update_from_virt_driver(self, resources):
# NOTE(pmurray): the virt driver provides a dict of values that
# can be copied into the compute node. The names and representation
# do not exactly match.
# TODO(pmurray): the resources dict should be formalized.
keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology", "hypervisor_type",
"hypervisor_version", "hypervisor_hostname",
"disk_available_least", "host_ip"]
for key in keys:
if key in resources:
setattr(self, key, resources[key])
# supported_instances has a different name in compute_node
if 'supported_instances' in resources:
si = resources['supported_instances']
self.supported_hv_specs = [objects.HVSpec.from_list(s) for s in si]
@base.NovaObjectRegistry.register
class ComputeNodeList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# ComputeNode <= version 1.2
# Version 1.1 ComputeNode version 1.3
# Version 1.2 Add get_by_service()
# Version 1.3 ComputeNode version 1.4
# Version 1.4 ComputeNode version 1.5
# Version 1.5 Add use_slave to get_by_service
# Version 1.6 ComputeNode version 1.6
# Version 1.7 ComputeNode version 1.7
# Version 1.8 ComputeNode version 1.8 + add get_all_by_host()
# Version 1.9 ComputeNode version 1.9
# Version 1.10 ComputeNode version 1.10
# Version 1.11 ComputeNode version 1.11
# Version 1.12 ComputeNode version 1.12
# Version 1.13 ComputeNode version 1.13
# Version 1.14 ComputeNode version 1.14
VERSION = '1.14'
fields = {
'objects': fields.ListOfObjectsField('ComputeNode'),
}
@base.remotable_classmethod
def get_all(cls, context):
db_computes = db.compute_node_get_all(context)
return base.obj_make_list(context, cls(context), objects.ComputeNode,
db_computes)
@base.remotable_classmethod
def get_by_hypervisor(cls, context, hypervisor_match):
db_computes = db.compute_node_search_by_hypervisor(context,
hypervisor_match)
return base.obj_make_list(context, cls(context), objects.ComputeNode,
db_computes)
# NOTE(hanlind): This is deprecated and should be removed on the next
# major version bump
@base.remotable_classmethod
def _get_by_service(cls, context, service_id, use_slave=False):
try:
db_computes = db.compute_nodes_get_by_service_id(
context, service_id)
except exception.ServiceNotFound:
# NOTE(sbauza): Previous behaviour was returning an empty list
# if the service was created with no computes, we need to keep it.
db_computes = []
return base.obj_make_list(context, cls(context), objects.ComputeNode,
db_computes)
@staticmethod
@db.select_db_reader_mode
def _db_compute_node_get_all_by_host(context, host, use_slave=False):
return db.compute_node_get_all_by_host(context, host)
@base.remotable_classmethod
def get_all_by_host(cls, context, host, use_slave=False):
db_computes = cls._db_compute_node_get_all_by_host(context, host,
use_slave=use_slave)
return base.obj_make_list(context, cls(context), objects.ComputeNode,
db_computes)
| apache-2.0 | -3,126,293,330,617,601,500 | 44.209026 | 79 | 0.612568 | false |
liangjisheng/Data-Struct | books/algorithmicGraphics/chapter7/01_dijkstras_algorithm.py | 1 | 2301 | #!/usr/bin/python3
# the graph
graph = {}
graph["start"] = {}
graph["start"]["a"] = 6
graph["start"]["b"] = 2
graph["a"] = {}
graph["a"]["fin"] = 1
graph["b"] = {}
graph["b"]["a"] = 3
graph["b"]["fin"] = 5
graph["fin"] = {}
# print(graph)
# the costs table
infinity = float("inf")
costs = {}
costs["a"] = 6
costs["b"] = 2
costs["fin"] = infinity
# the parents table
parents = {}
parents["a"] = "start"
parents["b"] = "start"
parents["fin"] = None
processed = []
def find_lowest_cost_node(costs):
lowest_cost = float("inf")
lowest_cost_node = None
    # Go through each node.
    for node in costs:
        cost = costs[node]
        # If it's the lowest cost so far and hasn't been processed yet...
        if cost < lowest_cost and node not in processed:
            # ... set it as the new lowest-cost node.
            lowest_cost = cost
            lowest_cost_node = node
    return lowest_cost_node
# Find the lowest-cost node that you haven't processed yet.
node = find_lowest_cost_node(costs)
# If you've processed all the nodes, this while loop is done.
while node is not None:
    cost = costs[node]
    # Go through all the neighbors of this node.
    neighbors = graph[node]
    for n in neighbors.keys():
        new_cost = cost + neighbors[n]
        # If it's cheaper to get to this neighbor by going through this node...
        if costs[n] > new_cost:
            # ... update the cost for this neighbor.
            costs[n] = new_cost
            # This node becomes the new parent for this neighbor.
            parents[n] = node
    # Mark the node as processed.
    processed.append(node)
    # Find the next node to process, and loop.
    node = find_lowest_cost_node(costs)
print("Cost from the start to each node:")
print(costs)
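# A small optional extension (not part of the original example): walk the
# `parents` table backwards from "fin" to recover the actual shortest path,
# not just its cost.
path = []
node = "fin"
while node is not None:
    path.append(node)
    node = parents.get(node)  # "start" has no parent entry, so the walk stops there
path.reverse()
print("Shortest path:", path)  # expected: ['start', 'b', 'a', 'fin']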
| gpl-2.0 | -89,505,241,438,024,210 | 24.2125 | 79 | 0.599405 | false |
naparuba/kunai | data/global-configuration/packs/mongodb/collectors/pymongo/ismaster.py | 19 | 4227 | # Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parse a response to the 'ismaster' command."""
import itertools
from bson.py3compat import imap
from pymongo import common
from pymongo.server_type import SERVER_TYPE
def _get_server_type(doc):
"""Determine the server type from an ismaster response."""
if not doc.get('ok'):
return SERVER_TYPE.Unknown
if doc.get('isreplicaset'):
return SERVER_TYPE.RSGhost
elif doc.get('setName'):
if doc.get('hidden'):
return SERVER_TYPE.RSOther
elif doc.get('ismaster'):
return SERVER_TYPE.RSPrimary
elif doc.get('secondary'):
return SERVER_TYPE.RSSecondary
elif doc.get('arbiterOnly'):
return SERVER_TYPE.RSArbiter
else:
return SERVER_TYPE.RSOther
elif doc.get('msg') == 'isdbgrid':
return SERVER_TYPE.Mongos
else:
return SERVER_TYPE.Standalone
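# For illustration (the documents below are examples, not pymongo constants):
# a reply like {'ok': 1, 'setName': 'rs0', 'ismaster': True} maps to
# SERVER_TYPE.RSPrimary, {'ok': 1, 'msg': 'isdbgrid'} maps to SERVER_TYPE.Mongos,
# and a plain {'ok': 1} maps to SERVER_TYPE.Standalone.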
class IsMaster(object):
__slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable')
def __init__(self, doc):
"""Parse an ismaster response from the server."""
self._server_type = _get_server_type(doc)
self._doc = doc
self._is_writable = self._server_type in (
SERVER_TYPE.RSPrimary,
SERVER_TYPE.Standalone,
SERVER_TYPE.Mongos)
self._is_readable = (
self.server_type == SERVER_TYPE.RSSecondary
or self._is_writable)
@property
def document(self):
"""The complete ismaster command response document.
.. versionadded:: 3.4
"""
return self._doc.copy()
@property
def server_type(self):
return self._server_type
@property
def all_hosts(self):
"""List of hosts, passives, and arbiters known to this server."""
return set(imap(common.clean_node, itertools.chain(
self._doc.get('hosts', []),
self._doc.get('passives', []),
self._doc.get('arbiters', []))))
@property
def tags(self):
"""Replica set member tags or empty dict."""
return self._doc.get('tags', {})
@property
def primary(self):
"""This server's opinion about who the primary is, or None."""
if self._doc.get('primary'):
return common.partition_node(self._doc['primary'])
else:
return None
@property
def replica_set_name(self):
"""Replica set name or None."""
return self._doc.get('setName')
@property
def max_bson_size(self):
return self._doc.get('maxBsonObjectSize', common.MAX_BSON_SIZE)
@property
def max_message_size(self):
return self._doc.get('maxMessageSizeBytes', 2 * self.max_bson_size)
@property
def max_write_batch_size(self):
return self._doc.get('maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE)
@property
def min_wire_version(self):
return self._doc.get('minWireVersion', common.MIN_WIRE_VERSION)
@property
def max_wire_version(self):
return self._doc.get('maxWireVersion', common.MAX_WIRE_VERSION)
@property
def set_version(self):
return self._doc.get('setVersion')
@property
def election_id(self):
return self._doc.get('electionId')
@property
def is_writable(self):
return self._is_writable
@property
def is_readable(self):
return self._is_readable
@property
def me(self):
me = self._doc.get('me')
if me:
return common.clean_node(me)
@property
def last_write_date(self):
return self._doc.get('lastWrite', {}).get('lastWriteDate')
| mit | -7,159,681,268,196,811,000 | 27.952055 | 78 | 0.617932 | false |
fbradyirl/home-assistant | tests/components/homekit_controller/specific_devices/test_lennox_e30.py | 4 | 1534 | """
Regression tests for the Lennox E30 thermostat.
https://github.com/home-assistant/home-assistant/issues/20885
"""
from homeassistant.components.climate.const import SUPPORT_TARGET_TEMPERATURE
from tests.components.homekit_controller.common import (
setup_accessories_from_file,
setup_test_accessories,
Helper,
)
async def test_lennox_e30_setup(hass):
"""Test that a Lennox E30 can be correctly setup in HA."""
accessories = await setup_accessories_from_file(hass, "lennox_e30.json")
config_entry, pairing = await setup_test_accessories(hass, accessories)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
climate = entity_registry.async_get("climate.lennox")
assert climate.unique_id == "homekit-XXXXXXXX-100"
climate_helper = Helper(
hass, "climate.lennox", pairing, accessories[0], config_entry
)
climate_state = await climate_helper.poll_and_get_state()
assert climate_state.attributes["friendly_name"] == "Lennox"
assert climate_state.attributes["supported_features"] == (
SUPPORT_TARGET_TEMPERATURE
)
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(climate.device_id)
assert device.manufacturer == "Lennox"
assert device.name == "Lennox"
assert device.model == "E30 2B"
assert device.sw_version == "3.40.XX"
# The fixture contains a single accessory - so its a single device
# and no bridge
assert device.via_device_id is None
| apache-2.0 | 1,703,705,301,467,829,800 | 33.863636 | 77 | 0.715776 | false |
tudorbarascu/QGIS | python/plugins/processing/algs/grass7/ext/r_mask_vect.py | 36 | 1842 | # -*- coding: utf-8 -*-
"""
***************************************************************************
r_mask_vect.py
--------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
from processing.algs.grass7.Grass7Utils import Grass7Utils
def processCommand(alg, parameters, context, feedback):
# Remove input
alg.removeParameter('input')
alg.processCommand(parameters, context, feedback, True)
def processOutputs(alg, parameters, context, feedback):
createOpt = alg.parameterAsString(parameters, alg.GRASS_RASTER_FORMAT_OPT, context)
metaOpt = alg.parameterAsString(parameters, alg.GRASS_RASTER_FORMAT_META, context)
# We need to export the raster with all its bands and its color table
fileName = alg.parameterAsOutputLayer(parameters, 'output', context)
outFormat = Grass7Utils.getRasterFormatFromFilename(fileName)
grassName = alg.exportedLayers['input']
alg.exportRasterLayer(grassName, fileName, True,
outFormat, createOpt, metaOpt)
| gpl-2.0 | -3,413,943,310,125,184,500 | 42.714286 | 87 | 0.527233 | false |
winiceo/weibo | test/timer.py | 4 | 1181 | #!/usr/bin/env python
# coding: utf-8
# Copyright(c) 2013
# Gmail:liuzheng712
#
import sched
import time
import persion
import loginweibo as Lwb
import jiexi
import file
def write(session, who):
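    # Fetch the user's profile page and store every parsed post (text, time,
    # source, likes, comments, forwards, mid) via file.inputweibo().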
page, who = Lwb.getPage(session, 'http://weibo.com/' + who)
WB_text, WB_time, WB_comefrom, WB_like, WB_pinlun, WB_forward, WB_mid = jiexi.detail(page)
for i in range(0, len(WB_text)):
file.inputweibo(who, WB_text[i], WB_time[i], WB_mid[i],
WB_comefrom[i], WB_like[i], WB_forward[i], WB_pinlun[i])
print time.time() , ':Success!:' , who
if len(persion.usename) == 0:
username, passwd = Lwb.Login()
else:
username = persion.usename
passwd = persion.passwd
session = Lwb.getCookies(username, passwd)
scheduler = sched.scheduler(time.time, time.sleep)
#print time.time
#print time.sleep
def print_event(name):
print 'EVENT:', time.time(), name
print 'START:', time.time()
ff = open(file.Wpath + '/' + 'waitlist', 'r')
for i in range(1, 86400):
who = ff.readline().strip().lstrip()
if len(who) == 0:
continue
if who == 'end':
break
scheduler.enter(i, 1, write, (session, who))
scheduler.run()
| apache-2.0 | 450,907,969,305,715,800 | 23.102041 | 94 | 0.638442 | false |
Naville/WTFJH | Tools/WTFJHAnalyzer/introspy/HTMLReportGenerator.py | 4 | 2032 | import shutil
import os
class HTMLReportGenerator:
"""
Generates an HTML report given an analyzed Introspy DB.
"""
TRACED_CALLS_FILE_NAME = 'tracedCalls.js'
FINDINGS_FILE_NAME = 'findings.js'
API_GROUPS_FILE_NAME = 'apiGroups.js'
# TODO: merge the two templates and get rid of this
HTML_TEMPLATE_PATH = os.path.join(os.path.dirname(__file__), 'html')
ANDROID_TEMPLATE = 'report-android.html'
IOS_TEMPLATE = 'report-ios.html'
FINAL_TEMPLATE = 'report.html'
def __init__(self, analyzedDB, androidDb):
self.analyzedDB = analyzedDB
self.androidDb = androidDb
def write_report_to_directory(self, outDir):
# Copy the HTML template
shutil.copytree(os.path.abspath(self.HTML_TEMPLATE_PATH), outDir)
if self.androidDb:
shutil.move(os.path.join(outDir, self.ANDROID_TEMPLATE),
os.path.join(outDir, self.FINAL_TEMPLATE))
# Remove the wrong template file
os.remove(os.path.join(outDir, self.IOS_TEMPLATE))
else:
shutil.move(os.path.join(outDir, self.IOS_TEMPLATE),
os.path.join(outDir, self.FINAL_TEMPLATE))
# Remove the wrong template file
os.remove(os.path.join(outDir, self.ANDROID_TEMPLATE))
# Copy the DB file
shutil.copy(self.analyzedDB.dbPath, outDir)
# Dump the traced calls
with open(os.path.join(outDir, self.TRACED_CALLS_FILE_NAME), 'w') as jsFile:
jsFile.write('var tracedCalls = ' + self.analyzedDB.get_traced_calls_as_JSON() + ';')
# Dump the findings
with open(os.path.join(outDir, self.FINDINGS_FILE_NAME), 'w') as jsFile:
jsFile.write( 'var findings = ' + self.analyzedDB.get_findings_as_JSON() + ';')
# Dump the API groups
with open(os.path.join(outDir, self.API_GROUPS_FILE_NAME), 'w') as jsFile:
jsFile.write('var apiGroups = ' + self.analyzedDB.get_API_groups_as_JSON() + ';')
| gpl-3.0 | 4,387,454,301,732,937,000 | 34.034483 | 97 | 0.618602 | false |
vishnu-kumar/PeformanceFramework | rally/common/db/sqlalchemy/types.py | 16 | 3020 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from sqlalchemy.dialects import mysql as mysql_types
from sqlalchemy.ext import mutable
from sqlalchemy import types as sa_types
from rally.common import costilius
class JSONEncodedDict(sa_types.TypeDecorator):
"""Represents an immutable structure as a json-encoded string."""
impl = sa_types.Text
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value, sort_keys=False)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = costilius.json_loads(
value, object_pairs_hook=costilius.OrderedDict)
return value
class BigJSONEncodedDict(JSONEncodedDict):
"""Represents an immutable structure as a json-encoded string.
    MySQL can store only 64 KB in a Text column, whereas PostgreSQL or SQLite
    can store well over 1 GB. In some cases, such as storing task results,
    64 KB is not enough, so on MySQL this type uses LONGTEXT, which allows
    storing up to 4 GiB.
"""
def load_dialect_impl(self, dialect):
if dialect.name == "mysql":
return dialect.type_descriptor(mysql_types.LONGTEXT)
else:
return dialect.type_descriptor(sa_types.Text)
class MutableDict(mutable.Mutable, dict):
@classmethod
def coerce(cls, key, value):
"""Convert plain dictionaries to MutableDict."""
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
# this call will raise ValueError
return mutable.Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
"Detect dictionary set events and emit change events."
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"Detect dictionary del events and emit change events."
dict.__delitem__(self, key)
self.changed()
class MutableJSONEncodedDict(JSONEncodedDict):
"""Represent a mutable structure as a json-encoded string."""
class BigMutableJSONEncodedDict(BigJSONEncodedDict):
"""Represent a big mutable structure as a json-encoded string."""
MutableDict.associate_with(MutableJSONEncodedDict)
MutableDict.associate_with(BigMutableJSONEncodedDict)
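# Illustrative sketch (not part of Rally): how these column types are typically
# declared in a SQLAlchemy model; "MyModel" and its table name are made up.
#
#   from sqlalchemy import Column, Integer
#   from sqlalchemy.ext.declarative import declarative_base
#
#   Base = declarative_base()
#
#   class MyModel(Base):
#       __tablename__ = "my_model"
#       id = Column(Integer, primary_key=True)
#       # change-tracked dict stored as a JSON string
#       data = Column(MutableJSONEncodedDict, default={})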
| apache-2.0 | 4,215,042,025,243,224,000 | 31.12766 | 78 | 0.683444 | false |
brunobergher/dotfiles | sublime/pygments/all/pygments/lexers/nimrod.py | 45 | 5105 | # -*- coding: utf-8 -*-
"""
pygments.lexers.nimrod
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Nimrod language.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['NimrodLexer']
class NimrodLexer(RegexLexer):
"""
For `Nimrod <http://nimrod-code.org/>`_ source code.
.. versionadded:: 1.5
"""
name = 'Nimrod'
aliases = ['nimrod', 'nim']
filenames = ['*.nim', '*.nimrod']
mimetypes = ['text/x-nimrod']
flags = re.MULTILINE | re.IGNORECASE | re.UNICODE
def underscorize(words):
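        # Nim identifiers are case- and underscore-insensitive, so each keyword
        # is turned into a regex fragment that allows an optional "_" after
        # every character (e.g. "if" also matches "i_f").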
newWords = []
new = ""
for word in words:
for ch in word:
new += (ch + "_?")
newWords.append(new)
new = ""
return "|".join(newWords)
keywords = [
'addr', 'and', 'as', 'asm', 'atomic', 'bind', 'block', 'break',
'case', 'cast', 'const', 'continue', 'converter', 'discard',
'distinct', 'div', 'elif', 'else', 'end', 'enum', 'except', 'finally',
'for', 'generic', 'if', 'implies', 'in', 'yield',
'is', 'isnot', 'iterator', 'lambda', 'let', 'macro', 'method',
'mod', 'not', 'notin', 'object', 'of', 'or', 'out', 'proc',
'ptr', 'raise', 'ref', 'return', 'shl', 'shr', 'template', 'try',
'tuple', 'type', 'when', 'while', 'with', 'without', 'xor'
]
keywordsPseudo = [
'nil', 'true', 'false'
]
opWords = [
'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in',
'notin', 'is', 'isnot'
]
types = [
'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64',
'bool', 'char', 'range', 'array', 'seq', 'set', 'string'
]
tokens = {
'root': [
(r'##.*$', String.Doc),
(r'#.*$', Comment),
(r'[*=><+\-/@$~&%!?|\\\[\]]', Operator),
(r'\.\.|\.|,|\[\.|\.\]|\{\.|\.\}|\(\.|\.\)|\{|\}|\(|\)|:|\^|`|;',
Punctuation),
# Strings
(r'(?:[\w]+)"', String, 'rdqs'),
(r'"""', String, 'tdqs'),
('"', String, 'dqs'),
# Char
("'", String.Char, 'chars'),
# Keywords
(r'(%s)\b' % underscorize(opWords), Operator.Word),
(r'(p_?r_?o_?c_?\s)(?![(\[\]])', Keyword, 'funcname'),
(r'(%s)\b' % underscorize(keywords), Keyword),
(r'(%s)\b' % underscorize(['from', 'import', 'include']),
Keyword.Namespace),
(r'(v_?a_?r)\b', Keyword.Declaration),
(r'(%s)\b' % underscorize(types), Keyword.Type),
(r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo),
# Identifiers
(r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name),
# Numbers
(r'[0-9][0-9_]*(?=([e.]|\'f(32|64)))',
Number.Float, ('float-suffix', 'float-number')),
(r'0x[a-f0-9][a-f0-9_]*', Number.Hex, 'int-suffix'),
(r'0b[01][01_]*', Number.Bin, 'int-suffix'),
(r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'),
(r'[0-9][0-9_]*', Number.Integer, 'int-suffix'),
# Whitespace
(r'\s+', Text),
(r'.+$', Error),
],
'chars': [
(r'\\([\\abcefnrtvl"\']|x[a-f0-9]{2}|[0-9]{1,3})', String.Escape),
(r"'", String.Char, '#pop'),
(r".", String.Char)
],
'strings': [
(r'(?<!\$)\$(\d+|#|\w+)+', String.Interpol),
(r'[^\\\'"$\n]+', String),
# quotes, dollars and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'\$', String)
# newlines are an error (use "nl" state)
],
'dqs': [
(r'\\([\\abcefnrtvl"\']|\n|x[a-f0-9]{2}|[0-9]{1,3})',
String.Escape),
(r'"', String, '#pop'),
include('strings')
],
'rdqs': [
(r'"(?!")', String, '#pop'),
(r'""', String.Escape),
include('strings')
],
'tdqs': [
(r'"""(?!")', String, '#pop'),
include('strings'),
include('nl')
],
'funcname': [
(r'((?![\d_])\w)(((?!_)\w)|(_(?!_)\w))*', Name.Function, '#pop'),
(r'`.+`', Name.Function, '#pop')
],
'nl': [
(r'\n', String)
],
'float-number': [
(r'\.(?!\.)[0-9_]*', Number.Float),
(r'e[+-]?[0-9][0-9_]*', Number.Float),
default('#pop')
],
'float-suffix': [
(r'\'f(32|64)', Number.Float),
default('#pop')
],
'int-suffix': [
(r'\'i(32|64)', Number.Integer.Long),
(r'\'i(8|16)', Number.Integer),
default('#pop')
],
}
| mit | -2,051,269,694,479,359,200 | 31.106918 | 80 | 0.408031 | false |
Uran198/coala | tests/processes/BearRunningTest.py | 23 | 11649 | import multiprocessing
import queue
import unittest
from coalib.bears.GlobalBear import GlobalBear
from coalib.bears.LocalBear import LocalBear
from coalib.processes.BearRunning import (
LOG_LEVEL, LogMessage, run, send_msg, task_done)
from coalib.processes.CONTROL_ELEMENT import CONTROL_ELEMENT
from coalib.results.Result import RESULT_SEVERITY, Result
from coalib.settings.Section import Section
class LocalTestBear(LocalBear):
def run(self, filename, file):
if filename == "file1":
raise Exception("Just to throw anything here.")
return [Result.from_values("LocalTestBear",
"something went wrong",
filename)]
class SimpleBear(LocalBear):
def run(self,
filename,
file,
*args,
dependency_results=None,
**kwargs):
return [Result.from_values("SimpleBear",
"something went wrong",
filename),
# This result should not be passed to DependentBear
Result.from_values("FakeBear",
"something went wrong",
filename),
Result.from_values("SimpleBear",
"another thing went wrong",
filename)]
class DependentBear(LocalBear):
def run(self,
filename,
file,
*args,
dependency_results=None,
**kwargs):
assert len(dependency_results["SimpleBear"]) == 2
@staticmethod
def get_dependencies():
return [SimpleBear]
class SimpleGlobalBear(GlobalBear):
def run(self,
*args,
dependency_results=None,
**kwargs):
return [Result("SimpleGlobalBear", "something went wrong"),
# This result should not be passed to DependentBear
Result("FakeBear", "something went wrong"),
Result("SimpleGlobalBear", "another thing went wrong")]
class DependentGlobalBear(GlobalBear):
def run(self,
*args,
dependency_results=None,
**kwargs):
assert len(dependency_results["SimpleGlobalBear"]) == 3
@staticmethod
def get_dependencies():
return [SimpleGlobalBear]
class GlobalTestBear(GlobalBear):
def run(self):
result = []
for file, contents in self.file_dict.items():
result.append(Result.from_values("GlobalTestBear",
"Files are bad in general!",
file,
severity=RESULT_SEVERITY.INFO))
return result
class EvilBear(LocalBear):
def execute(self, *args, **kwargs):
raise NotImplementedError
class UnexpectedBear1(LocalBear):
def run(self, filename, file):
return [1,
Result("UnexpectedBear1", "test result")]
class UnexpectedBear2(LocalBear):
def run(self, filename, file):
return 1
class BearRunningUnitTest(unittest.TestCase):
def setUp(self):
self.settings = Section("name")
self.file_name_queue = queue.Queue()
self.local_bear_list = []
self.global_bear_list = []
self.global_bear_queue = queue.Queue()
self.file_dict = {}
manager = multiprocessing.Manager()
self.local_result_dict = manager.dict()
self.global_result_dict = manager.dict()
self.message_queue = queue.Queue()
self.control_queue = queue.Queue()
def test_queue_done_marking(self):
self.message_queue.put("test")
task_done(self.message_queue) # Should make the queue joinable
self.message_queue.join()
task_done("test") # Should pass silently
def test_messaging(self):
send_msg(self.message_queue,
0,
LOG_LEVEL.DEBUG,
"test",
"messag",
delimiter="-",
end="e")
self.assertEqual(self.message_queue.get(),
LogMessage(LOG_LEVEL.DEBUG, "test-message"))
def test_dependencies(self):
self.local_bear_list.append(SimpleBear(self.settings,
self.message_queue))
self.local_bear_list.append(DependentBear(self.settings,
self.message_queue))
self.global_bear_list.append(SimpleGlobalBear({},
self.settings,
self.message_queue))
self.global_bear_list.append(DependentGlobalBear({},
self.settings,
self.message_queue))
self.global_bear_queue.put(1)
self.global_bear_queue.put(0)
self.file_name_queue.put("t")
self.file_dict["t"] = []
run(self.file_name_queue,
self.local_bear_list,
self.global_bear_list,
self.global_bear_queue,
self.file_dict,
self.local_result_dict,
self.global_result_dict,
self.message_queue,
self.control_queue)
try:
while True:
msg = self.message_queue.get(timeout=0)
self.assertEqual(msg.log_level, LOG_LEVEL.DEBUG)
except queue.Empty:
pass
def test_evil_bear(self):
self.local_bear_list.append(EvilBear(self.settings,
self.message_queue))
self.file_name_queue.put("t")
self.file_dict["t"] = []
run(self.file_name_queue,
self.local_bear_list,
self.global_bear_list,
self.global_bear_queue,
self.file_dict,
self.local_result_dict,
self.global_result_dict,
self.message_queue,
self.control_queue)
def test_strange_bear(self):
self.local_bear_list.append(UnexpectedBear1(self.settings,
self.message_queue))
self.local_bear_list.append(UnexpectedBear2(self.settings,
self.message_queue))
self.file_name_queue.put("t")
self.file_dict["t"] = []
run(self.file_name_queue,
self.local_bear_list,
self.global_bear_list,
self.global_bear_queue,
self.file_dict,
self.local_result_dict,
self.global_result_dict,
self.message_queue,
self.control_queue)
expected_messages = [LOG_LEVEL.DEBUG,
LOG_LEVEL.ERROR,
LOG_LEVEL.DEBUG,
LOG_LEVEL.DEBUG,
LOG_LEVEL.WARNING]
for msg in expected_messages:
self.assertEqual(msg, self.message_queue.get(timeout=0).log_level)
class BearRunningIntegrationTest(unittest.TestCase):
example_file = """a
b
c
d
"""
def setUp(self):
self.settings = Section("name")
self.file_name_queue = queue.Queue()
self.local_bear_list = []
self.global_bear_list = []
self.global_bear_queue = queue.Queue()
self.file_dict = {}
manager = multiprocessing.Manager()
self.local_result_dict = manager.dict()
self.global_result_dict = manager.dict()
self.message_queue = queue.Queue()
self.control_queue = queue.Queue()
self.file1 = "file1"
self.file2 = "arbitrary"
self.file_name_queue.put(self.file1)
self.file_name_queue.put(self.file2)
self.file_name_queue.put("invalid file")
self.local_bear_list.append(LocalTestBear(self.settings,
self.message_queue))
self.local_bear_list.append("not a valid bear")
self.file_dict[self.file1] = self.example_file
self.file_dict[self.file2] = self.example_file
self.global_bear_list.append(GlobalTestBear(self.file_dict,
self.settings,
self.message_queue))
self.global_bear_list.append("not a valid bear")
self.global_bear_queue.put(0)
self.global_bear_queue.put(1)
def test_run(self):
run(self.file_name_queue,
self.local_bear_list,
self.global_bear_list,
self.global_bear_queue,
self.file_dict,
self.local_result_dict,
self.global_result_dict,
self.message_queue,
self.control_queue)
expected_messages = [LOG_LEVEL.DEBUG,
LOG_LEVEL.WARNING,
LOG_LEVEL.DEBUG,
LOG_LEVEL.WARNING,
LOG_LEVEL.DEBUG,
LOG_LEVEL.WARNING,
LOG_LEVEL.ERROR,
LOG_LEVEL.DEBUG,
LOG_LEVEL.DEBUG,
LOG_LEVEL.WARNING]
for msg in expected_messages:
self.assertEqual(msg, self.message_queue.get(timeout=0).log_level)
local_result_expected = [[],
[Result.from_values("LocalTestBear",
"something went wrong",
'arbitrary')]
]
for expected in local_result_expected:
control_elem, index = self.control_queue.get()
self.assertEqual(control_elem, CONTROL_ELEMENT.LOCAL)
real = self.local_result_dict[index]
self.assertEqual(real, expected)
global_results_expected = [Result.from_values(
"GlobalTestBear",
"Files are bad in general!",
"file1",
severity=RESULT_SEVERITY.INFO),
Result.from_values(
"GlobalTestBear",
"Files are bad in general!",
"arbitrary",
severity=RESULT_SEVERITY.INFO)]
control_elem, index = self.control_queue.get()
self.assertEqual(control_elem, CONTROL_ELEMENT.LOCAL_FINISHED)
control_elem, index = self.control_queue.get()
self.assertEqual(control_elem, CONTROL_ELEMENT.GLOBAL)
real = self.global_result_dict[index]
self.assertEqual(sorted(global_results_expected), sorted(real))
control_elem, none = self.control_queue.get(timeout=0)
self.assertEqual(control_elem, CONTROL_ELEMENT.GLOBAL_FINISHED)
self.assertEqual(none, None)
# The invalid bear gets a None in that dict for dependency resolution
self.assertEqual(len(self.global_result_dict), 2)
self.assertEqual(len(self.local_result_dict),
len(local_result_expected))
self.assertRaises(queue.Empty, self.message_queue.get, timeout=0)
self.assertRaises(queue.Empty, self.control_queue.get, timeout=0)
| agpl-3.0 | 1,091,570,157,490,676,900 | 34.515244 | 78 | 0.51704 | false |
shakamunyi/nova | nova/image/download/file.py | 6 | 7286 | # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo.config import cfg
from nova import exception
from nova.i18n import _, _LI
import nova.image.download.base as xfer_base
import nova.virt.libvirt.utils as lv_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
opt_group = cfg.ListOpt(name='filesystems', default=[],
help=_('List of file systems that are configured '
'in this file in the '
'image_file_url:<list entry name> '
'sections'))
CONF.register_opt(opt_group, group="image_file_url")
# This module extends the configuration options for nova.conf. If the user
# wishes to use the specific configuration settings the following needs to
# be added to nova.conf:
# [image_file_url]
# filesystems = <a list of strings referencing a config section>
#
# For each entry in the filesystem list a new configuration section must be
# added with the following format:
# [image_file_url:<list entry>]
# id = <string>
# mountpoint = <string>
#
# id:
# An opaque string. In order for this module to know that the remote
# FS is the same one that is mounted locally it must share information
# with the glance deployment. Both glance and nova-compute must be
# configured with a unique matching string. This ensures that the
# file:// advertised URL is describing a file system that is known
# to nova-compute
# mountpoint:
# The location at which the file system is locally mounted. Glance
# may mount a shared file system on a different path than nova-compute.
# This value will be compared against the metadata advertised with
#       is copied.
# file is copied.
#
# If these values are not added to nova.conf and the file module is in the
# allowed_direct_url_schemes list, then the legacy behavior will occur such
# that a copy will be attempted assuming that the glance and nova file systems
# are the same.
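# An illustrative example (the id and paths below are made-up values):
#
# [image_file_url]
# filesystems = shared_fs
#
# [image_file_url:shared_fs]
# id = b7d41b4a-c61d-4b7a-9582-3f29e7f51f3e
# mountpoint = /mnt/shared-images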
class FileTransfer(xfer_base.TransferBase):
desc_required_keys = ['id', 'mountpoint']
# NOTE(jbresnah) because the group under which these options are added is
    # dynamically determined these options need to stay out of global space
# or they will confuse generate_sample.sh
filesystem_opts = [
cfg.StrOpt('id',
                   help=_('A unique ID given to each file system. This '
                          'value is set in Glance and agreed upon here so '
                          'that the operator knows they are dealing with '
'the same file system.')),
cfg.StrOpt('mountpoint',
help=_('The path at which the file system is mounted.')),
]
def _get_options(self):
fs_dict = {}
for fs in CONF.image_file_url.filesystems:
group_name = 'image_file_url:' + fs
conf_group = CONF[group_name]
if conf_group.id is None:
                msg = _('The group %(group_name)s must be configured with '
                        'an id.') % {'group_name': group_name}
raise exception.ImageDownloadModuleConfigurationError(
module=str(self), reason=msg)
fs_dict[CONF[group_name].id] = CONF[group_name]
return fs_dict
def __init__(self):
# create the needed options
for fs in CONF.image_file_url.filesystems:
group_name = 'image_file_url:' + fs
CONF.register_opts(self.filesystem_opts, group=group_name)
def _verify_config(self):
for fs_key in self.filesystems:
for r in self.desc_required_keys:
fs_ent = self.filesystems[fs_key]
if fs_ent[r] is None:
                    msg = _('The key %s is required in all file system '
                            'descriptions.') % r
LOG.error(msg)
raise exception.ImageDownloadModuleConfigurationError(
module=str(self), reason=msg)
def _file_system_lookup(self, metadata, url_parts):
for r in self.desc_required_keys:
if r not in metadata:
url = url_parts.geturl()
msg = _('The key %(r)s is required in the location metadata '
'to access the url %(url)s.') % {'r': r, 'url': url}
LOG.info(msg)
raise exception.ImageDownloadModuleMetaDataError(
module=str(self), reason=msg)
id = metadata['id']
if id not in self.filesystems:
msg = _('The ID %(id)s is unknown.') % {'id': id}
LOG.info(msg)
return
fs_descriptor = self.filesystems[id]
return fs_descriptor
def _normalize_destination(self, nova_mount, glance_mount, path):
if not path.startswith(glance_mount):
msg = (_('The mount point advertised by glance: %(glance_mount)s, '
'does not match the URL path: %(path)s') %
{'glance_mount': glance_mount, 'path': path})
raise exception.ImageDownloadModuleMetaDataError(
module=str(self), reason=msg)
new_path = path.replace(glance_mount, nova_mount, 1)
return new_path
def download(self, context, url_parts, dst_file, metadata, **kwargs):
self.filesystems = self._get_options()
if not self.filesystems:
# NOTE(jbresnah) when nothing is configured assume legacy behavior
nova_mountpoint = '/'
glance_mountpoint = '/'
else:
self._verify_config()
fs_descriptor = self._file_system_lookup(metadata, url_parts)
if fs_descriptor is None:
msg = (_('No matching ID for the URL %s was found.') %
url_parts.geturl())
raise exception.ImageDownloadModuleError(reason=msg,
module=str(self))
nova_mountpoint = fs_descriptor['mountpoint']
glance_mountpoint = metadata['mountpoint']
source_file = self._normalize_destination(nova_mountpoint,
glance_mountpoint,
url_parts.path)
lv_utils.copy_image(source_file, dst_file)
LOG.info(_LI('Copied %(source_file)s using %(module_str)s'),
{'source_file': source_file, 'module_str': str(self)})
def get_download_handler(**kwargs):
return FileTransfer()
def get_schemes():
return ['file', 'filesystem']
| apache-2.0 | 7,571,600,141,632,877,000 | 41.115607 | 79 | 0.594702 | false |
andela-bojengwa/pleft | pleft/urls.py | 6 | 1054 | # Copyright 2010 Sander Dijkhuis <[email protected]>
#
# This file is part of Pleft.
#
# Pleft is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pleft is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pleft. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.conf.urls.defaults import *
import plapp.urls
urlpatterns = patterns(
'',
)
urlpatterns += plapp.urls.urlpatterns
if settings.DEBUG:
urlpatterns += patterns('',
(r'^static/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
| gpl-3.0 | 8,214,239,488,321,774,000 | 30 | 67 | 0.721063 | false |
10clouds/django-tenant-schemas | tenant_schemas/models.py | 1 | 4059 | from django.conf import settings
from django.db import models, connection, transaction
from django.core.management import call_command
from tenant_schemas.postgresql_backend.base import _check_identifier
from tenant_schemas.signals import post_schema_sync
from tenant_schemas.utils import schema_exists, django_is_in_test_mode
from tenant_schemas.utils import get_public_schema_name
class TenantMixin(models.Model):
auto_drop_schema = False # USE THIS WITH CAUTION!
# set this flag to true on a parent class if
# you want the schema to be automatically
# removed after tenant remove.
auto_create_schema = True # set this flag to false on a parent class if
# you dont want the schema to be automatically
# created upon save.
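    # For illustration only, a minimal subclass in a project (model and field
    # names below are hypothetical) could look like:
    #
    #     class Client(TenantMixin):
    #         name = models.CharField(max_length=100)
    #         auto_drop_schema = True  # schema removed when the record is deleted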
domain_url = models.CharField(max_length=128, unique=True)
schema_name = models.CharField(max_length=63)
class Meta:
abstract = True
def save(self, verbosity=1, *args, **kwargs):
is_new = self.pk is None
if is_new and connection.schema_name != get_public_schema_name():
raise Exception("Can't create tenant outside the public schema. Current schema is %s."
% connection.schema_name)
elif not is_new and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't update tenant outside it's own schema or the public schema. Current schema is %s."
% connection.schema_name)
super(TenantMixin, self).save(*args, **kwargs)
if is_new and self.auto_create_schema:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self)
def delete(self, *args, **kwargs):
"""
Drops the schema related to the tenant instance. Just drop the schema if the parent
class model has the attribute auto_drop_schema set to True.
"""
if connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't delete tenant outside it's own schema or the public schema. Current schema is %s."
% connection.schema_name)
if schema_exists(self.schema_name) and self.auto_drop_schema:
cursor = connection.cursor()
cursor.execute('DROP SCHEMA %s CASCADE' % self.schema_name)
transaction.commit_unless_managed()
super(TenantMixin, self).delete(*args, **kwargs)
def create_schema(self, check_if_exists=False, sync_schema=True, verbosity=1):
"""
Creates the schema 'schema_name' for this tenant. Optionally checks if the schema
already exists before creating it. Returns true if the schema was created, false
otherwise.
"""
# safety check
_check_identifier(self.schema_name)
cursor = connection.cursor()
if check_if_exists and schema_exists(self.schema_name):
return False
# create the schema
cursor.execute('CREATE SCHEMA %s' % self.schema_name)
transaction.commit_unless_managed()
if sync_schema:
call_command('syncdb',
schema_name=self.schema_name,
tenant=True,
public=False,
interactive=False, # don't ask to create an admin user
migrate_all=True, # migrate all apps directly to last version
verbosity=verbosity,
)
# fake all migrations
if ('south' in settings.INSTALLED_APPS
and not django_is_in_test_mode()):
call_command('migrate', fake=True,
schema_name=self.schema_name, verbosity=verbosity)
connection.set_schema_to_public()
return True
| mit | 2,011,183,975,632,575,000 | 42.645161 | 117 | 0.603843 | false |
hlmnrmr/superdesk-core | superdesk/publish/publish_service.py | 3 | 5719 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
import superdesk
from bson import ObjectId
from flask import current_app as app
from superdesk import get_resource_service, config
from superdesk.utc import utcnow
from superdesk.errors import SubscriberError, SuperdeskPublishError, PublishQueueError
logger = logging.getLogger(__name__)
extensions = {
'NITF': 'ntf',
'XML': 'xml',
'NINJS': 'json'}
class PublishServiceBase():
"""Base publish service class."""
DEFAULT_EXT = "txt"
def _transmit(self, queue_item, subscriber):
"""Performs the publishing of the queued item. Implement in subclass
@param queue_item: the queued item document
@type queue_item: dict
@param subscriber: the subscriber document
@type subscriber: dict
"""
raise NotImplementedError()
def transmit(self, queue_item):
subscriber = get_resource_service('subscribers').find_one(req=None, _id=queue_item['subscriber_id'])
if not subscriber.get('is_active'):
raise SubscriberError.subscriber_inactive_error(Exception('Subscriber inactive'), subscriber)
else:
try:
# "formatted_item" is the item as str
# "encoded_item" is the bytes version
# if "encoded_item_id" exists we use it, else
# we fill encoded_item using "formatted_item" and "item_encoding"
if 'encoded_item_id' in queue_item:
encoded_item_id = queue_item['encoded_item_id']
queue_item['encoded_item'] = app.storage.get(encoded_item_id).read()
else:
encoding = queue_item.get('item_encoding', 'utf-8')
queue_item['encoded_item'] = queue_item['formatted_item'].encode(encoding, errors='replace')
self._transmit(queue_item, subscriber) or []
self.update_item_status(queue_item, 'success')
except SuperdeskPublishError as error:
self.update_item_status(queue_item, 'error', error)
self.close_transmitter(subscriber, error)
raise error
def close_transmitter(self, subscriber, error):
"""
Checks if the transmitter has the error code set in the list of critical errors then closes the transmitter.
:param error: The error thrown during transmission
"""
if subscriber.get('critical_errors', {}).get(str(error.code)):
update = {
'is_active': False,
'last_closed': {
'closed_at': utcnow(),
'message': 'Subscriber made inactive due to critical error: {}'.format(error)
}
}
get_resource_service('subscribers').system_update(subscriber[config.ID_FIELD], update, subscriber)
def update_item_status(self, queue_item, status, error=None):
try:
item_update = {'state': status}
if status == 'in-progress':
item_update['transmit_started_at'] = utcnow()
elif status == 'success':
item_update['completed_at'] = utcnow()
elif status == 'error' and error:
item_update['error_message'] = '{}:{}'.format(error, str(error.system_exception))
publish_queue_service = superdesk.get_resource_service('publish_queue')
queue_id = ObjectId(queue_item.get('_id')) if isinstance(queue_item.get('_id'), str) else queue_item.get(
'_id')
publish_queue_service.patch(queue_id, item_update)
except Exception as ex:
raise PublishQueueError.item_update_error(ex)
@classmethod
def get_file_extension(cls, queue_item):
try:
format_ = queue_item['destination']['format'].upper()
except KeyError:
pass
else:
# "in" is used in addition of equality, so subclass can inherit extensions
# e.g.: "NITF" will work for "NTB NITF"
try:
return extensions[format_]
except KeyError:
for f, ext in extensions.items():
if f in format_:
return ext
# nothing found, we return default extension
return cls.DEFAULT_EXT
@classmethod
def get_filename(cls, queue_item):
config = queue_item.get('destination', {}).get('config', {})
# use the file extension from config if it is set otherwise use extension for the format
extension = config.get('file_extension') or cls.get_file_extension(queue_item)
return '{}-{}-{}.{}'.format(
queue_item['item_id'],
str(queue_item.get('item_version', '')),
str(queue_item.get('published_seq_num', '')),
extension).replace(':', '-')
@staticmethod
def register_file_extension(format_, ext):
"""Register new file extension
:param format_: item format
:param ext: extension to use
"""
if format_ in extensions:
logger.warning("overriding existing extension for {}".format(format_))
extensions[format_.upper()] = ext
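# For illustration only (the format name below is hypothetical), a custom
# formatter module could register its own extension with:
#
#     PublishService.register_file_extension('MYFORMAT', 'xml')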
PublishService = PublishServiceBase
def get_publish_service():
return PublishService
def set_publish_service(publish_service_class):
global PublishService
PublishService = publish_service_class
| agpl-3.0 | -7,803,154,476,969,788,000 | 36.625 | 117 | 0.599056 | false |
ChristianFrisson/gaia | test/unittest/data/svm/regenerate_histories.py | 3 | 2927 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Gaia
#
# Gaia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from gaia2 import *
import os, yaml
ds = DataSet()
ds.load('../dataset_small.db')
# add genre information
ds = transform(ds, 'addfield', { 'string': 'genre' })
gt = {}
toremove = []
for root, dirs, files in os.walk('../dataset_small/'):
for f in files:
if not f.endswith('.sig'): continue
genre = root.split('/')[-1]
if genre == 'Sample':
toremove += [ f[:-4] ]
else:
ds.point(f[:-4])['genre'] = genre
ds.removePoints(toremove)
ds = transform(ds, 'removevl')
ds = transform(ds, 'fixlength')
genre_ds = ds
# singledesc
ds = genre_ds
ds = transform(ds, 'normalize', { 'except': '*cov',
'independent': False })
ds = transform(ds, 'svmtrain', { 'className': 'genre',
'descriptorNames': 'spectral_centroid.mean' })
ds.history().save('test_svm_singledesc.history')
gtorig = yaml.load(open('../svm_old/test_svm_singledesc.gt.yaml'))
for p in ds.points():
if str(p['genre']) != gtorig[p.name()]:
print 'for point', p.name(), 'expected:', gtorig[p.name()], 'got:', p['genre']
# multidimdesc
ds = genre_ds
ds = transform(ds, 'normalize', { 'except': '*cov',
'independent': False })
ds = transform(ds, 'svmtrain', {'className': 'genre',
'descriptorNames': 'mfcc.mean'})
ds.history().save('test_svm_multidimdesc.history')
# all
ds = genre_ds
ds = transform(ds, 'normalize', { 'except': '*cov',
'independent': True,
'outliers': 0.1 })
ds = transform(ds, 'svmtrain', { 'className': 'genre',
'descriptorNames': '*.mean' })
ds.history().save('test_svm_all.history')
# probability
ds = genre_ds
ds = transform(ds, 'normalize', { 'except': '*cov',
'outliers': 0.05 })
ds = transform(ds, 'svmtrain', { 'className': 'genre',
'descriptorNames': '*.mean',
'probability': True })
ds.history().save('test_svm_probability.history')
| agpl-3.0 | 7,576,689,615,823,634,000 | 29.175258 | 86 | 0.583874 | false |
cheetrios/bopify | bopify.py | 1 | 10142 | """
__name__ = bopify.py
__author__ = Cheetrios
__description__ = Main application file for deployment
"""
import uuid
import sqlite3
import requests
import base64
import urllib
import json
import spotipy
import spotipy.util as util
from spotipy.oauth2 import SpotifyClientCredentials
from flask import Flask, render_template, request, redirect, url_for
from flask import session as flask_session
from flask_oauthlib.client import OAuth, OAuthException
from flask import g
from forms import CreateForm, JoinForm, SearchForm
SESS_DB = "db/sessions.db"
SONG_DB = "db/song.db"
# Client Keys
CLIENT_ID = "d251ae2dd5824052874019013ee73eb0"
CLIENT_SECRET = "ee5c56305aea428b986c4a0964361cb2"
# Spotify URLS
SPOTIFY_AUTH_URL = "https://accounts.spotify.com/authorize"
SPOTIFY_TOKEN_URL = "https://accounts.spotify.com/api/token"
SPOTIFY_API_BASE_URL = "https://api.spotify.com"
API_VERSION = "v1"
SPOTIFY_API_URL = "{}/{}".format(SPOTIFY_API_BASE_URL, API_VERSION)
REDIRECT_URI = "http://localhost:5000/login/authorized/"
SCOPE = "playlist-modify-public playlist-modify-private"
app = Flask(__name__)
app.config.from_object("config")
oauth = OAuth(app)
# ========================== DB Setup Functions ============================= #
def create_sess_db(cur):
"""Helper function used for setting up the sessions DB
given a cursor into the DB. DB is setup per:
    | Session ID | Session Name | Session Genre | Master ID | Participant ID |
Args:
cur (SQLite cursor): pointer into sessions DB
Returns: Void
"""
cur.execute("""CREATE TABLE sessions
(sessid text, sessname text, sessgenre text,
masterid text, partid text)""")
cur.commit()
def delete_sess_db(cur):
"""Drops the sessions table. Do NOT call unless cleaning up testing
Args:
cur (SQLite cursor): pointer into sessions DB
Returns: Void
"""
cur.execute("DROP TABLE sessions")
cur.commit()
def create_song_db(cur):
"""Helper function used for setting up the songs DB (for sessions->song
mapping lookups) given a cursor into the DB. DB is setup per:
| Session ID | Song ID (Spotify) | Song Name | Order | Is_master
Args:
cur (SQLite cursor): pointer into songs DB
Returns: Void
"""
cur.execute("""CREATE TABLE songs
(sessid text, songid text, songname text,
position integer, ismaster integer)""")
cur.commit()
def delete_song_db(cur):
"""Drops the songs table. Do NOT call unless cleaning up testing
Args:
cur (SQLite cursor): pointer into songs DB
Returns: Void
"""
cur.execute("DROP TABLE songs")
cur.commit()
# ========================== Helper Functions =============================== #
def join_session(cur, session_id, session_name, session_genre,
master_id, participant_id):
"""Helper function used for joining a room. Automatically redirects to the
page where all joined rooms are listed
Args:
cur (SQLite cursor): pointer into sessions DB
session_id (str): randomly generated bop session ID
session_name (str): title given to session
session_genre (str): metadata of bop session created
master_id (str): ID of master of the session
participant_id(str): ID of the joining member
Returns: Void
"""
cur.cursor().execute("INSERT INTO sessions VALUES (?,?,?,?,?)",
[session_id, session_name, session_genre, master_id, participant_id])
cur.commit()
# ========================== Flask Route Setup ============================== #
auth_query_parameters = {
"response_type": "code",
"redirect_uri": REDIRECT_URI,
"scope": SCOPE,
"client_id": CLIENT_ID
}
@app.route("/")
def index():
# Auth Step 1: Authorization
url_args = "&".join(["{}={}".format(key,urllib.quote(val)) for key,val in auth_query_parameters.iteritems()])
auth_url = "{}/?{}".format(SPOTIFY_AUTH_URL, url_args)
return redirect(auth_url)
@app.route("/login/authorized/")
def spotify_authorized():
"""Logs user in and saves information (ID) into session for DB access
Args:
Returns: Redirection to bop sessions listing page
"""
# Requests refresh and access tokens
auth_token = request.args['code']
code_payload = {
"grant_type": "authorization_code",
"code": str(auth_token),
"redirect_uri": REDIRECT_URI
}
base64encoded = base64.b64encode("{}:{}".format(CLIENT_ID, CLIENT_SECRET))
headers = {"Authorization": "Basic {}".format(base64encoded)}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload, headers=headers)
# Tokens are Returned to Application
response_data = json.loads(post_request.text)
access_token = response_data["access_token"]
refresh_token = response_data["refresh_token"]
token_type = response_data["token_type"]
expires_in = response_data["expires_in"]
# Use the access token to access Spotify API
authorization_header = {"Authorization":"Bearer {}".format(access_token)}
# Get profile data
user_profile_api_endpoint = "{}/me".format(SPOTIFY_API_URL)
profile_response = requests.get(user_profile_api_endpoint, headers=authorization_header)
profile_data = json.loads(profile_response.text)
# used to confirm that a user has logged in (for finding sessions)
flask_session["user_id"] = profile_data["id"]
return redirect(url_for("bop"))
@app.route("/bop/", methods=["GET", "POST"])
def bop():
"""Main sessions page where session can be created or joined. Post requests
can be one of two: create or join, where the first makes new session and
makes current user the master and the second adding the user as a member.
Args:
Returns: Redirection to the particular room page if a new room was created or
a repopulated version of the sessions landing page
"""
# DB Columns: sessid | sessname | sessgenre | masterid | partid |
cur = sqlite3.connect(SESS_DB)
c = cur.cursor()
# sessions the user is already a part of: do NOT display on "join" list
sessions = c.execute("""SELECT * FROM sessions WHERE partid=?""",
(flask_session["user_id"],)).fetchall()
session_ids = [session[0] for session in sessions]
full = c.execute("""SELECT * FROM sessions""").fetchall()
joinable = [session for session in full if session[0] not in session_ids]
create = CreateForm()
join = JoinForm()
join.session.choices = [(session[0], session[1]) for session in joinable]
# case where the person just created a new session: creates a
# new entry in DB and redirects them to the session page
if create.validate_on_submit() and create.create.data:
session_id = str(uuid.uuid4())
join_session(cur=cur,
session_id=session_id,
session_name=create.session.data,
session_genre=create.genre.data,
master_id=flask_session["user_id"],
participant_id=flask_session["user_id"])
return redirect(url_for("room", sessid=session_id))
elif join.validate_on_submit():
reference = c.execute("""SELECT * FROM sessions WHERE sessid=?""",
(join.session.data,)).fetchone()
join_session(cur=cur,
session_id=reference[0],
session_name=reference[1],
session_genre=reference[2],
master_id=reference[3],
participant_id=flask_session["user_id"])
return redirect("/bop/")
# case of hitting the page after logging in (did not click create)
return render_template("session.html",
joinable=joinable,
sessions=sessions,
create=create, join=join)
@app.route("/room/<sessid>/", methods=["GET", "POST"])
def room(sessid):
"""Page associated to a particular bop session, showing songs in the room.
Post requests correspond to when a request to add a song has been made
Args:
sessid (str): bop session ID room corresponds to (from DB)
Returns: Bop session room page
"""
# determines whether or not current user is master
sess_cur = sqlite3.connect(SESS_DB) # | sessid | sessname | sessgenre | masterid | partid |
reference = sess_cur.cursor().execute(
"""SELECT * FROM sessions WHERE sessid=?""", (sessid,)).fetchone()
is_master = (reference is None or reference[3] == flask_session["user_id"])
song_cur = sqlite3.connect(SONG_DB) # | sessid | songid | songname | position | ismaster
songs = song_cur.cursor().execute(
"""SELECT * FROM songs WHERE sessid=?""", (sessid,)).fetchall()
search = SearchForm()
client_credentials_manager = SpotifyClientCredentials()
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
queried = []
# if user is searching for a song
if search.validate_on_submit():
query = search.query.data
queried = sp.search(q=query)["tracks"]["items"]
# when the master presses "play"
elif request.method == "POST" and "play" in request.form:
song = song_cur.cursor().execute(
"""SELECT * FROM songs
WHERE sessid=?
AND ismaster=1
ORDER BY position ASC""", (sessid,)).fetchone()
# when the master accepts the proposal and adds the song
elif request.method == "POST" and "add" in request.form:
        song_id = request.form["add"].split(":")[1]  # assumes the "add" form value is "add:<songid>"
song_cur.cursor().execute(
"""UPDATE songs
               SET ismaster=1
WHERE songid=?""", (song_id,))
song_cur.commit()
return render_template("room.html",
search=search,
is_master=is_master,
sessid=sessid,
songs=songs,
queried=queried)
@app.route("/room/<sessid>/<songid>/<songname>/<ismaster>/")
def queue(sessid, songid, songname, ismaster):
"""Reqeusts particular song to be added to the session queue.
INSECURE -- should not be able to easily modify URL to make "master request"
Args:
sessid (str): bop session ID room corresponds to (from sessions DB)
songid (str): song ID to be played (from songs DB)
songname (str): name of song to be played
ismaster (int): 1/0 int to indicate whether request is coming from
the session master or other participant respectively
Returns: Redirection to the bop session page
"""
cur = sqlite3.connect(SONG_DB)
songs = cur.cursor().execute(
"""SELECT * FROM songs WHERE sessid=?""", (sessid,)).fetchall()
cur.cursor().execute("INSERT INTO songs VALUES (?,?,?,?,?)",
[sessid, songid, songname, len(songs) + 1, ismaster])
cur.commit()
return redirect(url_for("room", sessid=sessid))
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True) | mit | -7,516,118,258,169,031,000 | 31.509615 | 113 | 0.68803 | false |
wubr2000/googleads-python-lib | examples/dfp/v201505/contact_service/update_contacts.py | 4 | 2384 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates contact addresses.
To determine which contacts exist, run get_all_contacts.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
# Set the ID of the contact to update.
CONTACT_ID = 'INSERT_CONTACT_ID_HERE'
def main(client, contact_id):
# Initialize appropriate service.
contact_service = client.GetService('ContactService', version='v201505')
# Create statement object to select the single contact by ID.
values = [{
'key': 'id',
'value': {
'xsi_type': 'NumberValue',
'value': contact_id
}
}]
query = 'WHERE id = :id'
statement = dfp.FilterStatement(query, values, 1)
# Get contacts by statement.
response = contact_service.getContactsByStatement(
statement.ToStatement())
if 'results' in response:
updated_contacts = []
for contact in response['results']:
contact['address'] = '123 New Street, New York, NY, 10011'
updated_contacts.append(contact)
# Update the contact on the server.
contacts = contact_service.updateContacts(updated_contacts)
# Display results.
for contact in contacts:
print (('Contact with ID \'%s\', name \'%s\', and address \'%s\' '
'was updated.')
% (contact['id'], contact['name'], contact['address']))
else:
print 'No contacts found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, CONTACT_ID)
| apache-2.0 | -6,001,738,883,169,725,000 | 30.786667 | 77 | 0.696309 | false |
mpaf/pywinauto-64bit | pywinauto/unittests/test_HwndWrapper.py | 2 | 17900 | # GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"Tests for HwndWrapper"
import time
import pprint
import pdb
import warnings
import ctypes
import sys
sys.path.append(".")
from pywinauto.application import Application
from pywinauto.controls.HwndWrapper import HwndWrapper
from pywinauto import win32structures, win32defines
__revision__ = "$Revision: 234 $"
try:
from pywinauto.controls.HwndWrapper import *
except ImportError:
# allow it to be imported in a dev environment
import sys
pywinauto_imp = "\\".join(__file__.split('\\')[:-3])
print "sdfdsf", pywinauto_imp
sys.path.append(pywinauto_imp)
from pywinauto.controls.HwndWrapper import *
import unittest
class HwndWrapperTests(unittest.TestCase):
"Unit tests for the TreeViewWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
self.app = Application()
self.app.start_("calc.exe")
self.dlg = self.app.Calculator
self.ctrl = HwndWrapper(self.dlg.Backspace.handle)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.TypeKeys("%{F4}")
def testInvalidHandle(self):
"Test that an exception is raised with an invalid window handle"
self.assertRaises(InvalidWindowHandle, HwndWrapper, -1)
#def testText(self):
# "Test getting the window Text of the dialog"
# self.assertEquals(self.dlg.WindowText(), "Untitled - Notepad")
def testFriendlyClassName(self):
"Test getting the friendly classname of the dialog"
self.assertEquals(self.ctrl.FriendlyClassName(), "Button")
def testClass(self):
"Test getting the classname of the dialog"
self.assertEquals(self.ctrl.Class(), "Button")
def testWindowText(self):
"Test getting the window Text of the dialog"
self.assertEquals(self.ctrl.WindowText(), "Backspace")
def testStyle(self):
self.dlg.Style()
self.assertEquals(self.ctrl.Style(),
win32defines.WS_CHILD |
win32defines.WS_VISIBLE |
win32defines.BS_PUSHBUTTON |
win32defines.BS_TEXT)
def testExStyle(self):
self.assertEquals(self.ctrl.ExStyle(),
win32defines.WS_EX_NOPARENTNOTIFY |
win32defines.WS_EX_LEFT |
win32defines.WS_EX_LTRREADING |
win32defines.WS_EX_RIGHTSCROLLBAR)
self.assertEquals(self.dlg.ExStyle(),
win32defines.WS_EX_WINDOWEDGE |
win32defines.WS_EX_LEFT |
win32defines.WS_EX_LTRREADING |
win32defines.WS_EX_RIGHTSCROLLBAR |
win32defines.WS_EX_CONTROLPARENT |
win32defines.WS_EX_APPWINDOW)
def testControlID(self):
self.assertEquals(self.ctrl.ControlID(), 83)
self.dlg.ControlID()
def testUserData(self):
self.ctrl.UserData()
self.dlg.UserData()
def testContextHelpID(self):
self.ctrl.ContextHelpID()
self.dlg.ContextHelpID()
def testIsVisible(self):
self.assertEqual(self.ctrl.IsVisible(), True)
self.assertEqual(self.dlg.IsVisible(), True)
def testIsUnicode(self):
self.assertEqual(self.ctrl.IsUnicode(), True)
self.assertEqual(self.dlg.IsUnicode(), True)
def testIsEnabled(self):
self.assertEqual(self.ctrl.IsEnabled(), True)
self.assertEqual(self.dlg.IsEnabled(), True)
self.assertEqual(self.dlg.ChildWindow(
title = 'Ave', enabled_only = False).IsEnabled(), False)
def testCloseClick_bug(self):
self.dlg.Sta.Click()
Timings.closeclick_dialog_close_wait = .5
try:
self.app.StatisticsBox.CAD.CloseClick()
except timings.TimeoutError:
pass
self.app.StatisticsBox.TypeKeys("%{F4}")
#self.assertEquals(self.app.StatisticsBox.Exists(), False)
def testRectangle(self):
"Test getting the rectangle of the dialog"
rect = self.dlg.Rectangle()
self.assertNotEqual(rect.top, None)
self.assertNotEqual(rect.left, None)
self.assertNotEqual(rect.bottom, None)
self.assertNotEqual(rect.right, None)
self.assertEqual(rect.height(), 309)
self.assertEqual(rect.width(), 480)
def testClientRect(self):
rect = self.dlg.Rectangle()
cli = self.dlg.ClientRect()
self.assertEqual(cli.left , 0)
self.assertEqual(cli.top , 0)
assert(cli.width() < rect.width())
assert(cli.height() < rect.height())
def testFont(self):
self.assertNotEqual(self.dlg.Font(), self.ctrl.Font())
    def testProcessID(self):
        self.assertEqual(self.ctrl.ProcessID(), self.dlg.ProcessID())
        self.assertNotEqual(self.ctrl.ProcessID(), 0)
def testHasStyle(self):
self.assertEqual(self.ctrl.HasStyle(win32defines.WS_CHILD), True)
self.assertEqual(self.dlg.HasStyle(win32defines.WS_CHILD), False)
self.assertEqual(self.ctrl.HasStyle(win32defines.WS_SYSMENU), False)
self.assertEqual(self.dlg.HasStyle(win32defines.WS_SYSMENU), True)
def testHasExStyle(self):
self.assertEqual(self.ctrl.HasExStyle(win32defines.WS_EX_NOPARENTNOTIFY), True)
self.assertEqual(self.dlg.HasExStyle(win32defines.WS_EX_NOPARENTNOTIFY), False)
self.assertEqual(self.ctrl.HasExStyle(win32defines.WS_EX_APPWINDOW), False)
self.assertEqual(self.dlg.HasExStyle(win32defines.WS_EX_APPWINDOW), True)
def testIsDialog(self):
self.assertEqual(self.ctrl.IsDialog(), False)
self.assertEqual(self.dlg.IsDialog(), True)
def testMenuItems(self):
self.assertEqual(self.ctrl.MenuItems(), [])
self.assertEqual(self.dlg.MenuItems()[1]['Text'], '&View')
def testParent(self):
self.assertEqual(self.ctrl.Parent(), self.dlg.handle)
def testTopLevelParent(self):
self.assertEqual(self.ctrl.TopLevelParent(), self.dlg.handle)
self.assertEqual(self.dlg.TopLevelParent(), self.dlg.handle)
def testTexts(self):
self.assertEqual(self.dlg.Texts(), [u'Calculator'])
self.assertEqual(self.ctrl.Texts(), [u'Backspace'])
self.assertEqual(self.dlg.Edit.Texts(), ['0. ', "0. "])
def testClientRects(self):
self.assertEqual(self.ctrl.ClientRects()[0], self.ctrl.ClientRect())
self.assertEqual(self.dlg.ClientRects()[0], self.dlg.ClientRect())
def testFonts(self):
self.assertEqual(self.ctrl.Fonts()[0], self.ctrl.Font())
self.assertEqual(self.dlg.Fonts()[0], self.dlg.Font())
def testChildren(self):
self.assertEqual(self.ctrl.Children(), [])
self.assertNotEqual(self.dlg.Children(), [])
def testIsChild(self):
self.assertEqual(self.ctrl.IsChild(self.dlg.WrapperObject()), True)
self.assertEqual(self.dlg.IsChild(self.ctrl), False)
def testSendMessage(self):
vk = self.dlg.SendMessage(win32defines.WM_GETDLGCODE)
self.assertEqual(0, vk)
code = self.dlg.Inv.SendMessage(win32defines.WM_GETDLGCODE)
self.assertEqual(0, vk)
def testSendMessageTimeout(self):
vk = self.dlg.SendMessageTimeout(win32defines.WM_GETDLGCODE)
self.assertEqual(0, vk)
code = self.dlg.Inv.SendMessageTimeout(win32defines.WM_GETDLGCODE)
self.assertEqual(0, vk)
def testPostMessage(self):
self.assertNotEquals(0, self.dlg.PostMessage(win32defines.WM_PAINT))
self.assertNotEquals(0, self.dlg.Inv.PostMessage(win32defines.WM_PAINT))
# def testNotifyMenuSelect(self):
# "Call NotifyMenuSelect to ensure it does not raise"
# self.ctrl.NotifyMenuSelect(1234)
# self.dlg.NotifyMenuSelect(1234)
def testNotifyParent(self):
"Call NotifyParent to ensure it does not raise"
self.ctrl.NotifyParent(1234)
#self.dlg.NotifyParent(1234)
def testGetProperties(self):
"Test getting the properties for the HwndWrapped control"
props = self.dlg.GetProperties()
self.assertEquals(
self.dlg.FriendlyClassName(), props['FriendlyClassName'])
self.assertEquals(
self.dlg.Texts(), props['Texts'])
for prop_name in props:
self.assertEquals(getattr(self.dlg, prop_name)(), props[prop_name])
# def testCaptureAsImage(self):
# pass
def testEquals(self):
self.assertNotEqual(self.ctrl, self.dlg.handle)
self.assertEqual(self.ctrl, self.ctrl.handle)
self.assertEqual(self.ctrl, self.ctrl)
# def testVerifyActionable(self):
# self.assertRaises()
# def testVerifyEnabled(self):
# self.assertRaises()
# def testVerifyVisible(self):
# self.assertRaises()
def testMoveWindow_same(self):
"Test calling movewindow without any parameters"
prevRect = self.dlg.Rectangle()
self.dlg.MoveWindow()
self.assertEquals(prevRect, self.dlg.Rectangle())
def testMoveWindow(self):
"Test moving the window"
dlgClientRect = self.dlg.ClientAreaRect()
prev_rect = self.ctrl.Rectangle() - dlgClientRect
new_rect = win32structures.RECT(prev_rect)
new_rect.left -= 1
new_rect.top -= 1
new_rect.right += 2
new_rect.bottom += 2
self.ctrl.MoveWindow(
new_rect.left,
new_rect.top,
new_rect.width(),
new_rect.height(),
)
self.assertEquals(
self.ctrl.Rectangle(),
new_rect + dlgClientRect)
self.ctrl.MoveWindow(prev_rect)
self.assertEquals(
self.ctrl.Rectangle(),
prev_rect + dlgClientRect)
def testMaximize(self):
self.dlg.Maximize()
self.assertEquals(self.dlg.GetShowState(), win32defines.SW_SHOWMAXIMIZED)
self.dlg.Restore()
def testMinimize(self):
self.dlg.Minimize()
self.assertEquals(self.dlg.GetShowState(), win32defines.SW_SHOWMINIMIZED)
self.dlg.Restore()
def testRestore(self):
self.dlg.Maximize()
self.dlg.Restore()
self.assertEquals(self.dlg.GetShowState(), win32defines.SW_SHOWNORMAL)
self.dlg.Minimize()
self.dlg.Restore()
self.assertEquals(self.dlg.GetShowState(), win32defines.SW_SHOWNORMAL)
def testGetFocus(self):
self.assertNotEqual(self.dlg.GetFocus(), None)
self.assertEqual(self.dlg.GetFocus(), self.ctrl.GetFocus())
self.dlg.Hyp.SetFocus()
self.assertEqual(self.dlg.GetFocus(), self.dlg.Hyp.handle)
def testSetFocus(self):
self.assertNotEqual(self.dlg.GetFocus(), self.dlg.Hyp.handle)
self.dlg.Hyp.SetFocus()
self.assertEqual(self.dlg.GetFocus(), self.dlg.Hyp.handle)
def testMenuSelect(self):
"Test selecting a menut item"
if not self.dlg.MenuItem("View -> Digit grouping").IsChecked():
self.dlg.MenuSelect("View -> Digit grouping")
self.dlg.TypeKeys("1234567")
self.dlg.MenuSelect("Edit->Copy")
self.dlg.CE.Click()
self.assertEquals(self.dlg.Edit.Texts()[1], "0. ")
self.dlg.MenuSelect("Edit->Paste")
self.assertEquals(self.dlg.Edit.Texts()[1], "1,234,567. ")
def testClose(self):
"Test the Close() method of windows"
# open the statistics dialog
try:
self.dlg.Sta.CloseClick()
except timings.TimeoutError:
pass
# make sure it is open and visible
self.assertTrue(self.app.StatisticsBox.IsVisible(), True)
# close it
self.app.StatisticsBox.Close()
# make sure that it is not visible
self.assertRaises(AttributeError, self.app.StatisticsBox)
# make sure the main calculator dialog is still open
self.assertEquals(self.dlg.IsVisible(), True)
class HwndWrapperMouseTests(unittest.TestCase):
"Unit tests for mouse actions of the HwndWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
self.app = Application()
self.app.start_("notepad.exe")
# Get the old font
self.app.UntitledNotepad.MenuSelect("Format->Font")
self.old_font = self.app.Font.FontComboBox.SelectedIndex()
self.old_font_style = self.app.Font.FontStyleCombo.SelectedIndex()
# ensure we have the correct settings for this test
self.app.Font.FontStyleCombo.Select(0)
self.app.Font.FontComboBox.Select("Lucida Console")
self.app.Font.OK.Click()
self.dlg = self.app.UntitledNotepad
self.ctrl = HwndWrapper(self.dlg.Edit.handle)
self.dlg.edit.SetEditText("Here is some text\r\n and some more")
def tearDown(self):
"Close the application after tests"
# Set the old font again
self.app.UntitledNotepad.MenuSelect("Format->Font")
self.app.Font.FontComboBox.Select(self.old_font)
self.app.Font.FontStyleCombo.Select(self.old_font_style)
self.app.Font.OK.Click()
# close the application
self.dlg.TypeKeys("%{F4}")
if self.app.Notepad.No.Exists():
self.app.Notepad.No.Click()
#def testText(self):
# "Test getting the window Text of the dialog"
# self.assertEquals(self.dlg.WindowText(), "Untitled - Notepad")
def testClick(self):
self.ctrl.Click(coords = (50, 10))
self.assertEquals(self.dlg.Edit.SelectionIndices(), (5,5))
def testClickInput(self):
self.ctrl.ClickInput(coords = (50, 10))
self.assertEquals(self.dlg.Edit.SelectionIndices(), (5,5))
def testDoubleClick(self):
self.ctrl.DoubleClick(coords = (60, 30))
self.assertEquals(self.dlg.Edit.SelectionIndices(), (24,29))
def testDoubleClickInput(self):
self.ctrl.DoubleClickInput(coords = (60, 30))
self.assertEquals(self.dlg.Edit.SelectionIndices(), (24,29))
def testMenuSelectNotepad_bug(self):
"In notepad - MenuSelect Edit->Paste did not work"
text = u'Here are some unicode characters \xef\xfc\r\n'
app2 = Application.start("notepad")
app2.UntitledNotepad.Edit.SetEditText(text)
app2.UntitledNotepad.MenuSelect("Edit->Select All")
app2.UntitledNotepad.MenuSelect("Edit->Copy")
self.dlg.MenuSelect("Edit->Select All")
self.dlg.MenuSelect("Edit->Paste")
self.dlg.MenuSelect("Edit->Paste")
self.dlg.MenuSelect("Edit->Paste")
app2.UntitledNotepad.MenuSelect("File->Exit")
app2.Notepad.No.Click()
self.assertEquals(self.dlg.Edit.TextBlock(), text*3)
#
# def testRightClick(self):
# pass
#
# def testPressMouse(self):
# pass
#
# def testReleaseMouse(self):
# pass
#
# def testMoveMouse(self):
# pass
#
# def testDragMouse(self):
# pass
#
# def testSetWindowText(self):
# pass
#
# def testTypeKeys(self):
# pass
#
# def testDebugMessage(self):
# pass
#
# def testDrawOutline(self):
# pass
#
class GetDialogPropsFromHandleTest(unittest.TestCase):
"Unit tests for mouse actions of the HwndWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
self.app = Application()
self.app.start_("notepad.exe")
self.dlg = self.app.UntitledNotepad
self.ctrl = HwndWrapper(self.dlg.Edit.handle)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.TypeKeys("%{F4}")
def test_GetDialogPropsFromHandle(self):
"Test some small stuff regarding GetDialogPropsFromHandle"
props_from_handle = GetDialogPropsFromHandle(self.dlg.handle)
props_from_dialog = GetDialogPropsFromHandle(self.dlg)
props_from_ctrl = GetDialogPropsFromHandle(self.ctrl)
self.assertEquals(props_from_handle, props_from_dialog)
##====================================================================
#def _unittests():
# "do some basic testing"
# from pywinauto.findwindows import find_windows
# import sys
#
# if len(sys.argv) < 2:
# handle = win32functions.GetDesktopWindow()
# else:
# try:
# handle = int(eval(sys.argv[1]))
#
# except ValueError:
#
# handle = find_windows(
# title_re = "^" + sys.argv[1],
# class_name = "#32770",
# visible_only = False)
#
# if not handle:
# print "dialog not found"
# sys.exit()
#
# props = GetDialogPropsFromHandle(handle)
# print len(props)
# #pprint(GetDialogPropsFromHandle(handle))
if __name__ == "__main__":
unittest.main()
| lgpl-2.1 | -7,863,576,477,954,474,000 | 29.546075 | 87 | 0.641061 | false |
4Quant/tensorflow | tensorflow/python/client/graph_util_test.py | 3 | 8970 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.client import graph_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
class DeviceFunctionsTest(tf.test.TestCase):
def testPinToCpu(self):
with ops.Graph().as_default() as g, g.device(graph_util.pin_to_cpu):
const_a = constant_op.constant(5.0)
const_b = constant_op.constant(10.0)
add_c = const_a + const_b
var_v = state_ops.variable_op([], dtype=dtypes.float32)
assign_c_to_v = state_ops.assign(var_v, add_c)
const_string = constant_op.constant("on a cpu")
dynamic_stitch_int_result = data_flow_ops.dynamic_stitch(
[[0, 1, 2], [2, 3]], [[12, 23, 34], [1, 2]])
dynamic_stitch_float_result = data_flow_ops.dynamic_stitch(
[[0, 1, 2], [2, 3]], [[12.0, 23.0, 34.0], [1.0, 2.0]])
self.assertDeviceEqual(const_a.device, "/device:CPU:0")
self.assertDeviceEqual(const_b.device, "/device:CPU:0")
self.assertDeviceEqual(add_c.device, "/device:CPU:0")
self.assertDeviceEqual(var_v.device, "/device:CPU:0")
self.assertDeviceEqual(assign_c_to_v.device, "/device:CPU:0")
self.assertDeviceEqual(const_string.device, "/device:CPU:0")
self.assertDeviceEqual(dynamic_stitch_int_result.device, "/device:CPU:0")
self.assertDeviceEqual(dynamic_stitch_float_result.device, "/device:CPU:0")
def testPinRequiredOpsOnCPU(self):
with ops.Graph().as_default() as g, g.device(
graph_util.pin_variables_on_cpu):
const_a = constant_op.constant(5.0)
const_b = constant_op.constant(10.0)
add_c = const_a + const_b
var_v = state_ops.variable_op([], dtype=dtypes.float32)
assign_c_to_v = state_ops.assign(var_v, add_c)
dynamic_stitch_int_result = data_flow_ops.dynamic_stitch(
[[0, 1, 2], [2, 3]], [[12, 23, 34], [1, 2]])
dynamic_stitch_float_result = data_flow_ops.dynamic_stitch(
[[0, 1, 2], [2, 3]], [[12.0, 23.0, 34.0], [1.0, 2.0]])
      # Non-variable ops should not specify a device
self.assertDeviceEqual(const_a.device, None)
self.assertDeviceEqual(const_b.device, None)
self.assertDeviceEqual(add_c.device, None)
# Variable ops specify a device
self.assertDeviceEqual(var_v.device, "/device:CPU:0")
self.assertDeviceEqual(assign_c_to_v.device, "/device:CPU:0")
def testTwoDeviceFunctions(self):
with ops.Graph().as_default() as g:
var_0 = state_ops.variable_op([1], dtype=dtypes.float32)
with g.device(graph_util.pin_variables_on_cpu):
var_1 = state_ops.variable_op([1], dtype=dtypes.float32)
var_2 = state_ops.variable_op([1], dtype=dtypes.float32)
var_3 = state_ops.variable_op([1], dtype=dtypes.float32)
with g.device(graph_util.pin_variables_on_cpu):
var_4 = state_ops.variable_op([1], dtype=dtypes.float32)
with g.device("/device:GPU:0"):
var_5 = state_ops.variable_op([1], dtype=dtypes.float32)
var_6 = state_ops.variable_op([1], dtype=dtypes.float32)
self.assertDeviceEqual(var_0.device, None)
self.assertDeviceEqual(var_1.device, "/device:CPU:0")
self.assertDeviceEqual(var_2.device, None)
self.assertDeviceEqual(var_3.device, None)
self.assertDeviceEqual(var_4.device, "/device:CPU:0")
self.assertDeviceEqual(var_5.device, "/device:GPU:0")
self.assertDeviceEqual(var_6.device, "/device:CPU:0")
def testNestedDeviceFunctions(self):
with tf.Graph().as_default():
var_0 = tf.Variable(0)
with tf.device(graph_util.pin_variables_on_cpu):
var_1 = tf.Variable(1)
with tf.device(lambda op: "/gpu:0"):
var_2 = tf.Variable(2)
with tf.device("/gpu:0"): # Implicit merging device function.
var_3 = tf.Variable(3)
self.assertDeviceEqual(var_0.device, None)
self.assertDeviceEqual(var_1.device, "/device:CPU:0")
self.assertDeviceEqual(var_2.device, "/device:GPU:0")
self.assertDeviceEqual(var_3.device, "/device:GPU:0")
def testExplicitDevice(self):
with ops.Graph().as_default() as g:
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/job:ps"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, None)
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/job:ps")
def testDefaultDevice(self):
with ops.Graph().as_default() as g, g.device(
graph_util.pin_variables_on_cpu):
with g.device("/job:ps"):
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/replica:0"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, "/job:ps")
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/replica:0")
def testExtractSubGraph(self):
graph_def = tf.GraphDef()
n1 = graph_def.node.add()
n1.name = "n1"
n1.input.extend(["n5"])
n2 = graph_def.node.add()
n2.name = "n2"
# Take the first output of the n1 node as the input.
n2.input.extend(["n1:0"])
n3 = graph_def.node.add()
n3.name = "n3"
# Add a control input (which isn't really needed by the kernel, but
# rather to enforce execution order between nodes).
n3.input.extend(["^n2"])
n4 = graph_def.node.add()
n4.name = "n4"
# It is fine to have a loops in the graph as well.
n5 = graph_def.node.add()
n5.name = "n5"
n5.input.extend(["n1"])
sub_graph = graph_util.extract_sub_graph(graph_def, ["n3"])
self.assertEqual("n1", sub_graph.node[0].name)
self.assertEqual("n2", sub_graph.node[1].name)
self.assertEqual("n3", sub_graph.node[2].name)
self.assertEqual("n5", sub_graph.node[3].name)
def testConvertVariablesToConsts(self):
with tf.Graph().as_default():
variable_node = tf.Variable(1.0, name="variable_node")
output_node = tf.mul(variable_node, 2.0, name="output_node")
with tf.Session() as sess:
init = tf.initialize_all_variables()
sess.run(init)
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
variable_graph_def = sess.graph.as_graph_def()
constant_graph_def = graph_util.convert_variables_to_constants(
sess, variable_graph_def, ["output_node"])
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with tf.Graph().as_default():
_ = tf.import_graph_def(constant_graph_def, name="")
self.assertEqual(4, len(constant_graph_def.node))
for node in constant_graph_def.node:
self.assertNotEqual("Variable", node.op)
with tf.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 3,647,947,949,996,753,000 | 41.91866 | 80 | 0.651839 | false |
timorieber/wagtail | wagtail/admin/api/views.py | 7 | 2676 | from collections import OrderedDict
from rest_framework.authentication import SessionAuthentication
from wagtail.api.v2.views import PagesAPIViewSet
from wagtail.core.models import Page
from .filters import ForExplorerFilter, HasChildrenFilter
from .serializers import AdminPageSerializer
class PagesAdminAPIViewSet(PagesAPIViewSet):
base_serializer_class = AdminPageSerializer
authentication_classes = [SessionAuthentication]
# Add has_children and for_explorer filters
filter_backends = PagesAPIViewSet.filter_backends + [
HasChildrenFilter,
ForExplorerFilter,
]
meta_fields = PagesAPIViewSet.meta_fields + [
'latest_revision_created_at',
'status',
'children',
'descendants',
'parent',
'ancestors',
]
body_fields = PagesAPIViewSet.body_fields + [
'admin_display_title',
]
listing_default_fields = PagesAPIViewSet.listing_default_fields + [
'latest_revision_created_at',
'status',
'children',
'admin_display_title',
]
# Allow the parent field to appear on listings
detail_only_fields = []
known_query_parameters = PagesAPIViewSet.known_query_parameters.union([
'for_explorer',
'has_children'
])
def get_root_page(self):
"""
Returns the page that is used when the `&child_of=root` filter is used.
"""
return Page.get_first_root_node()
def get_base_queryset(self):
"""
Returns a queryset containing all pages that can be seen by this user.
This is used as the base for get_queryset and is also used to find the
parent pages when using the child_of and descendant_of filters as well.
"""
return Page.objects.all()
def get_queryset(self):
queryset = super().get_queryset()
# Hide root page
# TODO: Add "include_root" flag
queryset = queryset.exclude(depth=1).specific()
return queryset
def get_type_info(self):
types = OrderedDict()
for name, model in self.seen_types.items():
types[name] = OrderedDict([
('verbose_name', model._meta.verbose_name),
('verbose_name_plural', model._meta.verbose_name_plural),
])
return types
def listing_view(self, request):
response = super().listing_view(request)
response.data['__types'] = self.get_type_info()
return response
def detail_view(self, request, pk):
response = super().detail_view(request, pk)
response.data['__types'] = self.get_type_info()
return response
| bsd-3-clause | -4,839,773,944,795,496,000 | 27.774194 | 79 | 0.633034 | false |
DivineHime/seishirou | lib/youtube_dl/extractor/tvplay.py | 26 | 15914 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urlparse,
)
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
parse_iso8601,
qualities,
try_get,
update_url_query,
)
class TVPlayIE(InfoExtractor):
IE_NAME = 'mtg'
IE_DESC = 'MTG services'
_VALID_URL = r'''(?x)
(?:
mtg:|
https?://
(?:www\.)?
(?:
tvplay(?:\.skaties)?\.lv/parraides|
(?:tv3play|play\.tv3)\.lt/programos|
tv3play(?:\.tv3)?\.ee/sisu|
(?:tv(?:3|6|8|10)play|viafree)\.se/program|
(?:(?:tv3play|viasat4play|tv6play|viafree)\.no|(?:tv3play|viafree)\.dk)/programmer|
play\.novatv\.bg/programi
)
/(?:[^/]+/)+
)
(?P<id>\d+)
'''
_TESTS = [
{
'url': 'http://www.tvplay.lv/parraides/vinas-melo-labak/418113?autostart=true',
'md5': 'a1612fe0849455423ad8718fe049be21',
'info_dict': {
'id': '418113',
'ext': 'mp4',
'title': 'Kādi ir īri? - Viņas melo labāk',
'description': 'Baiba apsmej īrus, kādi tie ir un ko viņi dara.',
'series': 'Viņas melo labāk',
'season': '2.sezona',
'season_number': 2,
'duration': 25,
'timestamp': 1406097056,
'upload_date': '20140723',
},
},
{
'url': 'http://play.tv3.lt/programos/moterys-meluoja-geriau/409229?autostart=true',
'info_dict': {
'id': '409229',
'ext': 'flv',
'title': 'Moterys meluoja geriau',
'description': 'md5:9aec0fc68e2cbc992d2a140bd41fa89e',
'series': 'Moterys meluoja geriau',
'episode_number': 47,
'season': '1 sezonas',
'season_number': 1,
'duration': 1330,
'timestamp': 1403769181,
'upload_date': '20140626',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.tv3play.ee/sisu/kodu-keset-linna/238551?autostart=true',
'info_dict': {
'id': '238551',
'ext': 'flv',
'title': 'Kodu keset linna 398537',
'description': 'md5:7df175e3c94db9e47c0d81ffa5d68701',
'duration': 1257,
'timestamp': 1292449761,
'upload_date': '20101215',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.tv3play.se/program/husraddarna/395385?autostart=true',
'info_dict': {
'id': '395385',
'ext': 'mp4',
'title': 'Husräddarna S02E07',
'description': 'md5:f210c6c89f42d4fc39faa551be813777',
'duration': 2574,
'timestamp': 1400596321,
'upload_date': '20140520',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.tv6play.se/program/den-sista-dokusapan/266636?autostart=true',
'info_dict': {
'id': '266636',
'ext': 'mp4',
'title': 'Den sista dokusåpan S01E08',
'description': 'md5:295be39c872520221b933830f660b110',
'duration': 1492,
'timestamp': 1330522854,
'upload_date': '20120229',
'age_limit': 18,
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.tv8play.se/program/antikjakten/282756?autostart=true',
'info_dict': {
'id': '282756',
'ext': 'mp4',
'title': 'Antikjakten S01E10',
'description': 'md5:1b201169beabd97e20c5ad0ad67b13b8',
'duration': 2646,
'timestamp': 1348575868,
'upload_date': '20120925',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.tv3play.no/programmer/anna-anka-soker-assistent/230898?autostart=true',
'info_dict': {
'id': '230898',
'ext': 'mp4',
'title': 'Anna Anka søker assistent - Ep. 8',
'description': 'md5:f80916bf5bbe1c5f760d127f8dd71474',
'duration': 2656,
'timestamp': 1277720005,
'upload_date': '20100628',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.viasat4play.no/programmer/budbringerne/21873?autostart=true',
'info_dict': {
'id': '21873',
'ext': 'mp4',
'title': 'Budbringerne program 10',
'description': 'md5:4db78dc4ec8a85bb04fd322a3ee5092d',
'duration': 1297,
'timestamp': 1254205102,
'upload_date': '20090929',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.tv6play.no/programmer/hotelinspektor-alex-polizzi/361883?autostart=true',
'info_dict': {
'id': '361883',
'ext': 'mp4',
'title': 'Hotelinspektør Alex Polizzi - Ep. 10',
'description': 'md5:3ecf808db9ec96c862c8ecb3a7fdaf81',
'duration': 2594,
'timestamp': 1393236292,
'upload_date': '20140224',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://play.novatv.bg/programi/zdravei-bulgariya/624952?autostart=true',
'info_dict': {
'id': '624952',
'ext': 'flv',
'title': 'Здравей, България (12.06.2015 г.) ',
'description': 'md5:99f3700451ac5bb71a260268b8daefd7',
'duration': 8838,
'timestamp': 1434100372,
'upload_date': '20150612',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://tvplay.skaties.lv/parraides/vinas-melo-labak/418113?autostart=true',
'only_matching': True,
},
{
# views is null
'url': 'http://tvplay.skaties.lv/parraides/tv3-zinas/760183',
'only_matching': True,
},
{
'url': 'http://tv3play.tv3.ee/sisu/kodu-keset-linna/238551?autostart=true',
'only_matching': True,
},
{
'url': 'http://www.viafree.se/program/underhallning/i-like-radio-live/sasong-1/676869',
'only_matching': True,
},
{
'url': 'mtg:418113',
'only_matching': True,
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
geo_country = self._search_regex(
r'https?://[^/]+\.([a-z]{2})', url,
'geo country', default=None)
if geo_country:
self._initialize_geo_bypass([geo_country.upper()])
video = self._download_json(
'http://playapi.mtgx.tv/v3/videos/%s' % video_id, video_id, 'Downloading video JSON')
title = video['title']
try:
streams = self._download_json(
'http://playapi.mtgx.tv/v3/videos/stream/%s' % video_id,
video_id, 'Downloading streams JSON')
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
msg = self._parse_json(e.cause.read().decode('utf-8'), video_id)
raise ExtractorError(msg['msg'], expected=True)
raise
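        # youtube-dl's qualities() helper ranks later entries higher, so progressive 'high' is preferred over 'medium' and 'hls'.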
quality = qualities(['hls', 'medium', 'high'])
formats = []
for format_id, video_url in streams.get('streams', {}).items():
if not video_url or not isinstance(video_url, compat_str):
continue
ext = determine_ext(video_url)
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
update_url_query(video_url, {
'hdcore': '3.5.0',
'plugin': 'aasp-3.5.0.151.81'
}), video_id, f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
else:
fmt = {
'format_id': format_id,
'quality': quality(format_id),
'ext': ext,
}
if video_url.startswith('rtmp'):
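                    # Split the RTMP URL into base URL, app and playpath, which the RTMP downloader needs as separate parameters.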
m = re.search(
r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', video_url)
if not m:
continue
fmt.update({
'ext': 'flv',
'url': m.group('url'),
'app': m.group('app'),
'play_path': m.group('playpath'),
})
else:
fmt.update({
'url': video_url,
})
formats.append(fmt)
if not formats and video.get('is_geo_blocked'):
self.raise_geo_restricted(
'This content might not be available in your country due to copyright reasons')
self._sort_formats(formats)
# TODO: webvtt in m3u8
subtitles = {}
sami_path = video.get('sami_path')
if sami_path:
lang = self._search_regex(
r'_([a-z]{2})\.xml', sami_path, 'lang',
default=compat_urlparse.urlparse(url).netloc.rsplit('.', 1)[-1])
subtitles[lang] = [{
'url': sami_path,
}]
series = video.get('format_title')
episode_number = int_or_none(video.get('format_position', {}).get('episode'))
season = video.get('_embedded', {}).get('season', {}).get('title')
season_number = int_or_none(video.get('format_position', {}).get('season'))
return {
'id': video_id,
'title': title,
'description': video.get('description'),
'series': series,
'episode_number': episode_number,
'season': season,
'season_number': season_number,
'duration': int_or_none(video.get('duration')),
'timestamp': parse_iso8601(video.get('created_at')),
'view_count': try_get(video, lambda x: x['views']['total'], int),
'age_limit': int_or_none(video.get('age_limit', 0)),
'formats': formats,
'subtitles': subtitles,
}
class ViafreeIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:www\.)?
viafree\.
(?:
(?:dk|no)/programmer|
se/program
)
/(?:[^/]+/)+(?P<id>[^/?#&]+)
'''
_TESTS = [{
'url': 'http://www.viafree.se/program/livsstil/husraddarna/sasong-2/avsnitt-2',
'info_dict': {
'id': '395375',
'ext': 'mp4',
'title': 'Husräddarna S02E02',
'description': 'md5:4db5c933e37db629b5a2f75dfb34829e',
'series': 'Husräddarna',
'season': 'Säsong 2',
'season_number': 2,
'duration': 2576,
'timestamp': 1400596321,
'upload_date': '20140520',
},
'params': {
'skip_download': True,
},
'add_ie': [TVPlayIE.ie_key()],
}, {
# with relatedClips
'url': 'http://www.viafree.se/program/reality/sommaren-med-youtube-stjarnorna/sasong-1/avsnitt-1',
'info_dict': {
'id': '758770',
'ext': 'mp4',
'title': 'Sommaren med YouTube-stjärnorna S01E01',
'description': 'md5:2bc69dce2c4bb48391e858539bbb0e3f',
'series': 'Sommaren med YouTube-stjärnorna',
'season': 'Säsong 1',
'season_number': 1,
'duration': 1326,
'timestamp': 1470905572,
'upload_date': '20160811',
},
'params': {
'skip_download': True,
},
'add_ie': [TVPlayIE.ie_key()],
}, {
# Different og:image URL schema
'url': 'http://www.viafree.se/program/reality/sommaren-med-youtube-stjarnorna/sasong-1/avsnitt-2',
'only_matching': True,
}, {
'url': 'http://www.viafree.no/programmer/underholdning/det-beste-vorspielet/sesong-2/episode-1',
'only_matching': True,
}, {
'url': 'http://www.viafree.dk/programmer/reality/paradise-hotel/saeson-7/episode-5',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if TVPlayIE.suitable(url) else super(ViafreeIE, cls).suitable(url)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
data = self._parse_json(
self._search_regex(
r'(?s)window\.App\s*=\s*({.+?})\s*;\s*</script',
webpage, 'data', default='{}'),
video_id, transform_source=lambda x: re.sub(
r'(?s)function\s+[a-zA-Z_][\da-zA-Z_]*\s*\([^)]*\)\s*{[^}]*}\s*',
'null', x), fatal=False)
video_id = None
if data:
video_id = try_get(
data, lambda x: x['context']['dispatcher']['stores'][
'ContentPageProgramStore']['currentVideo']['id'],
compat_str)
# Fallback #1 (extract from og:image URL schema)
if not video_id:
thumbnail = self._og_search_thumbnail(webpage, default=None)
if thumbnail:
video_id = self._search_regex(
# Patterns seen:
# http://cdn.playapi.mtgx.tv/imagecache/600x315/cloud/content-images/inbox/765166/a2e95e5f1d735bab9f309fa345cc3f25.jpg
# http://cdn.playapi.mtgx.tv/imagecache/600x315/cloud/content-images/seasons/15204/758770/4a5ba509ca8bc043e1ebd1a76131cdf2.jpg
r'https?://[^/]+/imagecache/(?:[^/]+/)+(\d{6,})/',
thumbnail, 'video id', default=None)
# Fallback #2. Extract from raw JSON string.
# May extract wrong video id if relatedClips is present.
if not video_id:
video_id = self._search_regex(
r'currentVideo["\']\s*:\s*.+?["\']id["\']\s*:\s*["\'](\d{6,})',
webpage, 'video id')
return self.url_result('mtg:%s' % video_id, TVPlayIE.ie_key())
| gpl-3.0 | 3,830,792,828,342,559,000 | 36.013986 | 147 | 0.45028 | false |
abdoosh00/edraak | common/lib/xmodule/xmodule/tests/test_error_module.py | 37 | 5977 | """
Tests for ErrorModule and NonStaffErrorModule
"""
import unittest
from xmodule.tests import get_test_system
from xmodule.error_module import ErrorDescriptor, ErrorModule, NonStaffErrorDescriptor
from xmodule.modulestore.xml import CourseLocationGenerator
from opaque_keys.edx.locations import SlashSeparatedCourseKey, Location
from xmodule.x_module import XModuleDescriptor, XModule, STUDENT_VIEW
from mock import MagicMock, Mock, patch
from xblock.runtime import Runtime, IdReader
from xblock.field_data import FieldData
from xblock.fields import ScopeIds
from xblock.test.tools import unabc
class SetupTestErrorModules():
def setUp(self):
self.system = get_test_system()
self.course_id = SlashSeparatedCourseKey('org', 'course', 'run')
self.location = self.course_id.make_usage_key('foo', 'bar')
self.valid_xml = u"<problem>ABC \N{SNOWMAN}</problem>"
self.error_msg = "Error"
class TestErrorModule(unittest.TestCase, SetupTestErrorModules):
"""
Tests for ErrorModule and ErrorDescriptor
"""
def setUp(self):
SetupTestErrorModules.setUp(self)
def test_error_module_xml_rendering(self):
descriptor = ErrorDescriptor.from_xml(
self.valid_xml,
self.system,
CourseLocationGenerator(self.course_id),
self.error_msg
)
self.assertIsInstance(descriptor, ErrorDescriptor)
descriptor.xmodule_runtime = self.system
context_repr = self.system.render(descriptor, STUDENT_VIEW).content
self.assertIn(self.error_msg, context_repr)
self.assertIn(repr(self.valid_xml), context_repr)
def test_error_module_from_descriptor(self):
descriptor = MagicMock([XModuleDescriptor],
runtime=self.system,
location=self.location,
_field_data=self.valid_xml)
error_descriptor = ErrorDescriptor.from_descriptor(
descriptor, self.error_msg)
self.assertIsInstance(error_descriptor, ErrorDescriptor)
error_descriptor.xmodule_runtime = self.system
context_repr = self.system.render(error_descriptor, STUDENT_VIEW).content
self.assertIn(self.error_msg, context_repr)
self.assertIn(repr(descriptor), context_repr)
class TestNonStaffErrorModule(unittest.TestCase, SetupTestErrorModules):
"""
Tests for NonStaffErrorModule and NonStaffErrorDescriptor
"""
def setUp(self):
SetupTestErrorModules.setUp(self)
def test_non_staff_error_module_create(self):
descriptor = NonStaffErrorDescriptor.from_xml(
self.valid_xml,
self.system,
CourseLocationGenerator(self.course_id)
)
self.assertIsInstance(descriptor, NonStaffErrorDescriptor)
def test_from_xml_render(self):
descriptor = NonStaffErrorDescriptor.from_xml(
self.valid_xml,
self.system,
CourseLocationGenerator(self.course_id)
)
descriptor.xmodule_runtime = self.system
context_repr = self.system.render(descriptor, STUDENT_VIEW).content
self.assertNotIn(self.error_msg, context_repr)
self.assertNotIn(repr(self.valid_xml), context_repr)
def test_error_module_from_descriptor(self):
descriptor = MagicMock([XModuleDescriptor],
runtime=self.system,
location=self.location,
_field_data=self.valid_xml)
error_descriptor = NonStaffErrorDescriptor.from_descriptor(
descriptor, self.error_msg)
self.assertIsInstance(error_descriptor, ErrorDescriptor)
error_descriptor.xmodule_runtime = self.system
context_repr = self.system.render(error_descriptor, STUDENT_VIEW).content
self.assertNotIn(self.error_msg, context_repr)
self.assertNotIn(str(descriptor), context_repr)
class BrokenModule(XModule):
def __init__(self, *args, **kwargs):
super(BrokenModule, self).__init__(*args, **kwargs)
raise Exception("This is a broken xmodule")
class BrokenDescriptor(XModuleDescriptor):
module_class = BrokenModule
class TestException(Exception):
"""An exception type to use to verify raises in tests"""
pass
@unabc("Tests should not call {}")
class TestRuntime(Runtime):
pass
class TestErrorModuleConstruction(unittest.TestCase):
"""
Test that error module construction happens correctly
"""
def setUp(self):
field_data = Mock(spec=FieldData)
self.descriptor = BrokenDescriptor(
TestRuntime(Mock(spec=IdReader), field_data),
field_data,
ScopeIds(None, None, None, Location('org', 'course', 'run', 'broken', 'name', None))
)
self.descriptor.xmodule_runtime = TestRuntime(Mock(spec=IdReader), field_data)
self.descriptor.xmodule_runtime.error_descriptor_class = ErrorDescriptor
self.descriptor.xmodule_runtime.xmodule_instance = None
def test_broken_module(self):
"""
Test that when an XModule throws an error during __init__, we
get an ErrorModule back from XModuleDescriptor._xmodule
"""
module = self.descriptor._xmodule
self.assertIsInstance(module, ErrorModule)
@patch.object(ErrorDescriptor, '__init__', Mock(side_effect=TestException))
def test_broken_error_descriptor(self):
"""
Test that a broken error descriptor doesn't cause an infinite loop
"""
with self.assertRaises(TestException):
module = self.descriptor._xmodule
@patch.object(ErrorModule, '__init__', Mock(side_effect=TestException))
def test_broken_error_module(self):
"""
Test that a broken error module doesn't cause an infinite loop
"""
with self.assertRaises(TestException):
module = self.descriptor._xmodule
| agpl-3.0 | -5,115,173,604,894,154,000 | 36.35625 | 96 | 0.666555 | false |
godiard/sugarlabs-calculate | rational.py | 1 | 4226 | # rational.py, rational number class Reinier Heeres <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Change log:
# 2007-07-03: rwh, first version
from decimal import Decimal
import logging
_logger = logging.getLogger('Rational')
class Rational:
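    """Minimal rational-number (fraction) type; e.g. Rational(1, 2) + Rational(1, 3) gives Rational(5, 6), printed as "5/6"."""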
def __init__(self, n=None, d=None):
self.n = 0
self.d = 0
if n is not None:
self.set(n, d)
def set(self, n, d=None):
if d is not None:
self.n = int(n)
self.d = int(d)
elif isinstance(n, tuple) or isinstance(n, list):
self.n = int(n[0])
self.d = int(n[1])
elif isinstance(n, bytes):
return
self._simplify()
def __str__(self):
if self.d == 1 or self.d == 0:
return "%d" % (self.n)
else:
return "%d/%d" % (self.n, self.d)
def __float__(self):
return float(self.n) / float(self.d)
def gcd(self, a, b):
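        # Euclid's algorithm: gcd(a, b) == gcd(b, a % b), terminating with gcd(a, 0) == a.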
if b == 0:
return a
else:
return self.gcd(b, a % b)
def _simplify(self):
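        # Reduce the fraction to lowest terms by dividing numerator and denominator by their gcd.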
if self.d == 0:
return
if self.n == self.d:
self.n = int(1)
self.d = int(1)
else:
gcd = self.gcd(self.n, self.d)
self.n /= gcd
self.d /= gcd
def __add__(self, rval):
if isinstance(rval, Rational):
ret = Rational(self.n * rval.d + self.d * rval.n, self.d * rval.d)
        elif isinstance(rval, int):
ret = Rational(self.n + self.d * rval, self.d)
else:
ret = float(self) + rval
return ret
def __radd__(self, lval):
return self.__add__(lval)
def __sub__(self, rval):
if isinstance(rval, Rational):
ret = Rational(self.n * rval.d - self.d * rval.n, self.d * rval.d)
        elif isinstance(rval, int):
ret = Rational(self.n - self.d * rval, self.d)
else:
ret = float(self) - rval
return ret
def __rsub__(self, lval):
return -self.__sub__(lval)
def __mul__(self, rval):
if isinstance(rval, Rational):
ret = Rational(self.n * rval.n, self.d * rval.d)
        elif isinstance(rval, int):
ret = Rational(self.n * rval, self.d)
elif isinstance(rval, Decimal):
ret = rval * Decimal(str(float(self)))
else:
ret = rval * float(self)
return ret
def __rmul__(self, lval):
return self.__mul__(lval)
def __div__(self, rval):
if isinstance(rval, Rational):
ret = Rational(self.n * rval.d, self.d * rval.n)
        elif isinstance(rval, int):
ret = Rational(self.n, self.d * rval)
else:
ret = float(self) / rval
return ret
def __rdiv__(self, lval):
return self.__div__(lval)
def __neg__(self):
return Rational(-self.n, self.d)
    def __abs__(self):
        self.n = abs(self.n)
        self.d = abs(self.d)
        return self
def __pow__(self, rval):
        if isinstance(rval, int):
ret = Rational(self.n ** rval, self.d ** rval)
else:
ret = float(self.n) ** rval / float(self.d) ** rval
return ret
| gpl-2.0 | 2,628,252,699,635,604,000 | 28.971631 | 78 | 0.52035 | false |
codingcommando/tmtp | standalone/tests/P_spline_test.py | 1 | 7787 | #!/usr/bin/env python
#
# This is a test sample pattern distributed as part of the tmtp
# open fashion design project.
#
# This pattern tests the code which finds Bezier control points
# from points along a curve in order to implement a b-spline
#
from tmtpl.constants import *
from tmtpl.pattern import *
from tmtpl.document import *
from tmtpl.client import Client
from tmtpl.curves import GetCurveControlPoints, FudgeControlPoints
# Project specific
#from math import sin, cos, radians
from pysvg.filter import *
from pysvg.gradient import *
from pysvg.linking import *
from pysvg.script import *
from pysvg.shape import *
from pysvg.structure import *
from pysvg.style import *
from pysvg.text import *
from pysvg.builders import *
class PatternDesign():
def __init__(self):
self.styledefs = {}
return
def pattern(self):
"""
Method defining a pattern design. This is where the designer places
all elements of the design definition
"""
# The following attributes are set before calling this method:
#
# self.cd - Client Data, which has been loaded from the client data file
#
        # self.styledefs - the style definition dictionary, loaded from the styles file
#
# self.cfg - configuration settings from the main app framework
#
# TODO find a way to get this administrative cruft out of this pattern method
cd = self.cd
self.cfg['clientdata'] = cd
self.cfg['paper_width'] = ( 36 * IN_TO_PT )
self.cfg['border'] = ( 5 * CM_TO_PT ) # document borders
border = self.cfg['border']
# create the document info and fill it in
# TODO - abstract these into configuration file(s)
metainfo = {'companyName':'Test Company', # mandatory
'designerName':'Test Designer', # mandatory
'patternName':'Layout Test 1', # mandatory
'patternNumber':'1234567' # mandatory
}
self.cfg['metainfo'] = metainfo
# attributes for the entire svg document
docattrs = {'currentScale' : "0.05 : 1",
'fitBoxtoViewport' : "True",
'preserveAspectRatio' : "xMidYMid meet",
}
doc = Document(self.cfg, name = 'document', attributes = docattrs)
# Set up the title block
tb = TitleBlock('pattern', 'titleblock', self.cfg['border'], self.cfg['border'], stylename = 'titleblock_text_style')
doc.add(tb)
# The whole pattern
tp = Pattern('splines')
doc.add(tp)
# Set up styles dictionary in the pattern object
tp.styledefs.update(self.styledefs)
# Begin pattern piece
part = PatternPiece('pattern', 'parta', letter = 'A', fabric = 1, interfacing = 0, lining = 0)
tp.add(part)
part.label_x = 1 * IN_TO_PT
part.label_y = 10 * IN_TO_PT
#
# Create a spline through a number of points
#
# create a list of points through which the line will pass
pointlist = []
pnt = Point('reference', 'pointa', 1.0 * IN_TO_PT, 6.0 * IN_TO_PT, 'point_style')
part.add(pnt)
pointlist.append(pnt)
pnt = Point('reference', 'pointb', 0.5 * IN_TO_PT, 7.0 * IN_TO_PT, 'point_style')
part.add(pnt)
pointlist.append(pnt)
pnt = Point('reference', 'pointc', 1.5 * IN_TO_PT, 7.5 * IN_TO_PT, 'point_style')
part.add(pnt)
pointlist.append(pnt)
pnt = Point('reference', 'pointd', 3.0 * IN_TO_PT, 7.5 * IN_TO_PT, 'point_style')
part.add(pnt)
pointlist.append(pnt)
pnt = Point('reference', 'pointe', 4.0 * IN_TO_PT, 10.0 * IN_TO_PT, 'point_style')
part.add(pnt)
pointlist.append(pnt)
pnt = Point('reference', 'pointf', 4.0 * IN_TO_PT, 12.0 * IN_TO_PT, 'point_style')
part.add(pnt)
pointlist.append(pnt)
pnt = Point('reference', 'pointg', 5.0 * IN_TO_PT, 12.0 * IN_TO_PT, 'point_style')
part.add(pnt)
pointlist.append(pnt)
# get first and second control point lists, we supply a name for these
fcp, scp = GetCurveControlPoints('Froz', pointlist)
# dump them out if needed (Change False to True)
if False:
for i in range(0, len(fcp)):
print ' point: %f %f' % (pointlist[i].x / IN_TO_PT, pointlist[i].y / IN_TO_PT)
print ' fcp: %f %f' % (fcp[i].x / IN_TO_PT, fcp[i].y / IN_TO_PT)
print ' scp: %f %f' % (scp[i].x / IN_TO_PT, scp[i].y / IN_TO_PT)
print ' point: %f %f' % (pointlist[-1].x / IN_TO_PT, pointlist[-1].y / IN_TO_PT)
# EXPERIMENTAL - fudge the control points to adjust the length of the control vectors
(fcp, scp) = FudgeControlPoints(pointlist, fcp, scp, .3333)
# add them to the pattern piece (optional)
for pnt in fcp:
part.add(pnt)
for pnt in scp:
part.add(pnt)
# Now create a path using these points
testpath = path()
part.add(Path('pattern', 'path', 'Test Spline Path', testpath, 'seamline_style'))
# start at the first point in the list
testpath.appendMoveToPath(pointlist[0].x, pointlist[0].y, relative = False)
        # Now for each additional original point in the list, add the derived control points and the point itself as a cubic curve segment
for i in range (1, len(pointlist)):
testpath.appendCubicCurveToPath(fcp[i-1].x, fcp[i-1].y, scp[i-1].x, scp[i-1].y, pointlist[i].x, pointlist[i].y, relative = False)
#
# End of multi-point spline test data
#
#
# Create a second spline through only two points to test this special case
#
# create a list of points through which the line will pass
pointlist = []
pnt = Point('reference', 'pointx', 4.0 * IN_TO_PT, 6.0 * IN_TO_PT, 'point_style')
part.add(pnt)
pointlist.append(pnt)
pnt = Point('reference', 'pointy', 7.0 * IN_TO_PT, 7.0 * IN_TO_PT, 'point_style')
part.add(pnt)
pointlist.append(pnt)
# get first and second control point lists, we supply a name for these
fcp, scp = GetCurveControlPoints('Floob', pointlist)
# dump them out if needed (Change False to True)
if False:
for i in range(0, len(fcp)):
print ' point: %f %f' % (pointlist[i].x / IN_TO_PT, pointlist[i].y / IN_TO_PT)
print ' fcp: %f %f' % (fcp[i].x / IN_TO_PT, fcp[i].y / IN_TO_PT)
print ' scp: %f %f' % (scp[i].x / IN_TO_PT, scp[i].y / IN_TO_PT)
print ' point: %f %f' % (pointlist[-1].x / IN_TO_PT, pointlist[-1].y / IN_TO_PT)
# add them to the pattern piece (optional)
for pnt in fcp:
part.add(pnt)
for pnt in scp:
part.add(pnt)
# Now create a path using these points
testpath = path()
part.add(Path('pattern', 'path2', 'Second Test Spline Path', testpath, 'seamline_style'))
# start at the first point in the list
testpath.appendMoveToPath(pointlist[0].x, pointlist[0].y, relative = False)
        # Now for each additional original point in the list, add the derived control points and the point itself as a cubic curve segment
for i in range (1, len(pointlist)):
testpath.appendCubicCurveToPath(fcp[i-1].x, fcp[i-1].y, scp[i-1].x, scp[i-1].y, pointlist[i].x, pointlist[i].y, relative = False)
#
# End of second (two-point) spline test data
#
# call draw once for the entire pattern
doc.draw()
return
# vi:set ts=4 sw=4 expandtab:
| gpl-3.0 | 2,046,099,069,568,475,100 | 35.38785 | 142 | 0.577886 | false |
luser/socorro | socorro/unittest/external/postgresql/test_adi.py | 2 | 6614 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
from nose.tools import eq_, assert_raises
from socorrolib.lib import MissingArgumentError
from socorro.external.postgresql.adi import ADI
from unittestbase import PostgreSQLTestCase
class IntegrationTestADI(PostgreSQLTestCase):
def setUp(self):
"""Set up this test class by populating the reports table with fake
data. """
super(IntegrationTestADI, self).setUp()
self._truncate()
cursor = self.connection.cursor()
self.date = datetime.datetime(2015, 7, 1)
yesterday = self.date - datetime.timedelta(hours=24)
products = ('Firefox', 'Thunderbird')
versions = ('39.0b1', '40.0')
platforms = ('Linux', 'Windows')
channels = ('release', 'beta')
build_date = yesterday - datetime.timedelta(days=30)
sunset_date = yesterday + datetime.timedelta(days=30)
for platform in platforms:
cursor.execute("""
INSERT INTO os_names
(os_short_name, os_name)
VALUES
(%s, %s)
""", (platform.lower()[:3], platform))
cursor.execute("""
INSERT INTO os_name_matches
(match_string, os_name)
VALUES
(%s, %s)
""", (platform, platform))
adi_count = 1
product_version_id = 0
for product in products:
cursor.execute("""
INSERT INTO products
(product_name, sort, release_name)
VALUES
(%s, 1, %s)
""", (
product, product.lower()
))
cursor.execute("""
INSERT into product_productid_map (
product_name,
productid
) VALUES (
%s, %s
)
""", (
product,
product.lower() + '-guid',
))
for version in versions:
for platform in platforms:
for channel in channels:
cursor.execute("""
INSERT INTO raw_adi (
adi_count,
date,
product_name,
product_os_platform,
product_os_version,
product_version,
build,
product_guid,
update_channel,
received_at
)
VALUES (
%s, %s, %s, %s, %s, %s, %s, %s, %s, NOW()
)
""", (
adi_count,
yesterday,
product,
platform,
'1.0',
version,
'20140903141017',
'{abc}',
channel,
))
adi_count *= 2
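                        # Doubling after every insert gives each row a distinct power-of-two ADI count, so the sums expected in test_get are unambiguous.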
product_version_id += 1
cursor.execute("""
INSERT INTO product_versions
(product_version_id, product_name, major_version,
release_version, version_string, version_sort,
build_date, sunset_date, featured_version,
build_type, build_type_enum)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, 't', %s, %s)
""", (
product_version_id,
product,
version,
version,
version,
'0000' + version.replace('.', ''),
build_date,
sunset_date,
'release',
'release',
))
cursor.callproc('update_adu', [yesterday.date()])
self.connection.commit()
cursor = self.connection.cursor()
cursor.execute('select count(*) from product_adu')
count, = cursor.fetchone()
# expect there to be 2 * 2 * 2 rows of product_adu
assert count == 8, count
def tearDown(self):
self._truncate()
super(IntegrationTestADI, self).tearDown()
def _truncate(self):
cursor = self.connection.cursor()
cursor.execute("""
TRUNCATE
raw_adi, products, product_versions, product_adu,
product_productid_map, os_names, os_name_matches
CASCADE
""")
self.connection.commit()
def test_get(self):
impl = ADI(config=self.config)
assert_raises(
MissingArgumentError,
impl.get
)
start = self.date - datetime.timedelta(days=1)
end = self.date
stats = impl.get(
start_date=start,
end_date=end,
product='Firefox',
versions=['42'],
platforms=['Linux', 'Windows'],
)
eq_(stats['hits'], [])
eq_(stats['total'], 0)
stats = impl.get(
start_date=start,
end_date=end,
product='Firefox',
versions=['40.0'],
platforms=['Linux', 'Windows'],
)
eq_(stats['total'], 1)
hit, = stats['hits']
eq_(hit, {
'adi_count': 64L + 16L,
'date': start.date(),
'version': '40.0',
'build_type': 'release'
})
stats = impl.get(
start_date=start,
end_date=end,
product='Firefox',
versions=['39.0b'],
platforms=['Linux', 'Windows'],
)
eq_(stats['total'], 1)
hit, = stats['hits']
eq_(hit, {
'adi_count': 4 + 1L,
'date': start.date(),
'version': '39.0b1',
'build_type': 'release'
})
stats = impl.get(
start_date=start,
end_date=end,
product='Firefox',
versions=['39.0b', '40.0'],
platforms=['Linux', 'Windows'],
)
eq_(stats['total'], 2)
| mpl-2.0 | 7,301,276,852,277,945,000 | 31.421569 | 75 | 0.423193 | false |
sloria/osf.io | api/base/exceptions.py | 13 | 10334 | import httplib as http
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from rest_framework.exceptions import APIException, AuthenticationFailed
def dict_error_formatting(errors, index=None):
"""
Formats all dictionary error messages for both single and bulk requests
"""
formatted_error_list = []
# Error objects may have the following members. Title and id removed to avoid clash with "title" and "id" field errors.
top_level_error_keys = ['links', 'status', 'code', 'detail', 'source', 'meta']
# Resource objects must contain at least 'id' and 'type'
resource_object_identifiers = ['type', 'id']
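    # For bulk requests, 'index' is the position of the failing resource in the request's data array and is folded into the JSON pointers below.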
if index is None:
index = ''
else:
index = str(index) + '/'
for error_key, error_description in errors.iteritems():
if isinstance(error_description, basestring):
error_description = [error_description]
if error_key in top_level_error_keys:
formatted_error_list.extend({error_key: description} for description in error_description)
elif error_key in resource_object_identifiers:
formatted_error_list.extend([{'source': {'pointer': '/data/{}'.format(index) + error_key}, 'detail': reason} for reason in error_description])
elif error_key == 'non_field_errors':
            formatted_error_list.extend([{'detail': description} for description in error_description])
else:
formatted_error_list.extend([{'source': {'pointer': '/data/{}attributes/'.format(index) + error_key}, 'detail': reason} for reason in error_description])
return formatted_error_list
def json_api_exception_handler(exc, context):
"""
Custom exception handler that returns errors object as an array
"""
# We're deliberately not stripping html from exception detail.
# This creates potential vulnerabilities to script injection attacks
# when returning raw user input into error messages.
#
    # Fortunately, Django's templating language strips markup by default,
# but if our frontend changes we may lose that protection.
# TODO: write tests to ensure our html frontend strips html
# Import inside method to avoid errors when the OSF is loaded without Django
from rest_framework.views import exception_handler
response = exception_handler(exc, context)
errors = []
if response:
message = response.data
if isinstance(exc, TwoFactorRequiredError):
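            # Advertise via response header that a one-time password from an authenticator app is required.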
response['X-OSF-OTP'] = 'required; app'
if isinstance(exc, JSONAPIException):
errors.extend([{'source': exc.source or {}, 'detail': exc.detail, 'meta': exc.meta or {}}])
elif isinstance(message, dict):
errors.extend(dict_error_formatting(message, None))
else:
if isinstance(message, basestring):
message = [message]
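            # A list of errors (e.g. from a bulk request) is formatted item by item; the index keeps pointers tied to the right item.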
for index, error in enumerate(message):
if isinstance(error, dict):
errors.extend(dict_error_formatting(error, index))
else:
errors.append({'detail': error})
response.data = {'errors': errors}
return response
class EndpointNotImplementedError(APIException):
status_code = status.HTTP_501_NOT_IMPLEMENTED
default_detail = _('This endpoint is not yet implemented.')
class ServiceUnavailableError(APIException):
status_code = status.HTTP_503_SERVICE_UNAVAILABLE
default_detail = _('Service is unavailable at this time.')
class JSONAPIException(APIException):
"""Inherits from the base DRF API exception and adds extra metadata to support JSONAPI error objects
:param str detail: a human-readable explanation specific to this occurrence of the problem
:param dict source: A dictionary containing references to the source of the error.
See http://jsonapi.org/format/#error-objects.
Example: ``source={'pointer': '/data/attributes/title'}``
:param dict meta: A meta object containing non-standard meta info about the error.
"""
status_code = status.HTTP_400_BAD_REQUEST
def __init__(self, detail=None, source=None, meta=None):
super(JSONAPIException, self).__init__(detail=detail)
self.source = source
self.meta = meta
# Custom Exceptions the Django Rest Framework does not support
class Gone(JSONAPIException):
status_code = status.HTTP_410_GONE
default_detail = ('The requested resource is no longer available.')
def UserGone(user):
return Gone(detail='The requested user is no longer available.',
meta={'full_name': user.fullname, 'family_name': user.family_name, 'given_name': user.given_name,
'middle_names': user.middle_names, 'profile_image': user.profile_image_url()})
class Conflict(JSONAPIException):
status_code = status.HTTP_409_CONFLICT
default_detail = ('Resource identifier does not match server endpoint.')
class JSONAPIParameterException(JSONAPIException):
def __init__(self, detail=None, parameter=None):
source = {
'parameter': parameter
}
super(JSONAPIParameterException, self).__init__(detail=detail, source=source)
class JSONAPIAttributeException(JSONAPIException):
def __init__(self, detail=None, attribute=None):
source = {
'pointer': '/data/attributes/{}'.format(attribute)
}
super(JSONAPIAttributeException, self).__init__(detail=detail, source=source)
class InvalidQueryStringError(JSONAPIParameterException):
"""Raised when client passes an invalid value to a query string parameter."""
default_detail = 'Query string contains an invalid value.'
status_code = http.BAD_REQUEST
class InvalidFilterOperator(JSONAPIParameterException):
"""Raised when client passes an invalid operator to a query param filter."""
status_code = http.BAD_REQUEST
def __init__(self, detail=None, value=None, valid_operators=('eq', 'lt', 'lte', 'gt', 'gte', 'contains', 'icontains')):
if value and not detail:
valid_operators = ', '.join(valid_operators)
detail = "Value '{0}' is not a supported filter operator; use one of {1}.".format(
value,
valid_operators
)
super(InvalidFilterOperator, self).__init__(detail=detail, parameter='filter')
class InvalidFilterValue(JSONAPIParameterException):
"""Raised when client passes an invalid value to a query param filter."""
status_code = http.BAD_REQUEST
def __init__(self, detail=None, value=None, field_type=None):
if not detail:
detail = "Value '{0}' is not valid".format(value)
if field_type:
detail += ' for a filter on type {0}'.format(
field_type
)
detail += '.'
super(InvalidFilterValue, self).__init__(detail=detail, parameter='filter')
class InvalidFilterError(JSONAPIParameterException):
"""Raised when client passes an malformed filter in the query string."""
default_detail = _('Query string contains a malformed filter.')
status_code = http.BAD_REQUEST
def __init__(self, detail=None):
super(InvalidFilterError, self).__init__(detail=detail, parameter='filter')
class InvalidFilterComparisonType(JSONAPIParameterException):
"""Raised when client tries to filter on a field that is not a date or number type"""
default_detail = _('Comparison operators are only supported for dates and numbers.')
status_code = http.BAD_REQUEST
class InvalidFilterMatchType(JSONAPIParameterException):
"""Raised when client tries to do a match filter on a field that is not a string or a list"""
default_detail = _('Match operators are only supported for strings and lists.')
status_code = http.BAD_REQUEST
class InvalidFilterFieldError(JSONAPIParameterException):
"""Raised when client tries to filter on a field that is not supported"""
default_detail = _('Query contained one or more filters for invalid fields.')
status_code = http.BAD_REQUEST
def __init__(self, detail=None, parameter=None, value=None):
if value and not detail:
detail = "Value '{}' is not a filterable field.".format(value)
super(InvalidFilterFieldError, self).__init__(detail=detail, parameter=parameter)
class UnconfirmedAccountError(APIException):
status_code = 400
default_detail = _('Please confirm your account before using the API.')
class UnclaimedAccountError(APIException):
status_code = 400
default_detail = _('Please claim your account before using the API.')
class DeactivatedAccountError(APIException):
status_code = 400
default_detail = _('Making API requests with credentials associated with a deactivated account is not allowed.')
class MergedAccountError(APIException):
status_code = 400
default_detail = _('Making API requests with credentials associated with a merged account is not allowed.')
class InvalidAccountError(APIException):
status_code = 400
default_detail = _('Making API requests with credentials associated with an invalid account is not allowed.')
class TwoFactorRequiredError(AuthenticationFailed):
default_detail = _('Must specify two-factor authentication OTP code.')
pass
class InvalidModelValueError(JSONAPIException):
status_code = 400
default_detail = _('Invalid value in POST/PUT/PATCH request.')
class TargetNotSupportedError(Exception):
"""Raised if a TargetField is used for a resource that isn't supported."""
pass
class RelationshipPostMakesNoChanges(Exception):
"""Raised when a post is on a relationship that already exists, so view can return a 204"""
pass
class NonDescendantNodeError(APIException):
"""Raised when a client attempts to associate a non-descendant node with a view only link"""
status_code = 400
default_detail = _('The node {0} cannot be affiliated with this View Only Link because the node you\'re trying to affiliate is not descended from the node that the View Only Link is attached to.')
def __init__(self, node_id, detail=None):
if not detail:
detail = self.default_detail.format(node_id)
super(NonDescendantNodeError, self).__init__(detail=detail)
| apache-2.0 | 2,549,332,674,260,103,000 | 38.143939 | 200 | 0.68473 | false |
romain-dartigues/ansible | lib/ansible/modules/cloud/google/gcp_compute_router.py | 7 | 16988 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_router
description:
- Represents a Router resource.
short_description: Creates a GCP Router
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
name:
description:
- Name of the resource. The name must be 1-63 characters long, and comply with
RFC1035. Specifically, the name must be 1-63 characters long and match the regular
expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must
be a lowercase letter, and all following characters must be a dash, lowercase
letter, or digit, except the last character, which cannot be a dash.
required: true
description:
description:
- An optional description of this resource.
required: false
network:
description:
- A reference to the network to which this router belongs.
- 'This field represents a link to a Network resource in GCP. It can be specified
in two ways. You can add `register: name-of-resource` to a gcp_compute_network
task and then set this network field to "{{ name-of-resource }}" Alternatively,
you can set this network to a dictionary with the selfLink key where the value
is the selfLink of your Network'
required: true
bgp:
description:
- BGP information specific to this router.
required: false
suboptions:
asn:
description:
- Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN,
either 16-bit or 32-bit. The value will be fixed for this router resource.
All VPN tunnels that link to this router will have the same local ASN.
required: true
advertise_mode:
description:
- User-specified flag to indicate which mode to use for advertisement.
- 'Valid values of this enum field are: DEFAULT, CUSTOM .'
required: false
default: DEFAULT
choices:
- DEFAULT
- CUSTOM
advertised_groups:
description:
- User-specified list of prefix groups to advertise in custom mode.
- This field can only be populated if advertiseMode is CUSTOM and is advertised
to all peers of the router. These groups will be advertised in addition
to any specified prefixes. Leave this field blank to advertise no custom
groups.
- 'This enum field has the one valid value: ALL_SUBNETS .'
required: false
advertised_ip_ranges:
description:
- User-specified list of individual IP ranges to advertise in custom mode.
This field can only be populated if advertiseMode is CUSTOM and is advertised
to all peers of the router. These IP ranges will be advertised in addition
to any specified groups.
- Leave this field blank to advertise no custom IP ranges.
required: false
suboptions:
range:
description:
- The IP range to advertise. The value must be a CIDR-formatted string.
required: false
description:
description:
- User-specified description for the IP range.
required: false
region:
description:
- Region where the router resides.
required: true
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/routers)'
- 'Google Cloud Router: U(https://cloud.google.com/router/docs/)'
'''
EXAMPLES = '''
- name: create a network
gcp_compute_network:
name: "network-router"
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: network
- name: create a router
gcp_compute_router:
name: "test_object"
network: "{{ network }}"
bgp:
asn: 64514
advertise_mode: CUSTOM
advertised_groups:
- ALL_SUBNETS
advertised_ip_ranges:
- range: 1.2.3.4
- range: 6.7.0.0/16
region: us-central1
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
id:
description:
- The unique identifier for the resource.
returned: success
type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
name:
description:
- Name of the resource. The name must be 1-63 characters long, and comply with RFC1035.
Specifically, the name must be 1-63 characters long and match the regular expression
`[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase
letter, and all following characters must be a dash, lowercase letter, or digit,
except the last character, which cannot be a dash.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
network:
description:
- A reference to the network to which this router belongs.
returned: success
type: dict
bgp:
description:
- BGP information specific to this router.
returned: success
type: complex
contains:
asn:
description:
- Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN,
either 16-bit or 32-bit. The value will be fixed for this router resource.
All VPN tunnels that link to this router will have the same local ASN.
returned: success
type: int
advertiseMode:
description:
- User-specified flag to indicate which mode to use for advertisement.
- 'Valid values of this enum field are: DEFAULT, CUSTOM .'
returned: success
type: str
advertisedGroups:
description:
- User-specified list of prefix groups to advertise in custom mode.
- This field can only be populated if advertiseMode is CUSTOM and is advertised
to all peers of the router. These groups will be advertised in addition to
any specified prefixes. Leave this field blank to advertise no custom groups.
- 'This enum field has the one valid value: ALL_SUBNETS .'
returned: success
type: list
advertisedIpRanges:
description:
- User-specified list of individual IP ranges to advertise in custom mode. This
field can only be populated if advertiseMode is CUSTOM and is advertised to
all peers of the router. These IP ranges will be advertised in addition to
any specified groups.
- Leave this field blank to advertise no custom IP ranges.
returned: success
type: complex
contains:
range:
description:
- The IP range to advertise. The value must be a CIDR-formatted string.
returned: success
type: str
description:
description:
- User-specified description for the IP range.
returned: success
type: str
region:
description:
- Region where the router resides.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
description=dict(type='str'),
network=dict(required=True, type='dict'),
bgp=dict(type='dict', options=dict(
asn=dict(required=True, type='int'),
advertise_mode=dict(default='DEFAULT', type='str', choices=['DEFAULT', 'CUSTOM']),
advertised_groups=dict(type='list', elements='str'),
advertised_ip_ranges=dict(type='list', elements='dict', options=dict(
range=dict(type='str'),
description=dict(type='str')
))
)),
region=dict(required=True, type='str')
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#router'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
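    # Converge on the desired state: update or delete the resource if it exists, create it if absent.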
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.patch(link, resource_to_request(module)))
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#router',
u'region': module.params.get('region'),
u'name': module.params.get('name'),
u'description': module.params.get('description'),
u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),
u'bgp': RouterBgp(module.params.get('bgp', {}), module).to_request()
}
return_vals = {}
for k, v in request.items():
if v:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/routers/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/routers".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'id': response.get(u'id'),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'name': module.params.get('name'),
u'description': response.get(u'description'),
u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),
u'bgp': RouterBgp(response.get(u'bgp', {}), module).from_response()
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#router')
def wait_for_completion(status, op_result, module):
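    # Poll the regional Operations endpoint until the asynchronous operation reports DONE.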
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation')
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
class RouterBgp(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({
u'asn': self.request.get('asn'),
u'advertiseMode': self.request.get('advertise_mode'),
u'advertisedGroups': self.request.get('advertised_groups'),
u'advertisedIpRanges': RouterAdvertisediprangesArray(self.request.get('advertised_ip_ranges', []), self.module).to_request()
})
def from_response(self):
return remove_nones_from_dict({
u'asn': self.request.get(u'asn'),
u'advertiseMode': self.request.get(u'advertiseMode'),
u'advertisedGroups': self.request.get(u'advertisedGroups'),
u'advertisedIpRanges': RouterAdvertisediprangesArray(self.request.get(u'advertisedIpRanges', []), self.module).from_response()
})
class RouterAdvertisediprangesArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({
u'range': item.get('range'),
u'description': item.get('description')
})
def _response_from_item(self, item):
return remove_nones_from_dict({
u'range': item.get(u'range'),
u'description': item.get(u'description')
})
if __name__ == '__main__':
main()
| gpl-3.0 | -4,769,275,863,563,409,000 | 33.044088 | 138 | 0.609783 | false |
cms-btv-pog/rootpy | rootpy/plotting/canvas.py | 4 | 9920 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
"""
This module implements python classes which inherit from
and extend the functionality of the ROOT canvas classes.
"""
from __future__ import absolute_import
import ROOT
from .base import convert_color
from ..base import NamedObject
from ..context import invisible_canvas
from ..decorators import snake_case_methods
from .. import QROOT, asrootpy
from ..memory.keepalive import keepalive
from array import array
__all__ = [
'Pad',
'Canvas',
]
class _PadBase(NamedObject):
def cd(self, *args):
pad = asrootpy(super(_PadBase, self).cd(*args))
if pad and pad is not self:
keepalive(self, pad)
return pad
def axes(self, ndim=1,
xlimits=None, ylimits=None, zlimits=None,
xbins=1, ybins=1, zbins=1):
"""
Create and return axes on this pad
"""
if xlimits is None:
xlimits = (0, 1)
if ylimits is None:
ylimits = (0, 1)
if zlimits is None:
zlimits = (0, 1)
if ndim == 1:
from .hist import Hist
hist = Hist(1, xlimits[0], xlimits[1])
elif ndim == 2:
from .hist import Hist2D
hist = Hist2D(1, xlimits[0], xlimits[1],
1, ylimits[0], ylimits[1])
elif ndim == 3:
from .hist import Hist3D
hist = Hist3D(1, xlimits[0], xlimits[1],
1, ylimits[0], ylimits[1],
1, zlimits[0], zlimits[1])
else:
raise ValueError("ndim must be 1, 2, or 3")
with self:
hist.Draw('AXIS')
xaxis = hist.xaxis
yaxis = hist.yaxis
if isinstance(xbins, (list, tuple)):
xbins = array('d', xbins)
if hasattr(xbins, '__iter__'):
xaxis.Set(len(xbins) - 1, xbins)
else:
xaxis.Set(xbins, *xlimits)
if ndim > 1:
if isinstance(ybins, (list, tuple)):
ybins = array('d', ybins)
if hasattr(ybins, '__iter__'):
yaxis.Set(len(ybins) - 1, ybins)
else:
yaxis.Set(ybins, *ylimits)
else:
yaxis.limits = ylimits
yaxis.range_user = ylimits
if ndim > 1:
zaxis = hist.zaxis
if ndim == 3:
if isinstance(zbins, (list, tuple)):
zbins = array('d', zbins)
if hasattr(zbins, '__iter__'):
zaxis.Set(len(zbins) - 1, zbins)
else:
zaxis.Set(zbins, *zlimits)
else:
zaxis.limits = zlimits
zaxis.range_user = zlimits
return xaxis, yaxis, zaxis
return xaxis, yaxis
@property
def primitives(self):
return asrootpy(self.GetListOfPrimitives())
def find_all_primitives(self):
"""
        Recursively find all primitives on a pad, even those hiding behind a
GetListOfFunctions() of a primitive
"""
# delayed import to avoid circular import
from .utils import find_all_primitives
return find_all_primitives(self)
@property
def canvas(self):
return asrootpy(self.GetCanvas())
@property
def mother(self):
return asrootpy(self.GetMother())
@property
def margin(self):
return (self.GetLeftMargin(), self.GetRightMargin(),
self.GetBottomMargin(), self.GetTopMargin())
@margin.setter
def margin(self, bounds):
left, right, bottom, top = bounds
super(_PadBase, self).SetMargin(left, right, bottom, top)
@property
def margin_pixels(self):
left, right, bottom, top = self.margin
width = self.width_pixels
height = self.height_pixels
return (int(left * width), int(right * width),
int(bottom * height), int(top * height))
@margin_pixels.setter
def margin_pixels(self, bounds):
left, right, bottom, top = bounds
width = float(self.width_pixels)
height = float(self.height_pixels)
super(_PadBase, self).SetMargin(left / width, right / width,
bottom / height, top / height)
@property
def range(self):
x1, y1 = ROOT.Double(), ROOT.Double()
x2, y2 = ROOT.Double(), ROOT.Double()
super(_PadBase, self).GetRange(x1, y1, x2, y2)
return x1, y1, x2, y2
@range.setter
def range(self, bounds):
x1, y1, x2, y2 = bounds
super(_PadBase, self).Range(x1, y1, x2, y2)
@property
def range_axis(self):
x1, y1 = ROOT.Double(), ROOT.Double()
x2, y2 = ROOT.Double(), ROOT.Double()
super(_PadBase, self).GetRangeAxis(x1, y1, x2, y2)
return x1, y1, x2, y2
@range_axis.setter
def range_axis(self, bounds):
x1, y1, x2, y2 = bounds
super(_PadBase, self).RangeAxis(x1, y1, x2, y2)
def __enter__(self):
self._prev_pad = ROOT.gPad.func()
self.cd()
return self
def __exit__(self, type, value, traceback):
# similar to preserve_current_canvas in rootpy/context.py
if self._prev_pad:
self._prev_pad.cd()
elif ROOT.gPad.func():
# Put things back how they were before.
with invisible_canvas():
# This is a round-about way of resetting gPad to None.
# No other technique I tried could do it.
pass
self._prev_pad = None
return False
@snake_case_methods
class Pad(_PadBase, QROOT.TPad):
_ROOT = QROOT.TPad
def __init__(self, xlow, ylow, xup, yup,
color=-1,
bordersize=-1,
bordermode=-2,
name=None,
title=None):
color = convert_color(color, 'root')
super(Pad, self).__init__(xlow, ylow, xup, yup,
color, bordersize, bordermode,
name=name,
title=title)
def Draw(self, *args):
ret = super(Pad, self).Draw(*args)
canvas = self.GetCanvas()
keepalive(canvas, self)
return ret
@property
def width(self):
return self.GetWNDC()
@property
def height(self):
return self.GetHNDC()
@property
def width_pixels(self):
mother = self.mother
canvas = self.canvas
w = self.GetWNDC()
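        # Pad widths are stored as fractions of the parent pad (NDC), so multiply up the hierarchy before converting to pixels.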
while mother is not canvas:
w *= mother.GetWNDC()
mother = mother.mother
return int(w * mother.width)
@property
def height_pixels(self):
mother = self.mother
canvas = self.canvas
h = self.GetHNDC()
while mother is not canvas:
h *= mother.GetHNDC()
mother = mother.mother
return int(h * mother.height)
@snake_case_methods
class Canvas(_PadBase, QROOT.TCanvas):
_ROOT = QROOT.TCanvas
def __init__(self,
width=None, height=None,
x=None, y=None,
name=None, title=None,
size_includes_decorations=False):
# The following line will trigger finalSetup and start the graphics
# thread if not started already
style = ROOT.gStyle
if width is None:
width = style.GetCanvasDefW()
if height is None:
height = style.GetCanvasDefH()
if x is None:
x = style.GetCanvasDefX()
if y is None:
y = style.GetCanvasDefY()
super(Canvas, self).__init__(x, y, width, height,
name=name, title=title)
if not size_includes_decorations:
# Canvas dimensions include the window manager's decorations by
# default in vanilla ROOT. I think this is a bad default.
# Since in the most common case I don't care about the window
# decorations, the default will be to set the dimensions of the
# paintable area of the canvas.
if self.IsBatch():
self.SetCanvasSize(width, height)
else:
self.SetWindowSize(width + (width - self.GetWw()),
height + (height - self.GetWh()))
self.size_includes_decorations = size_includes_decorations
@property
def width(self):
return self.GetWw()
@width.setter
def width(self, value):
value = int(value)
if self.IsBatch():
self.SetCanvasSize(value, self.GetWh())
else:
curr_height = self.GetWh()
self.SetWindowSize(value, curr_height)
if not getattr(self, 'size_includes_decorations', False):
self.SetWindowSize(value + (value - self.GetWw()),
curr_height + (curr_height - self.GetWh()))
@property
def width_pixels(self):
return self.GetWw()
@width_pixels.setter
def width_pixels(self, value):
self.width = value
@property
def height(self):
return self.GetWh()
@height.setter
def height(self, value):
value = int(value)
if self.IsBatch():
self.SetCanvasSize(self.GetWw(), value)
else:
curr_width = self.GetWw()
self.SetWindowSize(curr_width, value)
if not getattr(self, 'size_includes_decorations', False):
self.SetWindowSize(curr_width + (curr_width - self.GetWw()),
value + (value - self.GetWh()))
@property
def height_pixels(self):
return self.GetWh()
@height_pixels.setter
def height_pixels(self, value):
self.height = value
| gpl-3.0 | -1,152,103,213,300,127,900 | 30.293375 | 78 | 0.538407 | false |
ErinCall/sync-engine | inbox/auth/gmail.py | 3 | 10335 | import requests
from sqlalchemy.orm.exc import NoResultFound
from imapclient import IMAPClient
from inbox.models import Namespace
from inbox.models.backends.gmail import GmailAccount
from inbox.models.backends.gmail import GmailAuthCredentials
from inbox.models.backends.gmail import g_token_manager
from inbox.config import config
from inbox.auth.oauth import OAuthAuthHandler
from inbox.basicauth import (OAuthError, ImapSupportDisabledError)
from inbox.util.url import url_concat
from inbox.providers import provider_info
from inbox.crispin import GmailCrispinClient
from nylas.logging import get_logger
log = get_logger()
PROVIDER = 'gmail'
AUTH_HANDLER_CLS = 'GmailAuthHandler'
# Google OAuth app credentials
OAUTH_CLIENT_ID = config.get_required('GOOGLE_OAUTH_CLIENT_ID')
OAUTH_CLIENT_SECRET = config.get_required('GOOGLE_OAUTH_CLIENT_SECRET')
OAUTH_REDIRECT_URI = config.get_required('GOOGLE_OAUTH_REDIRECT_URI')
OAUTH_AUTHENTICATE_URL = 'https://accounts.google.com/o/oauth2/auth'
OAUTH_ACCESS_TOKEN_URL = 'https://accounts.google.com/o/oauth2/token'
OAUTH_TOKEN_VALIDATION_URL = 'https://www.googleapis.com/oauth2/v1/tokeninfo'
OAUTH_USER_INFO_URL = 'https://www.googleapis.com/oauth2/v1/userinfo'
# NOTE: urls for email address and G+ profile are deprecated
OAUTH_SCOPE = ' '.join([
'https://www.googleapis.com/auth/userinfo.email', # email address
'https://www.googleapis.com/auth/userinfo.profile', # G+ profile
'https://mail.google.com/', # email
'https://www.google.com/m8/feeds', # contacts
'https://www.googleapis.com/auth/calendar' # calendar
])
class GmailAuthHandler(OAuthAuthHandler):
OAUTH_CLIENT_ID = OAUTH_CLIENT_ID
OAUTH_CLIENT_SECRET = OAUTH_CLIENT_SECRET
OAUTH_REDIRECT_URI = OAUTH_REDIRECT_URI
OAUTH_AUTHENTICATE_URL = OAUTH_AUTHENTICATE_URL
OAUTH_ACCESS_TOKEN_URL = OAUTH_ACCESS_TOKEN_URL
OAUTH_TOKEN_VALIDATION_URL = OAUTH_TOKEN_VALIDATION_URL
OAUTH_USER_INFO_URL = OAUTH_USER_INFO_URL
OAUTH_SCOPE = OAUTH_SCOPE
def _authenticate_IMAP_connection(self, account, conn):
"""
Overrides the same method in OAuthAuthHandler so that
we can choose a token w/ the appropriate scope.
"""
host, port = account.imap_endpoint
try:
token = g_token_manager.get_token_for_email(account)
conn.oauth2_login(account.email_address, token)
except IMAPClient.Error as exc:
exc = _process_imap_exception(exc)
            # Raise all imap-disabled errors except the authentication_failed
            # error, which we handle differently
if isinstance(exc, ImapSupportDisabledError) and \
exc.reason != 'authentication_failed':
raise exc
log.error('Error during IMAP XOAUTH2 login',
account_id=account.id, email=account.email_address,
host=host, port=port, error=exc)
if not isinstance(exc, ImapSupportDisabledError):
raise # Unknown IMAPClient error, reraise
# If we got an AUTHENTICATIONFAILED response, force a token refresh
# and try again. If IMAP auth still fails, it's likely that IMAP
            # access is disabled, so propagate that error.
token = g_token_manager.get_token_for_email(
account, force_refresh=True)
try:
conn.oauth2_login(account.email_address, token)
except IMAPClient.Error as exc:
exc = _process_imap_exception(exc)
if not isinstance(exc, ImapSupportDisabledError) or \
exc.reason != 'authentication_failed':
raise exc
else:
# Instead of authentication_failed, report imap disabled
raise ImapSupportDisabledError('imap_disabled_for_account')
def create_account(self, db_session, email_address, response):
email_address = response.get('email')
# See if the account exists in db, otherwise create it
try:
account = db_session.query(GmailAccount) \
.filter_by(email_address=email_address).one()
except NoResultFound:
namespace = Namespace()
account = GmailAccount(namespace=namespace)
        # We only get refresh tokens on initial login (or failed credentials);
        # otherwise, we don't force the login screen and therefore don't get a
        # refresh token back from Google.
new_refresh_token = response.get('refresh_token')
if new_refresh_token:
account.refresh_token = new_refresh_token
else:
if (len(account.valid_auth_credentials) == 0 or
account.sync_state == 'invalid'):
                # We got a new auth without a refresh token, so we need to back
                # out and force the auth flow, since we don't already have
                # a refresh token (or the ones we have don't work).
raise OAuthError("No valid refresh tokens")
account.email_address = email_address
account.family_name = response.get('family_name')
account.given_name = response.get('given_name')
account.name = response.get('name')
account.gender = response.get('gender')
account.g_id = response.get('id')
account.g_user_id = response.get('user_id')
account.link = response.get('link')
account.locale = response.get('locale')
account.picture = response.get('picture')
account.home_domain = response.get('hd')
account.sync_email = (account.sync_email or
response.get('sync_email', True))
account.sync_contacts = (account.sync_contacts or
response.get('contacts', True))
account.sync_events = (account.sync_events or
response.get('events', True))
# These values are deprecated and should not be used, along
# with the account's refresh_token. Access all these values
# through the GmailAuthCredentials objects instead.
account.client_id = response.get('client_id')
account.client_secret = response.get('client_secret')
account.scope = response.get('scope')
account.g_id_token = response.get('id_token')
# Don't need to actually save these now
# tok = response.get('access_token')
# expires_in = response.get('expires_in')
client_id = response.get('client_id') or OAUTH_CLIENT_ID
client_secret = response.get('client_secret') or OAUTH_CLIENT_SECRET
if new_refresh_token:
# See if we already have credentials for this client_id/secret
# pair. If those don't exist, make a new GmailAuthCredentials
auth_creds = next(
(auth_creds for auth_creds in account.auth_credentials
if (auth_creds.client_id == client_id and
auth_creds.client_secret == client_secret)),
GmailAuthCredentials())
auth_creds.gmailaccount = account
auth_creds.scopes = response.get('scope')
auth_creds.g_id_token = response.get('id_token')
auth_creds.client_id = client_id
auth_creds.client_secret = client_secret
auth_creds.refresh_token = new_refresh_token
auth_creds.is_valid = True
try:
self.verify_config(account)
except ImapSupportDisabledError:
if account.sync_email:
raise
# Ensure account has sync enabled.
account.enable_sync()
return account
def validate_token(self, access_token):
response = requests.get(self.OAUTH_TOKEN_VALIDATION_URL,
params={'access_token': access_token})
validation_dict = response.json()
if 'error' in validation_dict:
raise OAuthError(validation_dict['error'])
return validation_dict
def verify_config(self, account):
"""Verifies configuration, specifically presence of 'All Mail' folder.
Will raise an inbox.crispin.GmailSettingError if not present.
"""
conn = self.connect_account(account)
# make a crispin client and check the folders
client = GmailCrispinClient(account.id,
provider_info('gmail'),
account.email_address,
conn,
readonly=True)
client.sync_folders()
conn.logout()
return True
def interactive_auth(self, email_address=None):
url_args = {'redirect_uri': self.OAUTH_REDIRECT_URI,
'client_id': self.OAUTH_CLIENT_ID,
'response_type': 'code',
'scope': self.OAUTH_SCOPE,
'access_type': 'offline'}
if email_address:
url_args['login_hint'] = email_address
url = url_concat(self.OAUTH_AUTHENTICATE_URL, url_args)
print 'To authorize Inbox, visit this URL and follow the directions:'
print '\n{}'.format(url)
while True:
auth_code = raw_input('Enter authorization code: ').strip()
try:
auth_response = self._get_authenticated_user(auth_code)
auth_response['contacts'] = True
auth_response['events'] = True
return auth_response
except OAuthError:
print "\nInvalid authorization code, try again...\n"
auth_code = None
def _process_imap_exception(exc):
if 'Lookup failed' in exc.message:
# Gmail is disabled for this apps account
return ImapSupportDisabledError('gmail_disabled_for_domain')
elif 'IMAP access is disabled for your domain.' in exc.message:
# IMAP is disabled for this domain
return ImapSupportDisabledError('imap_disabled_for_domain')
elif exc.message.startswith('[AUTHENTICATIONFAILED] Invalid credentials '
'(Failure)'):
return ImapSupportDisabledError('authentication_failed')
else:
# Unknown IMAPClient error
return exc
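# Illustrative sketch (not part of the original module) of how the two public
# entry points above are typically wired together. `handler` is assumed to be
# a GmailAuthHandler instance and `db_session` an open SQLAlchemy session,
# both obtained elsewhere.
#
#     response = handler.interactive_auth('user@example.com')
#     account = handler.create_account(db_session, response.get('email'),
#                                      response)
#     db_session.add(account)
#     db_session.commit()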
| agpl-3.0 | 3,955,052,443,224,165,400 | 42.242678 | 79 | 0.619545 | false |
cneill/designate | designate/api/v2/controllers/rest.py | 2 | 5787 | # flake8: noqa
# Copyright (c) <2011>, Jonathan LaCour
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import inspect
import pecan
import pecan.rest
import pecan.routing
from oslo_log import log as logging
from designate import exceptions
from designate.central import rpcapi as central_rpcapi
from designate.i18n import _
LOG = logging.getLogger(__name__)
class RestController(pecan.rest.RestController):
"""
Extension for Pecan's RestController to better handle POST/PUT/PATCH
requests.
Ideally, we get these additions merged upstream.
"""
# default sort_keys. The Controllers can override this.
SORT_KEYS = ['created_at', 'id']
@property
def central_api(self):
return central_rpcapi.CentralAPI.get_instance()
def _apply_filter_params(self, params, accepted_filters, criterion):
for k in accepted_filters:
if k in params:
criterion[k] = params[k].replace("*", "%")
return criterion
def _handle_post(self, method, remainder):
'''
Routes ``POST`` actions to the appropriate controller.
'''
        # route to a post_all or post if no additional parts are available
if not remainder or remainder == ['']:
controller = self._find_controller('post_all', 'post')
if controller:
return controller, []
pecan.abort(405)
controller = getattr(self, remainder[0], None)
if controller and not inspect.ismethod(controller):
return pecan.routing.lookup_controller(controller, remainder[1:])
# finally, check for the regular post_one/post requests
controller = self._find_controller('post_one', 'post')
if controller:
return controller, remainder
pecan.abort(405)
def _handle_patch(self, method, remainder):
'''
Routes ``PATCH`` actions to the appropriate controller.
'''
        # route to a patch_all or patch if no additional parts are available
if not remainder or remainder == ['']:
controller = self._find_controller('patch_all', 'patch')
if controller:
return controller, []
pecan.abort(405)
controller = getattr(self, remainder[0], None)
if controller and not inspect.ismethod(controller):
return pecan.routing.lookup_controller(controller, remainder[1:])
# finally, check for the regular patch_one/patch requests
controller = self._find_controller('patch_one', 'patch')
if controller:
return controller, remainder
pecan.abort(405)
def _handle_put(self, method, remainder):
'''
Routes ``PUT`` actions to the appropriate controller.
'''
        # route to a put_all or put if no additional parts are available
if not remainder or remainder == ['']:
controller = self._find_controller('put_all', 'put')
if controller:
return controller, []
pecan.abort(405)
controller = getattr(self, remainder[0], None)
if controller and not inspect.ismethod(controller):
return pecan.routing.lookup_controller(controller, remainder[1:])
# finally, check for the regular put_one/put requests
controller = self._find_controller('put_one', 'put')
if controller:
return controller, remainder
pecan.abort(405)
def _handle_delete(self, method, remainder):
'''
Routes ``DELETE`` actions to the appropriate controller.
'''
        # route to a delete_all or delete if no additional parts are available
if not remainder or remainder == ['']:
controller = self._find_controller('delete_all', 'delete')
if controller:
return controller, []
pecan.abort(405)
controller = getattr(self, remainder[0], None)
if controller and not inspect.ismethod(controller):
return pecan.routing.lookup_controller(controller, remainder[1:])
# finally, check for the regular delete_one/delete requests
controller = self._find_controller('delete_one', 'delete')
if controller:
return controller, remainder
pecan.abort(405)
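# Illustrative sketch (not part of the original module): a controller relying
# on the routing helpers above. The resource name, method bodies and the
# "things" URL segment are hypothetical; with these method names,
# ``POST /things`` is dispatched to ``post_all`` and ``POST /things/<id>``
# falls through to ``post_one`` via ``_handle_post``.
class _ExampleThingsController(RestController):

    @pecan.expose('json')
    def post_all(self):
        # Create a new resource from the request body (hypothetical).
        return {'created': True}

    @pecan.expose('json')
    def post_one(self, thing_id):
        # Update the resource identified by the trailing URL segment.
        return {'updated': thing_id}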
| apache-2.0 | 2,190,192,917,001,468,000 | 37.324503 | 79 | 0.658891 | false |
t3dev/odoo | addons/payment/controllers/portal.py | 1 | 11876 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import http, _
from odoo.http import request
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
from datetime import datetime, timedelta
_logger = logging.getLogger(__name__)
class PaymentProcessing(http.Controller):
@staticmethod
def remove_payment_transaction(transactions):
tx_ids_list = request.session.get("__payment_tx_ids__", [])
if transactions:
for tx in transactions:
if tx.id in tx_ids_list:
tx_ids_list.remove(tx.id)
else:
return False
request.session["__payment_tx_ids__"] = tx_ids_list
return True
@staticmethod
def add_payment_transaction(transactions):
if not transactions:
return False
tx_ids_list = set(request.session.get("__payment_tx_ids__", [])) | set(transactions.ids)
request.session["__payment_tx_ids__"] = tx_ids_list
return True
@staticmethod
def get_payment_transaction_ids():
        # return the ids and not the recordset, since we might need to
        # sudo the browse to access all the records.
        # I prefer to let the controller choose when to access payment.transaction using sudo.
return request.session.get("__payment_tx_ids__", [])
@http.route(['/payment/process'], type="http", auth="public", website=True)
def payment_status_page(self, **kwargs):
        # When the customer is redirected to this website page,
        # we retrieve the payment transaction list from his session
tx_ids_list = self.get_payment_transaction_ids()
payment_transaction_ids = request.env['payment.transaction'].sudo().browse(tx_ids_list).exists()
render_ctx = {
'payment_tx_ids': payment_transaction_ids.ids,
}
return request.render("payment.payment_process_page", render_ctx)
@http.route(['/payment/process/poll'], type="json", auth="public")
def payment_status_poll(self):
# retrieve the transactions
tx_ids_list = self.get_payment_transaction_ids()
payment_transaction_ids = request.env['payment.transaction'].sudo().search([
('id', 'in', list(tx_ids_list)),
('date', '>=', (datetime.now() - timedelta(days=1)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
])
if not payment_transaction_ids:
return {
'success': False,
'error': 'no_tx_found',
}
processed_tx = payment_transaction_ids.filtered('is_processed')
self.remove_payment_transaction(processed_tx)
        # create the returned dictionary
result = {
'success': True,
'transactions': [],
}
        # populate the returned dictionary with the transactions data
for tx in payment_transaction_ids:
message_to_display = tx.acquirer_id[tx.state + '_msg'] if tx.state in ['done', 'pending', 'cancel', 'error'] else None
result['transactions'].append({
'reference': tx.reference,
'state': tx.state,
'return_url': tx.return_url,
'is_processed': tx.is_processed,
'state_message': tx.state_message,
'message_to_display': message_to_display,
'amount': tx.amount,
'currency': tx.currency_id.name,
'acquirer_provider': tx.acquirer_id.provider,
})
tx_to_process = payment_transaction_ids.filtered(lambda x: x.state == 'done' and x.is_processed is False)
try:
tx_to_process._post_process_after_done()
except Exception as e:
request.env.cr.rollback()
result['success'] = False
result['error'] = str(e)
_logger.error("Error while processing transaction(s) %s, exception \"%s\"", tx_to_process.ids, str(e))
return result
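# Illustrative sketch (not part of the original module): the intended flow for
# the session helpers above. A payment controller records the transaction in
# the session and redirects the customer to the generic processing page, whose
# JavaScript polls /payment/process/poll until the transaction is processed.
# The same pattern is used by WebsitePayment.transaction() below; `tx` is
# assumed to be a payment.transaction record created beforehand.
#
#     PaymentProcessing.add_payment_transaction(tx)
#     return request.redirect('/payment/process')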
class WebsitePayment(http.Controller):
@http.route(['/my/payment_method'], type='http', auth="user", website=True)
def payment_method(self, **kwargs):
acquirers = list(request.env['payment.acquirer'].search([
('website_published', '=', True), ('registration_view_template_id', '!=', False),
('payment_flow', '=', 's2s'), ('company_id', '=', request.env.user.company_id.id)
]))
partner = request.env.user.partner_id
payment_tokens = partner.payment_token_ids
payment_tokens |= partner.commercial_partner_id.sudo().payment_token_ids
return_url = request.params.get('redirect', '/my/payment_method')
values = {
'pms': payment_tokens,
'acquirers': acquirers,
'error_message': [kwargs['error']] if kwargs.get('error') else False,
'return_url': return_url,
'bootstrap_formatting': True,
'partner_id': partner.id
}
return request.render("payment.pay_methods", values)
@http.route(['/website_payment/pay'], type='http', auth='public', website=True)
def pay(self, reference='', order_id=None, amount=False, currency_id=None, acquirer_id=None, **kw):
env = request.env
user = env.user.sudo()
# Default values
values = {
'amount': 0.0,
'currency': user.company_id.currency_id,
}
# Check sale order
if order_id:
try:
order_id = int(order_id)
order = env['sale.order'].browse(order_id)
values.update({
'currency': order.currency_id,
'amount': order.amount_total,
'order_id': order_id
})
except:
order_id = None
# Check currency
if currency_id:
try:
currency_id = int(currency_id)
values['currency'] = env['res.currency'].browse(currency_id)
except:
pass
# Check amount
if amount:
try:
amount = float(amount)
values['amount'] = amount
except:
pass
# Check reference
reference_values = order_id and {'sale_order_ids': [(4, order_id)]} or {}
values['reference'] = env['payment.transaction']._compute_reference(values=reference_values, prefix=reference)
# Check acquirer
acquirers = None
if acquirer_id:
acquirers = env['payment.acquirer'].browse(int(acquirer_id))
if not acquirers:
acquirers = env['payment.acquirer'].search([('website_published', '=', True), ('company_id', '=', user.company_id.id)])
# Check partner
partner_id = user.partner_id.id if not user._is_public() else False
values.update({
'partner_id': partner_id,
'bootstrap_formatting': True,
'error_msg': kw.get('error_msg')
})
values['acquirers'] = [acq for acq in acquirers if acq.payment_flow in ['form', 's2s']]
values['pms'] = request.env['payment.token'].search([('acquirer_id', 'in', acquirers.filtered(lambda x: x.payment_flow == 's2s').ids)])
return request.render('payment.pay', values)
@http.route(['/website_payment/transaction/<string:reference>/<string:amount>/<string:currency_id>',
'/website_payment/transaction/v2/<string:amount>/<string:currency_id>/<path:reference>',], type='json', auth='public')
def transaction(self, acquirer_id, reference, amount, currency_id, **kwargs):
partner_id = request.env.user.partner_id.id if not request.env.user._is_public() else False
acquirer = request.env['payment.acquirer'].browse(acquirer_id)
order_id = kwargs.get('order_id')
reference_values = order_id and {'sale_order_ids': [(4, order_id)]} or {}
reference = request.env['payment.transaction']._compute_reference(values=reference_values, prefix=reference)
values = {
'acquirer_id': int(acquirer_id),
'reference': reference,
'amount': float(amount),
'currency_id': int(currency_id),
'partner_id': partner_id,
'type': 'form_save' if acquirer.save_token != 'none' and partner_id else 'form',
}
if order_id:
values['sale_order_ids'] = [(6, 0, [order_id])]
reference_values = order_id and {'sale_order_ids': [(4, order_id)]} or {}
reference_values.update(acquirer_id=int(acquirer_id))
values['reference'] = request.env['payment.transaction']._compute_reference(values=reference_values, prefix=reference)
tx = request.env['payment.transaction'].sudo().with_context(lang=None).create(values)
tx.return_url = '/website_payment/confirm?tx_id=%d' % tx.id
PaymentProcessing.add_payment_transaction(tx)
render_values = {
'partner_id': partner_id,
}
return acquirer.sudo().render(tx.reference, float(amount), int(currency_id), values=render_values)
@http.route(['/website_payment/token/<string:reference>/<string:amount>/<string:currency_id>',
'/website_payment/token/v2/<string:amount>/<string:currency_id>/<path:reference>'], type='http', auth='public', website=True)
def payment_token(self, pm_id, reference, amount, currency_id, return_url=None, **kwargs):
token = request.env['payment.token'].browse(int(pm_id))
order_id = kwargs.get('order_id')
if not token:
return request.redirect('/website_payment/pay?error_msg=%s' % _('Cannot setup the payment.'))
partner_id = request.env.user.partner_id.id if not request.env.user._is_public() else False
values = {
'acquirer_id': token.acquirer_id.id,
'reference': reference,
'amount': float(amount),
'currency_id': int(currency_id),
'partner_id': partner_id,
'payment_token_id': pm_id,
'type': 'form_save' if token.acquirer_id.save_token != 'none' and partner_id else 'form',
'return_url': return_url,
}
if order_id:
values['sale_order_ids'] = [(6, 0, [order_id])]
tx = request.env['payment.transaction'].sudo().with_context(lang=None).create(values)
PaymentProcessing.add_payment_transaction(tx)
try:
res = tx.s2s_do_transaction()
if tx.state == 'done':
tx.return_url = return_url or '/website_payment/confirm?tx_id=%d' % tx.id
valid_state = 'authorized' if tx.acquirer_id.capture_manually else 'done'
if not res or tx.state != valid_state:
tx.return_url = '/website_payment/pay?error_msg=%s' % _('Payment transaction failed.')
return request.redirect('/payment/process')
except Exception as e:
return request.redirect('/payment/process')
@http.route(['/website_payment/confirm'], type='http', auth='public', website=True)
def confirm(self, **kw):
tx_id = int(kw.get('tx_id', 0))
if tx_id:
tx = request.env['payment.transaction'].browse(tx_id)
if tx.state == 'done':
status = 'success'
message = tx.acquirer_id.done_msg
elif tx.state == 'pending':
status = 'warning'
message = tx.acquirer_id.pending_msg
else:
status = 'danger'
message = tx.acquirer_id.error_msg
PaymentProcessing.remove_payment_transaction(tx)
return request.render('payment.confirm', {'tx': tx, 'status': status, 'message': message})
else:
return request.redirect('/my/home')
| gpl-3.0 | -7,932,493,856,523,600,000 | 41.113475 | 143 | 0.579909 | false |
tximikel/kuma | kuma/wiki/content.py | 9 | 40715 | # -*- coding: utf-8 -*-
from collections import defaultdict
import re
import urllib
from urllib import urlencode
from urlparse import urlparse
import html5lib
from html5lib.filters._base import Filter as html5lib_Filter
import newrelic.agent
from lxml import etree
from pyquery import PyQuery as pq
from tower import ugettext as _
from kuma.core.urlresolvers import reverse
from .utils import locale_and_slug_from_path
# A few regex patterns for various parsing efforts in this file
MACRO_RE = re.compile(r'\{\{\s*([^\(\} ]+)', re.MULTILINE)
LEVEL_RE = re.compile(r'^h(\d)$')
TEMPLATE_PARAMS_RE = re.compile(r'''^template\(['"]([^'"]+)['"],\s*\[([^\]]+)]''', re.I)
TEMPLATE_RE = re.compile(r'''^template\(['"]([^'"]+)['"]''', re.I)
# Regex to extract language from MindTouch code elements' function attribute
MT_SYNTAX_RE = re.compile(r'syntax\.(\w+)')
# map for mt syntax values that should turn into new brush values
MT_SYNTAX_BRUSH_MAP = {
'javascript': 'js',
}
# List of tags supported for section editing. A subset of everything that could
# be considered an HTML5 section
SECTION_TAGS = ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hgroup', 'section')
HEAD_TAGS = ('h1', 'h2', 'h3', 'h4', 'h5', 'h6')
# Head tags to be included in the table of contents
HEAD_TAGS_TOC = ('h2', 'h3', 'h4')
# Allowed tags in the table of contents list
TAGS_IN_TOC = ('code')
# Special paths within /docs/ URL-space that do not represent documents for the
# purposes of link annotation. Doesn't include everything from urls.py, but
# just the likely candidates for links.
DOC_SPECIAL_PATHS = ('new', 'tag', 'feeds', 'templates', 'needs-review')
@newrelic.agent.function_trace()
def parse(src, is_full_document=False):
return ContentSectionTool(src, is_full_document)
@newrelic.agent.function_trace()
def get_content_sections(src=''):
"""
Gets sections in a document
"""
sections = []
if src:
attr = '[id]'
selector = (attr + ',').join(SECTION_TAGS) + attr
try:
document = pq(src)
except etree.ParserError:
pass
else:
for element in document.find(selector):
sections.append({'title': element.text,
'id': element.attrib.get('id')})
return sections
@newrelic.agent.function_trace()
def get_seo_description(content, locale=None, strip_markup=True):
# Create an SEO summary
# TODO: Google only takes the first 180 characters, so maybe we find a
# logical way to find the end of sentence before 180?
seo_summary = ''
if content:
# Try constraining the search for summary to an explicit "Summary"
# section, if any.
summary_section = (parse(content).extractSection('Summary')
.serialize())
if summary_section:
content = summary_section
        # Need to add a BR to the page content, otherwise PyQuery won't find
        # a <p></p> element if it's the only element in the doc_html
seo_analyze_doc_html = content + '<br />'
page = pq(seo_analyze_doc_html)
# Look for the SEO summary class first
summaryClasses = page.find('.seoSummary')
if len(summaryClasses):
if strip_markup:
seo_summary = summaryClasses.text()
else:
seo_summary = summaryClasses.html()
else:
paragraphs = page.find('p')
if paragraphs.length:
for p in range(len(paragraphs)):
item = paragraphs.eq(p)
if strip_markup:
text = item.text()
else:
text = item.html()
# Checking for a parent length of 2
# because we don't want p's wrapped
# in DIVs ("<div class='warning'>") and pyQuery adds
# "<html><div>" wrapping to entire document
if (text and len(text) and
'Redirect' not in text and
text.find(u'«') == -1 and
                        text.find('&laquo') == -1 and
item.parents().length == 2):
seo_summary = text.strip()
break
if strip_markup:
# Post-found cleanup
# remove markup chars
seo_summary = seo_summary.replace('<', '').replace('>', '')
# remove spaces around some punctuation added by PyQuery
if locale == 'en-US':
seo_summary = re.sub(r' ([,\)\.])', r'\1', seo_summary)
seo_summary = re.sub(r'(\() ', r'\1', seo_summary)
return seo_summary
@newrelic.agent.function_trace()
def filter_out_noinclude(src):
"""
Quick and dirty filter to remove <div class="noinclude"> blocks
"""
# NOTE: This started as an html5lib filter, but it started getting really
# complex. Seems like pyquery works well enough without corrupting
# character encoding.
if not src:
return ''
doc = pq(src)
doc.remove('*[class=noinclude]')
return doc.html()
@newrelic.agent.function_trace()
def extract_code_sample(id, src):
"""
Extract a dict containing the html, css, and js listings for a given
code sample identified by ID.
This should be pretty agnostic to markup patterns, since it just requires a
    parent container with an ID and 3 child elements somewhere within with
class names "html", "css", and "js" - and our syntax highlighting already
does that with <pre>'s
"""
parts = ('html', 'css', 'js')
data = dict((x, None) for x in parts)
if not src:
return data
section = parse(src).extractSection(id).serialize()
if section:
# HACK: Ensure the extracted section has a container, in case it
# consists of a single element.
sample = pq('<section>%s</section>' % section)
else:
# If no section, fall back to plain old ID lookup
sample = pq(src).find('[id="%s"]' % id)
selector_templates = (
'.%s',
# HACK: syntaxhighlighter (ab)uses the className as a
# semicolon-separated options list...
'pre[class*="brush:%s"]',
'pre[class*="%s;"]'
)
for part in parts:
selector = ','.join(selector_template % part
for selector_template in selector_templates)
src = sample.find(selector).text()
if src is not None:
            # Bug 819999: &nbsp; gets decoded to \xa0, which trips up CSS
src = src.replace(u'\xa0', u' ')
if src:
data[part] = src
return data
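# Illustrative usage of extract_code_sample() above (not part of the original
# module); `doc_html` is an assumed variable holding the page's rendered HTML.
# Given markup such as
#
#     <div id="example1">
#       <pre class="brush: html">...</pre>
#       <pre class="brush: css">...</pre>
#       <pre class="brush: js">...</pre>
#     </div>
#
# extract_code_sample('example1', doc_html) returns a dict of the form
# {'html': ..., 'css': ..., 'js': ...}, with None for any part that is missing.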
@newrelic.agent.function_trace()
def extract_css_classnames(content):
"""
Extract the unique set of class names used in the content
"""
classnames = set()
for element in pq(content).find('*'):
css_classes = element.attrib.get('class')
if css_classes:
classnames.update(css_classes.split(' '))
return list(classnames)
@newrelic.agent.function_trace()
def extract_html_attributes(content):
"""
Extract the unique set of HTML attributes used in the content
"""
try:
attribs = []
for token in parse(content).stream:
if token['type'] == 'StartTag':
for (namespace, name), value in token['data'].items():
attribs.append((name, value))
return ['%s="%s"' % (k, v) for k, v in attribs]
except:
return []
@newrelic.agent.function_trace()
def extract_kumascript_macro_names(content):
"""
Extract a unique set of KumaScript macro names used in the content
"""
names = set()
try:
txt = []
for token in parse(content).stream:
if token['type'] in ('Characters', 'SpaceCharacters'):
txt.append(token['data'])
txt = ''.join(txt)
names.update(MACRO_RE.findall(txt))
except:
pass
return list(names)
class ContentSectionTool(object):
def __init__(self, src=None, is_full_document=False):
self.tree = html5lib.treebuilders.getTreeBuilder("etree")
self.parser = html5lib.HTMLParser(tree=self.tree,
namespaceHTMLElements=False)
self._serializer = None
self._default_serializer_options = {
'omit_optional_tags': False, 'quote_attr_values': True,
'escape_lt_in_attrs': True}
self._serializer_options = None
self.walker = html5lib.treewalkers.getTreeWalker("etree")
self.src = ''
self.doc = None
self.stream = []
if src:
self.parse(src, is_full_document)
@newrelic.agent.function_trace()
def parse(self, src, is_full_document):
self.src = src
if is_full_document:
self.doc = self.parser.parse(self.src, parseMeta=True)
else:
self.doc = self.parser.parseFragment(self.src)
self.stream = self.walker(self.doc)
return self
def _get_serializer(self, **options):
soptions = self._default_serializer_options.copy()
soptions.update(options)
if not (self._serializer and self._serializer_options == soptions):
self._serializer = html5lib.serializer.htmlserializer.HTMLSerializer(
**soptions)
self._serializer_options = soptions
return self._serializer
def serialize(self, stream=None, **options):
if stream is None:
stream = self.stream
return u"".join(self._get_serializer(**options).serialize(stream))
def __unicode__(self):
return self.serialize()
def filter(self, filter_cls):
self.stream = filter_cls(self.stream)
return self
@newrelic.agent.function_trace()
def injectSectionIDs(self):
self.stream = SectionIDFilter(self.stream)
return self
@newrelic.agent.function_trace()
def injectSectionEditingLinks(self, slug, locale):
self.stream = SectionEditLinkFilter(self.stream, slug, locale)
return self
@newrelic.agent.function_trace()
def absolutizeAddresses(self, base_url, tag_attributes):
self.stream = URLAbsolutionFilter(self.stream, base_url, tag_attributes)
return self
@newrelic.agent.function_trace()
def annotateLinks(self, base_url):
self.stream = LinkAnnotationFilter(self.stream, base_url)
return self
@newrelic.agent.function_trace()
def filterIframeHosts(self, hosts):
self.stream = IframeHostFilter(self.stream, hosts)
return self
@newrelic.agent.function_trace()
def filterEditorSafety(self):
self.stream = EditorSafetyFilter(self.stream)
return self
@newrelic.agent.function_trace()
def extractSection(self, id, ignore_heading=False):
self.stream = SectionFilter(self.stream, id,
ignore_heading=ignore_heading)
return self
@newrelic.agent.function_trace()
def replaceSection(self, id, replace_src, ignore_heading=False):
replace_stream = self.walker(self.parser.parseFragment(replace_src))
self.stream = SectionFilter(self.stream, id, replace_stream,
ignore_heading=ignore_heading)
return self
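# Illustrative usage of the chainable API above (not part of the original
# module); `src` is assumed to be a document's HTML fragment:
#
#     html_with_ids = parse(src).injectSectionIDs().serialize()
#     summary_html = parse(src).extractSection('Summary').serialize()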
class URLAbsolutionFilter(html5lib_Filter):
"""
Filter which turns relative links into absolute links.
Originally created for generating sphinx templates.
"""
def __init__(self, source, base_url, tag_attributes):
html5lib_Filter.__init__(self, source)
self.base_url = base_url
self.tag_attributes = tag_attributes
def __iter__(self):
input = html5lib_Filter.__iter__(self)
for token in input:
if (token['type'] == 'StartTag' and
token['name'] in self.tag_attributes):
attrs = dict(token['data'])
# If the element has the attribute we're looking for
desired_attr = self.tag_attributes[token['name']]
for (namespace, name), value in attrs.items():
if desired_attr == name:
if not value.startswith('http'):
if value.startswith('//') or value.startswith('{{'):
# Do nothing for absolute addresses or apparent
# template variable output
attrs[(namespace, name)] = value
elif value.startswith('/'):
# Starts with "/", so just add the base url
attrs[(namespace, name)] = self.base_url + value
else:
attrs[(namespace, name)] = self.base_url + '/' + value
token['data'] = attrs
break
yield token
class LinkAnnotationFilter(html5lib_Filter):
"""
Filter which annotates links to indicate things like whether they're
external, if they point to non-existent wiki pages, etc.
"""
# TODO: Need more external link prefixes, here?
EXTERNAL_PREFIXES = ('http:', 'https:', 'ftp:',)
def __init__(self, source, base_url):
html5lib_Filter.__init__(self, source)
self.base_url = base_url
self.base_url_parsed = urlparse(base_url)
def __iter__(self):
from kuma.wiki.models import Document
input = html5lib_Filter.__iter__(self)
# Pass #1: Gather all the link URLs and prepare annotations
links = {}
buffer = []
for token in input:
buffer.append(token)
if token['type'] == 'StartTag' and token['name'] == 'a':
for (namespace, name), value in token['data'].items():
if name == 'href':
href = value
href_parsed = urlparse(href)
if href_parsed.netloc == self.base_url_parsed.netloc:
# Squash site-absolute URLs to site-relative paths.
href = href_parsed.path
# Prepare annotations record for this path.
links[href] = {'classes': []}
needs_existence_check = defaultdict(lambda: defaultdict(set))
# Run through all the links and check for annotatable conditions.
for href in links.keys():
# Is this an external URL?
is_external = False
for prefix in self.EXTERNAL_PREFIXES:
if href.startswith(prefix):
is_external = True
break
if is_external:
links[href]['classes'].append('external')
continue
# TODO: Should this also check for old-school mindtouch URLs? Or
# should we encourage editors to convert to new-style URLs to take
# advantage of link annotation? (I'd say the latter)
# Is this a kuma doc URL?
if '/docs/' in href:
# Check if this is a special docs path that's exempt from "new"
skip = False
for path in DOC_SPECIAL_PATHS:
if '/docs/%s' % path in href:
skip = True
if skip:
continue
href_locale, href_path = href.split(u'/docs/', 1)
if href_locale.startswith(u'/'):
href_locale = href_locale[1:]
if '#' in href_path:
# If present, discard the hash anchor
href_path, _, _ = href_path.partition('#')
# Handle any URL-encoded UTF-8 characters in the path
href_path = href_path.encode('utf-8', 'ignore')
href_path = urllib.unquote(href_path)
href_path = href_path.decode('utf-8', 'ignore')
# Try to sort out the locale and slug through some of our
# redirection logic.
locale, slug, needs_redirect = (
locale_and_slug_from_path(href_path,
path_locale=href_locale))
# Gather up this link for existence check
needs_existence_check[locale.lower()][slug.lower()].add(href)
# Perform existence checks for all the links, using one DB query per
# locale for all the candidate slugs.
for locale, slug_hrefs in needs_existence_check.items():
existing_slugs = (Document.objects
.filter(locale=locale,
slug__in=slug_hrefs.keys())
.values_list('slug', flat=True))
# Remove the slugs that pass existence check.
for slug in existing_slugs:
lslug = slug.lower()
if lslug in slug_hrefs:
del slug_hrefs[lslug]
# Mark all the links whose slugs did not come back from the DB
# query as "new"
for slug, hrefs in slug_hrefs.items():
for href in hrefs:
links[href]['classes'].append('new')
# Pass #2: Filter the content, annotating links
for token in buffer:
if token['type'] == 'StartTag' and token['name'] == 'a':
attrs = dict(token['data'])
names = [name for (namespace, name) in attrs.keys()]
for (namespace, name), value in attrs.items():
if name == 'href':
href = value
href_parsed = urlparse(value)
if href_parsed.netloc == self.base_url_parsed.netloc:
# Squash site-absolute URLs to site-relative paths.
href = href_parsed.path
if href in links:
# Update class names on this link element.
if 'class' in names:
classes = set(attrs[(namespace, 'class')].split(u' '))
else:
classes = set()
classes.update(links[href]['classes'])
if classes:
attrs[(namespace, u'class')] = u' '.join(classes)
token['data'] = attrs
yield token
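# Illustrative usage of the filter above (not part of the original module): it
# is normally applied through ContentSectionTool.annotateLinks(); `content` is
# an assumed HTML fragment and `base_url` the site's base URL:
#
#     annotated = parse(content).annotateLinks(base_url).serialize()
#
# after which links to nonexistent wiki pages carry class="new" and off-site
# links carry class="external".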
class SectionIDFilter(html5lib_Filter):
"""
Filter which ensures section-related elements have unique IDs
"""
def __init__(self, source):
html5lib_Filter.__init__(self, source)
self.id_cnt = 0
self.known_ids = set()
def gen_id(self):
"""Generate a unique ID"""
while True:
self.id_cnt += 1
id = 'sect%s' % self.id_cnt
if id not in self.known_ids:
self.known_ids.add(id)
return id
# MindTouch encodes these characters, so we have to encode them
# too.
non_url_safe = ['"', '#', '$', '%', '&', '+',
',', '/', ':', ';', '=', '?',
'@', '[', '\\', ']', '^', '`',
'{', '|', '}', '~']
def slugify(self, text):
"""
Turn the text content of a header into a slug for use in an ID
"""
non_safe = [c for c in text if c in self.non_url_safe]
if non_safe:
for c in non_safe:
text = text.replace(c, '')
# Strip leading, trailing and multiple whitespace, convert remaining whitespace to _
text = u'_'.join(text.split())
return text
def process_header(self, token, buffer):
# If we get into this code, 'token' will be the start tag of a
# header element. We're going to grab its text contents to
# generate a slugified ID for it, add that ID in, and then
# spit it back out. 'buffer' is the list of tokens we were in
# the process of handling when we hit this header.
start, text, tmp = token, [], []
attrs = dict(token['data'])
while len(buffer):
# Loop through successive tokens in the stream of HTML
# until we find our end tag, building up in 'tmp' a list
# of those tokens to emit later, and in 'text' a list of
# the text content we see along the way.
next_token = buffer.pop(0)
tmp.append(next_token)
if next_token['type'] in ('Characters', 'SpaceCharacters'):
text.append(next_token['data'])
elif (next_token['type'] == 'EndTag' and
next_token['name'] == start['name']):
# Note: This is naive, and doesn't track other
# start/end tags nested in the header. Odd things might
# happen in a case like <h1><h1></h1></h1>. But, that's
# invalid markup and the worst case should be a
# truncated ID because all the text wasn't accumulated.
break
# Slugify the text we found inside the header, generate an ID
# as a last resort.
slug = self.slugify(u''.join(text))
if not slug:
slug = self.gen_id()
else:
# Create unique slug for heading tags with the same content
start_inc = 2
slug_base = slug
while slug in self.known_ids:
slug = u'{0}_{1}'.format(slug_base, start_inc)
start_inc += 1
attrs[(None, u'id')] = slug
start['data'] = attrs
self.known_ids.add(slug)
# Hand back buffer minus the bits we yanked out of it, and the
# new ID-ified header start tag and contents.
return buffer, [start] + tmp
def __iter__(self):
input = html5lib_Filter.__iter__(self)
# First, collect all ID values already in the source HTML.
buffer = []
for token in input:
buffer.append(token)
if token['type'] == 'StartTag':
attrs = dict(token['data'])
for (namespace, name), value in attrs.items():
# Collect both 'name' and 'id' attributes since
# 'name' gets treated as a manual override to
# specify an ID.
if name == 'id' and token['name'] not in HEAD_TAGS:
self.known_ids.add(value)
if name == 'name':
self.known_ids.add(value)
# Then walk the tree again identifying elements in need of IDs
# and adding them.
while len(buffer):
token = buffer.pop(0)
if not (token['type'] == 'StartTag' and
token['name'] in SECTION_TAGS):
# If this token isn't the start tag of a section or
# header, we don't add an ID and just short-circuit
# out to return the token as-is.
yield token
else:
# Potential bug warning: there may not be any
# attributes, so doing a for loop over them to look
# for existing ID/name values is unsafe. Instead we
# dict-ify the attrs, and then check directly for the
# things we care about instead of iterating all
# attributes and waiting for one we care about to show
# up.
attrs = dict(token['data'])
# First check for a 'name' attribute; if it's present,
# treat it as a manual override by the author and make
# that value be the ID.
if (None, 'name') in attrs:
attrs[(None, u'id')] = attrs[(None, 'name')]
token['data'] = attrs
yield token
continue
# Next look for <section> tags which don't have an ID
# set; since we don't generate an ID for them from
# their text contents, they just get a numeric one
# from gen_id().
if token['name'] not in HEAD_TAGS:
if (None, 'id') not in attrs:
attrs[(None, u'id')] = self.gen_id()
token['data'] = attrs
yield token
continue
# If we got here, we're looking at the start tag of a
# header which had no 'name' attribute set. We're
# going to pop out the text contents of the header,
# use them to generate a slugified ID for it, and
# return it with that ID added in.
buffer, header_tokens = self.process_header(token, buffer)
for t in header_tokens:
yield t
class SectionEditLinkFilter(html5lib_Filter):
"""
Filter which injects editing links for sections with IDs
"""
def __init__(self, source, slug, locale):
html5lib_Filter.__init__(self, source)
self.slug = slug
self.locale = locale
def __iter__(self):
input = html5lib_Filter.__iter__(self)
for token in input:
yield token
if (token['type'] == 'StartTag' and
token['name'] in SECTION_TAGS):
attrs = dict(token['data'])
for (namespace, name), value in attrs.items():
if name == 'id' and value:
ts = ({'type': 'StartTag',
'name': 'a',
'data': {
(None, u'title'): _('Edit section'),
(None, u'class'): 'edit-section',
(None, u'data-section-id'): value,
(None, u'data-section-src-url'): u'%s?%s' % (
reverse('wiki.document',
args=[self.slug],
locale=self.locale),
urlencode({'section': value.encode('utf-8'),
'raw': 'true'})
),
(None, u'href'): u'%s?%s' % (
reverse('wiki.edit_document',
args=[self.slug],
locale=self.locale),
urlencode({'section': value.encode('utf-8'),
'edit_links': 'true'})
)
}},
{'type': 'Characters',
'data': _(u'Edit')},
{'type': 'EndTag', 'name': 'a'})
for t in ts:
yield t
class SectionTOCFilter(html5lib_Filter):
"""
Filter which builds a TOC tree of sections with headers
"""
def __init__(self, source):
html5lib_Filter.__init__(self, source)
self.level = 2
self.in_header = False
self.open_level = 0
self.in_hierarchy = False
self.max_level = 6
def __iter__(self):
input = html5lib_Filter.__iter__(self)
self.skip_header = False
for token in input:
if (token['type'] == 'StartTag' and
token['name'] in HEAD_TAGS_TOC):
level_match = LEVEL_RE.match(token['name'])
level = int(level_match.group(1))
if level > self.max_level:
self.skip_header = True
continue
self.in_header = True
out = []
if level > self.level:
diff = level - self.level
for i in range(diff):
if (not self.in_hierarchy and i % 2 == 0):
out.append({'type': 'StartTag',
'name': 'li',
'data': {}})
out.append({'type': 'StartTag',
'name': 'ol',
'data': {}})
if (diff > 1 and i % 2 == 0 and i != diff - 1):
out.append({'type': 'StartTag',
'name': 'li',
'data': {}})
self.open_level += 1
self.level = level
elif level < self.level:
diff = self.level - level
for i in range(diff):
out.extend([{'type': 'EndTag',
'name': 'ol'},
{'type': 'EndTag',
'name': 'li'}])
self.open_level -= 1
self.level = level
attrs = dict(token['data'])
id = attrs.get((None, 'id'), None)
if id:
out.extend([
{'type': 'StartTag', 'name': 'li', 'data': {}},
{'type': 'StartTag', 'name': 'a',
'data': {(None, u'rel'): 'internal',
(None, u'href'): '#%s' % id}},
])
self.in_hierarchy = True
for t in out:
yield t
elif (token['type'] == 'StartTag' and
token['name'] in TAGS_IN_TOC and
self.in_header and
not self.skip_header):
yield token
elif (token['type'] in ("Characters", "SpaceCharacters")
and self.in_header):
yield token
elif (token['type'] == 'EndTag' and
token['name'] in TAGS_IN_TOC and
self.in_header):
yield token
elif (token['type'] == 'EndTag' and
token['name'] in HEAD_TAGS_TOC):
level_match = LEVEL_RE.match(token['name'])
level = int(level_match.group(1))
if level > self.max_level:
self.skip_header = False
continue
self.in_header = False
yield {'type': 'EndTag', 'name': 'a'}
if self.open_level > 0:
out = []
for i in range(self.open_level):
out.extend([{'type': 'EndTag', 'name': 'ol'},
{'type': 'EndTag', 'name': 'li'}])
for t in out:
yield t
class H2TOCFilter(SectionTOCFilter):
def __init__(self, source):
html5lib_Filter.__init__(self, source)
self.level = 2
self.max_level = 2
self.in_header = False
self.open_level = 0
self.in_hierarchy = False
class H3TOCFilter(SectionTOCFilter):
def __init__(self, source):
html5lib_Filter.__init__(self, source)
self.level = 2
self.max_level = 3
self.in_header = False
self.open_level = 0
self.in_hierarchy = False
class SectionFilter(html5lib_Filter):
"""
Filter which can either extract the fragment representing a section by
ID, or substitute a replacement stream for a section. Loosely based on
HTML5 outline algorithm
"""
HEADING_TAGS = ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hgroup')
SECTION_TAGS = ('article', 'aside', 'nav', 'section', 'blockquote',
'body', 'details', 'fieldset', 'figure', 'table', 'div')
def __init__(self, source, id, replace_source=None, ignore_heading=False):
html5lib_Filter.__init__(self, source)
self.replace_source = replace_source
self.ignore_heading = ignore_heading
self.section_id = id
self.heading = None
self.heading_rank = None
self.open_level = 0
self.parent_level = None
self.in_section = False
self.heading_to_ignore = None
self.already_ignored_header = False
self.next_in_section = False
self.replacement_emitted = False
def __iter__(self):
input = html5lib_Filter.__iter__(self)
for token in input:
# Section start was deferred, so start it now.
if self.next_in_section:
self.next_in_section = False
self.in_section = True
if token['type'] == 'StartTag':
attrs = dict(token['data'])
self.open_level += 1
# Have we encountered the section or heading element we're
# looking for?
if self.section_id in attrs.values():
# If we encounter a section element that matches the ID,
# then we'll want to scoop up all its children as an
# explicit section.
if (self.parent_level is None and self._isSection(token)):
self.parent_level = self.open_level
# Defer the start of the section, so the section parent
# itself isn't included.
self.next_in_section = True
# If we encounter a heading element that matches the ID, we
# start an implicit section.
elif (self.heading is None and self._isHeading(token)):
self.heading = token
self.heading_rank = self._getHeadingRank(token)
self.parent_level = self.open_level - 1
self.in_section = True
# If started an implicit section, these rules apply to
# siblings...
elif (self.heading is not None and
self.open_level - 1 == self.parent_level):
# The implicit section should stop if we hit another
# sibling heading whose rank is equal or higher, since that
# starts a new implicit section
if (self._isHeading(token) and
self._getHeadingRank(token) <= self.heading_rank):
self.in_section = False
# If this is the first heading of the section and we want to
# omit it, note that we've found it
if (self.in_section and
self.ignore_heading and
not self.already_ignored_header and
not self.heading_to_ignore and
self._isHeading(token)):
self.heading_to_ignore = token
elif token['type'] == 'EndTag':
self.open_level -= 1
# If the parent of the section has ended, end the section.
# This applies to both implicit and explicit sections.
if (self.parent_level is not None and
self.open_level < self.parent_level):
self.in_section = False
# If there's no replacement source, then this is a section
# extraction. So, emit tokens while we're in the section, as long
# as we're also not in the process of ignoring a heading
if not self.replace_source:
if self.in_section and not self.heading_to_ignore:
yield token
# If there is a replacement source, then this is a section
# replacement. Emit tokens of the source stream until we're in the
# section, then emit the replacement stream and ignore the rest of
# the source stream for the section. Note that an ignored heading
# is *not* replaced.
else:
if not self.in_section or self.heading_to_ignore:
yield token
elif not self.replacement_emitted:
for r_token in self.replace_source:
yield r_token
self.replacement_emitted = True
# If this looks like the end of a heading we were ignoring, clear
# the ignoring condition.
if (token['type'] == 'EndTag' and
self.in_section and
self.ignore_heading and
not self.already_ignored_header and
self.heading_to_ignore and
self._isHeading(token) and
token['name'] == self.heading_to_ignore['name']):
self.heading_to_ignore = None
self.already_ignored_header = True
def _isHeading(self, token):
"""Is this token a heading element?"""
return token['name'] in self.HEADING_TAGS
def _isSection(self, token):
"""Is this token a section element?"""
return token['name'] in self.SECTION_TAGS
def _getHeadingRank(self, token):
"""Calculate the heading rank of this token"""
if not self._isHeading(token):
return None
if token['name'] != 'hgroup':
return int(token['name'][1])
else:
# FIXME: hgroup rank == highest rank of headers contained
# But, we'd need to track the hgroup and then any child headers
# encountered in the stream. Not doing that right now.
# For now, just assume an hgroup is equivalent to h1
return 1
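# Illustrative usage of the filter above (not part of the original module):
# section replacement is normally driven through
# ContentSectionTool.replaceSection(); `doc_html` and `new_section_html` are
# assumed inputs:
#
#     new_doc = (parse(doc_html)
#                .replaceSection('Quick_Links', replace_src=new_section_html)
#                .serialize())
#
# which swaps the explicit or heading-implied section with id "Quick_Links"
# for the replacement stream and leaves the rest of the document untouched.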
class CodeSyntaxFilter(html5lib_Filter):
"""
Filter which ensures section-related elements have unique IDs
"""
def __iter__(self):
for token in html5lib_Filter.__iter__(self):
if token['type'] == 'StartTag' and token['name'] == 'pre':
attrs = dict(token['data'])
for (namespace, name), value in attrs.items():
if name == 'function' and value:
m = MT_SYNTAX_RE.match(value)
if m:
lang = m.group(1).lower()
brush = MT_SYNTAX_BRUSH_MAP.get(lang, lang)
attrs[(namespace, u'class')] = "brush: %s" % brush
del attrs[(None, 'function')]
token['data'] = attrs
yield token
class EditorSafetyFilter(html5lib_Filter):
"""
Minimal filter meant to strip out harmful attributes and elements before
rendering HTML for use in CKEditor
"""
def __iter__(self):
for token in html5lib_Filter.__iter__(self):
if token['type'] == 'StartTag':
# Strip out any attributes that start with "on"
attrs = {}
for (namespace, name), value in token['data'].items():
if name.startswith('on'):
continue
attrs[(namespace, name)] = value
token['data'] = attrs
yield token
class IframeHostFilter(html5lib_Filter):
"""
Filter which scans through <iframe> tags and strips the src attribute if
it doesn't contain a URL whose host matches a given list of allowed
hosts. Also strips any markup found within <iframe></iframe>.
"""
def __init__(self, source, hosts):
html5lib_Filter.__init__(self, source)
self.hosts = hosts
def __iter__(self):
in_iframe = False
for token in html5lib_Filter.__iter__(self):
if token['type'] == 'StartTag' and token['name'] == 'iframe':
in_iframe = True
attrs = dict(token['data'])
for (namespace, name), value in attrs.items():
if name == 'src' and value:
if not re.search(self.hosts, value):
attrs[(namespace, 'src')] = ''
token['data'] = attrs
yield token
if token['type'] == 'EndTag' and token['name'] == 'iframe':
in_iframe = False
if not in_iframe:
yield token
| mpl-2.0 | -3,269,024,955,448,995,000 | 37.998084 | 92 | 0.508695 | false |
ghyde/letsencrypt | letsencrypt/tests/crypto_util_test.py | 20 | 8468 | """Tests for letsencrypt.crypto_util."""
import logging
import shutil
import tempfile
import unittest
import OpenSSL
import mock
import zope.component
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt.tests import test_util
RSA256_KEY = test_util.load_vector('rsa256_key.pem')
RSA512_KEY = test_util.load_vector('rsa512_key.pem')
CERT_PATH = test_util.vector_path('cert.pem')
CERT = test_util.load_vector('cert.pem')
SAN_CERT = test_util.load_vector('cert-san.pem')
class InitSaveKeyTest(unittest.TestCase):
"""Tests for letsencrypt.crypto_util.init_save_key."""
def setUp(self):
logging.disable(logging.CRITICAL)
zope.component.provideUtility(
mock.Mock(strict_permissions=True), interfaces.IConfig)
self.key_dir = tempfile.mkdtemp('key_dir')
def tearDown(self):
logging.disable(logging.NOTSET)
shutil.rmtree(self.key_dir)
@classmethod
def _call(cls, key_size, key_dir):
from letsencrypt.crypto_util import init_save_key
return init_save_key(key_size, key_dir, 'key-letsencrypt.pem')
@mock.patch('letsencrypt.crypto_util.make_key')
def test_success(self, mock_make):
mock_make.return_value = 'key_pem'
key = self._call(1024, self.key_dir)
self.assertEqual(key.pem, 'key_pem')
self.assertTrue('key-letsencrypt.pem' in key.file)
@mock.patch('letsencrypt.crypto_util.make_key')
def test_key_failure(self, mock_make):
mock_make.side_effect = ValueError
self.assertRaises(ValueError, self._call, 431, self.key_dir)
class InitSaveCSRTest(unittest.TestCase):
"""Tests for letsencrypt.crypto_util.init_save_csr."""
def setUp(self):
zope.component.provideUtility(
mock.Mock(strict_permissions=True), interfaces.IConfig)
self.csr_dir = tempfile.mkdtemp('csr_dir')
def tearDown(self):
shutil.rmtree(self.csr_dir)
@mock.patch('letsencrypt.crypto_util.make_csr')
@mock.patch('letsencrypt.crypto_util.le_util.make_or_verify_dir')
def test_it(self, unused_mock_verify, mock_csr):
from letsencrypt.crypto_util import init_save_csr
mock_csr.return_value = ('csr_pem', 'csr_der')
csr = init_save_csr(
mock.Mock(pem='dummy_key'), 'example.com', self.csr_dir,
'csr-letsencrypt.pem')
self.assertEqual(csr.data, 'csr_der')
self.assertTrue('csr-letsencrypt.pem' in csr.file)
class MakeCSRTest(unittest.TestCase):
"""Tests for letsencrypt.crypto_util.make_csr."""
@classmethod
def _call(cls, *args, **kwargs):
from letsencrypt.crypto_util import make_csr
return make_csr(*args, **kwargs)
def test_san(self):
from letsencrypt.crypto_util import get_sans_from_csr
# TODO: Fails for RSA256_KEY
csr_pem, csr_der = self._call(
RSA512_KEY, ['example.com', 'www.example.com'])
self.assertEqual(
['example.com', 'www.example.com'], get_sans_from_csr(csr_pem))
self.assertEqual(
['example.com', 'www.example.com'], get_sans_from_csr(
csr_der, OpenSSL.crypto.FILETYPE_ASN1))
class ValidCSRTest(unittest.TestCase):
"""Tests for letsencrypt.crypto_util.valid_csr."""
@classmethod
def _call(cls, csr):
from letsencrypt.crypto_util import valid_csr
return valid_csr(csr)
def test_valid_pem_true(self):
self.assertTrue(self._call(test_util.load_vector('csr.pem')))
def test_valid_pem_san_true(self):
self.assertTrue(self._call(test_util.load_vector('csr-san.pem')))
def test_valid_der_false(self):
self.assertFalse(self._call(test_util.load_vector('csr.der')))
def test_valid_der_san_false(self):
self.assertFalse(self._call(test_util.load_vector('csr-san.der')))
def test_empty_false(self):
self.assertFalse(self._call(''))
def test_random_false(self):
self.assertFalse(self._call('foo bar'))
class CSRMatchesPubkeyTest(unittest.TestCase):
"""Tests for letsencrypt.crypto_util.csr_matches_pubkey."""
@classmethod
def _call(cls, *args, **kwargs):
from letsencrypt.crypto_util import csr_matches_pubkey
return csr_matches_pubkey(*args, **kwargs)
def test_valid_true(self):
self.assertTrue(self._call(
test_util.load_vector('csr.pem'), RSA512_KEY))
def test_invalid_false(self):
self.assertFalse(self._call(
test_util.load_vector('csr.pem'), RSA256_KEY))
class MakeKeyTest(unittest.TestCase): # pylint: disable=too-few-public-methods
"""Tests for letsencrypt.crypto_util.make_key."""
def test_it(self): # pylint: disable=no-self-use
from letsencrypt.crypto_util import make_key
# Do not test larger keys as it takes too long.
OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM, make_key(1024))
class ValidPrivkeyTest(unittest.TestCase):
"""Tests for letsencrypt.crypto_util.valid_privkey."""
@classmethod
def _call(cls, privkey):
from letsencrypt.crypto_util import valid_privkey
return valid_privkey(privkey)
def test_valid_true(self):
self.assertTrue(self._call(RSA256_KEY))
def test_empty_false(self):
self.assertFalse(self._call(''))
def test_random_false(self):
self.assertFalse(self._call('foo bar'))
class GetSANsFromCertTest(unittest.TestCase):
"""Tests for letsencrypt.crypto_util.get_sans_from_cert."""
@classmethod
def _call(cls, *args, **kwargs):
from letsencrypt.crypto_util import get_sans_from_cert
return get_sans_from_cert(*args, **kwargs)
def test_single(self):
self.assertEqual([], self._call(test_util.load_vector('cert.pem')))
def test_san(self):
self.assertEqual(
['example.com', 'www.example.com'],
self._call(test_util.load_vector('cert-san.pem')))
class GetSANsFromCSRTest(unittest.TestCase):
"""Tests for letsencrypt.crypto_util.get_sans_from_csr."""
@classmethod
def _call(cls, *args, **kwargs):
from letsencrypt.crypto_util import get_sans_from_csr
return get_sans_from_csr(*args, **kwargs)
def test_extract_one_san(self):
self.assertEqual(['example.com'], self._call(
test_util.load_vector('csr.pem')))
def test_extract_two_sans(self):
self.assertEqual(['example.com', 'www.example.com'], self._call(
test_util.load_vector('csr-san.pem')))
def test_extract_six_sans(self):
self.assertEqual(self._call(test_util.load_vector('csr-6sans.pem')),
["example.com", "example.org", "example.net",
"example.info", "subdomain.example.com",
"other.subdomain.example.com"])
def test_parse_non_csr(self):
self.assertRaises(OpenSSL.crypto.Error, self._call, "hello there")
def test_parse_no_sans(self):
self.assertEqual(
[], self._call(test_util.load_vector('csr-nosans.pem')))
class CertLoaderTest(unittest.TestCase):
"""Tests for letsencrypt.crypto_util.pyopenssl_load_certificate"""
def test_load_valid_cert(self):
from letsencrypt.crypto_util import pyopenssl_load_certificate
cert, file_type = pyopenssl_load_certificate(CERT)
self.assertEqual(cert.digest('sha1'),
OpenSSL.crypto.load_certificate(file_type, CERT).digest('sha1'))
def test_load_invalid_cert(self):
from letsencrypt.crypto_util import pyopenssl_load_certificate
bad_cert_data = CERT.replace("BEGIN CERTIFICATE", "ASDFASDFASDF!!!")
self.assertRaises(
errors.Error, pyopenssl_load_certificate, bad_cert_data)
class NotBeforeTest(unittest.TestCase):
"""Tests for letsencrypt.crypto_util.notBefore"""
def test_notBefore(self):
from letsencrypt.crypto_util import notBefore
self.assertEqual(notBefore(CERT_PATH).isoformat(),
'2014-12-11T22:34:45+00:00')
class NotAfterTest(unittest.TestCase):
"""Tests for letsencrypt.crypto_util.notAfter"""
def test_notAfter(self):
from letsencrypt.crypto_util import notAfter
self.assertEqual(notAfter(CERT_PATH).isoformat(),
'2014-12-18T22:34:45+00:00')
if __name__ == '__main__':
unittest.main() # pragma: no cover
| apache-2.0 | -5,730,138,421,561,995,000 | 32.338583 | 89 | 0.650449 | false |
followthesheep/galpy | galpy/orbit_src/integrateFullOrbit.py | 1 | 10899 | import sys
import sysconfig
import warnings
import numpy as nu
import ctypes
import ctypes.util
from numpy.ctypeslib import ndpointer
import os
from galpy import potential
from galpy.util import galpyWarning
from galpy.orbit_src.integratePlanarOrbit import _parse_integrator, _parse_tol
#Find and load the library
_lib= None
outerr= None
PY3= sys.version > '3'
if PY3: #pragma: no cover
_ext_suffix= sysconfig.get_config_var('EXT_SUFFIX')
else:
_ext_suffix= '.so'
for path in sys.path:
try:
_lib = ctypes.CDLL(os.path.join(path,'galpy_integrate_c%s' % _ext_suffix))
except OSError as e:
if os.path.exists(os.path.join(path,'galpy_integrate_c%s' % _ext_suffix)): #pragma: no cover
outerr= e
_lib = None
else:
break
if _lib is None: #pragma: no cover
if not outerr is None:
warnings.warn("integrateFullOrbit_c extension module not loaded, because of error '%s' " % outerr,
galpyWarning)
else:
warnings.warn("integrateFullOrbit_c extension module not loaded, because galpy_integrate_c%s image was not found" % _ext_suffix,
galpyWarning)
_ext_loaded= False
else:
_ext_loaded= True
def _parse_pot(pot,potforactions=False):
"""Parse the potential so it can be fed to C"""
#Figure out what's in pot
if not isinstance(pot,list):
pot= [pot]
#Initialize everything
pot_type= []
pot_args= []
npot= len(pot)
for p in pot:
if isinstance(p,potential.LogarithmicHaloPotential):
pot_type.append(0)
pot_args.extend([p._amp,p._q,p._core2])
elif isinstance(p,potential.MiyamotoNagaiPotential):
pot_type.append(5)
pot_args.extend([p._amp,p._a,p._b])
elif isinstance(p,potential.PowerSphericalPotential):
pot_type.append(7)
pot_args.extend([p._amp,p.alpha])
elif isinstance(p,potential.HernquistPotential):
pot_type.append(8)
pot_args.extend([p._amp,p.a])
elif isinstance(p,potential.NFWPotential):
pot_type.append(9)
pot_args.extend([p._amp,p.a])
elif isinstance(p,potential.JaffePotential):
pot_type.append(10)
pot_args.extend([p._amp,p.a])
elif isinstance(p,potential.DoubleExponentialDiskPotential):
pot_type.append(11)
pot_args.extend([p._amp,p._alpha,p._beta,p._kmaxFac,
p._nzeros,p._glorder])
pot_args.extend([p._glx[ii] for ii in range(p._glorder)])
pot_args.extend([p._glw[ii] for ii in range(p._glorder)])
pot_args.extend([p._j0zeros[ii] for ii in range(p._nzeros+1)])
pot_args.extend([p._dj0zeros[ii] for ii in range(p._nzeros+1)])
pot_args.extend([p._j1zeros[ii] for ii in range(p._nzeros+1)])
pot_args.extend([p._dj1zeros[ii] for ii in range(p._nzeros+1)])
pot_args.extend([p._kp._amp,p._kp.alpha])
elif isinstance(p,potential.FlattenedPowerPotential):
pot_type.append(12)
pot_args.extend([p._amp,p.alpha,p.q2,p.core2])
elif isinstance(p,potential.interpRZPotential):
pot_type.append(13)
pot_args.extend([len(p._rgrid),len(p._zgrid)])
if p._logR:
pot_args.extend([p._logrgrid[ii] for ii in range(len(p._rgrid))])
else:
pot_args.extend([p._rgrid[ii] for ii in range(len(p._rgrid))])
pot_args.extend([p._zgrid[ii] for ii in range(len(p._zgrid))])
if potforactions:
pot_args.extend([x for x in p._potGrid_splinecoeffs.flatten(order='C')])
else:
pot_args.extend([x for x in p._rforceGrid_splinecoeffs.flatten(order='C')])
pot_args.extend([x for x in p._zforceGrid_splinecoeffs.flatten(order='C')])
pot_args.extend([p._amp,int(p._logR)])
elif isinstance(p,potential.IsochronePotential):
pot_type.append(14)
pot_args.extend([p._amp,p.b])
elif isinstance(p,potential.PowerSphericalPotentialwCutoff):
pot_type.append(15)
pot_args.extend([p._amp,p.alpha,p.rc])
elif isinstance(p,potential.MN3ExponentialDiskPotential):
# Three Miyamoto-Nagai disks
npot+= 2
pot_type.extend([5,5,5])
pot_args.extend([p._amp*p._mn3[0]._amp,
p._mn3[0]._a,p._mn3[0]._b,
p._amp*p._mn3[1]._amp,
p._mn3[1]._a,p._mn3[1]._b,
p._amp*p._mn3[2]._amp,
p._mn3[2]._a,p._mn3[2]._b])
elif isinstance(p,potential.KuzminKutuzovStaeckelPotential):
pot_type.append(16)
pot_args.extend([p._amp,p._ac,p._Delta])
elif isinstance(p,potential.PlummerPotential):
pot_type.append(17)
pot_args.extend([p._amp,p._b])
pot_type= nu.array(pot_type,dtype=nu.int32,order='C')
pot_args= nu.array(pot_args,dtype=nu.float64,order='C')
return (npot,pot_type,pot_args)
def integrateFullOrbit_c(pot,yo,t,int_method,rtol=None,atol=None,dt=None):
"""
NAME:
integrateFullOrbit_c
PURPOSE:
C integrate an ode for a FullOrbit
INPUT:
pot - Potential or list of such instances
yo - initial condition [q,p]
t - set of times at which one wants the result
int_method= 'leapfrog_c', 'rk4_c', 'rk6_c', 'symplec4_c'
rtol, atol
       dt= (None) force integrator to use this stepsize (default is to automatically determine one)
OUTPUT:
(y,err)
y : array, shape (len(y0), len(t))
Array containing the value of y for each desired time in t, \
with the initial value y0 in the first row.
err: error message, if not zero: 1 means maximum step reduction happened for adaptive integrators
HISTORY:
2011-11-13 - Written - Bovy (IAS)
"""
rtol, atol= _parse_tol(rtol,atol)
npot, pot_type, pot_args= _parse_pot(pot)
int_method_c= _parse_integrator(int_method)
if dt is None:
dt= -9999.99
#Set up result array
result= nu.empty((len(t),6))
err= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
integrationFunc= _lib.integrateFullOrbit
integrationFunc.argtypes= [ndpointer(dtype=nu.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=nu.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=nu.int32,flags=ndarrayFlags),
ndpointer(dtype=nu.float64,flags=ndarrayFlags),
ctypes.c_double,
ctypes.c_double,
ctypes.c_double,
ndpointer(dtype=nu.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int),
ctypes.c_int]
#Array requirements, first store old order
f_cont= [yo.flags['F_CONTIGUOUS'],
t.flags['F_CONTIGUOUS']]
yo= nu.require(yo,dtype=nu.float64,requirements=['C','W'])
t= nu.require(t,dtype=nu.float64,requirements=['C','W'])
result= nu.require(result,dtype=nu.float64,requirements=['C','W'])
#Run the C code
integrationFunc(yo,
ctypes.c_int(len(t)),
t,
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_double(dt),
ctypes.c_double(rtol),ctypes.c_double(atol),
result,
ctypes.byref(err),
ctypes.c_int(int_method_c))
#Reset input arrays
if f_cont[0]: yo= nu.asfortranarray(yo)
if f_cont[1]: t= nu.asfortranarray(t)
return (result,err.value)
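# A minimal usage sketch for the routine above, assuming galpy's MWPotential
# and an arbitrary initial condition chosen only for illustration:
#
#   import numpy
#   from galpy.potential import MWPotential
#   yo= numpy.array([1.,0.1,1.1,0.,0.02,0.])   # [R,vR,vT,z,vz,phi]
#   ts= numpy.linspace(0.,100.,1001)
#   orbit, err= integrateFullOrbit_c(MWPotential,yo,ts,'leapfrog_c')
#   # orbit has shape (len(ts),6); err == 0 on success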
def integrateFullOrbit_dxdv_c(pot,yo,dyo,t,int_method,rtol=None,atol=None): #pragma: no cover because not included in v1, uncover when included
"""
NAME:
integrateFullOrbit_dxdv_c
PURPOSE:
C integrate an ode for a planarOrbit+phase space volume dxdv
INPUT:
pot - Potential or list of such instances
yo - initial condition [q,p]
dyo - initial condition [dq,dp]
t - set of times at which one wants the result
int_method= 'leapfrog_c', 'rk4_c', 'rk6_c', 'symplec4_c'
rtol, atol
OUTPUT:
(y,err)
y : array, shape (len(y0), len(t))
Array containing the value of y for each desired time in t, \
with the initial value y0 in the first row.
err: error message if not zero, 1: maximum step reduction happened for adaptive integrators
HISTORY:
2011-11-13 - Written - Bovy (IAS)
"""
rtol, atol= _parse_tol(rtol,atol)
npot, pot_type, pot_args= _parse_pot(pot)
int_method_c= _parse_integrator(int_method)
yo= nu.concatenate((yo,dyo))
#Set up result array
result= nu.empty((len(t),12))
err= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
integrationFunc= _lib.integrateFullOrbit_dxdv
integrationFunc.argtypes= [ndpointer(dtype=nu.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=nu.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=nu.int32,flags=ndarrayFlags),
ndpointer(dtype=nu.float64,flags=ndarrayFlags),
ctypes.c_double,
ctypes.c_double,
ndpointer(dtype=nu.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int),
ctypes.c_int]
#Array requirements, first store old order
f_cont= [yo.flags['F_CONTIGUOUS'],
t.flags['F_CONTIGUOUS']]
yo= nu.require(yo,dtype=nu.float64,requirements=['C','W'])
t= nu.require(t,dtype=nu.float64,requirements=['C','W'])
result= nu.require(result,dtype=nu.float64,requirements=['C','W'])
#Run the C code
integrationFunc(yo,
ctypes.c_int(len(t)),
t,
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_double(rtol),ctypes.c_double(atol),
result,
ctypes.byref(err),
ctypes.c_int(int_method_c))
#Reset input arrays
if f_cont[0]: yo= nu.asfortranarray(yo)
if f_cont[1]: t= nu.asfortranarray(t)
return (result,err.value)
| bsd-3-clause | 3,197,401,538,208,799,000 | 40.128302 | 143 | 0.560143 | false |
mitchcapper/mythbox | resources/lib/tvrage/tvrage/exceptions.py | 4 | 1937 | #!/usr/bin/env python
# Copyright (c) 2010, Christian Kreutzer
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class BaseError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ShowHasEnded(BaseError):
pass
class NoNewEpisodesAnnounced(BaseError):
pass
class FinaleMayNotBeAnnouncedYet(BaseError):
pass
class ShowNotFound(BaseError):
pass
| gpl-2.0 | -6,643,528,417,361,746,000 | 35.54717 | 79 | 0.757357 | false |
kit-cel/gr-lte | python/qa_pre_decoder_vcvc.py | 3 | 5073 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Communications Engineering Lab (CEL) / Karlsruhe Institute of Technology (KIT)
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
import lte_swig as lte
import lte_test
class qa_pre_decoder_vcvc(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
N_ant = 2
vlen = 240
style = 'tx_diversity'
intu1 = [0] * vlen
intu2 = [0] * vlen
intu3 = [0] * vlen
self.src1 = blocks.vector_source_c(intu1, False, vlen)
self.src2 = blocks.vector_source_c(intu2, False, vlen)
self.src3 = blocks.vector_source_c(intu3, False, vlen)
self.pd = lte.pre_decoder_vcvc(1, N_ant, vlen, style)
self.snk = blocks.vector_sink_c(vlen)
self.tb.connect(self.src1, (self.pd, 0))
self.tb.connect(self.src2, (self.pd, 1))
self.tb.connect(self.src3, (self.pd, 2))
self.tb.connect(self.pd, self.snk)
def tearDown(self):
self.tb = None
def test_001_generated(self):
print "\ntest_001_generated"
cell_id = 124
N_ant = 2
style = "tx_diversity"
mib = lte_test.pack_mib(50, 0, 1.0, 511)
bch = lte_test.encode_bch(mib, N_ant)
scrambled = lte_test.pbch_scrambling(bch, cell_id)
qpsk_modulated = lte_test.qpsk_modulation(scrambled)
#print np.shape(qpsk_modulated)
layer_mapped = lte_test.layer_mapping(qpsk_modulated, N_ant, style)
pre_coded = lte_test.pre_coding(layer_mapped, N_ant, style)
#print np.shape(pre_coded)
h0 = [complex(1, 0)] * len(pre_coded[0])
h1 = [complex(1, 0)] * len(pre_coded[1])
stream = [pre_coded[0][i] + pre_coded[1][i] for i in range(len(pre_coded[0]))]
self.src1.set_data(stream)
self.src2.set_data(h0)
self.src3.set_data(h1)
self.tb.run()
res = self.snk.data()
exp_res = []
for i in range(len(stream) / 240):
print i
lay0 = layer_mapped[0][i * 120:(i + 1) * 120]
lay1 = layer_mapped[1][i * 120:(i + 1) * 120]
comb = [lay0, lay1]
exp_res.extend(lte_test.prepare_for_demapper_block(comb, N_ant, style))
print "test 001 final ASSERT!"
        self.assertComplexTuplesAlmostEqual(res, exp_res)
def test_002_pcfich(self):
print "test_002_pcfich"
# some constants
cell_id = 124
N_ant = 2
style = "tx_diversity"
vlen = 16
ns = 0
# new top_block because even the interface changes
self.tb2 = gr.top_block()
# generate test data together with the expected output
data = []
exp_res = []
for cfi in range(4):
cfi_seq = lte_test.get_cfi_sequence(cfi + 1)
scr_cfi_seq = lte_test.scramble_cfi_sequence(cfi_seq, cell_id, ns)
mod_cfi_seq = lte_test.qpsk_modulation(scr_cfi_seq)
lay_cfi_seq = lte_test.layer_mapping(mod_cfi_seq, N_ant, style)
lay_cfi_prep = lte_test.prepare_for_demapper_block(lay_cfi_seq, N_ant, style)
exp_res.extend(lay_cfi_prep)
pc_cfi_seq = lte_test.pre_coding(lay_cfi_seq, N_ant, style)
pc_cfi_seq = [pc_cfi_seq[0][i] + pc_cfi_seq[1][i] for i in range(len(pc_cfi_seq[0]))]
data.extend(pc_cfi_seq)
# dummy channel estimates
intu2 = [complex(1, 0)] * len(data)
intu3 = [complex(1, 0)] * len(data)
# get blocks
self.src1 = blocks.vector_source_c(data, False, vlen)
self.src2 = blocks.vector_source_c(intu2, False, vlen)
self.src3 = blocks.vector_source_c(intu3, False, vlen)
self.pd = lte.pre_decoder_vcvc(1, 1, vlen, style)
self.snk = blocks.vector_sink_c(vlen)
# connect all blocks
self.tb2.connect(self.src1, (self.pd, 0))
self.tb2.connect(self.src2, (self.pd, 1))
self.tb2.connect(self.src3, (self.pd, 2))
self.tb2.connect(self.pd, self.snk)
self.pd.set_N_ant(N_ant)
# run flowgraph
self.tb2.run()
# compare result with expected result
res = self.snk.data()
self.assertComplexTuplesAlmostEqual(res, exp_res)
if __name__ == '__main__':
gr_unittest.run(qa_pre_decoder_vcvc, "qa_pre_decoder_vcvc.xml")
| gpl-3.0 | -7,386,275,104,010,072,000 | 33.510204 | 97 | 0.599054 | false |
edx/edx-platform | openedx/features/enterprise_support/tasks.py | 4 | 1447 | """
Tasks for Enterprise.
"""
import logging
from celery import shared_task
from edx_django_utils.monitoring import set_code_owner_attribute
from enterprise.models import EnterpriseCourseEnrollment
from openedx.features.enterprise_support.utils import clear_data_consent_share_cache
log = logging.getLogger('edx.celery.task')
@shared_task(name='openedx.features.enterprise_support.tasks.clear_enterprise_customer_data_consent_share_cache')
@set_code_owner_attribute
def clear_enterprise_customer_data_consent_share_cache(enterprise_customer_uuid):
"""
clears data_sharing_consent_needed cache for whole enterprise
"""
enterprise_course_enrollments = EnterpriseCourseEnrollment.objects.filter(
enterprise_customer_user__enterprise_customer__uuid=enterprise_customer_uuid
)
count = enterprise_course_enrollments.count()
log.info(
        'Started clearing {count} data_sharing_consent_needed cache for enterprise customer {uuid}'.format(
count=count,
uuid=enterprise_customer_uuid,
)
)
for enrollment in enterprise_course_enrollments:
clear_data_consent_share_cache(
enrollment.enterprise_customer_user.user_id,
enrollment.course_id,
enterprise_customer_uuid,
)
    log.info('Finished clearing data_sharing_consent_needed cache for enterprise customer {uuid}'.format(
uuid=enterprise_customer_uuid,
))
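# A minimal usage sketch, assuming an EnterpriseCustomer instance named
# ``enterprise_customer`` (a placeholder for illustration only); the task is
# normally queued asynchronously via Celery:
#
#   clear_enterprise_customer_data_consent_share_cache.delay(
#       str(enterprise_customer.uuid))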
| agpl-3.0 | -6,306,246,466,646,541,000 | 34.292683 | 113 | 0.725639 | false |
ericzundel/mvn2pants | test/python/squarepants_test/test_pom_utils.py | 1 | 2314 | # Tests for code in squarepants/src/main/python/squarepants/pom_utils.py
#
# Run with:
# ./pants test squarepants/src/test/python/squarepants_test:pom_utils
import logging
import unittest2 as unittest
from squarepants.pom_utils import PomUtils
# TODO(Eric Ayers) Refactor PomUtils so we can point it at a dummy directory of pom files
class PomUtilsTest(unittest.TestCase):
# Test singletons
def test_dependency_management_Finder(self):
dmf = PomUtils.dependency_management_finder()
self.assertIsNotNone(dmf)
self.assertIs(dmf, PomUtils.dependency_management_finder()) # should be a singleton
def test_pom_provides_target(self):
ppt = PomUtils.pom_provides_target()
self.assertIsNotNone(ppt)
self.assertIs(ppt, PomUtils.pom_provides_target()) # should be a singleton
def test_local_dep_targets(self):
ldt = PomUtils.local_dep_targets()
self.assertIsNotNone(ldt)
self.assertIs(ldt, PomUtils.local_dep_targets()) # should be a singleton
def test_third_party_dep_targets(self):
tpdt = PomUtils.third_party_dep_targets()
self.assertIsNotNone(tpdt)
self.assertIs(tpdt, PomUtils.third_party_dep_targets())
def test_top_pom_content_handler(self):
tpch = PomUtils.top_pom_content_handler()
self.assertIsNotNone(tpch)
self.assertIs(tpch, PomUtils.top_pom_content_handler())
def test_external_protos_content_handler(self):
epch = PomUtils.external_protos_content_handler()
self.assertIsNotNone(epch)
self.assertIs(epch, PomUtils.external_protos_content_handler())
def test_get_modules(self):
top_modules = PomUtils.top_pom_content_handler()
self.assertIsNotNone(top_modules)
def test_common_usage(self):
# nothing really to test here, it just prints a message to sdout.
PomUtils.common_usage()
def test_parse_common_args(self):
unprocessed = PomUtils.parse_common_args(['-ldebug', 'unused'])
self.assertEquals(['unused'], unprocessed)
self.assertTrue(logging.DEBUG, logging.getLogger().getEffectiveLevel())
def test_is_local_dep(self):
self.assertFalse(PomUtils.is_local_dep('bogus-dep'))
def test_is_third_party_dep(self):
self.assertFalse(PomUtils.is_third_party_dep('bogus-dep'))
def test_is_external_dep(self):
self.assertTrue(PomUtils.is_external_dep('bogus-dep'))
| apache-2.0 | -7,195,754,187,928,690,000 | 34.060606 | 89 | 0.735091 | false |
amyvmiwei/chromium | third_party/scons/scons-local/SCons/Node/Python.py | 3 | 3966 | """scons.Node.Python
Python nodes.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Node/Python.py 3897 2009/01/13 06:45:54 scons"
import SCons.Node
class ValueNodeInfo(SCons.Node.NodeInfoBase):
current_version_id = 1
field_list = ['csig']
def str_to_node(self, s):
return Value(s)
class ValueBuildInfo(SCons.Node.BuildInfoBase):
current_version_id = 1
class Value(SCons.Node.Node):
"""A class for Python variables, typically passed on the command line
or generated by a script, but not from a file or some other source.
"""
NodeInfo = ValueNodeInfo
BuildInfo = ValueBuildInfo
def __init__(self, value, built_value=None):
SCons.Node.Node.__init__(self)
self.value = value
if not built_value is None:
self.built_value = built_value
def str_for_display(self):
return repr(self.value)
def __str__(self):
return str(self.value)
def make_ready(self):
self.get_csig()
def build(self, **kw):
if not hasattr(self, 'built_value'):
apply (SCons.Node.Node.build, (self,), kw)
is_up_to_date = SCons.Node.Node.children_are_up_to_date
def is_under(self, dir):
# Make Value nodes get built regardless of
# what directory scons was run from. Value nodes
# are outside the filesystem:
return 1
def write(self, built_value):
"""Set the value of the node."""
self.built_value = built_value
def read(self):
"""Return the value. If necessary, the value is built."""
self.build()
if not hasattr(self, 'built_value'):
self.built_value = self.value
return self.built_value
def get_contents(self):
"""By the assumption that the node.built_value is a
deterministic product of the sources, the contents of a Value
are the concatenation of all the contents of its sources. As
the value need not be built when get_contents() is called, we
cannot use the actual node.built_value."""
contents = str(self.value)
for kid in self.children(None):
contents = contents + kid.get_contents()
return contents
def changed_since_last_build(self, target, prev_ni):
cur_csig = self.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
return 1
def get_csig(self, calc=None):
"""Because we're a Python value node and don't have a real
timestamp, we get to ignore the calculator and just use the
value contents."""
try:
return self.ninfo.csig
except AttributeError:
pass
contents = self.get_contents()
self.get_ninfo().csig = contents
return contents
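# A minimal usage sketch (an SConstruct context is assumed for illustration):
# a Value node lets a target depend on an in-memory string, so the target is
# rebuilt only when the value's content signature changes.
#
#   env = Environment()
#   version = Value('1.2.3')
#   def write_version(target, source, env):
#       open(str(target[0]), 'w').write(source[0].get_contents())
#   env.Command('version.txt', version, write_version)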
| bsd-3-clause | -8,590,415,794,620,605,000 | 32.327731 | 89 | 0.659355 | false |
gfreed/android_external_chromium-org | build/android/pylib/cmd_helper.py | 23 | 3329 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper for subprocess to make calling shell commands easier."""
import os
import logging
import pipes
import signal
import subprocess
import tempfile
import constants
def _Call(args, stdout=None, stderr=None, shell=None, cwd=None):
return subprocess.call(
args=args, cwd=cwd, stdout=stdout, stderr=stderr,
shell=shell, close_fds=True,
preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL))
def RunCmd(args, cwd=None):
"""Opens a subprocess to execute a program and returns its return value.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
Returns:
Return code from the command execution.
"""
logging.info(str(args) + ' ' + (cwd or ''))
return _Call(args, cwd=cwd)
def GetCmdOutput(args, cwd=None, shell=False):
"""Open a subprocess to execute a program and returns its output.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command.
Returns:
Captures and returns the command's stdout.
Prints the command's stderr to logger (which defaults to stdout).
"""
(_, output) = GetCmdStatusAndOutput(args, cwd, shell)
return output
def GetCmdStatusAndOutput(args, cwd=None, shell=False):
"""Executes a subprocess and returns its exit code and output.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command.
Returns:
The tuple (exit code, output).
"""
if isinstance(args, basestring):
args_repr = args
if not shell:
raise Exception('string args must be run with shell=True')
elif shell:
raise Exception('array args must be run with shell=False')
else:
args_repr = ' '.join(map(pipes.quote, args))
s = '[host]'
if cwd:
s += ':' + cwd
s += '> ' + args_repr
logging.info(s)
tmpout = tempfile.TemporaryFile(bufsize=0)
tmperr = tempfile.TemporaryFile(bufsize=0)
exit_code = _Call(args, cwd=cwd, stdout=tmpout, stderr=tmperr, shell=shell)
tmperr.seek(0)
stderr = tmperr.read()
tmperr.close()
if stderr:
logging.critical(stderr)
tmpout.seek(0)
stdout = tmpout.read()
tmpout.close()
if len(stdout) > 4096:
logging.debug('Truncated output:')
logging.debug(stdout[:4096])
return (exit_code, stdout)
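# Minimal usage sketches for the helpers above; the commands shown are
# placeholders chosen only for illustration.
#
#   exit_code = RunCmd(['adb', 'devices'])
#   output = GetCmdOutput(['ls', '-l'], cwd='/tmp')
#   status, count = GetCmdStatusAndOutput('ls | wc -l', shell=True)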
class OutDirectory(object):
_out_directory = os.path.join(constants.DIR_SOURCE_ROOT,
os.environ.get('CHROMIUM_OUT_DIR','out'))
@staticmethod
def set(out_directory):
OutDirectory._out_directory = out_directory
@staticmethod
def get():
return OutDirectory._out_directory
| bsd-3-clause | -8,354,268,949,655,893,000 | 29.263636 | 80 | 0.69991 | false |
sunjeammy/tornado | tornado/web.py | 2 | 119945 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""``tornado.web`` provides a simple web framework with asynchronous
features that allow it to scale to large numbers of open connections,
making it ideal for `long polling
<http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_.
Here is a simple "Hello, world" example app::
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(8888)
tornado.ioloop.IOLoop.instance().start()
See the :doc:`guide` for additional information.
Thread-safety notes
-------------------
In general, methods on `RequestHandler` and elsewhere in Tornado are
not thread-safe. In particular, methods such as
`~RequestHandler.write()`, `~RequestHandler.finish()`, and
`~RequestHandler.flush()` must only be called from the main thread. If
you use multiple threads it is important to use `.IOLoop.add_callback`
to transfer control back to the main thread before finishing the
request.
"""
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import mimetypes
import numbers
import os.path
import re
import stat
import sys
import threading
import time
import tornado
import traceback
import types
from io import BytesIO
from tornado.concurrent import Future, is_future
from tornado import escape
from tornado import gen
from tornado import httputil
from tornado import iostream
from tornado import locale
from tornado.log import access_log, app_log, gen_log
from tornado import stack_context
from tornado import template
from tornado.escape import utf8, _unicode
from tornado.util import import_object, ObjectDict, raise_exc_info, unicode_type, _websocket_mask
try:
import Cookie # py2
except ImportError:
import http.cookies as Cookie # py3
try:
import urlparse # py2
except ImportError:
import urllib.parse as urlparse # py3
try:
from urllib import urlencode # py2
except ImportError:
from urllib.parse import urlencode # py3
MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
"""The oldest signed value version supported by this version of Tornado.
Signed values older than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
"""The newest signed value version supported by this version of Tornado.
Signed values newer than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_VERSION = 2
"""The signed value version produced by `.RequestHandler.create_signed_value`.
May be overridden by passing a ``version`` keyword argument.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.
May be overridden by passing a ``min_version`` keyword argument.
.. versionadded:: 3.2.1
"""
class RequestHandler(object):
"""Subclass this class and define `get()` or `post()` to make a handler.
If you want to support more methods than the standard GET/HEAD/POST, you
should override the class variable ``SUPPORTED_METHODS`` in your
`RequestHandler` subclass.
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
"OPTIONS")
_template_loaders = {} # {path: template.BaseLoader}
_template_loader_lock = threading.Lock()
_remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
def __init__(self, application, request, **kwargs):
super(RequestHandler, self).__init__()
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._transforms = None # will be set in _execute
self._prepared_future = None
self.path_args = None
self.path_kwargs = None
self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
application.ui_methods.items())
# UIModules are available as both `modules` and `_tt_modules` in the
# template namespace. Historically only `modules` was available
# but could be clobbered by user additions to the namespace.
# The template {% module %} directive looks in `_tt_modules` to avoid
# possible conflicts.
self.ui["_tt_modules"] = _UIModuleNamespace(self,
application.ui_modules)
self.ui["modules"] = self.ui["_tt_modules"]
self.clear()
self.request.connection.set_close_callback(self.on_connection_close)
self.initialize(**kwargs)
def initialize(self):
"""Hook for subclass initialization.
A dictionary passed as the third argument of a url spec will be
supplied as keyword arguments to initialize().
Example::
class ProfileHandler(RequestHandler):
def initialize(self, database):
self.database = database
def get(self, username):
...
app = Application([
(r'/user/(.*)', ProfileHandler, dict(database=database)),
])
"""
pass
@property
def settings(self):
"""An alias for `self.application.settings <Application.settings>`."""
return self.application.settings
def head(self, *args, **kwargs):
raise HTTPError(405)
def get(self, *args, **kwargs):
raise HTTPError(405)
def post(self, *args, **kwargs):
raise HTTPError(405)
def delete(self, *args, **kwargs):
raise HTTPError(405)
def patch(self, *args, **kwargs):
raise HTTPError(405)
def put(self, *args, **kwargs):
raise HTTPError(405)
def options(self, *args, **kwargs):
raise HTTPError(405)
def prepare(self):
"""Called at the beginning of a request before `get`/`post`/etc.
Override this method to perform common initialization regardless
of the request method.
Asynchronous support: Decorate this method with `.gen.coroutine`
or `.return_future` to make it asynchronous (the
`asynchronous` decorator cannot be used on `prepare`).
If this method returns a `.Future` execution will not proceed
until the `.Future` is done.
.. versionadded:: 3.1
Asynchronous support.
"""
pass
def on_finish(self):
"""Called after the end of a request.
Override this method to perform cleanup, logging, etc.
This method is a counterpart to `prepare`. ``on_finish`` may
not produce any output, as it is called after the response
has been sent to the client.
"""
pass
def on_connection_close(self):
"""Called in async handlers if the client closed the connection.
Override this to clean up resources associated with
long-lived connections. Note that this method is called only if
the connection was closed during asynchronous processing; if you
need to do cleanup after every request override `on_finish`
instead.
Proxies may keep a connection open for a time (perhaps
indefinitely) after the client has gone away, so this method
may not be called promptly after the end user closes their
connection.
"""
if _has_stream_request_body(self.__class__):
if not self.request.body.done():
self.request.body.set_exception(iostream.StreamClosedError())
def clear(self):
"""Resets all headers and content for this response."""
self._headers = httputil.HTTPHeaders({
"Server": "TornadoServer/%s" % tornado.version,
"Content-Type": "text/html; charset=UTF-8",
"Date": httputil.format_timestamp(time.time()),
})
self.set_default_headers()
self._write_buffer = []
self._status_code = 200
self._reason = httputil.responses[200]
def set_default_headers(self):
"""Override this to set HTTP headers at the beginning of the request.
For example, this is the place to set a custom ``Server`` header.
Note that setting such headers in the normal flow of request
processing may not do what you want, since headers may be reset
during error handling.
"""
pass
def set_status(self, status_code, reason=None):
"""Sets the status code for our response.
:arg int status_code: Response status code. If ``reason`` is ``None``,
it must be present in `httplib.responses <http.client.responses>`.
:arg string reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from
`httplib.responses <http.client.responses>`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
try:
self._reason = httputil.responses[status_code]
except KeyError:
raise ValueError("unknown status code %d", status_code)
def get_status(self):
"""Returns the status code for our response."""
return self._status_code
def set_header(self, name, value):
"""Sets the given response header name and value.
If a datetime is given, we automatically format it according to the
HTTP specification. If the value is not a string, we convert it to
a string. All header values are then encoded as UTF-8.
"""
self._headers[name] = self._convert_header_value(value)
def add_header(self, name, value):
"""Adds the given response header and value.
Unlike `set_header`, `add_header` may be called multiple times
to return multiple values for the same header.
"""
self._headers.add(name, self._convert_header_value(value))
def clear_header(self, name):
"""Clears an outgoing header, undoing a previous `set_header` call.
Note that this method does not apply to multi-valued headers
set by `add_header`.
"""
if name in self._headers:
del self._headers[name]
_INVALID_HEADER_CHAR_RE = re.compile(br"[\x00-\x1f]")
def _convert_header_value(self, value):
if isinstance(value, bytes):
pass
elif isinstance(value, unicode_type):
value = value.encode('utf-8')
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
return httputil.format_timestamp(value)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request. Also cap length to
# prevent obviously erroneous values.
if (len(value) > 4000 or
RequestHandler._INVALID_HEADER_CHAR_RE.search(value)):
raise ValueError("Unsafe header value %r", value)
return value
_ARG_DEFAULT = []
def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
"""
return self._get_argument(name, default, self.request.arguments, strip)
def get_arguments(self, name, strip=True):
"""Returns a list of the arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
"""
return self._get_arguments(name, self.request.arguments, strip)
def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request body.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default, self.request.body_arguments, strip)
def get_body_arguments(self, name, strip=True):
"""Returns a list of the body arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.body_arguments, strip)
def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request query string.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default, self.request.query_arguments, strip)
def get_query_arguments(self, name, strip=True):
"""Returns a list of the query arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.query_arguments, strip)
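    # A minimal sketch of the argument accessors above inside a handler's
    # ``post``; the field names are placeholders chosen only for illustration.
    #
    #   def post(self):
    #       name = self.get_argument("name")               # required; raises MissingArgumentError
    #       age = self.get_argument("age", default=None)   # optional
    #       tags = self.get_arguments("tag")               # always a (possibly empty) list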
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is self._ARG_DEFAULT:
raise MissingArgumentError(name)
return default
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
# Get rid of any weird control chars (unless decoding gave
# us bytes, in which case leave it alone)
v = RequestHandler._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
values.append(v)
return values
def decode_argument(self, value, name=None):
"""Decodes an argument from the request.
The argument has been percent-decoded and is now a byte string.
By default, this method decodes the argument as utf-8 and returns
a unicode string, but this may be overridden in subclasses.
This method is used as a filter for both `get_argument()` and for
values extracted from the url and passed to `get()`/`post()`/etc.
The name of the argument is provided if known, but may be None
(e.g. for unnamed groups in the url regex).
"""
try:
return _unicode(value)
except UnicodeDecodeError:
raise HTTPError(400, "Invalid unicode in %s: %r" %
(name or "url", value[:40]))
@property
def cookies(self):
"""An alias for `self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
return self.request.cookies
def get_cookie(self, name, default=None):
"""Gets the value of the cookie with the given name, else default."""
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None, **kwargs):
"""Sets the given cookie name/value with the given options.
Additional keyword arguments are set on the Cookie.Morsel
directly.
See http://docs.python.org/library/cookie.html#morsel-objects
for available attributes.
"""
# The cookie library only accepts type str, in both python 2 and 3
name = escape.native_str(name)
value = escape.native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookie"):
self._new_cookie = Cookie.SimpleCookie()
if name in self._new_cookie:
del self._new_cookie[name]
self._new_cookie[name] = value
morsel = self._new_cookie[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
morsel["expires"] = httputil.format_timestamp(expires)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == 'max_age':
k = 'max-age'
morsel[k] = v
def clear_cookie(self, name, path="/", domain=None):
"""Deletes the cookie with the given name.
Due to limitations of the cookie protocol, you must pass the same
path and domain to clear a cookie as were used when that cookie
was set (but there is no way to find out on the server side
which values were used for a given cookie).
"""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires,
domain=domain)
def clear_all_cookies(self, path="/", domain=None):
"""Deletes all the cookies the user sent with this request.
See `clear_cookie` for more information on the path and domain
parameters.
.. versionchanged:: 3.2
Added the ``path`` and ``domain`` parameters.
"""
for name in self.request.cookies:
self.clear_cookie(name, path=path, domain=domain)
def set_secure_cookie(self, name, value, expires_days=30, version=None,
**kwargs):
"""Signs and timestamps a cookie so it cannot be forged.
You must specify the ``cookie_secret`` setting in your Application
to use this method. It should be a long, random sequence of bytes
to be used as the HMAC secret for the signature.
To read a cookie set with this method, use `get_secure_cookie()`.
Note that the ``expires_days`` parameter sets the lifetime of the
cookie in the browser, but is independent of the ``max_age_days``
parameter to `get_secure_cookie`.
Secure cookies may contain arbitrary byte values, not just unicode
strings (unlike regular cookies)
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.set_cookie(name, self.create_signed_value(name, value,
version=version),
expires_days=expires_days, **kwargs)
def create_signed_value(self, name, value, version=None):
"""Signs and timestamps a string so it cannot be forged.
Normally used via set_secure_cookie, but provided as a separate
method for non-cookie uses. To decode a value not stored
as a cookie use the optional value argument to get_secure_cookie.
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.require_setting("cookie_secret", "secure cookies")
return create_signed_value(self.application.settings["cookie_secret"],
name, value, version=version)
def get_secure_cookie(self, name, value=None, max_age_days=31,
min_version=None):
"""Returns the given signed cookie if it validates, or None.
The decoded cookie value is returned as a byte string (unlike
`get_cookie`).
.. versionchanged:: 3.2.1
Added the ``min_version`` argument. Introduced cookie version 2;
both versions 1 and 2 are accepted by default.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return decode_signed_value(self.application.settings["cookie_secret"],
name, value, max_age_days=max_age_days,
min_version=min_version)
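    # A minimal sketch of signed-cookie round-tripping with the two methods
    # above; the cookie name and secret are placeholders chosen only for
    # illustration, and the application must be constructed with a
    # ``cookie_secret`` setting, e.g.
    # Application(..., cookie_secret="__LONG_RANDOM_VALUE__").
    #
    #   def post(self):
    #       self.set_secure_cookie("user", self.get_argument("name"))
    #
    #   def get(self):
    #       user = self.get_secure_cookie("user")  # bytes, or None if missing/invalid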
def redirect(self, url, permanent=False, status=None):
"""Sends a redirect to the given (optionally relative) URL.
If the ``status`` argument is specified, that value is used as the
HTTP status code; otherwise either 301 (permanent) or 302
(temporary) is chosen based on the ``permanent`` argument.
The default is 302 (temporary).
"""
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, int) and 300 <= status <= 399
self.set_status(status)
self.set_header("Location", urlparse.urljoin(utf8(self.request.uri),
utf8(url)))
self.finish()
def write(self, chunk):
"""Writes the given chunk to the output buffer.
To write the output to the network, use the flush() method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
set_header *after* calling write()).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
https://github.com/facebook/tornado/issues/1009
"""
if self._finished:
raise RuntimeError("Cannot write() after finish(). May be caused "
"by using async operations without the "
"@asynchronous decorator.")
if not isinstance(chunk, (bytes, unicode_type, dict)):
raise TypeError("write() only accepts bytes, unicode, and dict objects")
if isinstance(chunk, dict):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
chunk = utf8(chunk)
self._write_buffer.append(chunk)
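    # A minimal sketch of the JSON behaviour described above: a dict (never a
    # bare list) is serialized and the Content-Type set accordingly. The
    # payload is a placeholder chosen only for illustration.
    #
    #   def get(self):
    #       self.write({"items": [1, 2, 3]})  # Content-Type: application/json; charset=UTF-8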
def render(self, template_name, **kwargs):
"""Renders the template with the given arguments as the response."""
html = self.render_string(template_name, **kwargs)
# Insert the additional JS and CSS added by the modules on the page
js_embed = []
js_files = []
css_embed = []
css_files = []
html_heads = []
html_bodies = []
for module in getattr(self, "_active_modules", {}).values():
embed_part = module.embedded_javascript()
if embed_part:
js_embed.append(utf8(embed_part))
file_part = module.javascript_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
js_files.append(file_part)
else:
js_files.extend(file_part)
embed_part = module.embedded_css()
if embed_part:
css_embed.append(utf8(embed_part))
file_part = module.css_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes)):
css_files.append(file_part)
else:
css_files.extend(file_part)
head_part = module.html_head()
if head_part:
html_heads.append(utf8(head_part))
body_part = module.html_body()
if body_part:
html_bodies.append(utf8(body_part))
def is_absolute(path):
return any(path.startswith(x) for x in ["/", "http:", "https:"])
if js_files:
# Maintain order of JavaScript files given by modules
paths = []
unique_paths = set()
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
js = ''.join('<script src="' + escape.xhtml_escape(p) +
'" type="text/javascript"></script>'
for p in paths)
sloc = html.rindex(b'</body>')
html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
if js_embed:
js = b'<script type="text/javascript">\n//<![CDATA[\n' + \
b'\n'.join(js_embed) + b'\n//]]>\n</script>'
sloc = html.rindex(b'</body>')
html = html[:sloc] + js + b'\n' + html[sloc:]
if css_files:
paths = []
unique_paths = set()
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
css = ''.join('<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths)
hloc = html.index(b'</head>')
html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
if css_embed:
css = b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
b'\n</style>'
hloc = html.index(b'</head>')
html = html[:hloc] + css + b'\n' + html[hloc:]
if html_heads:
hloc = html.index(b'</head>')
html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
if html_bodies:
hloc = html.index(b'</body>')
html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
self.finish(html)
def render_string(self, template_name, **kwargs):
"""Generate the given template with the given arguments.
We return the generated byte string (in utf8). To generate and
write a template as a response, use render() above.
"""
# If no template_path is specified, use the path of the calling file
template_path = self.get_template_path()
if not template_path:
frame = sys._getframe(0)
web_file = frame.f_code.co_filename
while frame.f_code.co_filename == web_file:
frame = frame.f_back
template_path = os.path.dirname(frame.f_code.co_filename)
with RequestHandler._template_loader_lock:
if template_path not in RequestHandler._template_loaders:
loader = self.create_template_loader(template_path)
RequestHandler._template_loaders[template_path] = loader
else:
loader = RequestHandler._template_loaders[template_path]
t = loader.load(template_name)
namespace = self.get_template_namespace()
namespace.update(kwargs)
return t.generate(**namespace)
def get_template_namespace(self):
"""Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
"""
namespace = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.reverse_url
)
namespace.update(self.ui)
return namespace
def create_template_loader(self, template_path):
"""Returns a new template loader for the given path.
May be overridden by subclasses. By default returns a
directory-based loader on the given path, using the
``autoescape`` application setting. If a ``template_loader``
application setting is supplied, uses that instead.
"""
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
# autoescape=None means "no escaping", so we have to be sure
# to only pass this kwarg if the user asked for it.
kwargs["autoescape"] = settings["autoescape"]
return template.Loader(template_path, **kwargs)
def flush(self, include_footers=False, callback=None):
"""Flushes the current output buffer to the network.
The ``callback`` argument, if given, can be used for flow control:
it will be run when all flushed data has been written to the socket.
Note that only one flush callback can be outstanding at a time;
if another flush occurs before the previous flush's callback
has been run, the previous callback will be discarded.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
"""
chunk = b"".join(self._write_buffer)
self._write_buffer = []
if not self._headers_written:
self._headers_written = True
for transform in self._transforms:
self._status_code, self._headers, chunk = \
transform.transform_first_chunk(
self._status_code, self._headers, chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method == "HEAD":
chunk = None
# Finalize the cookie headers (which have been stored in a side
# object so an outgoing cookie could be overwritten before it
# is sent).
if hasattr(self, "_new_cookie"):
for cookie in self._new_cookie.values():
self.add_header("Set-Cookie", cookie.OutputString(None))
start_line = httputil.ResponseStartLine(self.request.version,
self._status_code,
self._reason)
return self.request.connection.write_headers(
start_line, self._headers, chunk, callback=callback)
else:
for transform in self._transforms:
chunk = transform.transform_chunk(chunk, include_footers)
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method != "HEAD":
return self.request.connection.write(chunk, callback=callback)
else:
future = Future()
future.set_result(None)
return future
def finish(self, chunk=None):
"""Finishes this response, ending the HTTP request."""
if self._finished:
raise RuntimeError("finish() called twice. May be caused "
"by using async operations without the "
"@asynchronous decorator.")
if chunk is not None:
self.write(chunk)
# Automatically support ETags and add the Content-Length header if
# we have not flushed any content yet.
if not self._headers_written:
if (self._status_code == 200 and
self.request.method in ("GET", "HEAD") and
"Etag" not in self._headers):
self.set_etag_header()
if self.check_etag_header():
self._write_buffer = []
self.set_status(304)
if self._status_code == 304:
assert not self._write_buffer, "Cannot send body with 304"
self._clear_headers_for_304()
elif "Content-Length" not in self._headers:
content_length = sum(len(part) for part in self._write_buffer)
self.set_header("Content-Length", content_length)
if hasattr(self.request, "connection"):
# Now that the request is finished, clear the callback we
# set on the HTTPConnection (which would otherwise prevent the
# garbage collection of the RequestHandler when there
# are keepalive connections)
self.request.connection.set_close_callback(None)
self.flush(include_footers=True)
self.request.finish()
self._log()
self._finished = True
self.on_finish()
# Break up a reference cycle between this handler and the
# _ui_module closures to allow for faster GC on CPython.
self.ui = None
def send_error(self, status_code=500, **kwargs):
"""Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`.
"""
if self._headers_written:
gen_log.error("Cannot send error response after headers written")
if not self._finished:
self.finish()
return
self.clear()
reason = kwargs.get('reason')
if 'exc_info' in kwargs:
exception = kwargs['exc_info'][1]
if isinstance(exception, HTTPError) and exception.reason:
reason = exception.reason
self.set_status(status_code, reason=reason)
try:
self.write_error(status_code, **kwargs)
except Exception:
app_log.error("Uncaught exception in write_error", exc_info=True)
if not self._finished:
self.finish()
def write_error(self, status_code, **kwargs):
"""Override to implement custom error pages.
``write_error`` may call `write`, `render`, `set_header`, etc
to produce output as usual.
If this error was caused by an uncaught exception (including
HTTPError), an ``exc_info`` triple will be available as
``kwargs["exc_info"]``. Note that this exception may not be
the "current" exception for purposes of methods like
``sys.exc_info()`` or ``traceback.format_exc``.
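        A minimal sketch of a custom JSON error page (the handler name is
        illustrative; any handler could override ``write_error`` this way)::
            class JSONErrorHandler(RequestHandler):
                def write_error(self, status_code, **kwargs):
                    self.finish({"error": status_code,
                                 "message": self._reason})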
"""
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
self.set_header('Content-Type', 'text/plain')
for line in traceback.format_exception(*kwargs["exc_info"]):
self.write(line)
self.finish()
else:
self.finish("<html><title>%(code)d: %(message)s</title>"
"<body>%(code)d: %(message)s</body></html>" % {
"code": status_code,
"message": self._reason,
})
@property
def locale(self):
"""The locale for the current session.
Determined by either `get_user_locale`, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or `get_browser_locale`, which uses the ``Accept-Language``
header.
        .. versionchanged:: 4.1
Added a property setter.
"""
if not hasattr(self, "_locale"):
self._locale = self.get_user_locale()
if not self._locale:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale
@locale.setter
def locale(self, value):
self._locale = value
def get_user_locale(self):
"""Override to determine the locale from the authenticated user.
If None is returned, we fall back to `get_browser_locale()`.
This method should return a `tornado.locale.Locale` object,
most likely obtained via a call like ``tornado.locale.get("en")``
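        A sketch of a typical override (it assumes the user object carries a
        ``locale`` attribute, which is not part of Tornado itself)::
            def get_user_locale(self):
                user = self.current_user
                if user and user.locale:
                    return tornado.locale.get(user.locale)
                return None  # fall back to get_browser_locale()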
"""
return None
def get_browser_locale(self, default="en_US"):
"""Determines the user's locale from ``Accept-Language`` header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
codes = [l[0] for l in locales]
return locale.get(*codes)
return locale.get(default)
@property
def current_user(self):
"""The authenticated user for this request.
This is a cached version of `get_current_user`, which you can
override to set the user based on, e.g., a cookie. If that
method is not overridden, this method always returns None.
We lazy-load the current user the first time this method is called
and cache the result after that.
"""
if not hasattr(self, "_current_user"):
self._current_user = self.get_current_user()
return self._current_user
@current_user.setter
def current_user(self, value):
self._current_user = value
def get_current_user(self):
"""Override to determine the current user from, e.g., a cookie."""
return None
def get_login_url(self):
"""Override to customize the login URL based on the request.
By default, we use the ``login_url`` application setting.
"""
self.require_setting("login_url", "@tornado.web.authenticated")
return self.application.settings["login_url"]
def get_template_path(self):
"""Override to customize template path for each handler.
By default, we use the ``template_path`` application setting.
Return None to load templates relative to the calling file.
"""
return self.application.settings.get("template_path")
@property
def xsrf_token(self):
"""The XSRF-prevention token for the current user/session.
To prevent cross-site request forgery, we set an '_xsrf' cookie
and include the same '_xsrf' value as an argument with all POST
requests. If the two do not match, we reject the form submission
as a potential forgery.
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
.. versionchanged:: 3.2.2
           The xsrf token will now have a random mask applied in every
request, which makes it safe to include the token in pages
that are compressed. See http://breachattack.com for more
information on the issue fixed by this change. Old (version 1)
cookies will be converted to version 2 when this method is called
unless the ``xsrf_cookie_version`` `Application` setting is
set to 1.
"""
if not hasattr(self, "_xsrf_token"):
version, token, timestamp = self._get_raw_xsrf_token()
output_version = self.settings.get("xsrf_cookie_version", 2)
if output_version == 1:
self._xsrf_token = binascii.b2a_hex(token)
elif output_version == 2:
mask = os.urandom(4)
self._xsrf_token = b"|".join([
b"2",
binascii.b2a_hex(mask),
binascii.b2a_hex(_websocket_mask(mask, token)),
utf8(str(int(timestamp)))])
else:
raise ValueError("unknown xsrf cookie version %d",
output_version)
if version is None:
expires_days = 30 if self.current_user else None
self.set_cookie("_xsrf", self._xsrf_token,
expires_days=expires_days)
return self._xsrf_token
def _get_raw_xsrf_token(self):
"""Read or generate the xsrf token in its raw form.
The raw_xsrf_token is a tuple containing:
* version: the version of the cookie from which this token was read,
or None if we generated a new token in this request.
* token: the raw token data; random (non-ascii) bytes.
* timestamp: the time this token was generated (will not be accurate
for version 1 cookies)
"""
if not hasattr(self, '_raw_xsrf_token'):
cookie = self.get_cookie("_xsrf")
if cookie:
version, token, timestamp = self._decode_xsrf_token(cookie)
else:
version, token, timestamp = None, None, None
if token is None:
version = None
token = os.urandom(16)
timestamp = time.time()
self._raw_xsrf_token = (version, token, timestamp)
return self._raw_xsrf_token
def _decode_xsrf_token(self, cookie):
"""Convert a cookie string into a the tuple form returned by
_get_raw_xsrf_token.
"""
m = _signed_value_version_re.match(utf8(cookie))
if m:
version = int(m.group(1))
if version == 2:
_, mask, masked_token, timestamp = cookie.split("|")
mask = binascii.a2b_hex(utf8(mask))
token = _websocket_mask(
mask, binascii.a2b_hex(utf8(masked_token)))
timestamp = int(timestamp)
return version, token, timestamp
else:
# Treat unknown versions as not present instead of failing.
return None, None, None
else:
version = 1
try:
token = binascii.a2b_hex(utf8(cookie))
except (binascii.Error, TypeError):
token = utf8(cookie)
# We don't have a usable timestamp in older versions.
timestamp = int(time.time())
return (version, token, timestamp)
def check_xsrf_cookie(self):
"""Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
To prevent cross-site request forgery, we set an ``_xsrf``
cookie and include the same value as a non-cookie
field with all ``POST`` requests. If the two do not match, we
reject the form submission as a potential forgery.
The ``_xsrf`` value may be set as either a form field named ``_xsrf``
or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
(the latter is accepted for compatibility with Django).
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
Prior to release 1.1.1, this check was ignored if the HTTP header
``X-Requested-With: XMLHTTPRequest`` was present. This exception
has been shown to be insecure and has been removed. For more
information please see
http://www.djangoproject.com/weblog/2011/feb/08/security/
http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
.. versionchanged:: 3.2.2
Added support for cookie version 2. Both versions 1 and 2 are
supported.
"""
token = (self.get_argument("_xsrf", None) or
self.request.headers.get("X-Xsrftoken") or
self.request.headers.get("X-Csrftoken"))
if not token:
raise HTTPError(403, "'_xsrf' argument missing from POST")
_, token, _ = self._decode_xsrf_token(token)
_, expected_token, _ = self._get_raw_xsrf_token()
if not _time_independent_equals(utf8(token), utf8(expected_token)):
raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self):
"""An HTML ``<input/>`` element to be included with all POST forms.
It defines the ``_xsrf`` input value, which we check on all POST
requests to prevent cross-site request forgery. If you have set
the ``xsrf_cookies`` application setting, you must include this
HTML within all of your HTML forms.
In a template, this method should be called with ``{% module
xsrf_form_html() %}``
See `check_xsrf_cookie()` above for more information.
"""
return '<input type="hidden" name="_xsrf" value="' + \
escape.xhtml_escape(self.xsrf_token) + '"/>'
def static_url(self, path, include_host=None, **kwargs):
"""Returns a static URL for the given relative static file path.
This method requires you set the ``static_path`` setting in your
application (which specifies the root directory of your static
files).
This method returns a versioned url (by default appending
``?v=<signature>``), which allows the static files to be
cached indefinitely. This can be disabled by passing
``include_version=False`` (in the default implementation;
other static file implementations are not required to support
this, but they may support other options).
By default this method returns URLs relative to the current
host, but if ``include_host`` is true the URL returned will be
absolute. If this handler has an ``include_host`` attribute,
that value will be used as the default for all `static_url`
calls that do not pass ``include_host`` as a keyword argument.
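        For example (the static file path below is illustrative)::
            url = self.static_url("images/logo.png")
            # -> "/static/images/logo.png?v=<signature>"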
"""
self.require_setting("static_path", "static_url")
get_url = self.settings.get("static_handler_class",
StaticFileHandler).make_static_url
if include_host is None:
include_host = getattr(self, "include_host", False)
if include_host:
base = self.request.protocol + "://" + self.request.host
else:
base = ""
return base + get_url(self.settings, path, **kwargs)
def require_setting(self, name, feature="this feature"):
"""Raises an exception if the given app setting is not defined."""
if not self.application.settings.get(name):
raise Exception("You must define the '%s' setting in your "
"application to use %s" % (name, feature))
def reverse_url(self, name, *args):
"""Alias for `Application.reverse_url`."""
return self.application.reverse_url(name, *args)
def compute_etag(self):
"""Computes the etag header to be used for this request.
By default uses a hash of the content written so far.
May be overridden to provide custom etag implementations,
or may return None to disable tornado's default etag support.
"""
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
return '"%s"' % hasher.hexdigest()
def set_etag_header(self):
"""Sets the response's Etag header using ``self.compute_etag()``.
Note: no header will be set if ``compute_etag()`` returns ``None``.
This method is called automatically when the request is finished.
"""
etag = self.compute_etag()
if etag is not None:
self.set_header("Etag", etag)
def check_etag_header(self):
"""Checks the ``Etag`` header against requests's ``If-None-Match``.
Returns ``True`` if the request's Etag matches and a 304 should be
returned. For example::
self.set_etag_header()
if self.check_etag_header():
self.set_status(304)
return
This method is called automatically when the request is finished,
but may be called earlier for applications that override
`compute_etag` and want to do an early check for ``If-None-Match``
before completing the request. The ``Etag`` header should be set
(perhaps with `set_etag_header`) before calling this method.
"""
etag = self._headers.get("Etag")
inm = utf8(self.request.headers.get("If-None-Match", ""))
return bool(etag and inm and inm.find(etag) >= 0)
def _stack_context_handle_exception(self, type, value, traceback):
try:
# For historical reasons _handle_request_exception only takes
# the exception value instead of the full triple,
# so re-raise the exception to ensure that it's in
# sys.exc_info()
raise_exc_info((type, value, traceback))
except Exception:
self._handle_request_exception(value)
return True
@gen.coroutine
def _execute(self, transforms, *args, **kwargs):
"""Executes this request with the given output transforms."""
self._transforms = transforms
try:
if self.request.method not in self.SUPPORTED_METHODS:
raise HTTPError(405)
self.path_args = [self.decode_argument(arg) for arg in args]
self.path_kwargs = dict((k, self.decode_argument(v, name=k))
for (k, v) in kwargs.items())
# If XSRF cookies are turned on, reject form submissions without
# the proper cookie
if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
self.application.settings.get("xsrf_cookies"):
self.check_xsrf_cookie()
result = self.prepare()
if is_future(result):
result = yield result
if result is not None:
raise TypeError("Expected None, got %r" % result)
if self._prepared_future is not None:
# Tell the Application we've finished with prepare()
# and are ready for the body to arrive.
self._prepared_future.set_result(None)
if self._finished:
return
if _has_stream_request_body(self.__class__):
# In streaming mode request.body is a Future that signals
# the body has been completely received. The Future has no
# result; the data has been passed to self.data_received
# instead.
try:
yield self.request.body
except iostream.StreamClosedError:
return
method = getattr(self, self.request.method.lower())
result = method(*self.path_args, **self.path_kwargs)
if is_future(result):
result = yield result
if result is not None:
raise TypeError("Expected None, got %r" % result)
if self._auto_finish and not self._finished:
self.finish()
except Exception as e:
self._handle_request_exception(e)
if (self._prepared_future is not None and
not self._prepared_future.done()):
# In case we failed before setting _prepared_future, do it
# now (to unblock the HTTP server). Note that this is not
# in a finally block to avoid GC issues prior to Python 3.4.
self._prepared_future.set_result(None)
def data_received(self, chunk):
"""Implement this method to handle streamed request data.
Requires the `.stream_request_body` decorator.
"""
raise NotImplementedError()
def _log(self):
"""Logs the current request.
Sort of deprecated since this functionality was moved to the
Application, but left in place for the benefit of existing apps
that have overridden this method.
"""
self.application.log_request(self)
def _request_summary(self):
return self.request.method + " " + self.request.uri + \
" (" + self.request.remote_ip + ")"
def _handle_request_exception(self, e):
if isinstance(e, Finish):
# Not an error; just finish the request without logging.
if not self._finished:
self.finish()
return
self.log_exception(*sys.exc_info())
if self._finished:
# Extra errors after the request has been finished should
# be logged, but there is no reason to continue to try and
# send a response.
return
if isinstance(e, HTTPError):
if e.status_code not in httputil.responses and not e.reason:
gen_log.error("Bad HTTP status code: %d", e.status_code)
self.send_error(500, exc_info=sys.exc_info())
else:
self.send_error(e.status_code, exc_info=sys.exc_info())
else:
self.send_error(500, exc_info=sys.exc_info())
def log_exception(self, typ, value, tb):
"""Override to customize logging of uncaught exceptions.
By default logs instances of `HTTPError` as warnings without
stack traces (on the ``tornado.general`` logger), and all
other exceptions as errors with stack traces (on the
``tornado.application`` logger).
.. versionadded:: 3.1
"""
if isinstance(value, HTTPError):
if value.log_message:
format = "%d %s: " + value.log_message
args = ([value.status_code, self._request_summary()] +
list(value.args))
gen_log.warning(format, *args)
else:
app_log.error("Uncaught exception %s\n%r", self._request_summary(),
self.request, exc_info=(typ, value, tb))
def _ui_module(self, name, module):
def render(*args, **kwargs):
if not hasattr(self, "_active_modules"):
self._active_modules = {}
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self):
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = ["Allow", "Content-Encoding", "Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Last-Modified"]
for h in headers:
self.clear_header(h)
def asynchronous(method):
"""Wrap request handler methods with this if they are asynchronous.
This decorator is unnecessary if the method is also decorated with
``@gen.coroutine`` (it is legal but unnecessary to use the two
decorators together, in which case ``@asynchronous`` must be
first).
This decorator should only be applied to the :ref:`HTTP verb
methods <verbs>`; its behavior is undefined for any other method.
This decorator does not *make* a method asynchronous; it tells
the framework that the method *is* asynchronous. For this decorator
to be useful the method must (at least sometimes) do something
asynchronous.
If this decorator is given, the response is not finished when the
method returns. It is up to the request handler to call
`self.finish() <RequestHandler.finish>` to finish the HTTP
request. Without this decorator, the request is automatically
finished when the ``get()`` or ``post()`` method returns. Example::
class MyRequestHandler(web.RequestHandler):
@web.asynchronous
def get(self):
http = httpclient.AsyncHTTPClient()
http.fetch("http://friendfeed.com/", self._on_download)
def _on_download(self, response):
self.write("Downloaded!")
self.finish()
.. versionadded:: 3.1
The ability to use ``@gen.coroutine`` without ``@asynchronous``.
"""
# Delay the IOLoop import because it's not available on app engine.
from tornado.ioloop import IOLoop
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
self._auto_finish = False
with stack_context.ExceptionStackContext(
self._stack_context_handle_exception):
result = method(self, *args, **kwargs)
if isinstance(result, Future):
# If @asynchronous is used with @gen.coroutine, (but
# not @gen.engine), we can automatically finish the
# request when the future resolves. Additionally,
# the Future will swallow any exceptions so we need
# to throw them back out to the stack context to finish
# the request.
def future_complete(f):
f.result()
if not self._finished:
self.finish()
IOLoop.current().add_future(result, future_complete)
# Once we have done this, hide the Future from our
# caller (i.e. RequestHandler._when_complete), which
# would otherwise set up its own callback and
# exception handler (resulting in exceptions being
# logged twice).
return None
return result
return wrapper
def stream_request_body(cls):
"""Apply to `RequestHandler` subclasses to enable streaming body support.
This decorator implies the following changes:
* `.HTTPServerRequest.body` is undefined, and body arguments will not
be included in `RequestHandler.get_argument`.
* `RequestHandler.prepare` is called when the request headers have been
read instead of after the entire body has been read.
* The subclass must define a method ``data_received(self, data):``, which
will be called zero or more times as data is available. Note that
if the request has an empty body, ``data_received`` may not be called.
* ``prepare`` and ``data_received`` may return Futures (such as via
      ``@gen.coroutine``), in which case the next method will not be called
until those futures have completed.
* The regular HTTP method (``post``, ``put``, etc) will be called after
the entire body has been read.
There is a subtle interaction between ``data_received`` and asynchronous
    ``prepare``: The first call to ``data_received`` may occur at any point
after the call to ``prepare`` has returned *or yielded*.
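    A minimal sketch (the handler name and the byte counting are purely
    illustrative)::
        @stream_request_body
        class UploadHandler(RequestHandler):
            def prepare(self):
                self.bytes_read = 0
            def data_received(self, chunk):
                self.bytes_read += len(chunk)
            def post(self):
                self.write("received %d bytes" % self.bytes_read)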
"""
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r", cls)
cls._stream_request_body = True
return cls
def _has_stream_request_body(cls):
if not issubclass(cls, RequestHandler):
raise TypeError("expected subclass of RequestHandler, got %r", cls)
return getattr(cls, '_stream_request_body', False)
def removeslash(method):
"""Use this decorator to remove trailing slashes from the request path.
For example, a request to ``/foo/`` would redirect to ``/foo`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/*'`` in conjunction with using the decorator.
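    A sketch of typical use (``ThingsHandler`` is illustrative)::
        class ThingsHandler(RequestHandler):
            @removeslash
            def get(self):
                self.write("things")
        application = web.Application([
            (r"/things/*", ThingsHandler),
        ])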
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path.rstrip("/")
if uri: # don't try to redirect '/' to ''
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
else:
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
def addslash(method):
"""Use this decorator to add a missing trailing slash to the request path.
For example, a request to ``/foo`` would redirect to ``/foo/`` with this
decorator. Your request handler mapping should use a regular expression
like ``r'/foo/?'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD"):
uri = self.request.path + "/"
if self.request.query:
uri += "?" + self.request.query
self.redirect(uri, permanent=True)
return
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
class Application(httputil.HTTPServerConnectionDelegate):
"""A collection of request handlers that make up a web application.
Instances of this class are callable and can be passed directly to
HTTPServer to serve the application::
application = web.Application([
(r"/", MainPageHandler),
])
http_server = httpserver.HTTPServer(application)
http_server.listen(8080)
ioloop.IOLoop.instance().start()
The constructor for this class takes in a list of `URLSpec` objects
or (regexp, request_class) tuples. When we receive requests, we
iterate over the list in order and instantiate an instance of the
first request class whose regexp matches the request path.
The request class can be specified as either a class object or a
(fully-qualified) name.
Each tuple can contain additional elements, which correspond to the
    arguments to the `URLSpec` constructor. (Prior to Tornado 3.2,
only tuples of two or three elements were allowed).
A dictionary may be passed as the third element of the tuple,
which will be used as keyword arguments to the handler's
constructor and `~RequestHandler.initialize` method. This pattern
is used for the `StaticFileHandler` in this example (note that a
`StaticFileHandler` can be installed automatically with the
static_path setting described below)::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
We support virtual hosts with the `add_handlers` method, which takes in
a host regular expression as the first argument::
application.add_handlers(r"www\.myhost\.com", [
(r"/article/([0-9]+)", ArticleHandler),
])
You can serve static files by sending the ``static_path`` setting
as a keyword argument. We will serve those files from the
``/static/`` URI (this is configurable with the
``static_url_prefix`` setting), and we will serve ``/favicon.ico``
and ``/robots.txt`` from the same directory. A custom subclass of
`StaticFileHandler` can be specified with the
``static_handler_class`` setting.
"""
def __init__(self, handlers=None, default_host="", transforms=None,
**settings):
if transforms is None:
self.transforms = []
if settings.get("compress_response") or settings.get("gzip"):
self.transforms.append(GZipContentEncoding)
else:
self.transforms = transforms
self.handlers = []
self.named_handlers = {}
self.default_host = default_host
self.settings = settings
self.ui_modules = {'linkify': _linkify,
'xsrf_form_html': _xsrf_form_html,
'Template': TemplateModule,
}
self.ui_methods = {}
self._load_ui_modules(settings.get("ui_modules", {}))
self._load_ui_methods(settings.get("ui_methods", {}))
if self.settings.get("static_path"):
path = self.settings["static_path"]
handlers = list(handlers or [])
static_url_prefix = settings.get("static_url_prefix",
"/static/")
static_handler_class = settings.get("static_handler_class",
StaticFileHandler)
static_handler_args = settings.get("static_handler_args", {})
static_handler_args['path'] = path
for pattern in [re.escape(static_url_prefix) + r"(.*)",
r"/(favicon\.ico)", r"/(robots\.txt)"]:
handlers.insert(0, (pattern, static_handler_class,
static_handler_args))
if handlers:
self.add_handlers(".*$", handlers)
if self.settings.get('debug'):
self.settings.setdefault('autoreload', True)
self.settings.setdefault('compiled_template_cache', False)
self.settings.setdefault('static_hash_cache', False)
self.settings.setdefault('serve_traceback', True)
# Automatically reload modified modules
if self.settings.get('autoreload'):
from tornado import autoreload
autoreload.start()
def listen(self, port, address="", **kwargs):
"""Starts an HTTP server for this application on the given port.
This is a convenience alias for creating an `.HTTPServer`
object and calling its listen method. Keyword arguments not
supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
`.HTTPServer` constructor. For advanced uses
(e.g. multi-process mode), do not use this method; create an
`.HTTPServer` and call its
`.TCPServer.bind`/`.TCPServer.start` methods directly.
Note that after calling this method you still need to call
``IOLoop.instance().start()`` to start the server.
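        A sketch of the simple single-process case (``MainHandler`` is
        illustrative)::
            from tornado.ioloop import IOLoop
            app = Application([(r"/", MainHandler)])
            app.listen(8888)
            IOLoop.instance().start()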
"""
# import is here rather than top level because HTTPServer
# is not importable on appengine
from tornado.httpserver import HTTPServer
server = HTTPServer(self, **kwargs)
server.listen(port, address)
def add_handlers(self, host_pattern, host_handlers):
"""Appends the given handlers to our handler list.
Host patterns are processed sequentially in the order they were
added. All matching patterns will be considered.
"""
if not host_pattern.endswith("$"):
host_pattern += "$"
handlers = []
# The handlers with the wildcard host_pattern are a special
# case - they're added in the constructor but should have lower
# precedence than the more-precise handlers added later.
# If a wildcard handler group exists, it should always be last
# in the list, so insert new groups just before it.
if self.handlers and self.handlers[-1][0].pattern == '.*$':
self.handlers.insert(-1, (re.compile(host_pattern), handlers))
else:
self.handlers.append((re.compile(host_pattern), handlers))
for spec in host_handlers:
if isinstance(spec, (tuple, list)):
assert len(spec) in (2, 3, 4)
spec = URLSpec(*spec)
handlers.append(spec)
if spec.name:
if spec.name in self.named_handlers:
app_log.warning(
"Multiple handlers named %s; replacing previous value",
spec.name)
self.named_handlers[spec.name] = spec
def add_transform(self, transform_class):
self.transforms.append(transform_class)
def _get_host_handlers(self, request):
host = request.host.lower().split(':')[0]
matches = []
for pattern, handlers in self.handlers:
if pattern.match(host):
matches.extend(handlers)
# Look for default host if not behind load balancer (for debugging)
if not matches and "X-Real-Ip" not in request.headers:
for pattern, handlers in self.handlers:
if pattern.match(self.default_host):
matches.extend(handlers)
return matches or None
def _load_ui_methods(self, methods):
if isinstance(methods, types.ModuleType):
self._load_ui_methods(dict((n, getattr(methods, n))
for n in dir(methods)))
elif isinstance(methods, list):
for m in methods:
self._load_ui_methods(m)
else:
for name, fn in methods.items():
if not name.startswith("_") and hasattr(fn, "__call__") \
and name[0].lower() == name[0]:
self.ui_methods[name] = fn
def _load_ui_modules(self, modules):
if isinstance(modules, types.ModuleType):
self._load_ui_modules(dict((n, getattr(modules, n))
for n in dir(modules)))
elif isinstance(modules, list):
for m in modules:
self._load_ui_modules(m)
else:
assert isinstance(modules, dict)
for name, cls in modules.items():
try:
if issubclass(cls, UIModule):
self.ui_modules[name] = cls
except TypeError:
pass
def start_request(self, connection):
# Modern HTTPServer interface
return _RequestDispatcher(self, connection)
def __call__(self, request):
# Legacy HTTPServer interface
dispatcher = _RequestDispatcher(self, None)
dispatcher.set_request(request)
return dispatcher.execute()
def reverse_url(self, name, *args):
"""Returns a URL path for handler named ``name``
The handler must be added to the application as a named `URLSpec`.
Args will be substituted for capturing groups in the `URLSpec` regex.
They will be converted to strings if necessary, encoded as utf8,
and url-escaped.
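        A sketch (the handler and name are illustrative; ``url`` is the
        module-level alias for `URLSpec`)::
            app = Application([
                url(r"/article/([0-9]+)", ArticleHandler, name="article"),
            ])
            app.reverse_url("article", 42)  # -> "/article/42"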
"""
if name in self.named_handlers:
return self.named_handlers[name].reverse(*args)
raise KeyError("%s not found in named urls" % name)
def log_request(self, handler):
"""Writes a completed HTTP request to the logs.
By default writes to the python root logger. To change
this behavior either subclass Application and override this method,
or pass a function in the application settings dictionary as
``log_function``.
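        A sketch of a custom ``log_function`` (the logging choices are
        illustrative)::
            from tornado.log import access_log
            def log_request(handler):
                access_log.info("%d %s %.2fms", handler.get_status(),
                                handler._request_summary(),
                                1000.0 * handler.request.request_time())
            app = Application(handlers, log_function=log_request)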
"""
if "log_function" in self.settings:
self.settings["log_function"](handler)
return
if handler.get_status() < 400:
log_method = access_log.info
elif handler.get_status() < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * handler.request.request_time()
log_method("%d %s %.2fms", handler.get_status(),
handler._request_summary(), request_time)
class _RequestDispatcher(httputil.HTTPMessageDelegate):
def __init__(self, application, connection):
self.application = application
self.connection = connection
self.request = None
self.chunks = []
self.handler_class = None
self.handler_kwargs = None
self.path_args = []
self.path_kwargs = {}
def headers_received(self, start_line, headers):
self.set_request(httputil.HTTPServerRequest(
connection=self.connection, start_line=start_line, headers=headers))
if self.stream_request_body:
self.request.body = Future()
return self.execute()
def set_request(self, request):
self.request = request
self._find_handler()
self.stream_request_body = _has_stream_request_body(self.handler_class)
def _find_handler(self):
# Identify the handler to use as soon as we have the request.
# Save url path arguments for later.
app = self.application
handlers = app._get_host_handlers(self.request)
if not handlers:
self.handler_class = RedirectHandler
self.handler_kwargs = dict(url="%s://%s/" % (self.request.protocol, app.default_host))
return
for spec in handlers:
match = spec.regex.match(self.request.path)
if match:
self.handler_class = spec.handler_class
self.handler_kwargs = spec.kwargs
if spec.regex.groups:
# Pass matched groups to the handler. Since
# match.groups() includes both named and
# unnamed groups, we want to use either groups
# or groupdict but not both.
if spec.regex.groupindex:
self.path_kwargs = dict(
(str(k), _unquote_or_none(v))
for (k, v) in match.groupdict().items())
else:
self.path_args = [_unquote_or_none(s)
for s in match.groups()]
return
if app.settings.get('default_handler_class'):
self.handler_class = app.settings['default_handler_class']
self.handler_kwargs = app.settings.get(
'default_handler_args', {})
else:
self.handler_class = ErrorHandler
self.handler_kwargs = dict(status_code=404)
def data_received(self, data):
if self.stream_request_body:
return self.handler.data_received(data)
else:
self.chunks.append(data)
def finish(self):
if self.stream_request_body:
self.request.body.set_result(None)
else:
self.request.body = b''.join(self.chunks)
self.request._parse_body()
self.execute()
def on_connection_close(self):
if self.stream_request_body:
self.handler.on_connection_close()
else:
self.chunks = None
def execute(self):
# If template cache is disabled (usually in the debug mode),
# re-compile templates and reload static files on every
# request so you don't need to restart to see changes
if not self.application.settings.get("compiled_template_cache", True):
with RequestHandler._template_loader_lock:
for loader in RequestHandler._template_loaders.values():
loader.reset()
if not self.application.settings.get('static_hash_cache', True):
StaticFileHandler.reset()
self.handler = self.handler_class(self.application, self.request,
**self.handler_kwargs)
transforms = [t(self.request) for t in self.application.transforms]
if self.stream_request_body:
self.handler._prepared_future = Future()
# Note that if an exception escapes handler._execute it will be
# trapped in the Future it returns (which we are ignoring here).
# However, that shouldn't happen because _execute has a blanket
# except handler, and we cannot easily access the IOLoop here to
# call add_future.
self.handler._execute(transforms, *self.path_args, **self.path_kwargs)
# If we are streaming the request body, then execute() is finished
# when the handler has prepared to receive the body. If not,
# it doesn't matter when execute() finishes (so we return None)
return self.handler._prepared_future
class HTTPError(Exception):
"""An exception that will turn into an HTTP error response.
Raising an `HTTPError` is a convenient alternative to calling
`RequestHandler.send_error` since it automatically ends the
current function.
To customize the response sent with an `HTTPError`, override
`RequestHandler.write_error`.
:arg int status_code: HTTP status code. Must be listed in
`httplib.responses <http.client.responses>` unless the ``reason``
keyword argument is given.
:arg string log_message: Message to be written to the log for this error
(will not be shown to the user unless the `Application` is in debug
mode). May contain ``%s``-style placeholders, which will be filled
in with remaining positional parameters.
:arg string reason: Keyword-only argument. The HTTP "reason" phrase
to pass in the status line along with ``status_code``. Normally
        determined automatically from ``status_code``; it must be supplied
        if a non-standard numeric code is used.
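    A sketch of typical use inside a handler (the message and argument
    are illustrative)::
        if not self.current_user:
            raise HTTPError(403, "anonymous access to %s forbidden",
                            self.request.path)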
"""
def __init__(self, status_code, log_message=None, *args, **kwargs):
self.status_code = status_code
self.log_message = log_message
self.args = args
self.reason = kwargs.get('reason', None)
def __str__(self):
message = "HTTP %d: %s" % (
self.status_code,
self.reason or httputil.responses.get(self.status_code, 'Unknown'))
if self.log_message:
return message + " (" + (self.log_message % self.args) + ")"
else:
return message
class Finish(Exception):
"""An exception that ends the request without producing an error response.
When `Finish` is raised in a `RequestHandler`, the request will end
(calling `RequestHandler.finish` if it hasn't already been called),
but the outgoing response will not be modified and the error-handling
methods (including `RequestHandler.write_error`) will not be called.
This can be a more convenient way to implement custom error pages
than overriding ``write_error`` (especially in library code)::
if self.current_user is None:
self.set_status(401)
self.set_header('WWW-Authenticate', 'Basic realm="something"')
raise Finish()
"""
pass
class MissingArgumentError(HTTPError):
"""Exception raised by `RequestHandler.get_argument`.
This is a subclass of `HTTPError`, so if it is uncaught a 400 response
code will be used instead of 500 (and a stack trace will not be logged).
.. versionadded:: 3.1
"""
def __init__(self, arg_name):
super(MissingArgumentError, self).__init__(
400, 'Missing argument %s' % arg_name)
self.arg_name = arg_name
class ErrorHandler(RequestHandler):
"""Generates an error response with ``status_code`` for all requests."""
def initialize(self, status_code):
self.set_status(status_code)
def prepare(self):
raise HTTPError(self._status_code)
def check_xsrf_cookie(self):
# POSTs to an ErrorHandler don't actually have side effects,
# so we don't need to check the xsrf token. This allows POSTs
# to the wrong url to return a 404 instead of 403.
pass
class RedirectHandler(RequestHandler):
"""Redirects the client to the given URL for all GET requests.
You should provide the keyword argument ``url`` to the handler, e.g.::
application = web.Application([
(r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
])
"""
def initialize(self, url, permanent=True):
self._url = url
self._permanent = permanent
def get(self):
self.redirect(self._url, permanent=self._permanent)
class StaticFileHandler(RequestHandler):
"""A simple handler that can serve static content from a directory.
A `StaticFileHandler` is configured automatically if you pass the
``static_path`` keyword argument to `Application`. This handler
can be customized with the ``static_url_prefix``, ``static_handler_class``,
and ``static_handler_args`` settings.
To map an additional path to this handler for a static data directory
you would add a line to your application like::
application = web.Application([
(r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The handler constructor requires a ``path`` argument, which specifies the
local root directory of the content to be served.
Note that a capture group in the regex is required to parse the value for
the ``path`` argument to the get() method (different than the constructor
argument above); see `URLSpec` for details.
To maximize the effectiveness of browser caching, this class supports
versioned urls (by default using the argument ``?v=``). If a version
is given, we instruct the browser to cache this file indefinitely.
`make_static_url` (also available as `RequestHandler.static_url`) can
be used to construct a versioned url.
This handler is intended primarily for use in development and light-duty
file serving; for heavy traffic it will be more efficient to use
a dedicated static file server (such as nginx or Apache). We support
the HTTP ``Accept-Ranges`` mechanism to return partial content (because
some browsers require this functionality to be present to seek in
HTML5 audio or video), but this handler should not be used with
files that are too large to fit comfortably in memory.
**Subclassing notes**
This class is designed to be extensible by subclassing, but because
of the way static urls are generated with class methods rather than
instance methods, the inheritance patterns are somewhat unusual.
Be sure to use the ``@classmethod`` decorator when overriding a
    class method. Instance methods may use the attributes ``self.path``,
``self.absolute_path``, and ``self.modified``.
Subclasses should only override methods discussed in this section;
overriding other methods is error-prone. Overriding
``StaticFileHandler.get`` is particularly problematic due to the
tight coupling with ``compute_etag`` and other methods.
To change the way static urls are generated (e.g. to match the behavior
of another server or CDN), override `make_static_url`, `parse_url_path`,
`get_cache_time`, and/or `get_version`.
To replace all interaction with the filesystem (e.g. to serve
static content from a database), override `get_content`,
`get_content_size`, `get_modified_time`, `get_absolute_path`, and
`validate_absolute_path`.
.. versionchanged:: 3.1
Many of the methods for subclasses were added in Tornado 3.1.
"""
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {}
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path, default_filename=None):
self.root = path
self.default_filename = default_filename
@classmethod
def reset(cls):
with cls._lock:
cls._static_hashes = {}
def head(self, path):
return self.get(path, include_body=False)
@gen.coroutine
def get(self, path, include_body=True):
# Set up our path instance variables.
self.path = self.parse_url_path(path)
del path # make sure we don't refer to path instead of self.path again
absolute_path = self.get_absolute_path(self.root, self.path)
self.absolute_path = self.validate_absolute_path(
self.root, absolute_path)
if self.absolute_path is None:
return
self.modified = self.get_modified_time()
self.set_headers()
if self.should_return_304():
self.set_status(304)
return
request_range = None
range_header = self.request.headers.get("Range")
if range_header:
# As per RFC 2616 14.16, if an invalid Range header is specified,
# the request will be treated as if the header didn't exist.
request_range = httputil._parse_request_range(range_header)
size = self.get_content_size()
if request_range:
start, end = request_range
if (start is not None and start >= size) or end == 0:
# As per RFC 2616 14.35.1, a range is not satisfiable only: if
# the first requested byte is equal to or greater than the
# content, or when a suffix with length 0 is specified
self.set_status(416) # Range Not Satisfiable
self.set_header("Content-Type", "text/plain")
self.set_header("Content-Range", "bytes */%s" % (size, ))
return
if start is not None and start < 0:
start += size
if end is not None and end > size:
# Clients sometimes blindly use a large range to limit their
# download size; cap the endpoint at the actual file size.
end = size
# Note: only return HTTP 206 if less than the entire range has been
# requested. Not only is this semantically correct, but Chrome
# refuses to play audio if it gets an HTTP 206 in response to
# ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header("Content-Range",
httputil._get_content_range(start, end, size))
else:
start = end = None
if start is not None and end is not None:
content_length = end - start
elif end is not None:
content_length = end
elif start is not None:
content_length = size - start
else:
content_length = size
self.set_header("Content-Length", content_length)
if include_body:
content = self.get_content(self.absolute_path, start, end)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
yield self.flush()
except iostream.StreamClosedError:
return
else:
assert self.request.method == "HEAD"
def compute_etag(self):
"""Sets the ``Etag`` header based on static url version.
This allows efficient ``If-None-Match`` checks against cached
versions, and sends the correct ``Etag`` for a partial response
(i.e. the same ``Etag`` as the full file).
.. versionadded:: 3.1
"""
version_hash = self._get_cached_version(self.absolute_path)
if not version_hash:
return None
return '"%s"' % (version_hash, )
def set_headers(self):
"""Sets the content and caching headers on the response.
.. versionadded:: 3.1
"""
self.set_header("Accept-Ranges", "bytes")
self.set_etag_header()
if self.modified is not None:
self.set_header("Last-Modified", self.modified)
content_type = self.get_content_type()
if content_type:
self.set_header("Content-Type", content_type)
cache_time = self.get_cache_time(self.path, self.modified, content_type)
if cache_time > 0:
self.set_header("Expires", datetime.datetime.utcnow() +
datetime.timedelta(seconds=cache_time))
self.set_header("Cache-Control", "max-age=" + str(cache_time))
self.set_extra_headers(self.path)
def should_return_304(self):
"""Returns True if the headers indicate that we should return 304.
.. versionadded:: 3.1
"""
if self.check_etag_header():
return True
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if date_tuple is not None:
if_since = datetime.datetime(*date_tuple[:6])
if if_since >= self.modified:
return True
return False
@classmethod
def get_absolute_path(cls, root, path):
"""Returns the absolute location of ``path`` relative to ``root``.
``root`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
This class method may be overridden in subclasses. By default
it returns a filesystem path, but other strings may be used
as long as they are unique and understood by the subclass's
overridden `get_content`.
.. versionadded:: 3.1
"""
abspath = os.path.abspath(os.path.join(root, path))
return abspath
def validate_absolute_path(self, root, absolute_path):
"""Validate and return the absolute path.
``root`` is the configured path for the `StaticFileHandler`,
and ``path`` is the result of `get_absolute_path`
This is an instance method called during request processing,
so it may raise `HTTPError` or use methods like
`RequestHandler.redirect` (return None after redirecting to
halt further processing). This is where 404 errors for missing files
are generated.
This method may modify the path before returning it, but note that
any such modifications will not be understood by `make_static_url`.
In instance methods, this method's result is available as
``self.absolute_path``.
.. versionadded:: 3.1
"""
root = os.path.abspath(root)
# os.path.abspath strips a trailing /
# it needs to be temporarily added back for requests to root/
if not (absolute_path + os.path.sep).startswith(root):
raise HTTPError(403, "%s is not in root static directory",
self.path)
if (os.path.isdir(absolute_path) and
self.default_filename is not None):
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
self.redirect(self.request.path + "/", permanent=True)
return
absolute_path = os.path.join(absolute_path, self.default_filename)
if not os.path.exists(absolute_path):
raise HTTPError(404)
if not os.path.isfile(absolute_path):
raise HTTPError(403, "%s is not a file", self.path)
return absolute_path
@classmethod
def get_content(cls, abspath, start=None, end=None):
"""Retrieve the content of the requested resource which is located
at the given absolute path.
This class method may be overridden by subclasses. Note that its
signature is different from other overridable class methods
(no ``settings`` argument); this is deliberate to ensure that
``abspath`` is able to stand on its own as a cache key.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation.
.. versionadded:: 3.1
"""
with open(abspath, "rb") as file:
if start is not None:
file.seek(start)
if end is not None:
remaining = end - (start or 0)
else:
remaining = None
while True:
chunk_size = 64 * 1024
if remaining is not None and remaining < chunk_size:
chunk_size = remaining
chunk = file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk)
yield chunk
else:
if remaining is not None:
assert remaining == 0
return
@classmethod
def get_content_version(cls, abspath):
"""Returns a version string for the resource at the given path.
This class method may be overridden by subclasses. The
default implementation is a hash of the file's contents.
.. versionadded:: 3.1
"""
data = cls.get_content(abspath)
hasher = hashlib.md5()
if isinstance(data, bytes):
hasher.update(data)
else:
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest()
def _stat(self):
if not hasattr(self, '_stat_result'):
self._stat_result = os.stat(self.absolute_path)
return self._stat_result
def get_content_size(self):
"""Retrieve the total size of the resource at the given path.
This method may be overridden by subclasses.
.. versionadded:: 3.1
.. versionchanged:: 4.0
This method is now always called, instead of only when
partial results are requested.
"""
stat_result = self._stat()
return stat_result[stat.ST_SIZE]
def get_modified_time(self):
"""Returns the time that ``self.absolute_path`` was last modified.
May be overridden in subclasses. Should return a `~datetime.datetime`
object or None.
.. versionadded:: 3.1
"""
stat_result = self._stat()
modified = datetime.datetime.utcfromtimestamp(stat_result[stat.ST_MTIME])
return modified
def get_content_type(self):
"""Returns the ``Content-Type`` header to be used for this request.
.. versionadded:: 3.1
"""
mime_type, encoding = mimetypes.guess_type(self.absolute_path)
return mime_type
def set_extra_headers(self, path):
"""For subclass to add extra headers to the response"""
pass
def get_cache_time(self, path, modified, mime_type):
"""Override to customize cache control behavior.
Return a positive number of seconds to make the result
        cacheable for that amount of time, or 0 to mark the resource as
cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument.
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
@classmethod
def make_static_url(cls, settings, path, include_version=True):
"""Constructs a versioned url for the given path.
This method may be overridden in subclasses (but note that it
is a class method rather than an instance method). Subclasses
are only required to implement the signature
``make_static_url(cls, settings, path)``; other keyword
arguments may be passed through `~RequestHandler.static_url`
but are not standard.
``settings`` is the `Application.settings` dictionary. ``path``
is the static path being requested. The url returned should be
relative to the current host.
``include_version`` determines whether the generated URL should
include the query string containing the version hash of the
file corresponding to the given ``path``.
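        A sketch of a subclass that points static urls at a CDN (the host
        name is illustrative)::
            class CDNStaticFileHandler(StaticFileHandler):
                @classmethod
                def make_static_url(cls, settings, path, include_version=True):
                    url = "https://cdn.example.com/" + path
                    version_hash = cls.get_version(settings, path)
                    if include_version and version_hash:
                        return "%s?v=%s" % (url, version_hash)
                    return url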
"""
url = settings.get('static_url_prefix', '/static/') + path
if not include_version:
return url
version_hash = cls.get_version(settings, path)
if not version_hash:
return url
return '%s?v=%s' % (url, version_hash)
def parse_url_path(self, url_path):
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
``static_url_prefix`` removed. The return value should be
        a filesystem path relative to ``static_path``.
This is the inverse of `make_static_url`.
"""
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
@classmethod
def get_version(cls, settings, path):
"""Generate the version string to be used in static URLs.
``settings`` is the `Application.settings` dictionary and ``path``
is the relative location of the requested asset on the filesystem.
The returned value should be a string, or ``None`` if no version
could be determined.
.. versionchanged:: 3.1
This method was previously recommended for subclasses to override;
`get_content_version` is now preferred as it allows the base
class to handle caching of the result.
"""
abs_path = cls.get_absolute_path(settings['static_path'], path)
return cls._get_cached_version(abs_path)
@classmethod
def _get_cached_version(cls, abs_path):
with cls._lock:
hashes = cls._static_hashes
if abs_path not in hashes:
try:
hashes[abs_path] = cls.get_content_version(abs_path)
except Exception:
gen_log.error("Could not open static file %r", abs_path)
hashes[abs_path] = None
hsh = hashes.get(abs_path)
if hsh:
return hsh
return None
class FallbackHandler(RequestHandler):
"""A `RequestHandler` that wraps another HTTP server callback.
The fallback is a callable object that accepts an
`~.httputil.HTTPServerRequest`, such as an `Application` or
    `tornado.wsgi.WSGIContainer`. This is most useful for serving both
    Tornado ``RequestHandlers`` and WSGI apps in the same server. Typical
usage::
wsgi_app = tornado.wsgi.WSGIContainer(
django.core.handlers.wsgi.WSGIHandler())
application = tornado.web.Application([
(r"/foo", FooHandler),
(r".*", FallbackHandler, dict(fallback=wsgi_app),
])
"""
def initialize(self, fallback):
self.fallback = fallback
def prepare(self):
self.fallback(self.request)
self._finished = True
class OutputTransform(object):
"""A transform modifies the result of an HTTP request (e.g., GZip encoding)
Applications are not expected to create their own OutputTransforms
or interact with them directly; the framework chooses which transforms
(if any) to apply.
"""
def __init__(self, request):
pass
def transform_first_chunk(self, status_code, headers, chunk, finishing):
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return chunk
class GZipContentEncoding(OutputTransform):
"""Applies the gzip content encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
.. versionchanged:: 4.0
Now compresses all mime types beginning with ``text/``, instead
        of just a whitelist (the whitelist is still used for certain
non-text mime types).
"""
# Whitelist of compressible mime types (in addition to any types
# beginning with "text/").
CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
"application/xml", "application/atom+xml",
"application/json", "application/xhtml+xml"])
MIN_LENGTH = 5
def __init__(self, request):
self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
def _compressible_type(self, ctype):
return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
def transform_first_chunk(self, status_code, headers, chunk, finishing):
if 'Vary' in headers:
headers['Vary'] += b', Accept-Encoding'
else:
headers['Vary'] = b'Accept-Encoding'
if self._gzipping:
ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
self._gzipping = self._compressible_type(ctype) and \
(not finishing or len(chunk) >= self.MIN_LENGTH) and \
("Content-Encoding" not in headers)
if self._gzipping:
headers["Content-Encoding"] = "gzip"
self._gzip_value = BytesIO()
self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value)
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
# The original content length is no longer correct.
# If this is the last (and only) chunk, we can set the new
# content-length; otherwise we remove it and fall back to
# chunked encoding.
if finishing:
headers["Content-Length"] = str(len(chunk))
else:
del headers["Content-Length"]
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
if self._gzipping:
self._gzip_file.write(chunk)
if finishing:
self._gzip_file.close()
else:
self._gzip_file.flush()
chunk = self._gzip_value.getvalue()
self._gzip_value.truncate(0)
self._gzip_value.seek(0)
return chunk
def authenticated(method):
"""Decorate methods with this to require that the user be logged in.
If the user is not logged in, they will be redirected to the configured
`login url <RequestHandler.get_login_url>`.
If you configure a login url with a query parameter, Tornado will
assume you know what you're doing and use it as-is. If not, it
will add a `next` parameter so the login page knows where to send
you once you're logged in.
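    A sketch of typical use (``ProfileHandler`` is illustrative)::
        class ProfileHandler(RequestHandler):
            @authenticated
            def get(self):
                self.write("Hello, %s" % self.current_user)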
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urlparse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
next_url = self.request.uri
url += "?" + urlencode(dict(next=next_url))
self.redirect(url)
return
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
class UIModule(object):
"""A re-usable, modular UI unit on a page.
UI modules often execute additional queries, and they can include
additional CSS and JavaScript that will be included in the output
page, which is automatically inserted on page render.
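    A minimal sketch (the module and template names are illustrative)::
        class Entry(UIModule):
            def render(self, entry, show_comments=False):
                return self.render_string(
                    "modules/entry.html", entry=entry,
                    show_comments=show_comments)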
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.ui = handler.ui
self.locale = handler.locale
@property
def current_user(self):
return self.handler.current_user
def render(self, *args, **kwargs):
"""Overridden in subclasses to return this module's output."""
raise NotImplementedError()
def embedded_javascript(self):
"""Returns a JavaScript string that will be embedded in the page."""
return None
def javascript_files(self):
"""Returns a list of JavaScript files required by this module."""
return None
def embedded_css(self):
"""Returns a CSS string that will be embedded in the page."""
return None
def css_files(self):
"""Returns a list of CSS files required by this module."""
return None
def html_head(self):
"""Returns a CSS string that will be put in the <head/> element"""
return None
def html_body(self):
"""Returns an HTML string that will be put in the <body/> element"""
return None
def render_string(self, path, **kwargs):
"""Renders a template and returns it as a string."""
return self.handler.render_string(path, **kwargs)
class _linkify(UIModule):
def render(self, text, **kwargs):
return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
def render(self):
return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
"""UIModule that simply renders the given template.
{% module Template("foo.html") %} is similar to {% include "foo.html" %},
but the module version gets its own namespace (with kwargs passed to
Template()) instead of inheriting the outer template's namespace.
Templates rendered through this module also get access to UIModule's
automatic javascript/css features. Simply call set_resources
inside the template and give it keyword arguments corresponding to
the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
Note that these resources are output once per template file, not once
per instantiation of the template, so they must not depend on
any arguments to the template.
"""
def __init__(self, handler):
super(TemplateModule, self).__init__(handler)
# keep resources in both a list and a dict to preserve order
self._resource_list = []
self._resource_dict = {}
def render(self, path, **kwargs):
def set_resources(**kwargs):
if path not in self._resource_dict:
self._resource_list.append(kwargs)
self._resource_dict[path] = kwargs
else:
if self._resource_dict[path] != kwargs:
raise ValueError("set_resources called with different "
"resources for the same template")
return ""
return self.render_string(path, set_resources=set_resources,
**kwargs)
def _get_resources(self, key):
return (r[key] for r in self._resource_list if key in r)
def embedded_javascript(self):
return "\n".join(self._get_resources("embedded_javascript"))
def javascript_files(self):
result = []
for f in self._get_resources("javascript_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def embedded_css(self):
return "\n".join(self._get_resources("embedded_css"))
def css_files(self):
result = []
for f in self._get_resources("css_files"):
if isinstance(f, (unicode_type, bytes)):
result.append(f)
else:
result.extend(f)
return result
def html_head(self):
return "".join(self._get_resources("html_head"))
def html_body(self):
return "".join(self._get_resources("html_body"))
class _UIModuleNamespace(object):
"""Lazy namespace which creates UIModule proxies bound to a handler."""
def __init__(self, handler, ui_modules):
self.handler = handler
self.ui_modules = ui_modules
def __getitem__(self, key):
return self.handler._ui_module(key, self.ui_modules[key])
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(str(e))
class URLSpec(object):
"""Specifies mappings between URLs and handlers."""
def __init__(self, pattern, handler, kwargs=None, name=None):
"""Parameters:
* ``pattern``: Regular expression to be matched. Any groups
in the regex will be passed in to the handler's get/post/etc
methods as arguments.
* ``handler``: `RequestHandler` subclass to be invoked.
* ``kwargs`` (optional): A dictionary of additional arguments
to be passed to the handler's constructor.
* ``name`` (optional): A name for this handler. Used by
`Application.reverse_url`.
"""
if not pattern.endswith('$'):
pattern += '$'
self.regex = re.compile(pattern)
assert len(self.regex.groupindex) in (0, self.regex.groups), \
("groups in url regexes must either be all named or all "
"positional: %r" % self.regex.pattern)
if isinstance(handler, str):
# import the Module and instantiate the class
# Must be a fully qualified name (module.ClassName)
handler = import_object(handler)
self.handler_class = handler
self.kwargs = kwargs or {}
self.name = name
self._path, self._group_count = self._find_groups()
def __repr__(self):
return '%s(%r, %s, kwargs=%r, name=%r)' % \
(self.__class__.__name__, self.regex.pattern,
self.handler_class, self.kwargs, self.name)
def _find_groups(self):
"""Returns a tuple (reverse string, group count) for a url.
For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
would return ('/%s/%s/', 2).
"""
pattern = self.regex.pattern
if pattern.startswith('^'):
pattern = pattern[1:]
if pattern.endswith('$'):
pattern = pattern[:-1]
if self.regex.groups != pattern.count('('):
# The pattern is too complicated for our simplistic matching,
# so we can't support reversing it.
return (None, None)
pieces = []
for fragment in pattern.split('('):
if ')' in fragment:
paren_loc = fragment.index(')')
if paren_loc >= 0:
pieces.append('%s' + fragment[paren_loc + 1:])
else:
pieces.append(fragment)
return (''.join(pieces), self.regex.groups)
def reverse(self, *args):
assert self._path is not None, \
"Cannot reverse url regex " + self.regex.pattern
assert len(args) == self._group_count, "required number of arguments "\
"not found"
if not len(args):
return self._path
converted_args = []
for a in args:
if not isinstance(a, (unicode_type, bytes)):
a = str(a)
converted_args.append(escape.url_escape(utf8(a), plus=False))
return self._path % tuple(converted_args)
url = URLSpec
if hasattr(hmac, 'compare_digest'): # python 3.3
_time_independent_equals = hmac.compare_digest
else:
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], int): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def create_signed_value(secret, name, value, version=None, clock=None):
if version is None:
version = DEFAULT_SIGNED_VALUE_VERSION
if clock is None:
clock = time.time
timestamp = utf8(str(int(clock())))
value = base64.b64encode(utf8(value))
if version == 1:
signature = _create_signature_v1(secret, name, value, timestamp)
value = b"|".join([value, timestamp, signature])
return value
elif version == 2:
# The v2 format consists of a version number and a series of
# length-prefixed fields "%d:%s", the last of which is a
# signature, all separated by pipes. All numbers are in
# decimal format with no leading zeros. The signature is an
# HMAC-SHA256 of the whole string up to that point, including
# the final pipe.
#
# The fields are:
# - format version (i.e. 2; no length prefix)
# - key version (currently 0; reserved for future key rotation features)
# - timestamp (integer seconds since epoch)
# - name (not encoded; assumed to be ~alphanumeric)
# - value (base64-encoded)
# - signature (hex-encoded; no length prefix)
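        # Illustrative example (all values hypothetical): signing name "session"
        # and value "xyz" (base64 "eHl6") at timestamp 1234567890 produces
        #     2|1:0|10:1234567890|7:session|4:eHl6|<64 hex chars of HMAC-SHA256>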
def format_field(s):
return utf8("%d:" % len(s)) + utf8(s)
to_sign = b"|".join([
b"2|1:0",
format_field(timestamp),
format_field(name),
format_field(value),
b''])
signature = _create_signature_v2(secret, to_sign)
return to_sign + signature
else:
raise ValueError("Unsupported version %d" % version)
# A leading version number in decimal with no leading zeros, followed by a pipe.
_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
def decode_signed_value(secret, name, value, max_age_days=31, clock=None, min_version=None):
if clock is None:
clock = time.time
if min_version is None:
min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
if min_version > 2:
raise ValueError("Unsupported min_version %d" % min_version)
if not value:
return None
# Figure out what version this is. Version 1 did not include an
# explicit version field and started with arbitrary base64 data,
# which makes this tricky.
value = utf8(value)
m = _signed_value_version_re.match(value)
if m is None:
version = 1
else:
try:
version = int(m.group(1))
if version > 999:
# Certain payloads from the version-less v1 format may
# be parsed as valid integers. Due to base64 padding
# restrictions, this can only happen for numbers whose
# length is a multiple of 4, so we can treat all
# numbers up to 999 as versions, and for the rest we
# fall back to v1 format.
version = 1
except ValueError:
version = 1
if version < min_version:
return None
if version == 1:
return _decode_signed_value_v1(secret, name, value, max_age_days, clock)
elif version == 2:
return _decode_signed_value_v2(secret, name, value, max_age_days, clock)
else:
return None
def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
parts = utf8(value).split(b"|")
if len(parts) != 3:
return None
signature = _create_signature_v1(secret, name, parts[0], parts[1])
if not _time_independent_equals(parts[2], signature):
gen_log.warning("Invalid cookie signature %r", value)
return None
timestamp = int(parts[1])
if timestamp < clock() - max_age_days * 86400:
gen_log.warning("Expired cookie %r", value)
return None
if timestamp > clock() + 31 * 86400:
# _cookie_signature does not hash a delimiter between the
# parts of the cookie, so an attacker could transfer trailing
# digits from the payload to the timestamp without altering the
# signature. For backwards compatibility, sanity-check timestamp
# here instead of modifying _cookie_signature.
gen_log.warning("Cookie timestamp in future; possible tampering %r", value)
return None
if parts[1].startswith(b"0"):
gen_log.warning("Tampered cookie %r", value)
return None
try:
return base64.b64decode(parts[0])
except Exception:
return None
def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
def _consume_field(s):
length, _, rest = s.partition(b':')
n = int(length)
field_value = rest[:n]
# In python 3, indexing bytes returns small integers; we must
# use a slice to get a byte string as in python 2.
if rest[n:n + 1] != b'|':
raise ValueError("malformed v2 signed value field")
rest = rest[n + 1:]
return field_value, rest
rest = value[2:] # remove version number
try:
key_version, rest = _consume_field(rest)
timestamp, rest = _consume_field(rest)
name_field, rest = _consume_field(rest)
value_field, rest = _consume_field(rest)
except ValueError:
return None
passed_sig = rest
signed_string = value[:-len(passed_sig)]
expected_sig = _create_signature_v2(secret, signed_string)
if not _time_independent_equals(passed_sig, expected_sig):
return None
if name_field != utf8(name):
return None
timestamp = int(timestamp)
if timestamp < clock() - max_age_days * 86400:
# The signature has expired.
return None
try:
return base64.b64decode(value_field)
except Exception:
return None
def _create_signature_v1(secret, *parts):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
for part in parts:
hash.update(utf8(part))
return utf8(hash.hexdigest())
def _create_signature_v2(secret, s):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
hash.update(utf8(s))
return utf8(hash.hexdigest())
def _unquote_or_none(s):
"""None-safe wrapper around url_unescape to handle unamteched optional
groups correctly.
Note that args are passed as bytes so the handler can decide what
encoding to use.
"""
if s is None:
return s
return escape.url_unescape(s, encoding=None, plus=False)
| apache-2.0 | 6,208,286,463,689,521,000 | 38.546653 | 98 | 0.599566 | false |
McDermott-Group/LabRAD | LabRAD/Measurements/IV/measureIV.py | 1 | 19990 | import matplotlib as mpl
mpl.use('TkAgg')
import pylab, numpy as np
import Tkinter as tk
import ttk
import tkFileDialog
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import os, sys
import niPCI6221 as ni
import threading
# Check out:
# https://pythonhosted.org/PyDAQmx/callback.html
# TODO:
# DC sweep?
# change extension to .pyw to prevent window from opening
# record all parameters used in notes
class MeasureIV(tk.Tk):
def __init__(self,parent):
tk.Tk.__init__(self,parent)
self.parent = parent
self.running = True
self.initParams()
self.initializeWindow()
self.initializeACWaves()
self.initializeDCWave()
self.lock = threading.Lock()
self.cond = threading.Condition(threading.Lock())
def initParams(self):
self.RACIn = tk.DoubleVar()
self.RDCIn = tk.DoubleVar()
self.ROut = tk.DoubleVar()
self.portACIn = tk.IntVar()
self.portDCIn = tk.IntVar()
self.portOut = tk.IntVar()
self.amp = tk.DoubleVar()
self.ACFreq = tk.DoubleVar()
self.ACAmp = tk.DoubleVar()
self.DCAmp = tk.DoubleVar()
self.sampRate = tk.IntVar()
self.savePath = tk.StringVar()
self.fileName = tk.StringVar()
self.RACIn.set(100)
self.RDCIn.set(100)
self.ROut.set(100)
self.portACIn.set(0)
self.portDCIn.set(1)
self.portOut.set(0)
self.amp.set(1)
self.ACFreq.set(1)
self.ACAmp.set(0.0)
self.DCAmp.set(0.0)
self.sampRate.set(10000)
self.portDCIn.trace('w',self.changeDCOutput)
self.DCAmp.trace('w',self.changeDCOutput)
self.portOut.trace('w',self.changeACWaves)
self.portACIn.trace('w',self.changeACWaves)
self.ACFreq.trace('w',self.changeACWaves)
self.ACAmp.trace('w',self.changeACWaves)
self.sampRate.trace('w',self.changeACWaves)
self.averages = tk.IntVar()
self.totalAverages = tk.IntVar()
self.averages.set(0)
self.totalAverages.set(1)
self.averaging = False
self.VAverages = 0
self.IAverages = 0
self.savePath.set('.')
def initializeWindow(self):
"""Creates the GUI."""
root = self
#set up window
root.wm_title('Measure IV')
root.title('Measure IV')
w, h = root.winfo_screenwidth(), root.winfo_screenheight()
#root.geometry("%dx%d+0+0" % (w/2, 0.9*h))
leftFrame = tk.Frame(root)
leftFrame.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
rightFrame = tk.Frame(root, width=600)
rightFrame.pack(side=tk.LEFT)
### LEFT SIDE ###
#notes box
self.comments = tk.Text(master=leftFrame, height=5)
self.comments.pack(side=tk.TOP, fill=tk.X)
# IV plot
self.fig = pylab.figure()
self.ax = self.fig.add_subplot(111)
self.ax.set_title('Realtime IV Measurement')
# self.ax.set_xlabel('Time')
        # self.ax.set_ylabel('Temperature [K]')
self.plotPoints, = self.ax.plot([],[],'-')
self.canvas = FigureCanvasTkAgg(self.fig, master=leftFrame)
self.canvas.show()
self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
#temp plot toolbar at bottom
self.toolbar = NavigationToolbar2TkAgg( self.canvas, leftFrame )
self.toolbar.update()
self.canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
# this/is/a/long/file/path/[name].iv
fileFrame = tk.Frame(leftFrame)
fileFrame.pack(side=tk.TOP)
tk.Button(master=fileFrame,text='Select Path',command=self.chooseSaveDirectory).pack(side=tk.LEFT)
tk.Label(fileFrame,textvariable=self.savePath).pack(side=tk.LEFT)
tk.Label(fileFrame,text='/').pack(side=tk.LEFT)
tk.Entry(fileFrame, width=10, textvariable=self.fileName).pack(side=tk.LEFT)
tk.Label(fileFrame,text="_#.iv").pack(side=tk.LEFT)
# (Average and Save||Cancel Averaging) Averages: 0/[#]
averageFrame = tk.Frame(leftFrame)
averageFrame.pack(side=tk.TOP)
self.avgButton = tk.Button(master=averageFrame,text='Average',command=self.averageAndSave)
self.avgButton.pack(side=tk.LEFT)
tk.Label(averageFrame,text="Averages: ").pack(side=tk.LEFT)
tk.Label(averageFrame,textvariable=self.averages).pack(side=tk.LEFT)
tk.Label(averageFrame,text=" / ").pack(side=tk.LEFT)
tk.Entry(averageFrame, width=8, textvariable=self.totalAverages).pack(side=tk.LEFT)
#self.fig.tight_layout()
### RIGHT SIDE ###
self.measurementTabs = ttk.Notebook(rightFrame, width=600)
self.measurementTabs.pack(side=tk.TOP)
frame2wire = ttk.Frame(self.measurementTabs)
frame3wire = ttk.Frame(self.measurementTabs)
frame4wire = ttk.Frame(self.measurementTabs)
frameVPhi = ttk.Frame(self.measurementTabs)
self.measurementTabs.add(frame2wire, text='2-Wire')
self.measurementTabs.add(frame3wire, text='3-Wire')
self.measurementTabs.add(frame4wire, text='4-Wire')
self.measurementTabs.add(frameVPhi, text='V-Phi')
bgimg = tk.PhotoImage(file="TwoWire.gif")
bglabel2 = tk.Label(frame2wire, image=bgimg)
bglabel2.image = bgimg
bglabel2.pack()
bgimg = tk.PhotoImage(file="ThreeWire.gif")
bglabel3 = tk.Label(frame3wire, image=bgimg)
bglabel3.image = bgimg
bglabel3.pack()
bgimg = tk.PhotoImage(file="FourWire.gif")
bglabel4 = tk.Label(frame4wire, image=bgimg)
bglabel4.image = bgimg
bglabel4.pack()
bgimg = tk.PhotoImage(file="VPhi.gif")
bglabelVPhi = tk.Label(frameVPhi, image=bgimg)
bglabelVPhi.image = bgimg
bglabelVPhi.pack()
tk.OptionMenu(frame2wire, self.portACIn, 0,1).place(relx=70/597., rely=261/578., anchor=tk.CENTER)
tk.Entry(frame2wire, width=8, textvariable=self.ACFreq).place(relx=70/597., rely=285/578., anchor=tk.CENTER)
tk.Entry(frame2wire, width=8, textvariable=self.ACAmp).place(relx=70/597., rely=308/578., anchor=tk.CENTER)
tk.Entry(frame2wire, width=8, textvariable=self.RACIn).place(relx=305/597., rely=176/578., anchor=tk.CENTER)
tk.Entry(frame2wire, width=8, textvariable=self.amp).place(relx=450/597., rely=335/578., anchor=tk.CENTER)
tk.OptionMenu(frame2wire, self.portOut, 0,1,2,3,4,5,6,7).place(relx=531/597., rely=410/578., anchor=tk.CENTER)
tk.OptionMenu(frame3wire, self.portACIn, 0,1).place(relx=86/597., rely=58/578., anchor=tk.CENTER)
tk.Entry(frame3wire, width=8, textvariable=self.ACFreq).place(relx=86/597., rely=81/578., anchor=tk.CENTER)
tk.Entry(frame3wire, width=8, textvariable=self.ACAmp).place(relx=86/597., rely=105/578., anchor=tk.CENTER)
tk.Entry(frame3wire, width=8, textvariable=self.RACIn).place(relx=144/597., rely=176/578., anchor=tk.CENTER)
tk.Entry(frame3wire, width=8, textvariable=self.ROut).place(relx=405/597., rely=176/578., anchor=tk.CENTER)
tk.Entry(frame3wire, width=8, textvariable=self.amp).place(relx=411/597., rely=35/578., anchor=tk.CENTER)
tk.OptionMenu(frame3wire, self.portOut, 0,1,2,3,4,5,6,7).place(relx=545/597., rely=80/578., anchor=tk.CENTER)
tk.OptionMenu(frame4wire, self.portACIn, 0,1).place(relx=41/597., rely=158/578., anchor=tk.CENTER)
tk.Entry(frame4wire, width=8, textvariable=self.ACFreq).place(relx=41/597., rely=182/578., anchor=tk.CENTER)
tk.Entry(frame4wire, width=8, textvariable=self.ACAmp).place(relx=41/597., rely=205/578., anchor=tk.CENTER)
tk.Entry(frame4wire, width=8, textvariable=self.RACIn).place(relx=38/597., rely=268/578., anchor=tk.CENTER)
tk.Entry(frame4wire, width=8, textvariable=self.ROut).place(relx=220/597., rely=268/578., anchor=tk.CENTER)
tk.OptionMenu(frame4wire, self.portOut, 0,1,2,3,4,5,6,7).place(relx=551/597., rely=94/578., anchor=tk.CENTER)
tk.OptionMenu(frameVPhi, self.portDCIn, 0,1).place(relx=94/597., rely=80/578., anchor=tk.CENTER)
tk.OptionMenu(frameVPhi, self.portACIn, 0,1).place(relx=34/597., rely=194/578., anchor=tk.CENTER)
tk.Entry(frameVPhi, width=8, textvariable=self.ACFreq).place(relx=34/597., rely=218/578., anchor=tk.CENTER)
tk.Entry(frameVPhi, width=8, textvariable=self.ACAmp).place(relx=34/597., rely=241/578., anchor=tk.CENTER)
tk.Entry(frameVPhi, width=8, textvariable=self.DCAmp).place(relx=94/597., rely=105/578., anchor=tk.CENTER)
tk.Entry(frameVPhi, width=8, textvariable=self.RDCIn).place(relx=144/597., rely=156/578., anchor=tk.CENTER)
tk.Entry(frameVPhi, width=8, textvariable=self.RACIn).place(relx=94/597., rely=306/578., anchor=tk.CENTER)
tk.Entry(frameVPhi, width=8, textvariable=self.ROut).place(relx=405/597., rely=156/578., anchor=tk.CENTER)
tk.Entry(frameVPhi, width=8, textvariable=self.amp).place(relx=411/597., rely=36/578., anchor=tk.CENTER)
tk.OptionMenu(frameVPhi, self.portOut, 0,1,2,3,4,5,6,7).place(relx=545/597., rely=80/578., anchor=tk.CENTER)
self.measurementTabs.select(1)
self.currentTab = 1
self.measurementTabs.bind_all("<<NotebookTabChanged>>", self.tabChangedEvent)
root.protocol("WM_DELETE_WINDOW", self._quit) #X BUTTON
def tabChangedEvent(self, event):
tabid = self.measurementTabs.select()
self.currentTab = self.measurementTabs.index(tabid)
def genWave(self, amp, freq):
"""
Creates an output wave vector.
Returns wave in np.float64 array.
"""
# Setting freq to 0 is just a DC output.
# Number of samples doesn't really matter in
# that case, so just set to the sample rate.
        if freq != 0:
samps = int(float(self.sampRate.get()) / freq)
else:
samps = int(self.sampRate.get())
# Generate empty wave of correct size
wave = np.zeros((samps,),dtype=np.float64)
# Sample the wave at sampRate. Use cos such
# that the case of freq=0 will return DC amp.
for n in range(samps):
wave[n] = amp * np.cos(2*np.pi*n/samps)
# Return the wave to the caller
return wave
def initializeACWaves(self):
try:
writeBuf = self.genWave(self.ACAmp.get(), self.ACFreq.get())
self.waveInput = ni.CallbackTask()
self.waveInput.configureCallbackTask("Dev1/ai"+str(self.portOut.get()),
self.sampRate.get(),
len(writeBuf))
self.waveInput.setCallback(self.updateData)
triggerName = self.waveInput.getTrigName()
self.waveOutput = ni.acAnalogOutputTask()
self.waveOutput.configureAcAnalogOutputTask("Dev1/ao"+str(self.portACIn.get()),
self.sampRate.get(),
writeBuf,
trigName=triggerName)
self.waveOutput.StartTask()
self.waveInput.StartTask()
print "started AC waves"
except ValueError:
pass #invalid value often happens before typing has fully finished
except Exception as e:
print 'Error initializing wave output:\n' + str(e)
def initializeDCWave(self):
try:
self.DCOutput = ni.dcAnalogOutputTask()
self.DCOutput.configureDcAnalogOutputTask("Dev1/ao"+str(self.portDCIn.get()),self.DCAmp.get())
self.DCOutput.StartTask()
print "started DC output"
except ValueError:
pass #invalid value often happens before typing has fully finished
except Exception as e:
print 'Error initializing DC output:\n' + str(e)
def updateData(self, data):
try: self.ACAmp.get(), self.ACFreq.get()
except ValueError: return
self.cond.acquire()
try: newdata = data
except Exception as e:
            print 'failed to acquire data'
newdata = []
#this is dummy data. Uncomment the line above
#newdata = [10 * np.random.random_sample(10),10* np.random.random_sample(10)]
currents = np.array(self.genWave(self.ACAmp.get(),self.ACFreq.get()))
voltages = np.array(newdata)
#print 'tab id',self.measurementTabs.select(), self.measurementTabs.index(self.measurementTabs.select())
#tabid = self.measurementTabs.select()
currentTab = self.currentTab#measurementTabs.index(tabid)
self.ax.set_xlabel('Voltage [V]')
self.ax.set_ylabel('Current [A]')
if currentTab == 0: # 2 wire
try:
currents = (currents-voltages/self.amp.get())/self.RACIn.get()
voltages = voltages/self.amp.get()
currents, voltages = voltages, currents
self.ax.set_xlabel('Current [A]')
self.ax.set_ylabel('Voltage [V]')
except ValueError: pass # in case the fields have bad values or are not finished typing
elif currentTab == 1: # 3 wire
try:
currents = currents/self.RACIn.get()/1000
voltages = voltages/self.amp.get()
except ValueError: pass
elif currentTab == 2: # 4 wire
try:
currents = currents/(self.RACIn.get() + self.ROut.get())/1000
voltages = voltages/self.amp.get()
except ValueError: pass
elif currentTab == 3: # V-Phi
try:
currents = currents/self.RACIn.get()/1000
voltages = voltages/self.amp.get()
currents, voltages = voltages, currents
self.ax.set_xlabel('$\Phi/L$ [A]')
self.ax.set_ylabel('Voltage [V]')
except ValueError: pass
# average data if selected
if self.averaging is True and self.averages.get() < self.totalAverages.get():
self.VAverages = (self.VAverages*self.averages.get() + voltages)/(self.averages.get()+1.)
self.IAverages = (self.IAverages*self.averages.get() + currents)/(self.averages.get()+1.)
self.averages.set(self.averages.get()+1)
if self.averages.get() == self.totalAverages.get(): # save and re-initialize
self.saveAveragedData()
self.cancelAveraging()
else:
self.VAverages = voltages
self.IAverages = currents
self.plotPoints.set_xdata(self.VAverages)
self.plotPoints.set_ydata(self.IAverages)
self.cond.notify()
self.cond.release()
self.ax.relim()
self.ax.autoscale_view()
self.fig.canvas.draw()
def averageAndSave(self):
self.averaging = True
self.avgButton.config(text="Cancel Averaging",command=self.cancelAveraging)
def cancelAveraging(self):
self.averaging = False
self.averages.set( 0 )
if self.savePath.get() != '.': btntext = 'Average and Save'
else: btntext = 'Average'
self.avgButton.config(text=btntext,command=self.averageAndSave)
def parametersAsText(self):
measurement = ['2-wire','3-wire','4-wire','v-phi'][self.currentTab]
eqn2 = ''
eqn3 = ''
eqn4 = ''
eqnvphi = ''
params = {
'Measurement Type': measurement,
'AC Resistance In [kOhms]': self.RACIn.get(),
'DC Resistance In [kOhms]': self.RDCIn.get(),
'Output Resistance [kOhms]': self.ROut.get(),
'AC Generator Port': self.portACIn.get(),
'DC Generator Port': self.portDCIn.get(),
'Signal Read Port': self.portOut.get(),
            'Amplification': self.amp.get(),
'Generator Frequency [Hz]': self.ACFreq.get(),
'AC Generator Amplitude [V]': self.ACAmp.get(),
'DC Generator Amplitude [V]': self.DCAmp.get(),
'Sampling Rate [Hz]': self.sampRate.get(),
'Number of Averages': self.totalAverages.get(),
'Equation': [eqn2,eqn3,eqn4,eqnvphi][self.currentTab],
'Comments': self.comments.get(1.0, tk.END)
}
textParams = '\n'.join( [k + ': ' + str(v) for k,v in params.items()] )
return textParams+'\n'
def saveAveragedData(self):
if self.savePath.get() != '.':
i = 1
while True:
fullSavePath = os.path.join(self.savePath.get(),(self.fileName.get()+'_%03d.iv'%i))
if not os.path.exists(fullSavePath): break
i += 1
with open(fullSavePath,'a') as f:
dataToSave = np.transpose(np.asarray([self.IAverages,self.VAverages]))
f.write( self.parametersAsText() )
np.savetxt(f,dataToSave)
def chooseSaveDirectory(self):
chooseDirOpts = {}
currentTab = self.currentTab
if currentTab == 0:
chooseDirOpts['initialdir'] = 'Z:\\mcdermott-group\\Data\\Suttle Data\\Nb\\Gen5D\\08122016PE600\\IV'
else:
chooseDirOpts['initialdir'] = self.savePath.get()
chooseDirOpts['mustexist'] = True
chooseDirOpts['title'] = 'Choose base data directory...'
self.savePath.set( tkFileDialog.askdirectory(**chooseDirOpts) )
self.avgButton.config(text="Average and Save")
def changeACWaves(self,*args):
"""This should be called (by a listener) every time any of the BNC output port variables change."""
try:
self.waveOutput.StopTask()
self.waveOutput.ClearTask()
self.waveInput.StopTask()
self.waveInput.ClearTask()
except: print 'failed to end wave'
# if port is changed, we should automatically switch AC and DC ports
if self.portACIn.get() == self.portDCIn.get():
self.portDCIn.set((self.portACIn.get()+1)%2)
try:
self.ACAmp.get(), self.ACFreq.get() #raise error if cell is not valid float
self.initializeACWaves()
except ValueError: pass # if cell is not valid float
except Exception as e: print 'failed to start wave', str(e)
def changeDCOutput(self,*args):
try:
self.DCOutput.StopTask()
self.DCOutput.ClearTask()
except: print 'failed to end DC wave'
# if port is changed, we should automatically switch AC and DC ports
if self.portACIn.get() == self.portDCIn.get():
self.portACIn.set((self.portDCIn.get()+1)%2)
try:
self.DCAmp.get() # raise error if cell is not valid float
self.initializeDCWave()
except ValueError: pass # if cell is not valid float
except Exception as e: print 'failed to start DC wave', str(e)
def _quit(self):
""" called when the window is closed."""
self.ACAmp.set(0)
self.DCAmp.set(0)
self.running = False
self.quit() # stops mainloop
self.destroy() # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
self.waveOutput.StopTask()
self.waveOutput.ClearTask()
self.waveInput.StopTask()
self.waveInput.ClearTask()
self.DCOutput.StopTask()
self.DCOutput.ClearTask()
#os._exit(1)
if __name__ == "__main__":
app = MeasureIV(None)
app.title("Measure IV")
app.mainloop() | gpl-2.0 | -3,675,403,846,407,559,000 | 44.745995 | 118 | 0.601501 | false |
Cysu/Person-Reid | reid/optimization/sgd.py | 1 | 10831 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import os
import glob
import time
import cPickle
import numpy
import theano
import theano.tensor as T
def train(evaluator, datasets, learning_rate=1e-4, momentum=0.9,
batch_size=10, n_epoch=100,
improvement=1-1e-3, patience_incr=2.0, learning_rate_decr=0.95,
never_stop=False):
"""Train model with batched Stochastic Gradient Descent(SGD) algorithm
Args:
evaluator: An Evaluator object that provides cost, updates and error
datasets: A Dataset object that provides training, validation and
testing data
learning_rate: The initial learning rate
momentum: The coefficient of momentum term
batch_size: The batch size
n_epoch: The number of epoch
improvement, patience_incr, learning_rate_decr:
If ``current_valid_error < best_valid_error * improvement``,
the patience will be updated to ``current_iter * patience_incr``,
and the learning_rate will be updated to
``current_learning_rate * learning_rate_decr``.
never_stop: When set to True, the training will not stop until user
interrupts. Otherwise, the training will stop either when all
the epoch finishes or the patience is consumed.
"""
# Setup parameters
n_train_batches = (datasets.train_x.get_value(borrow=True).shape[0]-1) // batch_size + 1
n_valid_batches = (datasets.valid_x.get_value(borrow=True).shape[0]-1) // batch_size + 1
n_test_batches = (datasets.test_x.get_value(borrow=True).shape[0]-1) // batch_size + 1
# Setup training, validation and testing functions
X = T.matrix('X') # input data
Y = T.matrix('Y') # corresponding targets
i = T.lscalar('i') # batch index
alpha = T.scalar('alpha') # learning rate
dummy = T.scalar('dummy') # for param update
# Compute the cost, updates and error
cost, inc_updates, param_updates = evaluator.get_cost_updates(X, Y, alpha, momentum)
error = evaluator.get_error(X, Y)
# Build training, validation and testing functions
inc_update_func = theano.function(
inputs=[i, alpha], outputs=cost, updates=inc_updates,
givens={
X: datasets.train_x[i*batch_size : (i+1)*batch_size],
Y: datasets.train_y[i*batch_size : (i+1)*batch_size]
})
param_update_func = theano.function(
inputs=[dummy], outputs=dummy, updates=param_updates)
valid_func = theano.function(
inputs=[i], outputs=error,
givens={
X: datasets.valid_x[i*batch_size : (i+1)*batch_size],
Y: datasets.valid_y[i*batch_size : (i+1)*batch_size]
})
test_func = theano.function(
inputs=[i], outputs=error,
givens={
X: datasets.test_x[i*batch_size : (i+1)*batch_size],
Y: datasets.test_y[i*batch_size : (i+1)*batch_size]
})
# Start training
best_valid_error = numpy.inf
test_error = numpy.inf
valid_freq = n_train_batches
patience = 20 * n_train_batches
done_looping = False
print "Start training ..."
begin_time = time.clock()
for epoch in xrange(n_epoch):
print "epoch {0}".format(epoch)
if done_looping: break
try:
for j in xrange(n_train_batches):
cur_iter = epoch * n_train_batches + j
# train
batch_cost = inc_update_func(j, learning_rate)
param_update_func(0)
print "[train] batch {0}/{1}, iter {2}, cost {3}".format(
j+1, n_train_batches, cur_iter, batch_cost)
# validate
if (cur_iter + 1) % valid_freq == 0:
valid_error = numpy.mean(
[valid_func(k) for k in xrange(n_valid_batches)])
print "[valid] error {0}".format(valid_error)
if type(valid_error) is numpy.ndarray:
valid_error = valid_error.mean()
# test
if valid_error < best_valid_error:
if valid_error < best_valid_error * improvement:
patience = max(patience, cur_iter * patience_incr)
learning_rate = learning_rate * learning_rate_decr
print "Update patience {0}, learning_rate {1}".format(
patience, learning_rate)
best_valid_error = valid_error
test_error = numpy.mean(
[test_func(k) for k in xrange(n_test_batches)])
print "[test] error {0}".format(test_error)
                # early stopping
if cur_iter > patience and not never_stop:
done_looping = True
break
except KeyboardInterrupt:
print "Keyboard interrupt. Stop training."
done_looping = True
end_time = time.clock()
print "Training complete, time {0}".format(end_time - begin_time)
print "Best validation error {0}, test error {1}".format(
best_valid_error, test_error)
def train_batch(evaluator, batch_dir, learning_rate=1e-4, momentum=0.9,
batch_size=10, n_epoch=100,
improvement=1-1e-3, patience_incr=2.0, learning_rate_decr=0.95,
never_stop=False):
"""Train model with batched Stochastic Gradient Descent(SGD) algorithm
Args:
evaluator: An Evaluator object that provides cost, updates and error
        batch_dir: Path of a directory containing pickled batch files named
            train_*.pkl, valid_*.pkl and test_*.pkl
learning_rate: The initial learning rate
momentum: The coefficient of momentum term
batch_size: The batch size
n_epoch: The number of epoch
improvement, patience_incr, learning_rate_decr:
If ``current_valid_error < best_valid_error * improvement``,
the patience will be updated to ``current_iter * patience_incr``,
and the learning_rate will be updated to
``current_learning_rate * learning_rate_decr``.
never_stop: When set to True, the training will not stop until user
interrupts. Otherwise, the training will stop either when all
the epoch finishes or the patience is consumed.
"""
train_files = glob.glob(os.path.join(batch_dir, 'train_*.pkl'))
valid_files = glob.glob(os.path.join(batch_dir, 'valid_*.pkl'))
test_files = glob.glob(os.path.join(batch_dir, 'test_*.pkl'))
train_files.sort()
valid_files.sort()
test_files.sort()
# Setup parameters
n_train_batches = len(train_files)
n_valid_batches = len(valid_files)
n_test_batches = len(test_files)
# Setup training, validation and testing functions
X = T.matrix('X') # input data
Y = T.matrix('Y') # corresponding targets
alpha = T.scalar('alpha') # learning rate
dummy = T.scalar('dummy') # for param update
# Compute the cost, updates and error
gpu_X = theano.shared(numpy.zeros((batch_size, 57600), dtype='float32'), borrow=True)
gpu_Y = theano.shared(numpy.zeros((batch_size, 107), dtype='float32'), borrow=True)
cost, inc_updates, param_updates = evaluator.get_cost_updates(X, Y, alpha, momentum)
error = evaluator.get_error(X, Y)
def set_gpu(fn):
with open(fn, 'rb') as f:
cpu_X, cpu_Y = cPickle.load(f)
gpu_X.set_value(cpu_X, borrow=True)
gpu_Y.set_value(cpu_Y, borrow=True)
# Build training, validation and testing functions
inc_update_func = theano.function(
inputs=[alpha], outputs=cost, updates=inc_updates,
givens={
X: gpu_X,
Y: gpu_Y
})
param_update_func = theano.function(
inputs=[dummy], outputs=dummy, updates=param_updates)
valid_func = theano.function(
inputs=[], outputs=error,
givens={
X: gpu_X,
Y: gpu_Y
})
test_func = theano.function(
inputs=[], outputs=error,
givens={
X: gpu_X,
Y: gpu_Y
})
# Start training
best_valid_error = numpy.inf
test_error = numpy.inf
valid_freq = n_train_batches
patience = 20 * n_train_batches
done_looping = False
print "Start training ..."
begin_time = time.clock()
for epoch in xrange(n_epoch):
print "epoch {0}".format(epoch)
if done_looping: break
try:
for j in xrange(n_train_batches):
cur_iter = epoch * n_train_batches + j
# train
set_gpu(train_files[j])
batch_cost = inc_update_func(learning_rate)
param_update_func(0)
print "[train] batch {0}/{1}, iter {2}, cost {3}".format(
j+1, n_train_batches, cur_iter, batch_cost)
# validate
if (cur_iter + 1) % valid_freq == 0:
valid_error = []
for k in xrange(n_valid_batches):
set_gpu(valid_files[k])
valid_error.append(valid_func())
valid_error = numpy.mean(valid_error)
print "[valid] error {0}".format(valid_error)
if type(valid_error) is numpy.ndarray:
valid_error = valid_error.mean()
# test
if valid_error < best_valid_error:
if valid_error < best_valid_error * improvement:
patience = max(patience, cur_iter * patience_incr)
learning_rate = learning_rate * learning_rate_decr
print "Update patience {0}, learning_rate {1}".format(
patience, learning_rate)
best_valid_error = valid_error
test_error = []
for k in xrange(n_test_batches):
set_gpu(test_files[k])
test_error.append(test_func())
test_error = numpy.mean(test_error)
print "[test] error {0}".format(test_error)
                # early stopping
if cur_iter > patience and not never_stop:
done_looping = True
break
except KeyboardInterrupt:
print "Keyboard interrupt. Stop training."
done_looping = True
end_time = time.clock()
print "Training complete, time {0}".format(end_time - begin_time)
print "Best validation error {0}, test error {1}".format(
best_valid_error, test_error) | mit | -6,390,550,585,923,986,000 | 34.986711 | 92 | 0.559782 | false |
cisco-openstack/tempest | tempest/lib/services/compute/security_group_rules_client.py | 3 | 1912 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 import \
security_groups as schema
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
class SecurityGroupRulesClient(base_compute_client.BaseComputeClient):
def create_security_group_rule(self, **kwargs):
"""Create a new security group rule.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/compute/#create-security-group-rule
"""
post_body = json.dumps({'security_group_rule': kwargs})
url = 'os-security-group-rules'
resp, body = self.post(url, post_body)
body = json.loads(body)
self.validate_response(schema.create_security_group_rule, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_security_group_rule(self, group_rule_id):
"""Deletes the provided Security Group rule."""
resp, body = self.delete('os-security-group-rules/%s' %
group_rule_id)
self.validate_response(schema.delete_security_group_rule, resp, body)
return rest_client.ResponseBody(resp, body)
| apache-2.0 | -9,056,867,639,184,239,000 | 41.488889 | 78 | 0.698222 | false |
iulian787/spack | var/spack/repos/builtin/packages/httping/package.py | 2 | 1360 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Httping(AutotoolsPackage):
"""Httping is like 'ping' but for http-requests. Give it an url,
and it'll show you how long it takes to connect, send a request
and retrivee the reply(only the headers), Be aware that the
transmission across the network also takes time! So it measures
the latency. of the webserver + network. It supports, of course,
IPv6. httping was analyzed by Coverity Scan for software defects. """
homepage = "http://www.vanheusden.com/httping/"
url = "https://github.com/flok99/httping/archive/2.5.tar.gz"
version('2.5', sha256='2ad423097fa7a0d2d20a387050e34374326a703dddce897e152a8341e47ea500')
version('2.3.4', sha256='45ed71a72fd8c9c3975e49706c739395f75e3977b91f96e7e25652addfa0f242')
version('2.3.3', sha256='b76ec14cb4f6cd29b60a974254f4be37ed721c1660ecde9f6aac516ba521ab86')
version('2.3.1', sha256='90e86ca98f6c6bd33bd23a0eeda6f994dd8d147971d402da2733746c9b6ee61c')
version('2.3', sha256='5d87e59e5d9e216346769471b581f289eac5e49cfc969407c199761367553ca8')
def install(self, spec, prefix):
make('install', 'PREFIX={0}'.format(prefix))
| lgpl-2.1 | -3,384,257,749,615,424,500 | 49.37037 | 95 | 0.754412 | false |
pep-dortmund/mindstorms | python-examples/tankbot/read_sensors.py | 1 | 1270 | # -*- coding: utf-8 -*-
'''
This program just shows the sensor values of the robot.
It works for the TankBot configuration we used in the school.
'''
from __future__ import print_function, division, unicode_literals
from ev3.ev3dev import Motor
from ev3.lego import GyroSensor, UltrasonicSensor, ColorSensor
from time import sleep
from blessings import Terminal
term = Terminal()
# Setup sensors
gyro = GyroSensor(port=4)
gyro.start_value = gyro.ang
sonic_sensor = UltrasonicSensor(port=3)
color_sensor = ColorSensor(port=1)
def print_sensor_values():
angle = '{:5d}'.format(gyro.ang - gyro.start_value)
distance = '{:3.1f}'.format(sonic_sensor.dist_cm)
color = '{}'.format(color_sensor.colors[color_sensor.color])
rgb = color_sensor.rgb
print(term.move(0, 0) + '{:<10} = {:>5}°'.format('Winkel', angle))
print(term.move(1, 0) + '{:<10} = {:>5} cm'.format('Abstand', distance))
print(term.move(2, 0) + '{:<10} = {:<10}'.format('Farbe', color))
print(term.move(3, 0) + '{:<10} = {:03d} {:03d} {:03d}'.format('RGB', *rgb))
if __name__ == '__main__':
try:
with term.fullscreen():
while True:
print_sensor_values()
sleep(0.01)
    except (KeyboardInterrupt, SystemExit):
pass
| mit | -8,042,733,166,512,992,000 | 29.95122 | 80 | 0.631994 | false |
nnmware/nnmware | core/constants.py | 1 | 3845 | # nnmware(c)2012-2020
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
STATUS_UNKNOWN = 0
STATUS_DELETE = 1
STATUS_LOCKED = 2
STATUS_PUBLISHED = 3
STATUS_STICKY = 4
STATUS_MODERATION = 5
STATUS_DRAFT = 6
STATUS_CHOICES = (
(STATUS_UNKNOWN, _('Unknown')),
(STATUS_DELETE, _('Deleted')),
(STATUS_LOCKED, _('Locked')),
(STATUS_PUBLISHED, _('Published')),
(STATUS_STICKY, _('Sticky')),
(STATUS_MODERATION, _('Moderation')),
(STATUS_DRAFT, _('Draft')),
)
CONTENT_UNKNOWN = 0
CONTENT_TEXT = 1
CONTENT_IMAGE = 2
CONTENT_VIDEO = 3
CONTENT_CODE = 4
CONTENT_QUOTE = 5
CONTENT_URL = 6
CONTENT_RAW = 7
CONTENT_CHOICES = (
(CONTENT_UNKNOWN, _('Unknown')),
(CONTENT_TEXT, _('Text')),
(CONTENT_IMAGE, _('Image')),
(CONTENT_VIDEO, _('Video')),
(CONTENT_CODE, _('Code')),
(CONTENT_QUOTE, _('Quote')),
(CONTENT_URL, _('Url')),
(CONTENT_RAW, _('Raw input')),
)
NOTICE_UNKNOWN = 0
NOTICE_SYSTEM = 1
NOTICE_VIDEO = 2
NOTICE_TAG = 3
NOTICE_ACCOUNT = 4
NOTICE_PROFILE = 5
NOTICE_CHOICES = (
(NOTICE_UNKNOWN, _('Unknown')),
(NOTICE_SYSTEM, _('System')),
(NOTICE_VIDEO, _('Video')),
(NOTICE_TAG, _('Tag')),
(NOTICE_ACCOUNT, _('Account')),
(NOTICE_PROFILE, _('Profile')),
)
ACTION_UNKNOWN = 0
ACTION_SYSTEM = 1
ACTION_ADDED = 2
ACTION_COMMENTED = 3
ACTION_FOLLOWED = 4
ACTION_LIKED = 5
ACTION_EDITED = 6
ACTION_DELETED = 7
ACTION_UPDATED = 8
ACTION_CHOICES = (
(ACTION_UNKNOWN, _('Unknown')),
(ACTION_SYSTEM, _('System')),
(ACTION_ADDED, _('Added')),
(ACTION_COMMENTED, _('Commented')),
(ACTION_FOLLOWED, _('Followed')),
(ACTION_LIKED, _('Liked')),
(ACTION_EDITED, _('Edited')),
(ACTION_DELETED, _('Deleted')),
(ACTION_UPDATED, _('Updated ')),
)
GENDER_CHOICES = (('F', _('Female')), ('M', _('Male')), ('N', _('None')))
CONTACT_UNKNOWN = 0
CONTACT_MOBILE_PERSONAL = 1
CONTACT_MOBILE_WORK = 2
CONTACT_LANDLINE_PERSONAL = 3
CONTACT_LANDLINE_WORK = 4
CONTACT_MAIL_WORK = 5
CONTACT_MAIL_PERSONAL = 6
CONTACT_WEBSITE_WORK = 7
CONTACT_WEBSITE_PERSONAL = 8
CONTACT_ICQ = 9
CONTACT_SKYPE = 10
CONTACT_JABBER = 11
CONTACT_FACEBOOK = 12
CONTACT_GOOGLEPLUS = 13
CONTACT_VK = 14
CONTACT_ODNOKLASSNIKI = 15
CONTACT_TWITTER = 16
CONTACT_MOIKRUG = 17
CONTACT_GITHUB = 18
CONTACT_BITMESSAGE = 19
CONTACT_LINKEDIN = 20
CONTACT_TELEGRAM = 21
CONTACT_OTHER_SOCIAL = 99
CONTACTS_CHOICES = (
(CONTACT_UNKNOWN, _('Unknown')),
(CONTACT_MOBILE_PERSONAL, _('Personal mobile phone')),
(CONTACT_MOBILE_WORK, _('Work mobile phone')),
(CONTACT_LANDLINE_PERSONAL, _('Personal landline phone')),
(CONTACT_LANDLINE_WORK, _('Work landline phone')),
(CONTACT_MAIL_WORK, _('Public email')),
(CONTACT_MAIL_PERSONAL, _('Private email')),
(CONTACT_WEBSITE_WORK, _('Work website')),
(CONTACT_WEBSITE_PERSONAL, _('Personal website')),
(CONTACT_ICQ, _('ICQ')),
(CONTACT_SKYPE, _('Skype')),
(CONTACT_JABBER, _('Jabber')),
(CONTACT_FACEBOOK, _('Facebook')),
(CONTACT_GOOGLEPLUS, _('Google+')),
(CONTACT_VK, _('VKontakte')),
(CONTACT_ODNOKLASSNIKI, _('Odnoklassniki')),
(CONTACT_TWITTER, _('Twitter')),
(CONTACT_MOIKRUG, _('Moikrug')),
(CONTACT_GITHUB, _('GitHub')),
(CONTACT_BITMESSAGE, _('BitMessage')),
(CONTACT_LINKEDIN, _('LinkedIn')),
(CONTACT_TELEGRAM, _('Telegram')),
(CONTACT_OTHER_SOCIAL, _('Other social network')),
)
SKILL_UNKNOWN = 0
SKILL_FAN = 1
SKILL_PRO = 2
SKILL_CHOICES = (
(SKILL_UNKNOWN, _("Unknown")),
(SKILL_FAN, _("Fan")),
(SKILL_PRO, _("Pro")),
)
EDU_UNKNOWN = 0
EDU_TRAINING = 1
EDU_MIDDLE = 2
EDU_HIGH = 3
EDU_CHOICES = (
(EDU_UNKNOWN, _("Unknown education")),
(EDU_TRAINING, _("Training course")),
(EDU_MIDDLE, _("Secondary education")),
(EDU_HIGH, _("Higher education")),
)
| gpl-3.0 | 6,167,032,613,762,811,000 | 22.588957 | 73 | 0.623147 | false |
lyx003288/python_test | youzai_test.py | 1 | 5600 | #!/usr/bin/python
#-*- coding: UTF-8 -*-
import sys
import os
import logging
import time
import datetime
import ftplib
import tarfile
import httplib
import json
def generate_file_name(day_str):
year = int(day_str[0:4])
month = int(day_str[5:7])
day = int(day_str[8:10])
cur_date = datetime.date(year, month, day)
next_date = cur_date + datetime.timedelta(days=1)
cur_date_str = cur_date.strftime("%Y%m%d")
next_date_str = next_date.strftime("%Y%m%d")
return "dtl_full_pv_detail"+cur_date_str+"0000-"+next_date_str+"0000.csv"
def generate_upload_file(day_fmt):
'''
    Generate the data file to upload.
    :param day_fmt: date of the data, format: YYYY-mm-dd
    :return: name of the file to upload
'''
file_name = generate_file_name(day_fmt)
# sql = (r'set hive.cli.print.header=true;'
# 'select sid, uid, vid, pid, peid, req_url, page_title, ref_url, source_type,ext_url,entry_pid,'
# 'entry_url,entry_title,entry_peid,exit_pid, exit_url,exit_title,exit_peid,search_keyword,'
# 'se_charset,utm_name,utm_source,utm_medium,utm_term,utm_content,browser,browser_ver,os,os_ver,'
# 'terminal_type,terminal_maker,terminal_model,screen_width,screen_height,system_platform, '
# 'brower_language as browser_language,b_charset,browser_resolution,ip,ucountry,uregion,ucity,'
# 'pv_starttime,pv_endtime,case when scroll_max is not null then scroll_max else 0 end as scroll_max, '
# 'def01 from ptmind_data.dtl_full_pv_detail where sitetz="E0800" and partdt="' + day_fmt + '" and sid = "49512ddd";')
# cmd = "hive -e '" + sql + "' > " + file_name
cmd = "dir > " + file_name # todo del test
code = os.system(cmd)
if(code == 0):
        # Compress the data
tar_file_name = file_name + ".tgz"
tar = tarfile.open( tar_file_name, "w:gz")
tar.add(file_name)
tar.close()
        os.remove(file_name)  # delete the uncompressed source data
return tar_file_name
else:
logging.info("generate_upload_file exec code %d, args %s", code, day_fmt)
exit("generate_upload_file error")
def ftp_upload(files):
status = "600 Transfer error"
ftp = ftplib.FTP()
ftp.set_debuglevel(2)
try:
        # Debug level 2 prints verbose information; 0 turns debugging output off
ftp.connect('172.16.100.15', '21')
        # Connect
ftp.login('ftpuser', '0459223lyx')
logging.info( ftp.getwelcome() )
        # Set the buffer block size
bufsize = 1024
        # Open the local file in read mode
for file in files:
file_handler = open(file, 'rb')
            # Upload the file
try:
status = ftp.storbinary('STOR %s' % os.path.basename(file), file_handler, bufsize)
if (status[0:3] == "226"):
logging.info("upload file %s ok", file)
upload_callback(file)
else:
logging.error("upload %s error, status=%s", file, status)
exit("upload error")
except Exception as e:
logging.error("%s", e)
finally:
file_handler.close()
except Exception as e:
logging.error("%s", e)
finally:
ftp.set_debuglevel(0)
ftp.quit()
return status
def upload_callback(file):
post_params = json.dumps({"file_name": file})
headers = {'Content-type': 'application/json;charset=UTF-8'}
http_client = httplib.HTTPConnection("127.0.0.1", 8890, timeout=5)
http_client.request("POST", "", post_params, headers)
response = http_client.getresponse()
print("head: %s" % response.getheaders())
print("status: %s, reason: %s" % (response.status, response.reason))
content = response.read()
logging.info("upload_callback resp %s", content)
def upload_between_date(start_date, end_date):
date_begin = datetime.date(int(start_date[0:4]), int(start_date[5:7]), int(start_date[8:10]))
date_end = datetime.date(int(end_date[0:4]), int(end_date[5:7]), int(end_date[8:10]))
upload_files = []
for i in range( (date_end-date_begin).days + 1):
day = date_begin + datetime.timedelta(days=i)
file = generate_upload_file(day.__str__())
upload_files.append(file)
ftp_upload(upload_files)
def which_date(argv):
start_date = ""
end_date = ""
argv_len = len(argv)
if(argv_len == 0):
start_date = time.strftime("%Y-%m-%d", time.localtime(time.time()-86400))
end_date = start_date
elif(argv_len == 1):
start_date = argv[0]
end_date = start_date
elif(argv_len == 2):
start_date = argv[0]
end_date = argv[1]
if (is_valid_date(start_date) == True and start_date <= end_date):
return 0, start_date, end_date
return 1, start_date, end_date
def is_valid_date(date_str):
try:
time.strptime(date_str, "%Y-%m-%d")
return True
except:
return False
if(__name__ == "__main__"):
args_num = len(sys.argv)
if( args_num > 3 ):
logging.error("Usage: %s [start_date [end_date]], date format: YYYY-MM-DD", sys.argv[0])
exit("args number error")
    # Use the directory containing this script as the working directory
cur_file_path = os.path.split(os.path.realpath(__file__))[0]
os.chdir(cur_file_path)
    # Get the date range of the data to upload
status, start_date, end_date = which_date(sys.argv[1:])
    # Prepare and upload the data
if(status == 0):
upload_between_date(start_date, end_date)
else:
logging.error("args error %s", sys.argv)
| mit | 4,497,413,346,387,905,500 | 34.906667 | 129 | 0.588563 | false |
remvo/zstt-ros | src/gamecontroller_msgs/src/constants.py | 1 | 1047 | from enum import IntEnum
# Socket info.
DEFAULT_LISTENING_HOST = '0.0.0.0'
GAMECONTROLLER_LISTEN_PORT = 3838
GAMECONTROLLER_ANSWER_PORT = 3939
# Game Controller message info.
GAMECONTROLLER_STRUCT_HEADER = b'RGme'
GAMECONTROLLER_STRUCT_VERSION = 12
GAMECONTROLLER_RESPONSE_VERSION = 2
class SPLTeamColor(IntEnum):
BLUE = 0 # cyan, blue, violet
RED = 1 # magenta, pink (not red/orange)
YELLOW = 2 # yellow
BLACK = 3 # black, dark gray
WHITE = 4 # white
GREEN = 5 # green
ORANGE = 6 # orange
PURPLE = 7 # purple, violet
BROWN = 8 # brown
GRAY = 9 # lighter grey
class State(IntEnum):
INITIAL = 0
READY = 1
SET = 2
PLAYING = 3
FINISHED = 4
class State2(IntEnum):
NORMAL = 0
PENALTYSHOOT = 1
OVERTIME = 2
TIMEOUT = 3
DIRECT_FREEKICK = 4
INDIRECT_FREEKICK = 5
PENALTYKICK = 6
DROPBALL = 128
UNKNOWN = 255
class SPLPenalty(IntEnum):
SUBSTITUTE = 14
BALL_MANIPULATION = 30
PUSHING = 31
PICKUP = 34
SERVICE = 35
| apache-2.0 | 3,075,480,213,569,100,300 | 20.8125 | 48 | 0.629417 | false |
faucetsdn/faucet | clib/mininet_test_topo.py | 5 | 26614 | #!/usr/bin/env python3
"""Topology components for FAUCET Mininet unit tests."""
from collections import namedtuple
import os
import socket
import string
import shutil
import subprocess
import time
import netifaces
# pylint: disable=too-many-arguments
from mininet.log import output, warn
from mininet.topo import Topo
from mininet.node import Controller
from mininet.node import CPULimitedHost
from mininet.node import OVSSwitch
from mininet.link import TCIntf, Link
from clib import mininet_test_util
SWITCH_START_PORT = 5
class FaucetIntf(TCIntf):
"""TCIntf that doesn't complain unnecessarily"""
def delete(self):
"""Ignore interface deletion failure;
this is common after a veth pair has been deleted
on the other side."""
self.cmd('ip link del', self.name, '|| true')
self.node.delIntf(self)
self.link = None
class FaucetLink(Link):
"""Link using FaucetIntfs"""
def __init__(self, node1, node2, port1=None, port2=None,
intfName1=None, intfName2=None,
addr1=None, addr2=None, **params):
Link.__init__(self, node1, node2, port1=port1, port2=port2,
intfName1=intfName1, intfName2=intfName2,
cls1=FaucetIntf, cls2=FaucetIntf,
addr1=addr1, addr2=addr2,
params1=params, params2=params)
class FaucetHost(CPULimitedHost):
"""Base Mininet Host class, for Mininet-based tests."""
def create_dnsmasq(self, tmpdir, iprange, router, vlan, interface=None):
"""Start dnsmasq instance inside dnsmasq namespace"""
if interface is None:
interface = self.defaultIntf()
dhcp_leasefile = os.path.join(tmpdir, 'nfv-dhcp-%s-vlan%u.leases' % (self.name, vlan))
log_facility = os.path.join(tmpdir, 'nfv-dhcp-%s-vlan%u.log' % (self.name, vlan))
pid_file = os.path.join(tmpdir, 'dnsmasq-%s-vlan%u.pid' % (self.name, vlan))
cmd = 'dnsmasq'
opts = ''
opts += ' --dhcp-range=%s,255.255.255.0' % iprange
opts += ' --dhcp-sequential-ip'
opts += ' --dhcp-option=option:router,%s' % router
opts += ' --no-resolv --txt-record=does.it.work,yes'
opts += ' --bind-interfaces'
opts += ' --except-interface=lo'
opts += ' --interface=%s' % (interface)
opts += ' --dhcp-leasefile=%s' % dhcp_leasefile
opts += ' --log-facility=%s' % log_facility
opts += ' --pid-file=%s' % pid_file
opts += ' --conf-file='
return self.cmd(cmd + opts)
def return_ip(self):
"""Return host IP as a string"""
return self.cmd('hostname -I')
class VLANHost(FaucetHost):
"""Implementation of a Mininet host on a tagged VLAN."""
intf_root_name = None
vlans = None
vlan_intfs = None
def config(self, vlans=[100], **params): # pylint: disable=arguments-differ
"""Configure VLANHost according to (optional) parameters:
vlans (list): List of VLAN IDs (for the VLANs the host is configured to have) for default interface
vlan_intfs (dict): Dictionary of interface IP addresses keyed by VLAN indices"""
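        # Illustrative parameters (addresses are hypothetical): with
        # vlans=[100, 200] and vlan_intfs={0: '10.0.0.1/24', (0, 1): '10.1.0.1/24'},
        # index 0 creates a single-tagged sub-interface on VLAN 100, while the
        # (0, 1) key creates a double-tagged (QinQ) sub-interface tagged 100 then 200.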
super_config = super().config(**params)
self.vlans = vlans
self.vlan_intfs = {}
cmds = []
intf = self.defaultIntf()
self.intf_root_name = intf.name
if 'vlan_intfs' in params:
vlan_intfs = params.get('vlan_intfs', {})
for vlan_id, ip in vlan_intfs.items():
if isinstance(vlan_id, tuple):
# Interface will take multiply VLAN tagged packets
intf_name = '%s' % intf.name
for vlan_i in vlan_id:
prev_name = intf_name
# Cannot have intf name tu0xy-eth0.VID1.VID2 as that takes up too many bytes
intf_name += '.%s' % vlan_i
cmds.extend([
'ip link add link %s name %s type vlan id %s' % (prev_name, intf_name, vlans[vlan_i]),
'ip link set dev %s up' % (intf_name)
])
self.nameToIntf[intf_name] = intf
self.vlan_intfs.setdefault(vlan_id, [])
self.vlan_intfs[vlan_id].append(intf_name)
cmds.append('ip -4 addr add %s dev %s' % (ip, intf_name))
else:
intf_name = '%s.%s' % (intf, vlans[vlan_id])
cmds.extend([
'vconfig add %s %d' % (intf.name, vlans[vlan_id]),
'ip -4 addr add %s dev %s' % (ip, intf_name),
'ip link set dev %s up' % intf_name])
self.nameToIntf[intf_name] = intf
self.vlan_intfs[vlan_id] = intf_name
else:
vlan_intf_name = '%s.%s' % (intf, '.'.join(str(v) for v in vlans))
cmds.extend([
'ip link set dev %s up' % vlan_intf_name,
'ip -4 addr add %s dev %s' % (params['ip'], vlan_intf_name)])
for v in vlans:
cmds.append('vconfig add %s %d' % (intf, v))
intf.name = vlan_intf_name
self.nameToIntf[vlan_intf_name] = intf
cmds.extend([
'ip -4 addr flush dev %s' % intf,
'ip -6 addr flush dev %s' % intf])
for cmd in cmds:
self.cmd(cmd)
return super_config
class FaucetSwitch(OVSSwitch):
"""Switch that will be used by all tests (netdev based OVS)."""
clist = None
controller_params = {
'controller_burst_limit': 25,
'controller_rate_limit': 100,
}
def __init__(self, name, **params):
self.clist = []
super().__init__(
name=name, reconnectms=8000, **params)
@staticmethod
def _workaround(args):
"""Workarounds/hacks for errors resulting from
cmd() calls within Mininet"""
# Workaround: ignore ethtool errors on tap interfaces
# This allows us to use tap tunnels as cables to switch ports,
# for example to test against OvS in a VM.
if (len(args) > 1 and args[0] == 'ethtool -K' and
getattr(args[1], 'name', '').startswith('tap')):
return True
return False
def cmd(self, *args, success=0, **kwargs):
"""Commands typically must succeed for proper switch operation,
so we check the exit code of the last command in *args.
success: desired exit code (or None to skip check)"""
# pylint: disable=arguments-differ
cmd_output = super().cmd(*args, **kwargs)
exit_code = int(super().cmd('echo $?'))
if success is not None and exit_code != success:
msg = "%s exited with (%d):'%s'" % (args, exit_code, cmd_output)
if self._workaround(args):
warn('Ignoring:', msg, '\n')
else:
raise RuntimeError(msg)
return cmd_output
def attach(self, intf):
"Attach an interface and set its port"
super().attach(intf)
# This should be done in Mininet, but we do it for now
port = self.ports[intf]
self.cmd('ovs-vsctl set Interface', intf, 'ofport_request=%s' % port)
def addController(self, controller):
self.clist.append((
self.name + controller.name, '%s:%s:%d' % (
controller.protocol, controller.IP(), controller.port)))
if self.listenPort:
self.clist.append((self.name + '-listen',
'ptcp:%s' % self.listenPort))
ccmd = '-- --id=@%s create Controller target=\\"%s\\"'
if self.reconnectms:
ccmd += ' max_backoff=%d' % self.reconnectms
for param, value in self.controller_params.items():
ccmd += ' %s=%s' % (param, value)
cargs = ' '.join(ccmd % (name, target)
for name, target in self.clist)
# Controller ID list
cids = ','.join('@%s' % name for name, _target in self.clist)
# One ovs-vsctl command to rule them all!
self.vsctl(cargs +
' -- set bridge %s controller=[%s]' % (self, cids))
def start(self, controllers):
# Transcluded from Mininet source, since need to insert
# controller parameters at switch creation time.
int(self.dpid, 16) # DPID must be a hex string
switch_intfs = [intf for intf in self.intfList() if self.ports[intf] and not intf.IP()]
# Command to add interfaces
intfs = ' '.join(' -- add-port %s %s' % (self, intf) +
self.intfOpts(intf)
for intf in switch_intfs)
# Command to create controller entries
self.clist = [(self.name + c.name, '%s:%s:%d' %
(c.protocol, c.IP(), c.port))
for c in controllers]
if self.listenPort:
self.clist.append((self.name + '-listen',
'ptcp:%s' % self.listenPort))
ccmd = '-- --id=@%s create Controller target=\\"%s\\"'
if self.reconnectms:
ccmd += ' max_backoff=%d' % self.reconnectms
for param, value in self.controller_params.items():
ccmd += ' %s=%s' % (param, value)
cargs = ' '.join(ccmd % (name, target)
for name, target in self.clist)
# Controller ID list
cids = ','.join('@%s' % name for name, _target in self.clist)
# Try to delete any existing bridges with the same name
if not self.isOldOVS():
cargs += ' -- --if-exists del-br %s' % self
# One ovs-vsctl command to rule them all!
self.vsctl(cargs +
' -- add-br %s' % self +
' -- set bridge %s controller=[%s]' % (self, cids) +
self.bridgeOpts() +
intfs)
# switch interfaces on mininet host, must have no IP config.
for intf in switch_intfs:
for ipv in (4, 6):
self.cmd('ip -%u addr flush dev %s' % (ipv, intf))
assert self.cmd('echo 1 > /proc/sys/net/ipv6/conf/%s/disable_ipv6' % intf) == ''
# If necessary, restore TC config overwritten by OVS
if not self.batch:
for intf in self.intfList():
self.TCReapply(intf)
class NoControllerFaucetSwitch(FaucetSwitch):
"""A switch without any controllers (typically for remapping hardware to software."""
def start(self, _controllers):
super().start(controllers=[])
class FaucetSwitchTopo(Topo):
"""FAUCET switch topology that contains a software switch."""
CPUF = 0.5
DELAY = '1ms'
def __init__(self, *args, **kwargs):
self.dpid_names = {} # maps dpids to switch names
self.switch_dpids = {} # maps switch names to dpids
self.switch_ports = {} # maps switch names to port lists
self.dpid_port_host = {} # maps switch hosts to ports
super().__init__(*args, **kwargs)
@staticmethod
def _get_sid_prefix(ports_served):
"""Return a unique switch/host prefix for a test."""
# Linux tools require short interface names.
id_chars = ''.join(sorted(string.ascii_letters + string.digits)) # pytype: disable=module-attr
id_a = int(ports_served / len(id_chars))
id_b = ports_served - (id_a * len(id_chars))
return '%s%s' % (
id_chars[id_a], id_chars[id_b])
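    # Illustrative sketch (added comment, not in the original): with the sorted
    # 62-character alphabet '0-9A-Za-z', _get_sid_prefix(0) -> '00',
    # _get_sid_prefix(61) -> '0z' and _get_sid_prefix(62) -> '10'.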
def _add_tagged_host(self, sid_prefix, tagged_vids, host_n):
"""Add a single tagged test host."""
host_name = 't%s%1.1u' % (sid_prefix, host_n + 1)
return self.addHost(
name=host_name, cls=VLANHost, vlans=tagged_vids, cpu=self.CPUF)
def _add_untagged_host(self, sid_prefix, host_n, inNamespace=True): # pylint: disable=invalid-name
"""Add a single untagged test host."""
host_name = 'u%s%1.1u' % (sid_prefix, host_n + 1)
return self.addHost(name=host_name, cls=FaucetHost, cpu=self.CPUF, inNamespace=inNamespace)
def _add_extended_host(self, sid_prefix, host_n, e_cls, tmpdir):
"""Add a single extended test host."""
host_name = 'e%s%1.1u' % (sid_prefix, host_n + 1)
return self.addHost(name=host_name, cls=e_cls, host_n=host_n, tmpdir=tmpdir)
def _add_faucet_switch(self, sid_prefix, dpid, hw_dpid, ovs_type):
"""Add a FAUCET switch."""
switch_cls = FaucetSwitch
switch_name = 's%s' % sid_prefix
self.switch_dpids[switch_name] = dpid
self.dpid_names[dpid] = switch_name
if hw_dpid and hw_dpid == dpid:
remap_dpid = str(int(dpid) + 1)
output('bridging hardware switch DPID %s (%x) dataplane via OVS DPID %s (%x)\n' % (
dpid, int(dpid), remap_dpid, int(remap_dpid)))
dpid = remap_dpid
switch_cls = NoControllerFaucetSwitch
return self.addSwitch(
name=switch_name,
cls=switch_cls,
datapath=ovs_type,
dpid=mininet_test_util.mininet_dpid(dpid))
# Hardware switch port virtualization through
# transparent OVS attachment bridge/patch panel
#
# Since FAUCET is talking to the hardware switch, it needs
# to use the hardware switch's OpenFlow ports, rather than
# the OpenFlow ports of the (transparent) OVS attachment bridge.
def hw_remap_port(self, dpid, port):
"""Map OVS attachment bridge port number -> HW port number if necessary"""
if dpid != self.hw_dpid:
return port
assert self.hw_ports
return self.hw_ports[port - self.start_port]
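    # Illustrative sketch (added comment; the values below are assumptions): with
    # start_port=5 and hw_ports=[17, 18, 19, 20], a dpid equal to hw_dpid maps
    # OVS port 6 -> hardware port 18; any other dpid returns the port unchanged.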
peer_link = namedtuple('peer_link', 'port peer_dpid peer_port')
def hw_remap_peer_link(self, dpid, link):
"""Remap HW port numbers -> OVS port numbers in link if necessary"""
port = self.hw_remap_port(dpid, link.port)
peer_port = self.hw_remap_port(link.peer_dpid, link.peer_port)
return self.peer_link(port, link.peer_dpid, peer_port)
def dpid_ports(self, dpid):
"""Return port list for dpid, remapping if necessary"""
name = self.dpid_names[dpid]
ports = self.switch_ports[name]
return [self.hw_remap_port(dpid, port) for port in ports]
@staticmethod
def extend_port_order(port_order=None, max_length=16):
"""Extend port_order to max_length if needed"""
if not port_order:
port_order = []
return port_order + list(range(len(port_order), max_length + 1))
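    # Illustrative sketch (added comment, not in the original):
    #   extend_port_order([1, 2, 3], max_length=5) -> [1, 2, 3, 3, 4, 5]
    #   extend_port_order(None, max_length=2)      -> [0, 1, 2]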
def _add_links(self, switch, dpid, hosts, links_per_host):
self.switch_ports.setdefault(switch, [])
self.dpid_port_host.setdefault(int(dpid), {})
index = 0
for host in hosts:
for _ in range(links_per_host):
# Order of switch/host is important, since host may be in a container.
port = self.start_port + self.port_order[index]
self.addLink(switch, host, port1=port, delay=self.DELAY, use_htb=True)
# Keep track of switch ports
self.switch_ports.setdefault(switch, [])
self.switch_ports[switch].append(port)
self.dpid_port_host[int(dpid)][port] = host
index += 1
return index
# pylint: disable=too-many-locals,arguments-differ
def build(self, ovs_type, ports_sock, test_name, dpids,
n_tagged=0, tagged_vid=100, n_untagged=0, links_per_host=0,
n_extended=0, e_cls=None, tmpdir=None, hw_dpid=None, switch_map=None,
host_namespace=None, start_port=SWITCH_START_PORT, port_order=None,
get_serialno=mininet_test_util.get_serialno):
if not host_namespace:
host_namespace = {}
self.hw_dpid = hw_dpid # pylint: disable=attribute-defined-outside-init
self.hw_ports = sorted(switch_map) if switch_map else [] # pylint: disable=attribute-defined-outside-init
self.start_port = start_port # pylint: disable=attribute-defined-outside-init
maxlength = n_tagged + n_untagged + n_extended
self.port_order = self.extend_port_order( # pylint: disable=attribute-defined-outside-init
port_order, maxlength)
for dpid in dpids:
serialno = get_serialno(ports_sock, test_name)
sid_prefix = self._get_sid_prefix(serialno)
tagged = [self._add_tagged_host(sid_prefix, [tagged_vid], host_n)
for host_n in range(n_tagged)]
untagged = [self._add_untagged_host(
sid_prefix, host_n, host_namespace.get(host_n, True))
for host_n in range(n_untagged)]
extended = [self._add_extended_host(sid_prefix, host_n, e_cls, tmpdir)
for host_n in range(n_extended)]
switch = self._add_faucet_switch(sid_prefix, dpid, hw_dpid, ovs_type)
self._add_links(switch, dpid, tagged + untagged + extended, links_per_host)
class BaseFAUCET(Controller):
"""Base class for FAUCET and Gauge controllers."""
# Set to True to have cProfile output to controller log.
CPROFILE = False
controller_intf = None
controller_ipv6 = False
controller_ip = None
pid_file = None
tmpdir = None
ofcap = None
MAX_OF_PKTS = 5000
MAX_CTL_TIME = 600
BASE_CARGS = ' '.join((
'--verbose',
'--use-stderr',
'--ryu-ofp-tcp-listen-port=%s'))
RYU_CONF = """
[DEFAULT]
echo_request_interval=10
maximum_unreplied_echo_requests=5
socket_timeout=15
"""
def __init__(self, name, tmpdir, controller_intf=None, controller_ipv6=False,
cargs='', **kwargs):
name = '%s-%u' % (name, os.getpid())
self.tmpdir = tmpdir
self.controller_intf = controller_intf
self.controller_ipv6 = controller_ipv6
super().__init__(
name, cargs=self._add_cargs(cargs, name), **kwargs)
def _add_cargs(self, cargs, name):
ofp_listen_host_arg = ''
if self.controller_intf is not None:
socket_type = socket.AF_INET
if self.controller_ipv6:
socket_type = socket.AF_INET6
self.controller_ip = netifaces.ifaddresses( # pylint: disable=c-extension-no-member
self.controller_intf)[socket_type][0]['addr']
ofp_listen_host_arg = '--ryu-ofp-listen-host=%s' % self.controller_ip
self.pid_file = os.path.join(self.tmpdir, name + '.pid')
pid_file_arg = '--ryu-pid-file=%s' % self.pid_file
ryu_conf_file = os.path.join(self.tmpdir, 'ryu.conf')
with open(ryu_conf_file, 'w') as ryu_conf:
ryu_conf.write(self.RYU_CONF)
ryu_conf_arg = '--ryu-config-file=%s' % ryu_conf_file
return ' '.join((
self.BASE_CARGS, pid_file_arg, ryu_conf_arg, ofp_listen_host_arg, cargs))
def IP(self): # pylint: disable=invalid-name,arguments-differ
if self.controller_intf is not None:
return self.controller_ip
return super().IP()
def _start_tcpdump(self):
"""Start a tcpdump for OF port."""
self.ofcap = os.path.join(self.tmpdir, '-'.join((self.name, 'of.cap')))
tcpdump_args = ' '.join((
'-s 0',
'-e',
'-n',
'-U',
'-q',
'-W 1', # max files 1
'-G %u' % (self.MAX_CTL_TIME - 1),
'-c %u' % (self.MAX_OF_PKTS),
'-i %s' % self.controller_intf,
'-w %s' % self.ofcap,
'tcp and port %u' % self.port,
'>/dev/null',
'2>/dev/null',
))
self.cmd('timeout %s tcpdump %s &' % (
self.MAX_CTL_TIME, tcpdump_args))
for _ in range(5):
if os.path.exists(self.ofcap):
return
time.sleep(1)
assert False, 'tcpdump of OF channel did not start'
@staticmethod
def _tls_cargs(ofctl_port, ctl_privkey, ctl_cert, ca_certs):
"""Add TLS/cert parameters to Ryu."""
tls_cargs = []
for carg_val, carg_key in ((ctl_privkey, 'ryu-ctl-privkey'),
(ctl_cert, 'ryu-ctl-cert'),
(ca_certs, 'ryu-ca-certs')):
if carg_val:
tls_cargs.append(('--%s=%s' % (carg_key, carg_val)))
if tls_cargs:
tls_cargs.append(('--ryu-ofp-ssl-listen-port=%u' % ofctl_port))
return ' '.join(tls_cargs)
def _command(self, env, tmpdir, name, args):
"""Wrap controller startup command in shell script with environment."""
env_vars = []
for var, val in sorted(env.items()):
env_vars.append('='.join((var, val)))
script_wrapper_name = os.path.join(tmpdir, 'start-%s.sh' % name)
cprofile_args = ''
if self.CPROFILE:
cprofile_args = 'python3 -m cProfile -s time'
full_faucet_dir = os.path.abspath(mininet_test_util.FAUCET_DIR)
with open(script_wrapper_name, 'w') as script_wrapper:
faucet_cli = (
'PYTHONPATH=%s %s exec timeout %u %s %s %s $*\n' % (
os.path.dirname(full_faucet_dir),
' '.join(env_vars),
self.MAX_CTL_TIME,
os.path.join(full_faucet_dir, '__main__.py'),
cprofile_args,
args))
script_wrapper.write(faucet_cli)
return '/bin/sh %s' % script_wrapper_name
def ryu_pid(self):
"""Return PID of ryu-manager process."""
if os.path.exists(self.pid_file) and os.path.getsize(self.pid_file) > 0:
pid = None
with open(self.pid_file) as pid_file:
pid = int(pid_file.read())
return pid
return None
def listen_port(self, port, state='LISTEN'):
"""Return True if port in specified TCP state."""
for ipv in (4, 6):
listening_out = self.cmd(
mininet_test_util.tcp_listening_cmd(port, ipv=ipv, state=state)).split()
for pid in listening_out:
if int(pid) == self.ryu_pid():
return True
return False
# pylint: disable=invalid-name
@staticmethod
def checkListening():
"""Mininet's checkListening() causes occasional false positives (with
exceptions we can't catch), and we handle port conflicts ourselves anyway."""
return
def listening(self):
"""Return True if controller listening on required ports."""
return self.listen_port(self.port)
def connected(self):
"""Return True if at least one switch connected and controller healthy."""
return self.healthy() and self.listen_port(self.port, state='ESTABLISHED')
def logname(self):
"""Return log file for controller."""
return os.path.join('/tmp', self.name + '.log')
def healthy(self):
"""Return True if controller logging and listening on required ports."""
if (os.path.exists(self.logname()) and
os.path.getsize(self.logname()) and
self.listening()):
return True
return False
def start(self):
"""Start tcpdump for OF port and then start controller."""
self._start_tcpdump()
super().start()
def _stop_cap(self):
"""Stop tcpdump for OF port and run tshark to decode it."""
if os.path.exists(self.ofcap):
self.cmd(' '.join(['fuser', '-15', '-m', self.ofcap]))
text_ofcap_log = '%s.txt' % self.ofcap
with open(text_ofcap_log, 'w') as text_ofcap:
subprocess.call(
['timeout', str(self.MAX_CTL_TIME),
'tshark', '-l', '-n', '-Q',
'-d', 'tcp.port==%u,openflow' % self.port,
'-O', 'openflow_v4',
'-Y', 'openflow_v4',
'-r', self.ofcap],
stdout=text_ofcap,
stdin=mininet_test_util.DEVNULL,
stderr=mininet_test_util.DEVNULL,
close_fds=True)
def stop(self): # pylint: disable=arguments-differ
"""Stop controller."""
try:
if self.CPROFILE:
os.kill(self.ryu_pid(), 2)
else:
os.kill(self.ryu_pid(), 15)
except ProcessLookupError:
pass
self._stop_cap()
super().stop()
if os.path.exists(self.logname()):
tmpdir_logname = os.path.join(
self.tmpdir, os.path.basename(self.logname()))
if os.path.exists(tmpdir_logname):
os.remove(tmpdir_logname)
shutil.move(self.logname(), tmpdir_logname)
class FAUCET(BaseFAUCET):
"""Start a FAUCET controller."""
START_ARGS = ['--ryu-app=ryu.app.ofctl_rest']
# pylint: disable=too-many-locals
def __init__(self, name, tmpdir, controller_intf, controller_ipv6, env,
ctl_privkey, ctl_cert, ca_certs,
ports_sock, prom_port, port, test_name, **kwargs):
self.prom_port = prom_port
self.ofctl_port = mininet_test_util.find_free_port(
ports_sock, test_name)
cargs = ' '.join((
'--ryu-wsapi-host=%s' % mininet_test_util.LOCALHOSTV6,
'--ryu-wsapi-port=%u' % self.ofctl_port,
self._tls_cargs(port, ctl_privkey, ctl_cert, ca_certs)))
super().__init__(
name,
tmpdir,
controller_intf,
controller_ipv6,
cargs=cargs,
command=self._command(env, tmpdir, name, ' '.join(self.START_ARGS)),
port=port,
**kwargs)
def listening(self):
return (
self.listen_port(self.ofctl_port) and
self.listen_port(self.prom_port) and
super().listening())
class Gauge(BaseFAUCET):
"""Start a Gauge controller."""
def __init__(self, name, tmpdir, controller_intf, controller_ipv6, env,
ctl_privkey, ctl_cert, ca_certs,
port, **kwargs):
super().__init__(
name,
tmpdir,
controller_intf, controller_ipv6,
cargs=self._tls_cargs(port, ctl_privkey, ctl_cert, ca_certs),
command=self._command(env, tmpdir, name, '--gauge'),
port=port,
**kwargs)
| apache-2.0 | 3,959,059,830,026,162,700 | 39.632061 | 114 | 0.554257 | false |
rev2004/android2cloud.app-engine | google_appengine/google/appengine/api/blobstore/blobstore.py | 7 | 8449 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Python blobstore API used by app developers.
Contains methods used to interface with the Blobstore API. Defines a db.Key-like
class representing a blob-key. Contains the API part that forwards to apiproxy.
"""
import datetime
import time
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api import api_base_pb
from google.appengine.api.blobstore import blobstore_service_pb
from google.appengine.runtime import apiproxy_errors
__all__ = ['BLOB_INFO_KIND',
'BLOB_KEY_HEADER',
'BLOB_RANGE_HEADER',
'MAX_BLOB_FETCH_SIZE',
'UPLOAD_INFO_CREATION_HEADER',
'BlobFetchSizeTooLargeError',
'BlobKey',
'BlobNotFoundError',
'DataIndexOutOfRangeError',
'Error',
'InternalError',
'create_upload_url',
'delete',
'fetch_data',
]
BlobKey = datastore_types.BlobKey
BLOB_INFO_KIND = '__BlobInfo__'
BLOB_KEY_HEADER = 'X-AppEngine-BlobKey'
BLOB_RANGE_HEADER = 'X-AppEngine-BlobRange'
MAX_BLOB_FETCH_SIZE = (1 << 20) - (1 << 15)
UPLOAD_INFO_CREATION_HEADER = 'X-AppEngine-Upload-Creation'
_BASE_CREATION_HEADER_FORMAT = '%Y-%m-%d %H:%M:%S'
class Error(Exception):
"""Base blobstore error type."""
class InternalError(Error):
"""Raised when an internal error occurs within API."""
class BlobNotFoundError(Error):
"""Raised when attempting to access blob data for non-existant blob."""
class DataIndexOutOfRangeError(Error):
"""Raised when attempting to access indexes out of range in wrong order."""
class BlobFetchSizeTooLargeError(Error):
"""Raised when attempting to fetch too large a block from a blob."""
class _CreationFormatError(Error):
"""Raised when attempting to parse bad creation date format."""
def _ToBlobstoreError(error):
"""Translate an application error to a datastore Error, if possible.
Args:
error: An ApplicationError to translate.
"""
error_map = {
blobstore_service_pb.BlobstoreServiceError.INTERNAL_ERROR:
InternalError,
blobstore_service_pb.BlobstoreServiceError.BLOB_NOT_FOUND:
BlobNotFoundError,
blobstore_service_pb.BlobstoreServiceError.DATA_INDEX_OUT_OF_RANGE:
DataIndexOutOfRangeError,
blobstore_service_pb.BlobstoreServiceError.BLOB_FETCH_SIZE_TOO_LARGE:
BlobFetchSizeTooLargeError,
}
if error.application_error in error_map:
return error_map[error.application_error](error.error_detail)
else:
return error
def _format_creation(stamp):
"""Format an upload creation timestamp with milliseconds.
This method is necessary to format a timestamp with microseconds on Python
versions before 2.6.
Cannot simply convert datetime objects to str because the microseconds are
stripped from the format when set to 0. The upload creation date format will
always have microseconds padded out to 6 places.
Args:
stamp: datetime.datetime object to format.
Returns:
Formatted datetime as Python 2.6 format '%Y-%m-%d %H:%M:%S.%f'.
"""
return '%s.%06d' % (stamp.strftime(_BASE_CREATION_HEADER_FORMAT),
stamp.microsecond)
def _parse_creation(creation_string, field_name):
"""Parses upload creation string from header format.
Parse creation date of the format:
YYYY-mm-dd HH:MM:SS.ffffff
Y: Year
m: Month (01-12)
d: Day (01-31)
H: Hour (00-24)
M: Minute (00-59)
S: Second (00-59)
f: Microsecond
Args:
creation_string: String creation date format.
Returns:
datetime object parsed from creation_string.
Raises:
_CreationFormatError when the creation string is formatted incorrectly.
"""
split_creation_string = creation_string.split('.', 1)
if len(split_creation_string) != 2:
raise _CreationFormatError(
'Could not parse creation %s in field %s.' % (creation_string,
field_name))
timestamp_string, microsecond = split_creation_string
try:
timestamp = time.strptime(timestamp_string,
_BASE_CREATION_HEADER_FORMAT)
microsecond = int(microsecond)
except ValueError:
raise _CreationFormatError('Could not parse creation %s in field %s.'
% (creation_string, field_name))
return datetime.datetime(*timestamp[:6] + tuple([microsecond]))
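# Illustrative round-trip sketch (added comment, not part of the original module):
#   stamp = datetime.datetime(2010, 5, 1, 12, 30, 0, 42)
#   _format_creation(stamp)  -> '2010-05-01 12:30:00.000042'
#   _parse_creation('2010-05-01 12:30:00.000042', 'creation') == stamp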
def create_upload_url(success_path,
_make_sync_call=apiproxy_stub_map.MakeSyncCall):
"""Create upload URL for POST form.
Args:
success_path: Path within application to call when POST is successful
and upload is complete.
_make_sync_call: Used for dependency injection in tests.
"""
request = blobstore_service_pb.CreateUploadURLRequest()
response = blobstore_service_pb.CreateUploadURLResponse()
request.set_success_path(success_path)
try:
_make_sync_call('blobstore', 'CreateUploadURL', request, response)
except apiproxy_errors.ApplicationError, e:
raise _ToBlobstoreError(e)
return response.url()
def delete(blob_keys, _make_sync_call=apiproxy_stub_map.MakeSyncCall):
"""Delete a blob from Blobstore.
Args:
blob_keys: Single instance or list of blob keys. A blob-key can be either
a string or an instance of BlobKey.
_make_sync_call: Used for dependency injection in tests.
"""
if isinstance(blob_keys, (basestring, BlobKey)):
blob_keys = [blob_keys]
request = blobstore_service_pb.DeleteBlobRequest()
for blob_key in blob_keys:
request.add_blob_key(str(blob_key))
response = api_base_pb.VoidProto()
try:
_make_sync_call('blobstore', 'DeleteBlob', request, response)
except apiproxy_errors.ApplicationError, e:
raise _ToBlobstoreError(e)
def fetch_data(blob_key, start_index, end_index,
_make_sync_call=apiproxy_stub_map.MakeSyncCall):
"""Fetch data for blob.
See docstring for ext.blobstore.fetch_data for more details.
Args:
    blob_key: BlobKey, str or unicode representation of BlobKey of
blob to fetch data from.
start_index: Start index of blob data to fetch. May not be negative.
    end_index: End index (inclusive) of blob data to fetch. Must be
>= start_index.
Returns:
str containing partial data of blob. See docstring for
ext.blobstore.fetch_data for more details.
Raises:
See docstring for ext.blobstore.fetch_data for more details.
"""
if not isinstance(start_index, (int, long)):
raise TypeError('start_index must be integer.')
if not isinstance(end_index, (int, long)):
raise TypeError('end_index must be integer.')
if isinstance(blob_key, BlobKey):
blob_key = str(blob_key).decode('utf-8')
elif isinstance(blob_key, str):
blob_key = blob_key.decode('utf-8')
elif not isinstance(blob_key, unicode):
raise TypeError('Blob-key must be str, unicode or BlobKey: %s' % blob_key)
if start_index < 0:
raise DataIndexOutOfRangeError(
'May not fetch blob at negative index.')
if end_index < start_index:
raise DataIndexOutOfRangeError(
'Start index %d > end index %d' % (start_index, end_index))
fetch_size = end_index - start_index + 1
if fetch_size > MAX_BLOB_FETCH_SIZE:
raise BlobFetchSizeTooLargeError(
'Blob fetch size is too large: %d' % fetch_size)
request = blobstore_service_pb.FetchDataRequest()
response = blobstore_service_pb.FetchDataResponse()
request.set_blob_key(blob_key)
request.set_start_index(start_index)
request.set_end_index(end_index)
try:
_make_sync_call('blobstore', 'FetchData', request, response)
except apiproxy_errors.ApplicationError, e:
raise _ToBlobstoreError(e)
return response.data()
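# Minimal usage sketch (added for illustration; not part of the original module).
# Assumes a valid blob key obtained elsewhere, e.g. from an upload handler:
#   url = create_upload_url('/upload_done')
#   first_kb = fetch_data(blob_key, 0, 1023)   # start/end indexes are inclusive
#   delete(blob_key)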
| mit | 4,746,732,319,896,020,000 | 29.392086 | 79 | 0.694875 | false |
contactless/wirenboard | contrib/deb/mosquitto/mosquitto-1.4.2/test/broker/04-retain-qos0-repeated.py | 6 | 1985 | #!/usr/bin/env python
# Test whether a retained PUBLISH to a topic with QoS 0 is actually retained
# and delivered when multiple sub/unsub operations are carried out.
import subprocess
import socket
import time
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
keepalive = 60
mid = 16
connect_packet = mosq_test.gen_connect("retain-qos0-rep-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
publish_packet = mosq_test.gen_publish("retain/qos0/test", qos=0, payload="retained message", retain=True)
subscribe_packet = mosq_test.gen_subscribe(mid, "retain/qos0/test", 0)
suback_packet = mosq_test.gen_suback(mid, 0)
unsub_mid = 13
unsubscribe_packet = mosq_test.gen_unsubscribe(unsub_mid, "retain/qos0/test")
unsuback_packet = mosq_test.gen_unsuback(unsub_mid)
cmd = ['../../src/mosquitto', '-p', '1888']
broker = mosq_test.start_broker(filename=os.path.basename(__file__), cmd=cmd)
try:
sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20)
sock.send(publish_packet)
sock.send(subscribe_packet)
if mosq_test.expect_packet(sock, "suback", suback_packet):
if mosq_test.expect_packet(sock, "publish", publish_packet):
sock.send(unsubscribe_packet)
if mosq_test.expect_packet(sock, "unsuback", unsuback_packet):
sock.send(subscribe_packet)
if mosq_test.expect_packet(sock, "suback", suback_packet):
if mosq_test.expect_packet(sock, "publish", publish_packet):
rc = 0
sock.close()
finally:
broker.terminate()
broker.wait()
if rc:
(stdo, stde) = broker.communicate()
print(stde)
exit(rc)
| mit | -3,811,859,383,711,886,300 | 32.644068 | 129 | 0.687154 | false |
kevinlee12/oppia | core/domain/calculation_registry_test.py | 4 | 1483 | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for calculation registry."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import calculation_registry
from core.tests import test_utils
from extensions.answer_summarizers import models
class CalculationRegistryTests(test_utils.GenericTestBase):
"""Provides testing of the calculation registry."""
def test_get_calculation_by_id(self):
self.assertTrue(
isinstance(
calculation_registry.Registry.get_calculation_by_id(
'AnswerFrequencies'),
models.AnswerFrequencies))
with self.assertRaisesRegexp(
TypeError, '\'a\' is not a valid calculation id.'):
calculation_registry.Registry.get_calculation_by_id('a')
| apache-2.0 | -6,398,055,330,669,881,000 | 38.026316 | 78 | 0.718139 | false |
cmos3511/cmos_linux | python/pj/pj/bin/vplan_runner.py | 1 | 19315 | """
Author: Guanyu Yi @ CPU Verification Platform Group
Email: [email protected]
Description: pj vplan sub-command entrance and vplan flow class
"""
import os
import re
import datetime as dt
import collections
import requests
import openpyxl
from openpyxl.styles import Alignment, PatternFill
import pcom
import env_booter
LOG = pcom.gen_logger(__name__)
class VPlanProc(object):
"""vplan flow processor and excel generator for pj"""
def __init__(self, ced, cfg_dic, days):
self.ced = ced
self.cfg_dic = cfg_dic
self.days = days
self.gen_flg = False
self.v_dic = {"tpn": 0, "ttn": 0, "ccs": "0", "fcp": "0"}
@classmethod
def gen_per_color(cls, per):
"""to generate percentage background color"""
if not per.replace(".", "").isdigit():
return "FFFFFFFF"
per = float(per)
if 0 <= per < 5:
c_s = "FFFF0000"
elif 5 <= per < 20:
c_s = "FFFF3C3C"
elif 20 <= per < 40:
c_s = "FFFF2A00"
elif 40 <= per < 60:
c_s = "FFFFA500"
elif 60 <= per < 80:
c_s = "FFAEFF00"
elif 80 <= per < 95:
c_s = "FF04FF00"
elif 95 <= per <= 100:
c_s = "FF00FF00"
else:
c_s = "FFFF0000"
return c_s
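    # Illustrative mapping (added comment, not in the original):
    #   VPlanProc.gen_per_color("abc") -> "FFFFFFFF"  (non-numeric -> white)
    #   VPlanProc.gen_per_color("50")  -> "FFFFA500"  (40-60 band)
    #   VPlanProc.gen_per_color("97")  -> "FF00FF00"  (95-100 band)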
def fill_cc_scores(self, cell, score):
"""to fill code coverage cell by using score"""
cell.value = f"{score} %"
cell.fill = PatternFill(fill_type="gray125", end_color=self.gen_per_color(score))
def proc_vplan_row1(self, w_s):
"""to process vplan row1 style per sheet except home"""
for cell, width in zip(w_s[1], pcom.rd_cfg(
self.cfg_dic["proj"], "vplan_column_width", w_s.title)):
if self.gen_flg:
cell.style = "Accent1"
cell.alignment = Alignment(wrap_text=True)
w_s.column_dimensions[cell.column].width = width
def parse_ch_report(self):
"""to parse coverage hierarchy report"""
cd_rpt_file = f"{self.ced['COV_MERGE']}{os.sep}urgReport{os.sep}dashboard.txt"
ch_rpt_file = f"{self.ced['COV_MERGE']}{os.sep}urgReport{os.sep}hierarchy.txt"
if not os.path.isfile(cd_rpt_file):
raise Exception(f"merged code coverage dashboard file {cd_rpt_file} is NA")
if not os.path.isfile(ch_rpt_file):
raise Exception(f"merged code coverage report file {ch_rpt_file} is NA")
with open(cd_rpt_file) as rptf:
cd_rpt_con = rptf.read()
self.v_dic["ccs"] = re.search(
r"Total Coverage Summary.*?(\d+\.\d+)", cd_rpt_con, flags=re.DOTALL).group(1)
ch_score_dic = collections.OrderedDict()
with open(ch_rpt_file) as rptf:
ch_rpt_con = rptf.read()
con_lst = re.findall(
rf"{os.linesep}\s{{0,2}}-{{10,}}{os.linesep}(.*?)(?={os.linesep}\s+-{{10}}|$)",
ch_rpt_con, flags=re.DOTALL)
for index, con in enumerate(con_lst):
p_str = "(top)" if index == 0 else ""
for line in con.split(os.linesep):
line = line.strip()
mop = pcom.REOpter(line)
if mop.match(re.compile(
r"([\d\.\-]+)\s+([\d\.\-]+)\s+([\d\.\-]+)\s+([\d\.\-]+)\s+"
r"([\d\.\-]+)\s+([\d\.\-]+)\s+([\d\.\-]+)\s+(\w+)")):
ch_score_dic[f"{mop.group(8)}{p_str}"] = {
"s": mop.group(1), "l": mop.group(2), "c": mop.group(3), "t": mop.group(4),
"f": mop.group(5), "b": mop.group(6), "a": mop.group(7)}
return ch_score_dic
def parse_cg_report(self):
"""to parse coverage group report"""
cg_rpt_file = f"{self.ced['COV_MERGE']}{os.sep}urgReport{os.sep}groups.txt"
cp_rpt_file = f"{self.ced['COV_MERGE']}{os.sep}urgReport{os.sep}grpinfo.txt"
if not os.path.isfile(cg_rpt_file):
raise Exception(f"merged coverage groups report file {cg_rpt_file} is NA")
cg_score_dic = collections.OrderedDict()
with open(cg_rpt_file) as rptf:
for line in rptf:
line = line.strip()
mop = pcom.REOpter(line)
if mop.match(re.compile(r"(\d+\.\d+)\s+\d+$")):
self.v_dic["fcp"] = mop.group(1)
elif mop.match(re.compile(r"(\d+\.\d+)\s+.*\w+::\w+::(\w+)")):
cg_score_dic[mop.group(2)] = {
"per": mop.group(1), "cp_dic": collections.OrderedDict()}
if not os.path.isfile(cp_rpt_file):
LOG.warning("merged coverage points report file %s is NA", cp_rpt_file)
cp_rpt_con = ""
else:
with open(cp_rpt_file) as rptf:
cp_rpt_con = rptf.read()
for cg_n, cg_dic in cg_score_dic.items():
cg_sum_con = re.search(
rf"Summary for Group\s+(?:\w+::)+{cg_n}(.*?{os.linesep}-{{60}})",
cp_rpt_con, flags=re.DOTALL).group(1)
var_con, cro_con = re.search(
rf"Variables for Group\s+(?:\w+::)+{cg_n}(.*?){os.linesep}"
rf"Crosses for Group\s+(?:\w+::)+{cg_n}(.*?){os.linesep}-{{60}}",
cg_sum_con, flags=re.DOTALL).groups()
for line in var_con.split(os.linesep):
line = line.strip()
mop = pcom.REOpter(line)
if mop.match(re.compile(r"(\w+)\s+(?:\d+\s+)+(\d+\.\d+)\s+(?:\d+\s+)+")):
cg_dic["cp_dic"][f"{cg_n}::{mop.group(1)}"] = mop.group(2)
for line in cro_con.split(os.linesep):
line = line.strip()
mop = pcom.REOpter(line)
if mop.match(re.compile(r"(\w+)\s+(?:\d+\s+)+(\d+\.\d+)\s+(?:\d+\s+)+")):
cg_dic["cp_dic"][f"{cg_n}::{mop.group(1)}(cross)"] = mop.group(2)
return cg_score_dic
@classmethod
def clean_cg_score_dic(cls, cg_score_dic):
"""to clean no cover point items in cover group dic"""
cg_del_lst = []
for cg_name, cg_dic in cg_score_dic.items():
if not cg_dic["cp_dic"]:
cg_del_lst.append(cg_name)
for cg_del in cg_del_lst:
del cg_score_dic[cg_del]
def proc_home_sheet(self, w_s):
"""to process generated vplan home sheet"""
w_s.title = "home"
home_row_lst = pcom.rd_cfg(self.cfg_dic["proj"], "vplan_sheets", "home")
home_row_lst.insert(5, "")
for index, row in enumerate(home_row_lst):
if index == 5:
continue
cell = w_s[f"a{index+1}"]
cell.value = row
if self.gen_flg:
cell.style = "Accent1"
cell.alignment = Alignment(wrap_text=True)
next_cell = w_s[f"b{index+1}"]
if row == "Project":
next_cell.value = self.ced["PROJ_NAME"]
elif row == "Module Name":
next_cell.value = self.ced["MODULE"]
elif row == "Case Passing Rate":
d_v = self.v_dic["tpn"]/self.v_dic["ttn"] if self.v_dic["ttn"] else 0
cpr = str(round(100*d_v, 2))
next_cell.value = f"{cpr} % ({self.v_dic['tpn']}/{self.v_dic['ttn']})"
next_cell.fill = PatternFill(
fill_type="gray125", end_color=self.gen_per_color(cpr))
elif row == "Code Coverage Score":
next_cell.value = f"{self.v_dic['ccs']} %"
next_cell.fill = PatternFill(
fill_type="gray125", end_color=self.gen_per_color(self.v_dic["ccs"]))
elif row == "Function Coverage Per":
next_cell.value = f"{self.v_dic['fcp']} %"
next_cell.fill = PatternFill(
fill_type="gray125", end_color=self.gen_per_color(self.v_dic["fcp"]))
w_s.column_dimensions["a"].width = pcom.rd_cfg(
self.cfg_dic["proj"], "vplan_column_width", w_s.title, True)
w_s.column_dimensions["b"].width = w_s.column_dimensions["a"].width
def proc_tc_sheet(self, w_s):
"""to process generated vplan test case sheet"""
query_url = f"{pcom.BE_URL}/pj_app/regr/db_query/query_case_dic/"
query_param = {
"date": dt.datetime.now().strftime("%Y_%m_%d"),
"proj": self.ced["PROJ_NAME"],
"module": self.ced["MODULE"],
"days": self.days}
case_pr_dic = requests.get(query_url, params=query_param).json() if pcom.BACKEND else {}
tc_col_lst = pcom.rd_cfg(self.cfg_dic["proj"], "vplan_sheets", "test_case")
index_dic = {
"c": tc_col_lst.index("Case Name"),
"p": tc_col_lst.index("Priority"),
"o": tc_col_lst.index("Owner"),
"s": tc_col_lst.index("Status"),
"r": tc_col_lst.index("Days"),
"v": tc_col_lst.index("CL Ver"),
"d": tc_col_lst.index("Description")}
if self.gen_flg:
w_s.append(tc_col_lst)
self.proc_vplan_row1(w_s)
for index, case_row in enumerate(w_s.rows):
if index == 0:
continue
case_name = case_row[index_dic["c"]].value
case_dic = case_pr_dic.get(case_name, {})
if case_name in self.cfg_dic["case"]:
case_row[index_dic["d"]].value = self.cfg_dic["case"][case_name][
"vplan_desc"].replace(os.linesep, "; ")
case_row[index_dic["p"]].value = pcom.rd_cfg(
self.cfg_dic["case"], case_name, "vplan_priority", True)
case_row[index_dic["o"]].value = pcom.rd_cfg(
self.cfg_dic["case"], case_name, "vplan_owner", True)
case_row[index_dic["s"]].value = (
f"{case_dic.get('pr', 0.0)} % "
f"({case_dic.get('pn', 0)}/{case_dic.get('tn', 0)})")
self.v_dic["tpn"] += case_dic.get("pn", 0)
self.v_dic["ttn"] += case_dic.get("tn", 0)
case_row[index_dic["s"]].fill = PatternFill(
fill_type="gray125",
end_color=case_dic.get("bc", "#FF0000").replace("#", "FF"))
case_row[index_dic["r"]].value = self.days
case_row[index_dic["v"]].value = case_dic.get("cl_range", "NA")
del self.cfg_dic["case"][case_name]
else:
case_row[index_dic["p"]].value = "Out of Date"
case_row[index_dic["p"]].fill = PatternFill(
fill_type="gray125", end_color="FFFF0000")
for case_name in self.cfg_dic["case"]:
if case_name == "DEFAULT":
continue
case_dic = case_pr_dic.get(case_name, {})
new_line = [""]*len(tc_col_lst)
new_line[index_dic["c"]] = case_name
new_line[index_dic["d"]] = self.cfg_dic["case"][case_name][
"vplan_desc"].replace(os.linesep, "; ")
new_line[index_dic["p"]] = pcom.rd_cfg(
self.cfg_dic["case"], case_name, "vplan_priority", True)
new_line[index_dic["o"]] = pcom.rd_cfg(
self.cfg_dic["case"], case_name, "vplan_owner", True)
new_line[index_dic["s"]] = (
f"{case_dic.get('pr', 0.0)} % "
f"({case_dic.get('pn', 0)}/{case_dic.get('tn', 0)})")
self.v_dic["tpn"] += case_dic.get("pn", 0)
self.v_dic["ttn"] += case_dic.get("tn", 0)
new_line[index_dic["r"]] = self.days
new_line[index_dic["v"]] = case_dic.get("cl_range", "NA")
w_s.append(new_line)
w_s[w_s.max_row][index_dic["s"]].fill = PatternFill(
fill_type="gray125",
end_color=case_dic.get("bc", "#FF0000").replace("#", "FF"))
def proc_cc_sheet(self, w_s):
"""to process generated vplan code coverage sheet"""
ch_score_dic = self.parse_ch_report()
cc_col_lst = pcom.rd_cfg(self.cfg_dic["proj"], "vplan_sheets", "code_coverage")
index_dic = {
"h": cc_col_lst.index("Hierarchy"),
"p": cc_col_lst.index("Priority"),
"s": cc_col_lst.index("Score"),
"l": cc_col_lst.index("Line"),
"c": cc_col_lst.index("Cond"),
"t": cc_col_lst.index("Toggle"),
"f": cc_col_lst.index("FSM"),
"b": cc_col_lst.index("Branch"),
"a": cc_col_lst.index("Assert")}
if self.gen_flg:
w_s.append(cc_col_lst)
self.proc_vplan_row1(w_s)
for index, ch_row in enumerate(w_s.rows):
if index == 0:
continue
ch_name = ch_row[index_dic["h"]].value
if ch_name in ch_score_dic:
self.fill_cc_scores(ch_row[index_dic["s"]], ch_score_dic[ch_name]["s"])
self.fill_cc_scores(ch_row[index_dic["l"]], ch_score_dic[ch_name]["l"])
self.fill_cc_scores(ch_row[index_dic["c"]], ch_score_dic[ch_name]["c"])
self.fill_cc_scores(ch_row[index_dic["t"]], ch_score_dic[ch_name]["t"])
self.fill_cc_scores(ch_row[index_dic["f"]], ch_score_dic[ch_name]["f"])
self.fill_cc_scores(ch_row[index_dic["b"]], ch_score_dic[ch_name]["b"])
self.fill_cc_scores(ch_row[index_dic["a"]], ch_score_dic[ch_name]["a"])
del ch_score_dic[ch_name]
else:
ch_row[index_dic["p"]].value = "Out of Date"
ch_row[index_dic["p"]].fill = PatternFill(
fill_type="gray125", end_color="FFFF0000")
for ch_name, ch_dic in ch_score_dic.items():
new_line = [""]*len(cc_col_lst)
new_line[index_dic["h"]] = ch_name
new_line[index_dic["s"]] = f"{ch_dic['s']} %"
new_line[index_dic["l"]] = f"{ch_dic['l']} %"
new_line[index_dic["c"]] = f"{ch_dic['c']} %"
new_line[index_dic["t"]] = f"{ch_dic['t']} %"
new_line[index_dic["f"]] = f"{ch_dic['f']} %"
new_line[index_dic["b"]] = f"{ch_dic['b']} %"
new_line[index_dic["a"]] = f"{ch_dic['a']} %"
w_s.append(new_line)
if "(top)" in ch_name:
w_s[w_s.max_row][index_dic["h"]].fill = PatternFill(
fill_type="gray125", end_color="FFFFFF00")
w_s[w_s.max_row][index_dic["s"]].fill = PatternFill(
fill_type="gray125", end_color=self.gen_per_color(ch_dic["s"]))
w_s[w_s.max_row][index_dic["l"]].fill = PatternFill(
fill_type="gray125", end_color=self.gen_per_color(ch_dic["l"]))
w_s[w_s.max_row][index_dic["c"]].fill = PatternFill(
fill_type="gray125", end_color=self.gen_per_color(ch_dic["c"]))
w_s[w_s.max_row][index_dic["t"]].fill = PatternFill(
fill_type="gray125", end_color=self.gen_per_color(ch_dic["t"]))
w_s[w_s.max_row][index_dic["f"]].fill = PatternFill(
fill_type="gray125", end_color=self.gen_per_color(ch_dic["f"]))
w_s[w_s.max_row][index_dic["b"]].fill = PatternFill(
fill_type="gray125", end_color=self.gen_per_color(ch_dic["b"]))
w_s[w_s.max_row][index_dic["a"]].fill = PatternFill(
fill_type="gray125", end_color=self.gen_per_color(ch_dic["a"]))
def proc_fc_sheet(self, w_s):
"""to process generated vplan function coverage sheet"""
cg_score_dic = self.parse_cg_report()
fc_col_lst = pcom.rd_cfg(self.cfg_dic["proj"], "vplan_sheets", "function_coverage")
index_dic = {
"c": fc_col_lst.index("Coverage Group"),
"p": fc_col_lst.index("Priority"),
"s": fc_col_lst.index("SNPS Cov Per")}
if self.gen_flg:
w_s.append(fc_col_lst)
self.proc_vplan_row1(w_s)
for index, cg_row in enumerate(w_s.rows):
if index == 0:
continue
cg_name = cg_row[index_dic["c"]].value
if cg_name in cg_score_dic:
per = cg_score_dic[cg_name]["per"]
cg_row[index_dic["s"]].value = f"{per} %"
cg_row[index_dic["s"]].fill = PatternFill(
fill_type="gray125", end_color=self.gen_per_color(per))
elif "::" in cg_name:
base_cg_name = cg_name.split("::")[0]
                if cg_name in cg_score_dic[base_cg_name]["cp_dic"]:
                    per = cg_score_dic[base_cg_name]["cp_dic"][cg_name]
cg_row[index_dic["s"]].value = f"{per} %"
cg_row[index_dic["s"]].fill = PatternFill(
fill_type="gray125", end_color=self.gen_per_color(per))
del cg_score_dic[base_cg_name]["cp_dic"][cg_name]
else:
cg_row[index_dic["p"]].value = "Out of Date"
cg_row[index_dic["p"]].fill = PatternFill(
fill_type="gray125", end_color="FFFF0000")
self.clean_cg_score_dic(cg_score_dic)
for cg_name, cg_dic in cg_score_dic.items():
new_line = [""]*len(fc_col_lst)
new_line[index_dic["c"]] = cg_name
new_line[index_dic["s"]] = f"{cg_dic['per']} %"
w_s.append(new_line)
w_s[w_s.max_row][index_dic["c"]].fill = PatternFill(
fill_type="gray125", end_color="FFFFFF00")
w_s[w_s.max_row][index_dic["s"]].fill = PatternFill(
fill_type="gray125", end_color=self.gen_per_color(cg_dic["per"]))
for cp_name, cp_per in cg_dic["cp_dic"].items():
new_line = [""]*len(fc_col_lst)
new_line[index_dic["c"]] = cp_name
new_line[index_dic["s"]] = f"{cp_per} %"
w_s.append(new_line)
w_s[w_s.max_row][index_dic["s"]].fill = PatternFill(
fill_type="gray125", end_color=self.gen_per_color(cp_per))
def proc_vplan(self):
"""top execution function"""
vplan_file = f"{self.ced['MODULE_VPLAN']}{os.sep}{self.ced['MODULE']}_vplan.xlsx"
self.gen_flg = False if os.path.isfile(vplan_file) else True
if self.gen_flg:
w_b = openpyxl.Workbook()
home_sheet = w_b.active
tc_sheet = w_b.create_sheet("test_case")
cc_sheet = w_b.create_sheet("code_coverage")
fc_sheet = w_b.create_sheet("function_coverage")
else:
w_b = openpyxl.load_workbook(vplan_file)
home_sheet = w_b["home"]
tc_sheet = w_b["test_case"]
cc_sheet = w_b["code_coverage"]
fc_sheet = w_b["function_coverage"]
self.proc_tc_sheet(tc_sheet)
self.proc_cc_sheet(cc_sheet)
self.proc_fc_sheet(fc_sheet)
self.proc_home_sheet(home_sheet)
w_b.save(vplan_file)
def run_vplan(args):
"""to run vplan sub cmd"""
if args.vplan_module and args.vplan_proc:
ced, cfg_dic = env_booter.EnvBooter().module_env(args.vplan_module)
VPlanProc(ced, cfg_dic, args.vplan_days).proc_vplan()
LOG.info("processing vplan of %s module done", args.vplan_module)
else:
raise Exception("missing main arguments")
| gpl-3.0 | 2,155,645,069,527,077,400 | 48.780928 | 99 | 0.501424 | false |
QQuick/Transcrypt | transcrypt/demos/pysteroids_demo/org/theodox/__init__.py | 1 | 2009 |
import math
import itertools
class Vector:
"""
Generic vector operations.
"""
def _apply(self,op, other):
pairwise = None
if type(other) is Vector:
pairwise = zip(self.vals, other.vals)
else:
pairwise = zip(self.vals, [other for _ in self.vals])
return Vector(*itertools.starmap(op, pairwise))
def __init__(self, *args):
self.vals = args
def __add__(self, other):
return self._apply(lambda a, b: a + b, other)
def __sub__(self, other):
return self._apply(lambda a, b: a - b, other)
def __mul__(self, other):
return self._apply(lambda a, b: a*b, other)
def __div__(self, other):
return self._apply(lambda a, b: a / b, other)
def length(self):
total = sum(map(lambda a: math.pow(a, 2), self.vals))
return math.sqrt(total)
def normalized(self):
divisor = [self.length()] * len(self)
return Vector(*(self / divisor))
def __iter__(self):
        return py_iter(self.vals)  # py_iter is Transcrypt-specific, not standard Python
@classmethod
def map(cls, *args):
return args[0].map(args[1:])
def __getitem__(self, item):
        return self.vals[item]
def __str__(self):
return str(self.vals)
def __len__(self):
return len(self.vals)
@classmethod
def add(cls, a, b):
return Vector(*a) + Vector(*b)
@classmethod
def sub(cls, a, b):
return Vector(*a) - Vector(*b)
@classmethod
def mul(cls, a, b):
return Vector(*a) * Vector(*b)
@classmethod
def div(cls, a, b):
return Vector(*a) / Vector(*b)
@classmethod
def dot(cls, left, right):
return sum(Vector.mul(left, right))
@classmethod
def norm_dot(Vector, left, right):
left = Vector(*left).normalized()
right = Vector(*right).normalized()
return sum(Vector.mul(left, right))
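# Minimal usage sketch (added for illustration; not part of the original demo).
# Note: this module targets Transcrypt (see py_iter above); iteration-based
# helpers such as dot()/norm_dot() assume the transpiled environment.
#   v = Vector(3, 4)
#   v.length()                       # -> 5.0
#   (v + Vector(1, 1)).vals          # -> (4, 5)
#   Vector.add((1, 2), (3, 4)).vals  # -> (4, 6)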
| apache-2.0 | 634,549,561,471,562,500 | 21.091954 | 65 | 0.526132 | false |
asadoughi/quark | quark/db/migration/alembic/versions/2e9cf60b0ef6_quota_reservations.py | 6 | 1661 | """Implements reservations, resourcedeltas and quota usages from upstream
Revision ID: 2e9cf60b0ef6
Revises: 341d2e702dc4
Create Date: 2015-09-13 13:46:03.888079
"""
# revision identifiers, used by Alembic.
revision = '2e9cf60b0ef6'
down_revision = '341d2e702dc4'
from alembic import op
import sqlalchemy as sa
from sqlalchemy import sql
def upgrade():
op.create_table(
'reservations',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('expiration', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'resourcedeltas',
sa.Column('resource', sa.String(length=255), nullable=False),
sa.Column('reservation_id', sa.String(length=36), nullable=False),
sa.Column('amount', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['reservation_id'], ['reservations.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('resource', 'reservation_id'))
op.create_table(
'quotausages',
sa.Column('tenant_id', sa.String(length=255),
nullable=False, primary_key=True, index=True),
sa.Column('resource', sa.String(length=255),
nullable=False, primary_key=True, index=True),
sa.Column('dirty', sa.Boolean(), nullable=False,
server_default=sql.false()),
sa.Column('in_use', sa.Integer(), nullable=False,
server_default='0'),
sa.Column('reserved', sa.Integer(), nullable=False,
server_default='0'))
| apache-2.0 | 5,031,174,265,641,524,000 | 35.108696 | 74 | 0.624925 | false |
vdel/BMVC10 | libs/colordescriptors/DescriptorIO.py | 4 | 5420 | import os, struct
import numpy # you need to have NumPy installed
def __pointsToMatrix(points):
"""Function to parse the point location part of the KOEN1 format"""
t = []
for p in points:
#assert p[0] == "<" and p[-1] == ">"
parts = p[1:-1].split(" ")
assert parts[0] == "CIRCLE"
t.append(parts[1:])
#print parts, m.shape
#m[i, 0] = float(parts[1]) # x
#m[i, 1] = float(parts[2]) # y
#m[i, 2] = float(parts[3]) # scale
#m[i, 3] = float(parts[4]) # orientation
#m[i, 4] = float(parts[5]) # cornerness
return numpy.matrix(t, dtype=numpy.float64)
def parseKoen1(data, parsePoints=False):
"""Parse the KOEN1 format into two matrices: points and descriptors.
Data contains the raw bytes of the file."""
lines = data.splitlines()
dimensionality = int(lines[1])
regionCount = int(lines[2])
points = []
descriptors = []
for i in range(regionCount):
line = lines[i+3]
parts = line.split(";")
points.append(parts[0])
descriptors.append(parts[1].split())
if parsePoints:
points = __pointsToMatrix(points)
return points, numpy.matrix(descriptors, dtype=numpy.float64)
def parseBinaryDescriptors(data):
"""Parse the BINDESC1 format into two matrices: points and descriptors.
Data contains the raw bytes of the file."""
header = data[0:32]
assert header[:8] == "BINDESC1"
#assert header[8:16] == "CIRCLE "
values = struct.unpack("<4I", header[16:])
elementsPerPoint = values[0]
dimensionCount = values[1]
pointCount = values[2]
bytesPerElement = values[3]
if bytesPerElement == 8:
dt = numpy.float64
elif bytesPerElement == 4:
dt = numpy.float32
else:
raise ValueError("Bytes per element unknown: %d" % bytesPerElement)
# New way of point and descriptor extraction
points = numpy.fromstring(data[32:(elementsPerPoint * pointCount * bytesPerElement)+32], dtype=dt)
descriptors = numpy.fromstring(data[(elementsPerPoint * pointCount * bytesPerElement)+32:], dtype=dt)
# Reshape the arrays into matrices
points = numpy.reshape(points, (pointCount, elementsPerPoint))
descriptors = numpy.reshape(descriptors, (pointCount, dimensionCount))
return points, descriptors
def readDescriptors(filename):
"""Load descriptors from filename or file descriptor.
Identification of KOEN1/BINDESC1 format is automatic. Returns two matrices:
the first one contains the points with a typical size of (n,5) and
descriptors with a typical size of (n,d) with d the dimensionality of
the descriptor."""
if hasattr(filename, "read"):
f = filename
else:
f = open(filename, "rb")
identify = f.read(4)
f.seek(-4, os.SEEK_CUR)
# text file?
if identify == "KOEN":
return parseKoen1(f.read(), True)
# this is a binary file
header = f.read(32)
assert header[:8] == "BINDESC1"
#assert header[8:16] == "CIRCLE "
values = struct.unpack("<4I", header[16:])
elementsPerPoint = values[0]
dimensionCount = values[1]
pointCount = values[2]
bytesPerElement = values[3]
if bytesPerElement == 8:
dt = numpy.float64
elif bytesPerElement == 4:
dt = numpy.float32
else:
raise ValueError("Bytes per element unknown: %d" % bytesPerElement)
points = numpy.fromstring(f.read(elementsPerPoint * pointCount * bytesPerElement), dtype=dt)
descriptors = numpy.fromstring(f.read(dimensionCount * pointCount * bytesPerElement), dtype=dt)
points = numpy.reshape(points, (pointCount, elementsPerPoint))
descriptors = numpy.reshape(descriptors, (pointCount, dimensionCount))
if not hasattr(filename, "read"):
f.close()
return points, descriptors
def writeBinaryDescriptors(filename, points, descriptors, info=""):
"""Write the BINDESC1 format from two matrices: points and descriptors."""
elementsPerPoint = points.shape[1]
dimensionCount = descriptors.shape[1]
pointCount = descriptors.shape[0]
bytesPerElement = 8
if pointCount != points.shape[0]:
raise ValueError("Shape mismatch: should have same number of rows")
if hasattr(filename, "write"):
f = filename
else:
f = open(filename, "wb")
header = "BINDESC1" + (info + " " * 8)[:8]
header += struct.pack("<4I", elementsPerPoint, dimensionCount, pointCount, bytesPerElement)
f.write(header)
if bytesPerElement == 8:
dt = numpy.float64
elif bytesPerElement == 4:
dt = numpy.float32
else:
raise ValueError("Bytes per element unknown: %d" % bytesPerElement)
# New way of point and descriptor extraction
data = points.astype(dt).tostring()
f.write(data)
data = descriptors.astype(dt).tostring()
f.write(data)
f.close()
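# Minimal round-trip sketch (added for illustration; the file names are hypothetical):
#   points, descriptors = readDescriptors("frame0001.descr")
#   writeBinaryDescriptors("copy.descr", points, descriptors, info="CIRCLE")
#   points2, descriptors2 = readDescriptors("copy.descr")  # same shapes as above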
if __name__ == "__main__":
import sys
filename = sys.argv[1]
print "Loading", filename
points, descriptors = readDescriptors(filename)
print "Points in the file:", points.shape
print points
print "Descriptors in the file:", descriptors.shape
print descriptors
| gpl-2.0 | 7,716,768,943,040,394,000 | 32.967742 | 105 | 0.620295 | false |
samdoran/ansible | hacking/conf2yaml.py | 15 | 3020 | #!/usr/bin/env python
import ast
import yaml
import os
import sys
from ansible.parsing.yaml.dumper import AnsibleDumper
# 'C' is used below to resolve constant names via getattr(); it is assumed to be
# Ansible's constants module (import added so those lookups can succeed).
from ansible import constants as C
things = {}
stuff = {}
op_map = {
ast.Add: '+',
ast.Sub: '-',
ast.Mult: '*',
ast.Div: '/',
}
def get_values(values):
if not isinstance(values, list):
return get_value(values)
ret = []
for value in values:
ret.append(get_value(value))
return ret
def get_value(value):
if hasattr(value, 'id'):
ret = value.id
elif hasattr(value, 's'):
ret = value.s
elif hasattr(value, 'n'):
ret = value.n
elif hasattr(value, 'left'):
operator = op_map[type(value.op)]
left = get_values(value.left)
right = get_values(value.right)
return '%s %s %s' % (left, operator, right)
elif hasattr(value, 'value'):
ret = value.value
elif hasattr(value, 'elts'):
ret = get_values(value.elts)
elif isinstance(value, ast.Call):
func, args, kwargs = get_call(value)
args[:] = [repr(arg) for arg in args]
for k, v in kwargs.items():
args.append('%s=%s' % (k, repr(v)))
return '%s(%s)' % (func, ', '.join(args))
else:
return value
return get_value(ret)
def get_call(value):
args = []
for arg in value.args:
v = get_value(arg)
try:
v = getattr(C, v, v)
except:
pass
args.append(v)
kwargs = {}
for keyword in value.keywords:
v = get_value(keyword.value)
try:
v = getattr(C, v, v)
except:
pass
kwargs[keyword.arg] = v
func = get_value(value.func)
try:
attr = '.%s' % value.func.attr
except:
attr = ''
return '%s%s' % (func, attr), args, kwargs
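# Illustrative sketch (added comment; the call below is an assumed example): for an
# AST node of get_config(p, 'defaults', 'forks', 'ANSIBLE_FORKS', 5), get_call()
# returns roughly ('get_config', ['p', 'defaults', 'forks', 'ANSIBLE_FORKS', 5], {}),
# with names that resolve as attributes of C replaced by their constant values.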
with open(sys.argv[1]) as f:
tree = ast.parse(f.read())
for item in tree.body:
if hasattr(item, 'value') and isinstance(item.value, ast.Call):
try:
if item.value.func.id != 'get_config':
continue
except AttributeError:
continue
_, args, kwargs = get_call(item.value)
name = get_value(item.targets[0])
section = args[1].lower()
config = args[2]
# new form
if name not in stuff:
stuff[name] = {}
stuff[name] = {
'desc': 'TODO: write it',
'ini': [{'section': section, 'key': config}],
'env': [args[3]],
'default': args[4] if len(args) == 5 else None,
'yaml': {'key': '%s.%s' % (section, config)},
'vars': []
}
stuff[name].update(kwargs)
## ini like
#if section not in things:
# things[section] = {}
#things[section][config] = {
# 'env_var': args[3],
# 'default': args[4] if len(args) == 5 else 'UNKNOWN'
#}
#things[section][config].update(kwargs)
print(yaml.dump(stuff, Dumper=AnsibleDumper, indent=2, width=170))
| gpl-3.0 | 553,530,131,804,152,900 | 23.354839 | 67 | 0.513245 | false |
froyobin/horizon | openstack_dashboard/usage/quotas.py | 2 | 12667 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import itertools
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import network
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
NOVA_QUOTA_FIELDS = ("metadata_items",
"cores",
"instances",
"injected_files",
"injected_file_content_bytes",
"ram",
"floating_ips",
"fixed_ips",
"security_groups",
"security_group_rules",)
MISSING_QUOTA_FIELDS = ("key_pairs",
"injected_file_path_bytes",)
CINDER_QUOTA_FIELDS = ("volumes",
"snapshots",
"gigabytes",)
NEUTRON_QUOTA_FIELDS = ("network",
"subnet",
"port",
"router",
"floatingip",
"security_group",
"security_group_rule",
)
QUOTA_FIELDS = NOVA_QUOTA_FIELDS + CINDER_QUOTA_FIELDS + NEUTRON_QUOTA_FIELDS
QUOTA_NAMES = {
"metadata_items": _('Metadata Items'),
"cores": _('VCPUs'),
"instances": _('Instances'),
"injected_files": _('Injected Files'),
"injected_file_content_bytes": _('Injected File Content Bytes'),
"ram": _('RAM (MB)'),
"floating_ips": _('Floating IPs'),
"fixed_ips": _('Fixed IPs'),
"security_groups": _('Security Groups'),
"security_group_rules": _('Security Group Rules'),
"key_pairs": _('Key Pairs'),
"injected_file_path_bytes": _('Injected File Path Bytes'),
"volumes": _('Volumes'),
"snapshots": _('Volume Snapshots'),
"gigabytes": _('Total Size of Volumes and Snapshots (GB)'),
"network": _("Networks"),
"subnet": _("Subnets"),
"port": _("Ports"),
"router": _("Routers"),
"floatingip": _('Floating IPs'),
"security_group": _("Security Groups"),
"security_group_rule": _("Security Group Rules")
}
class QuotaUsage(dict):
"""Tracks quota limit, used, and available for a given set of quotas."""
def __init__(self):
self.usages = defaultdict(dict)
def __contains__(self, key):
return key in self.usages
def __getitem__(self, key):
return self.usages[key]
def __setitem__(self, key, value):
raise NotImplementedError("Directly setting QuotaUsage values is not "
"supported. Please use the add_quota and "
"tally methods.")
def __repr__(self):
return repr(dict(self.usages))
def get(self, key, default=None):
return self.usages.get(key, default)
def add_quota(self, quota):
"""Adds an internal tracking reference for the given quota."""
if quota.limit is None or quota.limit == -1:
# Handle "unlimited" quotas.
self.usages[quota.name]['quota'] = float("inf")
self.usages[quota.name]['available'] = float("inf")
else:
self.usages[quota.name]['quota'] = int(quota.limit)
def tally(self, name, value):
"""Adds to the "used" metric for the given quota."""
value = value or 0 # Protection against None.
# Start at 0 if this is the first value.
if 'used' not in self.usages[name]:
self.usages[name]['used'] = 0
# Increment our usage and update the "available" metric.
self.usages[name]['used'] += int(value) # Fail if can't coerce to int.
self.update_available(name)
def update_available(self, name):
"""Updates the "available" metric for the given quota."""
available = self.usages[name]['quota'] - self.usages[name]['used']
if available < 0:
available = 0
self.usages[name]['available'] = available
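# Minimal usage sketch (added for illustration; "quota" stands for any object
# with .name and .limit attributes, e.g. an item of a base.QuotaSet):
#   usage = QuotaUsage()
#   usage.add_quota(quota)      # e.g. quota.name == 'instances', quota.limit == 10
#   usage.tally('instances', 3)
#   usage['instances']          # -> {'quota': 10, 'used': 3, 'available': 7}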
def _get_quota_data(request, method_name, disabled_quotas=None,
tenant_id=None):
quotasets = []
if not tenant_id:
tenant_id = request.user.tenant_id
quotasets.append(getattr(nova, method_name)(request, tenant_id))
qs = base.QuotaSet()
if disabled_quotas is None:
disabled_quotas = get_disabled_quotas(request)
if 'volumes' not in disabled_quotas:
quotasets.append(getattr(cinder, method_name)(request, tenant_id))
for quota in itertools.chain(*quotasets):
if quota.name not in disabled_quotas:
qs[quota.name] = quota.limit
return qs
def get_default_quota_data(request, disabled_quotas=None, tenant_id=None):
return _get_quota_data(request,
"default_quota_get",
disabled_quotas=disabled_quotas,
tenant_id=tenant_id)
def get_tenant_quota_data(request, disabled_quotas=None, tenant_id=None):
qs = _get_quota_data(request,
"tenant_quota_get",
disabled_quotas=disabled_quotas,
tenant_id=tenant_id)
# TODO(jpichon): There is no API to get the default system quotas
# in Neutron (cf. LP#1204956), so for now handle tenant quotas here.
# This should be handled in _get_quota_data() eventually.
if not disabled_quotas:
return qs
# Check if neutron is enabled by looking for network and router
    if 'network' not in disabled_quotas and 'router' not in disabled_quotas:
tenant_id = tenant_id or request.user.tenant_id
neutron_quotas = neutron.tenant_quota_get(request, tenant_id)
if 'floating_ips' in disabled_quotas:
# Neutron with quota extension disabled
if 'floatingip' in disabled_quotas:
qs.add(base.QuotaSet({'floating_ips': -1}))
# Neutron with quota extension enabled
else:
# Rename floatingip to floating_ips since that's how it's
# expected in some places (e.g. Security & Access' Floating IPs)
fips_quota = neutron_quotas.get('floatingip').limit
qs.add(base.QuotaSet({'floating_ips': fips_quota}))
if 'security_groups' in disabled_quotas:
if 'security_group' in disabled_quotas:
qs.add(base.QuotaSet({'security_groups': -1}))
# Neutron with quota extension enabled
else:
# Rename security_group to security_groups since that's how it's
# expected in some places (e.g. Security & Access' Security Groups)
sec_quota = neutron_quotas.get('security_group').limit
qs.add(base.QuotaSet({'security_groups': sec_quota}))
if 'network' in disabled_quotas:
for item in qs.items:
if item.name == 'networks':
qs.items.remove(item)
break
else:
net_quota = neutron_quotas.get('network').limit
qs.add(base.QuotaSet({'networks': net_quota}))
if 'router' in disabled_quotas:
for item in qs.items:
if item.name == 'routers':
qs.items.remove(item)
break
else:
router_quota = neutron_quotas.get('router').limit
qs.add(base.QuotaSet({'routers': router_quota}))
return qs
def get_disabled_quotas(request):
disabled_quotas = []
# Cinder
if not base.is_service_enabled(request, 'volume'):
disabled_quotas.extend(CINDER_QUOTA_FIELDS)
# Neutron
if not base.is_service_enabled(request, 'network'):
disabled_quotas.extend(NEUTRON_QUOTA_FIELDS)
else:
# Remove the nova network quotas
disabled_quotas.extend(['floating_ips', 'fixed_ips'])
if neutron.is_extension_supported(request, 'security-group'):
# If Neutron security group is supported, disable Nova quotas
disabled_quotas.extend(['security_groups', 'security_group_rules'])
else:
# If Nova security group is used, disable Neutron quotas
disabled_quotas.extend(['security_group', 'security_group_rule'])
try:
if not neutron.is_quotas_extension_supported(request):
disabled_quotas.extend(NEUTRON_QUOTA_FIELDS)
except Exception:
LOG.exception("There was an error checking if the Neutron "
"quotas extension is enabled.")
return disabled_quotas
@memoized
def tenant_quota_usages(request):
# Get our quotas and construct our usage object.
disabled_quotas = get_disabled_quotas(request)
usages = QuotaUsage()
for quota in get_tenant_quota_data(request,
disabled_quotas=disabled_quotas):
usages.add_quota(quota)
# Get our usages.
floating_ips = []
try:
if network.floating_ip_supported(request):
floating_ips = network.tenant_floating_ip_list(request)
except Exception:
pass
flavors = dict([(f.id, f) for f in nova.flavor_list(request)])
instances, has_more = nova.server_list(request)
# Fetch deleted flavors if necessary.
missing_flavors = [instance.flavor['id'] for instance in instances
if instance.flavor['id'] not in flavors]
for missing in missing_flavors:
if missing not in flavors:
try:
flavors[missing] = nova.flavor_get(request, missing)
except Exception:
flavors[missing] = {}
exceptions.handle(request, ignore=True)
usages.tally('instances', len(instances))
usages.tally('floating_ips', len(floating_ips))
    if 'security_group' not in disabled_quotas:
        security_groups = network.security_group_list(request)
        usages.tally('security_groups', len(security_groups))
    if 'network' not in disabled_quotas:
        networks = neutron.network_list(request, shared=False)
        usages.tally('networks', len(networks))
    if 'router' not in disabled_quotas:
        routers = neutron.router_list(request)
        usages.tally('routers', len(routers))
if 'volumes' not in disabled_quotas:
volumes = cinder.volume_list(request)
snapshots = cinder.volume_snapshot_list(request)
usages.tally('gigabytes', sum([int(v.size) for v in volumes]))
usages.tally('volumes', len(volumes))
usages.tally('snapshots', len(snapshots))
# Sum our usage based on the flavors of the instances.
for flavor in [flavors[instance.flavor['id']] for instance in instances]:
usages.tally('cores', getattr(flavor, 'vcpus', None))
usages.tally('ram', getattr(flavor, 'ram', None))
# Initialise the tally if no instances have been launched yet
if len(instances) == 0:
usages.tally('cores', 0)
usages.tally('ram', 0)
return usages
def tenant_limit_usages(request):
# TODO(licostan): This method shall be removed from Quota module.
# ProjectUsage/BaseUsage maybe used instead on volume/image dashboards.
limits = {}
try:
limits.update(nova.tenant_absolute_limits(request))
except Exception:
msg = _("Unable to retrieve compute limit information.")
exceptions.handle(request, msg)
if base.is_service_enabled(request, 'volume'):
try:
limits.update(cinder.tenant_absolute_limits(request))
volumes = cinder.volume_list(request)
snapshots = cinder.volume_snapshot_list(request)
total_size = sum([getattr(volume, 'size', 0) for volume
in volumes])
limits['gigabytesUsed'] = total_size
limits['volumesUsed'] = len(volumes)
limits['snapshotsUsed'] = len(snapshots)
except Exception:
msg = _("Unable to retrieve volume limit information.")
exceptions.handle(request, msg)
return limits
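def _example_can_launch_instance(request):
    """Illustrative sketch only (not part of the original module): combines
    tenant_quota_usages() with the conventional OpenStack value of -1 meaning
    "unlimited" to decide whether one more instance may be launched. It
    assumes the QuotaUsage object built above exposes per-resource 'quota'
    and 'used' entries.
    """
    usages = tenant_quota_usages(request)
    quota = usages['instances']['quota']
    used = usages['instances']['used']
    return quota < 0 or used < quota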
| apache-2.0 | 8,296,709,320,381,819,000 | 36.255882 | 79 | 0.605589 | false |
403JFW/akatsuki | tests/exporter.py | 1 | 2449 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from akatsuki import exporter
class TestExporter(unittest.TestCase):
def test_entry_html(self):
entry_dict = {
'keyword': 'keyword1, keyword2',
'title': 'An amazing title',
'journal': 'KU Journal',
'author': 'John Appleseed',
'abstract': 'This is an abstract.',
'month': 'Jan',
'volume': '12',
'number': '32',
'comments': 'A comment',
'year': '2014',
'id': 'ku2014',
'type': 'article',
'pages': '121--123',
'URL': 'http://www.google.com/'}
entry_text = "%s<br>\n" % entry_dict['author']
entry_text += "%s<br>\n" % entry_dict['title']
entry_text += "%s %s %s;%s(%s):%s.<br>\n" % (
entry_dict['journal'],
entry_dict['year'],
entry_dict['month'],
entry_dict['volume'],
entry_dict['number'],
entry_dict['pages'])
if 'URL' in entry_dict:
link_text = '<a href="{0:s}">{0:s}</a><br>\n'
entry_text += link_text.format(entry_dict['URL'])
self.assertEqual(entry_text, exporter._entry_html(entry_dict))
def test_entry_wordpress(self):
entry_dict = {
'keyword': 'keyword1, keyword2',
'title': 'An amazing title',
'journal': 'KU Journal',
'author': 'John Appleseed',
'abstract': 'This is an abstract.',
'month': 'Jan',
'volume': '12',
'number': '32',
'comments': 'A comment',
'year': '2014',
'id': 'ku2014',
'type': 'article',
'pages': '121--123',
'URL': 'http://www.google.com/'}
entry_text = "%s\n" % entry_dict['author']
entry_text += "%s\n" % entry_dict['title']
entry_text += "%s %s %s;%s(%s):%s.\n" % (
entry_dict['journal'],
entry_dict['year'],
entry_dict['month'],
entry_dict['volume'],
entry_dict['number'],
entry_dict['pages'])
if 'URL' in entry_dict:
link_text = '<a href="{0:s}">{0:s}</a>\n'
entry_text += link_text.format(entry_dict['URL'])
self.assertEqual(entry_text, exporter._entry_wordpress(entry_dict))
| mit | 3,969,742,949,562,519,600 | 33.985714 | 75 | 0.469171 | false |
ioram7/keystone-federado-pgid2013 | build/sqlalchemy/examples/declarative_reflection/__init__.py | 1 | 1502 | """
Illustrates how to mix table reflection with Declarative, such that
the reflection process itself can take place **after** all classes
are defined. Declarative classes can also override column
definitions loaded from the database.
At the core of this example is the ability to change how Declarative
assigns mappings to classes. The ``__mapper_cls__`` special attribute
is overridden to provide a function that gathers mapping requirements
as they are established, without actually creating the mapping.
Then, a second class-level method ``prepare()`` is used to iterate
through all mapping configurations collected, reflect the tables
named within and generate the actual mappers.
.. versionadded:: 0.7.5
This new example makes usage of the new
``autoload_replace`` flag on :class:`.Table` to allow declared
classes to override reflected columns.
Usage example::
Base = declarative_base(cls=DeclarativeReflectedBase)
class Foo(Base):
__tablename__ = 'foo'
bars = relationship("Bar")
class Bar(Base):
__tablename__ = 'bar'
# illustrate overriding of "bar.foo_id" to have
# a foreign key constraint otherwise not
# reflected, such as when using MySQL
foo_id = Column(Integer, ForeignKey('foo.id'))
Base.prepare(e)
s = Session(e)
s.add_all([
Foo(bars=[Bar(data='b1'), Bar(data='b2')], data='f1'),
Foo(bars=[Bar(data='b3'), Bar(data='b4')], data='f2')
])
s.commit()
"""
| apache-2.0 | 6,828,502,744,824,415,000 | 30.957447 | 71 | 0.688415 | false |
shivajimedida/SU2 | SU2_PY/SU2/io/data.py | 2 | 16398 | #!/usr/bin/env python
## \file data.py
# \brief python package for data utility functions
# \author T. Lukaczyk, F. Palacios
# \version 4.0.0 "Cardinal"
#
# SU2 Lead Developers: Dr. Francisco Palacios ([email protected]).
# Dr. Thomas D. Economon ([email protected]).
#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
#
# Copyright (C) 2012-2015 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy
import cPickle as pickle
from filelock import filelock
# -------------------------------------------------------------------
# Load a Dictionary of Data
# -------------------------------------------------------------------
def load_data( file_name, var_names=None ,
file_format = 'infer' ,
core_name = 'python_data' ):
""" data = load_data( file_name, var_names=None ,
file_format = 'infer' ,
core_name = 'python_data' )
loads dictionary of data from python pickle or matlab struct
Inputs:
file_name - data file name
var_names - variable names to read
file_format - 'infer', 'pickle', or 'matlab'
core_name - data is stored under a dictionary with this name
default looks for variable 'python_data' in file_name
file_format = pickle, will return any python object
file_format = matlab, will return strings or float lists and
requires scipy.io.loadmat
file_format = infer (default), will infer format from extention
('.mat','.pkl')
"""
try:
import scipy.io
scipy_loaded = True
except ImportError:
scipy_loaded = False
if not os.path.exists(file_name):
raise Exception , 'File does not exist: %s' % file_name
# process file format
if file_format == 'infer':
if os.path.splitext(file_name)[1] == '.mat':
file_format = 'matlab'
elif os.path.splitext(file_name)[1] == '.pkl':
file_format = 'pickle'
assert file_format in ['matlab','pickle'] , 'unsupported file format'
# get filelock
with filelock(file_name):
# LOAD MATLAB
if file_format == 'matlab' and scipy_loaded:
input_data = scipy.io.loadmat( file_name = file_name ,
squeeze_me = False ,
chars_as_strings = True ,
struct_as_record = True )
# pull core variable
assert input_data.has_key(core_name) , 'core data not found'
input_data = input_data[core_name]
# convert recarray to dictionary
input_data = rec2dict(input_data)
# LOAD PICKLE
elif file_format == 'pickle':
input_data = load_pickle(file_name)
# pull core variable
assert input_data.has_key(core_name) , 'core data not found'
input_data = input_data[core_name]
#: if file_format
#: with filelock
# load specified varname into dictionary
if var_names != None:
# check for one item name array
if isinstance(var_names,str):
var_names = [var_names,]
for key in input_data.keys():
if not key in var_names:
del input_data[key]
#: for key
#: if var_names
return input_data
#: def load()
# -------------------------------------------------------------------
# Save a Dictionary of Data
# -------------------------------------------------------------------
def save_data( file_name, data_dict, append=False ,
file_format = 'infer' ,
core_name='python_data' ):
""" save_data( file_name, data_dict, append=False ,
file_format = 'infer' ,
core_name='python_data' ):
Inputs:
file_name - data file name
data_dict - a dictionary or bunch to write
append - True/False to append existing data
file_format - 'infer', 'pickle', or 'matlab'
core_name - data is stored under a dictionary with this name
file_format = pickle, will save any pickleable python object
file_format = matlab, will save strings or float lists and
requires scipy.io.loadmat
file_format = infer (default), will infer format from extention
('.mat','.pkl')
matlab format saves data file from matlab 5 and later
will save nested dictionaries into nested matlab structures
cannot save classes and modules
uses scipy.io.loadmat
"""
try:
import scipy.io
scipy_loaded = True
except ImportError:
scipy_loaded = False
# process file format
if file_format == 'infer':
if os.path.splitext(file_name)[1] == '.mat':
file_format = 'matlab'
elif os.path.splitext(file_name)[1] == '.pkl':
file_format = 'pickle'
assert file_format in ['matlab','pickle'] , 'unsupported file format'
# get filelock
with filelock(file_name):
# if appending needed
# TODO: don't overwrite other core_names
if append == True and os.path.exists(file_name):
# check file exists
if not os.path.exists(file_name):
raise Exception , 'Cannot append, file does not exist: %s' % file_name
# load old data
            data_dict_old = load_data( file_name = file_name ,
                                       var_names = None ,
                                       file_format = file_format ,
                                       core_name = core_name )
# check for keys not in new data
for key,value in data_dict_old.iteritems():
if not data_dict.has_key(key):
data_dict[key] = value
#: for each dict item
#: if append
# save to core name
data_dict = {core_name : data_dict}
# SAVE MATLAB
if file_format == 'matlab':
# bunch it
data_dict = mat_bunch(data_dict)
# save it
scipy.io.savemat( file_name = file_name ,
mdict = data_dict,
format = '5', # matlab 5 .mat format
oned_as = 'column' )
elif file_format == 'pickle':
# save it
save_pickle(file_name,data_dict)
#: if file_format
#: with filelock
return
#: def save()
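def _example_save_load_roundtrip():
    """Illustrative sketch only (not part of the original SU2 module): a
    minimal save_data()/load_data() round trip. 'history.pkl' is a made-up
    file name; its '.pkl' extension makes the 'infer' option select the
    pickle format. Assumes write access to the working directory.
    """
    history = {'iterations': [1, 2, 3], 'residual': [1e-1, 1e-3, 1e-6]}
    save_data('history.pkl', history)
    restored = load_data('history.pkl')
    assert restored['residual'][-1] == 1e-6
    return restored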
# -------------------------------------------------------------------
# Load Pickle
# -------------------------------------------------------------------
def load_pickle(file_name):
""" data = load_pickle(file_name)
loads a pickle with core_data dictionaries
assumes first entry is a list of all following data names
returns dictionary of data
"""
pkl_file = open(file_name,'rb')
#names = safe_unpickle.loadf(pkl_file)
names = pickle.load(pkl_file)
data_dict = dict.fromkeys(names,[])
for key in names:
#data_dict[key] = safe_unpickle.loadf(pkl_file)
data_dict[key] = pickle.load(pkl_file)
pkl_file.close()
return data_dict
#: def load_pickle()
# -------------------------------------------------------------------
# Save Pickle
# -------------------------------------------------------------------
def save_pickle(file_name,data_dict):
""" save_pickle(file_name,data_dict)
saves a core data dictionary
first pickle entry is a list of all following data names
"""
pkl_file = open(file_name,'wb')
names = data_dict.keys()
pickle.dump(names,pkl_file)
for key in names:
pickle.dump(data_dict[key],pkl_file)
pkl_file.close()
#: def save_pickle()
# -------------------------------------------------------------------
# Safe UnPickle
# -------------------------------------------------------------------
#class safe_unpickle(pickle.Unpickler):
#''' adds some safety to unpickling
#checks that only supported classes are loaded
#original source from http://nadiana.com/python-pickle-insecure#comment-144
#'''
## modules : classes considered safe
#PICKLE_SAFE = {
#'copy_reg' : ['_reconstructor'] ,
#'__builtin__' : ['object'] ,
#'numpy' : ['dtype','ndarray'] ,
#'numpy.core.multiarray' : ['scalar','_reconstruct'] ,
#'collections' : ['OrderedDict'] ,
#'SU2.io.state' : ['State'] , # SU2 Specific
#'SU2.io.config' : ['Config'] ,
#'SU2.eval.design' : ['Design'] ,
#'SU2.opt.project' : ['Project'] ,
#'SU2.util.ordered_bunch' : ['OrderedBunch'] ,
#'SU2.util.bunch' : ['Bunch'] ,
#'tasks_general' : ['General_Task'] ,
#'tasks_project' : ['Project','Job'] ,
#'tasks_su2' : ['Decomp','Deform','Direct','Cont_Adjoint',
#'Multiple_Cont_Adjoint','Finite_Diff','Adapt'] ,
#}
## make sets
#for key in PICKLE_SAFE.keys():
#PICKLE_SAFE[key] = set(PICKLE_SAFE[key])
## check for save module/class
#def find_class(self, module, name):
#if not module in self.PICKLE_SAFE:
#raise pickle.UnpicklingError(
#'Attempting to unpickle unsafe module %s' % module
#)
#__import__(module)
#mod = sys.modules[module]
#if not name in self.PICKLE_SAFE[module]:
#raise pickle.UnpicklingError(
#'Attempting to unpickle unsafe class %s' % name
#)
#klass = getattr(mod, name)
#return klass
## extend the load() and loads() methods
#@classmethod
#def loadf(self, pickle_file): # loads a file like pickle.load()
#return self(pickle_file).load()
#@classmethod
#def loads(self, pickle_string): #loads a string like pickle.loads()
#return self(StringIO.StringIO(pickle_string)).load()
# -------------------------------------------------------------------
# Convert Record Array to Dictionary
# -------------------------------------------------------------------
def rec2dict(array_in):
""" converts numpy record array to dictionary of lists
needed for loading matlab data
assumes array comes from scipy.io.loadmat, with
squeeze_me = False and struct_as_record = True
"""
import numpy
assert isinstance(array_in,numpy.ndarray) , 'input must be a numpy record array'
# make sure it's not an object array
if array_in.dtype == numpy.dtype('object'):
array_in = array_in.tolist()
# get record keys/names
keys = array_in.dtype.names
# start output dictionary
dataout = dict.fromkeys(keys,[])
for key in keys:
# squeeze_me option puts all items in a two-dim array
value = array_in[key].tolist()[0][0]
# convert string
if isinstance(value[0],unicode):
value = str(value[0])
# convert array
elif isinstance(value,numpy.ndarray):
# check for another struct level
if value.dtype.names == None:
value = value.tolist()
# telescoping
else:
value = rec2dict(value)
# store value
dataout[key] = value
return dataout
#: def rec2dict()
# -------------------------------------------------------------------
# Flatten a List
# -------------------------------------------------------------------
def flatten_list(input_list):
''' flatten an irregular list of lists of any depth
'''
output_list = []
for value in input_list:
if isinstance(value,list):
output_list.extend( flatten_list(value) ) # telescope
else:
output_list.append(value)
return output_list
#: def flatten_list()
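def _example_flatten_list():
    """Illustrative sketch only (not part of the original module):
    flatten_list() collapses arbitrarily nested lists into a single flat list.
    """
    nested = [1, [2, [3, 4], 5], [], [[6]]]
    flat = flatten_list(nested)
    assert flat == [1, 2, 3, 4, 5, 6]
    return flat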
# -------------------------------------------------------------------
# Append Lists in a Nested Dictionary
# -------------------------------------------------------------------
def append_nestdict(base_dict,add_dict):
""" append_nestdict(base_dict,add_dict)
appends base_dict with add_dict, allowing for
updating nested dictionaries
will update base_dict in place
"""
# break pointer
add_dict = copy.deepcopy(add_dict)
# append add_dict keys
for key in add_dict.keys():
# ensure base_dict key exists and is a list
if not base_dict.has_key(key):
if isinstance( add_dict[key] , dict ):
base_dict[key] = {}
else:
base_dict[key] = []
elif not ( isinstance( base_dict[key] , list )
or isinstance( base_dict[key] , dict ) ):
assert not isinstance( add_dict[key] , dict ) , 'base[key] is not a dictionary while add[key] is'
base_dict[key] = [base_dict[key]]
# append list or telescope
if isinstance( base_dict[key] , dict ):
append_nestdict(base_dict[key],add_dict[key]) # telescope
else:
base_dict[key].append(add_dict[key])
#: for add_dict[key]
# base_dict will be updated through its pointer
return
#: def append_nestdict()
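def _example_append_nestdict():
    """Illustrative sketch only (not part of the original module): shows how
    append_nestdict() accumulates values, including nested dictionaries, into
    lists on the base dictionary (updated in place). The key names are made up.
    """
    base = {}
    append_nestdict(base, {'iter': 1, 'forces': {'cd': 0.020}})
    append_nestdict(base, {'iter': 2, 'forces': {'cd': 0.019}})
    assert base['iter'] == [1, 2]
    assert base['forces']['cd'] == [0.020, 0.019]
    return base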
# -------------------------------------------------------------------
# Matlab Bunch Class
# -------------------------------------------------------------------
class mat_bunch:
""" replicates dictionary functionality with class dot structure
for output of dictionaries to matlab
"""
def __init__(self, d):
for k, v in d.items():
if isinstance(v, dict):
if len(v): v = mat_bunch(v)
else: v = []
self.__dict__[k] = v
def __dict__(self):
return self.__dict__
# items
def keys(self):
return self.__dict__.keys()
def values(self):
return self.__dict__.values()
def items(self):
return self.__dict__.items()
# dictionary get/set/etc
def __getitem__(self,k):
return self.__dict__[k]
def __setitem__(self,k,v):
self.__dict__[k] = v
def __delitem__(self,k):
del self.__dict__[k]
def __str__(self):
print_format = '%s: %s'
state = []
for k,v in self.__dict__.items():
if isinstance(v,mat_bunch):
v = '%i-item mat_bunch' % len(v.items())
state.append(print_format % (k,v) )
return '\n'.join(state)
#: class mat_bunch
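def _example_mat_bunch():
    """Illustrative sketch only (not part of the original module): mat_bunch
    wraps nested dictionaries so that scipy.io.savemat() writes them out as
    nested MATLAB structs, which is what save_data() relies on for '.mat'
    files. The field names used here are made up.
    """
    bunch = mat_bunch({'mesh': 'mesh.su2', 'forces': {'cd': [0.020, 0.019]}})
    assert bunch['mesh'] == 'mesh.su2'
    assert isinstance(bunch['forces'], mat_bunch)
    assert bunch.forces.cd == [0.020, 0.019]
    return bunch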
| lgpl-2.1 | -8,448,012,229,911,935,000 | 32.950311 | 109 | 0.496585 | false |
Techbikers/techbikers | server/riders/serializers.py | 1 | 4229 | import hashlib
from django.contrib.auth.models import User
from rest_framework import serializers
from server.rides.models import RideRiders
from server.fundraisers.serializers import FundraiserSerializer
class RiderSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='get_full_name', read_only=True)
avatar = serializers.SerializerMethodField(method_name='get_gravatar_url', read_only=True)
company = serializers.CharField(source='profile.company', required=False, allow_blank=True)
website = serializers.URLField(source='profile.website', required=False, allow_blank=True)
twitter = serializers.CharField(source='profile.twitter', required=False, allow_blank=True)
biography = serializers.CharField(source='profile.biography', required=False, allow_blank=True)
statement = serializers.CharField(source='profile.statement', required=False, allow_blank=True)
donation_page = serializers.URLField(source='profile.donation_page', read_only=True)
rides = serializers.SerializerMethodField(source='get_rides', read_only=True)
fundraisers = FundraiserSerializer(source='fundraiser_set', many=True, read_only=True)
def validate_email(self, value):
if User.objects.filter(email__iexact=value):
raise serializers.ValidationError("This email address is already in use.")
return value
def get_gravatar_url(self, rider):
return "https://www.gravatar.com/avatar/" + hashlib.md5(rider.email.lower()).hexdigest()
def get_rides(self, rider):
return rider.ride_set.filter(rideriders__status=RideRiders.REGISTERED).values_list('id', flat=True)
def to_representation(self, instance):
# Instantiate the superclass normally
data = super(RiderSerializer, self).to_representation(instance)
# Only include the email field if the user is looking up their own record
request = self.context.get('request', None)
if not request.user.is_authenticated() or (data.get('id', 0) != request.user.id):
data.pop("email")
return data
def create(self, validated_data):
email = validated_data.get("email", None)
password = validated_data.get("password", None)
profile = validated_data.get("profile", {})
# Create the new user
new_user = User.objects.create_user(email, email, password)
new_user.first_name = validated_data.get("first_name", None)
new_user.last_name = validated_data.get("last_name", None)
new_user.save()
new_user.profile.company = profile.get("company", None)
new_user.profile.website = profile.get("website", None)
new_user.profile.twitter = profile.get("twitter", None)
new_user.profile.biography = profile.get("biography", None)
new_user.profile.statement = profile.get("statement", None)
new_user.profile.save()
return new_user
def update(self, instance, validated_data):
email = validated_data.get("email", instance.email)
profile = validated_data.get("profile", {})
instance.username = email
instance.email = email
instance.first_name = validated_data.get("first_name", instance.first_name)
instance.last_name = validated_data.get("last_name", instance.last_name)
instance.save()
instance.profile.company = profile.get("company", instance.profile.company)
instance.profile.website = profile.get("website", instance.profile.website)
instance.profile.twitter = profile.get("twitter", instance.profile.twitter)
instance.profile.biography = profile.get("biography", instance.profile.biography)
instance.profile.statement = profile.get("statement", instance.profile.statement)
instance.profile.save()
return instance
class Meta:
model = User
fields = ('id', 'email', 'password', 'name', 'first_name', 'last_name', 'avatar', 'company',
'website', 'twitter', 'biography', 'statement', 'donation_page', 'rides', 'fundraisers')
read_only_fields = ('id', 'donation_page', 'fundraisers')
extra_kwargs = {'password': { 'required': False, 'write_only': True }}
| mit | -6,279,959,711,486,975,000 | 48.752941 | 107 | 0.686687 | false |
egabancho/invenio | invenio/legacy/refextract/kbs.py | 3 | 32307 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import re
import six
import sys
import csv
from six import iteritems
from invenio.legacy.refextract.config import CFG_REFEXTRACT_KBS
from invenio.modules.knowledge.api import get_kbr_items
from invenio.config import CFG_REFEXTRACT_KBS_OVERRIDE
from invenio.legacy.refextract.regexs import re_kb_line, \
re_regexp_character_class, \
re_report_num_chars_to_escape, \
re_extract_quoted_text, \
re_extract_char_class, \
re_punctuation
from invenio.legacy.docextract.utils import write_message
from invenio.legacy.docextract.text import re_group_captured_multiple_space
from invenio.utils.hash import md5
from invenio.legacy.search_engine import get_collection_reclist
from invenio.legacy.search_engine.utils import get_fieldvalues
def get_kbs(custom_kbs_files=None, cache={}):
"""Load kbs (with caching)
This function stores the loaded kbs into the cache variable
For the caching to work, it needs to receive an empty dictionary
as "cache" paramater.
"""
cache_key = make_cache_key(custom_kbs_files)
if cache_key not in cache:
# Build paths from defaults and specified ones
kbs_files = CFG_REFEXTRACT_KBS.copy()
for key, path in CFG_REFEXTRACT_KBS_OVERRIDE.items():
kbs_files[key] = path
if custom_kbs_files:
for key, path in custom_kbs_files.items():
if path:
kbs_files[key] = path
# Loads kbs from those paths
cache[cache_key] = load_kbs(kbs_files)
return cache[cache_key]
def load_kbs(kbs_files):
"""Load kbs (without caching)
Args:
- kb_files: list of custom paths you can specify to override the
default values
If path starts with "kb:", the kb will be loaded from the database
"""
return {
'journals_re': build_journals_re_kb(kbs_files['journals-re']),
'journals': load_kb(kbs_files['journals'], build_journals_kb),
'report-numbers': build_reportnum_kb(kbs_files['report-numbers']),
'authors': build_authors_kb(kbs_files['authors']),
'books': build_books_kb(kbs_files['books']),
'publishers': load_kb(kbs_files['publishers'], build_publishers_kb),
'special_journals': build_special_journals_kb(kbs_files['special-journals']),
'collaborations': load_kb(kbs_files['collaborations'], build_collaborations_kb),
}
def load_kb(path, builder):
try:
path.startswith
except AttributeError:
write_message("Loading kb from array", verbose=3)
return load_kb_from_iterable(path, builder)
else:
write_message("Loading kb from %s" % path, verbose=3)
kb_start = 'kb:'
records_start = 'records:'
if path.startswith(kb_start):
return load_kb_from_db(path[len(kb_start):], builder)
elif path.startswith(records_start):
return load_kb_from_records(path[len(kb_start):], builder)
else:
return load_kb_from_file(path, builder)
def make_cache_key(custom_kbs_files=None):
"""Create cache key for kbs caches instances
This function generates a unique key for a given set of arguments.
The files dictionary is transformed like this:
{'journal': '/var/journal.kb', 'books': '/var/books.kb'}
to
"journal=/var/journal.kb;books=/var/books.kb"
Then _inspire is appended if we are an INSPIRE site.
"""
if custom_kbs_files:
serialized_args = ('%s=%s' % v for v in iteritems(custom_kbs_files))
serialized_args = ';'.join(serialized_args)
else:
serialized_args = "default"
cache_key = md5(serialized_args).digest()
return cache_key
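def _example_make_cache_key():
    """Illustrative sketch only (not part of the original module): identical
    custom kb configurations map to the same md5-based key, so get_kbs() only
    loads a given configuration once. The file path used here is made up.
    """
    key_a = make_cache_key({'journals': '/tmp/journals.kb'})
    key_b = make_cache_key({'journals': '/tmp/journals.kb'})
    assert key_a == key_b
    assert key_a != make_cache_key()
    return key_a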
def order_reportnum_patterns_bylen(numeration_patterns):
"""Given a list of user-defined patterns for recognising the numeration
styles of an institute's preprint references, for each pattern,
strip out character classes and record the length of the pattern.
Then add the length and the original pattern (in a tuple) into a new
list for these patterns and return this list.
@param numeration_patterns: (list) of strings, whereby each string is
a numeration pattern.
@return: (list) of tuples, where each tuple contains a pattern and
its length.
"""
def _compfunc_bylen(a, b):
"""Compares regexp patterns by the length of the pattern-text.
"""
if a[0] < b[0]:
return 1
elif a[0] == b[0]:
return 0
else:
return -1
pattern_list = []
for pattern in numeration_patterns:
base_pattern = re_regexp_character_class.sub('1', pattern)
pattern_list.append((len(base_pattern), pattern))
pattern_list.sort(_compfunc_bylen)
return pattern_list
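def _example_order_reportnum_patterns_bylen():
    """Illustrative sketch only (not part of the original module): patterns
    are ordered longest first (a character class such as [ab] counts as a
    single character), so the made-up pattern 'yyyy-999' sorts ahead of
    'yy-999'.
    """
    ordered = order_reportnum_patterns_bylen([u'yy-999', u'yyyy-999'])
    assert ordered[0][1] == u'yyyy-999'
    return ordered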
def create_institute_numeration_group_regexp_pattern(patterns):
"""Using a list of regexp patterns for recognising numeration patterns
for institute preprint references, ordered by length - longest to
shortest - create a grouped 'OR' or of these patterns, ready to be
used in a bigger regexp.
@param patterns: (list) of strings. All of the numeration regexp
patterns for recognising an institute's preprint reference styles.
@return: (string) a grouped 'OR' regexp pattern of the numeration
patterns. E.g.:
(?P<num>[12]\d{3} \d\d\d|\d\d \d\d\d|[A-Za-z] \d\d\d)
"""
patterns_list = [institute_num_pattern_to_regex(p[1]) for p in patterns]
grouped_numeration_pattern = u"(?P<numn>%s)" % u'|'.join(patterns_list)
return grouped_numeration_pattern
def institute_num_pattern_to_regex(pattern):
"""Given a numeration pattern from the institutes preprint report
numbers KB, convert it to turn it into a regexp string for
recognising such patterns in a reference line.
Change:
\ -> \\
9 -> \d
a -> [A-Za-z]
v -> [Vv] # Tony for arXiv vN
mm -> (0[1-9]|1[0-2])
yy -> \d{2}
yyyy -> [12]\d{3}
/ -> \/
s -> \s*
@param pattern: (string) a user-defined preprint reference numeration
pattern.
@return: (string) the regexp for recognising the pattern.
"""
simple_replacements = [
('9', r'\d'),
('9+', r'\d+'),
('w+', r'\w+'),
('a', r'[A-Za-z]'),
('v', r'[Vv]'),
('mm', r'(0[1-9]|1[0-2])'),
('yyyy', r'[12]\d{3}'),
('yy', r'\d\d'),
('s', r'\s*'),
(r'/', r'\/')]
# first, escape certain characters that could be sensitive to a regexp:
pattern = re_report_num_chars_to_escape.sub(r'\\\g<1>', pattern)
# now loop through and carry out the simple replacements:
for repl in simple_replacements:
pattern = pattern.replace(repl[0], repl[1])
# now replace a couple of regexp-like paterns:
# quoted string with non-quoted version ("hello" with hello);
# Replace / [abcd ]/ with /( [abcd])?/ :
pattern = re_extract_quoted_text[0].sub(re_extract_quoted_text[1],
pattern)
pattern = re_extract_char_class[0].sub(re_extract_char_class[1],
pattern)
# the pattern has been transformed
return pattern
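def _example_institute_num_pattern_to_regex():
    """Illustrative sketch only (not part of the original module): converts
    the made-up KB numeration pattern 'yy 999' into its regexp form (per the
    table above: 'yy' -> two digits, '9' -> one digit) and matches it against
    a sample report number.
    """
    regex = institute_num_pattern_to_regex(u'yy 999')
    assert re.match(regex, u'13 042')
    return regex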
def build_reportnum_kb(fpath):
"""Given the path to a knowledge base file containing the details
of institutes and the patterns that their preprint report
numbering schemes take, create a dictionary of regexp search
patterns to recognise these preprint references in reference
lines, and a dictionary of replacements for non-standard preprint
categories in these references.
The knowledge base file should consist only of lines that take one
of the following 3 formats:
#####Institute Name####
(the name of the institute to which the preprint reference patterns
belong, e.g. '#####LANL#####', surrounded by 5 # on either side.)
<pattern>
(numeration patterns for an institute's preprints, surrounded by
< and >.)
seek-term --- replace-term
(i.e. a seek phrase on the left hand side, a replace phrase on the
right hand side, with the two phrases being separated by 3 hyphens.)
E.g.:
ASTRO PH ---astro-ph
The left-hand side term is a non-standard version of the preprint
reference category; the right-hand side term is the standard version.
If the KB file cannot be read from, or an unexpected line is
encountered in the KB, an error message is output to standard error
and execution is halted with an error-code 0.
@param fpath: (string) the path to the knowledge base file.
@return: (tuple) containing 2 dictionaries. The first contains regexp
search patterns used to identify preprint references in a line. This
dictionary is keyed by a tuple containing the line number of the
pattern in the KB and the non-standard category string.
E.g.: (3, 'ASTRO PH').
The second dictionary contains the standardised category string,
and is keyed by the non-standard category string. E.g.: 'astro-ph'.
"""
def _add_institute_preprint_patterns(preprint_classifications,
preprint_numeration_ptns,
preprint_reference_search_regexp_patterns,
standardised_preprint_reference_categories,
kb_line_num):
"""For a list of preprint category strings and preprint numeration
patterns for a given institute, create the regexp patterns for
each of the preprint types. Add the regexp patterns to the
dictionary of search patterns
(preprint_reference_search_regexp_patterns), keyed by the line
number of the institute in the KB, and the preprint category
search string. Also add the standardised preprint category string
to another dictionary, keyed by the line number of its position
in the KB and its non-standardised version.
@param preprint_classifications: (list) of tuples whereby each tuple
contains a preprint category search string and the line number of
the name of institute to which it belongs in the KB.
E.g.: (45, 'ASTRO PH').
@param preprint_numeration_ptns: (list) of preprint reference
numeration search patterns (strings)
@param preprint_reference_search_regexp_patterns: (dictionary) of
regexp patterns used to search in document lines.
@param standardised_preprint_reference_categories: (dictionary)
containing the standardised strings for preprint reference
categories. (E.g. 'astro-ph'.)
@param kb_line_num: (integer) - the line number int the KB at
which a given institute name was found.
@return: None
"""
if preprint_classifications and preprint_numeration_ptns:
# the previous institute had both numeration styles and categories
# for preprint references.
# build regexps and add them for this institute:
# First, order the numeration styles by line-length, and build a
# grouped regexp for recognising numeration:
ordered_patterns = \
order_reportnum_patterns_bylen(preprint_numeration_ptns)
# create a grouped regexp for numeration part of
# preprint reference:
numeration_regexp = \
create_institute_numeration_group_regexp_pattern(ordered_patterns)
# for each "classification" part of preprint references, create a
# complete regex:
# will be in the style "(categ)-(numatn1|numatn2|numatn3|...)"
for classification in preprint_classifications:
search_pattern_str = ur'(?:^|[^a-zA-Z0-9\/\.\-])([\[\(]?(?P<categ>' \
+ classification[0].strip() + u')' \
+ numeration_regexp + u'[\]\)]?)'
re_search_pattern = re.compile(search_pattern_str,
re.UNICODE)
preprint_reference_search_regexp_patterns[(kb_line_num,
classification[0])] =\
re_search_pattern
standardised_preprint_reference_categories[(kb_line_num,
classification[0])] =\
classification[1]
preprint_reference_search_regexp_patterns = {} # a dictionary of patterns
# used to recognise
# categories of preprints
# as used by various
# institutes
standardised_preprint_reference_categories = {} # dictionary of
# standardised category
# strings for preprint cats
current_institute_preprint_classifications = [] # list of tuples containing
# preprint categories in
# their raw & standardised
# forms, as read from KB
current_institute_numerations = [] # list of preprint
# numeration patterns, as
# read from the KB
# pattern to recognise an institute name line in the KB
re_institute_name = re.compile(ur'^\*{5}\s*(.+)\s*\*{5}$', re.UNICODE)
# pattern to recognise an institute preprint categ line in the KB
re_preprint_classification = \
re.compile(ur'^\s*(\w.*)\s*---\s*(\w.*)\s*$', re.UNICODE)
# pattern to recognise a preprint numeration-style line in KB
re_numeration_pattern = re.compile(ur'^\<(.+)\>$', re.UNICODE)
kb_line_num = 0 # when making the dictionary of patterns, which is
# keyed by the category search string, this counter
# will ensure that patterns in the dictionary are not
# overwritten if 2 institutes have the same category
# styles.
try:
if isinstance(fpath, six.string_types):
write_message('Loading reports kb from %s' % fpath, verbose=3)
fh = open(fpath, "r")
fpath_needs_closing = True
else:
fpath_needs_closing = False
fh = fpath
for rawline in fh:
if rawline.startswith('#'):
continue
kb_line_num += 1
try:
rawline = rawline.decode("utf-8")
except UnicodeError:
write_message("*** Unicode problems in %s for line %e"
% (fpath, kb_line_num), sys.stderr, verbose=0)
raise UnicodeError("Error: Unable to parse report number kb (line: %s)" % str(kb_line_num))
m_institute_name = re_institute_name.search(rawline)
if m_institute_name:
# This KB line is the name of an institute
# append the last institute's pattern list to the list of
# institutes:
_add_institute_preprint_patterns(current_institute_preprint_classifications,
current_institute_numerations,
preprint_reference_search_regexp_patterns,
standardised_preprint_reference_categories,
kb_line_num)
# Now start a new dictionary to contain the search patterns
# for this institute:
current_institute_preprint_classifications = []
current_institute_numerations = []
# move on to the next line
continue
m_preprint_classification = \
re_preprint_classification.search(rawline)
if m_preprint_classification:
# This KB line contains a preprint classification for
# the current institute
try:
current_institute_preprint_classifications.append((m_preprint_classification.group(1),
m_preprint_classification.group(2)))
except (AttributeError, NameError):
# didn't match this line correctly - skip it
pass
# move on to the next line
continue
m_numeration_pattern = re_numeration_pattern.search(rawline)
if m_numeration_pattern:
# This KB line contains a preprint item numeration pattern
# for the current institute
try:
current_institute_numerations.append(m_numeration_pattern.group(1))
except (AttributeError, NameError):
# didn't match the numeration pattern correctly - skip it
pass
continue
_add_institute_preprint_patterns(current_institute_preprint_classifications,
current_institute_numerations,
preprint_reference_search_regexp_patterns,
standardised_preprint_reference_categories,
kb_line_num)
if fpath_needs_closing:
write_message('Loaded reports kb', verbose=3)
fh.close()
except IOError:
# problem opening KB for reading, or problem while reading from it:
emsg = """Error: Could not build knowledge base containing """ \
"""institute preprint referencing patterns - failed """ \
"""to read from KB %(kb)s.""" \
% {'kb' : fpath}
write_message(emsg, sys.stderr, verbose=0)
raise IOError("Error: Unable to open report number kb '%s'" % fpath)
# return the preprint reference patterns and the replacement strings
# for non-standard categ-strings:
return (preprint_reference_search_regexp_patterns,
standardised_preprint_reference_categories)
def _cmp_bystrlen_reverse(a, b):
"""A private "cmp" function to be used by the "sort" function of a
list when ordering the titles found in a knowledge base by string-
length - LONGEST -> SHORTEST.
@param a: (string)
@param b: (string)
@return: (integer) - 0 if len(a) == len(b); 1 if len(a) < len(b);
-1 if len(a) > len(b);
"""
if len(a) > len(b):
return -1
elif len(a) < len(b):
return 1
else:
return 0
def build_special_journals_kb(fpath):
"""Load special journals database from file
Special journals are journals that have a volume which is not unique
among different years. To keep the volume unique we are adding the year
before the volume.
"""
journals = set()
write_message('Loading special journals kb from %s' % fpath, verbose=3)
fh = open(fpath, "r")
try:
for line in fh:
# Skip commented lines
if line.startswith('#'):
continue
# Skip empty line
if not line.strip():
continue
journals.add(line.strip())
finally:
fh.close()
write_message('Loaded special journals kb', verbose=3)
return journals
def build_books_kb(fpath):
if isinstance(fpath, six.string_types):
fpath_needs_closing = True
try:
write_message('Loading books kb from %s' % fpath, verbose=3)
fh = open(fpath, "r")
source = csv.reader(fh, delimiter='|', lineterminator=';')
except IOError:
# problem opening KB for reading, or problem while reading from it:
emsg = "Error: Could not build list of books - failed " \
"to read from KB %(kb)s." % {'kb' : fpath}
raise IOError(emsg)
else:
fpath_needs_closing = False
source = fpath
try:
books = {}
for line in source:
try:
books[line[1].upper()] = line
except IndexError:
write_message('Invalid line in books kb %s' % line, verbose=1)
finally:
if fpath_needs_closing:
fh.close()
write_message('Loaded books kb', verbose=3)
return books
def build_publishers_kb(fpath):
if isinstance(fpath, six.string_types):
fpath_needs_closing = True
try:
write_message('Loading publishers kb from %s' % fpath, verbose=3)
fh = open(fpath, "r")
source = csv.reader(fh, delimiter='|', lineterminator='\n')
except IOError:
# problem opening KB for reading, or problem while reading from it:
emsg = "Error: Could not build list of publishers - failed " \
"to read from KB %(kb)s." % {'kb' : fpath}
raise IOError(emsg)
else:
fpath_needs_closing = False
source = fpath
try:
publishers = {}
for line in source:
try:
pattern = re.compile(ur'(\b|^)%s(\b|$)' % line[0], re.I|re.U)
publishers[line[0]] = {'pattern': pattern, 'repl': line[1]}
except IndexError:
write_message('Invalid line in books kb %s' % line, verbose=1)
finally:
if fpath_needs_closing:
fh.close()
write_message('Loaded publishers kb', verbose=3)
return publishers
def build_authors_kb(fpath):
replacements = []
if isinstance(fpath, six.string_types):
fpath_needs_closing = True
try:
fh = open(fpath, "r")
except IOError:
# problem opening KB for reading, or problem while reading from it:
emsg = "Error: Could not build list of authors - failed " \
"to read from KB %(kb)s." % {'kb' : fpath}
write_message(emsg, sys.stderr, verbose=0)
raise IOError("Error: Unable to open authors kb '%s'" % fpath)
else:
fpath_needs_closing = False
fh = fpath
try:
for rawline in fh:
if rawline.startswith('#'):
continue
# Extract the seek->replace terms from this KB line:
m_kb_line = re_kb_line.search(rawline.decode('utf-8'))
if m_kb_line:
seek = m_kb_line.group('seek')
repl = m_kb_line.group('repl')
replacements.append((seek, repl))
finally:
if fpath_needs_closing:
fh.close()
return replacements
def build_journals_re_kb(fpath):
"""Load journals regexps knowledge base
@see build_journals_kb
"""
def make_tuple(match):
regexp = match.group('seek')
repl = match.group('repl')
return regexp, repl
kb = []
if isinstance(fpath, six.string_types):
fpath_needs_closing = True
try:
fh = open(fpath, "r")
except IOError:
raise IOError("Error: Unable to open journal kb '%s'" % fpath)
else:
fpath_needs_closing = False
fh = fpath
try:
for rawline in fh:
if rawline.startswith('#'):
continue
# Extract the seek->replace terms from this KB line:
m_kb_line = re_kb_line.search(rawline.decode('utf-8'))
kb.append(make_tuple(m_kb_line))
finally:
if fpath_needs_closing:
fh.close()
return kb
def load_kb_from_iterable(kb, builder):
return builder(kb)
def load_kb_from_file(path, builder):
try:
fh = open(path, "r")
except IOError as e:
raise StandardError("Unable to open kb '%s': %s" % (path, e))
def lazy_parser(fh):
for rawline in fh:
if rawline.startswith('#'):
continue
try:
rawline = rawline.decode("utf-8").rstrip("\n")
except UnicodeError:
raise StandardError("Unicode problems in kb %s at line %s"
% (path, rawline))
# Test line to ensure that it is a correctly formatted
# knowledge base line:
# Extract the seek->replace terms from this KB line
m_kb_line = re_kb_line.search(rawline)
if m_kb_line: # good KB line
yield m_kb_line.group('seek'), m_kb_line.group('repl')
else:
raise StandardError("Badly formatted kb '%s' at line %s"
% (path, rawline))
try:
return builder(lazy_parser(fh))
finally:
fh.close()
def load_kb_from_db(kb_name, builder):
def lazy_parser(kb):
for mapping in kb:
yield mapping['key'], mapping['value']
return builder(lazy_parser(get_kbr_items(kb_name)))
def load_kb_from_records(kb_name, builder):
def get_tag_values(recid, tags):
for tag in tags:
for value in get_fieldvalues(recid, tag):
yield value
def lazy_parser(collection, left_tags, right_tags):
for recid in get_collection_reclist(collection):
try:
# Key tag
# e.g. for journals database: 711__a
left_values = get_tag_values(recid, left_tags)
except IndexError:
pass
else:
# Value tags
# e.g. for journals database: 130__a, 730__a and 030__a
right_values = get_tag_values(recid, right_tags)
for left_value in set(left_values):
for right_value in set(right_values):
yield left_value, right_value
dummy, collection, left_str, right_str = kb_name.split(':')
left_tags = left_str.split(',')
right_tags = right_str.split(',')
return builder(lazy_parser(collection, left_tags, right_tags))
def build_journals_kb(knowledgebase):
"""Given the path to a knowledge base file, read in the contents
of that file into a dictionary of search->replace word phrases.
The search phrases are compiled into a regex pattern object.
The knowledge base file should consist only of lines that take
the following format:
seek-term --- replace-term
(i.e. a seek phrase on the left hand side, a replace phrase on
the right hand side, with the two phrases being separated by 3
hyphens.) E.g.:
ASTRONOMY AND ASTROPHYSICS ---Astron. Astrophys.
The left-hand side term is a non-standard version of the title,
whereas the right-hand side term is the standard version.
If the KB file cannot be read from, or an unexpected line is
encountered in the KB, an error
message is output to standard error and execution is halted with
an error-code 0.
@param fpath: (string) the path to the knowledge base file.
@return: (tuple) containing a list and a dictionary. The list
contains compiled regex patterns used as search terms and will
be used to force searching order to match that of the knowledge
base.
The dictionary contains the search->replace terms. The keys of
the dictionary are the compiled regex word phrases used for
searching in the reference lines; The values in the dictionary are
the replace terms for matches.
"""
# Initialise vars:
# dictionary of search and replace phrases from KB:
kb = {}
standardised_titles = {}
seek_phrases = []
# A dictionary of "replacement terms" (RHS) to be inserted into KB as
# "seek terms" later, if they were not already explicitly added
# by the KB:
repl_terms = {}
write_message('Processing journals kb', verbose=3)
for seek_phrase, repl in knowledgebase:
# We match on a simplified line, thus dots are replaced
# with spaces
seek_phrase = seek_phrase.replace('.', ' ').upper()
# good KB line
# Add the 'replacement term' into the dictionary of
# replacement terms:
repl_terms[repl] = None
# add the phrase from the KB if the 'seek' phrase is longer
# compile the seek phrase into a pattern:
seek_ptn = re.compile(ur'(?<!\w)(%s)\W' % re.escape(seek_phrase),
re.UNICODE)
kb[seek_phrase] = seek_ptn
standardised_titles[seek_phrase] = repl
seek_phrases.append(seek_phrase)
# Now, for every 'replacement term' found in the KB, if it is
# not already in the KB as a "search term", add it:
for repl_term in repl_terms.keys():
raw_repl_phrase = repl_term.upper()
raw_repl_phrase = re_punctuation.sub(u' ', raw_repl_phrase)
raw_repl_phrase = \
re_group_captured_multiple_space.sub(u' ', raw_repl_phrase)
raw_repl_phrase = raw_repl_phrase.strip()
if raw_repl_phrase not in kb:
# The replace-phrase was not in the KB as a seek phrase
# It should be added.
pattern = ur'(?<!\/)\b(%s)[^A-Z0-9]' % re.escape(raw_repl_phrase)
seek_ptn = re.compile(pattern, re.U)
kb[raw_repl_phrase] = seek_ptn
standardised_titles[raw_repl_phrase] = repl_term
seek_phrases.append(raw_repl_phrase)
# Sort the titles by string length (long - short)
seek_phrases.sort(_cmp_bystrlen_reverse)
write_message('Processed journals kb', verbose=3)
# return the raw knowledge base:
return kb, standardised_titles, seek_phrases
def build_collaborations_kb(knowledgebase):
kb = {}
for pattern, collab in knowledgebase:
prefix = ur"(?:^|[\(\"\[\s]|(?<=\W))\s*(?:(?:the|and)\s+)?"
collaboration_pattern = ur"(?:\s*coll(?:aborations?|\.)?)?"
suffix = ur"(?=$|[><\]\)\"\s.,:])"
pattern = pattern.replace(' ', '\s')
pattern = pattern.replace('Collaboration', collaboration_pattern)
re_pattern = "%s(%s)%s" % (prefix, pattern, suffix)
kb[collab] = re.compile(re_pattern, re.I|re.U)
return kb
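def _example_build_collaborations_kb():
    """Illustrative sketch only (not part of the original module): builds a
    one-entry collaborations KB from an in-memory iterable (the same
    (pattern, collaboration) pairs the loaders above yield) and checks that
    the generated regexp matches a sample reference phrase.
    """
    kb = build_collaborations_kb([(u'ATLAS Collaboration',
                                   u'ATLAS Collaboration')])
    assert kb[u'ATLAS Collaboration'].search(u'the ATLAS Collaboration reported')
    return kb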
| gpl-2.0 | 1,831,684,262,560,901,400 | 40.207908 | 107 | 0.568081 | false |
eamigo86/graphene-django-extras | graphene_django_extras/filters/filter.py | 1 | 1492 | # -*- coding: utf-8 -*-
from django_filters.filterset import BaseFilterSet, FilterSet
from django_filters.filterset import FILTER_FOR_DBFIELD_DEFAULTS
from graphene_django.filter.utils import replace_csv_filters
def get_filterset_class(filterset_class, **meta):
"""
Get the class to be used as the FilterSet.
"""
if filterset_class:
# If were given a FilterSet class, then set it up.
graphene_filterset_class = setup_filterset(filterset_class)
else:
# Otherwise create one.
graphene_filterset_class = custom_filterset_factory(**meta)
replace_csv_filters(graphene_filterset_class)
return graphene_filterset_class
class GrapheneFilterSetMixin(BaseFilterSet):
FILTER_DEFAULTS = FILTER_FOR_DBFIELD_DEFAULTS
def setup_filterset(filterset_class):
""" Wrap a provided filterset in Graphene-specific functionality
"""
return type(
"Graphene{}".format(filterset_class.__name__),
(filterset_class, GrapheneFilterSetMixin),
{},
)
def custom_filterset_factory(model, filterset_base_class=FilterSet, **meta):
"""
Create a filterset for the given model using the provided meta data
"""
meta.update({"model": model, "exclude": []})
meta_class = type(str("Meta"), (object,), meta)
filterset = type(
str("%sFilterSet" % model._meta.object_name),
(filterset_base_class, GrapheneFilterSetMixin),
{"Meta": meta_class},
)
return filterset
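# --- Illustrative usage sketch (not part of the original module) ---
# How get_filterset_class() might be called when resolving a filterable list
# field. "Ingredient" is a made-up Django model; any FilterSet subclass passed
# as the first argument would be wrapped by setup_filterset() instead of being
# generated by custom_filterset_factory().
#
#     filterset_class = get_filterset_class(None, model=Ingredient)
#     filterset = filterset_class(data={"id": "1"},
#                                 queryset=Ingredient.objects.all())
#     ingredients = filterset.qs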
| mit | -8,306,736,808,612,642,000 | 30.083333 | 76 | 0.676944 | false |
bdastur/notes | python/asyncio/testqueue2.py | 1 | 1732 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
import random
import time
async def worker(name, queue):
print("Worker %s started" % name)
while True:
# Get a work item
sleep_for = await queue.get()
#await asyncio.sleep(sleep_for[1])
queue.task_done()
print("Worker %s, task done" % name)
async def main():
task_queue = asyncio.PriorityQueue(20)
total_sleep_time = 0
# for _ in range(20):
# sleep_for = random.uniform(1, 3)
# total_sleep_time += sleep_for
# task_queue.put_nowait((1, sleep_for))
tasks = []
for i in range(3):
worker_name = "Worker-%i" % i
task = asyncio.create_task(worker(worker_name, task_queue))
tasks.append(task)
for _ in range(20):
sleep_for = random.uniform(2, 5)
total_sleep_time += sleep_for
task_queue.put_nowait((1, sleep_for))
await asyncio.sleep(1)
    # Wait until queue is fully processed
    started_at = time.monotonic()
    await task_queue.join()
    total_slept_for = time.monotonic() - started_at
    # Cancel our worker tasks.
    for task in tasks:
        task.cancel()
    # Wait until all worker tasks are cancelled.
    await asyncio.gather(*tasks, return_exceptions=True)
    print('====')
    print(f'3 workers slept in parallel for {total_slept_for:.2f} seconds')
    print(f'total expected sleep time: {total_sleep_time:.2f} seconds')
asyncio.run(main())
| apache-2.0 | -265,001,707,688,438,660 | 27.393443 | 79 | 0.556005 | false |
kmaglione/olympia | apps/editors/forms.py | 4 | 19914 | import datetime
import logging
from datetime import timedelta
from django import forms
from django.core.validators import ValidationError
from django.db.models import Q
from django.forms import widgets
from django.utils.translation import get_language
import happyforms
import jinja2
from tower import ugettext as _, ugettext_lazy as _lazy
import amo
import constants.editors as rvw
from addons.models import Addon, Persona
from amo.urlresolvers import reverse
from amo.utils import raise_required
from applications.models import AppVersion
from editors.helpers import (file_review_status, ReviewAddon, ReviewFiles,
ReviewHelper)
from editors.models import CannedResponse, ReviewerScore, ThemeLock
from editors.tasks import approve_rereview, reject_rereview, send_mail
from files.models import File
log = logging.getLogger('z.reviewers.forms')
ACTION_FILTERS = (('', ''), ('approved', _lazy(u'Approved reviews')),
('deleted', _lazy(u'Deleted reviews')))
ACTION_DICT = dict(approved=amo.LOG.APPROVE_REVIEW,
deleted=amo.LOG.DELETE_REVIEW)
class EventLogForm(happyforms.Form):
start = forms.DateField(required=False,
label=_lazy(u'View entries between'))
end = forms.DateField(required=False,
label=_lazy(u'and'))
filter = forms.ChoiceField(required=False, choices=ACTION_FILTERS,
label=_lazy(u'Filter by type/action'))
def clean(self):
data = self.cleaned_data
# We want this to be inclusive of the end date.
if 'end' in data and data['end']:
data['end'] += timedelta(days=1)
if 'filter' in data and data['filter']:
data['filter'] = ACTION_DICT[data['filter']]
return data
class BetaSignedLogForm(happyforms.Form):
VALIDATION_CHOICES = (
('', ''),
(amo.LOG.BETA_SIGNED_VALIDATION_PASSED.id,
_lazy(u'Passed automatic validation')),
(amo.LOG.BETA_SIGNED_VALIDATION_FAILED.id,
_lazy(u'Failed automatic validation')))
filter = forms.ChoiceField(required=False, choices=VALIDATION_CHOICES,
label=_lazy(u'Filter by automatic validation'))
class ReviewLogForm(happyforms.Form):
start = forms.DateField(required=False,
label=_lazy(u'View entries between'))
end = forms.DateField(required=False, label=_lazy(u'and'))
search = forms.CharField(required=False, label=_lazy(u'containing'))
def __init__(self, *args, **kw):
super(ReviewLogForm, self).__init__(*args, **kw)
# L10n: start, as in "start date"
self.fields['start'].widget.attrs = {'placeholder': _('start'),
'size': 10}
# L10n: end, as in "end date"
self.fields['end'].widget.attrs = {'size': 10, 'placeholder': _('end')}
# L10n: Description of what can be searched for
search_ph = _('add-on, editor or comment')
self.fields['search'].widget.attrs = {'placeholder': search_ph,
'size': 30}
def clean(self):
data = self.cleaned_data
# We want this to be inclusive of the end date.
if 'end' in data and data['end']:
data['end'] += timedelta(days=1)
return data
class QueueSearchForm(happyforms.Form):
text_query = forms.CharField(
required=False,
label=_lazy(u'Search by add-on name / author email'))
searching = forms.BooleanField(widget=forms.HiddenInput, required=False,
initial=True)
admin_review = forms.ChoiceField(required=False,
choices=[('', ''),
('1', _lazy(u'yes')),
('0', _lazy(u'no'))],
label=_lazy(u'Admin Flag'))
application_id = forms.ChoiceField(
required=False,
label=_lazy(u'Application'),
choices=([('', '')] +
[(a.id, a.pretty) for a in amo.APPS_ALL.values()]))
max_version = forms.ChoiceField(
required=False,
label=_lazy(u'Max. Version'),
choices=[('', _lazy(u'Select an application first'))])
waiting_time_days = forms.ChoiceField(
required=False,
label=_lazy(u'Days Since Submission'),
choices=([('', '')] +
[(i, i) for i in range(1, 10)] + [('10+', '10+')]))
addon_type_ids = forms.MultipleChoiceField(
required=False,
label=_lazy(u'Add-on Types'),
choices=((id, tp) for id, tp in amo.ADDON_TYPES.items()))
platform_ids = forms.MultipleChoiceField(
required=False,
label=_lazy(u'Platforms'),
choices=[(p.id, p.name)
for p in amo.PLATFORMS.values()
if p not in (amo.PLATFORM_ANY, amo.PLATFORM_ALL)])
def __init__(self, *args, **kw):
super(QueueSearchForm, self).__init__(*args, **kw)
w = self.fields['application_id'].widget
# Get the URL after the urlconf has loaded.
w.attrs['data-url'] = reverse('editors.application_versions_json')
def version_choices_for_app_id(self, app_id):
versions = AppVersion.objects.filter(application=app_id)
return [('', '')] + [(v.version, v.version) for v in versions]
def clean_addon_type_ids(self):
if self.cleaned_data['addon_type_ids']:
# Remove "Any Addon Extension" from the list so that no filter
# is applied in that case.
ids = set(self.cleaned_data['addon_type_ids'])
self.cleaned_data['addon_type_ids'] = ids - set(str(amo.ADDON_ANY))
return self.cleaned_data['addon_type_ids']
def clean_application_id(self):
if self.cleaned_data['application_id']:
choices = self.version_choices_for_app_id(
self.cleaned_data['application_id'])
self.fields['max_version'].choices = choices
return self.cleaned_data['application_id']
def clean_max_version(self):
if self.cleaned_data['max_version']:
if not self.cleaned_data['application_id']:
raise forms.ValidationError("No application selected")
return self.cleaned_data['max_version']
def filter_qs(self, qs):
data = self.cleaned_data
if data['admin_review']:
qs = qs.filter(admin_review=data['admin_review'])
if data['addon_type_ids']:
qs = qs.filter_raw('addon_type_id IN', data['addon_type_ids'])
if data['application_id']:
qs = qs.filter_raw('apps_match.application_id =',
data['application_id'])
# We join twice so it includes all apps, and not just the ones
# filtered by the search criteria.
app_join = ('LEFT JOIN applications_versions apps_match ON '
'(versions.id = apps_match.version_id)')
qs.base_query['from'].extend([app_join])
if data['max_version']:
joins = ["""JOIN applications_versions vs
ON (versions.id = vs.version_id)""",
"""JOIN appversions max_version
ON (max_version.id = vs.max)"""]
qs.base_query['from'].extend(joins)
qs = qs.filter_raw('max_version.version =',
data['max_version'])
if data['platform_ids']:
qs = qs.filter_raw('files.platform_id IN', data['platform_ids'])
# Adjust _file_platform_ids so that it includes ALL platforms
            # not just the ones filtered by the search criteria:
qs.base_query['from'].extend([
"""LEFT JOIN files all_files
ON (all_files.version_id = versions.id)"""])
group = 'GROUP_CONCAT(DISTINCT all_files.platform_id)'
qs.base_query['select']['_file_platform_ids'] = group
if data['text_query']:
lang = get_language()
joins = [
'LEFT JOIN addons_users au on (au.addon_id = addons.id)',
'LEFT JOIN users u on (u.id = au.user_id)',
"""LEFT JOIN translations AS supportemail_default ON
(supportemail_default.id = addons.supportemail AND
supportemail_default.locale=addons.defaultlocale)""",
"""LEFT JOIN translations AS supportemail_local ON
(supportemail_local.id = addons.supportemail AND
supportemail_local.locale=%%(%s)s)""" %
qs._param(lang),
"""LEFT JOIN translations AS ad_name_local ON
(ad_name_local.id = addons.name AND
ad_name_local.locale=%%(%s)s)""" %
qs._param(lang)]
qs.base_query['from'].extend(joins)
fuzzy_q = u'%' + data['text_query'] + u'%'
qs = qs.filter_raw(
Q('addon_name LIKE', fuzzy_q) |
# Search translated add-on names / support emails in
# the editor's locale:
Q('ad_name_local.localized_string LIKE', fuzzy_q) |
Q('supportemail_default.localized_string LIKE', fuzzy_q) |
Q('supportemail_local.localized_string LIKE', fuzzy_q) |
Q('au.role IN', [amo.AUTHOR_ROLE_OWNER,
amo.AUTHOR_ROLE_DEV],
'u.email LIKE', fuzzy_q))
if data['waiting_time_days']:
if data['waiting_time_days'] == '10+':
# Special case
args = ('waiting_time_days >=',
int(data['waiting_time_days'][:-1]))
else:
args = ('waiting_time_days <=', data['waiting_time_days'])
qs = qs.having(*args)
return qs
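    # Illustrative use of filter_qs (a hedged sketch; ViewPendingQueue and the
    # request object are assumptions, not part of this module):
    #
    #   form = QueueSearchForm(request.GET)
    #   if form.is_valid():
    #       qs = form.filter_qs(ViewPendingQueue.objects.all())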
class AddonFilesMultipleChoiceField(forms.ModelMultipleChoiceField):
def label_from_instance(self, addon_file):
addon = addon_file.version.addon
# L10n: 0 = platform, 1 = filename, 2 = status message
return jinja2.Markup(_(u"<strong>%s</strong> · %s · %s")
% (addon_file.get_platform_display(),
addon_file.filename,
file_review_status(addon, addon_file)))
class NonValidatingChoiceField(forms.ChoiceField):
"""A ChoiceField that doesn't validate."""
def validate(self, value):
pass
class ReviewAddonForm(happyforms.Form):
addon_files = AddonFilesMultipleChoiceField(
required=False,
queryset=File.objects.none(), label=_lazy(u'Files:'),
widget=forms.CheckboxSelectMultiple())
comments = forms.CharField(required=True, widget=forms.Textarea(),
label=_lazy(u'Comments:'))
canned_response = NonValidatingChoiceField(required=False)
action = forms.ChoiceField(required=True, widget=forms.RadioSelect())
operating_systems = forms.CharField(required=False,
label=_lazy(u'Operating systems:'))
applications = forms.CharField(required=False,
label=_lazy(u'Applications:'))
notify = forms.BooleanField(required=False,
label=_lazy(u'Notify me the next time this '
'add-on is updated. (Subsequent '
'updates will not generate an '
'email)'))
adminflag = forms.BooleanField(required=False,
label=_lazy(u'Clear Admin Review Flag'))
def is_valid(self):
result = super(ReviewAddonForm, self).is_valid()
if result:
self.helper.set_data(self.cleaned_data)
return result
def __init__(self, *args, **kw):
self.helper = kw.pop('helper')
self.type = kw.pop('type', amo.CANNED_RESPONSE_ADDON)
super(ReviewAddonForm, self).__init__(*args, **kw)
self.fields['addon_files'].queryset = self.helper.all_files
self.addon_files_disabled = (
self.helper.all_files
            # We can't review disabled files, and public ones are
            # already reviewed.
.filter(status__in=[amo.STATUS_DISABLED, amo.STATUS_PUBLIC])
.values_list('pk', flat=True))
# We're starting with an empty one, which will be hidden via CSS.
canned_choices = [['', [('', _('Choose a canned response...'))]]]
responses = CannedResponse.objects.filter(type=self.type)
# Loop through the actions (prelim, public, etc).
for k, action in self.helper.actions.iteritems():
action_choices = [[c.response, c.name] for c in responses
if c.sort_group and k in c.sort_group.split(',')]
# Add the group of responses to the canned_choices array.
if action_choices:
canned_choices.append([action['label'], action_choices])
# Now, add everything not in a group.
for r in responses:
if not r.sort_group:
canned_choices.append([r.response, r.name])
self.fields['canned_response'].choices = canned_choices
self.fields['action'].choices = [
(k, v['label']) for k, v in self.helper.actions.items()]
class ReviewFileForm(ReviewAddonForm):
def clean_addon_files(self):
files = self.data.getlist('addon_files')
if self.data.get('action', '') == 'prelim':
if not files:
raise ValidationError(_('You must select some files.'))
for pk in files:
file = self.helper.all_files.get(pk=pk)
if (file.status != amo.STATUS_UNREVIEWED and not
(self.helper.addon.status == amo.STATUS_LITE and
file.status == amo.STATUS_UNREVIEWED)):
raise ValidationError(_('File %s is not pending review.')
% file.filename)
return self.fields['addon_files'].queryset.filter(pk__in=files)
def get_review_form(data, request=None, addon=None, version=None):
helper = ReviewHelper(request=request, addon=addon, version=version)
form = {ReviewAddon: ReviewAddonForm,
ReviewFiles: ReviewFileForm}[helper.handler.__class__]
return form(data, helper=helper)
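# Illustrative call site for get_review_form (a sketch; the surrounding view
# variables such as `addon` and `request` are assumptions):
#
#   form = get_review_form(request.POST or None, request=request,
#                          addon=addon, version=addon.current_version)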
class MOTDForm(happyforms.Form):
motd = forms.CharField(required=True, widget=widgets.Textarea())
class DeletedThemeLogForm(ReviewLogForm):
def __init__(self, *args, **kwargs):
super(DeletedThemeLogForm, self).__init__(*args, **kwargs)
self.fields['search'].widget.attrs = {
# L10n: Description of what can be searched for.
'placeholder': _lazy(u'theme name'),
'size': 30}
class ThemeReviewForm(happyforms.Form):
theme = forms.ModelChoiceField(queryset=Persona.objects.all(),
widget=forms.HiddenInput())
action = forms.TypedChoiceField(
choices=rvw.REVIEW_ACTIONS.items(),
widget=forms.HiddenInput(attrs={'class': 'action'}),
coerce=int, empty_value=None
)
# Duplicate is the same as rejecting but has its own flow.
reject_reason = forms.TypedChoiceField(
choices=rvw.THEME_REJECT_REASONS.items() + [('duplicate', '')],
widget=forms.HiddenInput(attrs={'class': 'reject-reason'}),
required=False, coerce=int, empty_value=None)
comment = forms.CharField(
required=False, widget=forms.HiddenInput(attrs={'class': 'comment'}))
def clean_theme(self):
theme = self.cleaned_data['theme']
try:
ThemeLock.objects.get(theme=theme)
except ThemeLock.DoesNotExist:
raise forms.ValidationError(
_('Someone else is reviewing this theme.'))
return theme
def clean_reject_reason(self):
reject_reason = self.cleaned_data.get('reject_reason', None)
if (self.cleaned_data.get('action') == rvw.ACTION_REJECT
and reject_reason is None):
raise_required()
return reject_reason
def clean_comment(self):
        # The comment field is required for duplicate, flag, moreinfo and the
        # "other" reject reason.
action = self.cleaned_data.get('action')
reject_reason = self.cleaned_data.get('reject_reason')
comment = self.cleaned_data.get('comment')
if (not comment and (action == rvw.ACTION_FLAG or
action == rvw.ACTION_MOREINFO or
(action == rvw.ACTION_REJECT and
reject_reason == 0))):
raise_required()
return comment
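    # save() applies the chosen action: it detects whether this is a
    # re-review, updates the theme/add-on status accordingly, mails the
    # decision unless it is a flag on a re-review, logs the review, awards
    # reviewer points for approve/reject/duplicate, releases the ThemeLock
    # and returns the awarded score (0 if no points were given).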
def save(self):
action = self.cleaned_data['action']
comment = self.cleaned_data.get('comment')
reject_reason = self.cleaned_data.get('reject_reason')
theme = self.cleaned_data['theme']
is_rereview = (
theme.rereviewqueuetheme_set.exists() and
theme.addon.status not in (amo.STATUS_PENDING,
amo.STATUS_REVIEW_PENDING))
theme_lock = ThemeLock.objects.get(theme=self.cleaned_data['theme'])
mail_and_log = True
if action == rvw.ACTION_APPROVE:
if is_rereview:
approve_rereview(theme)
theme.addon.update(status=amo.STATUS_PUBLIC)
theme.approve = datetime.datetime.now()
theme.save()
elif action in (rvw.ACTION_REJECT, rvw.ACTION_DUPLICATE):
if is_rereview:
reject_rereview(theme)
else:
theme.addon.update(status=amo.STATUS_REJECTED)
elif action == rvw.ACTION_FLAG:
if is_rereview:
mail_and_log = False
else:
theme.addon.update(status=amo.STATUS_REVIEW_PENDING)
elif action == rvw.ACTION_MOREINFO:
if not is_rereview:
theme.addon.update(status=amo.STATUS_REVIEW_PENDING)
if mail_and_log:
send_mail(self.cleaned_data, theme_lock)
# Log.
amo.log(amo.LOG.THEME_REVIEW, theme.addon, details={
'theme': theme.addon.name.localized_string,
'action': action,
'reject_reason': reject_reason,
'comment': comment}, user=theme_lock.reviewer)
log.info('%sTheme %s (%s) - %s' % (
'[Rereview] ' if is_rereview else '', theme.addon.name,
theme.id, action))
score = 0
if action in (rvw.ACTION_REJECT, rvw.ACTION_DUPLICATE,
rvw.ACTION_APPROVE):
score = ReviewerScore.award_points(
theme_lock.reviewer, theme.addon, theme.addon.status)
theme_lock.delete()
return score
class ThemeSearchForm(forms.Form):
q = forms.CharField(
required=False, label=_lazy(u'Search'),
widget=forms.TextInput(attrs={'autocomplete': 'off',
'placeholder': _lazy(u'Search')}))
queue_type = forms.CharField(required=False, widget=forms.HiddenInput())
class ReviewAppLogForm(ReviewLogForm):
def __init__(self, *args, **kwargs):
super(ReviewAppLogForm, self).__init__(*args, **kwargs)
self.fields['search'].widget.attrs = {
# L10n: Description of what can be searched for.
'placeholder': _lazy(u'app, reviewer, or comment'),
'size': 30}
class WhiteboardForm(forms.ModelForm):
class Meta:
model = Addon
fields = ['whiteboard']
| bsd-3-clause | -1,669,600,586,924,650,000 | 40.4875 | 79 | 0.56518 | false |
obestwalter/mau-mau | mau_mau/stats.py | 1 | 1816 | import logging
import sys
from statistics import mean
from timeit import timeit
import fire
from mau_mau import constants, rules, play
log = logging.getLogger()
class Stats:
def turns(self, players=3, reps=1000):
"""calculate mean turns for <reps> games of <players>"""
games = self._simulate_games(players, reps)
log.info(f"mean turns played: {mean([g.turns for g in games])}")
def winners(self, players=("Eric", "Terry", "John"), reps=1000):
"""calculate winner distribution for <reps> <players>"""
games = self._simulate_games(players, reps)
wc = {}
# not optimal but premature optimization is the root of all evil ...
for name in players:
wc[name] = len([g for g in games if g.table.winner.name == name])
log.info(f"winner distribution: {wc}")
def durations(self, reps=1000):
"""calculate durations for <reps> games"""
timing = timeit(
setup=(
"from mau_mau.play import play_game;"
"from mau_mau.rules import MauMau;"
"mmRules = MauMau()"
),
stmt="play_game(mmRules, 3)",
number=reps,
)
log.info("it takes %0.3f seconds to play %s games", timing, reps)
def _simulate_games(self, players, reps):
log.info("players: %s; %s reps", players, reps)
mmRules = rules.MauMau()
games = []
for i in range(reps):
game = play.play_game(mmRules, players)
games.append(game)
return games
def cli():
"""Command line interface."""
logging.basicConfig(format=constants.LOG.FMT, level=logging.INFO)
try:
fire.Fire(Stats)
except KeyboardInterrupt:
sys.exit("\nsimulation was interrupted by user!")
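# Example invocations (a sketch; assumes cli() is exposed as a console script
# named `mau-mau` or called from a __main__ guard elsewhere in the package):
#
#   mau-mau turns --players 4 --reps 500
#   mau-mau winners --reps 200
#   mau-mau durations --reps 100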
| mit | 5,269,908,785,438,771,000 | 30.859649 | 77 | 0.589758 | false |
cespare/pastedown | vendor/pygments/pygments/lexers/web.py | 9 | 126452 | # -*- coding: utf-8 -*-
"""
pygments.lexers.web
~~~~~~~~~~~~~~~~~~~
Lexers for web-related languages and markup.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import copy
from pygments.lexer import RegexLexer, ExtendedRegexLexer, bygroups, using, \
include, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Other, Punctuation, Literal
from pygments.util import get_bool_opt, get_list_opt, looks_like_xml, \
html_doctype_matches, unirange
from pygments.lexers.agile import RubyLexer
from pygments.lexers.compiled import ScalaLexer
__all__ = ['HtmlLexer', 'XmlLexer', 'JavascriptLexer', 'JsonLexer', 'CssLexer',
'PhpLexer', 'ActionScriptLexer', 'XsltLexer', 'ActionScript3Lexer',
'MxmlLexer', 'HaxeLexer', 'HamlLexer', 'SassLexer', 'ScssLexer',
'ObjectiveJLexer', 'CoffeeScriptLexer', 'LiveScriptLexer',
'DuelLexer', 'ScamlLexer', 'JadeLexer', 'XQueryLexer',
'DtdLexer', 'DartLexer', 'LassoLexer']
class JavascriptLexer(RegexLexer):
"""
For JavaScript source code.
"""
name = 'JavaScript'
aliases = ['js', 'javascript']
filenames = ['*.js', ]
mimetypes = ['application/javascript', 'application/x-javascript',
'text/x-javascript', 'text/javascript', ]
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
(r'', Text, '#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class JsonLexer(RegexLexer):
"""
For JSON data structures.
*New in Pygments 1.5.*
"""
name = 'JSON'
aliases = ['json']
filenames = ['*.json']
mimetypes = [ 'application/json', ]
# integer part of a number
int_part = r'-?(0|[1-9]\d*)'
# fractional part of a number
frac_part = r'\.\d+'
# exponential part of a number
exp_part = r'[eE](\+|-)?\d+'
flags = re.DOTALL
tokens = {
'whitespace': [
(r'\s+', Text),
],
# represents a simple terminal value
'simplevalue': [
(r'(true|false|null)\b', Keyword.Constant),
(('%(int_part)s(%(frac_part)s%(exp_part)s|'
'%(exp_part)s|%(frac_part)s)') % vars(),
Number.Float),
(int_part, Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
],
# the right hand side of an object, after the attribute name
'objectattribute': [
include('value'),
(r':', Punctuation),
# comma terminates the attribute but expects more
(r',', Punctuation, '#pop'),
# a closing bracket terminates the entire object, so pop twice
(r'}', Punctuation, ('#pop', '#pop')),
],
# a json object - { attr, attr, ... }
'objectvalue': [
include('whitespace'),
(r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'),
(r'}', Punctuation, '#pop'),
],
        # json array - [ value, value, ... ]
'arrayvalue': [
include('whitespace'),
include('value'),
(r',', Punctuation),
(r']', Punctuation, '#pop'),
],
# a json value - either a simple value or a complex value (object or array)
'value': [
include('whitespace'),
include('simplevalue'),
(r'{', Punctuation, 'objectvalue'),
(r'\[', Punctuation, 'arrayvalue'),
],
        # the root of a json document should be a value
'root': [
include('value'),
],
}
JSONLexer = JsonLexer # for backwards compatibility with Pygments 1.5
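# Minimal usage sketch for the lexers in this module via the standard Pygments
# API (the snippet and formatter choice are illustrative only):
#
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   print(highlight('{"key": [1, 2.5, true]}', JsonLexer(), HtmlFormatter()))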
class ActionScriptLexer(RegexLexer):
"""
For ActionScript source code.
*New in Pygments 0.9.*
"""
name = 'ActionScript'
aliases = ['as', 'actionscript']
filenames = ['*.as']
mimetypes = ['application/x-actionscript3', 'text/x-actionscript3',
'text/actionscript3']
flags = re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex),
(r'[~\^\*!%&<>\|+=:;,/?\\-]+', Operator),
(r'[{}\[\]();.]+', Punctuation),
(r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
r'throw|try|catch|var|with|new|typeof|arguments|instanceof|this|'
r'switch)\b', Keyword),
(r'(class|public|final|internal|native|override|private|protected|'
r'static|import|extends|implements|interface|intrinsic|return|super|'
r'dynamic|function|const|get|namespace|package|set)\b',
Keyword.Declaration),
(r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
Keyword.Constant),
(r'(Accessibility|AccessibilityProperties|ActionScriptVersion|'
r'ActivityEvent|AntiAliasType|ApplicationDomain|AsBroadcaster|Array|'
r'AsyncErrorEvent|AVM1Movie|BevelFilter|Bitmap|BitmapData|'
r'BitmapDataChannel|BitmapFilter|BitmapFilterQuality|BitmapFilterType|'
r'BlendMode|BlurFilter|Boolean|ByteArray|Camera|Capabilities|CapsStyle|'
r'Class|Color|ColorMatrixFilter|ColorTransform|ContextMenu|'
r'ContextMenuBuiltInItems|ContextMenuEvent|ContextMenuItem|'
r'ConvultionFilter|CSMSettings|DataEvent|Date|DefinitionError|'
r'DeleteObjectSample|Dictionary|DisplacmentMapFilter|DisplayObject|'
r'DisplacmentMapFilterMode|DisplayObjectContainer|DropShadowFilter|'
r'Endian|EOFError|Error|ErrorEvent|EvalError|Event|EventDispatcher|'
r'EventPhase|ExternalInterface|FileFilter|FileReference|'
r'FileReferenceList|FocusDirection|FocusEvent|Font|FontStyle|FontType|'
r'FrameLabel|FullScreenEvent|Function|GlowFilter|GradientBevelFilter|'
r'GradientGlowFilter|GradientType|Graphics|GridFitType|HTTPStatusEvent|'
r'IBitmapDrawable|ID3Info|IDataInput|IDataOutput|IDynamicPropertyOutput'
r'IDynamicPropertyWriter|IEventDispatcher|IExternalizable|'
r'IllegalOperationError|IME|IMEConversionMode|IMEEvent|int|'
r'InteractiveObject|InterpolationMethod|InvalidSWFError|InvokeEvent|'
r'IOError|IOErrorEvent|JointStyle|Key|Keyboard|KeyboardEvent|KeyLocation|'
r'LineScaleMode|Loader|LoaderContext|LoaderInfo|LoadVars|LocalConnection|'
r'Locale|Math|Matrix|MemoryError|Microphone|MorphShape|Mouse|MouseEvent|'
r'MovieClip|MovieClipLoader|Namespace|NetConnection|NetStatusEvent|'
r'NetStream|NewObjectSample|Number|Object|ObjectEncoding|PixelSnapping|'
r'Point|PrintJob|PrintJobOptions|PrintJobOrientation|ProgressEvent|Proxy|'
r'QName|RangeError|Rectangle|ReferenceError|RegExp|Responder|Sample|Scene|'
r'ScriptTimeoutError|Security|SecurityDomain|SecurityError|'
r'SecurityErrorEvent|SecurityPanel|Selection|Shape|SharedObject|'
r'SharedObjectFlushStatus|SimpleButton|Socket|Sound|SoundChannel|'
r'SoundLoaderContext|SoundMixer|SoundTransform|SpreadMethod|Sprite|'
r'StackFrame|StackOverflowError|Stage|StageAlign|StageDisplayState|'
r'StageQuality|StageScaleMode|StaticText|StatusEvent|String|StyleSheet|'
r'SWFVersion|SyncEvent|SyntaxError|System|TextColorType|TextField|'
r'TextFieldAutoSize|TextFieldType|TextFormat|TextFormatAlign|'
r'TextLineMetrics|TextRenderer|TextSnapshot|Timer|TimerEvent|Transform|'
r'TypeError|uint|URIError|URLLoader|URLLoaderDataFormat|URLRequest|'
r'URLRequestHeader|URLRequestMethod|URLStream|URLVariabeles|VerifyError|'
r'Video|XML|XMLDocument|XMLList|XMLNode|XMLNodeType|XMLSocket|XMLUI)\b',
Name.Builtin),
(r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
r'unescape)\b',Name.Function),
(r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class ActionScript3Lexer(RegexLexer):
"""
For ActionScript 3 source code.
*New in Pygments 0.11.*
"""
name = 'ActionScript 3'
aliases = ['as3', 'actionscript3']
filenames = ['*.as']
mimetypes = ['application/x-actionscript', 'text/x-actionscript',
'text/actionscript']
identifier = r'[$a-zA-Z_][a-zA-Z0-9_]*'
typeidentifier = identifier + '(?:\.<\w+>)?'
flags = re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'\s+', Text),
(r'(function\s+)(' + identifier + r')(\s*)(\()',
bygroups(Keyword.Declaration, Name.Function, Text, Operator),
'funcparams'),
(r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' +
typeidentifier + r')',
bygroups(Keyword.Declaration, Text, Name, Text, Punctuation, Text,
Keyword.Type)),
(r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
bygroups(Keyword, Text, Name.Namespace, Text)),
(r'(new)(\s+)(' + typeidentifier + r')(\s*)(\()',
bygroups(Keyword, Text, Keyword.Type, Text, Operator)),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/(\\\\|\\/|[^\n])*/[gisx]*', String.Regex),
(r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
(r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
r'switch|import|include|as|is)\b',
Keyword),
(r'(class|public|final|internal|native|override|private|protected|'
r'static|import|extends|implements|interface|intrinsic|return|super|'
r'dynamic|function|const|get|namespace|package|set)\b',
Keyword.Declaration),
(r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
Keyword.Constant),
(r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
r'unescape)\b', Name.Function),
(identifier, Name),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[~\^\*!%&<>\|+=:;,/?\\{}\[\]().-]+', Operator),
],
'funcparams': [
(r'\s+', Text),
(r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
typeidentifier + r'|\*)(\s*)',
bygroups(Text, Punctuation, Name, Text, Operator, Text,
Keyword.Type, Text), 'defval'),
(r'\)', Operator, 'type')
],
'type': [
(r'(\s*)(:)(\s*)(' + typeidentifier + r'|\*)',
bygroups(Text, Operator, Text, Keyword.Type), '#pop:2'),
(r'\s*', Text, '#pop:2')
],
'defval': [
(r'(=)(\s*)([^(),]+)(\s*)(,?)',
bygroups(Operator, Text, using(this), Text, Operator), '#pop'),
(r',?', Operator, '#pop')
]
}
def analyse_text(text):
if re.match(r'\w+\s*:\s*\w', text):
return 0.3
return 0
class CssLexer(RegexLexer):
"""
For CSS (Cascading Style Sheets).
"""
name = 'CSS'
aliases = ['css']
filenames = ['*.css']
mimetypes = ['text/css']
tokens = {
'root': [
include('basics'),
],
'basics': [
(r'\s+', Text),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'{', Punctuation, 'content'),
(r'\:[a-zA-Z0-9_-]+', Name.Decorator),
(r'\.[a-zA-Z0-9_-]+', Name.Class),
(r'\#[a-zA-Z0-9_-]+', Name.Function),
(r'@[a-zA-Z0-9_-]+', Keyword, 'atrule'),
(r'[a-zA-Z0-9_-]+', Name.Tag),
(r'[~\^\*!%&\[\]\(\)<>\|+=@:;,./?-]', Operator),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single)
],
'atrule': [
(r'{', Punctuation, 'atcontent'),
(r';', Punctuation, '#pop'),
include('basics'),
],
'atcontent': [
include('basics'),
(r'}', Punctuation, '#pop:2'),
],
'content': [
(r'\s+', Text),
(r'}', Punctuation, '#pop'),
(r'url\(.*?\)', String.Other),
(r'^@.*?$', Comment.Preproc),
(r'(azimuth|background-attachment|background-color|'
r'background-image|background-position|background-repeat|'
r'background|border-bottom-color|border-bottom-style|'
r'border-bottom-width|border-left-color|border-left-style|'
r'border-left-width|border-right|border-right-color|'
r'border-right-style|border-right-width|border-top-color|'
r'border-top-style|border-top-width|border-bottom|'
r'border-collapse|border-left|border-width|border-color|'
r'border-spacing|border-style|border-top|border|caption-side|'
r'clear|clip|color|content|counter-increment|counter-reset|'
r'cue-after|cue-before|cue|cursor|direction|display|'
r'elevation|empty-cells|float|font-family|font-size|'
r'font-size-adjust|font-stretch|font-style|font-variant|'
r'font-weight|font|height|letter-spacing|line-height|'
r'list-style-type|list-style-image|list-style-position|'
r'list-style|margin-bottom|margin-left|margin-right|'
r'margin-top|margin|marker-offset|marks|max-height|max-width|'
r'min-height|min-width|opacity|orphans|outline|outline-color|'
r'outline-style|outline-width|overflow(?:-x|-y)?|padding-bottom|'
r'padding-left|padding-right|padding-top|padding|page|'
r'page-break-after|page-break-before|page-break-inside|'
r'pause-after|pause-before|pause|pitch|pitch-range|'
r'play-during|position|quotes|richness|right|size|'
r'speak-header|speak-numeral|speak-punctuation|speak|'
r'speech-rate|stress|table-layout|text-align|text-decoration|'
r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
r'vertical-align|visibility|voice-family|volume|white-space|'
r'widows|width|word-spacing|z-index|bottom|left|'
r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
r'behind|below|bidi-override|blink|block|bold|bolder|both|'
r'capitalize|center-left|center-right|center|circle|'
r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
r'inherit|inline-table|inline|inset|inside|invert|italic|'
r'justify|katakana-iroha|katakana|landscape|larger|large|'
r'left-side|leftwards|level|lighter|line-through|list-item|'
r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
r'lower|low|medium|message-box|middle|mix|monospace|'
r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
r'open-quote|outset|outside|overline|pointer|portrait|px|'
r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
r'rightwards|s-resize|sans-serif|scroll|se-resize|'
r'semi-condensed|semi-expanded|separate|serif|show|silent|'
r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
r'spell-out|square|static|status-bar|super|sw-resize|'
r'table-caption|table-cell|table-column|table-column-group|'
r'table-footer-group|table-header-group|table-row|'
r'table-row-group|text|text-bottom|text-top|thick|thin|'
r'transparent|ultra-condensed|ultra-expanded|underline|'
r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Keyword),
(r'(indigo|gold|firebrick|indianred|yellow|darkolivegreen|'
r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
r'mediumslateblue|black|springgreen|crimson|lightsalmon|brown|'
r'turquoise|olivedrab|cyan|silver|skyblue|gray|darkturquoise|'
r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|teal|'
r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
r'violet|navy|orchid|blue|ghostwhite|honeydew|cornflowerblue|'
r'darkblue|darkkhaki|mediumpurple|cornsilk|red|bisque|slategray|'
r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
r'gainsboro|mediumturquoise|floralwhite|coral|purple|lightgrey|'
r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
r'lightcoral|orangered|navajowhite|lime|palegreen|burlywood|'
r'seashell|mediumspringgreen|fuchsia|papayawhip|blanchedalmond|'
r'peru|aquamarine|white|darkslategray|ivory|dodgerblue|'
r'lemonchiffon|chocolate|orange|forestgreen|slateblue|olive|'
r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
r'plum|aqua|darkgoldenrod|maroon|sandybrown|magenta|tan|'
r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
r'lightyellow|lavenderblush|linen|mediumaquamarine|green|'
r'blueviolet|peachpuff)\b', Name.Builtin),
(r'\!important', Comment.Preproc),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'\#[a-zA-Z0-9]{1,6}', Number),
(r'[\.-]?[0-9]*[\.]?[0-9]+(em|px|\%|pt|pc|in|mm|cm|ex|s)\b', Number),
(r'-?[0-9]+', Number),
(r'[~\^\*!%&<>\|+=@:,./?-]+', Operator),
(r'[\[\]();]+', Punctuation),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name)
]
}
class ObjectiveJLexer(RegexLexer):
"""
For Objective-J source code with preprocessor directives.
*New in Pygments 1.3.*
"""
name = 'Objective-J'
aliases = ['objective-j', 'objectivej', 'obj-j', 'objj']
filenames = ['*.j']
mimetypes = ['text/x-objective-j']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)*'
flags = re.DOTALL | re.MULTILINE
tokens = {
'root': [
include('whitespace'),
# function definition
(r'^(' + _ws + r'[\+-]' + _ws + r')([\(a-zA-Z_].*?[^\(])(' + _ws + '{)',
bygroups(using(this), using(this, state='function_signature'),
using(this))),
# class definition
(r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
'classname'),
(r'(@class|@protocol)(\s*)', bygroups(Keyword, Text),
'forward_classname'),
(r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)),
include('statements'),
('[{\(\)}]', Punctuation),
(';', Punctuation),
],
'whitespace': [
(r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)',
bygroups(Comment.Preproc, Text, String.Double)),
(r'#if\s+0', Comment.Preproc, 'if0'),
(r'#', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'<!--', Comment),
],
'slashstartsregex': [
include('whitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
(r'', Text, '#pop'),
],
'badregex': [
(r'\n', Text, '#pop'),
],
'statements': [
(r'(L|@)?"', String, 'string'),
(r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?',
Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|'
r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'prototype|__proto__)\b', Keyword, 'slashstartsregex'),
(r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(@selector|@private|@protected|@public|@encode|'
r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword),
(r'(int|long|float|short|double|char|unsigned|signed|void|'
r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b',
Keyword.Type),
(r'(self|super)\b', Name.Builtin),
(r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|'
r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|'
r'SQRT2)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(r'([$a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r')(?=\()',
bygroups(Name.Function, using(this))),
(r'[$a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'classname' : [
# interface definition that inherits
(r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r':' + _ws +
r')([a-zA-Z_][a-zA-Z0-9_]*)?',
bygroups(Name.Class, using(this), Name.Class), '#pop'),
# interface definition for a category
(r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r'\()([a-zA-Z_][a-zA-Z0-9_]*)(\))',
bygroups(Name.Class, using(this), Name.Label, Text), '#pop'),
# simple interface / implementation
(r'([a-zA-Z_][a-zA-Z0-9_]*)', Name.Class, '#pop'),
],
'forward_classname' : [
(r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*,\s*)',
bygroups(Name.Class, Text), '#push'),
(r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*;?)',
bygroups(Name.Class, Text), '#pop'),
],
'function_signature': [
include('whitespace'),
# start of a selector w/ parameters
(r'(\(' + _ws + r')' # open paren
r'([a-zA-Z_][a-zA-Z0-9_]+)' # return type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)', # function name
bygroups(using(this), Keyword.Type, using(this),
Name.Function), 'function_parameters'),
# no-param function
(r'(\(' + _ws + r')' # open paren
r'([a-zA-Z_][a-zA-Z0-9_]+)' # return type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_][a-zA-Z0-9_]+)', # function name
bygroups(using(this), Keyword.Type, using(this),
Name.Function), "#pop"),
# no return type given, start of a selector w/ parameters
(r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)', # function name
bygroups (Name.Function), 'function_parameters'),
# no return type given, no-param function
(r'([$a-zA-Z_][a-zA-Z0-9_]+)', # function name
bygroups(Name.Function), "#pop"),
('', Text, '#pop'),
],
'function_parameters': [
include('whitespace'),
# parameters
(r'(\(' + _ws + ')' # open paren
r'([^\)]+)' # type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_][a-zA-Z0-9_]+)', # param name
bygroups(using(this), Keyword.Type, using(this), Text)),
# one piece of a selector name
(r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)', # function name
Name.Function),
# smallest possible selector piece
(r'(:)', Name.Function),
# var args
(r'(,' + _ws + r'\.\.\.)', using(this)),
# param name
(r'([$a-zA-Z_][a-zA-Z0-9_]+)', Text),
],
'expression' : [
(r'([$a-zA-Z_][a-zA-Z0-9_]*)(\()', bygroups(Name.Function,
Punctuation)),
(r'(\))', Punctuation, "#pop"),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
def analyse_text(text):
if re.search('^\s*@import\s+[<"]', text, re.MULTILINE):
# special directive found in most Objective-J files
return True
return False
class HtmlLexer(RegexLexer):
"""
    For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS are highlighted
    by the appropriate lexers.
"""
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
mimetypes = ['text/html', 'application/xhtml+xml']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
(r'<\s*style\s*', Name.Tag, ('style-content', 'tag')),
(r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
(r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
(r'[a-zA-Z0-9_:-]+', Name.Attribute),
(r'/?\s*>', Name.Tag, '#pop'),
],
'script-content': [
(r'<\s*/\s*script\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
],
'style-content': [
(r'<\s*/\s*style\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if html_doctype_matches(text):
return 0.5
class PhpLexer(RegexLexer):
"""
For `PHP <http://www.php.net/>`_ source code.
For PHP embedded in HTML, use the `HtmlPhpLexer`.
Additional options accepted:
`startinline`
If given and ``True`` the lexer starts highlighting with
php code (i.e.: no starting ``<?php`` required). The default
is ``False``.
`funcnamehighlighting`
If given and ``True``, highlight builtin function names
(default: ``True``).
`disabledmodules`
If given, must be a list of module names whose function names
should not be highlighted. By default all modules are highlighted
except the special ``'unknown'`` module that includes functions
that are known to php but are undocumented.
        To get a list of allowed modules, have a look into the
`_phpbuiltins` module:
.. sourcecode:: pycon
>>> from pygments.lexers._phpbuiltins import MODULES
>>> MODULES.keys()
['PHP Options/Info', 'Zip', 'dba', ...]
In fact the names of those modules match the module names from
the php documentation.
"""
name = 'PHP'
aliases = ['php', 'php3', 'php4', 'php5']
filenames = ['*.php', '*.php[345]']
mimetypes = ['text/x-php']
flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'<\?(php)?', Comment.Preproc, 'php'),
(r'[^<]+', Other),
(r'<', Other)
],
'php': [
(r'\?>', Comment.Preproc, '#pop'),
(r'<<<(\'?)([a-zA-Z_][a-zA-Z0-9_]*)\1\n.*?\n\2\;?\n', String),
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(r'//.*?\n', Comment.Single),
# put the empty comment here, it is otherwise seen as
# the start of a docstring
(r'/\*\*/', Comment.Multiline),
(r'/\*\*.*?\*/', String.Doc),
(r'/\*.*?\*/', Comment.Multiline),
(r'(->|::)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Operator, Text, Name.Attribute)),
(r'[~!%^&*+=|:.<>/?@-]+', Operator),
(r'[\[\]{}();,]+', Punctuation),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
(r'(function)(\s+)(&?)(\s*)',
bygroups(Keyword, Text, Operator, Text), 'functionname'),
(r'(const)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Keyword, Text, Name.Constant)),
(r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
r'FALSE|print|for|require|continue|foreach|require_once|'
r'declare|return|default|static|do|switch|die|stdClass|'
r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
r'virtual|endfor|include_once|while|endforeach|global|__FILE__|'
r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
r'implements|public|private|protected|abstract|clone|try|'
r'catch|throw|this|use|namespace|trait)\b', Keyword),
(r'(true|false|null)\b', Keyword.Constant),
(r'\$\{\$+[a-zA-Z_][a-zA-Z0-9_]*\}', Name.Variable),
(r'\$+[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable),
(r'[\\a-zA-Z_][\\a-zA-Z0-9_]*', Name.Other),
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0[0-7]+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer),
(r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
(r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
(r'"', String.Double, 'string'),
],
'classname': [
(r'[a-zA-Z_][\\a-zA-Z0-9_]*', Name.Class, '#pop')
],
'functionname': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
],
'string': [
(r'"', String.Double, '#pop'),
(r'[^{$"\\]+', String.Double),
(r'\\([nrt\"$\\]|[0-7]{1,3}|x[0-9A-Fa-f]{1,2})', String.Escape),
(r'\$[a-zA-Z_][a-zA-Z0-9_]*(\[\S+\]|->[a-zA-Z_][a-zA-Z0-9_]*)?',
String.Interpol),
(r'(\{\$\{)(.*?)(\}\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\{)(\$.*?)(\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\$\{)(\S+)(\})',
bygroups(String.Interpol, Name.Variable, String.Interpol)),
(r'[${\\]+', String.Double)
],
}
def __init__(self, **options):
self.funcnamehighlighting = get_bool_opt(
options, 'funcnamehighlighting', True)
self.disabledmodules = get_list_opt(
options, 'disabledmodules', ['unknown'])
self.startinline = get_bool_opt(options, 'startinline', False)
# private option argument for the lexer itself
if '_startinline' in options:
self.startinline = options.pop('_startinline')
# collect activated functions in a set
self._functions = set()
if self.funcnamehighlighting:
from pygments.lexers._phpbuiltins import MODULES
for key, value in MODULES.iteritems():
if key not in self.disabledmodules:
self._functions.update(value)
RegexLexer.__init__(self, **options)
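    # Post-process the token stream: any Name.Other token whose text matches a
    # builtin function collected above from _phpbuiltins.MODULES is re-emitted
    # as Name.Builtin, so calls to builtins get distinct highlighting.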
def get_tokens_unprocessed(self, text):
stack = ['root']
if self.startinline:
stack.append('php')
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Other:
if value in self._functions:
yield index, Name.Builtin, value
continue
yield index, token, value
def analyse_text(text):
rv = 0.0
if re.search(r'<\?(?!xml)', text):
rv += 0.3
if '?>' in text:
rv += 0.1
return rv
class DtdLexer(RegexLexer):
"""
A lexer for DTDs (Document Type Definitions).
*New in Pygments 1.5.*
"""
flags = re.MULTILINE | re.DOTALL
name = 'DTD'
aliases = ['dtd']
filenames = ['*.dtd']
mimetypes = ['application/xml-dtd']
tokens = {
'root': [
include('common'),
(r'(<!ELEMENT)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'element'),
(r'(<!ATTLIST)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'attlist'),
(r'(<!ENTITY)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Entity), 'entity'),
(r'(<!NOTATION)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'notation'),
(r'(<!\[)([^\[\s]+)(\s*)(\[)', # conditional sections
bygroups(Keyword, Name.Entity, Text, Keyword)),
(r'(<!DOCTYPE)(\s+)([^>\s]+)',
bygroups(Keyword, Text, Name.Tag)),
(r'PUBLIC|SYSTEM', Keyword.Constant),
(r'[\[\]>]', Keyword),
],
'common': [
(r'\s+', Text),
(r'(%|&)[^;]*;', Name.Entity),
('<!--', Comment, 'comment'),
(r'[(|)*,?+]', Operator),
(r'"[^"]*"', String.Double),
(r'\'[^\']*\'', String.Single),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'element': [
include('common'),
(r'EMPTY|ANY|#PCDATA', Keyword.Constant),
(r'[^>\s\|()?+*,]+', Name.Tag),
(r'>', Keyword, '#pop'),
],
'attlist': [
include('common'),
(r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION', Keyword.Constant),
(r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
(r'xml:space|xml:lang', Keyword.Reserved),
(r'[^>\s\|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
'entity': [
include('common'),
(r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
(r'[^>\s\|()?+*,]+', Name.Entity),
(r'>', Keyword, '#pop'),
],
'notation': [
include('common'),
(r'SYSTEM|PUBLIC', Keyword.Constant),
(r'[^>\s\|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
}
def analyse_text(text):
if not looks_like_xml(text) and \
('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
return 0.8
class XmlLexer(RegexLexer):
"""
Generic lexer for XML (eXtensible Markup Language).
"""
flags = re.MULTILINE | re.DOTALL | re.UNICODE
name = 'XML'
aliases = ['xml']
filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl']
mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
'application/rss+xml', 'application/atom+xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if looks_like_xml(text):
return 0.5
class XsltLexer(XmlLexer):
'''
A lexer for XSLT.
*New in Pygments 0.10.*
'''
name = 'XSLT'
aliases = ['xslt']
filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc
mimetypes = ['application/xsl+xml', 'application/xslt+xml']
EXTRA_KEYWORDS = set([
'apply-imports', 'apply-templates', 'attribute',
'attribute-set', 'call-template', 'choose', 'comment',
'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
'for-each', 'if', 'import', 'include', 'key', 'message',
'namespace-alias', 'number', 'otherwise', 'output', 'param',
'preserve-space', 'processing-instruction', 'sort',
'strip-space', 'stylesheet', 'template', 'text', 'transform',
'value-of', 'variable', 'when', 'with-param'
])
def get_tokens_unprocessed(self, text):
for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
m = re.match('</?xsl:([^>]*)/?>?', value)
if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
yield index, Keyword, value
else:
yield index, token, value
def analyse_text(text):
if looks_like_xml(text) and '<xsl' in text:
return 0.8
class MxmlLexer(RegexLexer):
"""
For MXML markup.
Nested AS3 in <script> tags is highlighted by the appropriate lexer.
*New in Pygments 1.1.*
"""
flags = re.MULTILINE | re.DOTALL
name = 'MXML'
aliases = ['mxml']
filenames = ['*.mxml']
    mimetypes = ['text/xml', 'application/xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
bygroups(String, using(ActionScript3Lexer), String)),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[a-zA-Z0-9:._-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[a-zA-Z0-9:._-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[a-zA-Z0-9_.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
class HaxeLexer(RegexLexer):
"""
For haXe source code (http://haxe.org/).
*New in Pygments 1.3.*
"""
name = 'haXe'
aliases = ['hx', 'haXe']
filenames = ['*.hx']
mimetypes = ['text/haxe']
ident = r'(?:[a-zA-Z_][a-zA-Z0-9_]*)'
typeid = r'(?:(?:[a-z0-9_\.])*[A-Z_][A-Za-z0-9_]*)'
key_prop = r'(?:default|null|never)'
key_decl_mod = r'(?:public|private|override|static|inline|extern|dynamic)'
flags = re.DOTALL | re.MULTILINE
tokens = {
'root': [
include('whitespace'),
include('comments'),
(key_decl_mod, Keyword.Declaration),
include('enumdef'),
include('typedef'),
include('classdef'),
include('imports'),
],
# General constructs
'comments': [
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'#[^\n]*', Comment.Preproc),
],
'whitespace': [
include('comments'),
(r'\s+', Text),
],
'codekeywords': [
(r'\b(if|else|while|do|for|in|break|continue|'
r'return|switch|case|try|catch|throw|null|trace|'
r'new|this|super|untyped|cast|callback|here)\b',
Keyword.Reserved),
],
'literals': [
(r'0[xX][0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r'~/([^\n])*?/[gisx]*', String.Regex),
(r'\b(true|false|null)\b', Keyword.Constant),
],
'codeblock': [
include('whitespace'),
include('new'),
include('case'),
include('anonfundef'),
include('literals'),
include('vardef'),
include('codekeywords'),
(r'[();,\[\]]', Punctuation),
(r'(?:=|\+=|-=|\*=|/=|%=|&=|\|=|\^=|<<=|>>=|>>>=|\|\||&&|'
r'\.\.\.|==|!=|>|<|>=|<=|\||&|\^|<<|>>>|>>|\+|\-|\*|/|%|'
r'!|\+\+|\-\-|~|\.|\?|\:)',
Operator),
(ident, Name),
(r'}', Punctuation,'#pop'),
(r'{', Punctuation,'#push'),
],
# Instance/Block level constructs
'propertydef': [
(r'(\()(' + key_prop + ')(,)(' + key_prop + ')(\))',
bygroups(Punctuation, Keyword.Reserved, Punctuation,
Keyword.Reserved, Punctuation)),
],
'new': [
(r'\bnew\b', Keyword, 'typedecl'),
],
'case': [
(r'\b(case)(\s+)(' + ident + ')(\s*)(\()',
bygroups(Keyword.Reserved, Text, Name, Text, Punctuation),
'funargdecl'),
],
'vardef': [
(r'\b(var)(\s+)(' + ident + ')',
bygroups(Keyword.Declaration, Text, Name.Variable), 'vardecl'),
],
'vardecl': [
include('whitespace'),
include('typelabel'),
(r'=', Operator,'#pop'),
(r';', Punctuation,'#pop'),
],
'instancevardef': [
(key_decl_mod,Keyword.Declaration),
(r'\b(var)(\s+)(' + ident + ')',
bygroups(Keyword.Declaration, Text, Name.Variable.Instance),
'instancevardecl'),
],
'instancevardecl': [
include('vardecl'),
include('propertydef'),
],
'anonfundef': [
(r'\bfunction\b', Keyword.Declaration, 'fundecl'),
],
'instancefundef': [
(key_decl_mod, Keyword.Declaration),
(r'\b(function)(\s+)(' + ident + ')',
bygroups(Keyword.Declaration, Text, Name.Function), 'fundecl'),
],
'fundecl': [
include('whitespace'),
include('typelabel'),
include('generictypedecl'),
(r'\(',Punctuation,'funargdecl'),
(r'(?=[a-zA-Z0-9_])',Text,'#pop'),
(r'{',Punctuation,('#pop','codeblock')),
(r';',Punctuation,'#pop'),
],
'funargdecl': [
include('whitespace'),
(ident, Name.Variable),
include('typelabel'),
include('literals'),
(r'=', Operator),
(r',', Punctuation),
(r'\?', Punctuation),
(r'\)', Punctuation, '#pop'),
],
'typelabel': [
(r':', Punctuation, 'type'),
],
'typedecl': [
include('whitespace'),
(typeid, Name.Class),
(r'<', Punctuation, 'generictypedecl'),
(r'(?=[{}()=,a-z])', Text,'#pop'),
],
'type': [
include('whitespace'),
(typeid, Name.Class),
(r'<', Punctuation, 'generictypedecl'),
(r'->', Keyword.Type),
(r'(?=[{}(),;=])', Text, '#pop'),
],
'generictypedecl': [
include('whitespace'),
(typeid, Name.Class),
(r'<', Punctuation, '#push'),
(r'>', Punctuation, '#pop'),
(r',', Punctuation),
],
# Top level constructs
'imports': [
(r'(package|import|using)(\s+)([^;]+)(;)',
bygroups(Keyword.Namespace, Text, Name.Namespace,Punctuation)),
],
'typedef': [
(r'typedef', Keyword.Declaration, ('typedefprebody', 'typedecl')),
],
'typedefprebody': [
include('whitespace'),
(r'(=)(\s*)({)', bygroups(Punctuation, Text, Punctuation),
('#pop', 'typedefbody')),
],
'enumdef': [
(r'enum', Keyword.Declaration, ('enumdefprebody', 'typedecl')),
],
'enumdefprebody': [
include('whitespace'),
(r'{', Punctuation, ('#pop','enumdefbody')),
],
'classdef': [
(r'class', Keyword.Declaration, ('classdefprebody', 'typedecl')),
],
'classdefprebody': [
include('whitespace'),
(r'(extends|implements)', Keyword.Declaration,'typedecl'),
(r'{', Punctuation, ('#pop', 'classdefbody')),
],
'interfacedef': [
(r'interface', Keyword.Declaration,
('interfacedefprebody', 'typedecl')),
],
'interfacedefprebody': [
include('whitespace'),
(r'(extends)', Keyword.Declaration, 'typedecl'),
(r'{', Punctuation, ('#pop', 'classdefbody')),
],
'typedefbody': [
include('whitespace'),
include('instancevardef'),
include('instancefundef'),
(r'>', Punctuation, 'typedecl'),
(r',', Punctuation),
(r'}', Punctuation, '#pop'),
],
'enumdefbody': [
include('whitespace'),
(ident, Name.Variable.Instance),
(r'\(', Punctuation, 'funargdecl'),
(r';', Punctuation),
(r'}', Punctuation, '#pop'),
],
'classdefbody': [
include('whitespace'),
include('instancevardef'),
include('instancefundef'),
(r'}', Punctuation, '#pop'),
include('codeblock'),
],
}
def analyse_text(text):
if re.match(r'\w+\s*:\s*\w', text): return 0.3
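# Helper callback shared by the indentation-based lexers below (Haml and
# friends): it emits a line's leading whitespace, remembers it on the lexer
# context, and either re-enters a pending block state (when the line is
# indented deeper than the block opener) or falls back to 'content'.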
def _indentation(lexer, match, ctx):
indentation = match.group(0)
yield match.start(), Text, indentation
ctx.last_indentation = indentation
ctx.pos = match.end()
if hasattr(ctx, 'block_state') and ctx.block_state and \
indentation.startswith(ctx.block_indentation) and \
indentation != ctx.block_indentation:
ctx.stack.append(ctx.block_state)
else:
ctx.block_state = None
ctx.block_indentation = None
ctx.stack.append('content')
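# Factory for a callback that opens an indented block: it emits the matched
# text with `token`, records the current indentation as the block's baseline
# and arms `state`, so that subsequent deeper-indented lines (handled by
# _indentation above) are lexed in that block state.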
def _starts_block(token, state):
def callback(lexer, match, ctx):
yield match.start(), token, match.group(0)
if hasattr(ctx, 'last_indentation'):
ctx.block_indentation = ctx.last_indentation
else:
ctx.block_indentation = ''
ctx.block_state = state
ctx.pos = match.end()
return callback
class HamlLexer(ExtendedRegexLexer):
"""
For Haml markup.
*New in Pygments 1.3.*
"""
name = 'Haml'
aliases = ['haml', 'HAML']
filenames = ['*.haml']
mimetypes = ['text/x-haml']
flags = re.IGNORECASE
# Haml can include " |\n" anywhere,
# which is ignored and used to wrap long lines.
    # To accommodate this, use this custom faux dot instead.
_dot = r'(?: \|\n(?=.* \|)|.)'
# In certain places, a comma at the end of the line
# allows line wrapping as well.
_comma_dot = r'(?:,\s*\n|' + _dot + ')'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
(r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'root'),
(r'', Text, 'plain'),
],
'content': [
include('css'),
(r'%[a-z0-9_:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'haml-comment-block'), '#pop'),
(r'(-)(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(RubyLexer)),
(r'\[' + _dot + '*?\]', using(RubyLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[a-z0-9_:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'[a-z0-9_]+', Name.Variable, '#pop'),
(r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
(r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'haml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
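# Token states shared by the Sass and SCSS lexers defined later in this module;
# they cover property values, color and keyword constants, numbers, strings,
# interpolation and operators.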
common_sass_tokens = {
'value': [
(r'[ \t]+', Text),
(r'[!$][\w-]+', Name.Variable),
(r'url\(', String.Other, 'string-url'),
(r'[a-z_-][\w-]*(?=\()', Name.Function),
(r'(azimuth|background-attachment|background-color|'
r'background-image|background-position|background-repeat|'
r'background|border-bottom-color|border-bottom-style|'
r'border-bottom-width|border-left-color|border-left-style|'
r'border-left-width|border-right|border-right-color|'
r'border-right-style|border-right-width|border-top-color|'
r'border-top-style|border-top-width|border-bottom|'
r'border-collapse|border-left|border-width|border-color|'
r'border-spacing|border-style|border-top|border|caption-side|'
r'clear|clip|color|content|counter-increment|counter-reset|'
r'cue-after|cue-before|cue|cursor|direction|display|'
r'elevation|empty-cells|float|font-family|font-size|'
r'font-size-adjust|font-stretch|font-style|font-variant|'
r'font-weight|font|height|letter-spacing|line-height|'
r'list-style-type|list-style-image|list-style-position|'
r'list-style|margin-bottom|margin-left|margin-right|'
r'margin-top|margin|marker-offset|marks|max-height|max-width|'
r'min-height|min-width|opacity|orphans|outline|outline-color|'
r'outline-style|outline-width|overflow|padding-bottom|'
r'padding-left|padding-right|padding-top|padding|page|'
r'page-break-after|page-break-before|page-break-inside|'
r'pause-after|pause-before|pause|pitch|pitch-range|'
r'play-during|position|quotes|richness|right|size|'
r'speak-header|speak-numeral|speak-punctuation|speak|'
r'speech-rate|stress|table-layout|text-align|text-decoration|'
r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
r'vertical-align|visibility|voice-family|volume|white-space|'
r'widows|width|word-spacing|z-index|bottom|left|'
r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
r'behind|below|bidi-override|blink|block|bold|bolder|both|'
r'capitalize|center-left|center-right|center|circle|'
r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
r'inherit|inline-table|inline|inset|inside|invert|italic|'
r'justify|katakana-iroha|katakana|landscape|larger|large|'
r'left-side|leftwards|level|lighter|line-through|list-item|'
r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
r'lower|low|medium|message-box|middle|mix|monospace|'
r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
r'open-quote|outset|outside|overline|pointer|portrait|px|'
r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
r'rightwards|s-resize|sans-serif|scroll|se-resize|'
r'semi-condensed|semi-expanded|separate|serif|show|silent|'
r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
r'spell-out|square|static|status-bar|super|sw-resize|'
r'table-caption|table-cell|table-column|table-column-group|'
r'table-footer-group|table-header-group|table-row|'
r'table-row-group|text|text-bottom|text-top|thick|thin|'
r'transparent|ultra-condensed|ultra-expanded|underline|'
r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Name.Constant),
(r'(indigo|gold|firebrick|indianred|darkolivegreen|'
r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
r'mediumslateblue|springgreen|crimson|lightsalmon|brown|'
r'turquoise|olivedrab|cyan|skyblue|darkturquoise|'
r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|'
r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
r'violet|orchid|ghostwhite|honeydew|cornflowerblue|'
r'darkblue|darkkhaki|mediumpurple|cornsilk|bisque|slategray|'
r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
r'gainsboro|mediumturquoise|floralwhite|coral|lightgrey|'
r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
r'lightcoral|orangered|navajowhite|palegreen|burlywood|'
r'seashell|mediumspringgreen|papayawhip|blanchedalmond|'
r'peru|aquamarine|darkslategray|ivory|dodgerblue|'
r'lemonchiffon|chocolate|orange|forestgreen|slateblue|'
r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
r'plum|darkgoldenrod|sandybrown|magenta|tan|'
r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
r'lightyellow|lavenderblush|linen|mediumaquamarine|'
r'blueviolet|peachpuff)\b', Name.Entity),
(r'(black|silver|gray|white|maroon|red|purple|fuchsia|green|'
r'lime|olive|yellow|navy|blue|teal|aqua)\b', Name.Builtin),
(r'\!(important|default)', Name.Exception),
(r'(true|false)', Name.Pseudo),
(r'(and|or|not)', Operator.Word),
(r'/\*', Comment.Multiline, 'inline-comment'),
(r'//[^\n]*', Comment.Single),
(r'\#[a-z0-9]{1,6}', Number.Hex),
(r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
(r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
(r'#{', String.Interpol, 'interpolation'),
(r'[~\^\*!&%<>\|+=@:,./?-]+', Operator),
(r'[\[\]()]+', Punctuation),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
(r'[a-z_-][\w-]*', Name),
],
'interpolation': [
(r'\}', String.Interpol, '#pop'),
include('value'),
],
'selector': [
(r'[ \t]+', Text),
(r'\:', Name.Decorator, 'pseudo-class'),
(r'\.', Name.Class, 'class'),
(r'\#', Name.Namespace, 'id'),
(r'[a-zA-Z0-9_-]+', Name.Tag),
(r'#\{', String.Interpol, 'interpolation'),
(r'&', Keyword),
(r'[~\^\*!&\[\]\(\)<>\|+=@:;,./?-]', Operator),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
],
'string-double': [
(r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r'"', String.Double, '#pop'),
],
'string-single': [
(r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r"'", String.Double, '#pop'),
],
'string-url': [
(r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
(r'#\{', String.Interpol, 'interpolation'),
(r'\)', String.Other, '#pop'),
],
'pseudo-class': [
(r'[\w-]+', Name.Decorator),
(r'#\{', String.Interpol, 'interpolation'),
(r'', Text, '#pop'),
],
'class': [
(r'[\w-]+', Name.Class),
(r'#\{', String.Interpol, 'interpolation'),
(r'', Text, '#pop'),
],
'id': [
(r'[\w-]+', Name.Namespace),
(r'#\{', String.Interpol, 'interpolation'),
(r'', Text, '#pop'),
],
'for': [
(r'(from|to|through)', Operator.Word),
include('value'),
],
}
class SassLexer(ExtendedRegexLexer):
"""
For Sass stylesheets.
*New in Pygments 1.3.*
"""
name = 'Sass'
aliases = ['sass', 'SASS']
filenames = ['*.sass']
mimetypes = ['text/x-sass']
flags = re.IGNORECASE
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'content': [
(r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
'root'),
(r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
'root'),
(r'@import', Keyword, 'import'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'@[a-z0-9_-]+', Keyword, 'selector'),
(r'=[\w-]+', Name.Function, 'value'),
(r'\+[\w-]+', Name.Decorator, 'value'),
(r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))',
bygroups(Name.Variable, Operator), 'value'),
(r':', Name.Attribute, 'old-style-attr'),
(r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'),
(r'', Text, 'selector'),
],
'single-comment': [
(r'.+', Comment.Single),
(r'\n', Text, 'root'),
],
'multi-comment': [
(r'.+', Comment.Multiline),
(r'\n', Text, 'root'),
],
'import': [
(r'[ \t]+', Text),
(r'\S+', String),
(r'\n', Text, 'root'),
],
'old-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#{', String.Interpol, 'interpolation'),
(r'[ \t]*=', Operator, 'value'),
(r'', Text, 'value'),
],
'new-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#{', String.Interpol, 'interpolation'),
(r'[ \t]*[=:]', Operator, 'value'),
],
'inline-comment': [
(r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
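    # The loop below copies each shared Sass/SCSS token table defined in
    # common_sass_tokens into this lexer, and the two appends add the
    # newline rules: in the indentation-based Sass syntax a value or
    # selector ends at the end of the line, returning to 'root'.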
for group, common in common_sass_tokens.iteritems():
tokens[group] = copy.copy(common)
tokens['value'].append((r'\n', Text, 'root'))
tokens['selector'].append((r'\n', Text, 'root'))
class ScssLexer(RegexLexer):
"""
For SCSS stylesheets.
"""
name = 'SCSS'
aliases = ['scss']
filenames = ['*.scss']
mimetypes = ['text/x-scss']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@import', Keyword, 'value'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'@[a-z0-9_-]+', Keyword, 'selector'),
(r'(\$[\w-]\w*)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
(r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
(r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
(r'', Text, 'selector'),
],
'attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#{', String.Interpol, 'interpolation'),
(r'[ \t]*:', Operator, 'value'),
],
'inline-comment': [
(r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
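    # Reuse the same shared token tables as SassLexer; SCSS is brace- and
    # semicolon-delimited, so values and selectors end on ';', '{' or '}'
    # (returning to 'root') rather than at newlines.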
for group, common in common_sass_tokens.iteritems():
tokens[group] = copy.copy(common)
tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
class CoffeeScriptLexer(RegexLexer):
"""
For `CoffeeScript`_ source code.
.. _CoffeeScript: http://coffeescript.org
*New in Pygments 1.3.*
"""
name = 'CoffeeScript'
aliases = ['coffee-script', 'coffeescript']
filenames = ['*.coffee']
mimetypes = ['text/coffeescript']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'###[^#].*?###', Comment.Multiline),
(r'#(?!##[^#]).*?\n', Comment.Single),
],
'multilineregex': [
(r'[^/#]+', String.Regex),
(r'///([gim]+\b|\B)', String.Regex, '#pop'),
(r'#{', String.Interpol, 'interpoling_string'),
(r'[/#]', String.Regex),
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'///', String.Regex, ('#pop', 'multilineregex')),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'', Text, '#pop'),
],
'root': [
# this next expr leads to infinite loops root -> slashstartsregex
#(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
r'=(?!>)|-(?!>)|[<>+*`%&\|\^/])=?',
Operator, 'slashstartsregex'),
(r'(?:\([^()]+\))?\s*[=-]>', Name.Function),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(?<![\.\$])(for|own|in|of|while|until|'
r'loop|break|return|continue|'
r'switch|when|then|if|unless|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
r'extends|this|class|by)\b', Keyword, 'slashstartsregex'),
(r'(?<![\.\$])(true|false|yes|no|on|off|null|'
r'NaN|Infinity|undefined)\b',
Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
Name.Builtin),
(r'[$a-zA-Z_][a-zA-Z0-9_\.:\$]*\s*[:=]\s', Name.Variable,
'slashstartsregex'),
(r'@[$a-zA-Z_][a-zA-Z0-9_\.:\$]*\s*[:=]\s', Name.Variable.Instance,
'slashstartsregex'),
(r'@', Name.Other, 'slashstartsregex'),
(r'@?[$a-zA-Z_][a-zA-Z0-9_\$]*', Name.Other, 'slashstartsregex'),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
],
'strings': [
(r'[^#\\\'"]+', String),
# note that all coffee script strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string' : [
(r'}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
            (r'\\.|\'', String), # double-quoted strings don't need ' escapes
(r'#{', String.Interpol, "interpoling_string"),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r'#|\\.|"', String), # single quoted strings don't need " escapses
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-string
(r'#{', String.Interpol, "interpoling_string"),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
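# Illustrative sketch (the sample source and helper name are assumptions for
# demonstration, not part of the original module): it exercises the 'dqs' and
# 'interpoling_string' states above, which emit String.Interpol for the
# "#{" ... "}" delimiters and re-lex the enclosed expression via 'root'.
def _coffeescript_interpolation_example():
    lexer = CoffeeScriptLexer()
    sample = 'greeting = "hello #{user.name}!"'
    # get_tokens() yields (token_type, value) pairs for the whole sample
    return [(token, value) for token, value in lexer.get_tokens(sample)]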
class LiveScriptLexer(RegexLexer):
"""
For `LiveScript`_ source code.
.. _LiveScript: http://gkz.github.com/LiveScript/
New in Pygments 1.6.
"""
name = 'LiveScript'
aliases = ['live-script', 'livescript']
filenames = ['*.ls']
mimetypes = ['text/livescript']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'/\*.*?\*/', Comment.Multiline),
(r'#.*?\n', Comment.Single),
],
'multilineregex': [
include('commentsandwhitespace'),
(r'//([gim]+\b|\B)', String.Regex, '#pop'),
(r'/', String.Regex),
(r'[^/#]+', String.Regex)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'//', String.Regex, ('#pop', 'multilineregex')),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'', Text, '#pop'),
],
'root': [
# this next expr leads to infinite loops root -> slashstartsregex
#(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'(?:\([^()]+\))?[ ]*[~-]{1,2}>|'
r'(?:\(?[^()\n]+\)?)?[ ]*<[~-]{1,2}', Name.Function),
(r'\+\+|&&|(?<![\.\$])\b(?:and|x?or|is|isnt|not)\b|\?|:|=|'
r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
r'~(?!\~?>)|-(?!\-?>)|<(?!\[)|(?<!\])>|'
r'[+*`%&\|\^/])=?',
Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(?<![\.\$])(for|own|in|of|while|until|loop|break|'
r'return|continue|switch|when|then|if|unless|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
r'extends|this|class|by|const|var|to|til)\b', Keyword,
'slashstartsregex'),
(r'(?<![\.\$])(true|false|yes|no|on|off|'
r'null|NaN|Infinity|undefined|void)\b',
Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
Name.Builtin),
(r'[$a-zA-Z_][a-zA-Z0-9_\.\-:\$]*\s*[:=]\s', Name.Variable,
'slashstartsregex'),
(r'@[$a-zA-Z_][a-zA-Z0-9_\.\-:\$]*\s*[:=]\s', Name.Variable.Instance,
'slashstartsregex'),
(r'@', Name.Other, 'slashstartsregex'),
(r'@?[$a-zA-Z_][a-zA-Z0-9_\-]*', Name.Other, 'slashstartsregex'),
(r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?(?:[a-zA-Z_]+)?', Number.Float),
(r'[0-9]+(~[0-9a-z]+)?(?:[a-zA-Z_]+)?', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
(r'\\[\w$-]+', String),
(r'<\[.*\]>', String),
],
'strings': [
(r'[^#\\\'"]+', String),
            # note that all LiveScript strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string' : [
(r'}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
            (r'\\.|\'', String), # double-quoted strings don't need ' escapes
(r'#{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r'#|\\.|"', String), # single quoted strings don't need " escapses
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-string
(r'#{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
class DuelLexer(RegexLexer):
"""
Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks.
See http://duelengine.org/.
See http://jsonml.org/jbst/.
*New in Pygments 1.4.*
"""
name = 'Duel'
aliases = ['duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST']
filenames = ['*.duel','*.jbst']
mimetypes = ['text/x-duel','text/x-jbst']
flags = re.DOTALL
tokens = {
'root': [
(r'(<%[@=#!:]?)(.*?)(%>)',
bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)),
(r'(<%\$)(.*?)(:)(.*?)(%>)',
bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)),
(r'(<%--)(.*?)(--%>)',
bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
(r'(<script.*?>)(.*?)(</script>)',
bygroups(using(HtmlLexer),
using(JavascriptLexer), using(HtmlLexer))),
(r'(.+?)(?=<)', using(HtmlLexer)),
(r'.+', using(HtmlLexer)),
],
}
class ScamlLexer(ExtendedRegexLexer):
"""
For `Scaml markup <http://scalate.fusesource.org/>`_. Scaml is Haml for Scala.
*New in Pygments 1.4.*
"""
name = 'Scaml'
aliases = ['scaml', 'SCAML']
filenames = ['*.scaml']
mimetypes = ['text/x-scaml']
flags = re.IGNORECASE
# Scaml does not yet support the " |\n" notation to
# wrap long lines. Once it does, use the custom faux
# dot instead.
# _dot = r'(?: \|\n(?=.* \|)|.)'
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
(r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'root'),
(r'', Text, 'plain'),
],
'content': [
include('css'),
(r'%[a-z0-9_:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
(r'\[' + _dot + '*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[a-z0-9_:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'[a-z0-9_]+', Name.Variable, '#pop'),
(r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
(r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class JadeLexer(ExtendedRegexLexer):
"""
For Jade markup.
Jade is a variant of Scaml, see:
http://scalate.fusesource.org/documentation/scaml-reference.html
*New in Pygments 1.4.*
"""
name = 'Jade'
aliases = ['jade', 'JADE']
filenames = ['*.jade']
mimetypes = ['text/x-jade']
flags = re.IGNORECASE
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
(r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)), 'root'),
(r'', Text, 'plain'),
],
'content': [
include('css'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
(r'[a-z0-9_:-]+', Name.Tag, 'tag'),
(r'\|', Text, 'eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
(r'\[' + _dot + '*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[a-z0-9_:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'[a-z0-9_]+', Name.Variable, '#pop'),
(r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
(r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class XQueryLexer(ExtendedRegexLexer):
"""
An XQuery lexer, parsing a stream and outputting the tokens needed to
highlight xquery code.
*New in Pygments 1.4.*
"""
name = 'XQuery'
aliases = ['xquery', 'xqy', 'xq', 'xql', 'xqm']
filenames = ['*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm']
mimetypes = ['text/xquery', 'application/xquery']
xquery_parse_state = []
# FIX UNICODE LATER
#ncnamestartchar = (
# ur"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|"
# ur"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|"
# ur"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|"
# ur"[\u10000-\uEFFFF]"
#)
ncnamestartchar = r"(?:[A-Z]|_|[a-z])"
# FIX UNICODE LATER
#ncnamechar = ncnamestartchar + (ur"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|"
# ur"[\u203F-\u2040]")
ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])"
ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar)
pitarget_namestartchar = r"(?:[A-KN-WY-Z]|_|:|[a-kn-wy-z])"
pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])"
pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar)
prefixedname = "%s:%s" % (ncname, ncname)
unprefixedname = ncname
qname = "(?:%s|%s)" % (prefixedname, unprefixedname)
entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)'
charref = r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)'
stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")'
stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')"
# FIX UNICODE LATER
#elementcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
# ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]'
#quotattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|'
# ur'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]'
#aposattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
# ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_`\|~]'
# CHAR elements - fix the above elementcontentchar, quotattrcontentchar,
# aposattrcontentchar
#x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
flags = re.DOTALL | re.MULTILINE | re.UNICODE
def punctuation_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
# transition to root always - don't pop off stack
ctx.stack = ['root']
ctx.pos = match.end()
def operator_root_callback(lexer, match, ctx):
yield match.start(), Operator, match.group(1)
# transition to root always - don't pop off stack
ctx.stack = ['root']
ctx.pos = match.end()
def popstate_tag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
ctx.stack.append(lexer.xquery_parse_state.pop())
ctx.pos = match.end()
def popstate_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append(lexer.xquery_parse_state.pop())
ctx.pos = match.end()
def popstate_kindtest_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
next_state = lexer.xquery_parse_state.pop()
if next_state == 'occurrenceindicator':
if re.match("[?*+]+", match.group(2)):
yield match.start(), Punctuation, match.group(2)
ctx.stack.append('operator')
ctx.pos = match.end()
else:
ctx.stack.append('operator')
ctx.pos = match.end(1)
else:
ctx.stack.append(next_state)
ctx.pos = match.end(1)
def popstate_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
# if we have run out of our state stack, pop whatever is on the pygments
# state stack
if len(lexer.xquery_parse_state) == 0:
ctx.stack.pop()
elif len(ctx.stack) > 1:
ctx.stack.append(lexer.xquery_parse_state.pop())
else:
# i don't know if i'll need this, but in case, default back to root
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_element_content_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append('element_content')
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append(ctx.state.pop)
ctx.pos = match.end()
def pushstate_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append(ctx.state.pop)
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_operator_order_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_root_validate(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_root_validate_withmode(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Keyword, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_processing_instruction_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('processing_instruction')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_element_content_processing_instruction_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('processing_instruction')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_element_content_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_operator_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_element_content_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('xml_comment')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_operator_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('xml_comment')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_kindtest_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('kindtest')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_operator_kindtestforpi_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('kindtestforpi')
ctx.pos = match.end()
def pushstate_operator_kindtest_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('occurrenceindicator')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_operator_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_operator_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
lexer.xquery_parse_state.append('operator')
ctx.stack = ['root']#.append('root')
ctx.pos = match.end()
def pushstate_operator_root_construct_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
cur_state = ctx.stack.pop()
lexer.xquery_parse_state.append(cur_state)
ctx.stack = ['root']#.append('root')
ctx.pos = match.end()
def pushstate_operator_attribute_callback(lexer, match, ctx):
yield match.start(), Name.Attribute, match.group(1)
ctx.stack.append('operator')
ctx.pos = match.end()
def pushstate_operator_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
tokens = {
'comment': [
# xquery comments
(r'(:\))', Comment, '#pop'),
(r'(\(:)', Comment, '#push'),
(r'[^:)]', Comment),
(r'([^:)]|:|\))', Comment),
],
'whitespace': [
(r'\s+', Text),
],
'operator': [
include('whitespace'),
(r'(\})', popstate_callback),
(r'\(:', Comment, 'comment'),
(r'(\{)', pushstate_root_callback),
(r'then|else|external|at|div|except', Keyword, 'root'),
(r'order by', Keyword, 'root'),
(r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'),
(r'and|or', Operator.Word, 'root'),
(r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)',
Operator.Word, 'root'),
(r'return|satisfies|to|union|where|preserve\s+strip',
Keyword, 'root'),
(r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\||:=|=)',
operator_root_callback),
(r'(::|;|\[|//|/|,)',
punctuation_root_callback),
(r'(castable|cast)(\s+)(as)\b',
bygroups(Keyword, Text, Keyword), 'singletype'),
(r'(instance)(\s+)(of)\b',
bygroups(Keyword, Text, Keyword), 'itemtype'),
(r'(treat)(\s+)(as)\b',
bygroups(Keyword, Text, Keyword), 'itemtype'),
(r'(case|as)\b', Keyword, 'itemtype'),
(r'(\))(\s*)(as)',
bygroups(Punctuation, Text, Keyword), 'itemtype'),
(r'\$', Name.Variable, 'varname'),
(r'(for|let)(\s+)(\$)',
bygroups(Keyword, Text, Name.Variable), 'varname'),
#(r'\)|\?|\]', Punctuation, '#push'),
(r'\)|\?|\]', Punctuation),
(r'(empty)(\s+)(greatest|least)', bygroups(Keyword, Text, Keyword)),
(r'ascending|descending|default', Keyword, '#push'),
(r'external', Keyword),
(r'collation', Keyword, 'uritooperator'),
# finally catch all string literals and stay in operator state
(stringdouble, String.Double),
(stringsingle, String.Single),
(r'(catch)(\s*)', bygroups(Keyword, Text), 'root'),
],
'uritooperator': [
(stringdouble, String.Double, '#pop'),
(stringsingle, String.Single, '#pop'),
],
'namespacedecl': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'(at)(\s+)('+stringdouble+')', bygroups(Keyword, Text, String.Double)),
(r"(at)(\s+)("+stringsingle+')', bygroups(Keyword, Text, String.Single)),
(stringdouble, String.Double),
(stringsingle, String.Single),
(r',', Punctuation),
(r'=', Operator),
(r';', Punctuation, 'root'),
(ncname, Name.Namespace),
],
'namespacekeyword': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(stringdouble, String.Double, 'namespacedecl'),
(stringsingle, String.Single, 'namespacedecl'),
(r'inherit|no-inherit', Keyword, 'root'),
(r'namespace', Keyword, 'namespacedecl'),
(r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)),
(r'preserve|no-preserve', Keyword),
(r',', Punctuation),
],
'varname': [
(r'\(:', Comment, 'comment'),
(qname, Name.Variable, 'operator'),
],
'singletype': [
(r'\(:', Comment, 'comment'),
(ncname + r'(:\*)', Name.Variable, 'operator'),
(qname, Name.Variable, 'operator'),
],
'itemtype': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'\$', Punctuation, 'varname'),
(r'(void)(\s*)(\()(\s*)(\))',
bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'),
(r'(element|attribute|schema-element|schema-attribute|comment|text|'
r'node|binary|document-node|empty-sequence)(\s*)(\()',
pushstate_occurrenceindicator_kindtest_callback),
# Marklogic specific type?
(r'(processing-instruction)(\s*)(\()',
bygroups(Keyword, Text, Punctuation),
('occurrenceindicator', 'kindtestforpi')),
(r'(item)(\s*)(\()(\s*)(\))(?=[*+?])',
bygroups(Keyword, Text, Punctuation, Text, Punctuation),
'occurrenceindicator'),
(r'\(\#', Punctuation, 'pragma'),
(r';', Punctuation, '#pop'),
(r'then|else', Keyword, '#pop'),
(r'(at)(\s+)(' + stringdouble + ')',
bygroups(Keyword, Text, String.Double), 'namespacedecl'),
(r'(at)(\s+)(' + stringsingle + ')',
bygroups(Keyword, Text, String.Single), 'namespacedecl'),
(r'except|intersect|in|is|return|satisfies|to|union|where',
Keyword, 'root'),
(r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'),
(r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|', Operator, 'root'),
(r'external|at', Keyword, 'root'),
(r'(stable)(\s+)(order)(\s+)(by)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'),
(r'(castable|cast)(\s+)(as)',
bygroups(Keyword, Text, Keyword), 'singletype'),
(r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)),
(r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)),
(r'case|as', Keyword, 'itemtype'),
(r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
(ncname + r':\*', Keyword.Type, 'operator'),
(qname, Keyword.Type, 'occurrenceindicator'),
],
'kindtest': [
(r'\(:', Comment, 'comment'),
(r'{', Punctuation, 'root'),
(r'(\))([*+?]?)', popstate_kindtest_callback),
(r'\*', Name, 'closekindtest'),
(qname, Name, 'closekindtest'),
(r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback),
],
'kindtestforpi': [
(r'\(:', Comment, 'comment'),
(r'\)', Punctuation, '#pop'),
(ncname, Name.Variable),
(stringdouble, String.Double),
(stringsingle, String.Single),
],
'closekindtest': [
(r'\(:', Comment, 'comment'),
(r'(\))', popstate_callback),
(r',', Punctuation),
(r'(\{)', pushstate_operator_root_callback),
(r'\?', Punctuation),
],
'xml_comment': [
(r'(-->)', popstate_xmlcomment_callback),
(r'[^-]{1,2}', Literal),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'processing_instruction': [
(r'\s+', Text, 'processing_instruction_content'),
(r'\?>', String.Doc, '#pop'),
(pitarget, Name),
],
'processing_instruction_content': [
(r'\?>', String.Doc, '#pop'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'cdata_section': [
(r']]>', String.Doc, '#pop'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'start_tag': [
include('whitespace'),
(r'(/>)', popstate_tag_callback),
(r'>', Name.Tag, 'element_content'),
(r'"', Punctuation, 'quot_attribute_content'),
(r"'", Punctuation, 'apos_attribute_content'),
(r'=', Operator),
(qname, Name.Tag),
],
'quot_attribute_content': [
(r'"', Punctuation, 'start_tag'),
(r'(\{)', pushstate_root_callback),
(r'""', Name.Attribute),
(quotattrcontentchar, Name.Attribute),
(entityref, Name.Attribute),
(charref, Name.Attribute),
(r'\{\{|\}\}', Name.Attribute),
],
'apos_attribute_content': [
(r"'", Punctuation, 'start_tag'),
(r'\{', Punctuation, 'root'),
(r"''", Name.Attribute),
(aposattrcontentchar, Name.Attribute),
(entityref, Name.Attribute),
(charref, Name.Attribute),
(r'\{\{|\}\}', Name.Attribute),
],
'element_content': [
(r'</', Name.Tag, 'end_tag'),
(r'(\{)', pushstate_root_callback),
(r'(<!--)', pushstate_element_content_xmlcomment_callback),
(r'(<\?)', pushstate_element_content_processing_instruction_callback),
(r'(<!\[CDATA\[)', pushstate_element_content_cdata_section_callback),
(r'(<)', pushstate_element_content_starttag_callback),
(elementcontentchar, Literal),
(entityref, Literal),
(charref, Literal),
(r'\{\{|\}\}', Literal),
],
'end_tag': [
include('whitespace'),
(r'(>)', popstate_tag_callback),
(qname, Name.Tag),
],
'xmlspace_decl': [
(r'\(:', Comment, 'comment'),
(r'preserve|strip', Keyword, '#pop'),
],
'declareordering': [
(r'\(:', Comment, 'comment'),
include('whitespace'),
(r'ordered|unordered', Keyword, '#pop'),
],
'xqueryversion': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(stringdouble, String.Double),
(stringsingle, String.Single),
(r'encoding', Keyword),
(r';', Punctuation, '#pop'),
],
'pragma': [
(qname, Name.Variable, 'pragmacontents'),
],
'pragmacontents': [
(r'#\)', Punctuation, 'operator'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
(r'(\s+)', Text),
],
'occurrenceindicator': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'\*|\?|\+', Operator, 'operator'),
(r':=', Operator, 'root'),
(r'', Text, 'operator'),
],
'option': [
include('whitespace'),
(qname, Name.Variable, '#pop'),
],
'qname_braren': [
include('whitespace'),
(r'(\{)', pushstate_operator_root_callback),
(r'(\()', Punctuation, 'root'),
],
'element_qname': [
(qname, Name.Variable, 'root'),
],
'attribute_qname': [
(qname, Name.Variable, 'root'),
],
'root': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
# handle operator state
# order on numbers matters - handle most complex first
(r'\d+(\.\d*)?[eE][\+\-]?\d+', Number.Double, 'operator'),
(r'(\.\d+)[eE][\+\-]?\d+', Number.Double, 'operator'),
(r'(\.\d+|\d+\.\d*)', Number, 'operator'),
(r'(\d+)', Number.Integer, 'operator'),
(r'(\.\.|\.|\))', Punctuation, 'operator'),
(r'(declare)(\s+)(construction)',
bygroups(Keyword, Text, Keyword), 'operator'),
(r'(declare)(\s+)(default)(\s+)(order)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'operator'),
(ncname + ':\*', Name, 'operator'),
('\*:'+ncname, Name.Tag, 'operator'),
('\*', Name.Tag, 'operator'),
(stringdouble, String.Double, 'operator'),
(stringsingle, String.Single, 'operator'),
(r'(\})', popstate_callback),
#NAMESPACE DECL
(r'(declare)(\s+)(default)(\s+)(collation)',
bygroups(Keyword, Text, Keyword, Text, Keyword)),
(r'(module|declare)(\s+)(namespace)',
bygroups(Keyword, Text, Keyword), 'namespacedecl'),
(r'(declare)(\s+)(base-uri)',
bygroups(Keyword, Text, Keyword), 'namespacedecl'),
#NAMESPACE KEYWORD
(r'(declare)(\s+)(default)(\s+)(element|function)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'namespacekeyword'),
(r'(import)(\s+)(schema|module)',
bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'namespacekeyword'),
(r'(declare)(\s+)(copy-namespaces)',
bygroups(Keyword, Text, Keyword), 'namespacekeyword'),
#VARNAMEs
(r'(for|let|some|every)(\s+)(\$)',
bygroups(Keyword, Text, Name.Variable), 'varname'),
(r'\$', Name.Variable, 'varname'),
(r'(declare)(\s+)(variable)(\s+)(\$)',
bygroups(Keyword, Text, Keyword, Text, Name.Variable), 'varname'),
#ITEMTYPE
(r'(\))(\s+)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
(r'(element|attribute|schema-element|schema-attribute|comment|'
r'text|node|document-node|empty-sequence)(\s+)(\()',
pushstate_operator_kindtest_callback),
(r'(processing-instruction)(\s+)(\()',
pushstate_operator_kindtestforpi_callback),
(r'(<!--)', pushstate_operator_xmlcomment_callback),
(r'(<\?)', pushstate_operator_processing_instruction_callback),
(r'(<!\[CDATA\[)', pushstate_operator_cdata_section_callback),
# (r'</', Name.Tag, 'end_tag'),
(r'(<)', pushstate_operator_starttag_callback),
(r'(declare)(\s+)(boundary-space)',
bygroups(Keyword, Text, Keyword), 'xmlspace_decl'),
(r'(validate)(\s+)(lax|strict)',
pushstate_operator_root_validate_withmode),
(r'(validate)(\s*)(\{)', pushstate_operator_root_validate),
(r'(typeswitch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
(r'(element|attribute)(\s*)(\{)',
pushstate_operator_root_construct_callback),
(r'(document|text|processing-instruction|comment)(\s*)(\{)',
pushstate_operator_root_construct_callback),
#ATTRIBUTE
(r'(attribute)(\s+)(?=' + qname + r')',
bygroups(Keyword, Text), 'attribute_qname'),
#ELEMENT
(r'(element)(\s+)(?=' +qname+ r')',
bygroups(Keyword, Text), 'element_qname'),
#PROCESSING_INSTRUCTION
(r'(processing-instruction)(\s+)(' + ncname + r')(\s*)(\{)',
bygroups(Keyword, Text, Name.Variable, Text, Punctuation),
'operator'),
(r'(declare|define)(\s+)(function)',
bygroups(Keyword, Text, Keyword)),
(r'(\{)', pushstate_operator_root_callback),
(r'(unordered|ordered)(\s*)(\{)',
pushstate_operator_order_callback),
(r'(declare)(\s+)(ordering)',
bygroups(Keyword, Text, Keyword), 'declareordering'),
(r'(xquery)(\s+)(version)',
bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'xqueryversion'),
(r'(\(#)', Punctuation, 'pragma'),
# sometimes return can occur in root state
(r'return', Keyword),
(r'(declare)(\s+)(option)', bygroups(Keyword, Text, Keyword),
'option'),
#URI LITERALS - single and double quoted
(r'(at)(\s+)('+stringdouble+')', String.Double, 'namespacedecl'),
(r'(at)(\s+)('+stringsingle+')', String.Single, 'namespacedecl'),
(r'(ancestor-or-self|ancestor|attribute|child|descendant-or-self)(::)',
bygroups(Keyword, Punctuation)),
(r'(descendant|following-sibling|following|parent|preceding-sibling'
r'|preceding|self)(::)', bygroups(Keyword, Punctuation)),
(r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
(r'then|else', Keyword),
# ML specific
(r'(try)(\s*)', bygroups(Keyword, Text), 'root'),
(r'(catch)(\s*)(\()(\$)',
bygroups(Keyword, Text, Punctuation, Name.Variable), 'varname'),
(r'(@'+qname+')', Name.Attribute),
(r'(@'+ncname+')', Name.Attribute),
(r'@\*:'+ncname, Name.Attribute),
(r'(@)', Name.Attribute),
(r'//|/|\+|-|;|,|\(|\)', Punctuation),
# STANDALONE QNAMES
(qname + r'(?=\s*{)', Name.Tag, 'qname_braren'),
(qname + r'(?=\s*\([^:])', Name.Function, 'qname_braren'),
(qname, Name.Tag, 'operator'),
]
}
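# Illustrative sketch (the sample query and helper name are assumptions for
# demonstration, not part of the original module): a direct way to watch the
# callback-driven state machine above at work -- '{' pushes back to 'root'
# while remembering where to return, and '}' pops via popstate_callback.
def _xquery_state_machine_example():
    lexer = XQueryLexer()
    sample = 'for $x in (1, 2, 3) return <item>{$x}</item>'
    return list(lexer.get_tokens(sample))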
class DartLexer(RegexLexer):
"""
For `Dart <http://dartlang.org/>`_ source code.
*New in Pygments 1.5.*
"""
name = 'Dart'
aliases = ['dart']
filenames = ['*.dart']
mimetypes = ['text/x-dart']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(r'#!(.*?)$', Comment.Preproc),
(r'(#)(import|library|source)', bygroups(Text, Keyword)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'(class|interface)(\s+)',
bygroups(Keyword.Declaration, Text), 'class'),
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|in|is|new|return|super|switch|this|throw|try|while)\b',
Keyword),
(r'(abstract|const|extends|factory|final|get|implements|'
r'native|operator|set|static|typedef|var)\b', Keyword.Declaration),
(r'(bool|double|Dynamic|int|num|Object|String|void)', Keyword.Type),
(r'(false|null|true)', Keyword.Constant),
(r'@"(\\\\|\\"|[^"])*"', String.Double), # raw string
(r"@'(\\\\|\\'|[^'])*'", String.Single), # raw string
(r'"', String.Double, 'string_double'),
(r"'", String.Single, 'string_single'),
(r'[a-zA-Z_$][a-zA-Z0-9_]*:', Name.Label),
(r'[a-zA-Z_$][a-zA-Z0-9_]*', Name),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[(){}\[\],.;]', Punctuation),
(r'0[xX][0-9a-fA-F]+', Number.Hex),
# DIGIT+ (‘.’ DIGIT*)? EXPONENT?
(r'\d+(\.\d*)?([eE][+-]?\d+)?', Number),
(r'\.\d+([eE][+-]?\d+)?', Number), # ‘.’ DIGIT+ EXPONENT?
(r'\n', Text)
# pseudo-keyword negate intentionally left out
],
'class': [
(r'[a-zA-Z_$][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'string_double': [
(r'"', String.Double, '#pop'),
(r'[^"$]+', String.Double),
(r'(\$)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(String.Interpol, Name)),
(r'(\$\{)(.*?)(\})',
bygroups(String.Interpol, using(this), String.Interpol)),
(r'\$+', String.Double)
],
'string_single': [
(r"'", String.Single, '#pop'),
(r"[^'$]+", String.Single),
(r'(\$)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(String.Interpol, Name)),
(r'(\$\{)(.*?)(\})',
bygroups(String.Interpol, using(this), String.Interpol)),
(r'\$+', String.Single)
]
}
class LassoLexer(RegexLexer):
"""
For `Lasso <http://www.lassosoft.com/>`_ source code, covering both Lasso 9
syntax and LassoScript for Lasso 8.6 and earlier. For Lasso embedded in
HTML, use the `LassoHtmlLexer`.
Additional options accepted:
`builtinshighlighting`
If given and ``True``, highlight builtin tags, types, traits, and
methods (default: ``True``).
`requiredelimiters`
If given and ``True``, only highlight code between delimiters as Lasso
(default: ``False``).
*New in Pygments 1.6.*
"""
name = 'Lasso'
aliases = ['lasso', 'lassoscript']
filenames = ['*.lasso', '*.lasso[89]']
alias_filenames = ['*.incl', '*.inc', '*.las']
mimetypes = ['text/x-lasso']
flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'^#!.+lasso9\b', Comment.Preproc, 'lasso'),
(r'\s+', Other),
(r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'),
(r'\[noprocess\]', Comment.Preproc, ('delimiters', 'noprocess')),
(r'\[', Comment.Preproc, ('delimiters', 'squarebrackets')),
(r'<\?(LassoScript|lasso|=)', Comment.Preproc,
('delimiters', 'anglebrackets')),
(r'<', Other, 'delimiters'),
include('lasso'),
],
'nosquarebrackets': [
(r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'),
(r'<', Other),
(r'[^<]+', Other),
],
'delimiters': [
(r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'),
(r'\[noprocess\]', Comment.Preproc, 'noprocess'),
(r'\[', Comment.Preproc, 'squarebrackets'),
(r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'),
(r'<', Other),
(r'[^[<]+', Other),
],
'noprocess': [
(r'\[/noprocess\]', Comment.Preproc, '#pop'),
(r'\[', Other),
(r'[^[]', Other),
],
'squarebrackets': [
(r'\]', Comment.Preproc, '#pop'),
include('lasso'),
],
'anglebrackets': [
(r'\?>', Comment.Preproc, '#pop'),
include('lasso'),
],
'lasso': [
# whitespace/comments
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*\*!.*?\*/', String.Doc),
(r'/\*.*?\*/', Comment.Multiline),
# literals
(r'\d*\.\d+(e[+-]?\d+)?', Number.Float),
(r'0x[\da-f]+', Number.Hex),
(r'\d+', Number.Integer),
(r'([+-]?)(infinity|NaN)\b', bygroups(Operator, Number)),
(r"'", String.Single, 'singlestring'),
(r'"', String.Double, 'doublestring'),
(r'`[^`]*`', String.Backtick),
# names
(r'\$[a-z_][\w.]*', Name.Variable),
(r'#[a-z_][\w.]*|#\d+', Name.Variable.Instance),
(r"(\.)('[a-z_][\w.]*')",
bygroups(Name.Builtin.Pseudo, Name.Variable.Class)),
(r"(self)(->)('[a-z_][\w.]*')",
bygroups(Name.Builtin.Pseudo, Operator, Name.Variable.Class)),
(r'(\.\.?)([a-z_][\w.]*)',
bygroups(Name.Builtin.Pseudo, Name.Other)),
(r'(self|inherited|global|void)\b', Name.Builtin.Pseudo),
(r'-[a-z_][\w.]*', Name.Attribute),
(r'(::\s*)([a-z_][\w.]*)', bygroups(Punctuation, Name.Label)),
(r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|'
r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|'
r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|'
r'Error_InvalidDatabase|Error_InvalidPassword|'
r'Error_InvalidUsername|Error_ModuleNotFound|'
r'Error_NoError|Error_NoPermission|Error_OutOfMemory|'
r'Error_ReqColumnMissing|Error_ReqFieldMissing|'
r'Error_RequiredColumnMissing|Error_RequiredFieldMissing|'
r'Error_UpdateError)\b', Name.Exception),
# definitions
(r'(define)(\s+)([a-z_][\w.]*)(\s*)(=>)(\s*)(type|trait|thread)\b',
bygroups(Keyword.Declaration, Text, Name.Class, Text, Operator,
Text, Keyword)),
(r'(define)(\s+)([a-z_][\w.]*)(->)([a-z_][\w.]*=?|[-+*/%<>]|==)',
bygroups(Keyword.Declaration, Text, Name.Class, Operator,
Name.Function), 'signature'),
(r'(define)(\s+)([a-z_][\w.]*)',
bygroups(Keyword.Declaration, Text, Name.Function),
'signature'),
(r'(public|protected|private|provide)(\s+)([a-z_][\w.]*=?|'
r'[-+*/%<>]|==)(\s*)(\()',
bygroups(Keyword, Text, Name.Function, Text, Punctuation),
('signature', 'parameter')),
(r'(public|protected|private)(\s+)([a-z_][\w.]*)',
bygroups(Keyword, Text, Name.Function)),
# keywords
(r'(true|false|none|minimal|full|all)\b', Keyword.Constant),
(r'(local|var|variable|data)\b', Keyword.Declaration),
(r'(array|date|decimal|duration|integer|map|pair|string|tag|xml|'
r'null)\b', Keyword.Type),
(r'([a-z_][\w.]*)(\s+)(in)\b', bygroups(Name, Text, Keyword)),
(r'(let|into)(\s+)([a-z_][\w.]*)', bygroups(Keyword, Text, Name)),
(r'(/?)(Cache|Database_Names|Database_SchemaNames|'
r'Database_TableNames|Define_Tag|Define_Type|Email_Batch|'
r'Encode_Set|HTML_Comment|Handle|Handle_Error|Header|If|Inline|'
r'Iterate|LJAX_Target|Link|Link_CurrentAction|Link_CurrentGroup|'
r'Link_CurrentRecord|Link_Detail|Link_FirstGroup|'
r'Link_FirstRecord|Link_LastGroup|Link_LastRecord|Link_NextGroup|'
r'Link_NextRecord|Link_PrevGroup|Link_PrevRecord|Log|Loop|'
r'Namespace_Using|NoProcess|Output_None|Portal|Private|Protect|'
r'Records|Referer|Referrer|Repeating|ResultSet|Rows|Search_Args|'
r'Search_Arguments|Select|Sort_Args|Sort_Arguments|Thread_Atomic|'
r'Value_List|While|Abort|Case|Else|If_Empty|If_False|If_Null|'
r'If_True|Loop_Abort|Loop_Continue|Loop_Count|Params|Params_Up|'
r'Return|Return_Value|Run_Children|SOAP_DefineTag|'
r'SOAP_LastRequest|SOAP_LastResponse|Tag_Name|ascending|average|'
r'by|define|descending|do|equals|frozen|group|handle_failure|'
r'import|in|into|join|let|match|max|min|on|order|parent|protected|'
r'provide|public|require|skip|split_thread|sum|take|thread|to|'
r'trait|type|where|with|yield)\b', bygroups(Punctuation, Keyword)),
# other
(r'(and|or|not)\b', Operator.Word),
(r'([a-z_][\w.]*)(\s*)(::\s*)([a-z_][\w.]*)(\s*)(=)',
bygroups(Name, Text, Punctuation, Name.Label, Text, Operator)),
(r'((?<!->)[a-z_][\w.]*)(\s*)(=(?!=))',
bygroups(Name, Text, Operator)),
(r'(/?)([\w.]+)', bygroups(Punctuation, Name.Other)),
(r'(=)(bw|ew|cn|lte?|gte?|n?eq|ft|n?rx)\b',
bygroups(Operator, Operator.Word)),
(r':=|[-+*/%=<>&|!?\\]+', Operator),
(r'[{}():;,@^]', Punctuation),
],
'singlestring': [
(r"'", String.Single, '#pop'),
(r"[^'\\]+", String.Single),
include('escape'),
(r"\\+", String.Single),
],
'doublestring': [
(r'"', String.Double, '#pop'),
(r'[^"\\]+', String.Double),
include('escape'),
(r'\\+', String.Double),
],
'escape': [
(r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:]+:|'
r'[abefnrtv?\"\'\\]|$)', String.Escape),
],
'signature': [
(r'[(,]', Punctuation, 'parameter'),
(r'=>', Operator, '#pop'),
include('lasso'),
],
'parameter': [
(r'\.\.\.', Name.Builtin.Pseudo),
(r'-?[a-z_][\w.]*', Name.Attribute, '#pop'),
(r'\)', Punctuation, '#pop'),
include('lasso'),
],
}
def __init__(self, **options):
self.builtinshighlighting = get_bool_opt(
options, 'builtinshighlighting', True)
self.requiredelimiters = get_bool_opt(
options, 'requiredelimiters', False)
self._builtins = set()
if self.builtinshighlighting:
from pygments.lexers._lassobuiltins import BUILTINS
for key, value in BUILTINS.iteritems():
self._builtins.update(value)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
if self.requiredelimiters:
stack.append('delimiters')
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Other:
if value.lower() in self._builtins:
yield index, Name.Builtin, value
continue
yield index, token, value
def analyse_text(text):
rv = 0.0
if 'bin/lasso9' in text:
rv += 0.8
if re.search(r'<\?(=|lasso)', text, re.I):
rv += 0.4
if re.search(r'local\(', text, re.I):
rv += 0.4
if re.search(r'\[\n|\?>', text):
rv += 0.4
return rv
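# Illustrative sketch (the sample text and helper name are assumptions for
# demonstration, not part of the original module): the two options documented
# in the class docstring change the output -- requiredelimiters=True leaves
# text outside [...] or <?lasso ... ?> blocks as Other, and
# builtinshighlighting=False keeps builtin names as Name.Other instead of
# promoting them to Name.Builtin in get_tokens_unprocessed().
def _lasso_lexer_options_example():
    lexer = LassoLexer(requiredelimiters=True, builtinshighlighting=False)
    sample = "plain text [local(greeting = 'hello') #greeting] more text"
    return list(lexer.get_tokens(sample))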
| mit | -2,779,814,613,805,224,400 | 38.390654 | 99 | 0.488445 | false |
bank-netforce/netforce | netforce_ecom/netforce_ecom/controllers/ecom_wishlist.py | 4 | 1820 | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.template import render
from netforce.database import get_connection
from .cms_base import BaseController
class Wishlist(BaseController):
_path = "/ecom_wishlist"
def get(self):
db = get_connection()
try:
user_id = self.get_cookie("user_id", None)
if not user_id:
self.redirect("/cms_login")
ctx = self.context
content = render("ecom_wishlist", ctx)
ctx["content"] = content
html = render("cms_layout", ctx)
self.write(html)
db.commit()
except:
import traceback
traceback.print_exc()
db.rollback()
Wishlist.register()
| mit | -930,645,928,053,855,200 | 38.565217 | 80 | 0.69011 | false |
Scan-o-Matic/scanomatic | scanomatic/models/factories/features_factory.py | 1 | 1234 | from __future__ import absolute_import
import os
from types import StringTypes
from scanomatic.generics.abstract_model_factory import (
AbstractModelFactory, email_serializer
)
import scanomatic.models.features_model as features_model
class FeaturesFactory(AbstractModelFactory):
MODEL = features_model.FeaturesModel
STORE_SECTION_HEAD = ("analysis_directory", )
STORE_SECTION_SERIALIZERS = {
"analysis_directory": str,
"email": email_serializer,
"extraction_data": features_model.FeatureExtractionData,
"try_keep_qc": bool
}
@classmethod
def _validate_analysis_directory(cls, model):
if not isinstance(model.analysis_directory, StringTypes):
return model.FIELD_TYPES.analysis_directory
analysis_directory = model.analysis_directory.rstrip("/")
if (os.path.abspath(analysis_directory) == analysis_directory and
os.path.isdir(model.analysis_directory)):
return True
return model.FIELD_TYPES.analysis_directory
@classmethod
def create(cls, **settings):
""":rtype : scanomatic.models.features_model.FeaturesModel"""
return super(FeaturesFactory, cls).create(**settings)
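# Minimal usage sketch (the path and helper name are assumptions for
# illustration; it also assumes the factory accepts its serializer keys as
# keyword settings, as the create() signature suggests). Note that
# _validate_analysis_directory() only accepts an absolute, already-normalized
# path to a directory that exists on disk.
def _example_features_model():
    return FeaturesFactory.create(
        analysis_directory="/tmp/project/analysis",
        try_keep_qc=True,
    )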
| gpl-3.0 | 2,029,703,341,801,107,200 | 29.85 | 73 | 0.694489 | false |
64studio/smart | smart/plugins/channelsync.py | 4 | 8207 | #
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <[email protected]>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.channel import *
from smart import *
import os
CHANNELSDIR = "/etc/smart/channels/"
def syncChannels(channelsdir, force=None):
if force is None:
force = sysconf.get("force-channelsync", False)
if os.path.isdir(channelsdir):
seenalias = {}
for entry in os.listdir(channelsdir):
if not (entry.endswith(".channel") or entry.endswith(".repo")):
continue
filepath = os.path.join(channelsdir, entry)
if not os.path.isfile(filepath):
continue
file = open(filepath)
data = file.read()
file.close()
try:
descriptions = parseChannelsDescription(data)
except Error, e:
iface.error(_("While using %s: %s") % (filepath, e))
continue
for alias in descriptions:
if alias in seenalias:
continue
seenalias[alias] = True
olddescr = sysconf.get(("channelsync", alias))
newdescr = descriptions[alias]
chndescr = sysconf.get(("channels", alias))
if not olddescr and chndescr:
olddescr = chndescr
if chndescr:
name = chndescr.get("name")
else:
name = None
if not name:
name = newdescr.get("name")
if not name:
name = alias
else:
name += " (%s)" % alias
else:
name += " (%s)" % alias
if not olddescr:
if (force or
iface.askYesNo(_("New channel '%s' detected.\n"
"Include it?") % name, True)):
try:
createChannel(alias, newdescr)
except Error, e:
iface.error(_("While using %s: %s") %
(filepath, e))
else:
sysconf.set(("channels", alias), newdescr)
sysconf.set(("channelsync", alias), newdescr)
else:
sysconf.set(("channelsync", alias), newdescr)
elif (not chndescr or
newdescr == chndescr or
newdescr == olddescr):
continue
elif not newdescr.get("type"):
iface.error(_("Channel in %s has no type.") % fielpath)
elif newdescr.get("type") != chndescr.get("type"):
if (force or
iface.askYesNo(_("Change in channel '%s' detected.\n"
"Old channel:\n\n%s\n\n"
"New channel:\n\n%s\n\n"
"Do you want to replace it?") %
(name,
createChannelDescription(alias,
chndescr),
createChannelDescription(alias,
newdescr)),
True)):
try:
createChannel(alias, newdescr)
except Error, e:
iface.error(_("While using %s: %s") %
(filepath, e))
else:
sysconf.set(("channels", alias), newdescr)
sysconf.set(("channelsync", alias), newdescr)
else:
sysconf.set(("channelsync", alias), newdescr)
else:
info = getChannelInfo(chndescr["type"])
def getLabel(key, info=info):
for _key, label, ftype, default, descr in info.fields:
if _key == key:
return label
return key
def toStr(value):
if type(value) is bool:
return value and _("Yes") or _("No")
elif value is None:
return _("None")
return str(value)
try:
pardescr = parseChannelData(newdescr)
except Error, e:
iface.error(_("While using %s: %s") % (filepath, e))
continue
changed = False
for key in newdescr:
oldvalue = olddescr.get(key)
newvalue = newdescr.get(key)
parvalue = pardescr.get(key)
chnvalue = chndescr.get(key)
if newvalue == oldvalue or parvalue == chnvalue:
continue
if (force or
iface.askYesNo(_("Change in field '%(label)s' of "
"channel '%(name)s' detected.\n"
"Old value: %(curvalue)s\n"
"New value: %(newvalue)s\n"
"Replace current value?") %
{"label": getLabel(key),
"name": name,
"curvalue": toStr(chnvalue),
"newvalue": toStr(parvalue)},
True)):
chndescr[key] = parvalue
changed = True
if changed:
try:
createChannel(alias, chndescr)
except Error, e:
iface.error(unicode(e))
else:
sysconf.set(("channels", alias), chndescr)
sysconf.set(("channelsync", alias), newdescr)
if not sysconf.has("channelsync"):
return
for alias in sysconf.keys("channelsync"):
if alias not in seenalias:
sysconf.remove(("channelsync", alias))
if not sysconf.has(("channels", alias)):
continue
name = sysconf.get(("channels", alias, "name"))
if not name:
name = alias
else:
name += " (%s)" % alias
if (force or
iface.askYesNo(_("Removing channel '%s' was suggested.\n"
"Do you want to remove it?") % name,
True)):
sysconf.remove(("channels", alias))
if not sysconf.getReadOnly():
syncChannels(sysconf.get("channel-sync-dir", CHANNELSDIR))
| gpl-2.0 | -2,205,787,779,533,312,000 | 39.830846 | 78 | 0.415621 | false |
openspending/ckanext-openspending_registry | setup.py | 1 | 3135 | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='''ckanext-openspending_registry''',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='0.0.1',
description='''''',
long_description=long_description,
# The project's main homepage.
url='https://github.com/openspending/ckanext-openspending_registry',
# Author details
author='''''',
author_email='''''',
# Choose your license
license='AGPL',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='''CKAN''',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=[],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
include_package_data=True,
package_data={
},
    # Although 'package_data' is the preferred approach, in some cases you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points='''
[ckan.plugins]
openspending_registry=ckanext.openspending_registry.plugin:Openspending_RegistryPlugin
''',
)
| agpl-3.0 | 2,471,371,732,877,692,400 | 36.771084 | 98 | 0.681021 | false |
KnHuq/Dynamic-Tensorflow-Tutorial | GRU/GRU.py | 2 | 6780 | import tensorflow as tf
from sklearn import datasets
from sklearn.cross_validation import train_test_split
import sys
# # Vanilla RNN class and functions
class RNN_cell(object):
"""
RNN cell object which takes 3 arguments for initialization.
input_size = Input Vector size
hidden_layer_size = Hidden layer size
target_size = Output vector size
"""
def __init__(self, input_size, hidden_layer_size, target_size):
# Initialization of given values
self.input_size = input_size
self.hidden_layer_size = hidden_layer_size
self.target_size = target_size
# Weights for input and hidden tensor
self.Wx = tf.Variable(
tf.zeros([self.input_size, self.hidden_layer_size]))
self.Wr = tf.Variable(
tf.zeros([self.input_size, self.hidden_layer_size]))
self.Wz = tf.Variable(
tf.zeros([self.input_size, self.hidden_layer_size]))
self.br = tf.Variable(tf.truncated_normal(
[self.hidden_layer_size], mean=1))
self.bz = tf.Variable(tf.truncated_normal(
[self.hidden_layer_size], mean=1))
self.Wh = tf.Variable(
tf.zeros([self.hidden_layer_size, self.hidden_layer_size]))
# Weights for output layer
self.Wo = tf.Variable(tf.truncated_normal(
[self.hidden_layer_size, self.target_size], mean=1, stddev=.01))
self.bo = tf.Variable(tf.truncated_normal(
[self.target_size], mean=1, stddev=.01))
# Placeholder for input vector with shape[batch, seq, embeddings]
self._inputs = tf.placeholder(tf.float32,
shape=[None, None, self.input_size],
name='inputs')
# Processing inputs to work with scan function
self.processed_input = process_batch_input_for_RNN(self._inputs)
'''
Initial hidden state's shape is [1,self.hidden_layer_size]
In First time stamp, we are doing dot product with weights to
get the shape of [batch_size, self.hidden_layer_size].
For this dot product tensorflow use broadcasting. But during
Back propagation a low level error occurs.
So to solve the problem it was needed to initialize initial
hiddden state of size [batch_size, self.hidden_layer_size].
So here is a little hack !!!! Getting the same shaped
initial hidden state of zeros.
'''
self.initial_hidden = self._inputs[:, 0, :]
self.initial_hidden = tf.matmul(
self.initial_hidden, tf.zeros([input_size, hidden_layer_size]))
# Function for GRU cell
def Gru(self, previous_hidden_state, x):
"""
GRU Equations
"""
z = tf.sigmoid(tf.matmul(x, self.Wz) + self.bz)
r = tf.sigmoid(tf.matmul(x, self.Wr) + self.br)
h_ = tf.tanh(tf.matmul(x, self.Wx) +
tf.matmul(previous_hidden_state, self.Wh) * r)
current_hidden_state = tf.multiply(
(1 - z), h_) + tf.multiply(previous_hidden_state, z)
return current_hidden_state
# Function for getting all hidden state.
def get_states(self):
"""
Iterates through time/ sequence to get all hidden state
"""
        # Getting all hidden states through time
all_hidden_states = tf.scan(self.Gru,
self.processed_input,
initializer=self.initial_hidden,
name='states')
return all_hidden_states
# Function to get output from a hidden layer
def get_output(self, hidden_state):
"""
This function takes hidden state and returns output
"""
output = tf.nn.relu(tf.matmul(hidden_state, self.Wo) + self.bo)
return output
# Function for getting all output layers
def get_outputs(self):
"""
Iterating through hidden states to get outputs for all timestamp
"""
all_hidden_states = self.get_states()
all_outputs = tf.map_fn(self.get_output, all_hidden_states)
return all_outputs
# Function to convert batch input data to use scan ops of tensorflow.
def process_batch_input_for_RNN(batch_input):
"""
Process tensor of size [5,3,2] to [3,5,2]
"""
batch_input_ = tf.transpose(batch_input, perm=[2, 0, 1])
X = tf.transpose(batch_input_)
return X
"""
Example of using GRU
"""
# Initializing variables.
hidden_layer_size = 30
input_size = 8
target_size = 10
# Initializing placeholder
y = tf.placeholder(tf.float32, shape=[None, target_size], name='inputs')
# # Models
# Initializing rnn object
rnn = RNN_cell(input_size, hidden_layer_size, target_size)
# Getting all outputs from rnn
outputs = rnn.get_outputs()
# Getting final output through indexing after reversing
last_output = outputs[-1]
# As the RNN model emits its final layer through a ReLU activation,
# softmax is applied to produce the final output.
output = tf.nn.softmax(last_output)
# Computing the Cross Entropy loss
cross_entropy = -tf.reduce_sum(y * tf.log(output))
# Training with the Adam optimizer
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
# Calculation of correct prediction and accuracy
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(output, 1))
accuracy = (tf.reduce_mean(tf.cast(correct_prediction, tf.float32))) * 100
# # Dataset Preparation
# Function to get a one-hot vector
def get_on_hot(number):
on_hot = [0] * 10
on_hot[number] = 1
return on_hot
# Using sklearn's digits dataset (8x8 images).
digits = datasets.load_digits()
X = digits.images
Y_ = digits.target
Y = map(get_on_hot, Y_)
# Getting Train and test Dataset
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.22, random_state=42)
# Cutting for simple iteration
X_train = X_train[:1400]
y_train = y_train[:1400]
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
# Iterations to do training
for epoch in range(200):
start = 0
end = 100
for i in range(14):
X = X_train[start:end]
Y = y_train[start:end]
start = end
end = start + 100
sess.run(train_step, feed_dict={rnn._inputs: X, y: Y})
Loss = str(sess.run(cross_entropy, feed_dict={rnn._inputs: X, y: Y}))
Train_accuracy = str(sess.run(accuracy, feed_dict={
rnn._inputs: X_train, y: y_train}))
Test_accuracy = str(sess.run(accuracy, feed_dict={
rnn._inputs: X_test, y: y_test}))
sys.stdout.flush()
print("\rIteration: %s Loss: %s Train Accuracy: %s Test Accuracy: %s" %
(epoch, Loss, Train_accuracy, Test_accuracy)),
sys.stdout.flush()
| mit | 5,840,347,899,413,291,000 | 28.350649 | 76 | 0.620059 | false |
sctigercat1/panda3d | direct/src/showutil/Rope.py | 8 | 5750 | from panda3d.core import *
import types
class Rope(NodePath):
"""
This class defines a NURBS curve whose control vertices are
defined based on points relative to one or more nodes in space, so
that the "rope" will animate as the nodes move around. It uses
the C++ RopeNode class to achieve fancy rendering effects like
thick lines built from triangle strips.
"""
showRope = base.config.GetBool('show-rope', 1)
def __init__(self, name = 'Rope'):
self.ropeNode = RopeNode(name)
self.curve = NurbsCurveEvaluator()
self.ropeNode.setCurve(self.curve)
NodePath.__init__(self, self.ropeNode)
self.name = name
def setup(self, order, verts, knots = None):
"""This must be called to define the shape of the curve
initially, and may be called again as needed to adjust the
curve's properties.
order must be either 1, 2, 3, or 4, and is one more than the
degree of the curve; most NURBS curves are order 4.
verts is a list of (NodePath, point) tuples, defining the
control vertices of the curve. For each control vertex, the
NodePath may refer to an arbitrary node in the scene graph,
indicating the point should be interpreted in the coordinate
space of that node (and it will automatically move when the
node is moved), or it may be the empty NodePath or None to
indicate the point should be interpreted in the coordinate
space of the Rope itself. Each point value may be either a
3-tuple or a 4-tuple (or a VBase3 or VBase4). If it is a
3-component vector, it represents a 3-d point in space; a
4-component vector represents a point in 4-d homogeneous
space; that is to say, a 3-d point and an additional weight
factor (which should have been multiplied into the x y z
components).
verts may be a list of dictionaries instead of a list of
tuples. In this case, each vertex dictionary may have any of
the following elements:
'node' : the NodePath indicating the coordinate space
'point' : the 3-D point relative to the node; default (0, 0, 0)
'color' : the color of the vertex, default (1, 1, 1, 1)
'thickness' : the thickness at the vertex, default 1
In order to enable the per-vertex color or thickness, you must
call rope.ropeNode.setUseVertexColor(1) or
rope.ropeNode.setUseVertexThickness(1).
knots is optional. If specified, it should be a list of
floats, and should be of length len(verts) + order. If it
is omitted, a default knot string is generated that consists
of the first (order - 1) and last (order - 1) values the
same, and the intermediate values incrementing by 1.
"""
self.order = order
self.verts = verts
self.knots = knots
self.recompute()
def recompute(self):
"""Recomputes the curve after its properties have changed.
Normally it is not necessary for the user to call this
directly."""
if not self.showRope:
return
numVerts = len(self.verts)
self.curve.reset(numVerts)
self.curve.setOrder(self.order)
defaultNodePath = None
defaultPoint = (0, 0, 0)
defaultColor = (1, 1, 1, 1)
defaultThickness = 1
useVertexColor = self.ropeNode.getUseVertexColor()
useVertexThickness = self.ropeNode.getUseVertexThickness()
vcd = self.ropeNode.getVertexColorDimension()
vtd = self.ropeNode.getVertexThicknessDimension()
for i in range(numVerts):
v = self.verts[i]
if isinstance(v, types.TupleType):
nodePath, point = v
color = defaultColor
thickness = defaultThickness
else:
nodePath = v.get('node', defaultNodePath)
point = v.get('point', defaultPoint)
color = v.get('color', defaultColor)
thickness = v.get('thickness', defaultThickness)
if isinstance(point, types.TupleType):
if (len(point) >= 4):
self.curve.setVertex(i, VBase4(point[0], point[1], point[2], point[3]))
else:
self.curve.setVertex(i, VBase3(point[0], point[1], point[2]))
else:
self.curve.setVertex(i, point)
if nodePath:
self.curve.setVertexSpace(i, nodePath)
if useVertexColor:
self.curve.setExtendedVertex(i, vcd + 0, color[0])
self.curve.setExtendedVertex(i, vcd + 1, color[1])
self.curve.setExtendedVertex(i, vcd + 2, color[2])
self.curve.setExtendedVertex(i, vcd + 3, color[3])
if useVertexThickness:
self.curve.setExtendedVertex(i, vtd, thickness)
        if self.knots is not None:
for i in range(len(self.knots)):
self.curve.setKnot(i, self.knots[i])
self.ropeNode.resetBound(self)
def getPoints(self, len):
"""Returns a list of len points, evenly distributed in
parametric space on the rope, in the coordinate space of the
Rope itself."""
result = self.curve.evaluate(self)
startT = result.getStartT()
sizeT = result.getEndT() - startT
numPts = len
ropePts = []
for i in range(numPts):
pt = Point3()
result.evalPoint(sizeT * i / float(numPts - 1) + startT, pt)
ropePts.append(pt)
return ropePts
| bsd-3-clause | 3,706,250,605,326,841,000 | 39.20979 | 91 | 0.601565 | false |
cheery/pytci | character_stream.py | 1 | 2684 | """
Preprocessing tokens in C are particularly hard to scan, and
this character stream makes the job slightly easier.
"""
class CharacterStream(object):
def __init__(self, generator, line=1, filename=""):
self.comments = True
self.generator = discard_comments(self, logical_characters(self, generator))
self.line = line
self.filename = filename
self.character = '\n' # Marks beginning of new line
# Doesn't go through line incrementing, so
# it is sufficient for denoting beginning of the first line.
def get_next(self):
assert self.character, "error in tokenizing"
character = self.character
self.character = pull(self.generator)
return character
def is_space(self):
return self.character in spaces
def skip_spaces(self):
while self.character in spaces:
self.get_next()
def skip_spaces_and_newlines(self):
while self.character in spaces_and_newlines:
self.get_next()
@property
def position(self):
return self.line, self.filename
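# Illustrative use (hypothetical input, shown only as a sketch):
#   stream = CharacterStream(iter("int x; /* note */\n"), filename="example.c")
#   while stream.character:
#       ch = stream.get_next()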
def logical_characters(stream, sequence):
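    # Line splicing: a backslash directly followed by a newline is dropped
    # (the newline still bumps stream.line), as in C translation phase 2.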
backslash = False
for ch in sequence:
if backslash:
backslash = False
if ch == "\n":
stream.line += 1
continue
yield "\\"
if ch != "\\":
yield ch
continue
elif ch != "\\":
if ch == "\n":
stream.line += 1
yield ch
else:
backslash = True
if backslash:
yield "\\"
def discard_comments(stream, sequence):
"C tokenizer is fat!"
state = 0
for ch in sequence:
if state == 0:
if ch != '/':
yield ch
elif stream.comments:
state = 1
else:
yield ch
elif state == 1:
if ch == '/':
state = 2
elif ch == '*':
state = 3
else:
yield '/'
yield ch
state = 0
elif state == 2 and ch == '\n':
yield ch
state = 0
elif state == 3 and ch == '*':
state = 4
elif state == 4:
if ch == '/':
yield ' '
state = 0
elif ch != '*':
state = 3
def pull(generator):
try:
return generator.next()
except StopIteration as stop:
return ""
spaces = set(['\x00', ' ', '\t', '\r'])
spaces_and_newlines = set(['\x00', ' ', '\t', '\r', '\n'])
| mit | -5,332,401,609,422,961,000 | 26.387755 | 90 | 0.477273 | false |
abbot/geocaching-py | save-for-address.py | 1 | 2065 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import operator
import optparse
import sys
from geocaching import Geo, tools
def main():
geo = Geo()
parser = optparse.OptionParser(usage="%prog radius address...",
description="Download caches for the given address and radius in km")
opts, args = parser.parse_args()
if len(args) < 2:
parser.print_help()
sys.exit(1)
radius = int(args[0])
address = ' '.join(args[1:])
print "Logged in as %s" % geo.login_from_config()
count, pages, et, url = geo.find_by_address(address, radius)
print "Found %d caches on %d result pages." % (count, pages)
print "Please enter the number of caches to download"
print "(or just hit enter for all):"
count = raw_input().strip()
if count == '':
count = None
else:
count = int(count)
caches = geo.get_search_results(et, url, count)
print "%-12s|%-8s|%s| %s" % ("Type", "Code", "X", "Title")
print "------------+--------+-+-----------------------------"
for cache in caches:
print "%-12s|%-8s|%s| %s" % (cache[0], cache[2], cache[3] and '-' or '+', cache[4])
print "Continue to download (only available caches will be downloaded)?"
yesno = raw_input().strip().lower()
if yesno[0] != 'y':
sys.exit(0)
valid = [cache[1] for cache in caches if not cache[3]]
for i, guid in enumerate(valid):
print ">>>>>>>>> Downloading information for cache %d of %d" % (i+1, len(valid))
gpx = geo.cache_gpx(guid)
if gpx is None:
print "Cache %s not found." % arg
continue
geocode = tools.geocode(gpx)
if geocode is None:
print "Can't parse cache %s, skipping", arg
continue
filename = "%s.gpx" % geocode
gpx.write(open(filename, "w"),
encoding="utf-8", xml_declaration=True,
pretty_print=True)
print ">>>>>>>>> Wrote %s" % filename
if __name__ == '__main__':
main()
| bsd-3-clause | 6,273,749,173,915,928,000 | 32.306452 | 104 | 0.539952 | false |
sftd/scons | scons-local/SCons/Tool/packaging/targz.py | 8 | 1818 | """SCons.Tool.Packaging.targz
The targz SRC packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/targz.py 2014/03/02 14:18:15 garyo"
from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Tar']
bld.set_suffix('.tar.gz')
target, source = stripinstallbuilder(target, source, env)
target, source = putintopackageroot(target, source, env, PACKAGEROOT)
return bld(env, target, source, TARFLAGS='-zc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -6,276,820,009,228,197,000 | 40.318182 | 119 | 0.749725 | false |