repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (15 classes) | var_hash (int64) | doc_hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|---
cedi4155476/QGIS | python/plugins/fTools/tools/doSpatialJoin.py | 9 | 12355 | # -*- coding: utf-8 -*-
#-----------------------------------------------------------
#
# fTools
# Copyright (C) 2008-2011 Carson Farmer
# EMAIL: carson.farmer (at) gmail.com
# WEB : http://www.ftools.ca/fTools.html
#
# A collection of data management and analysis tools for vector data
#
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt4.QtCore import QObject, SIGNAL, QVariant, QFile
from PyQt4.QtGui import QDialog, QDialogButtonBox, QMessageBox
from qgis.core import QGis, QgsVectorFileWriter, QgsVectorLayer, QgsMapLayerRegistry, QgsFields, QgsField, QgsFeature, QgsGeometry, NULL
import ftools_utils
from ui_frmSpatialJoin import Ui_Dialog
def myself(L):
#median computation
nVal = len(L)
if nVal == 1:
return L[0]
L.sort()
#test for list length
medianVal = 0
if nVal > 1:
if (nVal % 2) == 0:
                #indexing begins at 0, so subtract 1 from the standard median index
medianVal = 0.5 * ((L[(nVal) / 2 - 1]) + (L[(nVal) / 2]))
else:
medianVal = L[(nVal + 1) / 2 - 1]
return medianVal
def filter_null(vals):
"""Takes an iterator of values and returns a new iterator returning the same values but skipping any NULL values"""
return (v for v in vals if v is not None)
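# A minimal illustrative sketch (not part of the original plugin) of how the two
# helpers above combine when summarising a numeric join field; the values are
# hypothetical:
#   vals = [4, None, 1, 3]
#   non_null = list(filter_null(vals))   # [4, 1, 3]
#   myself(non_null)                     # median -> 3
#   sum(filter_null(vals))               # SUM statistic -> 8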
class Dialog(QDialog, Ui_Dialog):
def __init__(self, iface):
QDialog.__init__(self, iface.mainWindow())
self.iface = iface
# Set up the user interface from Designer.
self.setupUi(self)
QObject.connect(self.toolOut, SIGNAL("clicked()"), self.outFile)
self.setWindowTitle(self.tr("Join attributes by location"))
self.buttonOk = self.buttonBox_2.button(QDialogButtonBox.Ok)
# populate layer list
self.progressBar.setValue(0)
layers = ftools_utils.getLayerNames([QGis.Point, QGis.Line, QGis.Polygon])
self.inShape.addItems(layers)
self.joinShape.addItems(layers)
def accept(self):
self.buttonOk.setEnabled(False)
if self.inShape.currentText() == "":
QMessageBox.information(self, self.tr("Spatial Join"), self.tr("Please specify target vector layer"))
elif self.outShape.text() == "":
QMessageBox.information(self, self.tr("Spatial Join"), self.tr("Please specify output shapefile"))
elif self.joinShape.currentText() == "":
QMessageBox.information(self, self.tr("Spatial Join"), self.tr("Please specify join vector layer"))
        elif self.rdoSummary.isChecked() and not (self.chkSum.isChecked() or self.chkMean.isChecked() or self.chkMin.isChecked() or self.chkMax.isChecked() or self.chkMedian.isChecked()):
QMessageBox.information(self, self.tr("Spatial Join"), self.tr("Please specify at least one summary statistic"))
else:
inName = self.inShape.currentText()
joinName = self.joinShape.currentText()
outPath = self.outShape.text()
if self.rdoSummary.isChecked():
summary = True
sumList = []
if self.chkSum.isChecked():
sumList.append("SUM")
if self.chkMean.isChecked():
sumList.append("MEAN")
if self.chkMin.isChecked():
sumList.append("MIN")
if self.chkMax.isChecked():
sumList.append("MAX")
if self.chkMedian.isChecked():
sumList.append("MED")
else:
summary = False
sumList = ["all"]
if self.rdoKeep.isChecked():
keep = True
else:
keep = False
outName = ftools_utils.getShapefileName(outPath)
res = self.compute(inName, joinName, outPath, summary, sumList, keep, self.progressBar)
self.outShape.clear()
if res:
addToTOC = QMessageBox.question(
self, self.tr("Spatial Join"),
self.tr("Created output shapefile:\n%s\n\nWould you like to add the new layer to the TOC?") % (unicode(outPath)),
QMessageBox.Yes, QMessageBox.No, QMessageBox.NoButton)
if addToTOC == QMessageBox.Yes:
self.vlayer = QgsVectorLayer(outPath, unicode(outName), "ogr")
QgsMapLayerRegistry.instance().addMapLayers([self.vlayer])
self.progressBar.setValue(0)
self.buttonOk.setEnabled(True)
def outFile(self):
self.outShape.clear()
(self.shapefileName, self.encoding) = ftools_utils.saveDialog(self)
if self.shapefileName is None or self.encoding is None:
return
self.outShape.setText(self.shapefileName)
def compute(self, inName, joinName, outName, summary, sumList, keep, progressBar):
layer1 = ftools_utils.getVectorLayerByName(inName)
provider1 = layer1.dataProvider()
fieldList1 = ftools_utils.getFieldList(layer1)
layer2 = ftools_utils.getVectorLayerByName(joinName)
provider2 = layer2.dataProvider()
fieldList2 = ftools_utils.getFieldList(layer2)
fieldList = QgsFields()
if provider1.crs() != provider2.crs():
QMessageBox.warning(self, self.tr("CRS warning!"), self.tr("Warning: Input layers have non-matching CRS.\nThis may cause unexpected results."))
if not summary:
fieldList2 = ftools_utils.testForUniqueness(fieldList1, fieldList2)
seq = range(0, len(fieldList1) + len(fieldList2))
fieldList1.extend(fieldList2)
fieldList1 = dict(zip(seq, fieldList1))
else:
numFields = {}
for j in xrange(len(fieldList2)):
if fieldList2[j].type() == QVariant.Int or fieldList2[j].type() == QVariant.Double:
numFields[j] = []
for i in sumList:
field = QgsField(i + unicode(fieldList2[j].name()), QVariant.Double, "real", 24, 16, self.tr("Summary field"))
fieldList.append(field)
field = QgsField("COUNT", QVariant.Double, "real", 24, 16, self.tr("Summary field"))
fieldList.append(field)
fieldList2 = ftools_utils.testForUniqueness(fieldList1, fieldList)
fieldList1.extend(fieldList)
seq = range(0, len(fieldList1))
fieldList1 = dict(zip(seq, fieldList1))
sRs = provider1.crs()
progressBar.setValue(13)
check = QFile(self.shapefileName)
if check.exists():
if not QgsVectorFileWriter.deleteShapeFile(self.shapefileName):
QMessageBox.warning(
self, self.tr('Error deleting shapefile'),
self.tr("Can't delete existing shapefile\n%s") % (self.shapefileName))
return False
fields = QgsFields()
for f in fieldList1.values():
fields.append(f)
writer = QgsVectorFileWriter(self.shapefileName, self.encoding, fields, provider1.geometryType(), sRs)
#writer = QgsVectorFileWriter(outName, "UTF-8", fieldList1, provider1.geometryType(), sRs)
inFeat = QgsFeature()
outFeat = QgsFeature()
inFeatB = QgsFeature()
inGeom = QgsGeometry()
progressBar.setValue(15)
start = 15.00
add = 85.00 / provider1.featureCount()
index = ftools_utils.createIndex(provider2)
# cache all features from provider2 to avoid huge number of feature requests in the inner loop
mapP2 = {}
for f in provider2.getFeatures():
mapP2[f.id()] = QgsFeature(f)
fit1 = provider1.getFeatures()
while fit1.nextFeature(inFeat):
inGeom = inFeat.geometry()
atMap1 = inFeat.attributes()
outFeat.setGeometry(inGeom)
none = True
joinList = []
if inGeom.type() == QGis.Point:
#(check, joinList) = layer2.featuresInRectangle(inGeom.buffer(10,2).boundingBox(), True, True)
#layer2.select(inGeom.buffer(10,2).boundingBox(), False)
#joinList = layer2.selectedFeatures()
joinList = index.intersects(inGeom.buffer(10, 2).boundingBox())
if len(joinList) > 0:
check = 0
else:
check = 1
else:
#(check, joinList) = layer2.featuresInRectangle(inGeom.boundingBox(), True, True)
#layer2.select(inGeom.boundingBox(), False)
#joinList = layer2.selectedFeatures()
joinList = index.intersects(inGeom.boundingBox())
if len(joinList) > 0:
check = 0
else:
check = 1
if check == 0:
count = 0
for i in joinList:
inFeatB = mapP2[i] # cached feature from provider2
if inGeom.intersects(inFeatB.geometry()):
count = count + 1
none = False
atMap2 = inFeatB.attributes()
if not summary:
atMap = atMap1
atMap.extend(atMap2)
atMap = dict(zip(seq, atMap))
break
else:
for j in numFields.keys():
numFields[j].append(atMap2[j])
if summary and not none:
atMap = atMap1
for j in numFields.keys():
for k in sumList:
if k == "SUM":
atMap.append(sum(filter_null(numFields[j])))
elif k == "MEAN":
try:
nn_count = sum(1 for _ in filter_null(numFields[j]))
atMap.append(sum(filter_null(numFields[j])) / nn_count)
except ZeroDivisionError:
atMap.append(NULL)
elif k == "MIN":
try:
atMap.append(min(filter_null(numFields[j])))
except ValueError:
atMap.append(NULL)
elif k == "MED":
atMap.append(myself(numFields[j]))
else:
try:
atMap.append(max(filter_null(numFields[j])))
except ValueError:
atMap.append(NULL)
numFields[j] = []
atMap.append(count)
atMap = dict(zip(seq, atMap))
if none:
outFeat.setAttributes(atMap1)
else:
outFeat.setAttributes(atMap.values())
if keep: # keep all records
writer.addFeature(outFeat)
else: # keep only matching records
if not none:
writer.addFeature(outFeat)
start = start + add
progressBar.setValue(start)
del writer
return True
| gpl-2.0 | 9,062,584,997,537,995,000 | -5,463,727,048,236,942,000 | 43.764493 | 215 | 0.542533 | false |
z-plot/z-plot | examples/basics-svg/verticalbars.py | 1 | 2343 | #! /usr/bin/env python
from zplot import *
# populate zplot table from data file
t = table('verticalbars.data')
# create the svg file we'll use as our canvas
canvas = svg('verticalbars.svg')
# on the x-axis, we want categories, not numbers. Thus, we
# determine the number of categories by checking the max
# "rownumber" (a field automatically added by zplot). We want a
# half bar width (0.5) to the left and right of the bar locations
# so we don't overflow the drawable.
d = drawable(canvas, xrange=[-0.5,t.getmax('rownumber')+0.5], yrange=[0,80])
# xmanual is a list of the form [(label1,x1), (label2,x2), ...].
# We want to use the "op" field from the data file as our labels
# and use "rownumber" as our x coordinate.
axis(d, xtitle='Operation', xmanual=t.query(select='op,rownumber'),
ytitle='Latency (ms)', yauto=[0,80,20])
# we are going to create several bars with similar arguments. One
# easy way to do this is to put all the arguments in a dict, and
# use Python's special syntax ("**") for using the dict as named
# args. Then we can tweak the args between each call to
# verticalbars.
#
# yfield determines the bar height, and stackfields determines
# where the bottom of a bar starts. This is useful for showing
# several bar sections to indicate a breakdown. After the first
# bar, we append the previous yfield to stackfields to stack the bars.
p = plotter()
L = legend()
barargs = {'drawable':d, 'table':t, 'xfield':'rownumber',
'linewidth':0.5, 'fill':True, 'barwidth':0.8,
'legend':L, 'stackfields':[]}
# compute bar
barargs['yfield'] = 'compute'
barargs['legendtext'] = 'CPU'
barargs['fillcolor'] = 'red'
p.verticalbars(**barargs)
# network bar
barargs['stackfields'].append(barargs['yfield'])
barargs['yfield'] = 'network'
barargs['legendtext'] = 'Net'
barargs['fillcolor'] = 'green'
p.verticalbars(**barargs)
# storage bar
barargs['stackfields'].append(barargs['yfield'])
barargs['yfield'] = 'storage'
barargs['legendtext'] = 'Disk'
barargs['fillcolor'] = 'blue'
p.verticalbars(**barargs)
# we want legend entries to be all on one line. Thus, we use
# skipnext=1 to get one row. We specify the horizontal space
# between legend symbols (not considering text) with skipspace.
L.draw(canvas, coord=[d.left()+30, d.top()-5], skipnext=1, skipspace=40)
canvas.render()
| bsd-3-clause | -1,980,210,417,752,998,700 | 6,558,931,188,578,208,000 | 35.046154 | 76 | 0.703799 | false |
jocave/snapcraft | snapcraft/_options.py | 3 | 4856 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import multiprocessing
import os
import platform
logger = logging.getLogger(__name__)
_ARCH_TRANSLATIONS = {
'armv7l': {
'kernel': 'arm',
'deb': 'armhf',
'cross-compiler-prefix': 'arm-linux-gnueabihf-',
'cross-build-packages': ['gcc-arm-linux-gnueabihf'],
'triplet': 'arm-linux-gnueabihf',
},
'aarch64': {
'kernel': 'arm64',
'deb': 'arm64',
'cross-compiler-prefix': 'aarch64-linux-gnu-',
'cross-build-packages': ['gcc-aarch64-linux-gnu'],
'triplet': 'aarch64-linux-gnu',
},
'i686': {
'kernel': 'x86',
'deb': 'i386',
'triplet': 'i386-linux-gnu',
},
'ppc64le': {
'kernel': 'powerpc',
'deb': 'ppc64el',
'cross-compiler-prefix': 'powerpc64le-linux-gnu-',
'cross-build-packages': ['gcc-powerpc64le-linux-gnu'],
'triplet': 'powerpc64le-linux-gnu',
},
'x86_64': {
'kernel': 'x86',
'deb': 'amd64',
'triplet': 'x86_64-linux-gnu',
},
's390x': {
'kernel': 's390x',
'deb': 's390x',
'cross-compiler-prefix': 's390x-linux-gnu-',
'cross-build-packages': ['gcc-s390x-linux-gnu'],
'triplet': 's390x-linux-gnu',
}
}
class ProjectOptions:
@property
def use_geoip(self):
return self.__use_geoip
@property
def parallel_builds(self):
return self.__parallel_builds
@property
def parallel_build_count(self):
build_count = 1
if self.__parallel_builds:
try:
build_count = multiprocessing.cpu_count()
except NotImplementedError:
logger.warning(
'Unable to determine CPU count; disabling parallel builds')
return build_count
@property
def is_cross_compiling(self):
return self.__target_machine != self.__host_machine
@property
def cross_compiler_prefix(self):
try:
return self.__machine_info['cross-compiler-prefix']
except KeyError:
raise EnvironmentError(
                'Cross compilation not supported for target arch {!r}'.format(
                    self.__target_machine))
@property
def additional_build_packages(self):
packages = []
if self.is_cross_compiling:
packages.extend(self.__machine_info.get(
'cross-build-packages', []))
return packages
@property
def arch_triplet(self):
return self.__machine_info['triplet']
@property
def deb_arch(self):
return self.__machine_info['deb']
@property
def kernel_arch(self):
return self.__machine_info['kernel']
@property
def local_plugins_dir(self):
return os.path.join(self.parts_dir, 'plugins')
@property
def parts_dir(self):
return os.path.join(self.__project_dir, 'parts')
@property
def stage_dir(self):
return os.path.join(self.__project_dir, 'stage')
@property
def snap_dir(self):
return os.path.join(self.__project_dir, 'prime')
def __init__(self, use_geoip=False, parallel_builds=True,
target_deb_arch=None):
# TODO: allow setting a different project dir and check for
# snapcraft.yaml
self.__project_dir = os.getcwd()
self.__use_geoip = use_geoip
self.__parallel_builds = parallel_builds
self._set_machine(target_deb_arch)
def _set_machine(self, target_deb_arch):
self.__host_machine = platform.machine()
if not target_deb_arch:
self.__target_machine = self.__host_machine
else:
self.__target_machine = _find_machine(target_deb_arch)
logger.info('Setting target machine to {!r}'.format(
target_deb_arch))
self.__machine_info = _ARCH_TRANSLATIONS[self.__target_machine]
def _find_machine(deb_arch):
for machine in _ARCH_TRANSLATIONS:
if _ARCH_TRANSLATIONS[machine].get('deb', '') == deb_arch:
return machine
raise EnvironmentError(
'Cannot set machine from deb_arch {!r}'.format(deb_arch))
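# Illustrative usage sketch (not part of the original module), assuming an
# x86_64 host targeting armhf, which triggers the cross-compilation helpers:
#   options = ProjectOptions(target_deb_arch='armhf')
#   options.deb_arch                   # -> 'armhf'
#   options.arch_triplet               # -> 'arm-linux-gnueabihf'
#   options.cross_compiler_prefix      # -> 'arm-linux-gnueabihf-'
#   options.additional_build_packages  # -> ['gcc-arm-linux-gnueabihf']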
| gpl-3.0 | 7,774,707,117,463,719,000 | 1,776,374,321,257,862,400 | 28.430303 | 79 | 0.593081 | false |
svfat/django-docs | setup.py | 2 | 1415 | import os
from setuptools import setup, find_packages
def read_file(filename):
"""Read a file into a string"""
path = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(path, filename)
try:
return open(filepath).read()
except IOError:
return ''
try:
REQUIREMENTS = read_file('requirements.txt').splitlines()
except:
REQUIREMENTS = [
'Django',
]
setup(
name='django-docs',
version=__import__('docs').__version__,
author='Evgeny Demchenko',
author_email='[email protected]',
packages=find_packages(),
include_package_data=True,
url='https://github.com/littlepea/django-docs',
license='BSD',
description=u' '.join(__import__('docs').__doc__.splitlines()).strip(),
classifiers=[
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Framework :: Django',
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
],
long_description=read_file('README.rst'),
test_suite='docs.tests.runtests.runtests',
tests_require=[
'django-nose',
'coverage',
'django-coverage',
],
zip_safe=False,
install_requires=REQUIREMENTS,
)
| bsd-3-clause | -3,102,324,235,263,189,500 | 1,591,037,022,846,030,600 | 26.745098 | 75 | 0.600707 | false |
ChrisBeaumont/luigi | test/date_interval_test.py | 13 | 5774 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from helpers import unittest
import luigi
from luigi.parameter import DateIntervalParameter as DI
class DateIntervalTest(unittest.TestCase):
def test_date(self):
di = DI().parse('2012-01-01')
self.assertEqual(di.dates(), [datetime.date(2012, 1, 1)])
self.assertEqual(di.next().dates(), [datetime.date(2012, 1, 2)])
self.assertEqual(di.prev().dates(), [datetime.date(2011, 12, 31)])
self.assertEqual(str(di), '2012-01-01')
def test_month(self):
di = DI().parse('2012-01')
self.assertEqual(di.dates(), [datetime.date(2012, 1, 1) + datetime.timedelta(i) for i in range(31)])
self.assertEqual(di.next().dates(), [datetime.date(2012, 2, 1) + datetime.timedelta(i) for i in range(29)])
self.assertEqual(di.prev().dates(), [datetime.date(2011, 12, 1) + datetime.timedelta(i) for i in range(31)])
self.assertEqual(str(di), '2012-01')
def test_year(self):
di = DI().parse('2012')
self.assertEqual(di.dates(), [datetime.date(2012, 1, 1) + datetime.timedelta(i) for i in range(366)])
self.assertEqual(di.next().dates(), [datetime.date(2013, 1, 1) + datetime.timedelta(i) for i in range(365)])
self.assertEqual(di.prev().dates(), [datetime.date(2011, 1, 1) + datetime.timedelta(i) for i in range(365)])
self.assertEqual(str(di), '2012')
def test_week(self):
# >>> datetime.date(2012, 1, 1).isocalendar()
# (2011, 52, 7)
# >>> datetime.date(2012, 12, 31).isocalendar()
# (2013, 1, 1)
di = DI().parse('2011-W52')
self.assertEqual(di.dates(), [datetime.date(2011, 12, 26) + datetime.timedelta(i) for i in range(7)])
self.assertEqual(di.next().dates(), [datetime.date(2012, 1, 2) + datetime.timedelta(i) for i in range(7)])
self.assertEqual(str(di), '2011-W52')
di = DI().parse('2013-W01')
self.assertEqual(di.dates(), [datetime.date(2012, 12, 31) + datetime.timedelta(i) for i in range(7)])
self.assertEqual(di.prev().dates(), [datetime.date(2012, 12, 24) + datetime.timedelta(i) for i in range(7)])
self.assertEqual(str(di), '2013-W01')
def test_interval(self):
di = DI().parse('2012-01-01-2012-02-01')
self.assertEqual(di.dates(), [datetime.date(2012, 1, 1) + datetime.timedelta(i) for i in range(31)])
self.assertRaises(NotImplementedError, di.next)
self.assertRaises(NotImplementedError, di.prev)
self.assertEquals(di.to_string(), '2012-01-01-2012-02-01')
def test_exception(self):
self.assertRaises(ValueError, DI().parse, 'xyz')
def test_comparison(self):
a = DI().parse('2011')
b = DI().parse('2013')
c = DI().parse('2012')
self.assertTrue(a < b)
self.assertTrue(a < c)
self.assertTrue(b > c)
d = DI().parse('2012')
self.assertTrue(d == c)
self.assertEqual(d, min(c, b))
self.assertEqual(3, len(set([a, b, c, d])))
def test_comparison_different_types(self):
x = DI().parse('2012')
y = DI().parse('2012-01-01-2013-01-01')
self.assertRaises(TypeError, lambda: x == y)
def test_parameter_parse_and_default(self):
month = luigi.date_interval.Month(2012, 11)
other = luigi.date_interval.Month(2012, 10)
class MyTask(luigi.Task):
di = DI(default=month)
class MyTaskNoDefault(luigi.Task):
di = DI()
task = luigi.interface._ArgParseInterface().parse(["MyTask"])[0]
self.assertEqual(task.di, month)
task = luigi.interface._ArgParseInterface().parse(["MyTask", "--di", "2012-10"])[0]
self.assertEqual(task.di, other)
task = MyTask(month)
self.assertEqual(task.di, month)
task = MyTask(di=month)
self.assertEqual(task.di, month)
task = MyTask(other)
self.assertNotEquals(task.di, month)
def fail1():
luigi.interface._ArgParseInterface().parse(["MyTaskNoDefault"])[0]
self.assertRaises(luigi.parameter.MissingParameterException, fail1)
task = luigi.interface._ArgParseInterface().parse(["MyTaskNoDefault", "--di", "2012-10"])[0]
self.assertEqual(task.di, other)
def test_hours(self):
d = DI().parse('2015')
self.assertEquals(len(list(d.hours())), 24 * 365)
def test_cmp(self):
operators = [lambda x, y: x == y,
lambda x, y: x != y,
lambda x, y: x < y,
lambda x, y: x > y,
lambda x, y: x <= y,
lambda x, y: x >= y]
dates = [(1, 30, DI().parse('2015-01-01-2015-01-30')),
(1, 15, DI().parse('2015-01-01-2015-01-15')),
(10, 20, DI().parse('2015-01-10-2015-01-20')),
(20, 30, DI().parse('2015-01-20-2015-01-30'))]
for from_a, to_a, di_a in dates:
for from_b, to_b, di_b in dates:
for op in operators:
self.assertEquals(
op((from_a, to_a), (from_b, to_b)),
op(di_a, di_b))
| apache-2.0 | 7,394,293,193,814,205,000 | 1,556,661,220,776,836,000 | 39.950355 | 116 | 0.588673 | false |
cs-au-dk/Artemis | WebKit/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py | 1 | 7183 | # Copyright (c) 2010, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.net.layouttestresults import LayoutTestResults, ORWTResultsHTMLParser
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
class ORWTResultsHTMLParserTest(unittest.TestCase):
_example_results_html = """
<html>
<head>
<title>Layout Test Results</title>
</head>
<body>
<p>Tests that had stderr output:</p>
<table>
<tr>
<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/accessibility/aria-activedescendant-crash.html">accessibility/aria-activedescendant-crash.html</a></td>
<td><a href="accessibility/aria-activedescendant-crash-stderr.txt">stderr</a></td>
</tr>
<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/http/tests/security/canvas-remote-read-svg-image.html">http/tests/security/canvas-remote-read-svg-image.html</a></td>
<td><a href="http/tests/security/canvas-remote-read-svg-image-stderr.txt">stderr</a></td>
</tr>
</table><p>Tests that had no expected results (probably new):</p>
<table>
<tr>
<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/fast/repaint/no-caret-repaint-in-non-content-editable-element.html">fast/repaint/no-caret-repaint-in-non-content-editable-element.html</a></td>
<td><a href="fast/repaint/no-caret-repaint-in-non-content-editable-element-actual.txt">result</a></td>
</tr>
</table></body>
</html>
"""
_example_results_html_with_failing_tests = """
<html>
<head>
<title>Layout Test Results</title>
</head>
<body>
<p>Tests where results did not match expected results:</p>
<table>
<tr>
<td><a href="http://trac.webkit.org/export/91245/trunk/LayoutTests/compositing/plugins/composited-plugin.html">compositing/plugins/composited-plugin.html</a></td>
<td>
<a href="compositing/plugins/composited-plugin-expected.txt">expected</a>
</td>
<td>
<a href="compositing/plugins/composited-plugin-actual.txt">actual</a>
</td>
<td>
<a href="compositing/plugins/composited-plugin-diffs.txt">diff</a>
</td>
<td>
<a href="compositing/plugins/composited-plugin-pretty-diff.html">pretty diff</a>
</td>
</tr>
</table>
<p>Tests that had stderr output:</p>
<table>
<tr>
<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/accessibility/aria-activedescendant-crash.html">accessibility/aria-activedescendant-crash.html</a></td>
<td><a href="accessibility/aria-activedescendant-crash-stderr.txt">stderr</a></td>
</tr>
<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/http/tests/security/canvas-remote-read-svg-image.html">http/tests/security/canvas-remote-read-svg-image.html</a></td>
<td><a href="http/tests/security/canvas-remote-read-svg-image-stderr.txt">stderr</a></td>
</tr>
</table><p>Tests that had no expected results (probably new):</p>
<table>
<tr>
<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/fast/repaint/no-caret-repaint-in-non-content-editable-element.html">fast/repaint/no-caret-repaint-in-non-content-editable-element.html</a></td>
<td><a href="fast/repaint/no-caret-repaint-in-non-content-editable-element-actual.txt">result</a></td>
</tr>
</table></body>
</html>
"""
def test_parse_layout_test_results(self):
failures = [test_failures.FailureMissingResult(), test_failures.FailureMissingImageHash(), test_failures.FailureMissingImage()]
testname = 'fast/repaint/no-caret-repaint-in-non-content-editable-element.html'
expected_results = [test_results.TestResult(testname, failures)]
results = ORWTResultsHTMLParser.parse_results_html(self._example_results_html)
self.assertEqual(expected_results, results)
def test_failures_from_fail_row(self):
row = BeautifulSoup("<tr><td><a>test.hml</a></td><td><a>expected image</a></td><td><a>25%</a></td></tr>")
test_name = unicode(row.find("a").string)
# Even if the caller has already found the test name, findAll inside _failures_from_fail_row will see it again.
failures = OutputCapture().assert_outputs(self, ORWTResultsHTMLParser._failures_from_fail_row, [row])
self.assertEqual(len(failures), 1)
self.assertEqual(type(sorted(failures)[0]), test_failures.FailureImageHashMismatch)
row = BeautifulSoup("<tr><td><a>test.hml</a><a>foo</a></td></tr>")
expected_stderr = "Unhandled link text in results.html parsing: foo. Please file a bug against webkitpy.\n"
OutputCapture().assert_outputs(self, ORWTResultsHTMLParser._failures_from_fail_row, [row], expected_stderr=expected_stderr)
class LayoutTestResultsTest(unittest.TestCase):
def test_set_failure_limit_count(self):
results = LayoutTestResults([])
self.assertEquals(results.failure_limit_count(), None)
results.set_failure_limit_count(10)
self.assertEquals(results.failure_limit_count(), 10)
def test_results_from_string(self):
self.assertEqual(LayoutTestResults.results_from_string(None), None)
self.assertEqual(LayoutTestResults.results_from_string(""), None)
results = LayoutTestResults.results_from_string(ORWTResultsHTMLParserTest._example_results_html)
self.assertEqual(len(results.failing_tests()), 0)
def test_tests_matching_failure_types(self):
results = LayoutTestResults.results_from_string(ORWTResultsHTMLParserTest._example_results_html_with_failing_tests)
failing_tests = results.tests_matching_failure_types([test_failures.FailureTextMismatch])
self.assertEqual(len(results.failing_tests()), 1)
| gpl-3.0 | -6,708,348,564,911,715,000 | 7,609,408,793,729,637,000 | 48.19863 | 219 | 0.747459 | false |
Chitrank-Dixit/django-fcm | setup.py | 1 | 1550 | import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-fcm',
version='0.1.1',
packages=find_packages(),
include_package_data=True,
description='A Django package that enables sending messages using FCM (Firebase Cloud Messaging).',
long_description=README,
url='https://django-fcm.readthedocs.io/en/latest/',
author='Chitrank Dixit',
author_email='[email protected]',
zip_safe=False,
license='MIT License',
platforms=['any'],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.10', # replace "X.Y" as appropriate
'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
install_requires=[
'django>=1.9',
'djangorestframework>=3.3.2',
'pytz>=2015.7',
'requests>=2.9.1'
],
)
| mit | 7,120,973,331,287,593,000 | -2,124,481,090,322,335,500 | 34.227273 | 103 | 0.61871 | false |
kennedyshead/home-assistant | tests/components/zha/conftest.py | 2 | 7337 | """Test configuration for the ZHA component."""
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
import pytest
import zigpy
from zigpy.application import ControllerApplication
import zigpy.config
import zigpy.group
import zigpy.types
from homeassistant.components.zha import DOMAIN
import homeassistant.components.zha.core.const as zha_const
import homeassistant.components.zha.core.device as zha_core_device
from homeassistant.setup import async_setup_component
from .common import FakeDevice, FakeEndpoint, get_zha_gateway
from tests.common import MockConfigEntry
from tests.components.light.conftest import mock_light_profiles # noqa: F401
FIXTURE_GRP_ID = 0x1001
FIXTURE_GRP_NAME = "fixture group"
@pytest.fixture
def zigpy_app_controller():
"""Zigpy ApplicationController fixture."""
app = MagicMock(spec_set=ControllerApplication)
app.startup = AsyncMock()
app.shutdown = AsyncMock()
groups = zigpy.group.Groups(app)
groups.add_group(FIXTURE_GRP_ID, FIXTURE_GRP_NAME, suppress_event=True)
app.configure_mock(groups=groups)
type(app).ieee = PropertyMock()
app.ieee.return_value = zigpy.types.EUI64.convert("00:15:8d:00:02:32:4f:32")
type(app).nwk = PropertyMock(return_value=zigpy.types.NWK(0x0000))
type(app).devices = PropertyMock(return_value={})
return app
@pytest.fixture(name="config_entry")
async def config_entry_fixture(hass):
"""Fixture representing a config entry."""
entry = MockConfigEntry(
version=2,
domain=zha_const.DOMAIN,
data={
zigpy.config.CONF_DEVICE: {zigpy.config.CONF_DEVICE_PATH: "/dev/ttyUSB0"},
zha_const.CONF_RADIO_TYPE: "ezsp",
},
options={
zha_const.CUSTOM_CONFIGURATION: {
zha_const.ZHA_ALARM_OPTIONS: {
zha_const.CONF_ALARM_ARM_REQUIRES_CODE: False,
zha_const.CONF_ALARM_MASTER_CODE: "4321",
zha_const.CONF_ALARM_FAILED_TRIES: 2,
}
}
},
)
entry.add_to_hass(hass)
return entry
@pytest.fixture
def setup_zha(hass, config_entry, zigpy_app_controller):
"""Set up ZHA component."""
zha_config = {zha_const.CONF_ENABLE_QUIRKS: False}
p1 = patch(
"bellows.zigbee.application.ControllerApplication.new",
return_value=zigpy_app_controller,
)
async def _setup(config=None):
config = config or {}
with p1:
status = await async_setup_component(
hass, zha_const.DOMAIN, {zha_const.DOMAIN: {**zha_config, **config}}
)
assert status is True
await hass.async_block_till_done()
return _setup
@pytest.fixture
def channel():
"""Channel mock factory fixture."""
def channel(name: str, cluster_id: int, endpoint_id: int = 1):
ch = MagicMock()
ch.name = name
ch.generic_id = f"channel_0x{cluster_id:04x}"
ch.id = f"{endpoint_id}:0x{cluster_id:04x}"
ch.async_configure = AsyncMock()
ch.async_initialize = AsyncMock()
return ch
return channel
@pytest.fixture
def zigpy_device_mock(zigpy_app_controller):
"""Make a fake device using the specified cluster classes."""
def _mock_dev(
endpoints,
ieee="00:0d:6f:00:0a:90:69:e7",
manufacturer="FakeManufacturer",
model="FakeModel",
node_descriptor=b"\x02@\x807\x10\x7fd\x00\x00*d\x00\x00",
nwk=0xB79C,
patch_cluster=True,
):
"""Make a fake device using the specified cluster classes."""
device = FakeDevice(
zigpy_app_controller, ieee, manufacturer, model, node_descriptor, nwk=nwk
)
for epid, ep in endpoints.items():
endpoint = FakeEndpoint(manufacturer, model, epid)
endpoint.device = device
device.endpoints[epid] = endpoint
endpoint.device_type = ep["device_type"]
profile_id = ep.get("profile_id")
if profile_id:
endpoint.profile_id = profile_id
for cluster_id in ep.get("in_clusters", []):
endpoint.add_input_cluster(cluster_id, _patch_cluster=patch_cluster)
for cluster_id in ep.get("out_clusters", []):
endpoint.add_output_cluster(cluster_id, _patch_cluster=patch_cluster)
return device
return _mock_dev
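# Illustrative sketch (not part of the original fixtures): a test would request
# the factory above and describe the fake device's endpoints and clusters, e.g.
#   zigpy_dev = zigpy_device_mock(
#       {1: {"in_clusters": [0, 6], "out_clusters": [], "device_type": 0x0100}}
#   )
#   zha_dev = await zha_device_joined(zigpy_dev)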
@pytest.fixture
def zha_device_joined(hass, setup_zha):
"""Return a newly joined ZHA device."""
async def _zha_device(zigpy_dev):
await setup_zha()
zha_gateway = get_zha_gateway(hass)
await zha_gateway.async_device_initialized(zigpy_dev)
await hass.async_block_till_done()
return zha_gateway.get_device(zigpy_dev.ieee)
return _zha_device
@pytest.fixture
def zha_device_restored(hass, zigpy_app_controller, setup_zha, hass_storage):
"""Return a restored ZHA device."""
async def _zha_device(zigpy_dev, last_seen=None):
zigpy_app_controller.devices[zigpy_dev.ieee] = zigpy_dev
if last_seen is not None:
hass_storage[f"{DOMAIN}.storage"] = {
"key": f"{DOMAIN}.storage",
"version": 1,
"data": {
"devices": [
{
"ieee": str(zigpy_dev.ieee),
"last_seen": last_seen,
"name": f"{zigpy_dev.manufacturer} {zigpy_dev.model}",
}
],
},
}
await setup_zha()
zha_gateway = hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
return zha_gateway.get_device(zigpy_dev.ieee)
return _zha_device
@pytest.fixture(params=["zha_device_joined", "zha_device_restored"])
def zha_device_joined_restored(request):
"""Join or restore ZHA device."""
named_method = request.getfixturevalue(request.param)
named_method.name = request.param
return named_method
@pytest.fixture
def zha_device_mock(hass, zigpy_device_mock):
"""Return a zha Device factory."""
def _zha_device(
endpoints=None,
ieee="00:11:22:33:44:55:66:77",
manufacturer="mock manufacturer",
model="mock model",
node_desc=b"\x02@\x807\x10\x7fd\x00\x00*d\x00\x00",
patch_cluster=True,
):
if endpoints is None:
endpoints = {
1: {
"in_clusters": [0, 1, 8, 768],
"out_clusters": [0x19],
"device_type": 0x0105,
},
2: {
"in_clusters": [0],
"out_clusters": [6, 8, 0x19, 768],
"device_type": 0x0810,
},
}
zigpy_device = zigpy_device_mock(
endpoints, ieee, manufacturer, model, node_desc, patch_cluster=patch_cluster
)
zha_device = zha_core_device.ZHADevice(hass, zigpy_device, MagicMock())
return zha_device
return _zha_device
@pytest.fixture
def hass_disable_services(hass):
"""Mock service register."""
with patch.object(hass.services, "async_register"), patch.object(
hass.services, "has_service", return_value=True
):
yield hass
| apache-2.0 | -1,359,862,552,603,227,600 | 4,227,615,057,689,470,500 | 30.761905 | 88 | 0.598201 | false |
klynch/emacs.d-old | python-libs/ropemode/refactor.py | 23 | 15727 | import re
import rope.base.change
import rope.contrib.generate
import rope.refactor.change_signature
import rope.refactor.extract
import rope.refactor.inline
import rope.refactor.introduce_factory
import rope.refactor.method_object
import rope.refactor.move
import rope.refactor.rename
import rope.refactor.restructure
import rope.refactor.usefunction
from rope.base import taskhandle
from ropemode import dialog, filter
class Refactoring(object):
key = None
confs = {}
optionals = {}
saveall = True
def __init__(self, interface, env):
self.interface = interface
self.env = env
def show(self, initial_asking=True):
self.interface._check_project()
self.interface._save_buffers(only_current=not self.saveall)
self._create_refactoring()
action, result = dialog.show_dialog(
self.interface._askdata, ['perform', 'preview', 'cancel'],
self._get_confs(), self._get_optionals(),
initial_asking=initial_asking)
if action == 'cancel':
self.env.message('Cancelled!')
return
def calculate(handle):
return self._calculate_changes(result, handle)
name = 'Calculating %s changes' % self.name
changes = runtask(self.env, calculate, name=name)
if action == 'perform':
self._perform(changes)
if action == 'preview':
if changes is not None:
diffs = changes.get_description()
if self.env.preview_changes(diffs):
self._perform(changes)
else:
self.env.message('Thrown away!')
else:
self.env.message('No changes!')
@property
def project(self):
return self.interface.project
@property
def resource(self):
return self.interface._get_resource()
@property
def offset(self):
return self.env.get_offset()
@property
def region(self):
return self.env.get_region()
@property
def name(self):
return refactoring_name(self.__class__)
def _calculate_changes(self, option_values, task_handle):
pass
def _create_refactoring(self):
pass
def _done(self):
pass
def _perform(self, changes):
if changes is None:
self.env.message('No changes!')
return
def perform(handle, self=self, changes=changes):
self.project.do(changes, task_handle=handle)
self.interface._reload_buffers(changes)
self._done()
runtask(self.env, perform, 'Making %s changes' % self.name,
interrupts=False)
self.env.message(str(changes.description) + ' finished')
def _get_confs(self):
return self.confs
def _get_optionals(self):
return self.optionals
@property
def resources_option(self):
return dialog.Data('Files to apply this refactoring on: ',
decode=self._decode_resources)
def _decode_resources(self, value):
return _resources(self.project, value)
class Rename(Refactoring):
key = 'r'
saveall = True
def _create_refactoring(self):
self.renamer = rope.refactor.rename.Rename(
self.project, self.resource, self.offset)
def _calculate_changes(self, values, task_handle):
return self.renamer.get_changes(task_handle=task_handle, **values)
def _get_optionals(self):
opts = {}
opts['docs'] = dialog.Boolean('Search comments and docs: ', True)
if self.renamer.is_method():
opts['in_hierarchy'] = dialog.Boolean('Rename methods in '
'class hierarchy: ')
opts['resources'] = self.resources_option
opts['unsure'] = dialog.Data('Unsure occurrences: ',
decode=self._decode_unsure,
values=['ignore', 'match'],
default='ignore')
return opts
def _get_confs(self):
oldname = str(self.renamer.get_old_name())
return {'new_name': dialog.Data('New name: ', default=oldname)}
def _decode_unsure(self, value):
unsure = value == 'match'
return lambda occurrence: unsure
class RenameCurrentModule(Rename):
key = '1 r'
offset = None
class Restructure(Refactoring):
key = 'x'
confs = {'pattern': dialog.Data('Restructuring pattern: '),
'goal': dialog.Data('Restructuring goal: ')}
def _calculate_changes(self, values, task_handle):
restructuring = rope.refactor.restructure.Restructure(
self.project, values['pattern'], values['goal'],
args=values['args'], imports=values['imports'])
return restructuring.get_changes(resources=values['resources'],
task_handle=task_handle)
def _get_optionals(self):
return {
'args': dialog.Data('Arguments: ', decode=self._decode_args),
'imports': dialog.Data('Imports: ', decode=self._decode_imports),
'resources': self.resources_option}
def _decode_args(self, value):
if value:
args = {}
for raw_check in value.split('\n'):
if raw_check:
key, value = raw_check.split(':', 1)
args[key.strip()] = value.strip()
return args
def _decode_imports(self, value):
if value:
return [line.strip() for line in value.split('\n')]
class UseFunction(Refactoring):
key = 'u'
def _create_refactoring(self):
self.user = rope.refactor.usefunction.UseFunction(
self.project, self.resource, self.offset)
def _calculate_changes(self, values, task_handle):
return self.user.get_changes(task_handle=task_handle, **values)
def _get_optionals(self):
return {'resources': self.resources_option}
class Move(Refactoring):
key = 'v'
def _create_refactoring(self):
self.mover = rope.refactor.move.create_move(self.project,
self.resource,
self.offset)
def _calculate_changes(self, values, task_handle):
destination = values['destination']
resources = values.get('resources', None)
if isinstance(self.mover, rope.refactor.move.MoveGlobal):
return self._move_global(destination, resources, task_handle)
if isinstance(self.mover, rope.refactor.move.MoveModule):
return self._move_module(destination, resources, task_handle)
if isinstance(self.mover, rope.refactor.move.MoveMethod):
return self._move_method(destination, resources, task_handle)
def _move_global(self, dest, resources, handle):
destination = self.project.pycore.find_module(dest)
return self.mover.get_changes(
destination, resources=resources, task_handle=handle)
def _move_method(self, dest, resources, handle):
return self.mover.get_changes(
dest, self.mover.get_method_name(),
resources=resources, task_handle=handle)
def _move_module(self, dest, resources, handle):
destination = self.project.pycore.find_module(dest)
return self.mover.get_changes(
destination, resources=resources, task_handle=handle)
def _get_confs(self):
if isinstance(self.mover, rope.refactor.move.MoveGlobal):
prompt = 'Destination module: '
if isinstance(self.mover, rope.refactor.move.MoveModule):
prompt = 'Destination package: '
if isinstance(self.mover, rope.refactor.move.MoveMethod):
prompt = 'Destination attribute: '
return {'destination': dialog.Data(prompt)}
def _get_optionals(self):
return {'resources': self.resources_option}
class MoveCurrentModule(Move):
key = '1 v'
offset = None
class ModuleToPackage(Refactoring):
key = '1 p'
saveall = False
def _create_refactoring(self):
self.packager = rope.refactor.ModuleToPackage(
self.project, self.resource)
def _calculate_changes(self, values, task_handle):
return self.packager.get_changes()
class Inline(Refactoring):
key = 'i'
def _create_refactoring(self):
self.inliner = rope.refactor.inline.create_inline(
self.project, self.resource, self.offset)
def _calculate_changes(self, values, task_handle):
return self.inliner.get_changes(task_handle=task_handle, **values)
def _get_optionals(self):
opts = {'resources': self.resources_option}
if self.inliner.get_kind() == 'parameter':
opts['in_hierarchy'] = dialog.Boolean(
'Apply on all matching methods in class hierarchy: ', False)
else:
opts['remove'] = dialog.Boolean('Remove the definition: ', True)
opts['only_current'] = dialog.Boolean('Inline this '
'occurrence only: ')
return opts
class _Extract(Refactoring):
saveall = False
optionals = {'similar': dialog.Boolean('Extract similar pieces: ', True),
'global_': dialog.Boolean('Make global: ')}
kind = None
constructor = None
def _create_refactoring(self):
start, end = self.region
self.extractor = self.constructor(self.project,
self.resource, start, end)
def _calculate_changes(self, values, task_handle):
similar = values.get('similar')
global_ = values.get('global_')
return self.extractor.get_changes(values['name'], similar=similar,
global_=global_)
def _get_confs(self):
return {'name': dialog.Data('Extracted %s name: ' % self.kind)}
class ExtractVariable(_Extract):
key = 'l'
kind = 'variable'
constructor = rope.refactor.extract.ExtractVariable
class ExtractMethod(_Extract):
key = 'm'
kind = 'method'
constructor = rope.refactor.extract.ExtractMethod
class OrganizeImports(Refactoring):
key = 'o'
saveall = False
def _create_refactoring(self):
self.organizer = rope.refactor.ImportOrganizer(self.project)
def _calculate_changes(self, values, task_handle):
return self.organizer.organize_imports(self.resource)
class MethodObject(Refactoring):
saveall = False
confs = {'classname': dialog.Data('New class name: ',
default='_ExtractedClass')}
def _create_refactoring(self):
self.objecter = rope.refactor.method_object.MethodObject(
self.project, self.resource, self.offset)
def _calculate_changes(self, values, task_handle):
classname = values.get('classname')
return self.objecter.get_changes(classname)
class IntroduceFactory(Refactoring):
saveall = True
key = 'f'
def _create_refactoring(self):
self.factory = rope.refactor.introduce_factory.IntroduceFactory(
self.project, self.resource, self.offset)
def _calculate_changes(self, values, task_handle):
return self.factory.get_changes(task_handle=task_handle, **values)
def _get_confs(self):
default = 'create_%s' % self.factory.old_name.lower()
return {'factory_name': dialog.Data('Factory name: ', default)}
def _get_optionals(self):
return {'global_factory': dialog.Boolean('Make global: ', True),
'resources': self.resources_option}
class ChangeSignature(Refactoring):
saveall = True
key = 's'
def _create_refactoring(self):
self.changer = rope.refactor.change_signature.ChangeSignature(
self.project, self.resource, self.offset)
def _calculate_changes(self, values, task_handle):
signature = values.get('signature')
args = re.sub(r'[\s\(\)]+', '', signature).split(',')
olds = [arg[0] for arg in self._get_args()]
changers = []
for arg in list(olds):
if arg in args:
continue
changers.append(rope.refactor.change_signature.
ArgumentRemover(olds.index(arg)))
olds.remove(arg)
order = []
for index, arg in enumerate(args):
if arg not in olds:
changers.append(rope.refactor.change_signature.
ArgumentAdder(index, arg))
olds.insert(index, arg)
order.append(olds.index(arg))
changers.append(rope.refactor.change_signature.
ArgumentReorderer(order, autodef='None'))
del values['signature']
return self.changer.get_changes(changers, task_handle=task_handle,
**values)
def _get_args(self):
if hasattr(self.changer, 'get_args'):
return self.changer.get_args()
return self.changer.get_definition_info().args_with_defaults
def _get_confs(self):
args = []
for arg, default in self._get_args():
args.append(arg)
signature = '(' + ', '.join(args) + ')'
return {'signature': dialog.Data('Change the signature: ',
default=signature)}
def _get_optionals(self):
opts = {'resources': self.resources_option}
if self.changer.is_method():
opts['in_hierarchy'] = dialog.Boolean('Rename methods in '
'class hierarchy: ')
return opts
class _GenerateElement(Refactoring):
def _create_refactoring(self):
kind = self.name.split('_')[-1]
self.generator = rope.contrib.generate.create_generate(
kind, self.project, self.resource, self.offset)
def _calculate_changes(self, values, task_handle):
return self.generator.get_changes()
def _done(self):
resource, lineno = self.generator.get_location()
self.interface._goto_location(resource, lineno)
class GenerateVariable(_GenerateElement):
key = 'n v'
class GenerateFunction(_GenerateElement):
key = 'n f'
class GenerateClass(_GenerateElement):
key = 'n c'
class GenerateModule(_GenerateElement):
key = 'n m'
class GeneratePackage(_GenerateElement):
key = 'n p'
def refactoring_name(refactoring):
classname = refactoring.__name__
result = []
for c in classname:
if result and c.isupper():
result.append('_')
result.append(c.lower())
name = ''.join(result)
return name
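# Illustrative sketch (not part of the original module): refactoring_name maps a
# refactoring class to the lowercase, underscore-separated name used elsewhere:
#   refactoring_name(ExtractMethod)    # -> 'extract_method'
#   refactoring_name(OrganizeImports)  # -> 'organize_imports'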
def _resources(project, text):
if text is None or text.strip() == '':
return None
return filter.resources(project, text)
def runtask(env, command, name, interrupts=True):
return RunTask(env, command, name, interrupts)()
class RunTask(object):
def __init__(self, env, task, name, interrupts=True):
self.env = env
self.task = task
self.name = name
self.interrupts = interrupts
def __call__(self):
handle = taskhandle.TaskHandle(name=self.name)
progress = self.env.create_progress(self.name)
def update_progress():
jobset = handle.current_jobset()
if jobset:
percent = jobset.get_percent_done()
if percent is not None:
progress.update(percent)
handle.add_observer(update_progress)
result = self.task(handle)
progress.done()
return result
| gpl-3.0 | -8,179,167,911,721,635,000 | 1,477,647,068,590,182,700 | 31.03055 | 77 | 0.596045 | false |
BradAJ/zipflights | web_app/app/models.py | 1 | 5577 | from app import *
def ita_search(faa_orig, faa_dest, start_date, end_date, duration = None, out_constraints = None, return_constraints = None, month_search = True):
"""
faa_orig, faa_dest: FAA airport code strs e.g. 'SFO'
start_date, end_date: datetime objs e.g. datetime.date.today(), datetime.date(2015, 2, 28)
NOTE: start_date is used as departure date if NOT month_search, similarly for end_date
duration: int number of nights at destination e.g. 7. If None => One-way flight, SET duration = True for specificDate roundtrips!
out/return_constraints: ITA flags e.g. 'N' for nonstops, 'ORD' to transfer there, or 'UA+' for 1 or more United flights.
"""
search_url = 'http://matrix.itasoftware.com/xhr/shop/search'
payload_d = {"pax":{"adults":1},"cabin":"COACH","changeOfAirport":False,"checkAvailability":True,"firstDayOfWeek":"SUNDAY"}
trip_slice = {"originPreferCity":False,"destinationPreferCity":False, "isArrivalDate":False}
def apt_code_parser(codes_in):
return [codes_in] if type(codes_in) is not list else codes_in
outbound_d = trip_slice.copy()
outbound_d['origins'] = apt_code_parser(faa_orig)
outbound_d['destinations'] = apt_code_parser(faa_dest)
if out_constraints is not None:
outbound_d['routeLanguage'] = out_constraints
if month_search:
search_type = 'calendar&summarizers=itineraryCarrierList%2Ccalendar'
payload_d['startDate'] = start_date
payload_d['endDate'] = end_date
payload_d['layover'] = {"max":duration, "min":duration}
else:
search_type = 'specificDates&summarizers=solutionList%2CitineraryCarrierList%2CitineraryOrigins%2CitineraryDestinations'
outbound_d['date'] = start_date
outbound_d['dateModifier'] = {"minus":0, "plus":0}
if duration is not None:
return_d = trip_slice.copy()
return_d['origins'] = apt_code_parser(faa_dest)
return_d['destinations'] = apt_code_parser(faa_orig)
if return_constraints is not None:
return_d['routeLanguage'] = return_constraints
if not month_search:
return_d['date'] = end_date
return_d['dateModifier'] = {"minus":0, "plus":0}
payload_d['slices'] = [outbound_d, return_d]
else:
payload_d['slices'] = [outbound_d]
payload = urllib.quote_plus(json.dumps(payload_d))
url_start_search = 'http://matrix.itasoftware.com/xhr/shop/search?name='
return requests.post(url_start_search + search_type + '&format=JSON&inputs=' + payload)
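# Illustrative usage sketch (not part of the original module); the airports and
# dates are hypothetical, and passing pre-formatted date strings (rather than
# date objects) is an assumption since the payload is JSON-encoded:
#   start = date_obj_to_s(datetime.date(2015, 3, 1))
#   end = date_obj_to_s(datetime.date(2015, 3, 31))
#   resp = ita_search('SFO', 'JFK', start, end, duration=7,
#                     out_constraints='N', return_constraints='N')
#   price, airlines = ita_response_airline_parse(resp)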
def ita_response_airline_parse(response):
airline_fares = ita_response_d(response)['result']['itineraryCarrierList']['groups']
airlines = []
for fare in airline_fares:
if 'minPriceInSummary' in fare:
route_price = fare['minPrice']
airlines.append(fare['label']['shortName'])
return route_price, airlines
def ita_response_hidden_parse(response, faa_orig, faa_dest):
resp_d = ita_response_d(response)
flights_d = dict()
minprice = float(resp_d['result']['solutionList']['minPrice'].strip('USD'))
flights_d['minprice'] = minprice
for itin in resp_d['result']['solutionList']['solutions']:
flightprice = float(itin['displayTotal'].strip('USD'))
if flightprice <= (minprice + 1.0): #fixes sensitivity to cents.
for slic in itin['itinerary']['slices']:
flight = slic['flights'][0] #only interested in first flight here!
if flight not in flights_d:
result_d = dict()
result_d['carrier'] = itin['itinerary']['ext']['dominantCarrier']['shortName']
result_d['departing'] = slic['departure']
result_d['fake_dest'] = slic['destination']['code']
result_d['true_orig'] = slic['origin']['code']
if 'stops' in slic:
result_d['stops'] = slic['stops'][0]['code'] #Looking for non-stops only!
flights_d[flight] = result_d
flights_d['out_flights'] = set()
flights_d['back_flights'] = set()
flights_d['carriers'] = set()
for key in flights_d:
if type(flights_d[key]) is dict and 'true_orig' in flights_d[key]:
if faa_orig == flights_d[key]['true_orig']:
flights_d['out_flights'].add(key)
flights_d['carriers'].add(flights_d[key]['carrier'])
elif faa_dest == flights_d[key]['true_orig']:
flights_d['back_flights'].add(key)
flights_d['carriers'].add(flights_d[key]['carrier'])
flights_d['out_flights'] = sorted(list(flights_d['out_flights'])) if len(flights_d['out_flights']) != 0 else None
flights_d['back_flights'] = sorted(list(flights_d['back_flights'])) if len(flights_d['back_flights']) != 0 else None
return flights_d
def ita_response_d(response):
return json.loads(response.content[4:])
def date_obj_to_s(date_obj):
y = str(date_obj.year)
m = '0' + str(date_obj.month) if date_obj.month < 10 else str(date_obj.month)
d = '0' + str(date_obj.day) if date_obj.day < 10 else str(date_obj.day)
return y + '-' + m + '-' + d
def stars_from_price(delta_p, distrib_mean, distrib_std):
z_score = (delta_p - distrib_mean) / (1.0 * distrib_std)
if abs(z_score) <= (1./3.):
return 3
elif abs(z_score) < (4./3.):
return 3 + (1 if z_score >= 0 else -1)
else:
return 3 + (2 if z_score >= 0 else -2)
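# Worked example (illustrative, not part of the original module): with a savings
# distribution of mean 50 and standard deviation 30, a delta of 120 sits about
# 2.3 standard deviations above the mean and earns the full 5 stars, while a
# delta of 55 stays within a third of a standard deviation and keeps 3 stars:
#   stars_from_price(120, 50, 30)   # -> 5
#   stars_from_price(55, 50, 30)    # -> 3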
| mit | 4,682,677,209,039,323,000 | 125,011,155,844,954,910 | 43.261905 | 146 | 0.607674 | false |
onceuponatimeforever/oh-mainline | vendor/packages/Django/django/utils/safestring.py | 208 | 4185 | """
Functions for working with "safe strings": strings that can be displayed safely
without further escaping in HTML. Marking something as a "safe string" means
that the producer of the string has already turned characters that should not
be interpreted by the HTML engine (e.g. '<') into the appropriate entities.
"""
from django.utils.functional import curry, Promise
from django.utils import six
class EscapeData(object):
pass
class EscapeBytes(bytes, EscapeData):
"""
A byte string that should be HTML-escaped when output.
"""
pass
class EscapeText(six.text_type, EscapeData):
"""
A unicode string object that should be HTML-escaped when output.
"""
pass
if six.PY3:
EscapeString = EscapeText
else:
EscapeString = EscapeBytes
# backwards compatibility for Python 2
EscapeUnicode = EscapeText
class SafeData(object):
pass
class SafeBytes(bytes, SafeData):
"""
A bytes subclass that has been specifically marked as "safe" (requires no
further escaping) for HTML output purposes.
"""
def __add__(self, rhs):
"""
Concatenating a safe byte string with another safe byte string or safe
unicode string is safe. Otherwise, the result is no longer safe.
"""
t = super(SafeBytes, self).__add__(rhs)
if isinstance(rhs, SafeText):
return SafeText(t)
elif isinstance(rhs, SafeBytes):
return SafeBytes(t)
return t
def _proxy_method(self, *args, **kwargs):
"""
Wrap a call to a normal unicode method up so that we return safe
results. The method that is being wrapped is passed in the 'method'
argument.
"""
method = kwargs.pop('method')
data = method(self, *args, **kwargs)
if isinstance(data, bytes):
return SafeBytes(data)
else:
return SafeText(data)
decode = curry(_proxy_method, method=bytes.decode)
class SafeText(six.text_type, SafeData):
"""
A unicode (Python 2) / str (Python 3) subclass that has been specifically
marked as "safe" for HTML output purposes.
"""
def __add__(self, rhs):
"""
Concatenating a safe unicode string with another safe byte string or
safe unicode string is safe. Otherwise, the result is no longer safe.
"""
t = super(SafeText, self).__add__(rhs)
if isinstance(rhs, SafeData):
return SafeText(t)
return t
def _proxy_method(self, *args, **kwargs):
"""
Wrap a call to a normal unicode method up so that we return safe
results. The method that is being wrapped is passed in the 'method'
argument.
"""
method = kwargs.pop('method')
data = method(self, *args, **kwargs)
if isinstance(data, bytes):
return SafeBytes(data)
else:
return SafeText(data)
encode = curry(_proxy_method, method=six.text_type.encode)
if six.PY3:
SafeString = SafeText
else:
SafeString = SafeBytes
# backwards compatibility for Python 2
SafeUnicode = SafeText
def mark_safe(s):
"""
Explicitly mark a string as safe for (HTML) output purposes. The returned
object can be used everywhere a string or unicode object is appropriate.
Can be called multiple times on a single string.
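    Illustrative example (assumes ``django.utils.safestring`` is importable):
        >>> from django.utils.safestring import mark_safe, SafeData
        >>> s = mark_safe('<b>already escaped</b>')
        >>> isinstance(s, SafeData)
        True
        >>> mark_safe(s) is s   # marking twice is a no-op
        True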
"""
if isinstance(s, SafeData):
return s
if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes):
return SafeBytes(s)
if isinstance(s, (six.text_type, Promise)):
return SafeText(s)
return SafeString(str(s))
def mark_for_escaping(s):
"""
Explicitly mark a string as requiring HTML escaping upon output. Has no
effect on SafeData subclasses.
Can be called multiple times on a single string (the resulting escaping is
only applied once).
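    Illustrative behaviour:
        >>> isinstance(mark_for_escaping('<script>'), EscapeData)
        True
        >>> s = mark_safe('<b>ok</b>')
        >>> mark_for_escaping(s) is s   # SafeData passes through unchanged
        True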
"""
if isinstance(s, (SafeData, EscapeData)):
return s
if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes):
return EscapeBytes(s)
if isinstance(s, (six.text_type, Promise)):
return EscapeText(s)
return EscapeBytes(bytes(s))
| agpl-3.0 | -4,035,479,337,326,654,500 | -1,812,964,755,604,182,500 | 30.231343 | 79 | 0.643967 | false |
vadimtk/chrome4sdp | native_client_sdk/src/build_tools/nacl-mono-builder.py | 51 | 5243 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
import tarfile
import buildbot_common
from build_paths import SCRIPT_DIR
SDK_BUILD_DIR = SCRIPT_DIR
MONO_BUILD_DIR = os.path.join(SDK_BUILD_DIR, 'mono_build')
MONO_DIR = os.path.join(MONO_BUILD_DIR, 'nacl-mono')
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--arch',
help='Target architecture',
dest='arch',
default='x86-32')
parser.add_argument('--sdk-revision',
help='SDK Revision'
' (default=buildbot revision)',
dest='sdk_revision',
default=None)
parser.add_argument('--sdk-url',
help='SDK Download URL',
dest='sdk_url',
default=None)
parser.add_argument('--install-dir',
help='Install Directory',
dest='install_dir',
default='naclmono')
options = parser.parse_args(args)
assert sys.platform.find('linux') != -1
buildbot_revision = os.environ.get('BUILDBOT_REVISION', '')
build_prefix = options.arch + ' '
buildbot_common.BuildStep(build_prefix + 'Clean Old SDK')
buildbot_common.MakeDir(MONO_BUILD_DIR)
buildbot_common.RemoveDir(os.path.join(MONO_BUILD_DIR, 'pepper_*'))
buildbot_common.BuildStep(build_prefix + 'Setup New SDK')
sdk_dir = None
sdk_revision = options.sdk_revision
sdk_url = options.sdk_url
if not sdk_url:
if not sdk_revision:
assert buildbot_revision
sdk_revision = buildbot_revision.split(':')[0]
sdk_url = 'gs://nativeclient-mirror/nacl/nacl_sdk/'\
'trunk.%s/naclsdk_linux.tar.bz2' % sdk_revision
sdk_url = sdk_url.replace('https://storage.googleapis.com/', 'gs://')
sdk_file = sdk_url.split('/')[-1]
buildbot_common.Run([buildbot_common.GetGsutil(), 'cp', sdk_url, sdk_file],
cwd=MONO_BUILD_DIR)
tar_file = None
try:
tar_file = tarfile.open(os.path.join(MONO_BUILD_DIR, sdk_file))
pepper_dir = os.path.commonprefix(tar_file.getnames())
tar_file.extractall(path=MONO_BUILD_DIR)
sdk_dir = os.path.join(MONO_BUILD_DIR, pepper_dir)
finally:
if tar_file:
tar_file.close()
assert sdk_dir
buildbot_common.BuildStep(build_prefix + 'Checkout Mono')
# TODO(elijahtaylor): Get git URL from master/trigger to make this
# more flexible for building from upstream and release branches.
if options.arch == 'arm':
git_url = 'git://github.com/igotti-google/mono.git'
git_rev = 'arm_nacl'
else:
git_url = 'git://github.com/elijahtaylor/mono.git'
git_rev = 'HEAD'
if buildbot_revision:
# Unfortunately, we use different git branches/revisions
# for ARM and x86 now, so ignore buildbot_revision variable for ARM.
    # Need to rethink this approach if we plan to support a more
    # flexible repo selection mechanism.
if options.arch != 'arm':
git_rev = buildbot_revision.split(':')[1]
  # ARM and x86 are built out of different git trees, so distinguish
  # them by appending the arch. This also keeps the 32-bit and 64-bit x86
  # builds separate, which is desirable.
  # TODO(olonho): maybe we should avoid modifying the global.
global MONO_DIR
tag = options.arch
MONO_DIR = "%s-%s" % (MONO_DIR, tag)
if not os.path.exists(MONO_DIR):
buildbot_common.MakeDir(MONO_DIR)
buildbot_common.Run(['git', 'clone', git_url, MONO_DIR])
else:
buildbot_common.Run(['git', 'fetch'], cwd=MONO_DIR)
if git_rev:
buildbot_common.Run(['git', 'checkout', git_rev], cwd=MONO_DIR)
arch_to_bitsize = {'x86-32': '32',
'x86-64': '64',
'arm': 'arm'}
arch_to_output_folder = {'x86-32': 'runtime-x86-32-build',
'x86-64': 'runtime-x86-64-build',
'arm': 'runtime-arm-build'}
buildbot_common.BuildStep(build_prefix + 'Configure Mono')
os.environ['NACL_SDK_ROOT'] = sdk_dir
os.environ['TARGET_ARCH'] = options.arch
os.environ['TARGET_BITSIZE'] = arch_to_bitsize[options.arch]
buildbot_common.Run(['./autogen.sh'], cwd=MONO_DIR)
buildbot_common.Run(['make', 'distclean'], cwd=MONO_DIR)
buildbot_common.BuildStep(build_prefix + 'Build and Install Mono')
nacl_interp_script = os.path.join(SDK_BUILD_DIR, 'nacl_interp_loader_mono.sh')
os.environ['NACL_INTERP_LOADER'] = nacl_interp_script
buildbot_common.Run(['./nacl-mono-runtime.sh',
MONO_DIR, # Mono directory with 'configure'
arch_to_output_folder[options.arch], # Build dir
options.install_dir],
cwd=SDK_BUILD_DIR)
# TODO(elijahtaylor,olonho): Re-enable tests on arm when they compile/run.
if options.arch != 'arm':
buildbot_common.BuildStep(build_prefix + 'Test Mono')
buildbot_common.Run(['make', 'check', '-j8'],
cwd=os.path.join(SDK_BUILD_DIR, arch_to_output_folder[options.arch]))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | -7,775,468,009,102,742,000 | -3,484,490,658,360,679,000 | 35.664336 | 80 | 0.625215 | false |
Pablo126/SSBW | Entrega1/lib/python3.5/site-packages/pymongo/collection.py | 15 | 113009 | # Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection level utilities for Mongo."""
import collections
import datetime
import warnings
from bson.code import Code
from bson.objectid import ObjectId
from bson.py3compat import (_unicode,
integer_types,
string_type)
from bson.raw_bson import RawBSONDocument
from bson.codec_options import CodecOptions
from bson.son import SON
from pymongo import (common,
helpers,
message)
from pymongo.bulk import BulkOperationBuilder, _Bulk
from pymongo.command_cursor import CommandCursor
from pymongo.collation import validate_collation_or_none
from pymongo.cursor import Cursor
from pymongo.errors import ConfigurationError, InvalidName, OperationFailure
from pymongo.helpers import _check_write_command_response
from pymongo.helpers import _UNICODE_REPLACE_CODEC_OPTIONS
from pymongo.operations import _WriteOp, IndexModel
from pymongo.read_concern import DEFAULT_READ_CONCERN
from pymongo.read_preferences import ReadPreference
from pymongo.results import (BulkWriteResult,
DeleteResult,
InsertOneResult,
InsertManyResult,
UpdateResult)
from pymongo.write_concern import WriteConcern
try:
from collections import OrderedDict
_ORDERED_TYPES = (SON, OrderedDict)
except ImportError:
_ORDERED_TYPES = (SON,)
_NO_OBJ_ERROR = "No matching object found"
_UJOIN = u"%s.%s"
class ReturnDocument(object):
"""An enum used with
:meth:`~pymongo.collection.Collection.find_one_and_replace` and
:meth:`~pymongo.collection.Collection.find_one_and_update`.
"""
BEFORE = False
"""Return the original document before it was updated/replaced, or
``None`` if no document matches the query.
"""
AFTER = True
"""Return the updated/replaced or inserted document."""
class Collection(common.BaseObject):
"""A Mongo collection.
"""
def __init__(self, database, name, create=False, codec_options=None,
read_preference=None, write_concern=None, read_concern=None,
**kwargs):
"""Get / create a Mongo collection.
Raises :class:`TypeError` if `name` is not an instance of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~pymongo.errors.InvalidName` if `name` is not a valid
collection name. Any additional keyword arguments will be used
as options passed to the create command. See
:meth:`~pymongo.database.Database.create_collection` for valid
options.
If `create` is ``True``, `collation` is specified, or any additional
keyword arguments are present, a ``create`` command will be
sent. Otherwise, a ``create`` command will not be sent and the
collection will be created implicitly on first use.
:Parameters:
- `database`: the database to get a collection from
- `name`: the name of the collection to get
- `create` (optional): if ``True``, force collection
creation even without options being set
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) database.codec_options is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) database.read_preference is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) database.write_concern is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) database.read_concern is used.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. If a collation is provided,
it will be passed to the create collection command. This option is
only supported on MongoDB 3.4 and above.
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 3.4
Support the `collation` option.
.. versionchanged:: 3.2
Added the read_concern option.
.. versionchanged:: 3.0
Added the codec_options, read_preference, and write_concern options.
Removed the uuid_subtype attribute.
:class:`~pymongo.collection.Collection` no longer returns an
instance of :class:`~pymongo.collection.Collection` for attribute
names with leading underscores. You must use dict-style lookups
instead::
collection['__my_collection__']
Not:
collection.__my_collection__
.. versionchanged:: 2.2
Removed deprecated argument: options
.. versionadded:: 2.1
uuid_subtype attribute
.. mongodoc:: collections
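        A brief, illustrative example (assumes an existing
        :class:`~pymongo.database.Database` instance named ``db``; the
        options shown are placeholders):
        >>> coll = db.my_collection  # created implicitly on first use
        >>> capped = Collection(db, "log", create=True, capped=True, size=4096)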
"""
super(Collection, self).__init__(
codec_options or database.codec_options,
read_preference or database.read_preference,
write_concern or database.write_concern,
read_concern or database.read_concern)
if not isinstance(name, string_type):
raise TypeError("name must be an instance "
"of %s" % (string_type.__name__,))
if not name or ".." in name:
raise InvalidName("collection names cannot be empty")
if "$" in name and not (name.startswith("oplog.$main") or
name.startswith("$cmd")):
raise InvalidName("collection names must not "
"contain '$': %r" % name)
if name[0] == "." or name[-1] == ".":
raise InvalidName("collection names must not start "
"or end with '.': %r" % name)
if "\x00" in name:
raise InvalidName("collection names must not contain the "
"null character")
collation = validate_collation_or_none(kwargs.pop('collation', None))
self.__database = database
self.__name = _unicode(name)
self.__full_name = _UJOIN % (self.__database.name, self.__name)
if create or kwargs or collation:
self.__create(kwargs, collation)
self.__write_response_codec_options = self.codec_options._replace(
unicode_decode_error_handler='replace',
document_class=dict)
def _socket_for_reads(self):
return self.__database.client._socket_for_reads(self.read_preference)
def _socket_for_primary_reads(self):
return self.__database.client._socket_for_reads(ReadPreference.PRIMARY)
def _socket_for_writes(self):
return self.__database.client._socket_for_writes()
def _command(self, sock_info, command, slave_ok=False,
read_preference=None,
codec_options=None, check=True, allowable_errors=None,
read_concern=DEFAULT_READ_CONCERN,
write_concern=None,
parse_write_concern_error=False,
collation=None):
"""Internal command helper.
:Parameters:
- `sock_info` - A SocketInfo instance.
- `command` - The command itself, as a SON instance.
- `slave_ok`: whether to set the SlaveOkay wire protocol bit.
- `codec_options` (optional) - An instance of
:class:`~bson.codec_options.CodecOptions`.
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `read_concern` (optional) - An instance of
:class:`~pymongo.read_concern.ReadConcern`.
- `write_concern`: An instance of
:class:`~pymongo.write_concern.WriteConcern`. This option is only
valid for MongoDB 3.4 and above.
- `parse_write_concern_error` (optional): Whether to parse a
``writeConcernError`` field in the command response.
- `collation` (optional) - An instance of
:class:`~pymongo.collation.Collation`.
:Returns:
# todo: don't return address
(result document, address of server the command was run on)
"""
return sock_info.command(
self.__database.name,
command,
slave_ok,
read_preference or self.read_preference,
codec_options or self.codec_options,
check,
allowable_errors,
read_concern=read_concern,
write_concern=write_concern,
parse_write_concern_error=parse_write_concern_error,
collation=collation)
def __create(self, options, collation):
"""Sends a create command with the given options.
"""
cmd = SON([("create", self.__name)])
if options:
if "size" in options:
options["size"] = float(options["size"])
cmd.update(options)
with self._socket_for_writes() as sock_info:
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
write_concern=self.write_concern,
parse_write_concern_error=True,
collation=collation)
def __getattr__(self, name):
"""Get a sub-collection of this collection by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
if name.startswith('_'):
full_name = _UJOIN % (self.__name, name)
raise AttributeError(
"Collection has no attribute %r. To access the %s"
" collection, use database['%s']." % (
name, full_name, full_name))
return self.__getitem__(name)
def __getitem__(self, name):
return Collection(self.__database, _UJOIN % (self.__name, name))
def __repr__(self):
return "Collection(%r, %r)" % (self.__database, self.__name)
def __eq__(self, other):
if isinstance(other, Collection):
return (self.__database == other.database and
self.__name == other.name)
return NotImplemented
def __ne__(self, other):
return not self == other
@property
def full_name(self):
"""The full name of this :class:`Collection`.
The full name is of the form `database_name.collection_name`.
"""
return self.__full_name
@property
def name(self):
"""The name of this :class:`Collection`."""
return self.__name
@property
def database(self):
"""The :class:`~pymongo.database.Database` that this
:class:`Collection` is a part of.
"""
return self.__database
def with_options(
self, codec_options=None, read_preference=None,
write_concern=None, read_concern=None):
"""Get a clone of this collection changing the specified settings.
>>> coll1.read_preference
Primary()
>>> from pymongo import ReadPreference
>>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY)
>>> coll1.read_preference
Primary()
>>> coll2.read_preference
Secondary(tag_sets=None)
:Parameters:
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Collection`
is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Collection` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Collection`
is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) the :attr:`read_concern` of this :class:`Collection`
is used.
"""
return Collection(self.__database,
self.__name,
False,
codec_options or self.codec_options,
read_preference or self.read_preference,
write_concern or self.write_concern,
read_concern or self.read_concern)
def initialize_unordered_bulk_op(self, bypass_document_validation=False):
"""Initialize an unordered batch of write operations.
Operations will be performed on the server in arbitrary order,
possibly in parallel. All operations will be attempted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`unordered_bulk` for examples.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 2.7
"""
return BulkOperationBuilder(self, False, bypass_document_validation)
def initialize_ordered_bulk_op(self, bypass_document_validation=False):
"""Initialize an ordered batch of write operations.
Operations will be performed on the server serially, in the
order provided. If an error occurs all remaining operations
are aborted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`ordered_bulk` for examples.
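        A minimal sketch of using the returned builder (illustrative):
        >>> bulk = db.test.initialize_ordered_bulk_op()
        >>> bulk.insert({'x': 1})
        >>> bulk.find({'x': 1}).update({'$inc': {'x': 1}})
        >>> result = bulk.execute()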
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 2.7
"""
return BulkOperationBuilder(self, True, bypass_document_validation)
def bulk_write(self, requests, ordered=True,
bypass_document_validation=False):
"""Send a batch of write operations to the server.
Requests are passed as a list of write operation instances (
:class:`~pymongo.operations.InsertOne`,
:class:`~pymongo.operations.UpdateOne`,
:class:`~pymongo.operations.UpdateMany`,
:class:`~pymongo.operations.ReplaceOne`,
:class:`~pymongo.operations.DeleteOne`, or
:class:`~pymongo.operations.DeleteMany`).
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634ef')}
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
>>> # DeleteMany, UpdateOne, and UpdateMany are also available.
...
>>> from pymongo import InsertOne, DeleteOne, ReplaceOne
>>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}),
... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)]
>>> result = db.test.bulk_write(requests)
>>> result.inserted_count
1
>>> result.deleted_count
1
>>> result.modified_count
0
>>> result.upserted_ids
{2: ObjectId('54f62ee28891e756a6e1abd5')}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
{u'y': 1, u'_id': ObjectId('54f62ee2fba5226811f634f1')}
{u'z': 1, u'_id': ObjectId('54f62ee28891e756a6e1abd5')}
:Parameters:
- `requests`: A list of write operations (see examples above).
- `ordered` (optional): If ``True`` (the default) requests will be
performed on the server serially, in the order provided. If an error
occurs all remaining operations are aborted. If ``False`` requests
will be performed on the server in arbitrary order, possibly in
parallel, and all operations will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
An instance of :class:`~pymongo.results.BulkWriteResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
if not isinstance(requests, list):
raise TypeError("requests must be a list")
blk = _Bulk(self, ordered, bypass_document_validation)
for request in requests:
if not isinstance(request, _WriteOp):
raise TypeError("%r is not a valid request" % (request,))
request._add_to_bulk(blk)
bulk_api_result = blk.execute(self.write_concern.document)
if bulk_api_result is not None:
return BulkWriteResult(bulk_api_result, True)
return BulkWriteResult({}, False)
def _legacy_write(self, sock_info, name, cmd, acknowledged, op_id,
bypass_doc_val, func, *args):
"""Internal legacy write helper."""
# Cannot have both unacknowledged write and bypass document validation.
if (bypass_doc_val and not acknowledged and
sock_info.max_wire_version >= 4):
raise OperationFailure("Cannot set bypass_document_validation with"
" unacknowledged write concern")
listeners = self.database.client._event_listeners
publish = listeners.enabled_for_commands
if publish:
start = datetime.datetime.now()
rqst_id, msg, max_size = func(*args)
if publish:
duration = datetime.datetime.now() - start
listeners.publish_command_start(
cmd, self.__database.name, rqst_id, sock_info.address, op_id)
start = datetime.datetime.now()
try:
result = sock_info.legacy_write(
rqst_id, msg, max_size, acknowledged)
except Exception as exc:
if publish:
dur = (datetime.datetime.now() - start) + duration
if isinstance(exc, OperationFailure):
details = exc.details
# Succeed if GLE was successful and this is a write error.
if details.get("ok") and "n" in details:
reply = message._convert_write_result(
name, cmd, details)
listeners.publish_command_success(
dur, reply, name, rqst_id, sock_info.address, op_id)
raise
else:
details = message._convert_exception(exc)
listeners.publish_command_failure(
dur, details, name, rqst_id, sock_info.address, op_id)
raise
if publish:
if result is not None:
reply = message._convert_write_result(name, cmd, result)
else:
# Comply with APM spec.
reply = {'ok': 1}
duration = (datetime.datetime.now() - start) + duration
listeners.publish_command_success(
duration, reply, name, rqst_id, sock_info.address, op_id)
return result
def _insert_one(
self, sock_info, doc, ordered,
check_keys, manipulate, write_concern, op_id, bypass_doc_val):
"""Internal helper for inserting a single document."""
if manipulate:
doc = self.__database._apply_incoming_manipulators(doc, self)
if not isinstance(doc, RawBSONDocument) and '_id' not in doc:
doc['_id'] = ObjectId()
doc = self.__database._apply_incoming_copying_manipulators(doc,
self)
concern = (write_concern or self.write_concern).document
acknowledged = concern.get("w") != 0
command = SON([('insert', self.name),
('ordered', ordered),
('documents', [doc])])
if concern:
command['writeConcern'] = concern
if sock_info.max_wire_version > 1 and acknowledged:
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
# Insert command.
result = sock_info.command(
self.__database.name,
command,
codec_options=self.__write_response_codec_options,
check_keys=check_keys)
_check_write_command_response([(0, result)])
else:
# Legacy OP_INSERT.
self._legacy_write(
sock_info, 'insert', command, acknowledged, op_id,
bypass_doc_val, message.insert, self.__full_name, [doc],
check_keys, acknowledged, concern, False,
self.__write_response_codec_options)
if not isinstance(doc, RawBSONDocument):
return doc.get('_id')
def _insert(self, sock_info, docs, ordered=True, check_keys=True,
manipulate=False, write_concern=None, op_id=None,
bypass_doc_val=False):
"""Internal insert helper."""
if isinstance(docs, collections.Mapping):
return self._insert_one(
sock_info, docs, ordered,
check_keys, manipulate, write_concern, op_id, bypass_doc_val)
ids = []
if manipulate:
def gen():
"""Generator that applies SON manipulators to each document
and adds _id if necessary.
"""
_db = self.__database
for doc in docs:
# Apply user-configured SON manipulators. This order of
# operations is required for backwards compatibility,
# see PYTHON-709.
doc = _db._apply_incoming_manipulators(doc, self)
if not (isinstance(doc, RawBSONDocument) or '_id' in doc):
doc['_id'] = ObjectId()
doc = _db._apply_incoming_copying_manipulators(doc, self)
ids.append(doc['_id'])
yield doc
else:
def gen():
"""Generator that only tracks existing _ids."""
for doc in docs:
# Don't inflate RawBSONDocument by touching fields.
if not isinstance(doc, RawBSONDocument):
ids.append(doc.get('_id'))
yield doc
concern = (write_concern or self.write_concern).document
acknowledged = concern.get("w") != 0
command = SON([('insert', self.name),
('ordered', ordered)])
if concern:
command['writeConcern'] = concern
if op_id is None:
op_id = message._randint()
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
bwc = message._BulkWriteContext(
self.database.name, command, sock_info, op_id,
self.database.client._event_listeners)
if sock_info.max_wire_version > 1 and acknowledged:
# Batched insert command.
results = message._do_batched_write_command(
self.database.name + ".$cmd", message._INSERT, command,
gen(), check_keys, self.__write_response_codec_options, bwc)
_check_write_command_response(results)
else:
# Legacy batched OP_INSERT.
message._do_batched_insert(self.__full_name, gen(), check_keys,
acknowledged, concern, not ordered,
self.__write_response_codec_options, bwc)
return ids
def insert_one(self, document, bypass_document_validation=False):
"""Insert a single document.
>>> db.test.count({'x': 1})
0
>>> result = db.test.insert_one({'x': 1})
>>> result.inserted_id
ObjectId('54f112defba522406c9cc208')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f112defba522406c9cc208')}
:Parameters:
- `document`: The document to insert. Must be a mutable mapping
type. If the document does not have an _id field one will be
added automatically.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
- An instance of :class:`~pymongo.results.InsertOneResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_document_type("document", document)
if not (isinstance(document, RawBSONDocument) or "_id" in document):
document["_id"] = ObjectId()
with self._socket_for_writes() as sock_info:
return InsertOneResult(
self._insert(sock_info, document,
bypass_doc_val=bypass_document_validation),
self.write_concern.acknowledged)
def insert_many(self, documents, ordered=True,
bypass_document_validation=False):
"""Insert an iterable of documents.
>>> db.test.count()
0
>>> result = db.test.insert_many([{'x': i} for i in range(2)])
>>> result.inserted_ids
[ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')]
>>> db.test.count()
2
:Parameters:
          - `documents`: An iterable of documents to insert.
- `ordered` (optional): If ``True`` (the default) documents will be
inserted on the server serially, in the order provided. If an error
occurs all remaining inserts are aborted. If ``False``, documents
will be inserted on the server in arbitrary order, possibly in
parallel, and all document inserts will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
An instance of :class:`~pymongo.results.InsertManyResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
if not isinstance(documents, collections.Iterable) or not documents:
raise TypeError("documents must be a non-empty list")
inserted_ids = []
def gen():
"""A generator that validates documents and handles _ids."""
for document in documents:
common.validate_is_document_type("document", document)
if not isinstance(document, RawBSONDocument):
if "_id" not in document:
document["_id"] = ObjectId()
inserted_ids.append(document["_id"])
yield (message._INSERT, document)
blk = _Bulk(self, ordered, bypass_document_validation)
blk.ops = [doc for doc in gen()]
blk.execute(self.write_concern.document)
return InsertManyResult(inserted_ids, self.write_concern.acknowledged)
def _update(self, sock_info, criteria, document, upsert=False,
check_keys=True, multi=False, manipulate=False,
write_concern=None, op_id=None, ordered=True,
bypass_doc_val=False, collation=None):
"""Internal update / replace helper."""
common.validate_boolean("upsert", upsert)
if manipulate:
document = self.__database._fix_incoming(document, self)
collation = validate_collation_or_none(collation)
concern = (write_concern or self.write_concern).document
acknowledged = concern.get("w") != 0
update_doc = SON([('q', criteria),
('u', document),
('multi', multi),
('upsert', upsert)])
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
elif not acknowledged:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
else:
update_doc['collation'] = collation
command = SON([('update', self.name),
('ordered', ordered),
('updates', [update_doc])])
if concern:
command['writeConcern'] = concern
if sock_info.max_wire_version > 1 and acknowledged:
# Update command.
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
# The command result has to be published for APM unmodified
# so we make a shallow copy here before adding updatedExisting.
result = sock_info.command(
self.__database.name,
command,
codec_options=self.__write_response_codec_options).copy()
_check_write_command_response([(0, result)])
# Add the updatedExisting field for compatibility.
if result.get('n') and 'upserted' not in result:
result['updatedExisting'] = True
else:
result['updatedExisting'] = False
# MongoDB >= 2.6.0 returns the upsert _id in an array
# element. Break it out for backward compatibility.
if 'upserted' in result:
result['upserted'] = result['upserted'][0]['_id']
return result
else:
# Legacy OP_UPDATE.
return self._legacy_write(
sock_info, 'update', command, acknowledged, op_id,
bypass_doc_val, message.update, self.__full_name, upsert,
multi, criteria, document, acknowledged, concern, check_keys,
self.__write_response_codec_options)
def replace_one(self, filter, replacement, upsert=False,
bypass_document_validation=False, collation=None):
"""Replace a single document matching the filter.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
>>> result = db.test.replace_one({'x': 1}, {'y': 1})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
The *upsert* option can be used to insert a new document if a matching
document does not exist.
>>> result = db.test.replace_one({'x': 1}, {'x': 1}, True)
>>> result.matched_count
0
>>> result.modified_count
0
>>> result.upserted_id
ObjectId('54f11e5c8891e756a6e1abd4')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f11e5c8891e756a6e1abd4')}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The new document.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_replace(replacement)
with self._socket_for_writes() as sock_info:
result = self._update(sock_info, filter, replacement, upsert,
bypass_doc_val=bypass_document_validation,
collation=collation)
return UpdateResult(result, self.write_concern.acknowledged)
def update_one(self, filter, update, upsert=False,
bypass_document_validation=False,
collation=None):
"""Update a single document matching the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
with self._socket_for_writes() as sock_info:
result = self._update(sock_info, filter, update, upsert,
check_keys=False,
bypass_doc_val=bypass_document_validation,
collation=collation)
return UpdateResult(result, self.write_concern.acknowledged)
def update_many(self, filter, update, upsert=False,
bypass_document_validation=False, collation=None):
"""Update one or more documents that match the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
3
>>> result.modified_count
3
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 4, u'_id': 1}
{u'x': 4, u'_id': 2}
:Parameters:
- `filter`: A query that matches the documents to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation` (optional): If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
with self._socket_for_writes() as sock_info:
result = self._update(sock_info, filter, update, upsert,
check_keys=False, multi=True,
bypass_doc_val=bypass_document_validation,
collation=collation)
return UpdateResult(result, self.write_concern.acknowledged)
def drop(self):
"""Alias for :meth:`~pymongo.database.Database.drop_collection`.
The following two calls are equivalent:
>>> db.foo.drop()
>>> db.drop_collection("foo")
"""
self.__database.drop_collection(self.__name)
def _delete(
self, sock_info, criteria, multi,
write_concern=None, op_id=None, ordered=True,
collation=None):
"""Internal delete helper."""
common.validate_is_mapping("filter", criteria)
concern = (write_concern or self.write_concern).document
acknowledged = concern.get("w") != 0
delete_doc = SON([('q', criteria),
('limit', int(not multi))])
collation = validate_collation_or_none(collation)
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
elif not acknowledged:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
else:
delete_doc['collation'] = collation
command = SON([('delete', self.name),
('ordered', ordered),
('deletes', [delete_doc])])
if concern:
command['writeConcern'] = concern
if sock_info.max_wire_version > 1 and acknowledged:
# Delete command.
result = sock_info.command(
self.__database.name,
command,
codec_options=self.__write_response_codec_options)
_check_write_command_response([(0, result)])
return result
else:
# Legacy OP_DELETE.
return self._legacy_write(
sock_info, 'delete', command, acknowledged, op_id,
False, message.delete, self.__full_name, criteria,
acknowledged, concern, self.__write_response_codec_options,
int(not multi))
def delete_one(self, filter, collation=None):
"""Delete a single document matching the filter.
>>> db.test.count({'x': 1})
3
>>> result = db.test.delete_one({'x': 1})
>>> result.deleted_count
1
>>> db.test.count({'x': 1})
2
:Parameters:
- `filter`: A query that matches the document to delete.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
with self._socket_for_writes() as sock_info:
return DeleteResult(self._delete(sock_info, filter, False,
collation=collation),
self.write_concern.acknowledged)
def delete_many(self, filter, collation=None):
"""Delete one or more documents matching the filter.
>>> db.test.count({'x': 1})
3
>>> result = db.test.delete_many({'x': 1})
>>> result.deleted_count
3
>>> db.test.count({'x': 1})
0
:Parameters:
- `filter`: A query that matches the documents to delete.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
with self._socket_for_writes() as sock_info:
return DeleteResult(self._delete(sock_info, filter, True,
collation=collation),
self.write_concern.acknowledged)
def find_one(self, filter=None, *args, **kwargs):
"""Get a single document from the database.
All arguments to :meth:`find` are also valid arguments for
:meth:`find_one`, although any `limit` argument will be
ignored. Returns a single document, or ``None`` if no matching
document is found.
The :meth:`find_one` method obeys the :attr:`read_preference` of
this :class:`Collection`.
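        An illustrative lookup by field value and by ``_id`` (the documents
        and ids shown are placeholders):
        >>> db.test.find_one({"hello": "world"})
        {u'hello': u'world', u'_id': ObjectId('...')}
        >>> db.test.find_one(ObjectId('54f112defba522406c9cc208'))
        {u'x': 1, u'_id': ObjectId('54f112defba522406c9cc208')}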
:Parameters:
- `filter` (optional): a dictionary specifying
the query to be performed OR any other type to be used as
the value for a query for ``"_id"``.
- `*args` (optional): any additional positional arguments
are the same as the arguments to :meth:`find`.
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to :meth:`find`.
- `max_time_ms` (optional): a value for max_time_ms may be
specified as part of `**kwargs`, e.g.
>>> find_one(max_time_ms=100)
"""
if (filter is not None and not
isinstance(filter, collections.Mapping)):
filter = {"_id": filter}
max_time_ms = kwargs.pop("max_time_ms", None)
cursor = self.find(filter,
*args, **kwargs).max_time_ms(max_time_ms)
for result in cursor.limit(-1):
return result
return None
def find(self, *args, **kwargs):
"""Query the database.
The `filter` argument is a prototype document that all results
must match. For example:
>>> db.test.find({"hello": "world"})
only matches documents that have a key "hello" with value
"world". Matches can have other keys *in addition* to
"hello". The `projection` argument is used to specify a subset
of fields that should be included in the result documents. By
limiting results to a certain subset of fields you can cut
down on network traffic and decoding time.
Raises :class:`TypeError` if any of the arguments are of
improper type. Returns an instance of
:class:`~pymongo.cursor.Cursor` corresponding to this query.
The :meth:`find` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): a SON object specifying elements which
must be present for a document to be included in the
result set
- `projection` (optional): a list of field names that should be
returned in the result set or a dict specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}).
- `skip` (optional): the number of documents to omit (from
the start of the result set) when returning the results
- `limit` (optional): the maximum number of results to
return
- `no_cursor_timeout` (optional): if False (the default), any
returned cursor is closed by the server after 10 minutes of
inactivity. If set to True, the returned cursor will never
time out on the server. Care should be taken to ensure that
cursors with no_cursor_timeout turned on are properly closed.
- `cursor_type` (optional): the type of cursor to return. The valid
options are defined by :class:`~pymongo.cursor.CursorType`:
- :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of
this find call will return a standard cursor over the result set.
- :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this
find call will be a tailable cursor - tailable cursors are only
for use with capped collections. They are not closed when the
last data is retrieved but are kept open and the cursor location
marks the final document position. If more data is received
iteration of the cursor will continue from the last document
received. For details, see the `tailable cursor documentation
<http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_.
- :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result
of this find call will be a tailable cursor with the await flag
set. The server will wait for a few seconds after returning the
full result set so that it can capture and return additional data
added during the query.
- :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this
find call will be an exhaust cursor. MongoDB will stream batched
results to the client without waiting for the client to request
each batch, reducing latency. See notes on compatibility below.
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for this query. See
:meth:`~pymongo.cursor.Cursor.sort` for details.
- `allow_partial_results` (optional): if True, mongos will return
partial results if some shards are down instead of returning an
error.
- `oplog_replay` (optional): If True, set the oplogReplay query
flag.
- `modifiers` (optional): A dict specifying the MongoDB `query
modifiers`_ that should be used for this query. For example::
>>> db.test.find(modifiers={"$maxTimeMS": 500})
- `batch_size` (optional): Limits the number of documents returned in
a single batch.
- `manipulate` (optional): **DEPRECATED** - If True (the default),
apply any outgoing SON manipulators before returning.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
.. note:: There are a number of caveats to using
:attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type:
- The `limit` option can not be used with an exhaust cursor.
- Exhaust cursors are not supported by mongos and can not be
used with a sharded cluster.
- A :class:`~pymongo.cursor.Cursor` instance created with the
:attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an
exclusive :class:`~socket.socket` connection to MongoDB. If the
:class:`~pymongo.cursor.Cursor` is discarded without being
completely iterated the underlying :class:`~socket.socket`
connection will be closed and discarded without being returned to
the connection pool.
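        A sketch of a query combining several of these options (illustrative
        only; assumes ``pymongo`` is imported for the sort direction):
        >>> cursor = db.test.find(
        ...     {"hello": "world"},
        ...     projection={'_id': False},
        ...     skip=10,
        ...     limit=5,
        ...     sort=[("hello", pymongo.ASCENDING)])
        >>> for doc in cursor:
        ...     print(doc)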
.. versionchanged:: 3.4
Support the `collation` option.
.. versionchanged:: 3.0
Changed the parameter names `spec`, `fields`, `timeout`, and
`partial` to `filter`, `projection`, `no_cursor_timeout`, and
`allow_partial_results` respectively.
Added the `cursor_type`, `oplog_replay`, and `modifiers` options.
Removed the `network_timeout`, `read_preference`, `tag_sets`,
`secondary_acceptable_latency_ms`, `max_scan`, `snapshot`,
`tailable`, `await_data`, `exhaust`, `as_class`, and slave_okay
parameters. Removed `compile_re` option: PyMongo now always
represents BSON regular expressions as :class:`~bson.regex.Regex`
objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to
convert from a BSON regular expression to a Python regular
expression object. Soft deprecated the `manipulate` option.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. versionadded:: 2.3
The `tag_sets` and `secondary_acceptable_latency_ms` parameters.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
.. _query modifiers:
http://docs.mongodb.org/manual/reference/operator/query-modifier/
.. mongodoc:: find
"""
return Cursor(self, *args, **kwargs)
def parallel_scan(self, num_cursors, **kwargs):
"""Scan this entire collection in parallel.
Returns a list of up to ``num_cursors`` cursors that can be iterated
concurrently. As long as the collection is not modified during
        scanning, each document appears once in one of the cursors' result
sets.
For example, to process each document in a collection using some
thread-safe ``process_document()`` function:
>>> def process_cursor(cursor):
... for document in cursor:
... # Some thread-safe processing function:
... process_document(document)
>>>
>>> # Get up to 4 cursors.
...
>>> cursors = collection.parallel_scan(4)
>>> threads = [
... threading.Thread(target=process_cursor, args=(cursor,))
... for cursor in cursors]
>>>
>>> for thread in threads:
... thread.start()
>>>
>>> for thread in threads:
... thread.join()
>>>
>>> # All documents have now been processed.
The :meth:`parallel_scan` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `num_cursors`: the number of cursors to return
- `**kwargs`: additional options for the parallelCollectionScan
command can be passed as keyword arguments.
.. note:: Requires server version **>= 2.5.5**.
.. versionchanged:: 3.4
Added back support for arbitrary keyword arguments. MongoDB 3.4
adds support for maxTimeMS as an option to the
parallelCollectionScan command.
.. versionchanged:: 3.0
Removed support for arbitrary keyword arguments, since
the parallelCollectionScan command has no optional arguments.
"""
cmd = SON([('parallelCollectionScan', self.__name),
('numCursors', num_cursors)])
cmd.update(kwargs)
with self._socket_for_reads() as (sock_info, slave_ok):
result = self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern)
return [CommandCursor(self, cursor['cursor'], sock_info.address)
for cursor in result['cursors']]
def _count(self, cmd, collation=None):
"""Internal count helper."""
with self._socket_for_reads() as (sock_info, slave_ok):
res = self._command(
sock_info, cmd, slave_ok,
allowable_errors=["ns missing"],
codec_options=self.__write_response_codec_options,
read_concern=self.read_concern,
collation=collation)
if res.get("errmsg", "") == "ns missing":
return 0
return int(res["n"])
def count(self, filter=None, **kwargs):
"""Get the number of documents in this collection.
All optional count parameters should be passed as keyword arguments
to this method. Valid options include:
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
- `limit` (int): The maximum number of documents to count.
- `skip` (int): The number of matching documents to skip before
returning results.
- `maxTimeMS` (int): The maximum amount of time to allow the count
command to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
The :meth:`count` method obeys the :attr:`read_preference` of
this :class:`Collection`.
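        For example (illustrative; the counts shown are placeholders):
        >>> db.test.count()
        4
        >>> db.test.count({'x': 1}, maxTimeMS=1000)
        3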
:Parameters:
- `filter` (optional): A query document that selects which documents
to count in the collection.
- `**kwargs` (optional): See list of options above.
.. versionchanged:: 3.4
Support the `collation` option.
"""
cmd = SON([("count", self.__name)])
if filter is not None:
if "query" in kwargs:
raise ConfigurationError("can't pass both filter and query")
kwargs["query"] = filter
if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
kwargs["hint"] = helpers._index_document(kwargs["hint"])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
return self._count(cmd, collation)
def create_indexes(self, indexes):
"""Create one or more indexes on this collection.
>>> from pymongo import IndexModel, ASCENDING, DESCENDING
>>> index1 = IndexModel([("hello", DESCENDING),
... ("world", ASCENDING)], name="hello_world")
>>> index2 = IndexModel([("goodbye", DESCENDING)])
>>> db.test.create_indexes([index1, index2])
["hello_world"]
:Parameters:
- `indexes`: A list of :class:`~pymongo.operations.IndexModel`
instances.
.. note:: `create_indexes` uses the ``createIndexes`` command
introduced in MongoDB **2.6** and cannot be used with earlier
versions.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. versionadded:: 3.0
"""
if not isinstance(indexes, list):
raise TypeError("indexes must be a list")
names = []
def gen_indexes():
for index in indexes:
if not isinstance(index, IndexModel):
raise TypeError("%r is not an instance of "
"pymongo.operations.IndexModel" % (index,))
document = index.document
names.append(document["name"])
yield document
cmd = SON([('createIndexes', self.name),
('indexes', list(gen_indexes()))])
with self._socket_for_writes() as sock_info:
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self.write_concern,
parse_write_concern_error=True)
return names
def __create_index(self, keys, index_options):
"""Internal create index helper.
:Parameters:
- `keys`: a list of tuples [(key, type), (key, type), ...]
- `index_options`: a dict of index options.
"""
index_doc = helpers._index_document(keys)
index = {"key": index_doc}
collation = validate_collation_or_none(
index_options.pop('collation', None))
index.update(index_options)
with self._socket_for_writes() as sock_info:
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
else:
index['collation'] = collation
cmd = SON([('createIndexes', self.name), ('indexes', [index])])
try:
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self.write_concern,
parse_write_concern_error=True)
except OperationFailure as exc:
if exc.code in common.COMMAND_NOT_FOUND_CODES:
index["ns"] = self.__full_name
wcn = (self.write_concern if
self.write_concern.acknowledged else WriteConcern())
self.__database.system.indexes._insert(
sock_info, index, True, False, False, wcn)
else:
raise
def create_index(self, keys, **kwargs):
"""Creates an index on this collection.
Takes either a single key or a list of (key, direction) pairs.
The key(s) must be an instance of :class:`basestring`
(:class:`str` in python 3), and the direction(s) must be one of
(:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
:data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
:data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
:data:`~pymongo.TEXT`).
To create a single key ascending index on the key ``'mike'`` we just
use a string argument::
>>> my_collection.create_index("mike")
For a compound index on ``'mike'`` descending and ``'eliot'``
ascending we need to use a list of tuples::
>>> my_collection.create_index([("mike", pymongo.DESCENDING),
... ("eliot", pymongo.ASCENDING)])
All optional index creation parameters should be passed as
keyword arguments to this method. For example::
>>> my_collection.create_index([("mike", pymongo.DESCENDING)],
... background=True)
Valid options include, but are not limited to:
- `name`: custom name to use for this index - if none is
given, a name will be generated.
- `unique`: if ``True`` creates a uniqueness constraint on the index.
- `background`: if ``True`` this index should be created in the
background.
- `sparse`: if ``True``, omit from the index any documents that lack
the indexed field.
- `bucketSize`: for use with geoHaystack indexes.
Number of documents to group together within a certain proximity
to a given longitude and latitude.
- `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
index.
- `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
index.
- `expireAfterSeconds`: <int> Used to create an expiring (TTL)
collection. MongoDB will automatically delete documents from
this collection after <int> seconds. The indexed field must
be a UTC datetime or the data will not expire.
- `partialFilterExpression`: A document that specifies a filter for
a partial index.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
See the MongoDB documentation for a full list of supported options by
server version.
.. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The
option is silently ignored by the server and unique index builds
using the option will fail if a duplicate value is detected.
.. note:: `partialFilterExpression` requires server version **>= 3.2**
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `keys`: a single key or a list of (key, direction)
pairs specifying the index to create
- `**kwargs` (optional): any additional index creation
options (see the above list) should be passed as keyword
arguments
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.2
Added partialFilterExpression to support partial indexes.
.. versionchanged:: 3.0
Renamed `key_or_list` to `keys`. Removed the `cache_for` option.
:meth:`create_index` no longer caches index names. Removed support
for the drop_dups and bucket_size aliases.
.. mongodoc:: indexes
"""
keys = helpers._index_list(keys)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
self.__create_index(keys, kwargs)
return name
def ensure_index(self, key_or_list, cache_for=300, **kwargs):
"""**DEPRECATED** - Ensures that an index exists on this collection.
.. versionchanged:: 3.0
**DEPRECATED**
"""
warnings.warn("ensure_index is deprecated. Use create_index instead.",
DeprecationWarning, stacklevel=2)
# The types supported by datetime.timedelta.
if not (isinstance(cache_for, integer_types) or
isinstance(cache_for, float)):
raise TypeError("cache_for must be an integer or float.")
if "drop_dups" in kwargs:
kwargs["dropDups"] = kwargs.pop("drop_dups")
if "bucket_size" in kwargs:
kwargs["bucketSize"] = kwargs.pop("bucket_size")
keys = helpers._index_list(key_or_list)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
# Note that there is a race condition here. One thread could
# check if the index is cached and be preempted before creating
# and caching the index. This means multiple threads attempting
# to create the same index concurrently could send the index
# to the server two or more times. This has no practical impact
# other than wasted round trips.
if not self.__database.client._cached(self.__database.name,
self.__name, name):
self.__create_index(keys, kwargs)
self.__database.client._cache_index(self.__database.name,
self.__name, name, cache_for)
return name
return None
def drop_indexes(self):
"""Drops all indexes on this collection.
        Can be used on non-existent collections or collections with no indexes.
Raises OperationFailure on an error.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
self.__database.client._purge_index(self.__database.name, self.__name)
self.drop_index("*")
def drop_index(self, index_or_name):
"""Drops the specified index on this collection.
        Can be used on non-existent collections or collections with no
indexes. Raises OperationFailure on an error (e.g. trying to
drop an index that does not exist). `index_or_name`
can be either an index name (as returned by `create_index`),
or an index specifier (as passed to `create_index`). An index
specifier should be a list of (key, direction) pairs. Raises
TypeError if index is not an instance of (str, unicode, list).
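        For example, an index created on an illustrative field ``x`` could be
        dropped either by name or by specifier::
          >>> db.test.drop_index("x_1")
          >>> db.test.drop_index([("x", pymongo.ASCENDING)])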
.. warning::
if a custom name was used on index creation (by
passing the `name` parameter to :meth:`create_index` or
:meth:`ensure_index`) the index **must** be dropped by name.
:Parameters:
- `index_or_name`: index (or name of index) to drop
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
name = index_or_name
if isinstance(index_or_name, list):
name = helpers._gen_index_name(index_or_name)
if not isinstance(name, string_type):
raise TypeError("index_or_name must be an index name or list")
self.__database.client._purge_index(
self.__database.name, self.__name, name)
cmd = SON([("dropIndexes", self.__name), ("index", name)])
with self._socket_for_writes() as sock_info:
self._command(sock_info,
cmd,
read_preference=ReadPreference.PRIMARY,
allowable_errors=["ns not found"],
write_concern=self.write_concern,
parse_write_concern_error=True)
def reindex(self):
"""Rebuilds all indexes on this collection.
.. warning:: reindex blocks all other operations (indexes
are built in the foreground) and will be slow for large
collections.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
cmd = SON([("reIndex", self.__name)])
with self._socket_for_writes() as sock_info:
return self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
write_concern=self.write_concern,
parse_write_concern_error=True)
def list_indexes(self):
"""Get a cursor over the index documents for this collection.
>>> for index in db.test.list_indexes():
... print(index)
...
SON([(u'v', 1), (u'key', SON([(u'_id', 1)])),
(u'name', u'_id_'), (u'ns', u'test.test')])
:Returns:
An instance of :class:`~pymongo.command_cursor.CommandCursor`.
.. versionadded:: 3.0
"""
codec_options = CodecOptions(SON)
coll = self.with_options(codec_options)
with self._socket_for_primary_reads() as (sock_info, slave_ok):
cmd = SON([("listIndexes", self.__name), ("cursor", {})])
if sock_info.max_wire_version > 2:
cursor = self._command(sock_info, cmd, slave_ok,
ReadPreference.PRIMARY,
codec_options)["cursor"]
return CommandCursor(coll, cursor, sock_info.address)
else:
namespace = _UJOIN % (self.__database.name, "system.indexes")
res = helpers._first_batch(
sock_info, self.__database.name, "system.indexes",
{"ns": self.__full_name}, 0, slave_ok, codec_options,
ReadPreference.PRIMARY, cmd,
self.database.client._event_listeners)
data = res["data"]
cursor = {
"id": res["cursor_id"],
"firstBatch": data,
"ns": namespace,
}
# Note that a collection can only have 64 indexes, so we don't
# technically have to pass len(data) here. There will never be
# an OP_GET_MORE call.
return CommandCursor(
coll, cursor, sock_info.address, len(data))
def index_information(self):
"""Get information on this collection's indexes.
Returns a dictionary where the keys are index names (as
returned by create_index()) and the values are dictionaries
containing information about each index. The dictionary is
guaranteed to contain at least a single key, ``"key"`` which
is a list of (key, direction) pairs specifying the index (as
passed to create_index()). It will also contain any other
metadata about the indexes, except for the ``"ns"`` and
``"name"`` keys, which are cleaned. Example output might look
like this:
>>> db.test.ensure_index("x", unique=True)
u'x_1'
>>> db.test.index_information()
{u'_id_': {u'key': [(u'_id', 1)]},
u'x_1': {u'unique': True, u'key': [(u'x', 1)]}}
"""
cursor = self.list_indexes()
info = {}
for index in cursor:
index["key"] = index["key"].items()
index = dict(index)
info[index.pop("name")] = index
return info
def options(self):
"""Get the options set on this collection.
Returns a dictionary of options and their values - see
:meth:`~pymongo.database.Database.create_collection` for more
information on the possible options. Returns an empty
dictionary if the collection has not been created yet.
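        For example, for a capped collection (collection name and size here
        are illustrative) the output might look like this::
          >>> coll = db.create_collection("log_sample", capped=True, size=4096)
          >>> db.log_sample.options()
          {u'capped': True, u'size': 4096}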
"""
with self._socket_for_primary_reads() as (sock_info, slave_ok):
if sock_info.max_wire_version > 2:
criteria = {"name": self.__name}
else:
criteria = {"name": self.__full_name}
cursor = self.__database._list_collections(sock_info,
slave_ok,
criteria)
result = None
for doc in cursor:
result = doc
break
if not result:
return {}
options = result.get("options", {})
if "create" in options:
del options["create"]
return options
def aggregate(self, pipeline, **kwargs):
"""Perform an aggregation using the aggregation framework on this
collection.
All optional aggregate parameters should be passed as keyword arguments
to this method. Valid options include, but are not limited to:
- `allowDiskUse` (bool): Enables writing to temporary files. When set
to True, aggregation stages can write data to the _tmp subdirectory
of the --dbpath directory. The default is False.
- `maxTimeMS` (int): The maximum amount of time to allow the operation
to run in milliseconds.
- `batchSize` (int): The maximum number of documents to return per
batch. Ignored if the connected mongod or mongos does not support
returning aggregate results using a cursor, or `useCursor` is
``False``.
- `useCursor` (bool): Requests that the `server` provide results
using a cursor, if possible. Ignored if the connected mongod or
mongos does not support returning aggregate results using a cursor.
The default is ``True``. Set this to ``False`` when upgrading a 2.4
or older sharded cluster to 2.6 or newer (see the warning below).
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
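        For example, a simple pipeline (the field names used here are only
        illustrative) could be run like this::
          >>> pipeline = [{"$match": {"x": {"$gte": 1}}},
          ...             {"$group": {"_id": "$x", "total": {"$sum": 1}}}]
          >>> cursor = db.test.aggregate(pipeline, allowDiskUse=True)
          >>> for doc in cursor:
          ...     print(doc)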
The :meth:`aggregate` method obeys the :attr:`read_preference` of this
:class:`Collection`. Please note that using the ``$out`` pipeline stage
requires a read preference of
:attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` (the default).
The server will raise an error if the ``$out`` pipeline stage is used
with any other read preference.
.. warning:: When upgrading a 2.4 or older sharded cluster to 2.6 or
newer the `useCursor` option **must** be set to ``False``
until all shards have been upgraded to 2.6 or newer.
.. note:: This method does not support the 'explain' option. Please
use :meth:`~pymongo.database.Database.command` instead. An
example is included in the :ref:`aggregate-examples` documentation.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `pipeline`: a list of aggregation pipeline stages
- `**kwargs` (optional): See list of options above.
:Returns:
A :class:`~pymongo.command_cursor.CommandCursor` over the result
set.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.0
The :meth:`aggregate` method always returns a CommandCursor. The
pipeline argument must be a list.
.. versionchanged:: 2.7
When the cursor option is used, return
:class:`~pymongo.command_cursor.CommandCursor` instead of
:class:`~pymongo.cursor.Cursor`.
.. versionchanged:: 2.6
Added cursor support.
.. versionadded:: 2.3
.. seealso:: :doc:`/examples/aggregation`
.. _aggregate command:
http://docs.mongodb.org/manual/applications/aggregation
"""
if not isinstance(pipeline, list):
raise TypeError("pipeline must be a list")
if "explain" in kwargs:
raise ConfigurationError("The explain option is not supported. "
"Use Database.command instead.")
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd = SON([("aggregate", self.__name),
("pipeline", pipeline)])
# Remove things that are not command options.
batch_size = common.validate_positive_integer_or_none(
"batchSize", kwargs.pop("batchSize", None))
use_cursor = common.validate_boolean(
"useCursor", kwargs.pop("useCursor", True))
# If the server does not support the "cursor" option we
# ignore useCursor and batchSize.
with self._socket_for_reads() as (sock_info, slave_ok):
if sock_info.max_wire_version > 0:
if use_cursor:
if "cursor" not in kwargs:
kwargs["cursor"] = {}
if batch_size is not None:
kwargs["cursor"]["batchSize"] = batch_size
dollar_out = pipeline and '$out' in pipeline[-1]
if (sock_info.max_wire_version >= 5 and dollar_out and
self.write_concern):
cmd['writeConcern'] = self.write_concern.document
cmd.update(kwargs)
# Apply this Collection's read concern if $out is not in the
# pipeline.
if sock_info.max_wire_version >= 4 and 'readConcern' not in cmd:
if dollar_out:
result = self._command(sock_info, cmd, slave_ok,
parse_write_concern_error=True,
collation=collation)
else:
result = self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern,
collation=collation)
else:
result = self._command(sock_info, cmd, slave_ok,
parse_write_concern_error=dollar_out,
collation=collation)
if "cursor" in result:
cursor = result["cursor"]
else:
# Pre-MongoDB 2.6. Fake a cursor.
cursor = {
"id": 0,
"firstBatch": result["result"],
"ns": self.full_name,
}
return CommandCursor(
self, cursor, sock_info.address).batch_size(batch_size or 0)
# key and condition ought to be optional, but deprecation
# would be painful as argument order would have to change.
def group(self, key, condition, initial, reduce, finalize=None, **kwargs):
"""Perform a query similar to an SQL *group by* operation.
Returns an array of grouped items.
The `key` parameter can be:
- ``None`` to use the entire document as a key.
- A :class:`list` of keys (each a :class:`basestring`
(:class:`str` in python 3)) to group by.
- A :class:`basestring` (:class:`str` in python 3), or
:class:`~bson.code.Code` instance containing a JavaScript
function to be applied to each document, returning the key
to group by.
The :meth:`group` method obeys the :attr:`read_preference` of this
:class:`Collection`.
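        For example, counting documents grouped by an illustrative field
        ``x`` might look like::
          >>> db.test.group(key=["x"],
          ...               condition={},
          ...               initial={"count": 0},
          ...               reduce="function(obj, prev) { prev.count += 1; }")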
:Parameters:
- `key`: fields to group by (see above description)
- `condition`: specification of rows to be
considered (as a :meth:`find` query specification)
- `initial`: initial value of the aggregation counter object
- `reduce`: aggregation function as a JavaScript string
- `finalize`: function to be called on each object in output list.
- `**kwargs` (optional): additional arguments to the group command
may be passed as keyword arguments to this helper method
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 2.2
Removed deprecated argument: command
"""
group = {}
if isinstance(key, string_type):
group["$keyf"] = Code(key)
elif key is not None:
group = {"key": helpers._fields_list_to_dict(key, "key")}
group["ns"] = self.__name
group["$reduce"] = Code(reduce)
group["cond"] = condition
group["initial"] = initial
if finalize is not None:
group["finalize"] = Code(finalize)
cmd = SON([("group", group)])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads() as (sock_info, slave_ok):
return self._command(sock_info, cmd, slave_ok,
collation=collation)["retval"]
def rename(self, new_name, **kwargs):
"""Rename this collection.
If operating in auth mode, client must be authorized as an
admin to perform this operation. Raises :class:`TypeError` if
`new_name` is not an instance of :class:`basestring`
(:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName`
if `new_name` is not a valid collection name.
:Parameters:
- `new_name`: new name for this collection
- `**kwargs` (optional): additional arguments to the rename command
may be passed as keyword arguments to this helper method
(i.e. ``dropTarget=True``)
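        For example (collection names here are illustrative)::
          >>> db.old_name.rename("new_name", dropTarget=True)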
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
if not isinstance(new_name, string_type):
raise TypeError("new_name must be an "
"instance of %s" % (string_type.__name__,))
if not new_name or ".." in new_name:
raise InvalidName("collection names cannot be empty")
if new_name[0] == "." or new_name[-1] == ".":
raise InvalidName("collecion names must not start or end with '.'")
if "$" in new_name and not new_name.startswith("oplog.$main"):
raise InvalidName("collection names must not contain '$'")
new_name = "%s.%s" % (self.__database.name, new_name)
cmd = SON([("renameCollection", self.__full_name), ("to", new_name)])
with self._socket_for_writes() as sock_info:
if sock_info.max_wire_version >= 5 and self.write_concern:
cmd['writeConcern'] = self.write_concern.document
cmd.update(kwargs)
sock_info.command('admin', cmd, parse_write_concern_error=True)
def distinct(self, key, filter=None, **kwargs):
"""Get a list of distinct values for `key` among all documents
in this collection.
Raises :class:`TypeError` if `key` is not an instance of
:class:`basestring` (:class:`str` in python 3).
All optional distinct parameters should be passed as keyword arguments
to this method. Valid options include:
- `maxTimeMS` (int): The maximum amount of time to allow the count
command to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
The :meth:`distinct` method obeys the :attr:`read_preference` of
this :class:`Collection`.
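        For example (field names here are illustrative)::
          >>> db.test.distinct("x", {"y": {"$gt": 0}}, maxTimeMS=1000)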
:Parameters:
- `key`: name of the field for which we want to get the distinct
values
- `filter` (optional): A query document that specifies the documents
from which to retrieve the distinct values.
- `**kwargs` (optional): See list of options above.
.. versionchanged:: 3.4
Support the `collation` option.
"""
if not isinstance(key, string_type):
raise TypeError("key must be an "
"instance of %s" % (string_type.__name__,))
cmd = SON([("distinct", self.__name),
("key", key)])
if filter is not None:
if "query" in kwargs:
raise ConfigurationError("can't pass both filter and query")
kwargs["query"] = filter
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads() as (sock_info, slave_ok):
return self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern,
collation=collation)["values"]
def map_reduce(self, map, reduce, out, full_response=False, **kwargs):
"""Perform a map/reduce operation on this collection.
If `full_response` is ``False`` (default) returns a
:class:`~pymongo.collection.Collection` instance containing
the results of the operation. Otherwise, returns the full
response from the server to the `map reduce command`_.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `out`: output collection name or `out object` (dict). See
the `map reduce command`_ documentation for available options.
Note: `out` options are order sensitive. :class:`~bson.son.SON`
can be used to specify multiple options.
e.g. SON([('replace', <collection name>), ('db', <database name>)])
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.map_reduce(map, reduce, "myresults", limit=2)
.. note:: The :meth:`map_reduce` method does **not** obey the
:attr:`read_preference` of this :class:`Collection`. To run
mapReduce on a secondary use the :meth:`inline_map_reduce` method
instead.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation (if the
output is not inline) when using MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. seealso:: :doc:`/examples/aggregation`
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 2.2
Removed deprecated arguments: merge_output and reduce_output
.. _map reduce command: http://docs.mongodb.org/manual/reference/command/mapReduce/
.. mongodoc:: mapreduce
"""
if not isinstance(out, (string_type, collections.Mapping)):
raise TypeError("'out' must be an instance of "
"%s or a mapping" % (string_type.__name__,))
cmd = SON([("mapreduce", self.__name),
("map", map),
("reduce", reduce),
("out", out)])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
inline = 'inline' in cmd['out']
with self._socket_for_primary_reads() as (sock_info, slave_ok):
if (sock_info.max_wire_version >= 5 and self.write_concern and
not inline):
cmd['writeConcern'] = self.write_concern.document
cmd.update(kwargs)
if (sock_info.max_wire_version >= 4 and 'readConcern' not in cmd and
inline):
# No need to parse 'writeConcernError' here, since the command
# is an inline map reduce.
response = self._command(
sock_info, cmd, slave_ok, ReadPreference.PRIMARY,
read_concern=self.read_concern,
collation=collation)
else:
response = self._command(
sock_info, cmd, slave_ok, ReadPreference.PRIMARY,
parse_write_concern_error=not inline,
collation=collation)
if full_response or not response.get('result'):
return response
elif isinstance(response['result'], dict):
dbase = response['result']['db']
coll = response['result']['collection']
return self.__database.client[dbase][coll]
else:
return self.__database[response["result"]]
def inline_map_reduce(self, map, reduce, full_response=False, **kwargs):
"""Perform an inline map/reduce operation on this collection.
Perform the map/reduce operation on the server in RAM. A result
collection is not created. The result set is returned as a list
of documents.
If `full_response` is ``False`` (default) returns the
result documents in a list. Otherwise, returns the full
response from the server to the `map reduce command`_.
The :meth:`inline_map_reduce` method obeys the :attr:`read_preference`
of this :class:`Collection`.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.inline_map_reduce(map, reduce, limit=2)
.. versionchanged:: 3.4
Added the `collation` option.
"""
cmd = SON([("mapreduce", self.__name),
("map", map),
("reduce", reduce),
("out", {"inline": 1})])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads() as (sock_info, slave_ok):
if sock_info.max_wire_version >= 4 and 'readConcern' not in cmd:
res = self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern,
collation=collation)
else:
res = self._command(sock_info, cmd, slave_ok,
collation=collation)
if full_response:
return res
else:
return res.get("results")
def __find_and_modify(self, filter, projection, sort, upsert=None,
return_document=ReturnDocument.BEFORE, **kwargs):
"""Internal findAndModify helper."""
common.validate_is_mapping("filter", filter)
if not isinstance(return_document, bool):
raise ValueError("return_document must be "
"ReturnDocument.BEFORE or ReturnDocument.AFTER")
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd = SON([("findAndModify", self.__name),
("query", filter),
("new", return_document)])
cmd.update(kwargs)
if projection is not None:
cmd["fields"] = helpers._fields_list_to_dict(projection,
"projection")
if sort is not None:
cmd["sort"] = helpers._index_document(sort)
if upsert is not None:
common.validate_boolean("upsert", upsert)
cmd["upsert"] = upsert
with self._socket_for_writes() as sock_info:
if sock_info.max_wire_version >= 4 and 'writeConcern' not in cmd:
wc_doc = self.write_concern.document
if wc_doc:
cmd['writeConcern'] = wc_doc
out = self._command(sock_info, cmd,
read_preference=ReadPreference.PRIMARY,
allowable_errors=[_NO_OBJ_ERROR],
collation=collation)
_check_write_command_response([(0, out)])
return out.get("value")
def find_one_and_delete(self, filter,
projection=None, sort=None, **kwargs):
"""Finds a single document and deletes it, returning the document.
>>> db.test.count({'x': 1})
2
>>> db.test.find_one_and_delete({'x': 1})
{u'x': 1, u'_id': ObjectId('54f4e12bfba5220aa4d6dee8')}
>>> db.test.count({'x': 1})
1
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'x': 1}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_delete(
... {'x': 1}, sort=[('_id', pymongo.DESCENDING)])
{u'x': 1, u'_id': 2}
The *projection* option can be used to limit the fields returned.
>>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False})
{u'x': 1}
:Parameters:
- `filter`: A query that matches the document to delete.
- `projection` (optional): a list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is deleted.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
kwargs['remove'] = True
return self.__find_and_modify(filter, projection, sort, **kwargs)
def find_one_and_replace(self, filter, replacement,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
"""Finds a single document and replaces it, returning either the
original or the replaced document.
The :meth:`find_one_and_replace` method differs from
:meth:`find_one_and_update` by replacing the document matched by
*filter*, rather than modifying the existing document.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_replace({'x': 1}, {'y': 1})
{u'x': 1, u'_id': 0}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The replacement document.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is replaced.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was replaced, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the replaced
or inserted document.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_replace(replacement)
kwargs['update'] = replacement
return self.__find_and_modify(filter, projection,
sort, upsert, return_document, **kwargs)
def find_one_and_update(self, filter, update,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
"""Finds a single document and updates it, returning either the
original or the updated document.
>>> db.test.find_one_and_update(
... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}})
        {u'_id': 665, u'done': False, u'count': 25}
By default :meth:`find_one_and_update` returns the original version of
the document before the update was applied. To return the updated
version of the document instead, use the *return_document* option.
>>> from pymongo import ReturnDocument
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... return_document=ReturnDocument.AFTER)
{u'_id': u'userid', u'seq': 1}
You can limit the fields returned with the *projection* option.
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... return_document=ReturnDocument.AFTER)
{u'seq': 2}
The *upsert* option can be used to create the document if it doesn't
already exist.
>>> db.example.delete_many({}).deleted_count
1
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... upsert=True,
... return_document=ReturnDocument.AFTER)
{u'seq': 1}
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'done': True}):
... print(doc)
...
{u'_id': 665, u'done': True, u'result': {u'count': 26}}
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
>>> db.test.find_one_and_update(
... {'done': True},
... {'$set': {'final': True}},
... sort=[('_id', pymongo.DESCENDING)])
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The update operations to apply.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is updated.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was updated, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the updated
or inserted document.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_update(update)
kwargs['update'] = update
return self.__find_and_modify(filter, projection,
sort, upsert, return_document, **kwargs)
def save(self, to_save, manipulate=True, check_keys=True, **kwargs):
"""Save a document in this collection.
**DEPRECATED** - Use :meth:`insert_one` or :meth:`replace_one` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("save is deprecated. Use insert_one or replace_one "
"instead", DeprecationWarning, stacklevel=2)
common.validate_is_document_type("to_save", to_save)
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
with self._socket_for_writes() as sock_info:
if not (isinstance(to_save, RawBSONDocument) or "_id" in to_save):
return self._insert(sock_info, to_save, True,
check_keys, manipulate, write_concern)
else:
self._update(sock_info, {"_id": to_save["_id"]}, to_save, True,
check_keys, False, manipulate, write_concern,
collation=collation)
return to_save.get("_id")
def insert(self, doc_or_docs, manipulate=True,
check_keys=True, continue_on_error=False, **kwargs):
"""Insert a document(s) into this collection.
**DEPRECATED** - Use :meth:`insert_one` or :meth:`insert_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("insert is deprecated. Use insert_one or insert_many "
"instead.", DeprecationWarning, stacklevel=2)
write_concern = None
if kwargs:
write_concern = WriteConcern(**kwargs)
with self._socket_for_writes() as sock_info:
return self._insert(sock_info, doc_or_docs, not continue_on_error,
check_keys, manipulate, write_concern)
def update(self, spec, document, upsert=False, manipulate=False,
multi=False, check_keys=True, **kwargs):
"""Update a document(s) in this collection.
**DEPRECATED** - Use :meth:`replace_one`, :meth:`update_one`, or
:meth:`update_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("update is deprecated. Use replace_one, update_one or "
"update_many instead.", DeprecationWarning, stacklevel=2)
common.validate_is_mapping("spec", spec)
common.validate_is_mapping("document", document)
if document:
# If a top level key begins with '$' this is a modify operation
# and we should skip key validation. It doesn't matter which key
# we check here. Passing a document with a mix of top level keys
# starting with and without a '$' is invalid and the server will
# raise an appropriate exception.
first = next(iter(document))
if first.startswith('$'):
check_keys = False
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
with self._socket_for_writes() as sock_info:
return self._update(sock_info, spec, document, upsert,
check_keys, multi, manipulate, write_concern,
collation=collation)
def remove(self, spec_or_id=None, multi=True, **kwargs):
"""Remove a document(s) from this collection.
**DEPRECATED** - Use :meth:`delete_one` or :meth:`delete_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("remove is deprecated. Use delete_one or delete_many "
"instead.", DeprecationWarning, stacklevel=2)
if spec_or_id is None:
spec_or_id = {}
if not isinstance(spec_or_id, collections.Mapping):
spec_or_id = {"_id": spec_or_id}
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
with self._socket_for_writes() as sock_info:
return self._delete(sock_info, spec_or_id, multi, write_concern,
collation=collation)
def find_and_modify(self, query={}, update=None,
upsert=False, sort=None, full_response=False,
manipulate=False, **kwargs):
"""Update and return an object.
**DEPRECATED** - Use :meth:`find_one_and_delete`,
:meth:`find_one_and_replace`, or :meth:`find_one_and_update` instead.
"""
warnings.warn("find_and_modify is deprecated, use find_one_and_delete"
", find_one_and_replace, or find_one_and_update instead",
DeprecationWarning, stacklevel=2)
if not update and not kwargs.get('remove', None):
raise ValueError("Must either update or remove")
if update and kwargs.get('remove', None):
raise ValueError("Can't do both update and remove")
# No need to include empty args
if query:
kwargs['query'] = query
if update:
kwargs['update'] = update
if upsert:
kwargs['upsert'] = upsert
if sort:
# Accept a list of tuples to match Cursor's sort parameter.
if isinstance(sort, list):
kwargs['sort'] = helpers._index_document(sort)
# Accept OrderedDict, SON, and dict with len == 1 so we
# don't break existing code already using find_and_modify.
elif (isinstance(sort, _ORDERED_TYPES) or
isinstance(sort, dict) and len(sort) == 1):
warnings.warn("Passing mapping types for `sort` is deprecated,"
" use a list of (key, direction) pairs instead",
DeprecationWarning, stacklevel=2)
kwargs['sort'] = sort
else:
raise TypeError("sort must be a list of (key, direction) "
"pairs, a dict of len 1, or an instance of "
"SON or OrderedDict")
fields = kwargs.pop("fields", None)
if fields is not None:
kwargs["fields"] = helpers._fields_list_to_dict(fields, "fields")
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd = SON([("findAndModify", self.__name)])
cmd.update(kwargs)
with self._socket_for_writes() as sock_info:
if sock_info.max_wire_version >= 4 and 'writeConcern' not in cmd:
wc_doc = self.write_concern.document
if wc_doc:
cmd['writeConcern'] = wc_doc
out = self._command(sock_info, cmd,
read_preference=ReadPreference.PRIMARY,
allowable_errors=[_NO_OBJ_ERROR],
collation=collation)
_check_write_command_response([(0, out)])
if not out['ok']:
if out["errmsg"] == _NO_OBJ_ERROR:
return None
else:
# Should never get here b/c of allowable_errors
raise ValueError("Unexpected Error: %s" % (out,))
if full_response:
return out
else:
document = out.get('value')
if manipulate:
document = self.__database._fix_outgoing(document, self)
return document
def __iter__(self):
return self
def __next__(self):
raise TypeError("'Collection' object is not iterable")
next = __next__
def __call__(self, *args, **kwargs):
"""This is only here so that some API misusages are easier to debug.
"""
if "." not in self.__name:
raise TypeError("'Collection' object is not callable. If you "
"meant to call the '%s' method on a 'Database' "
"object it is failing because no such method "
"exists." %
self.__name)
raise TypeError("'Collection' object is not callable. If you meant to "
"call the '%s' method on a 'Collection' object it is "
"failing because no such method exists." %
self.__name.split(".")[-1])
| gpl-3.0 | 3,371,341,507,320,623,600 | -7,218,710,030,140,653,000 | 42.067454 | 91 | 0.566265 | false |
kvar/ansible | lib/ansible/modules/cloud/vmware/_vmware_host_vmhba_facts.py | 21 | 8352 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Christian Kotte <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_vmhba_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favour of C(_info) module.
alternative: Use M(vmware_host_vmhba_info) instead.
short_description: Gathers facts about vmhbas available on the given ESXi host
description:
- This module can be used to gather facts about vmhbas available on the given ESXi host.
- If C(cluster_name) is provided, then vmhba facts about all hosts from given cluster will be returned.
- If C(esxi_hostname) is provided, then vmhba facts about given host system will be returned.
version_added: '2.8'
author:
- Christian Kotte (@ckotte)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
esxi_hostname:
description:
- Name of the host system to work with.
- Vmhba facts about this ESXi server will be returned.
- This parameter is required if C(cluster_name) is not specified.
type: str
cluster_name:
description:
- Name of the cluster from which all host systems will be used.
- Vmhba facts about each ESXi server will be returned for the given cluster.
- This parameter is required if C(esxi_hostname) is not specified.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather facts about vmhbas of all ESXi Host in the given Cluster
vmware_host_vmhba_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
register: cluster_host_vmhbas
- name: Gather facts about vmhbas of an ESXi Host
vmware_host_vmhba_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
delegate_to: localhost
register: host_vmhbas
'''
RETURN = r'''
hosts_vmhbas_facts:
description:
- dict with hostname as key and dict with vmhbas facts as value.
returned: hosts_vmhbas_facts
type: dict
sample:
{
"10.76.33.204": {
"vmhba_details": [
{
"adapter": "HPE Smart Array P440ar",
"bus": 3,
"device": "vmhba0",
"driver": "nhpsa",
"location": "0000:03:00.0",
"model": "Smart Array P440ar",
"node_wwn": "50:01:43:80:37:18:9e:a0",
"status": "unknown",
"type": "SAS"
},
{
"adapter": "QLogic Corp ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
"bus": 5,
"device": "vmhba1",
"driver": "qlnativefc",
"location": "0000:05:00.0",
"model": "ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
"node_wwn": "57:64:96:32:15:90:23:95:82",
"port_type": "unknown",
"port_wwn": "57:64:96:32:15:90:23:95:82",
"speed": 8,
"status": "online",
"type": "Fibre Channel"
},
{
"adapter": "QLogic Corp ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
"bus": 8,
"device": "vmhba2",
"driver": "qlnativefc",
"location": "0000:08:00.0",
"model": "ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
"node_wwn": "57:64:96:32:15:90:23:95:21",
"port_type": "unknown",
"port_wwn": "57:64:96:32:15:90:23:95:21",
"speed": 8,
"status": "online",
"type": "Fibre Channel"
}
],
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class HostVmhbaMgr(PyVmomi):
"""Class to manage vmhba facts"""
def __init__(self, module):
super(HostVmhbaMgr, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
if not self.hosts:
self.module.fail_json(msg="Failed to find host system.")
def gather_host_vmhba_facts(self):
"""Gather vmhba facts"""
hosts_vmhba_facts = {}
for host in self.hosts:
host_vmhba_facts = dict()
host_st_system = host.configManager.storageSystem
if host_st_system:
device_info = host_st_system.storageDeviceInfo
host_vmhba_facts['vmhba_details'] = []
for hba in device_info.hostBusAdapter:
hba_facts = dict()
if hba.pci:
hba_facts['location'] = hba.pci
for pci_device in host.hardware.pciDevice:
if pci_device.id == hba.pci:
hba_facts['adapter'] = pci_device.vendorName + ' ' + pci_device.deviceName
break
else:
hba_facts['location'] = 'PCI'
hba_facts['device'] = hba.device
# contains type as string in format of 'key-vim.host.FibreChannelHba-vmhba1'
hba_type = hba.key.split(".")[-1].split("-")[0]
if hba_type == 'SerialAttachedHba':
hba_facts['type'] = 'SAS'
elif hba_type == 'FibreChannelHba':
hba_facts['type'] = 'Fibre Channel'
else:
hba_facts['type'] = hba_type
hba_facts['bus'] = hba.bus
hba_facts['status'] = hba.status
hba_facts['model'] = hba.model
hba_facts['driver'] = hba.driver
try:
hba_facts['node_wwn'] = self.format_number(hba.nodeWorldWideName)
except AttributeError:
pass
try:
hba_facts['port_wwn'] = self.format_number(hba.portWorldWideName)
except AttributeError:
pass
try:
hba_facts['port_type'] = hba.portType
except AttributeError:
pass
try:
hba_facts['speed'] = hba.speed
except AttributeError:
pass
host_vmhba_facts['vmhba_details'].append(hba_facts)
hosts_vmhba_facts[host.name] = host_vmhba_facts
return hosts_vmhba_facts
@staticmethod
def format_number(number):
"""Format number"""
string = str(number)
return ':'.join(a + b for a, b in zip(string[::2], string[1::2]))
def main():
"""Main"""
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
],
supports_check_mode=True,
)
host_vmhba_mgr = HostVmhbaMgr(module)
module.exit_json(changed=False, hosts_vmhbas_facts=host_vmhba_mgr.gather_host_vmhba_facts())
if __name__ == "__main__":
main()
| gpl-3.0 | 9,109,853,662,466,697,000 | -1,020,512,605,507,192,300 | 36.621622 | 106 | 0.518319 | false |
ansible/ansible | test/units/config/test_data.py | 113 | 1266 | # Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from ansible.config.data import ConfigData
from ansible.config.manager import Setting
mykey = Setting('mykey', 'myvalue', 'test', 'string')
mykey2 = Setting('mykey2', 'myvalue2', ['test', 'test2'], 'list')
mykey3 = Setting('mykey3', 'myvalue3', 11111111111, 'integer')
class TestConfigData(unittest.TestCase):
def setUp(self):
self.cdata = ConfigData()
def tearDown(self):
self.cdata = None
def test_update_setting(self):
for setting in [mykey, mykey2, mykey3]:
self.cdata.update_setting(setting)
self.assertEqual(setting, self.cdata._global_settings.get(setting.name))
def test_update_setting_with_plugin(self):
pass
def test_get_setting(self):
self.cdata._global_settings = {'mykey': mykey}
self.assertEqual(mykey, self.cdata.get_setting('mykey'))
def test_get_settings(self):
all_settings = {'mykey': mykey, 'mykey2': mykey2}
self.cdata._global_settings = all_settings
for setting in self.cdata.get_settings():
self.assertEqual(all_settings[setting.name], setting)
| gpl-3.0 | 8,377,476,712,505,383,000 | -6,700,452,449,592,253,000 | 29.878049 | 84 | 0.668246 | false |
legacysurvey/rapala | ninetyprime/linearitycheck.py | 2 | 17953 | #!/usr/bin/env python
import os
import glob
import numpy as np
import fitsio
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.backends.backend_pdf import PdfPages
from astropy.table import Table
from bokpipe import *
from bokpipe.bokoscan import _convertfitsreg
def init_data_map(datadir,outdir,expTimes=None,files=None):
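	# Build a map describing one dataset: the sorted raw file list, output
	# filename maps for overscan-subtracted ('') and processed ('_p') images,
	# per-frame exposure times (read from the FITS headers unless supplied),
	# and the DATASEC region of the first image when available.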
dataMap = {}
if not os.path.exists(outdir):
os.mkdir(outdir)
dataMap['outdir'] = outdir
if files is None:
dataMap['files'] = sorted(glob.glob(datadir+'*.fits') +
glob.glob(datadir+'*.fits.gz') +
glob.glob(datadir+'*.fits.fz'))
else:
dataMap['files'] = files
dataMap['rawFiles'] = dataMap['files']
dataMap['oscan'] = bokio.FileNameMap(outdir)
dataMap['proc'] = bokio.FileNameMap(outdir,'_p')
dataMap['files'] = [ dataMap['oscan'](f) for f in dataMap['files'] ]
if expTimes is None:
dataMap['expTime'] = np.array([fitsio.read_header(f)['EXPTIME']
for f in dataMap['files']])
else:
dataMap['expTime'] = expTimes
try:
# assume they are all the same
dataMap['dataSec'] = \
_convertfitsreg(fitsio.read_header(
dataMap['files'][0],'IM4')['DATASEC'])
except IOError:
pass
return dataMap
def process_data(dataMap,redo=True,withvar=True,oscanims=False,bias2d=False):
oscanSubtract = BokOverscanSubtract(output_map=dataMap['oscan'],
overwrite=redo,
write_overscan_image=oscanims,
oscan_cols_file=dataMap['outdir']+'oscan_cols',
oscan_rows_file=dataMap['outdir']+'oscan_rows',
verbose=10)#method='median_value')
oscanSubtract.process_files(dataMap['rawFiles'])
if bias2d:
biasname = 'bias'
biasStack = bokproc.BokBiasStack(#reject=None,
overwrite=redo,
with_variance=withvar)
bias2dFile = os.path.join(dataMap['outdir'],biasname+'.fits')
biasStack.stack(dataMap['biasFiles'],bias2dFile)
#imProcess = bokproc.BokCCDProcess(bias2dFile,
# output_map=dataMap['proc'])
#imProcess.process_files(flatFrames)
def imstat(dataMap,outfn='stats'):
from astropy.stats import sigma_clip
from scipy.stats import mode,scoreatpercentile
array_stats = bokutil.array_stats
fnlen = len(os.path.basename(dataMap['files'][0]))
st = np.zeros(len(dataMap['flatSequence']),
dtype=[('file','S%d'%fnlen),
('expTime','f4'),
('median','16f4'),
('mean','16f4'),
('mode','16f4'),
('iqr25','16f4'),
('iqr75','16f4'),
('iqr10','16f4'),
('iqr90','16f4')])
for _i,i in enumerate(dataMap['flatSequence']):
expTime = dataMap['expTime'][i]
fn = os.path.basename(dataMap['files'][i])
fits = fitsio.FITS(dataMap['files'][i])
print '%s %4.1f ' % (fn,expTime),
st['file'][_i] = fn
st['expTime'][_i] = expTime
for j,extn in enumerate(['IM%d' % n for n in range(1,17)]):
modeVal,pix = array_stats(fits[extn].read()[dataMap['statsPix']],
method='mode',retArray=True)
st['mode'][_i,j] = modeVal
st['mean'][_i,j] = pix.mean()
st['median'][_i,j] = np.ma.median(pix)
st['iqr25'][_i,j] = scoreatpercentile(pix,25)
st['iqr75'][_i,j] = scoreatpercentile(pix,75)
st['iqr10'][_i,j] = scoreatpercentile(pix,10)
st['iqr90'][_i,j] = scoreatpercentile(pix,90)
print '%5d ' % (modeVal),
print
fitsio.write(outfn+'.fits',st,clobber=True)
def scaled_histograms(dataMap,nims=None,outfn='pixhist'):
pdf = PdfPages(outfn+'.pdf')
for _i,i in enumerate(dataMap['flatSequence']):
if nims is not None and _i==nims:
break
expTime = dataMap['expTime'][i]
expScale = dataMap['refExpTime'] / expTime
print dataMap['files'][i]
fn = os.path.basename(dataMap['files'][i])
fits = fitsio.FITS(dataMap['files'][i])
fig = plt.figure(figsize=(8.0,10))
plt.subplots_adjust(0.08,0.08,0.92,0.92,0.3,0.35)
for j,extn in enumerate(['IM%d' % n for n in range(1,17)]):
ax = plt.subplot(8,2,j+1)
pix = fits[extn].read()[dataMap['statsPix']]
ax.hist(expScale*pix.flatten(),100,(0,40000),edgecolor='none')
ax.text(0.05,0.9,extn,va='top',size=9,transform=ax.transAxes)
ax.set_xlim(0,40000)
ax.xaxis.set_major_locator(ticker.MultipleLocator(10000))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(2000))
ax.yaxis.set_major_locator(ticker.MultipleLocator(50000))
plt.figtext(0.5,0.99,fn+' exp=%.1f' % expTime,ha='center',va='top')
pdf.savefig(fig)
plt.close(fig)
pdf.close()
def plot_sequence(dataMap,st,imNum,which='median'):
expScale = dataMap['refExpTime']/st['expTime']
seqno = 1 + np.arange(len(st))
ref = np.isclose(expScale,1.0)
j = imNum - 1
plt.figure(figsize=(8,6))
plt.subplots_adjust(0.11,0.08,0.96,0.95)
plt.errorbar(seqno[ref],expScale[ref]*st[which][ref,j],
[expScale[ref]*(st[which]-st['iqr10'])[ref,j],
expScale[ref]*(st['iqr90']-st[which])[ref,j]],
fmt='bs-')
plt.errorbar(seqno[~ref],expScale[~ref]*st[which][~ref,j],
[expScale[~ref]*(st[which]-st['iqr10'])[~ref,j],
expScale[~ref]*(st['iqr90']-st[which])[~ref,j]],
fmt='cs-')
#plt.scatter(seqno,expScale*st['mode'][:,j],marker='+',c='r')
#plt.scatter(seqno,expScale*st['mean'][:,j],marker='x',c='g')
plt.xlabel('sequence number')
plt.ylabel('counts scaled by exp time')
plt.title('IM%d'%imNum)
plt.xlim(0.5,len(st)+0.5)
def fit_ref_exposures(dataMap,st,imNum,
which='median',method='spline',doplot=False):
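	# Fit a smooth correction (spline or linear) to the counts of the repeated
	# reference exposures as a function of sequence number, returning a
	# callable used to rescale each frame for lamp drift.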
from scipy.interpolate import UnivariateSpline
seqno = 1 + np.arange(len(st))
t = st['expTime']
ref = np.isclose(t,dataMap['refExpTime'])
j = imNum - 1
refCounts = st[which][ref,j][0]
if method=='linear':
_fit = np.polyfit(seqno[ref],refCounts/st[which][ref,j],1)
fit = lambda x: np.polyval(_fit,x)
elif method=='spline':
fit = UnivariateSpline(seqno[ref],refCounts/st[which][ref,j],
s=1e-5,k=3)
else:
raise ValueError
if doplot:
plt.figure()
plt.subplot(211)
plt.plot(seqno[ref],st[which][ref,j],'bs-')
plt.plot(seqno,refCounts/fit(seqno),c='r')
plt.subplot(212)
plt.plot(seqno[ref],(st[which][ref,j]-refCounts/fit(seqno[ref]))
/st[which][ref,j],'bs-')
plt.axhline(0,c='r')
return fit
def plot_linearity_curves(dataMap,st,which='median',correct=True,isPTC=False,
refCor=None,fitmethod='spline',outfn='linearity',
onlyim=None):
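	# For each amplifier, fit counts vs. exposure time on the unsaturated
	# frames of the increasing sequence (optionally rescaled by the
	# reference-exposure correction) and plot the fit plus fractional
	# residuals, paginated into a PDF or as a single panel when onlyim is set.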
seqno = 1 + np.arange(len(st))
t = st['expTime']
print seqno,t
refExpTime = dataMap['refExpTime']
ref = np.isclose(t,refExpTime)
refCorFit = None
ii = np.arange(len(st))
# only use the increasing sequence, not the reference exposures
ii = ii[~ref]
if isPTC:
# for PTCs skip every other image since they are done in pairs
ii = ii[::2]
# only fit to unsaturated frames
try:
firstsat = np.where(np.any(st[which][ii,:] > 55000,axis=1))[0][0]
except IndexError:
firstsat = -1
if onlyim is None:
pdf = PdfPages(outfn+'.pdf')
for imNum in range(1,17):
if onlyim is not None and imNum != onlyim:
continue
j = imNum - 1
# correct lamp variation
if correct:
if refCor is None:
fscl_fit = fit_ref_exposures(dataMap,st,imNum,which,
method=fitmethod)
else:
if refCorFit is None:
refCorFit = fit_ref_exposures(dataMap,st,imNum,which)
fscl_fit = refCorFit
fscl = fscl_fit(seqno)
else:
fscl = np.ones_like(seqno)
fit = np.polyfit(t[ii[:firstsat]],
fscl[ii[:firstsat]]*st[which][ii[:firstsat],j],1)
fitv = np.polyval(fit,t)
slope = fit[0] / (st[which][ref,j][0]/refExpTime)
#
pltindex = imNum % 4
if onlyim is None:
if pltindex == 1:
fig = plt.figure(figsize=(8,10))
plt.subplots_adjust(0.11,0.08,0.96,0.95,0.25,0.2)
ax = plt.subplot(4,2,2*(j%4)+1)
else:
fig = plt.figure(figsize=(6,2.5))
plt.subplots_adjust(0.11,0.23,0.99,0.98,0.35,0.2)
ax = plt.subplot(1,2,1)
plt.plot(t[ii],fscl[ii]*st[which][ii,j],'bs-')
plt.xlim(0.9*t.min(),t.max()+0.5)
plt.xscale('log')
plt.ylim(1e2,9e4)
plt.yscale('log')
plt.ylabel('counts [%s]' % which)
tt = np.logspace(-1,np.log10(1.3*t.max()),100)
plt.plot(tt,np.polyval(fit,tt),c='r')
plt.text(0.05,0.9,'IM%d'%imNum,va='top',transform=ax.transAxes)
plt.text(0.95,0.18,r'y = %.1f $\times$ t + %.1f' % tuple(fit),
ha='right',va='top',size=9,transform=ax.transAxes)
plt.text(0.95,0.10,r'y = %.3f $\times$ counts + %.1f' % (slope,fit[1]),
ha='right',va='top',size=9,transform=ax.transAxes)
if pltindex==0 or onlyim is not None:
plt.xlabel('exptime (s)')
#
if onlyim is None:
ax = plt.subplot(4,2,2*(j%4)+2)
else:
ax = plt.subplot(1,2,2)
plt.plot(t[ii],100*(fscl[ii]*st[which][ii,j]-fitv[ii])/fitv[ii],'bs-')
plt.axhline(0,c='r')
#ax.xaxis.set_major_locator(ticker.MultipleLocator(10))
#ax.xaxis.set_minor_locator(ticker.MultipleLocator(2))
ax.yaxis.set_major_locator(ticker.MultipleLocator(2))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.5))
plt.ylim(-5,5)
plt.xlim(0.9*t.min(),t.max()+0.5)
plt.xscale('log')
if pltindex==0 or onlyim is not None:
plt.xlabel('exptime (s)')
plt.ylabel('residual \%')
if onlyim is None:
if pltindex == 0:
pdf.savefig(fig)
plt.close(fig)
if onlyim is None:
pdf.close()
def rel_gain(dataMap,st,which='median',correct=True,fitmethod='spline',
nskip=0):
seqno = 1 + np.arange(len(st))
t = st['expTime']
refExpTime = dataMap['refExpTime']
ref = np.isclose(t,refExpTime)
refCorFit = None
ii = np.arange(len(st))
ii = ii[~ref]
ii = ii[nskip:]
sky4 = st[which][ii,3]
fit_ii = ii[np.where((sky4>5000)&(sky4<25000))[0]]
plt.figure()
for imNum in range(1,17):
j = imNum - 1
# correct lamp variation
if correct:
if True: #refCor is None:
fscl_fit = fit_ref_exposures(dataMap,st,imNum,which,
method=fitmethod)
else:
if refCorFit is None:
refCorFit = fit_ref_exposures(dataMap,st,imNum,which)
fscl_fit = refCorFit
fscl = fscl_fit(seqno)
else:
fscl = np.ones_like(seqno)
fit = np.polyfit(t[fit_ii],fscl[fit_ii]*st[which][fit_ii,j],1)
fitv = np.polyval(fit,t)
# slope = fit[0] / (st[which][ref,j][0]/refExpTime)
		xx = np.array([0,1.1*t.max()])
plt.subplot(4,4,imNum)
if False:
plt.scatter(t[ii],fscl[ii]*st[which][ii,j])
plt.plot(xx,np.polyval(fit,xx),c='r')
else:
plt.scatter(t[ii],fscl[ii]*st[which][ii,j]/fitv[ii])
plt.axhline(1,c='r')
plt.ylim(0.7,1.3)
if True:
plt.xscale('log')
plt.xlim(0.9*t.min(),1.1*t.max())
def get_first_saturated_frame(seq):
try:
firstsat = np.where(seq > 55000)[0][0]
except IndexError:
firstsat = -1
return firstsat
def compare_oscan_levels(dataMap,st):
files = [ dataMap['files'][i] for i in dataMap['flatSequence'] ]
oscans = np.zeros((len(files),16))
for j in range(16):
oscans[:,j] = [ fitsio.read_header(f,'IM%d'%(j+1))['OSCANMED']
for f in files ]
seqno = 1 + np.arange(len(st))
plt.figure()
for j in range(8,16):
ax = plt.subplot(8,2,2*(j%8)+1)
i1 = get_first_saturated_frame(st['median'][:,j])
plt.scatter(st['median'][:i1,j],oscans[:i1,j],c='b')
plt.ylabel('IM%d'%(j+1))
ax = plt.subplot(8,2,2*(j%8)+2)
plt.scatter(seqno[:i1],oscans[:i1,j],c='b')
def init_sep09bss_data_map():
datadir = os.environ.get('BASSDATA')+'/20150909/bss/20150908/'
exptimes = np.loadtxt(datadir+'../bss.20150909.log',usecols=(3,))
exptimes = exptimes[50:]
print exptimes
rdxdir = os.environ.get('GSCRATCH','tmp_sep')+'/bss_sep09/'
if not os.path.exists(rdxdir):
os.makedirs(rdxdir)
dataMap = init_data_map(datadir,rdxdir,
expTimes=exptimes,files=None)
dataMap['rawFiles'] = dataMap['rawFiles'][50:]
dataMap['files'] = dataMap['files'][50:]
dataMap['biasFiles'] = dataMap['files'][-5:]
#dataMap['flatSequence'] = range(50,68)
dataMap['flatSequence'] = range(18)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 40.0
return dataMap
def init_sep29ptc_data_map():
dataMap = init_data_map(
"/home/ian/dev/rapala/bokpipe/scratch/sep29ptcs/ptc/",'sep29ptcs/')
dataMap['biasFiles'] = [dataMap['files'][0],]
dataMap['flatSequence'] = range(1,len(dataMap['files']))
dataMap['statsPix'] = np.s_[20:-20,100:-100]
dataMap['refExpTime'] = 10.0
return dataMap
def init_oct02ptc_data_map():
dataMap = init_data_map(os.environ.get('GSCRATCH')+'/02oct15/ptc/',
os.environ.get('GSCRATCH')+'/02oct15/ptc_proc/')
dataMap['biasFiles'] = [dataMap['files'][0],]
dataMap['flatSequence'] = range(1,len(dataMap['files']))
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 10.0
return dataMap
def init_oct20_data_map():
datadir = os.environ.get('BASSDATA')+'/20151020/'
exptimes = np.loadtxt(datadir+'images.log',usecols=(6,))
nuse = 53
exptimes = exptimes[:nuse]
print exptimes
dataMap = init_data_map(datadir,'tmp_oct20',expTimes=exptimes)
dataMap['rawFiles'] = dataMap['rawFiles'][:nuse]
dataMap['files'] = [ dataMap['oscan'](f)
for f in dataMap['files'][:nuse] ]
dataMap['biasFiles'] = dataMap['files'][:20]
dataMap['flatSequence'] = range(20,nuse)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 3.0
return dataMap
def init_nov11g_data_map():
datadir = os.environ.get('BASSDATA')+'/Nov2015/'
log = Table.read(datadir+'bassLog_Nov2015.fits')
exptimes = log['expTime'][111:150]
files = [ datadir+f['utDir']+'/'+f['fileName']+'.fits'
for f in log[111:150] ]
dataMap = init_data_map(datadir,'tmp_nov11g',
expTimes=exptimes,files=files)
dataMap['biasFiles'] = dataMap['files'][-10:]
dataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 3.0
return dataMap
def init_nov14_data_map(filt):
datadir = os.environ.get('BASSDATA')+'/Nov2015/'
log = Table.read(datadir+'bassLog_Nov2015.fits')
if filt=='g':
frames = np.r_[np.s_[297:345],np.s_[247:257]]
else:
frames = np.r_[np.s_[345:393],np.s_[247:257]]
exptimes = log['expTime'][frames]
files = [ datadir+f['utDir']+'/'+f['fileName']+'.fits'
for f in log[frames] ]
dataMap = init_data_map(datadir,'tmp_nov14'+filt,
expTimes=exptimes,files=files)
dataMap['biasFiles'] = dataMap['files'][-10:]
dataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = {'Ha':10.0,'g':3.0}[filt]
return dataMap
def init_jan3_data_map(filt):
datadir = os.environ.get('BASSDATA')
log = Table.read('basslogs/log_ut20160103.fits')
if filt=='g':
frames = np.r_[np.s_[57:105],np.s_[160:170]]
else:
frames = np.r_[np.s_[105:160],np.s_[160:170]]
exptimes = log['expTime'][frames]
files = [ datadir+'/'+f['utDir'].strip()+'/'+f['fileName'].strip()+'.fits'
for f in log[frames] ]
dataMap = init_data_map(datadir,'tmp_jan3'+filt,
expTimes=exptimes,files=files)
dataMap['biasFiles'] = dataMap['files'][-10:]
dataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = {'Ha':10.0,'g':3.0}[filt]
return dataMap
def init_data_map_fromfile(filename,outdir='tmp',nersc=True):
datadir = os.environ.get('BASSDATA')
if nersc:
datadir = os.path.join(datadir,'BOK_Raw')
log = np.loadtxt(filename,dtype=[('frameNum','i4'),('utDir','S8'),
('fileName','S35'),
('imType','S10'),('filter','S8'),
('expTime','f4')],skiprows=1)
exptimes = log['expTime']
files = [ datadir+'/'+f['utDir'].strip()+'/'+f['fileName'].strip()+'.fits'
for f in log ]
if nersc:
files = [ f+'.fz' for f in files ]
dataMap = init_data_map(datadir,outdir,
expTimes=exptimes,files=files)
dataMap['biasFiles'] = np.array(dataMap['files'])[log['imType']=='zero']
dataMap['flatSequence'] = np.where(log['imType']=='flat')[0]
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
# assume it starts with reference
dataMap['refExpTime'] = exptimes[dataMap['flatSequence'][0]]
return dataMap
if __name__=='__main__':
import sys
dataset = sys.argv[1]
if dataset == 'sep09bss':
dataMap = init_sep09bss_data_map()
elif dataset == 'oct02':
dataMap = init_oct02ptc_data_map()
elif dataset == 'oct20':
dataMap = init_oct20_data_map()
elif dataset == 'nov11g':
dataMap = init_nov11g_data_map()
elif dataset == 'nov14g':
dataMap = init_nov14_data_map('g')
elif dataset == 'nov14Ha':
dataMap = init_nov14_data_map('Ha')
elif dataset == 'jan3g':
dataMap = init_jan3_data_map('g')
elif dataset == 'jan3Ha':
dataMap = init_jan3_data_map('Ha')
else:
dataMap = init_data_map_fromfile(sys.argv[2],dataset)
print 'processing ',dataset
if not os.path.exists('stats_'+dataset+'.fits'):
process_data(dataMap,bias2d=True)
imstat(dataMap,outfn='stats_'+dataset)
st = fitsio.read('stats_'+dataset+'.fits')
plot_linearity_curves(dataMap,st,outfn='linearity_'+dataset)
if True:
plot_linearity_curves(dataMap,st,outfn='linearity_'+dataset,
onlyim=4)
plt.savefig('linearity_IM4_%s.png'%dataset)
plot_sequence(dataMap,st,4)
plt.savefig('linsequence_IM4_%s.png'%dataset)
| bsd-3-clause | -8,044,236,560,152,155,000 | 7,636,230,916,783,978,000 | 35.638776 | 77 | 0.619284 | false |
Jailander/COSMOS | kriging_exploration/scripts/explorator.py | 1 | 34183 | #!/usr/bin/env python
import cv2
import sys
import yaml
import signal
import numpy as np
#import utm
import matplotlib as mpl
import matplotlib.cm as cm
import rospy
import argparse
import actionlib
from cosmos_msgs.msg import KrigInfo
from cosmos_msgs.srv import CompareModels
import kriging_exploration.map_coords
import std_msgs.msg
import open_nav.msg
from kriging_exploration.data_grid import DataGrid
from kriging_exploration.map_coords import MapCoords
from kriging_exploration.visualiser import KrigingVisualiser
from kriging_exploration.canvas import ViewerCanvas
from kriging_exploration.topological_map import TopoMap
from kriging_exploration.exploration import ExplorationPlan
from sensor_msgs.msg import NavSatFix
def overlay_image_alpha(img, img_overlay):
"""Overlay img_overlay on top of img at the position specified by
pos and blend using alpha_mask.
"""
show_image = img.copy()
alpha = img_overlay[:, :, 3] / 255.0 # Alpha mask must contain values
# within the range [0, 1]
# and be the same size as img_overlay.
# Image ranges
y1, y2 = 0, img.shape[0]
x1, x2 = 0, img.shape[1]
channels = img.shape[2]
alpha_inv = 1.0 - alpha
for c in range(channels):
show_image[y1:y2, x1:x2, c] = (alpha * img_overlay[y1:y2, x1:x2, c] + alpha_inv * img[y1:y2, x1:x2, c])
return show_image
class Explorator(KrigingVisualiser):
#_w_shape=[(0, 16), (1, 17), (3, 17), (5, 16), (8, 15), (10, 15), (12, 14), (14, 13), (12, 12), (10, 11), (8, 11), (5, 10), (8, 9), (10, 9), (12, 8), (14, 7), (12, 6), (10, 5), (8, 5), (6, 4), (4, 3), (3, 2), (4, 1), (5, 0), (7, 0)]
#_w_shape=[(17, 0), (17, 1), (17, 3), (16, 5), (15, 8), (15, 10), (14, 12), (13, 14), (12, 12), (11, 10), (11, 8), (10, 5), (9, 8), (9, 10), (8, 12), (7, 14), (6, 12), (5, 10), (5, 8), (4, 6), (3, 4), (2, 3), (1, 4), (0, 5), (0, 7)]
#_w_shape=[(17, 0), (17,1), (17, 2), (17, 4), (16, 4), (16, 6), (16, 8), (15, 8), (15, 10), (14, 10), (14, 12), (13, 12), (13, 14), (12, 14), (12, 12), (11, 12), (11, 10), (10, 10), (10, 8), (10, 6), (10, 4), (9, 4), (9, 6), (9, 8), (9, 10), (8, 10), (8, 12), (7, 12), (7, 14), (6, 14), (6, 12), (5, 12), (5, 10), (4, 10), (4, 8), (4, 6), (4, 4), (3, 4), (3, 3), (2, 3), (2, 4), (1,4), (1, 6), (0,6), (1, 8), (0,8), (1, 10), (0, 10), (0, 12), (0, 14)]
_w_shape=[(17, 0), (16, 1), (14, 6), (12, 11), (10, 14), (8, 9), (5, 14), (3, 11), (2, 6), (0, 3)]
def __init__(self, lat_deg, lon_deg, zoom, size, args):
self.targets = []
self.results =[]
self.result_counter=0
self.explodist=0
self.running = True
self.last_coord=None
signal.signal(signal.SIGINT, self.signal_handler)
self.expid=args.experiment_name
print "Creating visualiser object"
super(Explorator, self).__init__(lat_deg, lon_deg, zoom, size)
cv2.namedWindow('explorator')
cv2.setMouseCallback('explorator', self.click_callback)
self.current_model=-1
self.draw_mode = 'none'
self.grid = DataGrid(args.limits_file, args.cell_size)
self.topo_map= TopoMap(self.grid)
self.visited_wp=[]
explo_type = args.area_coverage_type
self.define_exploration_type(explo_type)
self.navigating = False
self.pause_exp = False
self.exploring = 0
self.n_inputs = 0
print "NUMBER OF TARGETS:"
print len(self.explo_plan.targets)
self.limits_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.grid_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.exploration_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.gps_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.limits_canvas.draw_polygon(self.grid.limits, (0,0,255,128), thickness=1)
self.grid_canvas.draw_grid(self.grid.cells, args.cell_size, (128,128,128,2), thickness=1)
self.redraw()
self.redraw_kriged=True
self.redraw_var=True
self.redraw_devi=True
self.model_canvas=[]
self.model_legend=[]
self.kriging_canvas=[]
self.klegend_canvas=[]
self.klegend2_canvas=[]
self.klegend3_canvas=[]
self.sigma_canvas=[]
self.sigma2_canvas=[]
self.model_canvas_names=[]
self.mean_out_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_out_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_var_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_var_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_dev_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_dev_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
rospy.loginfo("Subscribing to Krig Info")
rospy.Subscriber("/kriging_data", KrigInfo, self.data_callback)
rospy.Subscriber("/fix", NavSatFix, self.gps_callback)
rospy.Subscriber('/penetrometer_scan', std_msgs.msg.String, self.scan_callback)
self.req_data_pub = rospy.Publisher('/request_scan', std_msgs.msg.String, latch=False, queue_size=1)
rospy.loginfo(" ... Connecting to Open_nav")
self.open_nav_client = actionlib.SimpleActionClient('/open_nav', open_nav.msg.OpenNavAction)
self.open_nav_client.wait_for_server()
rospy.loginfo(" ... done")
tim1 = rospy.Timer(rospy.Duration(0.2), self.drawing_timer_callback)
tim2 = rospy.Timer(rospy.Duration(0.1), self.control_timer_callback)
self.refresh()
while(self.running):
cv2.imshow('explorator', self.show_image)
k = cv2.waitKey(20) & 0xFF
self._change_mode(k)
tim1.shutdown()
tim2.shutdown()
cv2.destroyAllWindows()
sys.exit(0)
# EXPLORATION PARAMS HERE!!!!
def define_exploration_type(self, explo_type):
self.exploration_strategy=explo_type
self.n_goals=10
if explo_type=='area_split':
self.grid._split_area(3,3)
sb=[]
for i in self.grid.area_splits_coords:
(y, x) = self.grid.get_cell_inds_from_coords(i)
sb.append((x,y))
self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent, ac_model=explo_type, ac_coords=sb)
elif explo_type=='random':
self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent)
elif explo_type=='w_shape':
self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent, ac_model=explo_type, ac_coords=self._w_shape)
else: #greedy
self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent, exploration_type='greedy', ac_model=explo_type)
def drawing_timer_callback(self, event):
self.refresh()
def control_timer_callback(self, event):
if self.navigating:
if self.open_nav_client.simple_state ==2:
print "DONE NAVIGATING"
self.navigating = False
if self.exploring==1:
self.exploring=2
elif self.exploring==2:
if not self.pause_exp:
self.explo_plan.explored_wp.append(self.explo_plan.route.pop(0))
info_str='Do_reading'
self.req_data_pub.publish(info_str)
self.exploring=3
elif self.exploring==4:
if not self.pause_exp:
if len(self.explo_plan.route) >0:
gg=self.explo_plan.route[0]
self.open_nav_client.cancel_goal()
targ = open_nav.msg.OpenNavActionGoal()
targ.goal.coords.header.stamp=rospy.Time.now()
targ.goal.coords.latitude=gg.coord.lat
targ.goal.coords.longitude=gg.coord.lon
print "Going TO: ", gg
self.exploring=1
self.navigating=True
self.open_nav_client.send_goal(targ.goal)
else:
print "Done Exploring"
self.exploring = 0
# else:
# if self.exploring:
# print "waiting for new goal"
def gps_callback(self, data):
if not np.isnan(data.latitude):
self.gps_canvas.clear_image()
gps_coord = MapCoords(data.latitude,data.longitude)
self.gps_canvas.draw_coordinate(gps_coord,'black',size=2, thickness=2, alpha=255)
if self.last_coord:
dist = gps_coord - self.last_coord
self.explodist+= dist[0]
self.last_coord=gps_coord
def data_callback(self, msg):
point_coord = kriging_exploration.map_coords.coord_from_satnav_fix(msg.coordinates)
for i in msg.data:
self.grid.add_data_point(i.model_name, point_coord, i.measurement)
self.vmin, self.vmax = self.grid.get_max_min_vals()
self.n_models=len(self.grid.models)
for i in self.grid.models:
if i.name not in self.model_canvas_names:
print i.name
self.model_canvas_names.append(i.name)
self.model_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.model_legend.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.kriging_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.klegend_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.klegend2_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.klegend3_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.sigma_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.sigma2_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.draw_inputs(self.model_canvas_names.index(i.name))
self.n_inputs+=1
if self.exploring==3:
if self.n_inputs>3:
self.krieg_all_mmodels()
rospy.sleep(0.1)
self.grid.calculate_mean_grid()
rospy.sleep(0.1)
self.draw_means()
self.draw_mode="means"
resp = self.get_errors()
self.result_counter+=1
d={}
d['step']=self.result_counter
d['id']=self.expid
d['ns']=len(self.explo_plan.targets)
d['coord']={}
d['coord']['lat']=self.last_coord.lat
d['coord']['lon']=self.last_coord.lon
d['dist']=float(self.explodist)
d['results']={}
d['results']['groundtruth']=resp
d['results']['var']={}
d['results']['var']['mean']={}
d['results']['var']['mean']['mean']= float(np.mean(self.grid.mean_variance))
d['results']['var']['mean']['max']= float(np.max(self.grid.mean_variance))
d['results']['var']['mean']['min']= float(np.min(self.grid.mean_variance))
# d['results']['var']['std']['mean']= np.mean(self.grid.mean_deviation)
# d['results']['var']['std']['max']= np.max(self.grid.mean_deviation)
# d['results']['var']['std']['min']= np.min(self.grid.mean_deviation)
means=[]
maxs=[]
mins=[]
for i in range(self.n_models):
means.append(float(np.mean(self.grid.models[i].variance)))
maxs.append(float(np.max(self.grid.models[i].variance)))
mins.append(float(np.min(self.grid.models[i].variance)))
d['results']['models']={}
d['results']['models']['means']=means
d['results']['models']['maxs']=maxs
d['results']['models']['mins']=mins
rospy.sleep(0.1)
self.results.append(d)
if self.exploration_strategy == 'greedy':
nwp = len(self.explo_plan.route) + len(self.explo_plan.explored_wp)
print nwp, " nodes in plan"
if nwp <= self.n_goals:
#THIS IS the ONE
#self.explo_plan.add_limited_greedy_goal(self.grid.mean_variance, self.last_coord)
self.explo_plan.add_greedy_goal(self.grid.mean_variance)
#self.explo_plan.add_montecarlo_goal(self.grid.mean_variance, self.last_coord)
#self.draw_mode="deviation"
# self.current_model=0
# if self.redraw_devi:
# self.draw_all_devs()
self.redraw()
rospy.sleep(0.1)
self.exploring=4
def scan_callback(self, msg):
if msg.data == 'Reading':
print "GOT READING!!!"
cx, cy = self.grid.get_cell_inds_from_coords(self.last_coord)
if cx <0 or cy<0:
print "Reading outside the grid"
else:
print 'Reading at: ', cx, cy
for i in self.topo_map.waypoints:
if (cy,cx) == i.ind:
print 'Setting: ', i.name, i.coord, "as Visited"
i.visited= True
self.visited_wp.append(i)
self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)
self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)
self.redraw()
def refresh(self):
#self.show_image = self.image.copy()
#self.show_image = cv2.addWeighted(self.gps_canvas.image, 0.7, self.image, 1.0, 0)
#self.show_image = transparentOverlay(self.image, self.gps_canvas.image)
self.show_image = overlay_image_alpha(self.image,self.gps_canvas.image)
def redraw(self):
self.image = cv2.addWeighted(self.grid_canvas.image, 0.5, self.base_image, 1.0, 0)
self.image = cv2.addWeighted(self.limits_canvas.image, 0.75, self.image, 1.0, 0)
self.image = cv2.addWeighted(self.exploration_canvas.image, 0.75, self.image, 1.0, 0)
if self.draw_mode == "inputs" and self.current_model>=0 :
self.image = cv2.addWeighted(self.model_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.model_legend[self.current_model].image)
if self.draw_mode == "kriging":# and self.current_model>=0 :
self.image = cv2.addWeighted(self.kriging_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.klegend_canvas[self.current_model].image)
if self.draw_mode == "deviation":# and self.current_model>=0 :
self.image = cv2.addWeighted(self.sigma_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend3_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.klegend3_canvas[self.current_model].image)
if self.draw_mode == "variance":# and self.current_model>=0 :
self.image = cv2.addWeighted(self.sigma2_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend2_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.klegend2_canvas[self.current_model].image)
if self.draw_mode == "means":
self.image = cv2.addWeighted(self.mean_dev_canvas.image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend2_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.mean_dev_legend_canvas.image)
self.show_image = self.image.copy()
def click_callback(self, event, x, y, flags, param):
if event == cv2.EVENT_RBUTTONDOWN:
click_coord = self.satellite._pix2coord(x,y)
cx, cy = self.grid.get_cell_inds_from_coords(click_coord)
if cx <0 or cy<0:
print "click outside the grid"
else:
print cx, cy
for i in self.topo_map.waypoints:
if (cy,cx) == i.ind:
print i.name, i.coord.easting, i.coord.northing
i.visited= True
self.visited_wp.append(i)
self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)
self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)
self.redraw()
if event == cv2.EVENT_LBUTTONDOWN:
click_coord = self.satellite._pix2coord(x,y)
cx, cy = self.grid.get_cell_inds_from_coords(click_coord)
if cx <0 or cy<0:
print "click outside the grid"
else:
print cx, cy
for i in self.topo_map.waypoints:
if (cy,cx) == i.ind:
self.open_nav_client.cancel_goal()
targ = open_nav.msg.OpenNavActionGoal()
#goal.goal.goal.header.
targ.goal.coords.header.stamp=rospy.Time.now()
targ.goal.coords.latitude=i.coord.lat
targ.goal.coords.longitude=i.coord.lon
print targ
self.navigating=True
self.open_nav_client.send_goal(targ.goal)
#self.client.wait_for_result()
# Prints out the result of executing the action
#ps = self.client.get_result()
#print ps
def draw_inputs(self, nm):
minv = self.grid.models[nm].lims[0]
maxv = self.grid.models[nm].lims[1]
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.model_canvas[nm].clear_image()
self.model_legend[nm].clear_image()
for i in self.grid.models[nm].orig_data:
cell = self.grid.cells[i.y][i.x]
a= colmap.to_rgba(int(i.value))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.model_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.model_canvas[nm].put_text(self.grid.models[nm].name)
self.model_legend[nm].put_text(self.grid.models[nm].name)
self.model_legend[nm].draw_legend(minv, maxv, colmap, title="Kriging")
def draw_krigged(self, nm):
print "drawing kriging" + str(nm)
minv = self.grid.models[nm].min_val
maxv = self.grid.models[nm].max_val
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.kriging_canvas[nm].clear_image()
self.klegend_canvas[nm].clear_image()
for i in range(self.grid.models[nm].shape[0]):
for j in range(self.grid.models[nm].shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.models[nm].output[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.kriging_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.klegend_canvas[nm].put_text(self.grid.models[nm].name)
self.klegend_canvas[nm].draw_legend(minv, maxv, colmap, title="Kriging")
self.redraw()
def draw_variance(self, nm):
print "drawing variance" + str(nm)
minv = self.grid.models[nm].min_var
maxv = self.grid.models[nm].max_var
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax= maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.sigma_canvas[nm].clear_image()
self.klegend2_canvas[nm].clear_image()
for i in range(self.grid.models[nm].shape[0]):
for j in range(self.grid.models[nm].shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.models[nm].variance[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.sigma2_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.klegend2_canvas[nm].put_text(self.grid.models[nm].name)
self.klegend2_canvas[nm].draw_legend(minv, maxv, colmap, title="Variance")
self.redraw()
def draw_means(self):
print "drawing mean deviation ..."
minv = self.grid.min_mean_deviation
maxv = self.grid.max_mean_deviation
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.mean_dev_canvas.clear_image()
self.mean_dev_legend_canvas.clear_image()
for i in range(self.grid.shape[0]):
for j in range(self.grid.shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.mean_deviation[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.mean_dev_canvas.draw_cell(cell, self.grid.cell_size, b, thickness=-1)
#self.mean_dev_legend_canvas.put_text(self.grid.models[nm].name)
self.mean_dev_legend_canvas.draw_legend(minv, maxv, colmap, title="Mean Deviation")
#self.draw_mode="means"
self.redraw()
def draw_deviation(self, nm):
print "drawing deviation" + str(nm)
minv = self.grid.models[nm].min_dev
maxv = self.grid.models[nm].max_dev
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.sigma_canvas[nm].clear_image()
self.klegend3_canvas[nm].clear_image()
for i in range(self.grid.models[nm].shape[0]):
for j in range(self.grid.models[nm].shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.models[nm].deviation[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.sigma_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.klegend3_canvas[nm].put_text(self.grid.models[nm].name)
self.klegend3_canvas[nm].draw_legend(minv, maxv, colmap, title="Deviation")
self.redraw()
def krieg_all_mmodels(self):
for i in self.grid.models:
i.do_krigging()
self.redraw_kriged=True
self.redraw_var=True
self.redraw_devi=True
def draw_all_outputs(self):
for i in self.grid.models:
self.draw_krigged(self.model_canvas_names.index(i.name))
self.redraw_kriged=False
def draw_all_vars(self):
for i in self.grid.models:
self.draw_variance(self.model_canvas_names.index(i.name))
self.redraw_var=False
def draw_all_devs(self):
for i in self.grid.models:
self.draw_deviation(self.model_canvas_names.index(i.name))
self.redraw_devi=False
def _change_mode(self, k):
if k == 27:
self.running = False
elif k == ord('q'):
self.running = False
elif k == ord('n'):
print len(self.grid.models)
elif k == ord('i'):
if self.n_models > 0:
self.draw_mode="inputs"
self.current_model=0
self.redraw()
elif k == ord('d'):
if self.n_models > 0:
self.draw_mode="deviation"
self.current_model=0
if self.redraw_devi:
self.draw_all_devs()
self.redraw()
elif k == ord('v'):
if self.n_models > 0:
self.draw_mode="variance"
self.current_model=0
if self.redraw_var:
self.draw_all_vars()
self.redraw()
elif k == ord('t'):
self.krieg_all_mmodels()
self.grid.calculate_mean_grid()
if self.n_models > 0:
self.draw_all_outputs()
self.draw_mode="kriging"
self.current_model=0
self.redraw()
elif k == ord('k'):
if self.n_models > 0:
self.draw_mode="kriging"
self.current_model=0
if self.redraw_kriged:
self.draw_all_outputs()
self.redraw()
elif k == ord('>'):
self.current_model+=1
if self.current_model >= self.n_models:
self.current_model=0
self.redraw()
elif k == ord('<'):
self.current_model-=1
if self.current_model < 0:
self.current_model=self.n_models-1
self.redraw()
elif k == ord('w'):
self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)
self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)
self.redraw()
elif k == ord('e'):
self.exploration_canvas.draw_waypoints(self.explo_plan.targets, (255,200,128,255), thickness=3)
self.exploration_canvas.draw_plan(self.explo_plan.route, 'cyan', thickness=1)
self.redraw()
#xnames = [x.name for x in self.explo_plan.route]
#print xnames
elif k == ord('g'):
if len(self.explo_plan.route) >0:
gg=self.explo_plan.route[0]
self.open_nav_client.cancel_goal()
targ = open_nav.msg.OpenNavActionGoal()
targ.goal.coords.header.stamp=rospy.Time.now()
targ.goal.coords.latitude=gg.coord.lat
targ.goal.coords.longitude=gg.coord.lon
print "Going TO: ", gg
self.exploring=1
self.navigating=True
self.open_nav_client.send_goal(targ.goal)
self.result_counter=0
self.explodist=0
else:
print "Done Exploring"
self.exploring = 0
elif k == ord('y'):
vwp = []
for i in self.visited_wp:
vwp.append(i.name)
yml = yaml.safe_dump(vwp, default_flow_style=False)
fh = open("visited.yaml", "w")
s_output = str(yml)
fh.write(s_output)
            fh.close()
elif k == ord('l'):
print "loading visited"
with open("visited.yaml", 'r') as f:
visited = yaml.load(f)
for i in visited:
for l in self.topo_map.waypoints:
if i == l.name:
self.visited_wp.append(l)
break
elif k == ord('a'):
self.grid.calculate_mean_grid()
self.draw_means()
self.draw_mode="means"
elif k == ord('p'):
self.pause_exp= not self.pause_exp
elif k == ord('c'):
print self.grid.limits
print "Area: ", self.grid.calculate_area(self.grid.limits)
print "Area of Area: ", self.grid.area.area_size
colours=['magenta','cyan', 'grey','white','red','yellow','green','blue']
nc=0
for j in self.grid.area_splits:
print j.area_size
#self.limits_canvas.draw_coordinate(j.centre, 'crimson', size=3, thickness=2)
for i in j.limit_lines:
#self.limits_canvas.draw_line(i, colours[nc], thickness=1)
self.limits_canvas.draw_line(i, 'white', thickness=1)
if nc < len(colours)-1:
nc+=1
else:
nc=0
self.redraw()
elif k== ord('r'):
#diff = (self.grid.models[1].output - self.grid.models[0].output)
#print np.mean(diff), np.std(diff), diff.dtype
print self.get_errors()
elif k== ord('o'):
print self.results
outfile = self.expid + '.yaml'
#print self.data_out
yml = yaml.safe_dump(self.results, default_flow_style=False)
fh = open(outfile, "w")
s_output = str(yml)
#print s_output
fh.write(s_output)
            fh.close()
def get_errors(self):
error_chain=[]
shapeo = self.grid.models[0].output.shape
#print vals
print "Waiting for Service"
rospy.wait_for_service('/compare_model')
compare_serv = rospy.ServiceProxy('/compare_model', CompareModels)
for i in range(self.n_models):
try:
d={}
print "going for it ", i
vals = np.reshape(self.grid.models[i].output, -1)
resp1 = compare_serv('kriging', i, shapeo[0], shapeo[1], vals.tolist())
d['name']= self.grid.models[i].name
d['type']= 'kriging'
d['errors']={}
d['errors']['error']=resp1.error
d['errors']['mse']=resp1.mse
d['errors']['std']=resp1.std
d['errors']['var']=resp1.var
#print resp1
error_chain.append(d)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
try:
d={}
print "Mean "
vals = np.reshape(self.grid.mean_output, -1)
resp1 = compare_serv('mean', 0, shapeo[0], shapeo[1], vals.tolist())
#print self.grid.mean_output
d['name']= 'mean'
d['type']= 'mean'
d['errors']={}
d['errors']['error']=resp1.error
d['errors']['mse']=resp1.mse
d['errors']['std']=resp1.std
d['errors']['var']=resp1.var
#print resp1
error_chain.append(d)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
return error_chain
def signal_handler(self, signal, frame):
self.running = False
print('You pressed Ctrl+C!')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--cell_size", type=int, default=10,
help="cell size in meters")
parser.add_argument("--initial_percent", type=float, default=0.05,
help="Percentage of cells to be explored on the initial plan")
parser.add_argument("--limits_file", type=str, default='limits.coords',
help="Percentage of cells to be explored on the initial plan")
parser.add_argument("--initial_waypoint", type=str, default='WayPoint498',
help="Percentage of cells to be explored on the initial plan")
parser.add_argument("--area_coverage_type", type=str, default='area_split',
help="Type of area coverage, random or area_split")
parser.add_argument("--experiment_name", type=str, default='exp1',
help="Experiment ID")
args = parser.parse_args()
rospy.init_node('kriging_exploration')
#Explorator(53.261685, -0.527158, 16, 640, args.cell_size)
#Explorator(53.267213, -0.533420, 17, 640, args) #Football Field
Explorator(53.261576, -0.526648, 17, 640, args) #Half cosmos field
#Explorator(53.261685, -0.525158, 17, 640, args) #COSMOS Field
| mit | 7,337,803,448,498,687,000 | 3,066,014,644,435,228,000 | 39.694048 | 455 | 0.537519 | false |
ntt-sic/cinder | cinder/api/contrib/hosts.py | 3 | 10182 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
from oslo.config import cfg
import webob.exc
from xml.parsers import expat
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import utils
from cinder.volume import api as volume_api
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('volume', 'hosts')
class HostIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hosts')
elem = xmlutil.SubTemplateElement(root, 'host', selector='hosts')
elem.set('service-status')
elem.set('service')
elem.set('zone')
elem.set('service-state')
elem.set('host_name')
elem.set('last-update')
return xmlutil.MasterTemplate(root, 1)
class HostUpdateTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
root.set('host')
root.set('status')
return xmlutil.MasterTemplate(root, 1)
class HostActionTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
root.set('host')
return xmlutil.MasterTemplate(root, 1)
class HostShowTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('host')
elem = xmlutil.make_flat_dict('resource', selector='host',
subselector='resource')
root.append(elem)
return xmlutil.MasterTemplate(root, 1)
class HostDeserializer(wsgi.XMLDeserializer):
def default(self, string):
try:
node = utils.safe_minidom_parse_string(string)
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
updates = {}
for child in node.childNodes[0].childNodes:
updates[child.tagName] = self.extract_text(child)
return dict(body=updates)
def _list_hosts(req, service=None):
"""Returns a summary list of hosts."""
curr_time = timeutils.utcnow()
context = req.environ['cinder.context']
services = db.service_get_all(context, False)
zone = ''
if 'zone' in req.GET:
zone = req.GET['zone']
if zone:
services = [s for s in services if s['availability_zone'] == zone]
hosts = []
for host in services:
delta = curr_time - (host['updated_at'] or host['created_at'])
alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time
status = (alive and "available") or "unavailable"
active = 'enabled'
if host['disabled']:
active = 'disabled'
LOG.debug('status, active and update: %s, %s, %s' %
(status, active, host['updated_at']))
hosts.append({'host_name': host['host'],
'service': host['topic'],
'zone': host['availability_zone'],
'service-status': status,
'service-state': active,
'last-update': host['updated_at']})
if service:
hosts = [host for host in hosts
if host["service"] == service]
return hosts
def check_host(fn):
"""Makes sure that the host exists."""
def wrapped(self, req, id, service=None, *args, **kwargs):
listed_hosts = _list_hosts(req, service)
hosts = [h["host_name"] for h in listed_hosts]
if id in hosts:
return fn(self, req, id, *args, **kwargs)
else:
message = _("Host '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=message)
return wrapped
class HostController(object):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
self.api = volume_api.HostAPI()
super(HostController, self).__init__()
@wsgi.serializers(xml=HostIndexTemplate)
def index(self, req):
authorize(req.environ['cinder.context'])
return {'hosts': _list_hosts(req)}
@wsgi.serializers(xml=HostUpdateTemplate)
@wsgi.deserializers(xml=HostDeserializer)
@check_host
def update(self, req, id, body):
authorize(req.environ['cinder.context'])
update_values = {}
for raw_key, raw_val in body.iteritems():
key = raw_key.lower().strip()
val = raw_val.lower().strip()
if key == "status":
if val in ("enable", "disable"):
update_values['status'] = val.startswith("enable")
else:
explanation = _("Invalid status: '%s'") % raw_val
raise webob.exc.HTTPBadRequest(explanation=explanation)
else:
explanation = _("Invalid update setting: '%s'") % raw_key
raise webob.exc.HTTPBadRequest(explanation=explanation)
update_setters = {'status': self._set_enabled_status}
result = {}
for key, value in update_values.iteritems():
result.update(update_setters[key](req, id, value))
return result
def _set_enabled_status(self, req, host, enabled):
"""Sets the specified host's ability to accept new volumes."""
context = req.environ['cinder.context']
state = "enabled" if enabled else "disabled"
LOG.audit(_("Setting host %(host)s to %(state)s."),
{'host': host, 'state': state})
result = self.api.set_host_enabled(context,
host=host,
enabled=enabled)
if result not in ("enabled", "disabled"):
# An error message was returned
raise webob.exc.HTTPBadRequest(explanation=result)
return {"host": host, "status": result}
@wsgi.serializers(xml=HostShowTemplate)
def show(self, req, id):
"""Shows the volume usage info given by hosts.
:param context: security context
:param host: hostname
:returns: expected to use HostShowTemplate.
ex.::
{'host': {'resource':D},..}
D: {'host': 'hostname','project': 'admin',
'volume_count': 1, 'total_volume_gb': 2048}
"""
host = id
context = req.environ['cinder.context']
if not context.is_admin:
msg = _("Describe-resource is admin only functionality")
raise webob.exc.HTTPForbidden(explanation=msg)
try:
host_ref = db.service_get_by_host_and_topic(context,
host,
CONF.volume_topic)
except exception.ServiceNotFound:
raise webob.exc.HTTPNotFound(explanation=_("Host not found"))
# Getting total available/used resource
# TODO(jdg): Add summary info for Snapshots
volume_refs = db.volume_get_all_by_host(context, host_ref['host'])
(count, sum) = db.volume_data_get_for_host(context,
host_ref['host'])
snap_count_total = 0
snap_sum_total = 0
resources = [{'resource': {'host': host, 'project': '(total)',
'volume_count': str(count),
'total_volume_gb': str(sum),
'snapshot_count': str(snap_count_total),
'total_snapshot_gb': str(snap_sum_total)}}]
project_ids = [v['project_id'] for v in volume_refs]
project_ids = list(set(project_ids))
for project_id in project_ids:
(count, sum) = db.volume_data_get_for_project(context, project_id)
(snap_count, snap_sum) = db.snapshot_data_get_for_project(
context,
project_id)
resources.append(
{'resource':
{'host': host,
'project': project_id,
'volume_count': str(count),
'total_volume_gb': str(sum),
'snapshot_count': str(snap_count),
'total_snapshot_gb': str(snap_sum)}})
snap_count_total += int(snap_count)
snap_sum_total += int(snap_sum)
resources[0]['resource']['snapshot_count'] = str(snap_count_total)
resources[0]['resource']['total_snapshot_gb'] = str(snap_sum_total)
return {"host": resources}
class Hosts(extensions.ExtensionDescriptor):
"""Admin-only host administration"""
name = "Hosts"
alias = "os-hosts"
namespace = "http://docs.openstack.org/volume/ext/hosts/api/v1.1"
updated = "2011-06-29T00:00:00+00:00"
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts',
HostController(),
collection_actions={
'update': 'PUT'},
member_actions={
'startup': 'GET',
'shutdown': 'GET',
'reboot': 'GET'})]
return resources
| apache-2.0 | -5,824,751,601,370,955,000 | -2,715,119,986,384,955,400 | 36.851301 | 78 | 0.560794 | false |
azumimuo/family-xbmc-addon | script.module.youtube.dl/lib/youtube_dl/extractor/vyborymos.py | 73 | 2031 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
class VyboryMosIE(InfoExtractor):
_VALID_URL = r'https?://vybory\.mos\.ru/(?:#precinct/|account/channels\?.*?\bstation_id=)(?P<id>\d+)'
_TESTS = [{
'url': 'http://vybory.mos.ru/#precinct/13636',
'info_dict': {
'id': '13636',
'ext': 'mp4',
'title': 're:^Участковая избирательная комиссия №2231 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'Россия, Москва, улица Введенского, 32А',
'is_live': True,
},
'params': {
'skip_download': True,
}
}, {
'url': 'http://vybory.mos.ru/account/channels?station_id=13636',
'only_matching': True,
}]
def _real_extract(self, url):
station_id = self._match_id(url)
channels = self._download_json(
'http://vybory.mos.ru/account/channels?station_id=%s' % station_id,
station_id, 'Downloading channels JSON')
formats = []
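        # Each channel entry appears to unpack as (sid, hosts, name, ...); one HLS
        # format is emitted per camera/host combination (assumption based on the
        # unpacking below).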
for cam_num, (sid, hosts, name, _) in enumerate(channels, 1):
for num, host in enumerate(hosts, 1):
formats.append({
'url': 'http://%s/master.m3u8?sid=%s' % (host, sid),
'ext': 'mp4',
'format_id': 'camera%d-host%d' % (cam_num, num),
'format_note': '%s, %s' % (name, host),
})
info = self._download_json(
'http://vybory.mos.ru/json/voting_stations/%s/%s.json'
% (compat_str(station_id)[:3], station_id),
station_id, 'Downloading station JSON', fatal=False)
return {
'id': station_id,
'title': self._live_title(info['name'] if info else station_id),
'description': info.get('address'),
'is_live': True,
'formats': formats,
}
| gpl-2.0 | 8,975,216,212,677,840,000 | -7,918,736,544,100,772,000 | 34.8 | 113 | 0.506348 | false |
mekanix/geonode | geonode/people/tests.py | 19 | 2719 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.core import mail
from django.contrib.sites.models import Site
class PeopleTest(TestCase):
fixtures = ('people_data.json', 'bobby.json')
def test_forgot_username(self):
url = reverse('forgot_username')
# page renders
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
# and responds for a bad email
response = self.client.post(url, data={
'email': '[email protected]'
})
# self.assertContains(response, "No user could be found with that email address.")
admin = get_user_model().objects.get(username='bobby')
response = self.client.post(url, data={
'email': admin.email
})
# and sends a mail for a good one
self.assertEqual(len(mail.outbox), 1)
site = Site.objects.get_current()
# Verify that the subject of the first message is correct.
self.assertEqual(mail.outbox[0].subject, "Your username for " + site.name)
def test_account_email_sync(self):
'''verify we can create an account and modify it keeping emails in sync'''
from geonode.people.models import Profile
email = '[email protected]'
joebob = Profile.objects.create(username='joebob', email=email)
self.assertEqual(joebob.emailaddress_set.get(primary=True).email, email)
email = '[email protected]'
joebob.email = email
joebob.save()
self.assertEqual(joebob.emailaddress_set.get(primary=True).email, email)
email = joebob.emailaddress_set.get(primary=True)
email.email = '[email protected]'
email.save()
joebob = Profile.objects.get(id=joebob.id)
self.assertEqual(email.email, joebob.email)
| gpl-3.0 | 5,243,792,092,096,495,000 | -6,129,293,843,217,874,000 | 35.253333 | 90 | 0.638102 | false |
jkonecny12/anaconda | pyanaconda/modules/network/nm_client.py | 1 | 63957 | #
# utility functions using libnm
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import gi
gi.require_version("NM", "1.0")
from gi.repository import NM
import socket
from queue import Queue, Empty
from pykickstart.constants import BIND_TO_MAC
from pyanaconda.modules.network.constants import NM_CONNECTION_UUID_LENGTH, \
CONNECTION_ACTIVATION_TIMEOUT, NM_CONNECTION_TYPE_WIFI, NM_CONNECTION_TYPE_ETHERNET, \
NM_CONNECTION_TYPE_VLAN, NM_CONNECTION_TYPE_BOND, NM_CONNECTION_TYPE_TEAM, \
NM_CONNECTION_TYPE_BRIDGE, NM_CONNECTION_TYPE_INFINIBAND, CONNECTION_ADDING_TIMEOUT
from pyanaconda.modules.network.kickstart import default_ks_vlan_interface_name
from pyanaconda.modules.network.utils import is_s390, get_s390_settings, netmask2prefix, \
prefix2netmask
from pyanaconda.modules.network.config_file import is_config_file_for_system
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
NM_BRIDGE_DUMPED_SETTINGS_DEFAULTS = {
NM.SETTING_BRIDGE_MAC_ADDRESS: None,
NM.SETTING_BRIDGE_STP: True,
NM.SETTING_BRIDGE_PRIORITY: 32768,
NM.SETTING_BRIDGE_FORWARD_DELAY: 15,
NM.SETTING_BRIDGE_HELLO_TIME: 2,
NM.SETTING_BRIDGE_MAX_AGE: 20,
NM.SETTING_BRIDGE_AGEING_TIME: 300,
NM.SETTING_BRIDGE_GROUP_FORWARD_MASK: 0,
NM.SETTING_BRIDGE_MULTICAST_SNOOPING: True
}
def get_iface_from_connection(nm_client, uuid):
"""Get the name of device that would be used for the connection.
In installer it should be just one device.
We need to account also for the case of configurations bound to mac address
(HWADDR), eg network --bindto=mac command.
"""
connection = nm_client.get_connection_by_uuid(uuid)
if not connection:
return None
iface = connection.get_setting_connection().get_interface_name()
if not iface:
wired_setting = connection.get_setting_wired()
if wired_setting:
mac = wired_setting.get_mac_address()
if mac:
iface = get_iface_from_hwaddr(nm_client, mac)
return iface
def get_vlan_interface_name_from_connection(nm_client, connection):
"""Get vlan interface name from vlan connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
If no interface name is specified in the connection settings, infer the
value as <PARENT_IFACE>.<VLAN_ID> - same as NetworkManager.
"""
iface = connection.get_setting_connection().get_interface_name()
if not iface:
setting_vlan = connection.get_setting_vlan()
if setting_vlan:
vlanid = setting_vlan.get_id()
parent = setting_vlan.get_parent()
# if parent is specified by UUID
if len(parent) == NM_CONNECTION_UUID_LENGTH:
parent = get_iface_from_connection(nm_client, parent)
if vlanid is not None and parent:
iface = default_ks_vlan_interface_name(parent, vlanid)
return iface
def get_iface_from_hwaddr(nm_client, hwaddr):
"""Find the name of device specified by mac address."""
for device in nm_client.get_devices():
if device.get_device_type() in (NM.DeviceType.ETHERNET,
NM.DeviceType.WIFI):
try:
address = device.get_permanent_hw_address()
if not address:
address = device.get_hw_address()
except AttributeError as e:
log.warning("Device %s: %s", device.get_iface(), e)
address = device.get_hw_address()
else:
address = device.get_hw_address()
# per #1703152, at least in *some* case, we wind up with
# address as None here, so we need to guard against that
if address and address.upper() == hwaddr.upper():
return device.get_iface()
return None
def get_team_port_config_from_connection(nm_client, uuid):
connection = nm_client.get_connection_by_uuid(uuid)
if not connection:
return None
team_port = connection.get_setting_team_port()
if not team_port:
return None
config = team_port.get_config()
return config
def get_device_name_from_network_data(nm_client, network_data, supported_devices, bootif):
"""Get the device name from kickstart device specification.
    Generally given by the --device option. For vlans the --interfacename
    and --vlanid options also come into play.
    Side effect: for vlans, network_data.parent is set from the --device option.
    :param network_data: a kickstart device configuration
:type network_data: kickstart NetworkData object
:param supported_devices: list of names of supported devices
:type supported_devices: list(str)
    :param bootif: MAC address of the device to be used for the --device=bootif specification
:type bootif: str
:returns: device name the configuration should be used for
:rtype: str
"""
spec = network_data.device
device_name = ""
msg = ""
if not spec:
msg = "device specification missing"
# Specification by device name
if spec in supported_devices:
device_name = spec
msg = "existing device found"
# Specification by mac address
elif ':' in spec:
device_name = get_iface_from_hwaddr(nm_client, spec) or ""
msg = "existing device found"
# Specification by BOOTIF boot option
elif spec == 'bootif':
if bootif:
device_name = get_iface_from_hwaddr(nm_client, bootif) or ""
msg = "existing device for {} found".format(bootif)
else:
msg = "BOOTIF value is not specified in boot options"
# First device with carrier (sorted lexicographically)
elif spec == 'link':
device_name = get_first_iface_with_link(nm_client, supported_devices) or ""
msg = "first device with link found"
if device_name:
if device_name not in supported_devices:
msg = "{} device found is not supported".format(device_name)
device_name = ""
# Virtual devices don't have to exist
elif spec and any((network_data.vlanid,
network_data.bondslaves,
network_data.teamslaves,
network_data.bridgeslaves)):
device_name = spec
msg = "virtual device does not exist, which is OK"
if network_data.vlanid:
network_data.parent = device_name
if network_data.interfacename:
device_name = network_data.interfacename
msg = "vlan device name specified by --interfacename"
else:
device_name = default_ks_vlan_interface_name(device_name, network_data.vlanid)
msg = "vlan device name inferred from parent and vlanid"
log.debug("kickstart specification --device=%s -> %s (%s)", spec, device_name, msg)
return device_name
def _create_vlan_bond_connection_from_ksdata(network_data):
con = _create_new_connection(network_data, network_data.device)
_update_bond_connection_from_ksdata(con, network_data)
# No ip configuration on vlan parent (bond)
s_ip4 = NM.SettingIP4Config.new()
s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD,
NM.SETTING_IP4_CONFIG_METHOD_DISABLED)
con.add_setting(s_ip4)
s_ip6 = NM.SettingIP6Config.new()
s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD,
NM.SETTING_IP6_CONFIG_METHOD_DISABLED)
con.add_setting(s_ip6)
return con
def _update_bond_connection_from_ksdata(connection, network_data):
"""Update connection with values from bond kickstart configuration.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
"""
s_con = connection.get_setting_connection()
s_con.props.type = NM_CONNECTION_TYPE_BOND
s_bond = NM.SettingBond.new()
opts = network_data.bondopts
if opts:
for option in opts.split(';' if ';' in opts else ','):
key, _sep, value = option.partition("=")
if s_bond.validate_option(key, value):
s_bond.add_option(key, value)
else:
log.warning("ignoring invalid bond option '%s=%s'", key, value)
connection.add_setting(s_bond)
def _add_existing_virtual_device_to_bridge(nm_client, device_name, bridge_spec):
"""Add existing virtual device to a bridge.
:param device_name: name of the virtual device to be added
:type device_name: str
:param bridge_spec: specification of the bridge (interface name or connection uuid)
:type bridge_spec: str
:returns: uuid of the updated connection or None
:rtype: str
"""
supported_virtual_types = (
NM_CONNECTION_TYPE_BOND,
)
port_connection = None
cons = nm_client.get_connections()
for con in cons:
if con.get_interface_name() == device_name \
and con.get_connection_type() in supported_virtual_types:
port_connection = con
break
if not port_connection:
return None
update_connection_values(
port_connection,
[
(NM.SETTING_CONNECTION_SETTING_NAME,
NM.SETTING_CONNECTION_SLAVE_TYPE,
'bridge'),
(NM.SETTING_CONNECTION_SETTING_NAME,
NM.SETTING_CONNECTION_MASTER,
bridge_spec),
]
)
commit_changes_with_autoconnection_blocked(port_connection)
return port_connection.get_uuid()
def _update_team_connection_from_ksdata(connection, network_data):
"""Update connection with values from team kickstart configuration.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
"""
s_con = connection.get_setting_connection()
s_con.props.type = "team"
s_team = NM.SettingTeam.new()
s_team.props.config = network_data.teamconfig
connection.add_setting(s_team)
def _update_vlan_connection_from_ksdata(connection, network_data):
"""Update connection with values from vlan kickstart configuration.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
:returns: interface name of the device
:rtype: str
"""
s_con = connection.get_setting_connection()
s_con.props.type = NM_CONNECTION_TYPE_VLAN
if network_data.interfacename:
s_con.props.id = network_data.interfacename
s_con.props.interface_name = network_data.interfacename
else:
s_con.props.interface_name = None
s_vlan = NM.SettingVlan.new()
s_vlan.props.id = int(network_data.vlanid)
s_vlan.props.parent = network_data.parent
connection.add_setting(s_vlan)
return s_con.props.interface_name
def _update_bridge_connection_from_ksdata(connection, network_data):
"""Update connection with values from bridge kickstart configuration.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
"""
s_con = connection.get_setting_connection()
s_con.props.type = NM_CONNECTION_TYPE_BRIDGE
s_bridge = NM.SettingBridge.new()
for opt in network_data.bridgeopts.split(","):
key, _sep, value = opt.partition("=")
if key in ("stp", "multicast-snooping"):
if value == "yes":
value = True
elif value == "no":
value = False
else:
try:
value = int(value)
except ValueError:
log.error("Invalid bridge option %s", opt)
continue
s_bridge.set_property(key, value)
connection.add_setting(s_bridge)
def _update_infiniband_connection_from_ksdata(connection, network_data):
"""Update connection with values from infiniband kickstart configuration.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
"""
s_con = connection.get_setting_connection()
s_con.props.type = NM_CONNECTION_TYPE_INFINIBAND
s_ib = NM.SettingInfiniband.new()
s_ib.props.transport_mode = "datagram"
connection.add_setting(s_ib)
def _update_ethernet_connection_from_ksdata(connection, network_data, bound_mac):
"""Update connection with values from ethernet kickstart configuration.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
:param bound_mac: MAC address the device name is bound to (ifname=)
:type bound_mac: str
"""
s_con = connection.get_setting_connection()
s_con.props.type = NM_CONNECTION_TYPE_ETHERNET
s_wired = NM.SettingWired.new()
if bound_mac:
s_wired.props.mac_address = bound_mac
connection.add_setting(s_wired)
def _update_wired_connection_with_s390_settings(connection, s390cfg):
"""Update connection with values specific for s390 architecture.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
:param s390cfg: dictionary storing s390 specific settings
:type s390cfg: dict
"""
s_wired = connection.get_setting_wired()
if s390cfg['SUBCHANNELS']:
subchannels = s390cfg['SUBCHANNELS'].split(",")
s_wired.props.s390_subchannels = subchannels
if s390cfg['NETTYPE']:
s_wired.props.s390_nettype = s390cfg['NETTYPE']
if s390cfg['OPTIONS']:
opts = s390cfg['OPTIONS'].split(" ")
opts_dict = {k: v for k, v in (o.split("=") for o in opts)}
s_wired.props.s390_options = opts_dict
def _create_new_connection(network_data, device_name):
con_uuid = NM.utils_uuid_generate()
con = NM.SimpleConnection.new()
s_con = NM.SettingConnection.new()
s_con.props.uuid = con_uuid
s_con.props.id = device_name
s_con.props.interface_name = device_name
s_con.props.autoconnect = network_data.onboot
con.add_setting(s_con)
return con
def create_connections_from_ksdata(nm_client, network_data, device_name, ifname_option_values=None):
"""Create NM connections from kickstart configuration.
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
:param device_name: name of the device to be configured by kickstart
:type device_name: str
:param ifname_option_values: list of ifname boot option values
:type ifname_option_values: list(str)
:return: list of tuples (CONNECTION, NAME_OF_DEVICE_TO_BE_ACTIVATED)
:rtype: list((NM.RemoteConnection, str))
"""
ifname_option_values = ifname_option_values or []
port_connections = []
connections = []
device_to_activate = device_name
con = _create_new_connection(network_data, device_name)
bond_con = None
update_connection_ip_settings_from_ksdata(con, network_data)
# type "bond"
if network_data.bondslaves:
# vlan over bond
if network_data.vlanid:
# create bond connection, vlan connection will be created later
bond_controller = network_data.device
bond_con = _create_vlan_bond_connection_from_ksdata(network_data)
connections.append((bond_con, bond_controller))
else:
bond_controller = device_name
_update_bond_connection_from_ksdata(con, network_data)
for i, port in enumerate(network_data.bondslaves.split(","), 1):
port_con = create_port_connection('bond', i, port, bond_controller,
network_data.onboot)
bind_connection(nm_client, port_con, network_data.bindto, port)
port_connections.append((port_con, port))
# type "team"
if network_data.teamslaves:
_update_team_connection_from_ksdata(con, network_data)
for i, (port, cfg) in enumerate(network_data.teamslaves, 1):
s_team_port = NM.SettingTeamPort.new()
s_team_port.props.config = cfg
port_con = create_port_connection('team', i, port, device_name,
network_data.onboot, settings=[s_team_port])
bind_connection(nm_client, port_con, network_data.bindto, port)
port_connections.append((port_con, port))
# type "vlan"
if network_data.vlanid:
device_to_activate = _update_vlan_connection_from_ksdata(con, network_data) \
or device_to_activate
# type "bridge"
if network_data.bridgeslaves:
# bridge connection is autoactivated
_update_bridge_connection_from_ksdata(con, network_data)
for i, port in enumerate(network_data.bridgeslaves.split(","), 1):
if not _add_existing_virtual_device_to_bridge(nm_client, port, device_name):
port_con = create_port_connection('bridge', i, port, device_name,
network_data.onboot)
bind_connection(nm_client, port_con, network_data.bindto, port)
port_connections.append((port_con, port))
# type "infiniband"
if is_infiniband_device(nm_client, device_name):
_update_infiniband_connection_from_ksdata(con, network_data)
# type "802-3-ethernet"
if is_ethernet_device(nm_client, device_name):
bound_mac = bound_hwaddr_of_device(nm_client, device_name, ifname_option_values)
_update_ethernet_connection_from_ksdata(con, network_data, bound_mac)
if bound_mac:
log.debug("add connection: mac %s is bound to name %s",
bound_mac, device_name)
else:
bind_connection(nm_client, con, network_data.bindto, device_name)
# Add s390 settings
if is_s390():
s390cfg = get_s390_settings(device_name)
_update_wired_connection_with_s390_settings(con, s390cfg)
connections.append((con, device_to_activate))
connections.extend(port_connections)
return connections
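# Editor's illustrative sketch (not part of the original module): how the tuples returned
# by create_connections_from_ksdata() are typically consumed.  The device names and the
# kickstart data below are hypothetical.
def _example_consume_created_connections(nm_client, network_data):
    # For a hypothetical "network --device=team0 --teamslaves=ens3,ens4" the list holds
    # the controller connection first, followed by one port connection per team port.
    for connection, device_name in create_connections_from_ksdata(nm_client, network_data, "team0"):
        # each connection still has to be added to NM, eg via add_connection_sync()
        log.debug("would add %s for %s", connection.get_id(), device_name)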
def add_connection_from_ksdata(nm_client, network_data, device_name, activate=False,
ifname_option_values=None):
"""Add NM connection created from kickstart configuration.
:param network_data: kickstart configuration
:type network_data: pykickstart NetworkData
:param device_name: name of the device to be configured by kickstart
:type device_name: str
:param activate: activate the added connection
:type activate: bool
:param ifname_option_values: list of ifname boot option values
:type ifname_option_values: list(str)
"""
connections = create_connections_from_ksdata(
nm_client,
network_data,
device_name,
ifname_option_values
)
for connection, device_name in connections:
log.debug("add connection (activate=%s): %s for %s\n%s",
activate, connection.get_uuid(), device_name,
connection.to_dbus(NM.ConnectionSerializationFlags.NO_SECRETS))
added_connection = add_connection_sync(
nm_client,
connection,
)
if not added_connection:
continue
if activate:
if device_name:
device = nm_client.get_device_by_iface(device_name)
if device:
log.debug("activating with device %s", device.get_iface())
else:
log.debug("activating without device specified - device %s not found",
device_name)
else:
device = None
log.debug("activating without device specified")
nm_client.activate_connection_async(added_connection, device, None, None)
return connections
def add_connection_sync(nm_client, connection):
"""Add a connection synchronously and optionally activate asynchronously.
:param connection: connection to be added
:type connection: NM.SimpleConnection
:return: added connection or None on timeout
:rtype: NM.RemoteConnection
"""
sync_queue = Queue()
def finish_callback(nm_client, result, sync_queue):
con, result = nm_client.add_connection2_finish(result)
log.debug("connection %s added:\n%s", con.get_uuid(),
con.to_dbus(NM.ConnectionSerializationFlags.NO_SECRETS))
sync_queue.put(con)
nm_client.add_connection2(
connection.to_dbus(NM.ConnectionSerializationFlags.ALL),
(NM.SettingsAddConnection2Flags.TO_DISK |
NM.SettingsAddConnection2Flags.BLOCK_AUTOCONNECT),
None,
False,
None,
finish_callback,
sync_queue
)
try:
ret = sync_queue.get(timeout=CONNECTION_ADDING_TIMEOUT)
except Empty:
log.error("Adding of connection %s timed out.", connection.get_uuid())
ret = None
return ret
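# Editor's note: add_connection2() is asynchronous, so add_connection_sync() above blocks
# on a Queue until the finish callback fires.  A minimal sketch of the same pattern with a
# generic asynchronous API (start_async is hypothetical, not an NM call):
def _example_sync_over_async(start_async):
    done = Queue()
    # the callback only hands the result over to the waiting caller
    start_async(callback=done.put)
    try:
        return done.get(timeout=CONNECTION_ADDING_TIMEOUT)
    except Empty:
        return None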
def create_port_connection(port_type, port_idx, port, controller, autoconnect, settings=None):
"""Create a port NM connection for virtual connection (bond, team, bridge).
:param port_type: type of port ("bond", "team", "bridge")
:type port_type: str
:param port_idx: index of the port for naming
:type port_idx: int
:param port: port's device name
:type port: str
:param controller: port's controller device name
:type controller: str
:param autoconnect: connection autoconnect value
:type autoconnect: bool
:param settings: list of other settings to be added to the connection
:type settings: list(NM.Setting)
:return: created connection
:rtype: NM.SimpleConnection
"""
settings = settings or []
port_name = "%s_slave_%d" % (controller, port_idx)
con = NM.SimpleConnection.new()
s_con = NM.SettingConnection.new()
s_con.props.uuid = NM.utils_uuid_generate()
s_con.props.id = port_name
s_con.props.slave_type = port_type
s_con.props.master = controller
s_con.props.type = NM_CONNECTION_TYPE_ETHERNET
s_con.props.autoconnect = autoconnect
con.add_setting(s_con)
s_wired = NM.SettingWired.new()
con.add_setting(s_wired)
for setting in settings:
con.add_setting(setting)
return con
def is_infiniband_device(nm_client, device_name):
"""Is the type of the device infiniband?"""
device = nm_client.get_device_by_iface(device_name)
if device and device.get_device_type() == NM.DeviceType.INFINIBAND:
return True
return False
def is_ethernet_device(nm_client, device_name):
"""Is the type of the device ethernet?"""
device = nm_client.get_device_by_iface(device_name)
if device and device.get_device_type() == NM.DeviceType.ETHERNET:
return True
return False
def is_ibft_connection(connection):
"""Is the connection generated by NM from iBFT?"""
return connection.get_id().startswith("iBFT Connection")
def bound_hwaddr_of_device(nm_client, device_name, ifname_option_values):
"""Check and return mac address of device bound by device renaming.
For example ifname=ens3:f4:ce:46:2c:44:7a should bind the device name ens3
to the MAC address (and rename the device in initramfs eventually). If
    the hardware address of the device device_name is the same as the MAC address,
    its value is returned.
:param device_name: device name
:type device_name: str
:param ifname_option_values: list of ifname boot option values
:type ifname_option_values: list(str)
:return: hwaddress of the device if bound, or None
:rtype: str or None
"""
for ifname_value in ifname_option_values:
iface, mac = ifname_value.split(":", 1)
if iface == device_name:
if iface == get_iface_from_hwaddr(nm_client, mac):
return mac.upper()
else:
log.warning("MAC address of ifname %s does not correspond to ifname=%s",
iface, ifname_value)
return None
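# Editor's illustrative sketch: how an ifname= boot option value is split by
# bound_hwaddr_of_device() above.  The value is hypothetical.
def _example_ifname_option_split():
    iface, mac = "ens3:f4:ce:46:2c:44:7a".split(":", 1)
    # iface == "ens3", mac == "f4:ce:46:2c:44:7a"; the MAC is only reported as bound
    # if NM resolves it to the same interface name.
    return iface, mac.upper()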
def update_connection_from_ksdata(nm_client, connection, network_data, device_name,
ifname_option_values=None):
"""Update NM connection specified by uuid from kickstart configuration.
:param connection: existing NetworkManager connection to be updated
:type connection: NM.RemoteConnection
:param network_data: kickstart network configuration
:type network_data: pykickstart NetworkData
:param device_name: device name the connection should be bound to eventually
:type device_name: str
:param ifname_option_values: list of ifname boot option values
:type ifname_option_values: list(str)
"""
log.debug("updating connection %s:\n%s", connection.get_uuid(),
connection.to_dbus(NM.ConnectionSerializationFlags.NO_SECRETS))
ifname_option_values = ifname_option_values or []
# IP configuration
update_connection_ip_settings_from_ksdata(connection, network_data)
s_con = connection.get_setting_connection()
s_con.set_property(NM.SETTING_CONNECTION_AUTOCONNECT, network_data.onboot)
if connection.get_connection_type() not in (NM_CONNECTION_TYPE_BOND,
NM_CONNECTION_TYPE_TEAM,
NM_CONNECTION_TYPE_VLAN,
NM_CONNECTION_TYPE_BRIDGE):
bound_mac = bound_hwaddr_of_device(nm_client, device_name, ifname_option_values)
if bound_mac:
log.debug("update connection: mac %s is bound to name %s", bound_mac, device_name)
# The connection is already bound to iface name by NM in initramfs,
# still bind also to MAC until this method of renaming is abandoned (rhbz#1875485)
bind_connection(nm_client, connection, BIND_TO_MAC, device_name,
bind_exclusively=False)
else:
bind_connection(nm_client, connection, network_data.bindto, device_name)
commit_changes_with_autoconnection_blocked(connection)
log.debug("updated connection %s:\n%s", connection.get_uuid(),
connection.to_dbus(NM.ConnectionSerializationFlags.NO_SECRETS))
def update_connection_ip_settings_from_ksdata(connection, network_data):
"""Update NM connection from kickstart IP configuration in place.
:param connection: existing NetworkManager connection to be updated
:type connection: NM.RemoteConnection
    :param network_data: kickstart configuration containing the IP configuration
to be applied to the connection
:type network_data: pykickstart NetworkData
"""
# ipv4 settings
if network_data.noipv4:
method4 = "disabled"
elif network_data.bootProto == "static":
method4 = "manual"
else:
method4 = "auto"
connection.remove_setting(NM.SettingIP4Config)
s_ip4 = NM.SettingIP4Config.new()
s_ip4.set_property(NM.SETTING_IP_CONFIG_METHOD, method4)
if method4 == "manual":
prefix4 = netmask2prefix(network_data.netmask)
addr4 = NM.IPAddress.new(socket.AF_INET, network_data.ip, prefix4)
s_ip4.add_address(addr4)
if network_data.gateway:
s_ip4.props.gateway = network_data.gateway
if network_data.nodefroute:
s_ip4.props.never_default = True
connection.add_setting(s_ip4)
# ipv6 settings
if network_data.noipv6:
method6 = "ignore"
elif not network_data.ipv6 or network_data.ipv6 == "auto":
method6 = "auto"
elif network_data.ipv6 == "dhcp":
method6 = "dhcp"
else:
method6 = "manual"
connection.remove_setting(NM.SettingIP6Config)
s_ip6 = NM.SettingIP6Config.new()
s_ip6.set_property(NM.SETTING_IP_CONFIG_METHOD, method6)
s_ip6.set_property(NM.SETTING_IP6_CONFIG_ADDR_GEN_MODE,
NM.SettingIP6ConfigAddrGenMode.EUI64)
if method6 == "manual":
addr6, _slash, prefix6 = network_data.ipv6.partition("/")
if prefix6:
prefix6 = int(prefix6)
else:
prefix6 = 64
addr6 = NM.IPAddress.new(socket.AF_INET6, addr6, prefix6)
s_ip6.add_address(addr6)
if network_data.ipv6gateway:
s_ip6.props.gateway = network_data.ipv6gateway
connection.add_setting(s_ip6)
# nameservers
if network_data.nameserver:
for ns in [str.strip(i) for i in network_data.nameserver.split(",")]:
if NM.utils_ipaddr_valid(socket.AF_INET6, ns):
s_ip6.add_dns(ns)
elif NM.utils_ipaddr_valid(socket.AF_INET, ns):
s_ip4.add_dns(ns)
else:
log.error("IP address %s is not valid", ns)
def bind_settings_to_mac(nm_client, s_connection, s_wired, device_name=None, bind_exclusively=True):
"""Bind the settings to the mac address of the device.
:param s_connection: connection setting to be updated
:type s_connection: NM.SettingConnection
:param s_wired: wired setting to be updated
:type s_wired: NM.SettingWired
:param device_name: name of the device to be bound
:type device_name: str
:param bind_exclusively: remove reference to the device name from the settings
:type bind_exclusively: bool
:returns: True if the settings were modified, False otherwise
:rtype: bool
"""
mac_address = s_wired.get_mac_address()
interface_name = s_connection.get_interface_name()
modified = False
if mac_address:
log.debug("Bind to mac: already bound to %s", mac_address)
else:
iface = device_name or interface_name
if not iface:
log.warning("Bind to mac: no device name provided to look for mac")
return False
device = nm_client.get_device_by_iface(iface)
if device:
try:
perm_hwaddr = device.get_permanent_hw_address()
except AttributeError:
perm_hwaddr = None
hwaddr = perm_hwaddr or device.get_hw_address()
s_wired.props.mac_address = hwaddr
log.debug("Bind to mac: bound to %s", hwaddr)
modified = True
if bind_exclusively and interface_name:
s_connection.props.interface_name = None
log.debug("Bind to mac: removed interface-name %s from connection", interface_name)
modified = True
return modified
def bind_settings_to_device(nm_client, s_connection, s_wired, device_name=None,
bind_exclusively=True):
"""Bind the settings to the name of the device.
:param s_connection: connection setting to be updated
:type s_connection: NM.SettingConnection
:param s_wired: wired setting to be updated
:type s_wired: NM.SettingWired
:param device_name: name of the device to be bound
:type device_name: str
:param bind_exclusively: remove reference to the mac address from the settings
:type bind_exclusively: bool
:returns: True if the settings were modified, False otherwise
:rtype: bool
"""
mac_address = s_wired.get_mac_address()
interface_name = s_connection.get_interface_name()
modified = False
if device_name:
s_connection.props.interface_name = device_name
log.debug("Bind to device: %s -> %s", interface_name, device_name)
modified = interface_name != device_name
else:
if not interface_name:
log.debug("Bind to device: no device to bind to")
return False
else:
log.debug("Bind to device: already bound to %s", interface_name)
if bind_exclusively and mac_address:
s_wired.props.mac_address = None
log.debug("Bind to device: removed mac-address from connection")
modified = True
return modified
def bind_connection(nm_client, connection, bindto, device_name=None, bind_exclusively=True):
"""Bind the connection to device name or mac address.
:param connection: connection to be updated before adding to NM
:type connection: NM.SimpleConnection
    :param bindto: type of binding of the connection (mac address or device name)
- BIND_TO_MAC for mac address
- None for device name (default)
:type bindto: pykickstart --bindto constant
:param device_name: device name for binding
:type device_name: str
:param bind_exclusively: when binding to an entity, remove reference to the other
:type bind_exclusively: bool
:returns: True if the connection was modified, False otherwise
:rtype: bool
"""
msg = "Bind connection {} to {}:".format(connection.get_uuid(), bindto or "iface")
s_con = connection.get_setting_connection()
if not s_con:
log.warning("%s no connection settings, bailing", msg)
return False
s_wired = connection.get_setting_wired()
if bindto == BIND_TO_MAC:
if not s_wired:
log.warning("%s no wired settings, bailing", msg)
return False
modified = bind_settings_to_mac(nm_client, s_con, s_wired, device_name, bind_exclusively)
else:
modified = bind_settings_to_device(nm_client, s_con, s_wired, device_name, bind_exclusively)
return modified
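# Editor's note: kickstart "--bindto=mac" maps to BIND_TO_MAC here, anything else binds by
# interface name.  A hedged sketch of the expected effect (the device name is hypothetical):
def _example_bind_connection(nm_client, connection):
    # binds to the MAC of ens3 and, because bind_exclusively defaults to True, clears the
    # interface-name from the connection setting
    bind_connection(nm_client, connection, BIND_TO_MAC, "ens3")
    # binds by interface name and clears the mac-address from the wired setting
    bind_connection(nm_client, connection, None, "ens3")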
def get_connections_available_for_iface(nm_client, iface):
"""Get all connections available for given interface.
:param iface: interface name
:type iface: str
:return: list of all available connections
:rtype: list(NM.RemoteConnection)
"""
cons = []
device = nm_client.get_device_by_iface(iface)
if device:
cons = device.get_available_connections()
else:
# Try also non-existing (not real) virtual devices
for device in nm_client.get_all_devices():
if not device.is_real() and device.get_iface() == iface:
cons = device.get_available_connections()
if cons:
break
else:
# Getting available connections does not seem to work quite well for
# non-real team - try to look them up in all connections.
for con in nm_client.get_connections():
interface_name = con.get_interface_name()
if not interface_name and con.get_connection_type() == NM_CONNECTION_TYPE_VLAN:
interface_name = get_vlan_interface_name_from_connection(nm_client, con)
if interface_name == iface:
cons.append(con)
return cons
def update_connection_values(connection, new_values):
"""Update setting values of a connection.
:param connection: existing NetworkManager connection to be updated
:type connection: NM.RemoteConnection
:param new_values: list of properties to be updated
:type new_values: [(SETTING_NAME, SETTING_PROPERTY, VALUE)]
"""
for setting_name, setting_property, value in new_values:
setting = connection.get_setting_by_name(setting_name)
if setting:
setting.set_property(setting_property, value)
log.debug("updating connection %s setting '%s' '%s' to '%s'",
connection.get_uuid(), setting_name, setting_property, value)
else:
log.debug("setting '%s' not found while updating connection %s",
setting_name, connection.get_uuid())
log.debug("updated connection %s:\n%s", connection.get_uuid(),
connection.to_dbus(NM.ConnectionSerializationFlags.ALL))
def devices_ignore_ipv6(nm_client, device_types):
"""All connections of devices of given type ignore ipv6."""
device_types = device_types or []
for device in nm_client.get_devices():
if device.get_device_type() in device_types:
cons = device.get_available_connections()
for con in cons:
s_ipv6 = con.get_setting_ip6_config()
if s_ipv6 and s_ipv6.get_method() != NM.SETTING_IP6_CONFIG_METHOD_IGNORE:
return False
return True
def get_first_iface_with_link(nm_client, ifaces):
"""Find first iface having link (in lexicographical order)."""
for iface in sorted(ifaces):
device = nm_client.get_device_by_iface(iface)
if device and device.get_carrier():
return device.get_iface()
return None
def get_connections_dump(nm_client):
"""Dumps all connections for logging."""
con_dumps = []
for con in nm_client.get_connections():
con_dumps.append(str(con.to_dbus(NM.ConnectionSerializationFlags.NO_SECRETS)))
return "\n".join(con_dumps)
def commit_changes_with_autoconnection_blocked(connection, save_to_disk=True):
"""Implementation of NM CommitChanges() method with blocked autoconnection.
Update2() API is used to implement the functionality (called synchronously).
Prevents autoactivation of the connection on its update which would happen
with CommitChanges if "autoconnect" is set True.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param save_to_disk: should the changes be written also to disk?
:type save_to_disk: bool
    :return: result of the Update2() call on success, None on failure
:rtype: GVariant of type "a{sv}" or None
"""
sync_queue = Queue()
def finish_callback(connection, result, sync_queue):
ret = connection.update2_finish(result)
sync_queue.put(ret)
flags = NM.SettingsUpdate2Flags.BLOCK_AUTOCONNECT
if save_to_disk:
flags |= NM.SettingsUpdate2Flags.TO_DISK
con2 = NM.SimpleConnection.new_clone(connection)
connection.update2(
con2.to_dbus(NM.ConnectionSerializationFlags.ALL),
flags,
None,
None,
finish_callback,
sync_queue
)
return sync_queue.get()
def clone_connection_sync(nm_client, connection, con_id=None, uuid=None):
"""Clone a connection synchronously.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param con_id: id of the cloned connection
:type con_id: str
:param uuid: uuid of the cloned connection (None to be generated)
:type uuid: str
:return: NetworkManager connection or None on timeout
:rtype: NM.RemoteConnection
"""
sync_queue = Queue()
def finish_callback(nm_client, result, sync_queue):
con, result = nm_client.add_connection2_finish(result)
log.debug("connection %s cloned:\n%s", con.get_uuid(),
con.to_dbus(NM.ConnectionSerializationFlags.NO_SECRETS))
sync_queue.put(con)
cloned_connection = NM.SimpleConnection.new_clone(connection)
s_con = cloned_connection.get_setting_connection()
s_con.props.uuid = uuid or NM.utils_uuid_generate()
s_con.props.id = con_id or "{}-clone".format(connection.get_id())
nm_client.add_connection2(
cloned_connection.to_dbus(NM.ConnectionSerializationFlags.ALL),
(NM.SettingsAddConnection2Flags.TO_DISK |
NM.SettingsAddConnection2Flags.BLOCK_AUTOCONNECT),
None,
False,
None,
finish_callback,
sync_queue
)
try:
ret = sync_queue.get(timeout=CONNECTION_ACTIVATION_TIMEOUT)
except Empty:
log.error("Cloning of a connection timed out.")
ret = None
return ret
def get_dracut_arguments_from_connection(nm_client, connection, iface, target_ip,
hostname, ibft=False):
"""Get dracut arguments for the iface and SAN target from NM connection.
Examples of SAN: iSCSI, FCoE
The dracut arguments would activate the iface in initramfs so that the
SAN target can be attached (usually to mount root filesystem).
:param nm_client: instance of NetworkManager client
:type nm_client: NM.Client
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param iface: network interface used to connect to the target
                  (can be None if ibft is used)
:type iface: str
:param target_ip: IP of the SAN target
:type target_ip: str
:param hostname: static hostname to be configured
:type hostname: str
:param ibft: network should be configured from ibft
:type ibft: bool
:returns: dracut arguments
:rtype: set(str)
"""
netargs = set()
if ibft:
netargs.add("rd.iscsi.ibft")
elif target_ip:
if hostname is None:
hostname = ""
if ':' in target_ip:
# Using IPv6 target IP
ipv6_arg = _get_dracut_ipv6_argument(connection, iface, hostname)
if ipv6_arg:
netargs.add(ipv6_arg)
else:
log.error("No IPv6 configuration found in connection %s", connection.get_uuid())
else:
# Using IPv4 target IP
ipv4_arg = _get_dracut_ipv4_argument(connection, iface, hostname)
if ipv4_arg:
netargs.add(ipv4_arg)
else:
log.error("No IPv4 configuration found in connection %s", connection.get_uuid())
ifname_arg = _get_dracut_ifname_argument_from_connection(connection, iface)
if ifname_arg:
netargs.add(ifname_arg)
team_arg = _get_dracut_team_argument_from_connection(nm_client, connection, iface)
if team_arg:
netargs.add(team_arg)
vlan_arg, vlan_parent_connection = _get_dracut_vlan_argument_from_connection(nm_client,
connection,
iface)
if vlan_arg:
netargs.add(vlan_arg)
# For vlan the parent connection defines the s390 znet argument values
if vlan_parent_connection:
connection = vlan_parent_connection
znet_arg = _get_dracut_znet_argument_from_connection(connection)
if znet_arg:
netargs.add(znet_arg)
return netargs
def _get_dracut_ipv6_argument(connection, iface, hostname):
"""Get dracut ip IPv6 configuration for given interface and NM connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param iface: network interface to be used
:type iface: str
:param hostname: static hostname to be configured
:type hostname: str
    :returns: dracut ip argument or "" if the configuration can't be found
    :rtype: str
"""
argument = ""
ip6_config = connection.get_setting_ip6_config()
ip6_method = ip6_config.get_method()
if ip6_method == NM.SETTING_IP6_CONFIG_METHOD_AUTO:
argument = "ip={}:auto6".format(iface)
elif ip6_method == NM.SETTING_IP6_CONFIG_METHOD_DHCP:
# Most probably not working
argument = "ip={}:dhcp6".format(iface)
elif ip6_method == NM.SETTING_IP6_CONFIG_METHOD_MANUAL:
ipaddr = ""
if ip6_config.get_num_addresses() > 0:
addr = ip6_config.get_address(0)
ipaddr = "[{}/{}]".format(addr.get_address(), addr.get_prefix())
gateway = ip6_config.get_gateway() or ""
if gateway:
gateway = "[{}]".format(gateway)
if ipaddr or gateway:
argument = ("ip={}::{}::{}:{}:none".format(ipaddr, gateway, hostname, iface))
return argument
def _get_dracut_ipv4_argument(connection, iface, hostname):
"""Get dracut ip IPv4 configuration for given interface and NM connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param iface: network interface to be used
:type iface: str
:param hostname: static hostname to be configured
:type hostname: str
    :returns: dracut ip argument or "" if the configuration can't be found
:rtype: str
"""
argument = ""
ip4_config = connection.get_setting_ip4_config()
ip4_method = ip4_config.get_method()
if ip4_method == NM.SETTING_IP4_CONFIG_METHOD_AUTO:
argument = "ip={}:dhcp".format(iface)
elif ip4_method == NM.SETTING_IP4_CONFIG_METHOD_MANUAL:
if ip4_config.get_num_addresses() > 0:
addr = ip4_config.get_address(0)
ip = addr.get_address()
netmask = prefix2netmask(addr.get_prefix())
gateway = ip4_config.get_gateway() or ""
argument = "ip={}::{}:{}:{}:{}:none".format(ip, gateway, netmask, hostname, iface)
return argument
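# Editor's illustrative sketch of the strings produced above (values hypothetical):
# a DHCP connection on ens3 yields "ip=ens3:dhcp", while a manual configuration with
# address 10.0.0.5/24, gateway 10.0.0.1 and hostname "node1" is rendered as below.
def _example_dracut_ipv4_argument():
    ip, gateway, netmask, hostname, iface = "10.0.0.5", "10.0.0.1", "255.255.255.0", "node1", "ens3"
    return "ip={}::{}:{}:{}:{}:none".format(ip, gateway, netmask, hostname, iface)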
def _get_dracut_ifname_argument_from_connection(connection, iface):
"""Get dracut ifname configuration for given interface and NM connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param iface: network interface to be used
:type iface: str
:returns: dracut ifname argument or "" if the configuration does not apply
:rtype: str
"""
argument = ""
wired_setting = connection.get_setting_wired()
if wired_setting:
hwaddr = wired_setting.get_mac_address()
if hwaddr:
argument = "ifname={}:{}".format(iface, hwaddr.lower())
return argument
def _get_dracut_team_argument_from_connection(nm_client, connection, iface):
"""Get dracut team configuration for given interface and NM connection.
:param nm_client: instance of NetworkManager client
:type nm_client: NM.Client
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param iface: network interface to be used
:type iface: str
:returns: dracut team argument or "" if the configuration does not apply
:rtype: str
"""
argument = ""
if connection.get_connection_type() == NM_CONNECTION_TYPE_TEAM:
ports = get_ports_from_connections(
nm_client,
["team"],
[iface, connection.get_uuid()]
)
port_ifaces = sorted(s_iface for _name, s_iface, _uuid in ports if s_iface)
argument = "team={}:{}".format(iface, ",".join(port_ifaces))
return argument
def _get_dracut_vlan_argument_from_connection(nm_client, connection, iface):
"""Get dracut vlan configuration for given interface and NM connection.
Returns also parent vlan connection.
:param nm_client: instance of NetworkManager client
:type nm_client: NM.Client
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param iface: network interface to be used
:type iface: str
:returns: tuple (ARGUMENT, PARENT_CONNECTION) where
ARGUMENT is dracut vlan argument or "" if the configuration does not apply
PARENT_CONNECTION is vlan parent connection of the connection
:rtype: tuple(str, NM.RemoteConnection)
"""
argument = ""
parent_con = None
if connection.get_connection_type() == NM_CONNECTION_TYPE_VLAN:
setting_vlan = connection.get_setting_vlan()
parent_spec = setting_vlan.get_parent()
parent = None
# parent can be specified by connection uuid (eg from nm-c-e)
if len(parent_spec) == NM_CONNECTION_UUID_LENGTH:
parent_con = nm_client.get_connection_by_uuid(parent_spec)
if parent_con:
# On s390 with net.ifnames=0 there is no DEVICE so use NAME
parent = parent_con.get_interface_name() or parent_con.get_id()
# parent can be specified by interface
else:
parent = parent_spec
parent_cons = get_connections_available_for_iface(nm_client, parent)
if len(parent_cons) != 1:
log.error("unexpected number of connections found for vlan parent %s",
parent_spec)
if parent_cons:
parent_con = parent_cons[0]
if parent:
argument = "vlan={}:{}".format(iface, parent)
else:
log.error("can't find parent interface of vlan device %s specified by %s",
iface, parent_spec)
if not parent_con:
log.error("can't find parent connection of vlan device %s specified by %s",
iface, parent_spec)
return argument, parent_con
def _get_dracut_znet_argument_from_connection(connection):
"""Get dracut znet (s390) configuration for given NM connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:returns: dracut znet argument or "" if the configuration does not apply
:rtype: str
"""
argument = ""
wired_setting = connection.get_setting_wired()
if wired_setting and is_s390():
nettype = wired_setting.get_s390_nettype()
subchannels = wired_setting.get_s390_subchannels()
if nettype and subchannels:
argument = "rd.znet={},{}".format(nettype, subchannels)
options = wired_setting.get_property(NM.SETTING_WIRED_S390_OPTIONS)
if options:
options_string = ','.join("{}={}".format(key, val) for key, val in options.items())
argument += ",{}".format(options_string)
return argument
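# Editor's illustrative sketch (hypothetical s390 values): nettype "qeth", subchannels
# "0.0.0900,0.0.0901,0.0.0902" and option layer2=1 are rendered by the helper above as
# "rd.znet=qeth,0.0.0900,0.0.0901,0.0.0902,layer2=1".
def _example_dracut_znet_argument():
    nettype = "qeth"
    subchannels = "0.0.0900,0.0.0901,0.0.0902"
    options = {"layer2": "1"}
    argument = "rd.znet={},{}".format(nettype, subchannels)
    argument += "," + ",".join("{}={}".format(key, val) for key, val in options.items())
    return argument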
def get_ports_from_connections(nm_client, port_types, controller_specs):
"""Get ports of controller of given type specified by uuid or interface.
:param nm_client: instance of NetworkManager client
:type nm_client: NM.Client
    :param port_types: types of the ports - NM setting "slave-type" values (eg. "team")
:type port_types: list(str)
    :param controller_specs: a list containing specification of a controller:
interface name or connection uuid or both
:type controller_specs: list(str)
:returns: ports specified by name, interface and connection uuid
:rtype: set((str,str,str))
"""
ports = set()
for con in nm_client.get_connections():
if not con.get_setting_connection().get_slave_type() in port_types:
continue
if con.get_setting_connection().get_master() in controller_specs:
iface = get_iface_from_connection(nm_client, con.get_uuid())
name = con.get_id()
ports.add((name, iface, con.get_uuid()))
return ports
def get_config_file_connection_of_device(nm_client, device_name, device_hwaddr=None):
"""Find connection of the device's configuration file.
:param nm_client: instance of NetworkManager client
:type nm_client: NM.Client
:param device_name: name of the device
:type device_name: str
:param device_hwaddr: hardware address of the device
:type device_hwaddr: str
:returns: uuid of NetworkManager connection
:rtype: str
"""
cons = []
for con in nm_client.get_connections():
filename = con.get_filename() or ""
# Ignore connections from initramfs in
# /run/NetworkManager/system-connections
if not is_config_file_for_system(filename):
continue
con_type = con.get_connection_type()
if con_type == NM_CONNECTION_TYPE_ETHERNET:
# Ignore ports
if con.get_setting_connection().get_master():
continue
interface_name = con.get_interface_name()
mac_address = None
wired_setting = con.get_setting_wired()
if wired_setting:
mac_address = wired_setting.get_mac_address()
if interface_name:
if interface_name == device_name:
cons.append(con)
elif mac_address:
if device_hwaddr:
if device_hwaddr.upper() == mac_address.upper():
cons.append(con)
else:
iface = get_iface_from_hwaddr(nm_client, mac_address)
if iface == device_name:
cons.append(con)
elif is_s390():
# s390 setting generated in dracut with net.ifnames=0
# has neither DEVICE/interface-name nor HWADDR/mac-address set (#1249750)
if con.get_id() == device_name:
cons.append(con)
elif con_type in (NM_CONNECTION_TYPE_BOND, NM_CONNECTION_TYPE_TEAM,
NM_CONNECTION_TYPE_BRIDGE, NM_CONNECTION_TYPE_INFINIBAND):
if con.get_interface_name() == device_name:
cons.append(con)
elif con_type == NM_CONNECTION_TYPE_VLAN:
interface_name = get_vlan_interface_name_from_connection(nm_client, con)
if interface_name and interface_name == device_name:
cons.append(con)
if len(cons) > 1:
log.debug("Unexpected number of config files found for %s: %s", device_name,
[con.get_filename() for con in cons])
if cons:
return cons[0].get_uuid()
else:
log.debug("Config file for %s not found", device_name)
return ""
def get_kickstart_network_data(connection, nm_client, network_data_class):
"""Get kickstart data from NM connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param nm_client: instance of NetworkManager client
:type nm_client: NM.Client
:param network_data_class: pykickstart network command data class
:type: pykickstart BaseData
:returns: network_data object corresponding to the connection
:rtype: network_data_class object instance
"""
# no network command for non-virtual device ports
if connection.get_connection_type() not in (NM_CONNECTION_TYPE_BOND, NM_CONNECTION_TYPE_TEAM):
if connection.get_setting_connection().get_master():
return None
# no support for wireless
if connection.get_connection_type() == NM_CONNECTION_TYPE_WIFI:
return None
network_data = network_data_class()
# connection
network_data.onboot = connection.get_setting_connection().get_autoconnect()
iface = get_iface_from_connection(nm_client, connection.get_uuid())
if iface:
network_data.device = iface
_update_ip4_config_kickstart_network_data(connection, network_data)
_update_ip6_config_kickstart_network_data(connection, network_data)
_update_nameserver_kickstart_network_data(connection, network_data)
# --mtu
s_wired = connection.get_setting_wired()
if s_wired:
if s_wired.get_mtu():
network_data.mtu = s_wired.get_mtu()
# vlan
if connection.get_connection_type() == NM_CONNECTION_TYPE_VLAN:
_update_vlan_kickstart_network_data(nm_client, connection, network_data)
# bonding
if connection.get_connection_type() == NM_CONNECTION_TYPE_BOND:
_update_bond_kickstart_network_data(nm_client, iface, connection, network_data)
# bridging
if connection.get_connection_type() == NM_CONNECTION_TYPE_BRIDGE:
_update_bridge_kickstart_network_data(nm_client, iface, connection, network_data)
# teaming
if connection.get_connection_type() == NM_CONNECTION_TYPE_TEAM:
_update_team_kickstart_network_data(nm_client, iface, connection, network_data)
return network_data
def _update_nameserver_kickstart_network_data(connection, network_data):
"""Update nameserver configuration of network data from connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param network_data: kickstart configuration to be modified
:type network_data: pykickstart NetworkData
"""
# --nameserver is used both for ipv4 and ipv6
dns_list = []
s_ip4_config = connection.get_setting_ip4_config()
if s_ip4_config:
for i in range(s_ip4_config.get_num_dns()):
dns_list.append(s_ip4_config.get_dns(i))
s_ip6_config = connection.get_setting_ip6_config()
if s_ip6_config:
for i in range(s_ip6_config.get_num_dns()):
dns_list.append(s_ip6_config.get_dns(i))
dns_str = ','.join(dns_list)
if dns_str:
network_data.nameserver = dns_str
def _update_ip4_config_kickstart_network_data(connection, network_data):
"""Update IPv4 configuration of network data from connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param network_data: kickstart configuration to be modified
:type network_data: pykickstart NetworkData
"""
s_ip4_config = connection.get_setting_ip4_config()
if not s_ip4_config:
return
ip4_method = s_ip4_config.get_method()
if ip4_method == NM.SETTING_IP4_CONFIG_METHOD_DISABLED:
network_data.noipv4 = True
elif ip4_method == NM.SETTING_IP4_CONFIG_METHOD_AUTO:
network_data.bootProto = "dhcp"
elif ip4_method == NM.SETTING_IP4_CONFIG_METHOD_MANUAL:
network_data.bootProto = "static"
if s_ip4_config.get_num_addresses() > 0:
addr = s_ip4_config.get_address(0)
network_data.ip = addr.get_address()
netmask = prefix2netmask(addr.get_prefix())
if netmask:
network_data.netmask = netmask
gateway = s_ip4_config.get_gateway()
if gateway:
network_data.gateway = gateway
# --hostname
ip4_dhcp_hostname = s_ip4_config.get_dhcp_hostname()
if ip4_dhcp_hostname:
network_data.hostname = ip4_dhcp_hostname
def _update_ip6_config_kickstart_network_data(connection, network_data):
"""Update IPv6 configuration of network data from connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param network_data: kickstart configuration to be modified
:type network_data: pykickstart NetworkData
"""
s_ip6_config = connection.get_setting_ip6_config()
if not s_ip6_config:
return
ip6_method = s_ip6_config.get_method()
if ip6_method == NM.SETTING_IP6_CONFIG_METHOD_DISABLED:
network_data.noipv6 = True
elif ip6_method == NM.SETTING_IP6_CONFIG_METHOD_AUTO:
network_data.ipv6 = "auto"
elif ip6_method == NM.SETTING_IP6_CONFIG_METHOD_DHCP:
network_data.ipv6 = "dhcp"
elif ip6_method == NM.SETTING_IP6_CONFIG_METHOD_MANUAL:
if s_ip6_config.get_num_addresses() > 0:
addr = s_ip6_config.get_address(0)
network_data.ipv6 = "{}/{}".format(addr.get_address(), addr.get_prefix())
gateway = s_ip6_config.get_gateway()
if gateway:
network_data.ipv6gateway = gateway
def _update_vlan_kickstart_network_data(nm_client, connection, network_data):
"""Update vlan configuration of network data from connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param network_data: kickstart configuration to be modified
:type network_data: pykickstart NetworkData
"""
setting_vlan = connection.get_setting_vlan()
if setting_vlan:
interface_name = connection.get_setting_connection().get_interface_name()
vlanid = setting_vlan.get_id()
parent = setting_vlan.get_parent()
# if parent is specified by UUID
if len(parent) == NM_CONNECTION_UUID_LENGTH:
parent = get_iface_from_connection(nm_client, parent)
default_name = default_ks_vlan_interface_name(parent, vlanid)
if interface_name and interface_name != default_name:
network_data.interfacename = interface_name
network_data.vlanid = vlanid
network_data.device = parent
def _update_bond_kickstart_network_data(nm_client, iface, connection, network_data):
"""Update bond configuration of network data from connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param network_data: kickstart configuration to be modified
:type network_data: pykickstart NetworkData
"""
ports = get_ports_from_connections(
nm_client,
['bond'],
[iface, connection.get_uuid()]
)
if ports:
port_ifaces = sorted(s_iface for _name, s_iface, _uuid in ports if s_iface)
network_data.bondslaves = ",".join(port_ifaces)
s_bond = connection.get_setting_bond()
if s_bond:
option_list = []
for i in range(s_bond.get_num_options()):
_result, _name, _value = s_bond.get_option(i)
if _result:
option_list.append("{}={}".format(_name, _value))
if option_list:
network_data.bondopts = ",".join(option_list)
def _update_bridge_kickstart_network_data(nm_client, iface, connection, network_data):
"""Update bridge configuration of network data from connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param network_data: kickstart configuration to be modified
:type network_data: pykickstart NetworkData
"""
ports = get_ports_from_connections(
nm_client,
['bridge'],
[iface, connection.get_uuid()]
)
if ports:
port_ifaces = sorted(s_iface for _name, s_iface, _uuid in ports if s_iface)
network_data.bridgeslaves = ",".join(port_ifaces)
s_bridge = connection.get_setting_bridge()
if s_bridge:
bridge_options = []
for setting, default_value in NM_BRIDGE_DUMPED_SETTINGS_DEFAULTS.items():
value = s_bridge.get_property(setting)
if value != default_value:
bridge_options.append("{}={}".format(setting, value))
if bridge_options:
network_data.bridgeopts = ",".join(bridge_options)
def _update_team_kickstart_network_data(nm_client, iface, connection, network_data):
"""Update team configuration of network data from connection.
:param connection: NetworkManager connection
:type connection: NM.RemoteConnection
:param network_data: kickstart configuration to be modified
:type network_data: pykickstart NetworkData
"""
ports = get_ports_from_connections(
nm_client,
['team'],
[iface, connection.get_uuid()]
)
if ports:
port_list = sorted((s_iface, s_uuid) for _name, s_iface, s_uuid in ports if s_iface)
for s_iface, s_uuid in port_list:
team_port_cfg = get_team_port_config_from_connection(nm_client, s_uuid) or ""
network_data.teamslaves.append((s_iface, team_port_cfg))
s_team = connection.get_setting_team()
if s_team:
teamconfig = s_team.get_config()
if teamconfig:
network_data.teamconfig = teamconfig.replace("\n", "").replace(" ", "")
| gpl-2.0 | -337,976,526,372,481,600 | -8,727,957,566,787,740,000 | 37.551537 | 100 | 0.647388 | false |
Glorf/servo | tests/wpt/web-platform-tests/tools/py/testing/path/test_local.py | 160 | 29652 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import py
import pytest
import os, sys
from py.path import local
import common
failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
failsonjywin32 = py.test.mark.xfail("sys.platform.startswith('java') "
"and getattr(os, '_name', None) == 'nt'")
win32only = py.test.mark.skipif(
"not (sys.platform == 'win32' or getattr(os, '_name', None) == 'nt')")
skiponwin32 = py.test.mark.skipif(
"sys.platform == 'win32' or getattr(os, '_name', None) == 'nt'")
def pytest_funcarg__path1(request):
def setup():
path1 = request.getfuncargvalue("tmpdir")
common.setuptestfs(path1)
return path1
def teardown(path1):
# post check
assert path1.join("samplefile").check()
return request.cached_setup(setup, teardown, scope="session")
class TestLocalPath(common.CommonFSTests):
def test_join_normpath(self, tmpdir):
assert tmpdir.join(".") == tmpdir
p = tmpdir.join("../%s" % tmpdir.basename)
assert p == tmpdir
p = tmpdir.join("..//%s/" % tmpdir.basename)
assert p == tmpdir
@skiponwin32
def test_dirpath_abs_no_abs(self, tmpdir):
p = tmpdir.join('foo')
assert p.dirpath('/bar') == tmpdir.join('bar')
assert tmpdir.dirpath('/bar', abs=True) == py.path.local('/bar')
def test_gethash(self, tmpdir):
md5 = py.builtin._tryimport('md5', 'hashlib').md5
lib = py.builtin._tryimport('sha', 'hashlib')
sha = getattr(lib, 'sha1', getattr(lib, 'sha', None))
fn = tmpdir.join("testhashfile")
data = 'hello'.encode('ascii')
fn.write(data, mode="wb")
assert fn.computehash("md5") == md5(data).hexdigest()
assert fn.computehash("sha1") == sha(data).hexdigest()
py.test.raises(ValueError, fn.computehash, "asdasd")
def test_remove_removes_readonly_file(self, tmpdir):
readonly_file = tmpdir.join('readonly').ensure()
readonly_file.chmod(0)
readonly_file.remove()
assert not readonly_file.check(exists=1)
def test_remove_removes_readonly_dir(self, tmpdir):
readonly_dir = tmpdir.join('readonlydir').ensure(dir=1)
readonly_dir.chmod(int("500", 8))
readonly_dir.remove()
assert not readonly_dir.check(exists=1)
def test_remove_removes_dir_and_readonly_file(self, tmpdir):
readonly_dir = tmpdir.join('readonlydir').ensure(dir=1)
readonly_file = readonly_dir.join('readonlyfile').ensure()
readonly_file.chmod(0)
readonly_dir.remove()
assert not readonly_dir.check(exists=1)
def test_remove_routes_ignore_errors(self, tmpdir, monkeypatch):
l = []
monkeypatch.setattr(py.std.shutil, 'rmtree',
lambda *args, **kwargs: l.append(kwargs))
tmpdir.remove()
assert not l[0]['ignore_errors']
for val in (True, False):
l[:] = []
tmpdir.remove(ignore_errors=val)
assert l[0]['ignore_errors'] == val
def test_initialize_curdir(self):
assert str(local()) == py.std.os.getcwd()
@skiponwin32
def test_chdir_gone(self, path1):
p = path1.ensure("dir_to_be_removed", dir=1)
p.chdir()
p.remove()
pytest.raises(py.error.ENOENT, py.path.local)
assert path1.chdir() is None
assert os.getcwd() == str(path1)
def test_as_cwd(self, path1):
dir = path1.ensure("subdir", dir=1)
old = py.path.local()
with dir.as_cwd() as x:
assert x == old
assert py.path.local() == dir
assert os.getcwd() == str(old)
def test_as_cwd_exception(self, path1):
old = py.path.local()
dir = path1.ensure("subdir", dir=1)
with pytest.raises(ValueError):
with dir.as_cwd():
raise ValueError()
assert old == py.path.local()
def test_initialize_reldir(self, path1):
with path1.as_cwd():
p = local('samplefile')
assert p.check()
@pytest.mark.xfail("sys.version_info < (2,6) and sys.platform == 'win32'")
def test_tilde_expansion(self, monkeypatch, tmpdir):
monkeypatch.setenv("HOME", str(tmpdir))
p = py.path.local("~", expanduser=True)
assert p == os.path.expanduser("~")
def test_eq_with_strings(self, path1):
path1 = path1.join('sampledir')
path2 = str(path1)
assert path1 == path2
assert path2 == path1
path3 = path1.join('samplefile')
assert path3 != path2
assert path2 != path3
def test_eq_with_none(self, path1):
assert path1 != None
def test_gt_with_strings(self, path1):
path2 = path1.join('sampledir')
path3 = str(path1.join("ttt"))
assert path3 > path2
assert path2 < path3
assert path2 < "ttt"
assert "ttt" > path2
path4 = path1.join("aaa")
l = [path2, path4,path3]
assert sorted(l) == [path4, path2, path3]
def test_open_and_ensure(self, path1):
p = path1.join("sub1", "sub2", "file")
with p.open("w", ensure=1) as f:
f.write("hello")
assert p.read() == "hello"
def test_write_and_ensure(self, path1):
p = path1.join("sub1", "sub2", "file")
p.write("hello", ensure=1)
assert p.read() == "hello"
@py.test.mark.multi(bin=(False, True))
def test_dump(self, tmpdir, bin):
path = tmpdir.join("dumpfile%s" % int(bin))
try:
d = {'answer' : 42}
path.dump(d, bin=bin)
f = path.open('rb+')
dnew = py.std.pickle.load(f)
assert d == dnew
finally:
f.close()
@failsonjywin32
def test_setmtime(self):
import tempfile
import time
try:
fd, name = tempfile.mkstemp()
py.std.os.close(fd)
except AttributeError:
name = tempfile.mktemp()
open(name, 'w').close()
try:
mtime = int(time.time())-100
path = local(name)
assert path.mtime() != mtime
path.setmtime(mtime)
assert path.mtime() == mtime
path.setmtime()
assert path.mtime() != mtime
finally:
py.std.os.remove(name)
def test_normpath(self, path1):
new1 = path1.join("/otherdir")
new2 = path1.join("otherdir")
assert str(new1) == str(new2)
def test_mkdtemp_creation(self):
d = local.mkdtemp()
try:
assert d.check(dir=1)
finally:
d.remove(rec=1)
def test_tmproot(self):
d = local.mkdtemp()
tmproot = local.get_temproot()
try:
assert d.check(dir=1)
assert d.dirpath() == tmproot
finally:
d.remove(rec=1)
def test_chdir(self, tmpdir):
old = local()
try:
res = tmpdir.chdir()
assert str(res) == str(old)
assert py.std.os.getcwd() == str(tmpdir)
finally:
old.chdir()
def test_ensure_filepath_withdir(self, tmpdir):
newfile = tmpdir.join('test1','test')
newfile.ensure()
assert newfile.check(file=1)
newfile.write("42")
newfile.ensure()
s = newfile.read()
assert s == "42"
def test_ensure_filepath_withoutdir(self, tmpdir):
newfile = tmpdir.join('test1file')
t = newfile.ensure()
assert t == newfile
assert newfile.check(file=1)
def test_ensure_dirpath(self, tmpdir):
newfile = tmpdir.join('test1','testfile')
t = newfile.ensure(dir=1)
assert t == newfile
assert newfile.check(dir=1)
def test_init_from_path(self, tmpdir):
l = local()
l2 = local(l)
assert l2 == l
wc = py.path.svnwc('.')
l3 = local(wc)
assert l3 is not wc
assert l3.strpath == wc.strpath
assert not hasattr(l3, 'commit')
    @py.test.mark.xfail(run=False, reason="unreliable test for long filenames")
def test_long_filenames(self, tmpdir):
if sys.platform == "win32":
py.test.skip("win32: work around needed for path length limit")
# see http://codespeak.net/pipermail/py-dev/2008q2/000922.html
# testing paths > 260 chars (which is Windows' limitation, but
        # depending on how the paths are used), but > 4096 (which is the
        # Linux limitation) - the behaviour of paths with names > 4096 chars
# is undetermined
newfilename = '/test' * 60
l = tmpdir.join(newfilename)
l.ensure(file=True)
l.write('foo')
l2 = tmpdir.join(newfilename)
assert l2.read() == 'foo'
def test_visit_depth_first(self, tmpdir):
p1 = tmpdir.ensure("a","1")
p2 = tmpdir.ensure("b","2")
p3 = tmpdir.ensure("breadth")
l = list(tmpdir.visit(lambda x: x.check(file=1)))
assert len(l) == 3
# check that breadth comes last
assert l[2] == p3
def test_visit_rec_fnmatch(self, tmpdir):
p1 = tmpdir.ensure("a","123")
p2 = tmpdir.ensure(".b","345")
l = list(tmpdir.visit("???", rec="[!.]*"))
assert len(l) == 1
# check that breadth comes last
assert l[0] == p1
def test_fnmatch_file_abspath(self, tmpdir):
b = tmpdir.join("a", "b")
assert b.fnmatch(os.sep.join("ab"))
pattern = os.sep.join([str(tmpdir), "*", "b"])
assert b.fnmatch(pattern)
def test_sysfind(self):
name = sys.platform == "win32" and "cmd" or "test"
x = py.path.local.sysfind(name)
assert x.check(file=1)
assert py.path.local.sysfind('jaksdkasldqwe') is None
assert py.path.local.sysfind(name, paths=[]) is None
x2 = py.path.local.sysfind(name, paths=[x.dirpath()])
assert x2 == x
class TestExecutionOnWindows:
pytestmark = win32only
def test_sysfind_bat_exe_before(self, tmpdir, monkeypatch):
monkeypatch.setenv("PATH", str(tmpdir), prepend=os.pathsep)
tmpdir.ensure("hello")
h = tmpdir.ensure("hello.bat")
x = py.path.local.sysfind("hello")
assert x == h
class TestExecution:
pytestmark = skiponwin32
def test_sysfind_no_permisson_ignored(self, monkeypatch, tmpdir):
noperm = tmpdir.ensure('noperm', dir=True)
monkeypatch.setenv("PATH", noperm, prepend=":")
noperm.chmod(0)
assert py.path.local.sysfind('jaksdkasldqwe') is None
def test_sysfind_absolute(self):
x = py.path.local.sysfind('test')
assert x.check(file=1)
y = py.path.local.sysfind(str(x))
assert y.check(file=1)
assert y == x
def test_sysfind_multiple(self, tmpdir, monkeypatch):
monkeypatch.setenv('PATH',
"%s:%s" % (tmpdir.ensure('a'),
tmpdir.join('b')),
prepend=":")
tmpdir.ensure('b', 'a')
checker = lambda x: x.dirpath().basename == 'b'
x = py.path.local.sysfind('a', checker=checker)
assert x.basename == 'a'
assert x.dirpath().basename == 'b'
checker = lambda x: None
assert py.path.local.sysfind('a', checker=checker) is None
def test_sysexec(self):
x = py.path.local.sysfind('ls')
out = x.sysexec('-a')
for x in py.path.local().listdir():
assert out.find(x.basename) != -1
def test_sysexec_failing(self):
x = py.path.local.sysfind('false')
py.test.raises(py.process.cmdexec.Error, """
x.sysexec('aksjdkasjd')
""")
def test_make_numbered_dir(self, tmpdir):
tmpdir.ensure('base.not_an_int', dir=1)
for i in range(10):
numdir = local.make_numbered_dir(prefix='base.', rootdir=tmpdir,
keep=2, lock_timeout=0)
assert numdir.check()
assert numdir.basename == 'base.%d' %i
if i>=1:
assert numdir.new(ext=str(i-1)).check()
if i>=2:
assert numdir.new(ext=str(i-2)).check()
if i>=3:
assert not numdir.new(ext=str(i-3)).check()
def test_make_numbered_dir_NotImplemented_Error(self, tmpdir, monkeypatch):
def notimpl(x, y):
raise NotImplementedError(42)
monkeypatch.setattr(py.std.os, 'symlink', notimpl)
x = tmpdir.make_numbered_dir(rootdir=tmpdir, lock_timeout=0)
assert x.relto(tmpdir)
assert x.check()
def test_locked_make_numbered_dir(self, tmpdir):
for i in range(10):
numdir = local.make_numbered_dir(prefix='base2.', rootdir=tmpdir,
keep=2)
assert numdir.check()
assert numdir.basename == 'base2.%d' %i
for j in range(i):
assert numdir.new(ext=str(j)).check()
def test_error_preservation(self, path1):
py.test.raises (EnvironmentError, path1.join('qwoeqiwe').mtime)
py.test.raises (EnvironmentError, path1.join('qwoeqiwe').read)
#def test_parentdirmatch(self):
# local.parentdirmatch('std', startmodule=__name__)
#
class TestImport:
def test_pyimport(self, path1):
obj = path1.join('execfile.py').pyimport()
assert obj.x == 42
assert obj.__name__ == 'execfile'
def test_pyimport_renamed_dir_creates_mismatch(self, tmpdir):
p = tmpdir.ensure("a", "test_x123.py")
p.pyimport()
tmpdir.join("a").move(tmpdir.join("b"))
pytest.raises(tmpdir.ImportMismatchError,
lambda: tmpdir.join("b", "test_x123.py").pyimport())
def test_pyimport_messy_name(self, tmpdir):
# http://bitbucket.org/hpk42/py-trunk/issue/129
path = tmpdir.ensure('foo__init__.py')
obj = path.pyimport()
def test_pyimport_dir(self, tmpdir):
p = tmpdir.join("hello_123")
p_init = p.ensure("__init__.py")
m = p.pyimport()
assert m.__name__ == "hello_123"
m = p_init.pyimport()
assert m.__name__ == "hello_123"
def test_pyimport_execfile_different_name(self, path1):
obj = path1.join('execfile.py').pyimport(modname="0x.y.z")
assert obj.x == 42
assert obj.__name__ == '0x.y.z'
def test_pyimport_a(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('a.py').pyimport()
assert mod.result == "got it"
assert mod.__name__ == 'otherdir.a'
def test_pyimport_b(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('b.py').pyimport()
assert mod.stuff == "got it"
assert mod.__name__ == 'otherdir.b'
def test_pyimport_c(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('c.py').pyimport()
assert mod.value == "got it"
def test_pyimport_d(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('d.py').pyimport()
assert mod.value2 == "got it"
def test_pyimport_and_import(self, tmpdir):
tmpdir.ensure('xxxpackage', '__init__.py')
mod1path = tmpdir.ensure('xxxpackage', 'module1.py')
mod1 = mod1path.pyimport()
assert mod1.__name__ == 'xxxpackage.module1'
from xxxpackage import module1
assert module1 is mod1
def test_pyimport_check_filepath_consistency(self, monkeypatch, tmpdir):
name = 'pointsback123'
ModuleType = type(py.std.os)
p = tmpdir.ensure(name + '.py')
for ending in ('.pyc', '$py.class', '.pyo'):
mod = ModuleType(name)
pseudopath = tmpdir.ensure(name+ending)
mod.__file__ = str(pseudopath)
monkeypatch.setitem(sys.modules, name, mod)
newmod = p.pyimport()
assert mod == newmod
monkeypatch.undo()
mod = ModuleType(name)
pseudopath = tmpdir.ensure(name+"123.py")
mod.__file__ = str(pseudopath)
monkeypatch.setitem(sys.modules, name, mod)
excinfo = py.test.raises(pseudopath.ImportMismatchError,
"p.pyimport()")
modname, modfile, orig = excinfo.value.args
assert modname == name
assert modfile == pseudopath
assert orig == p
assert issubclass(pseudopath.ImportMismatchError, ImportError)
def test_issue131_pyimport_on__init__(self, tmpdir):
# __init__.py files may be namespace packages, and thus the
# __file__ of an imported module may not be ourselves
# see issue
p1 = tmpdir.ensure("proja", "__init__.py")
p2 = tmpdir.ensure("sub", "proja", "__init__.py")
m1 = p1.pyimport()
m2 = p2.pyimport()
assert m1 == m2
def test_ensuresyspath_append(self, tmpdir):
root1 = tmpdir.mkdir("root1")
file1 = root1.ensure("x123.py")
assert str(root1) not in sys.path
file1.pyimport(ensuresyspath="append")
assert str(root1) == sys.path[-1]
assert str(root1) not in sys.path[:-1]
def test_pypkgdir(tmpdir):
pkg = tmpdir.ensure('pkg1', dir=1)
pkg.ensure("__init__.py")
pkg.ensure("subdir/__init__.py")
assert pkg.pypkgpath() == pkg
assert pkg.join('subdir', '__init__.py').pypkgpath() == pkg
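# Editor's illustrative sketch (hypothetical layout): pypkgpath() walks up from a path for
# as long as __init__.py files are present, so with
#   pkg1/__init__.py
#   pkg1/subdir/__init__.py
# both pkg1 and pkg1/subdir/__init__.py resolve to the pkg1 directory, provided the names
# are importable.
def _example_pypkgpath(tmpdir):
    pkg = tmpdir.ensure('pkg1', dir=1)
    pkg.ensure("__init__.py")
    mod = pkg.ensure("subdir", "__init__.py")
    assert mod.pypkgpath() == pkg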
def test_pypkgdir_unimportable(tmpdir):
pkg = tmpdir.ensure('pkg1-1', dir=1) # unimportable
pkg.ensure("__init__.py")
subdir = pkg.ensure("subdir/__init__.py").dirpath()
assert subdir.pypkgpath() == subdir
assert subdir.ensure("xyz.py").pypkgpath() == subdir
assert not pkg.pypkgpath()
def test_isimportable():
from py._path.local import isimportable
assert not isimportable("")
assert isimportable("x")
assert isimportable("x1")
assert isimportable("x_1")
assert isimportable("_")
assert isimportable("_1")
assert not isimportable("x-1")
assert not isimportable("x:1")
def test_homedir_from_HOME(monkeypatch):
path = os.getcwd()
monkeypatch.setenv("HOME", path)
assert py.path.local._gethomedir() == py.path.local(path)
def test_homedir_not_exists(monkeypatch):
monkeypatch.delenv("HOME", raising=False)
monkeypatch.delenv("HOMEDRIVE", raising=False)
homedir = py.path.local._gethomedir()
assert homedir is None
def test_samefile(tmpdir):
assert tmpdir.samefile(tmpdir)
p = tmpdir.ensure("hello")
assert p.samefile(p)
with p.dirpath().as_cwd():
assert p.samefile(p.basename)
if sys.platform == "win32":
p1 = p.__class__(str(p).lower())
p2 = p.__class__(str(p).upper())
assert p1.samefile(p2)
def test_listdir_single_arg(tmpdir):
tmpdir.ensure("hello")
assert tmpdir.listdir("hello")[0].basename == "hello"
def test_mkdtemp_rootdir(tmpdir):
dtmp = local.mkdtemp(rootdir=tmpdir)
assert tmpdir.listdir() == [dtmp]
class TestWINLocalPath:
pytestmark = win32only
def test_owner_group_not_implemented(self, path1):
py.test.raises(NotImplementedError, "path1.stat().owner")
py.test.raises(NotImplementedError, "path1.stat().group")
def test_chmod_simple_int(self, path1):
py.builtin.print_("path1 is", path1)
mode = path1.stat().mode
# Ensure that we actually change the mode to something different.
path1.chmod(mode == 0 and 1 or 0)
try:
print(path1.stat().mode)
print(mode)
assert path1.stat().mode != mode
finally:
path1.chmod(mode)
assert path1.stat().mode == mode
def test_path_comparison_lowercase_mixed(self, path1):
t1 = path1.join("a_path")
t2 = path1.join("A_path")
assert t1 == t1
assert t1 == t2
def test_relto_with_mixed_case(self, path1):
t1 = path1.join("a_path", "fiLe")
t2 = path1.join("A_path")
assert t1.relto(t2) == "fiLe"
def test_allow_unix_style_paths(self, path1):
t1 = path1.join('a_path')
assert t1 == str(path1) + '\\a_path'
t1 = path1.join('a_path/')
assert t1 == str(path1) + '\\a_path'
t1 = path1.join('dir/a_path')
assert t1 == str(path1) + '\\dir\\a_path'
def test_sysfind_in_currentdir(self, path1):
cmd = py.path.local.sysfind('cmd')
root = cmd.new(dirname='', basename='') # c:\ in most installations
with root.as_cwd():
x = py.path.local.sysfind(cmd.relto(root))
assert x.check(file=1)
def test_fnmatch_file_abspath_posix_pattern_on_win32(self, tmpdir):
# path-matching patterns might contain a posix path separator '/'
# Test that we can match that pattern on windows.
import posixpath
b = tmpdir.join("a", "b")
assert b.fnmatch(posixpath.sep.join("ab"))
pattern = posixpath.sep.join([str(tmpdir), "*", "b"])
assert b.fnmatch(pattern)
class TestPOSIXLocalPath:
pytestmark = skiponwin32
def test_hardlink(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("Hello")
nlink = filepath.stat().nlink
linkpath.mklinkto(filepath)
assert filepath.stat().nlink == nlink + 1
def test_symlink_are_identical(self, tmpdir):
filepath = tmpdir.join('file')
filepath.write("Hello")
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(filepath)
assert linkpath.readlink() == str(filepath)
def test_symlink_isfile(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("")
linkpath.mksymlinkto(filepath)
assert linkpath.check(file=1)
assert not linkpath.check(link=0, file=1)
assert linkpath.islink()
def test_symlink_relative(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("Hello")
linkpath.mksymlinkto(filepath, absolute=False)
assert linkpath.readlink() == "file"
assert filepath.read() == linkpath.read()
def test_symlink_not_existing(self, tmpdir):
linkpath = tmpdir.join('testnotexisting')
assert not linkpath.check(link=1)
assert linkpath.check(link=0)
def test_relto_with_root(self, path1, tmpdir):
y = path1.join('x').relto(py.path.local('/'))
assert y[0] == str(path1)[1]
def test_visit_recursive_symlink(self, tmpdir):
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(tmpdir)
visitor = tmpdir.visit(None, lambda x: x.check(link=0))
assert list(visitor) == [linkpath]
def test_symlink_isdir(self, tmpdir):
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(tmpdir)
assert linkpath.check(dir=1)
assert not linkpath.check(link=0, dir=1)
def test_symlink_remove(self, tmpdir):
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(linkpath) # point to itself
assert linkpath.check(link=1)
linkpath.remove()
assert not linkpath.check()
def test_realpath_file(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("")
linkpath.mksymlinkto(filepath)
realpath = linkpath.realpath()
assert realpath.basename == 'file'
def test_owner(self, path1, tmpdir):
from pwd import getpwuid
from grp import getgrgid
stat = path1.stat()
assert stat.path == path1
uid = stat.uid
gid = stat.gid
owner = getpwuid(uid)[0]
group = getgrgid(gid)[0]
assert uid == stat.uid
assert owner == stat.owner
assert gid == stat.gid
assert group == stat.group
def test_stat_helpers(self, tmpdir, monkeypatch):
path1 = tmpdir.ensure("file")
stat1 = path1.stat()
stat2 = tmpdir.stat()
assert stat1.isfile()
assert stat2.isdir()
assert not stat1.islink()
assert not stat2.islink()
def test_stat_non_raising(self, tmpdir):
path1 = tmpdir.join("file")
pytest.raises(py.error.ENOENT, lambda: path1.stat())
res = path1.stat(raising=False)
assert res is None
def test_atime(self, tmpdir):
import time
path = tmpdir.ensure('samplefile')
now = time.time()
atime1 = path.atime()
# we could wait here but timer resolution is very
# system dependent
path.read()
time.sleep(0.01)
atime2 = path.atime()
time.sleep(0.01)
duration = time.time() - now
assert (atime2-atime1) <= duration
def test_commondir(self, path1):
# XXX This is here in local until we find a way to implement this
# using the subversion command line api.
p1 = path1.join('something')
p2 = path1.join('otherthing')
assert p1.common(p2) == path1
assert p2.common(p1) == path1
def test_commondir_nocommon(self, path1):
# XXX This is here in local until we find a way to implement this
# using the subversion command line api.
p1 = path1.join('something')
p2 = py.path.local(path1.sep+'blabla')
assert p1.common(p2) == '/'
def test_join_to_root(self, path1):
root = path1.parts()[0]
assert len(str(root)) == 1
assert str(root.join('a')) == '//a' # posix allows two slashes
def test_join_root_to_root_with_no_abs(self, path1):
nroot = path1.join('/')
assert str(path1) == str(nroot)
assert path1 == nroot
def test_chmod_simple_int(self, path1):
mode = path1.stat().mode
path1.chmod(int(mode/2))
try:
assert path1.stat().mode != mode
finally:
path1.chmod(mode)
assert path1.stat().mode == mode
def test_chmod_rec_int(self, path1):
# XXX fragile test
recfilter = lambda x: x.check(dotfile=0, link=0)
oldmodes = {}
for x in path1.visit(rec=recfilter):
oldmodes[x] = x.stat().mode
path1.chmod(int("772", 8), rec=recfilter)
try:
for x in path1.visit(rec=recfilter):
assert x.stat().mode & int("777", 8) == int("772", 8)
finally:
for x,y in oldmodes.items():
x.chmod(y)
def test_copy_archiving(self, tmpdir):
unicode_fn = u"something-\342\200\223.txt"
f = tmpdir.ensure("a", unicode_fn)
a = f.dirpath()
oldmode = f.stat().mode
newmode = oldmode ^ 1
f.chmod(newmode)
b = tmpdir.join("b")
a.copy(b, mode=True)
assert b.join(f.basename).stat().mode == newmode
@failsonjython
def test_chown_identity(self, path1):
owner = path1.stat().owner
group = path1.stat().group
path1.chown(owner, group)
@failsonjython
def test_chown_dangling_link(self, path1):
owner = path1.stat().owner
group = path1.stat().group
x = path1.join('hello')
x.mksymlinkto('qlwkejqwlek')
try:
path1.chown(owner, group, rec=1)
finally:
x.remove(rec=0)
@failsonjython
def test_chown_identity_rec_mayfail(self, path1):
owner = path1.stat().owner
group = path1.stat().group
path1.chown(owner, group)
class TestUnicodePy2Py3:
def test_join_ensure(self, tmpdir, monkeypatch):
if sys.version_info >= (3,0) and "LANG" not in os.environ:
pytest.skip("cannot run test without locale")
x = py.path.local(tmpdir.strpath)
part = "hällo"
y = x.ensure(part)
assert x.join(part) == y
def test_listdir(self, tmpdir):
if sys.version_info >= (3,0) and "LANG" not in os.environ:
pytest.skip("cannot run test without locale")
x = py.path.local(tmpdir.strpath)
part = "hällo"
y = x.ensure(part)
assert x.listdir(part)[0] == y
@pytest.mark.xfail(reason="changing read/write might break existing usages")
def test_read_write(self, tmpdir):
x = tmpdir.join("hello")
part = py.builtin._totext("hällo", "utf8")
x.write(part)
assert x.read() == part
x.write(part.encode(sys.getdefaultencoding()))
assert x.read() == part.encode(sys.getdefaultencoding())
class TestBinaryAndTextMethods:
def test_read_binwrite(self, tmpdir):
x = tmpdir.join("hello")
part = py.builtin._totext("hällo", "utf8")
part_utf8 = part.encode("utf8")
x.write_binary(part_utf8)
assert x.read_binary() == part_utf8
s = x.read_text(encoding="utf8")
assert s == part
assert py.builtin._istext(s)
def test_read_textwrite(self, tmpdir):
x = tmpdir.join("hello")
part = py.builtin._totext("hällo", "utf8")
part_utf8 = part.encode("utf8")
x.write_text(part, encoding="utf8")
assert x.read_binary() == part_utf8
assert x.read_text(encoding="utf8") == part
def test_default_encoding(self, tmpdir):
x = tmpdir.join("hello")
# Can't use UTF8 as the default encoding (ASCII) doesn't support it
part = py.builtin._totext("hello", "ascii")
x.write_text(part, "ascii")
s = x.read_text("ascii")
assert s == part
assert type(s) == type(part)
| mpl-2.0 | 1,221,448,556,308,043,500 | -3,150,909,872,371,650,000 | 33.473256 | 80 | 0.577765 | false |
MwanzanFelipe/rockletonfortune | lib/django/contrib/gis/geoip2/base.py | 335 | 9054 | import os
import socket
import geoip2.database
from django.conf import settings
from django.core.validators import ipv4_re
from django.utils import six
from django.utils.ipv6 import is_valid_ipv6_address
from .resources import City, Country
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = {
'GEOIP_PATH': getattr(settings, 'GEOIP_PATH', None),
'GEOIP_CITY': getattr(settings, 'GEOIP_CITY', 'GeoLite2-City.mmdb'),
'GEOIP_COUNTRY': getattr(settings, 'GEOIP_COUNTRY', 'GeoLite2-Country.mmdb'),
}
class GeoIP2Exception(Exception):
pass
class GeoIP2(object):
# The flags for GeoIP memory caching.
# Try MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that order.
MODE_AUTO = 0
# Use the C extension with memory map.
MODE_MMAP_EXT = 1
# Read from memory map. Pure Python.
MODE_MMAP = 2
# Read database as standard file. Pure Python.
MODE_FILE = 4
# Load database into memory. Pure Python.
MODE_MEMORY = 8
cache_options = {opt: None for opt in (0, 1, 2, 4, 8)}
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
Initialize the GeoIP object. No parameters are required to use default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP datasets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.mmdb) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH setting.
* cache: The cache settings when opening up the GeoIP datasets. May be
an integer in (0, 1, 2, 4, 8) corresponding to the MODE_AUTO,
MODE_MMAP_EXT, MODE_MMAP, MODE_FILE, and MODE_MEMORY,
`GeoIPOptions` C API settings, respectively. Defaults to 0,
meaning MODE_AUTO.
* country: The name of the GeoIP country data file. Defaults to
'GeoLite2-Country.mmdb'; overrides the GEOIP_COUNTRY setting.
* city: The name of the GeoIP city data file. Defaults to
'GeoLite2-City.mmdb'; overrides the GEOIP_CITY setting.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIP2Exception('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS['GEOIP_PATH']
if not path:
raise GeoIP2Exception('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, six.string_types):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try to open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS['GEOIP_COUNTRY'])
if os.path.isfile(country_db):
self._country = geoip2.database.Reader(country_db, mode=cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS['GEOIP_CITY'])
if os.path.isfile(city_db):
self._city = geoip2.database.Reader(city_db, mode=cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure out
# whether the given database path is for the GeoIP country or city
# databases.
reader = geoip2.database.Reader(path, mode=cache)
db_type = reader.metadata().database_type
if db_type.endswith('City'):
# GeoLite City database detected.
self._city = reader
self._city_file = path
elif db_type.endswith('Country'):
# GeoIP Country database detected.
self._country = reader
self._country_file = path
else:
raise GeoIP2Exception('Unable to recognize database edition: %s' % db_type)
else:
raise GeoIP2Exception('GeoIP path must be a valid file or directory.')
@property
def _reader(self):
if self._country:
return self._country
else:
return self._city
@property
def _country_or_city(self):
if self._country:
return self._country.country
else:
return self._city.city
def __del__(self):
# Cleanup any GeoIP file handles lying around.
if self._reader:
self._reader.close()
def __repr__(self):
meta = self._reader.metadata()
version = '[v%s.%s]' % (meta.binary_format_major_version, meta.binary_format_minor_version)
return '<%(cls)s %(version)s _country_file="%(country)s", _city_file="%(city)s">' % {
'cls': self.__class__.__name__,
'version': version,
'country': self._country_file,
'city': self._city_file,
}
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Helper routine for checking the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, six.string_types):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIP2Exception('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIP2Exception('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIP2Exception('Invalid GeoIP city data file: %s' % self._city_file)
# Return the query string back to the caller. GeoIP2 only takes IP addresses.
if not (ipv4_re.match(query) or is_valid_ipv6_address(query)):
query = socket.gethostbyname(query)
return query
def city(self, query):
"""
Return a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
enc_query = self._check_query(query, city=True)
return City(self._city.city(enc_query))
def country_code(self, query):
"Return the country code for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_code']
def country_name(self, query):
"Return the country name for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_name']
def country(self, query):
"""
Return a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
enc_query = self._check_query(query, city_or_country=True)
return Country(self._country_or_city(enc_query))
# #### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
cdict = self.city(query)
if cdict is None:
return None
else:
return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Return a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Return a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Return a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
# #### GeoIP Database Information Routines ####
@property
def info(self):
"Return information about the GeoIP library and databases in use."
meta = self._reader.metadata()
return 'GeoIP Library:\n\t%s.%s\n' % (meta.binary_format_major_version, meta.binary_format_minor_version)
@classmethod
def open(cls, full_path, cache):
return GeoIP2(full_path, cache)
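# Illustrative usage sketch (editor's addition, not part of Django). A minimal
# example of driving GeoIP2, assuming GEOIP_PATH points at a directory holding
# the GeoLite2 .mmdb files; the IP address below is an arbitrary example.
def _geoip2_usage_example():
    g = GeoIP2()                   # picks up GEOIP_PATH / GEOIP_COUNTRY / GEOIP_CITY
    info = g.country('84.2.3.4')   # dict with 'country_code' and 'country_name'
    pnt = g.geos('84.2.3.4')       # GEOS Point with srid=4326, or None
    return info, pnt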
| bsd-3-clause | -6,127,196,988,462,805,000 | -4,527,381,407,833,025,500 | 38.537118 | 113 | 0.617186 | false |
Asquera/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Debug.py | 61 | 6766 | """SCons.Debug
Code for debugging SCons internal things. Shouldn't be
needed by most users.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Debug.py 5134 2010/08/16 23:02:40 bdeegan"
import os
import sys
import time
import weakref
tracked_classes = {}
def logInstanceCreation(instance, name=None):
if name is None:
name = instance.__class__.__name__
if name not in tracked_classes:
tracked_classes[name] = []
tracked_classes[name].append(weakref.ref(instance))
def string_to_classes(s):
if s == '*':
return sorted(tracked_classes.keys())
else:
return s.split()
def fetchLoggedInstances(classes="*"):
classnames = string_to_classes(classes)
return [(cn, len(tracked_classes[cn])) for cn in classnames]
def countLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write("%s: %d\n" % (classname, len(tracked_classes[classname])))
def listLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write('\n%s:\n' % classname)
for ref in tracked_classes[classname]:
obj = ref()
if obj is not None:
file.write(' %s\n' % repr(obj))
def dumpLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write('\n%s:\n' % classname)
for ref in tracked_classes[classname]:
obj = ref()
if obj is not None:
file.write(' %s:\n' % obj)
for key, value in obj.__dict__.items():
file.write(' %20s : %s\n' % (key, value))
if sys.platform[:5] == "linux":
# Linux doesn't actually support memory usage stats from getrusage().
def memory():
mstr = open('/proc/self/stat').read()
mstr = mstr.split()[22]
return int(mstr)
elif sys.platform[:6] == 'darwin':
#TODO really get memory stats for OS X
def memory():
return 0
else:
try:
import resource
except ImportError:
try:
import win32process
import win32api
except ImportError:
def memory():
return 0
else:
def memory():
process_handle = win32api.GetCurrentProcess()
memory_info = win32process.GetProcessMemoryInfo( process_handle )
return memory_info['PeakWorkingSetSize']
else:
def memory():
res = resource.getrusage(resource.RUSAGE_SELF)
return res[4]
# returns caller's stack
def caller_stack(*backlist):
import traceback
if not backlist:
backlist = [0]
result = []
for back in backlist:
tb = traceback.extract_stack(limit=3+back)
key = tb[0][:3]
result.append('%s:%d(%s)' % func_shorten(key))
return result
caller_bases = {}
caller_dicts = {}
# trace a caller's stack
def caller_trace(back=0):
import traceback
tb = traceback.extract_stack(limit=3+back)
tb.reverse()
callee = tb[1][:3]
caller_bases[callee] = caller_bases.get(callee, 0) + 1
for caller in tb[2:]:
caller = callee + caller[:3]
try:
entry = caller_dicts[callee]
except KeyError:
caller_dicts[callee] = entry = {}
entry[caller] = entry.get(caller, 0) + 1
callee = caller
# print a single caller and its callers, if any
def _dump_one_caller(key, file, level=0):
leader = ' '*level
for v,c in sorted([(-v,c) for c,v in caller_dicts[key].items()]):
file.write("%s %6d %s:%d(%s)\n" % ((leader,-v) + func_shorten(c[-3:])))
if c in caller_dicts:
_dump_one_caller(c, file, level+1)
# print each call tree
def dump_caller_counts(file=sys.stdout):
for k in sorted(caller_bases.keys()):
file.write("Callers of %s:%d(%s), %d calls:\n"
% (func_shorten(k) + (caller_bases[k],)))
_dump_one_caller(k, file)
shorten_list = [
( '/scons/SCons/', 1),
( '/src/engine/SCons/', 1),
( '/usr/lib/python', 0),
]
if os.sep != '/':
shorten_list = [(t[0].replace('/', os.sep), t[1]) for t in shorten_list]
def func_shorten(func_tuple):
f = func_tuple[0]
for t in shorten_list:
i = f.find(t[0])
if i >= 0:
if t[1]:
i = i + len(t[0])
return (f[i:],)+func_tuple[1:]
return func_tuple
TraceFP = {}
if sys.platform == 'win32':
TraceDefault = 'con'
else:
TraceDefault = '/dev/tty'
TimeStampDefault = None
StartTime = time.time()
PreviousTime = StartTime
def Trace(msg, file=None, mode='w', tstamp=None):
"""Write a trace message to a file. Whenever a file is specified,
it becomes the default for the next call to Trace()."""
global TraceDefault
global TimeStampDefault
global PreviousTime
if file is None:
file = TraceDefault
else:
TraceDefault = file
if tstamp is None:
tstamp = TimeStampDefault
else:
TimeStampDefault = tstamp
try:
fp = TraceFP[file]
except KeyError:
try:
fp = TraceFP[file] = open(file, mode)
except TypeError:
# Assume we were passed an open file pointer.
fp = file
if tstamp:
now = time.time()
fp.write('%8.4f %8.4f: ' % (now - StartTime, now - PreviousTime))
PreviousTime = now
fp.write(msg)
fp.flush()
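# Illustrative usage sketch (editor's addition, not part of SCons): Trace()
# remembers the last file passed to it, so later calls can omit it; the log
# path below is only an example.
def _trace_usage_example():
    Trace('entering builder\n', '/tmp/scons-trace.log', tstamp=1)
    Trace('still in builder\n')   # reuses the file set by the previous call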
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 | 4,417,159,820,131,191,300 | 6,414,603,247,336,466,000 | 29.754545 | 95 | 0.611735 | false |
RohitDas/cubeproject | lib/django/contrib/gis/geos/geometry.py | 82 | 24691 | """
This module contains the 'base' GEOSGeometry object -- all GEOS Geometries
inherit from this object.
"""
from __future__ import unicode_literals
# Python, ctypes and types dependencies.
from ctypes import addressof, byref, c_double
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.geometry.regex import hex_regex, json_regex, wkt_regex
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.base import GEOSBase, gdal
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.mutable_list import ListMixin
# These functions provide access to a thread-local instance
# of their corresponding GEOS I/O class.
from django.contrib.gis.geos.prototypes.io import (
ewkb_w, wkb_r, wkb_w, wkt_r, wkt_w,
)
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class GEOSGeometry(GEOSBase, ListMixin):
"A class that, generally, encapsulates a GEOS geometry."
# Raise GEOSIndexError instead of plain IndexError
# (see ticket #4740 and GEOSIndexError docstring)
_IndexError = GEOSIndexError
ptr_type = GEOM_PTR
def __init__(self, geo_input, srid=None):
"""
The base constructor for GEOS geometry objects, and may take the
following inputs:
* strings:
- WKT
- HEXEWKB (a PostGIS-specific canonical form)
- GeoJSON (requires GDAL)
* buffer:
- WKB
The `srid` keyword is used to specify the Source Reference Identifier
(SRID) number for this Geometry. If not set, the SRID will be None.
"""
if isinstance(geo_input, bytes):
geo_input = force_text(geo_input)
if isinstance(geo_input, six.string_types):
wkt_m = wkt_regex.match(geo_input)
if wkt_m:
# Handling WKT input.
if wkt_m.group('srid'):
srid = int(wkt_m.group('srid'))
g = wkt_r().read(force_bytes(wkt_m.group('wkt')))
elif hex_regex.match(geo_input):
# Handling HEXEWKB input.
g = wkb_r().read(force_bytes(geo_input))
elif json_regex.match(geo_input):
# Handling GeoJSON input.
if not gdal.HAS_GDAL:
raise ValueError('Initializing geometry from JSON input requires GDAL.')
g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
else:
raise ValueError('String or unicode input unrecognized as WKT EWKT, and HEXEWKB.')
elif isinstance(geo_input, GEOM_PTR):
# When the input is a pointer to a geometry (GEOM_PTR).
g = geo_input
elif isinstance(geo_input, six.memoryview):
# When the input is a buffer (WKB).
g = wkb_r().read(geo_input)
elif isinstance(geo_input, GEOSGeometry):
g = capi.geom_clone(geo_input.ptr)
else:
# Invalid geometry type.
raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
if g:
# Setting the pointer object with a valid pointer.
self.ptr = g
else:
raise GEOSException('Could not initialize GEOS Geometry with given input.')
# Post-initialization setup.
self._post_init(srid)
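    # Illustrative construction sketch (editor's addition, not part of Django);
    # the coordinates below are arbitrary example values:
    #   >>> GEOSGeometry('POINT(-95.33 29.72)')            # WKT
    #   >>> GEOSGeometry('SRID=4326;POINT(-95.33 29.72)')  # EWKT also sets the SRID
    #   >>> GEOSGeometry(memoryview(wkb_bytes))            # WKB buffer
    # GeoJSON string input is accepted as well, but requires GDAL.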
def _post_init(self, srid):
"Helper routine for performing post-initialization setup."
# Setting the SRID, if given.
if srid and isinstance(srid, int):
self.srid = srid
# Setting the class type (e.g., Point, Polygon, etc.)
self.__class__ = GEOS_CLASSES[self.geom_typeid]
# Setting the coordinate sequence for the geometry (will be None on
# geometries that do not have coordinate sequences)
self._set_cs()
def __del__(self):
"""
Destroys this Geometry; in other words, frees the memory used by the
GEOS C++ object.
"""
if self._ptr and capi:
capi.destroy_geom(self._ptr)
def __copy__(self):
"""
Returns a clone because the copy of a GEOSGeometry may contain an
invalid pointer location if the original is garbage collected.
"""
return self.clone()
def __deepcopy__(self, memodict):
"""
The `deepcopy` routine is used by the `Node` class of django.utils.tree;
thus, the protocol routine needs to be implemented to return correct
copies (clones) of these GEOS objects, which use C pointers.
"""
return self.clone()
def __str__(self):
"EWKT is used for the string representation."
return self.ewkt
def __repr__(self):
"Short-hand representation because WKT may be very large."
return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))
# Pickling support
def __getstate__(self):
# The pickled state is simply a tuple of the WKB (in string form)
# and the SRID.
return bytes(self.wkb), self.srid
def __setstate__(self, state):
# Instantiating from the tuple state that was pickled.
wkb, srid = state
ptr = wkb_r().read(six.memoryview(wkb))
if not ptr:
raise GEOSException('Invalid Geometry loaded from pickled state.')
self.ptr = ptr
self._post_init(srid)
# Comparison operators
def __eq__(self, other):
"""
        Equivalence testing: a Geometry may be compared with another Geometry
or a WKT representation.
"""
if isinstance(other, six.string_types):
return self.wkt == other
elif isinstance(other, GEOSGeometry):
return self.equals_exact(other)
else:
return False
def __ne__(self, other):
"The not equals operator."
return not (self == other)
# ### Geometry set-like operations ###
# Thanks to Sean Gillies for inspiration:
# http://lists.gispython.org/pipermail/community/2007-July/001034.html
# g = g1 | g2
def __or__(self, other):
"Returns the union of this Geometry and the other."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
# #### Coordinate Sequence Routines ####
@property
def has_cs(self):
"Returns True if this Geometry has a coordinate sequence, False if not."
# Only these geometries are allowed to have coordinate sequences.
if isinstance(self, (Point, LineString, LinearRing)):
return True
else:
return False
def _set_cs(self):
"Sets the coordinate sequence for this Geometry."
if self.has_cs:
self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz)
else:
self._cs = None
@property
def coord_seq(self):
"Returns a clone of the coordinate sequence for this Geometry."
if self.has_cs:
return self._cs.clone()
# #### Geometry Info ####
@property
def geom_type(self):
"Returns a string representing the Geometry type, e.g. 'Polygon'"
return capi.geos_type(self.ptr).decode()
@property
def geom_typeid(self):
"Returns an integer representing the Geometry type."
return capi.geos_typeid(self.ptr)
@property
def num_geom(self):
"Returns the number of geometries in the Geometry."
return capi.get_num_geoms(self.ptr)
@property
def num_coords(self):
"Returns the number of coordinates in the Geometry."
return capi.get_num_coords(self.ptr)
@property
def num_points(self):
"Returns the number points, or coordinates, in the Geometry."
return self.num_coords
@property
def dims(self):
"Returns the dimension of this Geometry (0=point, 1=line, 2=surface)."
return capi.get_dims(self.ptr)
def normalize(self):
"Converts this Geometry to normal form (or canonical form)."
return capi.geos_normalize(self.ptr)
# #### Unary predicates ####
@property
def empty(self):
"""
Returns a boolean indicating whether the set of points in this Geometry
        is empty.
"""
return capi.geos_isempty(self.ptr)
@property
def hasz(self):
"Returns whether the geometry has a 3D dimension."
return capi.geos_hasz(self.ptr)
@property
def ring(self):
"Returns whether or not the geometry is a ring."
return capi.geos_isring(self.ptr)
@property
def simple(self):
"Returns false if the Geometry not simple."
return capi.geos_issimple(self.ptr)
@property
def valid(self):
"This property tests the validity of this Geometry."
return capi.geos_isvalid(self.ptr)
@property
def valid_reason(self):
"""
Returns a string containing the reason for any invalidity.
"""
return capi.geos_isvalidreason(self.ptr).decode()
# #### Binary predicates. ####
def contains(self, other):
"Returns true if other.within(this) returns true."
return capi.geos_contains(self.ptr, other.ptr)
def crosses(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
        is T*T****** (for a point and a curve, a point and an area or a line and
an area) 0******** (for two curves).
"""
return capi.geos_crosses(self.ptr, other.ptr)
def disjoint(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FF*FF****.
"""
return capi.geos_disjoint(self.ptr, other.ptr)
def equals(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**FFF*.
"""
return capi.geos_equals(self.ptr, other.ptr)
def equals_exact(self, other, tolerance=0):
"""
Returns true if the two Geometries are exactly equal, up to a
specified tolerance.
"""
return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance))
def intersects(self, other):
"Returns true if disjoint returns false."
return capi.geos_intersects(self.ptr, other.ptr)
def overlaps(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*T***T** (for two points or two surfaces) 1*T***T** (for two curves).
"""
return capi.geos_overlaps(self.ptr, other.ptr)
def relate_pattern(self, other, pattern):
"""
Returns true if the elements in the DE-9IM intersection matrix for the
two Geometries match the elements in pattern.
"""
if not isinstance(pattern, six.string_types) or len(pattern) > 9:
raise GEOSException('invalid intersection matrix pattern')
return capi.geos_relatepattern(self.ptr, other.ptr, force_bytes(pattern))
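    # Illustrative sketch (editor's addition): the pattern is a 9-character DE-9IM
    # string where 'T' requires an intersection, 'F' forbids one and '*' is a
    # wildcard; e.g. the pattern used by within() below gives the same answer:
    #   >>> a.relate_pattern(b, 'T*F**F***') == a.within(b)
    #   True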
def touches(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FT*******, F**T***** or F***T****.
"""
return capi.geos_touches(self.ptr, other.ptr)
def within(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**F***.
"""
return capi.geos_within(self.ptr, other.ptr)
# #### SRID Routines ####
def get_srid(self):
"Gets the SRID for the geometry, returns None if no SRID is set."
s = capi.geos_get_srid(self.ptr)
if s == 0:
return None
else:
return s
def set_srid(self, srid):
"Sets the SRID for the geometry."
capi.geos_set_srid(self.ptr, srid)
srid = property(get_srid, set_srid)
# #### Output Routines ####
@property
def ewkt(self):
"""
Returns the EWKT (SRID + WKT) of the Geometry. Note that Z values
are only included in this representation if GEOS >= 3.3.0.
"""
if self.get_srid():
return 'SRID=%s;%s' % (self.srid, self.wkt)
else:
return self.wkt
@property
def wkt(self):
"Returns the WKT (Well-Known Text) representation of this Geometry."
return wkt_w(3 if self.hasz else 2).write(self).decode()
@property
def hex(self):
"""
Returns the WKB of this Geometry in hexadecimal form. Please note
that the SRID is not included in this representation because it is not
a part of the OGC specification (use the `hexewkb` property instead).
"""
# A possible faster, all-python, implementation:
# str(self.wkb).encode('hex')
return wkb_w(3 if self.hasz else 2).write_hex(self)
@property
def hexewkb(self):
"""
Returns the EWKB of this Geometry in hexadecimal form. This is an
        extension of the WKB specification that includes the SRID value that is
        a part of this geometry.
"""
return ewkb_w(3 if self.hasz else 2).write_hex(self)
@property
def json(self):
"""
Returns GeoJSON representation of this Geometry if GDAL is installed.
"""
if gdal.HAS_GDAL:
return self.ogr.json
else:
raise GEOSException('GeoJSON output only supported when GDAL is installed.')
geojson = json
@property
def wkb(self):
"""
Returns the WKB (Well-Known Binary) representation of this Geometry
as a Python buffer. SRID and Z values are not included, use the
`ewkb` property instead.
"""
return wkb_w(3 if self.hasz else 2).write(self)
@property
def ewkb(self):
"""
Return the EWKB representation of this Geometry as a Python buffer.
This is an extension of the WKB specification that includes any SRID
        value that is a part of this geometry.
"""
return ewkb_w(3 if self.hasz else 2).write(self)
@property
def kml(self):
"Returns the KML representation of this Geometry."
gtype = self.geom_type
return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype)
@property
def prepared(self):
"""
Returns a PreparedGeometry corresponding to this geometry -- it is
optimized for the contains, intersects, and covers operations.
"""
return PreparedGeometry(self)
# #### GDAL-specific output routines ####
@property
def ogr(self):
"Returns the OGR Geometry for this Geometry."
if not gdal.HAS_GDAL:
raise GEOSException('GDAL required to convert to an OGRGeometry.')
if self.srid:
try:
return gdal.OGRGeometry(self.wkb, self.srid)
except SRSException:
pass
return gdal.OGRGeometry(self.wkb)
@property
def srs(self):
"Returns the OSR SpatialReference for SRID of this Geometry."
if not gdal.HAS_GDAL:
raise GEOSException('GDAL required to return a SpatialReference object.')
if self.srid:
try:
return gdal.SpatialReference(self.srid)
except SRSException:
pass
return None
@property
def crs(self):
"Alias for `srs` property."
return self.srs
def transform(self, ct, clone=False):
"""
Requires GDAL. Transforms the geometry according to the given
        transformation object, which may be an integer SRID, a WKT string, or a
        PROJ.4 string. By default, the geometry is transformed in-place and
nothing is returned. However if the `clone` keyword is set, then this
geometry will not be modified and a transformed clone will be returned
instead.
"""
srid = self.srid
if ct == srid:
# short-circuit where source & dest SRIDs match
if clone:
return self.clone()
else:
return
if (srid is None) or (srid < 0):
raise GEOSException("Calling transform() with no SRID set is not supported")
if not gdal.HAS_GDAL:
raise GEOSException("GDAL library is not available to transform() geometry.")
# Creating an OGR Geometry, which is then transformed.
g = self.ogr
g.transform(ct)
# Getting a new GEOS pointer
ptr = wkb_r().read(g.wkb)
if clone:
# User wants a cloned transformed geometry returned.
return GEOSGeometry(ptr, srid=g.srid)
if ptr:
# Reassigning pointer, and performing post-initialization setup
# again due to the reassignment.
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(g.srid)
else:
raise GEOSException('Transformed WKB was invalid.')
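    # Illustrative sketch (editor's addition), assuming GDAL is installed and the
    # geometry has an SRID; 2774 is just an example target SRID:
    #   >>> g = GEOSGeometry('SRID=4326;POINT(-104.609 38.255)')
    #   >>> g.transform(2774)                   # reprojects this geometry in place
    #   >>> g2 = g.transform(4326, clone=True)  # returns a transformed copy instead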
# #### Topology Routines ####
def _topology(self, gptr):
"Helper routine to return Geometry from the given pointer."
return GEOSGeometry(gptr, srid=self.srid)
@property
def boundary(self):
"Returns the boundary as a newly allocated Geometry object."
return self._topology(capi.geos_boundary(self.ptr))
def buffer(self, width, quadsegs=8):
"""
Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
Spatial Reference System of this Geometry. The optional third parameter sets
the number of segment used to approximate a quarter circle (defaults to 8).
(Text from PostGIS documentation at ch. 6.1.3)
"""
return self._topology(capi.geos_buffer(self.ptr, width, quadsegs))
@property
def centroid(self):
"""
The centroid is equal to the centroid of the set of component Geometries
of highest dimension (since the lower-dimension geometries contribute zero
"weight" to the centroid).
"""
return self._topology(capi.geos_centroid(self.ptr))
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points
in the Geometry.
"""
return self._topology(capi.geos_convexhull(self.ptr))
def difference(self, other):
"""
Returns a Geometry representing the points making up this Geometry
that do not make up other.
"""
return self._topology(capi.geos_difference(self.ptr, other.ptr))
@property
def envelope(self):
"Return the envelope for this geometry (a polygon)."
return self._topology(capi.geos_envelope(self.ptr))
def interpolate(self, distance):
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('interpolate only works on LineString and MultiLineString geometries')
return self._topology(capi.geos_interpolate(self.ptr, distance))
def interpolate_normalized(self, distance):
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('interpolate only works on LineString and MultiLineString geometries')
return self._topology(capi.geos_interpolate_normalized(self.ptr, distance))
def intersection(self, other):
"Returns a Geometry representing the points shared by this Geometry and other."
return self._topology(capi.geos_intersection(self.ptr, other.ptr))
@property
def point_on_surface(self):
"Computes an interior point of this Geometry."
return self._topology(capi.geos_pointonsurface(self.ptr))
def project(self, point):
if not isinstance(point, Point):
raise TypeError('locate_point argument must be a Point')
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('locate_point only works on LineString and MultiLineString geometries')
return capi.geos_project(self.ptr, point.ptr)
def project_normalized(self, point):
if not isinstance(point, Point):
raise TypeError('locate_point argument must be a Point')
if not isinstance(self, (LineString, MultiLineString)):
raise TypeError('locate_point only works on LineString and MultiLineString geometries')
return capi.geos_project_normalized(self.ptr, point.ptr)
def relate(self, other):
"Returns the DE-9IM intersection matrix for this Geometry and the other."
return capi.geos_relate(self.ptr, other.ptr).decode()
def simplify(self, tolerance=0.0, preserve_topology=False):
"""
Returns the Geometry, simplified using the Douglas-Peucker algorithm
to the specified tolerance (higher tolerance => less points). If no
tolerance provided, defaults to 0.
        By default, this function does not preserve topology - e.g. polygons can
        be split or collapse to lines, holes can be created or disappear, and
        lines can cross. By specifying preserve_topology=True,
the result will have the same dimension and number of components as the
input. This is significantly slower.
"""
if preserve_topology:
return self._topology(capi.geos_preservesimplify(self.ptr, tolerance))
else:
return self._topology(capi.geos_simplify(self.ptr, tolerance))
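    # Illustrative sketch (editor's addition); 0.05 is an arbitrary tolerance:
    #   >>> rough = poly.simplify(0.05)                         # fastest, may break topology
    #   >>> safe = poly.simplify(0.05, preserve_topology=True)  # slower, keeps validity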
def sym_difference(self, other):
"""
Returns a set combining the points in this Geometry not in other,
and the points in other not in this Geometry.
"""
return self._topology(capi.geos_symdifference(self.ptr, other.ptr))
def union(self, other):
"Returns a Geometry representing all the points in this Geometry and other."
return self._topology(capi.geos_union(self.ptr, other.ptr))
# #### Other Routines ####
@property
def area(self):
"Returns the area of the Geometry."
return capi.geos_area(self.ptr, byref(c_double()))
def distance(self, other):
"""
Returns the distance between the closest points on this Geometry
and the other. Units will be in those of the coordinate system of
the Geometry.
"""
if not isinstance(other, GEOSGeometry):
raise TypeError('distance() works only on other GEOS Geometries.')
return capi.geos_distance(self.ptr, other.ptr, byref(c_double()))
@property
def extent(self):
"""
Returns the extent of this geometry as a 4-tuple, consisting of
(xmin, ymin, xmax, ymax).
"""
env = self.envelope
if isinstance(env, Point):
xmin, ymin = env.tuple
xmax, ymax = xmin, ymin
else:
xmin, ymin = env[0][0]
xmax, ymax = env[0][2]
return (xmin, ymin, xmax, ymax)
@property
def length(self):
"""
Returns the length of this Geometry (e.g., 0 for point, or the
circumference of a Polygon).
"""
return capi.geos_length(self.ptr, byref(c_double()))
def clone(self):
"Clones this Geometry."
return GEOSGeometry(capi.geom_clone(self.ptr), srid=self.srid)
# Class mapping dictionary. Has to be at the end to avoid import
# conflicts with GEOSGeometry.
from django.contrib.gis.geos.linestring import LineString, LinearRing # isort:skip
from django.contrib.gis.geos.point import Point # isort:skip
from django.contrib.gis.geos.polygon import Polygon # isort:skip
from django.contrib.gis.geos.collections import ( # isort:skip
GeometryCollection, MultiPoint, MultiLineString, MultiPolygon)
from django.contrib.gis.geos.prepared import PreparedGeometry # isort:skip
GEOS_CLASSES = {
0: Point,
1: LineString,
2: LinearRing,
3: Polygon,
4: MultiPoint,
5: MultiLineString,
6: MultiPolygon,
7: GeometryCollection,
}
| bsd-3-clause | 6,130,976,694,634,964,000 | 3,725,232,434,926,220,000 | 34.680636 | 99 | 0.617877 | false |
williamFalcon/pytorch-lightning | tests/trainer/test_dataloaders.py | 1 | 9641 | import pytest
import tests.models.utils as tutils
from pytorch_lightning import Trainer
from tests.models import (
TestModelBase,
LightningTestModel,
LightEmptyTestStep,
LightValidationMultipleDataloadersMixin,
LightTestMultipleDataloadersMixin,
LightTestFitSingleTestDataloadersMixin,
LightTestFitMultipleTestDataloadersMixin,
LightValStepFitMultipleDataloadersMixin,
LightValStepFitSingleDataloaderMixin,
LightTrainDataloader,
)
from pytorch_lightning.utilities.debugging import MisconfigurationException
def test_multiple_val_dataloader(tmpdir):
"""Verify multiple val_dataloader."""
tutils.reset_seed()
class CurrentTestModel(
LightTrainDataloader,
LightValidationMultipleDataloadersMixin,
TestModelBase,
):
pass
hparams = tutils.get_hparams()
model = CurrentTestModel(hparams)
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=1.0,
)
# fit model
trainer = Trainer(**trainer_options)
result = trainer.fit(model)
# verify training completed
assert result == 1
# verify there are 2 val loaders
assert len(trainer.val_dataloaders) == 2, \
'Multiple val_dataloaders not initiated properly'
# make sure predictions are good for each val set
for dataloader in trainer.val_dataloaders:
tutils.run_prediction(dataloader, trainer.model)
def test_multiple_test_dataloader(tmpdir):
"""Verify multiple test_dataloader."""
tutils.reset_seed()
class CurrentTestModel(
LightTrainDataloader,
LightTestMultipleDataloadersMixin,
LightEmptyTestStep,
TestModelBase,
):
pass
hparams = tutils.get_hparams()
model = CurrentTestModel(hparams)
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
# fit model
trainer = Trainer(**trainer_options)
trainer.fit(model)
trainer.test()
# verify there are 2 val loaders
assert len(trainer.test_dataloaders) == 2, \
'Multiple test_dataloaders not initiated properly'
# make sure predictions are good for each test set
for dataloader in trainer.test_dataloaders:
tutils.run_prediction(dataloader, trainer.model)
# run the test method
trainer.test()
def test_train_dataloaders_passed_to_fit(tmpdir):
""" Verify that train dataloader can be passed to fit """
tutils.reset_seed()
class CurrentTestModel(LightTrainDataloader, TestModelBase):
pass
hparams = tutils.get_hparams()
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
# only train passed to fit
model = CurrentTestModel(hparams)
trainer = Trainer(**trainer_options)
fit_options = dict(train_dataloader=model._dataloader(train=True))
results = trainer.fit(model, **fit_options)
def test_train_val_dataloaders_passed_to_fit(tmpdir):
""" Verify that train & val dataloader can be passed to fit """
tutils.reset_seed()
class CurrentTestModel(
LightTrainDataloader,
LightValStepFitSingleDataloaderMixin,
TestModelBase,
):
pass
hparams = tutils.get_hparams()
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
# train, val passed to fit
model = CurrentTestModel(hparams)
trainer = Trainer(**trainer_options)
fit_options = dict(train_dataloader=model._dataloader(train=True),
val_dataloaders=model._dataloader(train=False))
results = trainer.fit(model, **fit_options)
assert len(trainer.val_dataloaders) == 1, \
f"`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}"
def test_all_dataloaders_passed_to_fit(tmpdir):
""" Verify train, val & test dataloader can be passed to fit """
tutils.reset_seed()
class CurrentTestModel(
LightTrainDataloader,
LightValStepFitSingleDataloaderMixin,
LightTestFitSingleTestDataloadersMixin,
LightEmptyTestStep,
TestModelBase,
):
pass
hparams = tutils.get_hparams()
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
# train, val and test passed to fit
model = CurrentTestModel(hparams)
trainer = Trainer(**trainer_options)
fit_options = dict(train_dataloader=model._dataloader(train=True),
val_dataloaders=model._dataloader(train=False),
test_dataloaders=model._dataloader(train=False))
results = trainer.fit(model, **fit_options)
trainer.test()
assert len(trainer.val_dataloaders) == 1, \
f"val_dataloaders` not initiated properly, got {trainer.val_dataloaders}"
assert len(trainer.test_dataloaders) == 1, \
f"test_dataloaders` not initiated properly, got {trainer.test_dataloaders}"
def test_multiple_dataloaders_passed_to_fit(tmpdir):
"""Verify that multiple val & test dataloaders can be passed to fit."""
tutils.reset_seed()
class CurrentTestModel(
LightningTestModel,
LightValStepFitMultipleDataloadersMixin,
LightTestFitMultipleTestDataloadersMixin,
):
pass
hparams = tutils.get_hparams()
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
# train, multiple val and multiple test passed to fit
model = CurrentTestModel(hparams)
trainer = Trainer(**trainer_options)
fit_options = dict(train_dataloader=model._dataloader(train=True),
val_dataloaders=[model._dataloader(train=False),
model._dataloader(train=False)],
test_dataloaders=[model._dataloader(train=False),
model._dataloader(train=False)])
results = trainer.fit(model, **fit_options)
trainer.test()
assert len(trainer.val_dataloaders) == 2, \
f"Multiple `val_dataloaders` not initiated properly, got {trainer.val_dataloaders}"
assert len(trainer.test_dataloaders) == 2, \
f"Multiple `test_dataloaders` not initiated properly, got {trainer.test_dataloaders}"
def test_mixing_of_dataloader_options(tmpdir):
"""Verify that dataloaders can be passed to fit"""
tutils.reset_seed()
class CurrentTestModel(
LightTrainDataloader,
LightValStepFitSingleDataloaderMixin,
LightTestFitSingleTestDataloadersMixin,
TestModelBase,
):
pass
hparams = tutils.get_hparams()
model = CurrentTestModel(hparams)
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_epochs=1,
val_percent_check=0.1,
train_percent_check=0.2
)
# fit model
trainer = Trainer(**trainer_options)
fit_options = dict(val_dataloaders=model._dataloader(train=False))
results = trainer.fit(model, **fit_options)
# fit model
trainer = Trainer(**trainer_options)
fit_options = dict(val_dataloaders=model._dataloader(train=False),
test_dataloaders=model._dataloader(train=False))
_ = trainer.fit(model, **fit_options)
trainer.test()
assert len(trainer.val_dataloaders) == 1, \
f"`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}"
assert len(trainer.test_dataloaders) == 1, \
f"test_dataloaders` not initiated properly, got {trainer.test_dataloaders}"
def test_inf_train_dataloader(tmpdir):
"""Test inf train data loader (e.g. IterableDataset)"""
tutils.reset_seed()
class CurrentTestModel(LightningTestModel):
def train_dataloader(self):
dataloader = self._dataloader(train=True)
class CustomInfDataLoader:
def __init__(self, dataloader):
self.dataloader = dataloader
self.iter = iter(dataloader)
self.count = 0
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count >= 5:
raise StopIteration
self.count = self.count + 1
try:
return next(self.iter)
except StopIteration:
self.iter = iter(self.dataloader)
return next(self.iter)
return CustomInfDataLoader(dataloader)
hparams = tutils.get_hparams()
model = CurrentTestModel(hparams)
# fit model
with pytest.raises(MisconfigurationException):
trainer = Trainer(
default_save_path=tmpdir,
max_epochs=1,
val_check_interval=0.5
)
trainer.fit(model)
# logger file to get meta
trainer = Trainer(
default_save_path=tmpdir,
max_epochs=1,
val_check_interval=50,
)
result = trainer.fit(model)
# verify training completed
assert result == 1
| apache-2.0 | 3,511,133,321,314,229,000 | -3,158,681,638,984,570,400 | 28.756173 | 93 | 0.637382 | false |
ModdedPA/android_external_chromium_org | tools/deep_memory_profiler/tests/mock_gsutil.py | 131 | 1558 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
import zipfile
def main():
ZIP_PATTERN = re.compile('dmprof......\.zip')
assert len(sys.argv) == 6
assert sys.argv[1] == 'cp'
assert sys.argv[2] == '-a'
assert sys.argv[3] == 'public-read'
assert ZIP_PATTERN.match(os.path.basename(sys.argv[4]))
assert sys.argv[5] == 'gs://test-storage/'
zip_file = zipfile.ZipFile(sys.argv[4], 'r')
expected_nameset = set(['heap.01234.0001.heap',
'heap.01234.0002.heap',
'heap.01234.0001.buckets',
'heap.01234.0002.buckets',
'heap.01234.symmap/maps',
'heap.01234.symmap/chrome.uvwxyz.readelf-e',
'heap.01234.symmap/chrome.abcdef.nm',
'heap.01234.symmap/files.json'])
assert set(zip_file.namelist()) == expected_nameset
heap_1 = zip_file.getinfo('heap.01234.0001.heap')
assert heap_1.CRC == 763099253
assert heap_1.file_size == 1107
buckets_1 = zip_file.getinfo('heap.01234.0001.buckets')
assert buckets_1.CRC == 2632528901
assert buckets_1.file_size == 2146
nm_chrome = zip_file.getinfo('heap.01234.symmap/chrome.abcdef.nm')
assert nm_chrome.CRC == 2717882373
assert nm_chrome.file_size == 131049
zip_file.close()
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 4,915,781,905,251,792,000 | 8,913,233,344,035,786,000 | 29.54902 | 72 | 0.607831 | false |
3manuek/kubernetes | examples/cluster-dns/images/backend/server.py | 468 | 1313 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
PORT_NUMBER = 8000
# This class will handles any incoming request.
class HTTPHandler(BaseHTTPRequestHandler):
# Handler for the GET requests
def do_GET(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write("Hello World!")
try:
# Create a web server and define the handler to manage the incoming request.
server = HTTPServer(('', PORT_NUMBER), HTTPHandler)
print 'Started httpserver on port ' , PORT_NUMBER
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down the web server'
server.socket.close()
| apache-2.0 | -8,501,745,812,661,307,000 | 6,253,021,657,450,215,000 | 34.486486 | 78 | 0.752475 | false |
MiniPlayer/log-island | logisland-plugins/logisland-scripting-processors-plugin/src/main/resources/nltk/test/doctest_nose_plugin.py | 28 | 6008 | # -*- coding: utf-8 -*-
from __future__ import print_function
from nose.suite import ContextList
import re
import sys
import os
import codecs
import doctest
from nose.plugins.base import Plugin
from nose.util import tolist, anyp
from nose.plugins.doctests import Doctest, log, DocFileCase
ALLOW_UNICODE = doctest.register_optionflag('ALLOW_UNICODE')
class _UnicodeOutputChecker(doctest.OutputChecker):
_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
def _remove_u_prefixes(self, txt):
return re.sub(self._literal_re, r'\1\2', txt)
def check_output(self, want, got, optionflags):
res = doctest.OutputChecker.check_output(self, want, got, optionflags)
if res:
return True
if not (optionflags & ALLOW_UNICODE):
return False
# ALLOW_UNICODE is active and want != got
cleaned_want = self._remove_u_prefixes(want)
cleaned_got = self._remove_u_prefixes(got)
res = doctest.OutputChecker.check_output(self, cleaned_want, cleaned_got, optionflags)
return res
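# Illustrative sketch (editor's addition, not part of the original module): once
# ALLOW_UNICODE is active, u-prefixed and bare string reprs compare equal; the
# literals below are examples only.
def _allow_unicode_example():
    checker = _UnicodeOutputChecker()
    return checker.check_output("u'foo'\n", "'foo'\n", ALLOW_UNICODE)  # -> True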
_checker = _UnicodeOutputChecker()
class DoctestPluginHelper(object):
"""
This mixin adds print_function future import to all test cases.
It also adds support for:
'#doctest +ALLOW_UNICODE' option that
makes DocTestCase think u'foo' == 'foo'.
'#doctest doctestencoding=utf-8' option that
changes the encoding of doctest files
"""
OPTION_BY_NAME = ('doctestencoding',)
def loadTestsFromFileUnicode(self, filename):
if self.extension and anyp(filename.endswith, self.extension):
name = os.path.basename(filename)
dh = codecs.open(filename, 'r', self.options.get('doctestencoding'))
try:
doc = dh.read()
finally:
dh.close()
fixture_context = None
globs = {'__file__': filename}
if self.fixtures:
base, ext = os.path.splitext(name)
dirname = os.path.dirname(filename)
sys.path.append(dirname)
fixt_mod = base + self.fixtures
try:
fixture_context = __import__(
fixt_mod, globals(), locals(), ["nop"])
except ImportError as e:
log.debug(
"Could not import %s: %s (%s)", fixt_mod, e, sys.path)
log.debug("Fixture module %s resolved to %s",
fixt_mod, fixture_context)
if hasattr(fixture_context, 'globs'):
globs = fixture_context.globs(globs)
parser = doctest.DocTestParser()
test = parser.get_doctest(
doc, globs=globs, name=name,
filename=filename, lineno=0)
if test.examples:
case = DocFileCase(
test,
optionflags=self.optionflags,
setUp=getattr(fixture_context, 'setup_test', None),
tearDown=getattr(fixture_context, 'teardown_test', None),
result_var=self.doctest_result_var)
if fixture_context:
yield ContextList((case,), context=fixture_context)
else:
yield case
else:
yield False # no tests to load
def loadTestsFromFile(self, filename):
cases = self.loadTestsFromFileUnicode(filename)
for case in cases:
if isinstance(case, ContextList):
yield ContextList([self._patchTestCase(c) for c in case], case.context)
else:
yield self._patchTestCase(case)
def loadTestsFromModule(self, module):
"""Load doctests from the module.
"""
for suite in super(DoctestPluginHelper, self).loadTestsFromModule(module):
cases = [self._patchTestCase(case) for case in suite._get_tests()]
yield self.suiteClass(cases, context=module, can_split=False)
def _patchTestCase(self, case):
if case:
case._dt_test.globs['print_function'] = print_function
case._dt_checker = _checker
return case
def configure(self, options, config):
# it is overriden in order to fix doctest options discovery
Plugin.configure(self, options, config)
self.doctest_result_var = options.doctest_result_var
self.doctest_tests = options.doctest_tests
self.extension = tolist(options.doctestExtension)
self.fixtures = options.doctestFixtures
self.finder = doctest.DocTestFinder()
#super(DoctestPluginHelper, self).configure(options, config)
self.optionflags = 0
self.options = {}
if options.doctestOptions:
stroptions = ",".join(options.doctestOptions).split(',')
for stroption in stroptions:
try:
if stroption.startswith('+'):
self.optionflags |= doctest.OPTIONFLAGS_BY_NAME[stroption[1:]]
continue
elif stroption.startswith('-'):
self.optionflags &= ~doctest.OPTIONFLAGS_BY_NAME[stroption[1:]]
continue
try:
key,value=stroption.split('=')
except ValueError:
pass
else:
if not key in self.OPTION_BY_NAME:
raise ValueError()
self.options[key]=value
continue
except (AttributeError, ValueError, KeyError):
raise ValueError("Unknown doctest option {}".format(stroption))
else:
raise ValueError("Doctest option is not a flag or a key/value pair: {} ".format(stroption))
class DoctestFix(DoctestPluginHelper, Doctest):
pass
| apache-2.0 | -8,156,703,631,221,619,000 | 6,856,711,635,703,229,000 | 37.512821 | 111 | 0.563083 | false |
mpasternak/pyglet-fix-issue-552 | experimental/mt_media/drivers/directsound/__init__.py | 28 | 18147 | #!/usr/bin/python
# $Id:$
import ctypes
import math
import sys
import threading
import time
import pyglet
_debug = pyglet.options['debug_media']
import mt_media
import lib_dsound as lib
from pyglet.window.win32 import _user32, _kernel32
class DirectSoundException(mt_media.MediaException):
pass
def _db(gain):
'''Convert linear gain in range [0.0, 1.0] to 100ths of dB.'''
if gain <= 0:
return -10000
return max(-10000, min(int(1000 * math.log(min(gain, 1))), 0))
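# Worked example (editor's addition): _db(1.0) -> 0 (full volume), _db(0.5) -> -693
# (int(1000 * ln(0.5))), and _db(0.0) -> -10000, DirectSound's silence floor.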
class DirectSoundWorker(mt_media.MediaThread):
_min_write_size = 9600
# Time to wait if there are players, but they're all full.
_nap_time = 0.05
# Time to wait if there are no players.
_sleep_time = None
def __init__(self):
super(DirectSoundWorker, self).__init__()
self.players = set()
def run(self):
while True:
# This is a big lock, but ensures a player is not deleted while
# we're processing it -- this saves on extra checks in the
# player's methods that would otherwise have to check that it's
# still alive.
if _debug:
print 'DirectSoundWorker run attempt acquire'
self.condition.acquire()
if _debug:
print 'DirectSoundWorker run acquire'
if self.stopped:
self.condition.release()
break
sleep_time = -1
if self.players:
player = None
write_size = 0
for p in self.players:
s = p.get_write_size()
if s > write_size:
player = p
write_size = s
if write_size > self._min_write_size:
player.refill(write_size)
else:
sleep_time = self._nap_time
else:
sleep_time = self._sleep_time
self.condition.release()
if _debug:
print 'DirectSoundWorker run release'
if sleep_time != -1:
self.sleep(sleep_time)
if _debug:
print 'DirectSoundWorker exiting'
def add(self, player):
if _debug:
print 'DirectSoundWorker add', player
self.condition.acquire()
self.players.add(player)
self.condition.notify()
self.condition.release()
if _debug:
print 'return DirectSoundWorker add', player
def remove(self, player):
if _debug:
print 'DirectSoundWorker remove', player
self.condition.acquire()
try:
self.players.remove(player)
except KeyError:
pass
self.condition.notify()
self.condition.release()
if _debug:
print 'return DirectSoundWorker remove', player
class DirectSoundAudioPlayer(mt_media.AbstractAudioPlayer):
# How many bytes the ring buffer should be
_buffer_size = 44800 * 1
# Need to cache these because pyglet API allows update separately, but
# DSound requires both to be set at once.
_cone_inner_angle = 360
_cone_outer_angle = 360
def __init__(self, source_group, player):
super(DirectSoundAudioPlayer, self).__init__(source_group, player)
# Locking strategy:
# All DirectSound calls should be locked. All instance vars relating
# to buffering/filling/time/events should be locked (used by both
# application and worker thread). Other instance vars (consts and
# 3d vars) do not need to be locked.
self._lock = threading.RLock()
# Desired play state (may be actually paused due to underrun -- not
# implemented yet).
self._playing = False
        # Up to one audio data packet may be buffered if more data was received
        # from the source than could be written immediately into the
        # buffer.  See refill().
self._next_audio_data = None
# Theoretical write and play cursors for an infinite buffer. play
# cursor is always <= write cursor (when equal, underrun is
# happening).
self._write_cursor = 0
self._play_cursor = 0
# Cursor position of end of data. Silence is written after
# eos for one buffer size.
self._eos_cursor = None
# Indexes into DSound circular buffer. Complications ensue wrt each
# other to avoid writing over the play cursor. See get_write_size and
# write().
self._play_cursor_ring = 0
self._write_cursor_ring = 0
# List of (play_cursor, MediaEvent), in sort order
self._events = []
# List of (cursor, timestamp), in sort order (cursor gives expiry
# place of the timestamp)
self._timestamps = []
audio_format = source_group.audio_format
wfx = lib.WAVEFORMATEX()
wfx.wFormatTag = lib.WAVE_FORMAT_PCM
wfx.nChannels = audio_format.channels
wfx.nSamplesPerSec = audio_format.sample_rate
wfx.wBitsPerSample = audio_format.sample_size
wfx.nBlockAlign = wfx.wBitsPerSample * wfx.nChannels // 8
wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign
dsbdesc = lib.DSBUFFERDESC()
dsbdesc.dwSize = ctypes.sizeof(dsbdesc)
dsbdesc.dwFlags = (lib.DSBCAPS_GLOBALFOCUS |
lib.DSBCAPS_GETCURRENTPOSITION2 |
lib.DSBCAPS_CTRLFREQUENCY |
lib.DSBCAPS_CTRLVOLUME)
if audio_format.channels == 1:
dsbdesc.dwFlags |= lib.DSBCAPS_CTRL3D
dsbdesc.dwBufferBytes = self._buffer_size
dsbdesc.lpwfxFormat = ctypes.pointer(wfx)
# DSound buffer
self._buffer = lib.IDirectSoundBuffer()
driver._dsound.CreateSoundBuffer(dsbdesc,
ctypes.byref(self._buffer),
None)
if audio_format.channels == 1:
self._buffer3d = lib.IDirectSound3DBuffer()
self._buffer.QueryInterface(lib.IID_IDirectSound3DBuffer,
ctypes.byref(self._buffer3d))
else:
self._buffer3d = None
self._buffer.SetCurrentPosition(0)
self.refill(self._buffer_size)
def __del__(self):
try:
self.delete()
except:
pass
def delete(self):
if driver and driver.worker:
driver.worker.remove(self)
self.lock()
self._buffer.Stop()
self._buffer.Release()
self._buffer = None
if self._buffer3d:
self._buffer3d.Release()
self._buffer3d = None
self.unlock()
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
def play(self):
if _debug:
print 'DirectSound play'
driver.worker.add(self)
self.lock()
if not self._playing:
self._playing = True
self._buffer.Play(0, 0, lib.DSBPLAY_LOOPING)
self.unlock()
if _debug:
print 'return DirectSound play'
def stop(self):
if _debug:
print 'DirectSound stop'
driver.worker.remove(self)
self.lock()
if self._playing:
self._playing = False
self._buffer.Stop()
self.unlock()
if _debug:
print 'return DirectSound stop'
def clear(self):
if _debug:
print 'DirectSound clear'
self.lock()
self._buffer.SetCurrentPosition(0)
self._play_cursor_ring = self._write_cursor_ring = 0
self._play_cursor = self._write_cursor
self._eos_cursor = None
self._next_audio_data = None
del self._events[:]
del self._timestamps[:]
self.unlock()
def refill(self, write_size):
self.lock()
while write_size > 0:
if _debug:
print 'refill, write_size =', write_size
# Get next audio packet (or remains of last one)
if self._next_audio_data:
audio_data = self._next_audio_data
self._next_audio_data = None
else:
audio_data = self.source_group.get_audio_data(write_size)
# Write it, or silence if there are no more packets
if audio_data:
# Add events
for event in audio_data.events:
event_cursor = self._write_cursor + event.timestamp * \
self.source_group.audio_format.bytes_per_second
self._events.append((event_cursor, event))
# Add timestamp (at end of this data packet)
ts_cursor = self._write_cursor + audio_data.length
self._timestamps.append(
(ts_cursor, audio_data.timestamp + audio_data.duration))
# Write data
if _debug:
print 'write', audio_data.length
length = min(write_size, audio_data.length)
self.write(audio_data, length)
if audio_data.length:
self._next_audio_data = audio_data
write_size -= length
else:
# Write silence
if self._eos_cursor is None:
self._eos_cursor = self._write_cursor
self._events.append(
(self._eos_cursor,
mt_media.MediaEvent(0, 'on_eos')))
self._events.append(
(self._eos_cursor,
mt_media.MediaEvent(0, 'on_source_group_eos')))
self._events.sort()
if self._write_cursor > self._eos_cursor + self._buffer_size:
self.stop()
else:
self.write(None, write_size)
write_size = 0
self.unlock()
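    # Note (added illustration, not in the original source): once the source is
    # exhausted, _eos_cursor marks where real data ended and refill() keeps
    # writing silence until the write cursor is a full _buffer_size past that
    # point, giving playback time to reach the queued on_eos /
    # on_source_group_eos events before stop() is called.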
def update_play_cursor(self):
self.lock()
play_cursor_ring = lib.DWORD()
self._buffer.GetCurrentPosition(play_cursor_ring, None)
if play_cursor_ring.value < self._play_cursor_ring:
# Wrapped around
self._play_cursor += self._buffer_size - self._play_cursor_ring
self._play_cursor_ring = 0
self._play_cursor += play_cursor_ring.value - self._play_cursor_ring
self._play_cursor_ring = play_cursor_ring.value
# Dispatch pending events
pending_events = []
while self._events and self._events[0][0] <= self._play_cursor:
_, event = self._events.pop(0)
pending_events.append(event)
if _debug:
print 'Dispatching pending events:', pending_events
print 'Remaining events:', self._events
# Remove expired timestamps
while self._timestamps and self._timestamps[0][0] < self._play_cursor:
del self._timestamps[0]
self.unlock()
for event in pending_events:
event._sync_dispatch_to_player(self.player)
def get_write_size(self):
self.update_play_cursor()
self.lock()
play_cursor = self._play_cursor
write_cursor = self._write_cursor
self.unlock()
return self._buffer_size - (write_cursor - play_cursor)
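    # Illustration (not part of the original source): with the default
    # _buffer_size of 44800 bytes, if 50000 bytes have been written to the
    # infinite stream and playback has reached byte 30000, then
    # 44800 - (50000 - 30000) == 24800 bytes of the ring are free to refill.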
def write(self, audio_data, length):
# Pass audio_data=None to write silence
if length == 0:
return 0
self.lock()
p1 = ctypes.c_void_p()
l1 = lib.DWORD()
p2 = ctypes.c_void_p()
l2 = lib.DWORD()
self._buffer.Lock(self._write_cursor_ring, length,
ctypes.byref(p1), l1, ctypes.byref(p2), l2, 0)
assert length == l1.value + l2.value
if audio_data:
ctypes.memmove(p1, audio_data.data, l1.value)
audio_data.consume(l1.value, self.source_group.audio_format)
if l2.value:
ctypes.memmove(p2, audio_data.data, l2.value)
audio_data.consume(l2.value, self.source_group.audio_format)
else:
ctypes.memset(p1, 0, l1.value)
if l2.value:
ctypes.memset(p2, 0, l2.value)
self._buffer.Unlock(p1, l1, p2, l2)
self._write_cursor += length
self._write_cursor_ring += length
self._write_cursor_ring %= self._buffer_size
self.unlock()
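    # Illustration (not part of the original source): the ring cursor wraps
    # modulo _buffer_size, e.g. writing 1000 bytes while _write_cursor_ring is
    # 44500 leaves it at (44500 + 1000) % 44800 == 700, while the un-wrapped
    # _write_cursor keeps growing monotonically.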
def get_time(self):
self.lock()
if self._timestamps:
cursor, ts = self._timestamps[0]
result = ts + (self._play_cursor - cursor) / \
float(self.source_group.audio_format.bytes_per_second)
else:
result = None
self.unlock()
return result
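    # Worked example (illustrative only): _timestamps holds (cursor, timestamp)
    # pairs marking where each packet expires.  For 16-bit mono 22050 Hz audio
    # (44100 bytes per second), a head entry of (44100, 1.0) with the play
    # cursor at 66150 gives 1.0 + (66150 - 44100) / 44100.0 == 1.5 seconds.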
def set_volume(self, volume):
volume = _db(volume)
self.lock()
self._buffer.SetVolume(volume)
self.unlock()
def set_position(self, position):
if self._buffer3d:
x, y, z = position
self.lock()
self._buffer3d.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)
self.unlock()
def set_min_distance(self, min_distance):
if self._buffer3d:
self.lock()
self._buffer3d.SetMinDistance(min_distance, lib.DS3D_IMMEDIATE)
self.unlock()
def set_max_distance(self, max_distance):
if self._buffer3d:
self.lock()
self._buffer3d.SetMaxDistance(max_distance, lib.DS3D_IMMEDIATE)
self.unlock()
def set_pitch(self, pitch):
        frequency = int(pitch * self.source_group.audio_format.sample_rate)
self.lock()
self._buffer.SetFrequency(frequency)
self.unlock()
def set_cone_orientation(self, cone_orientation):
if self._buffer3d:
x, y, z = cone_orientation
self.lock()
self._buffer3d.SetConeOrientation(x, y, -z, lib.DS3D_IMMEDIATE)
self.unlock()
def set_cone_inner_angle(self, cone_inner_angle):
if self._buffer3d:
self._cone_inner_angle = int(cone_inner_angle)
self._set_cone_angles()
def set_cone_outer_angle(self, cone_outer_angle):
if self._buffer3d:
self._cone_outer_angle = int(cone_outer_angle)
self._set_cone_angles()
def _set_cone_angles(self):
inner = min(self._cone_inner_angle, self._cone_outer_angle)
outer = max(self._cone_inner_angle, self._cone_outer_angle)
self.lock()
self._buffer3d.SetConeAngles(inner, outer, lib.DS3D_IMMEDIATE)
self.unlock()
def set_cone_outer_gain(self, cone_outer_gain):
if self._buffer3d:
volume = _db(cone_outer_gain)
self.lock()
self._buffer3d.SetConeOutsideVolume(volume, lib.DS3D_IMMEDIATE)
self.unlock()
class DirectSoundDriver(mt_media.AbstractAudioDriver):
def __init__(self):
self._dsound = lib.IDirectSound()
lib.DirectSoundCreate(None, ctypes.byref(self._dsound), None)
        # A trick used by mplayer: use desktop as window handle since it
# would be complex to use pyglet window handles (and what to do when
# application is audio only?).
hwnd = _user32.GetDesktopWindow()
self._dsound.SetCooperativeLevel(hwnd, lib.DSSCL_NORMAL)
# Create primary buffer with 3D and volume capabilities
self._buffer = lib.IDirectSoundBuffer()
dsbd = lib.DSBUFFERDESC()
dsbd.dwSize = ctypes.sizeof(dsbd)
dsbd.dwFlags = (lib.DSBCAPS_CTRL3D |
lib.DSBCAPS_CTRLVOLUME |
lib.DSBCAPS_PRIMARYBUFFER)
self._dsound.CreateSoundBuffer(dsbd, ctypes.byref(self._buffer), None)
# Create listener
self._listener = lib.IDirectSound3DListener()
self._buffer.QueryInterface(lib.IID_IDirectSound3DListener,
ctypes.byref(self._listener))
# Create worker thread
self.worker = DirectSoundWorker()
self.worker.start()
def __del__(self):
try:
if self._buffer:
self.delete()
except:
pass
def create_audio_player(self, source_group, player):
return DirectSoundAudioPlayer(source_group, player)
def delete(self):
self.worker.stop()
self._buffer.Release()
self._buffer = None
self._listener.Release()
self._listener = None
# Listener API
def _set_volume(self, volume):
self._volume = volume
self._buffer.SetVolume(_db(volume))
def _set_position(self, position):
self._position = position
x, y, z = position
self._listener.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)
def _set_forward_orientation(self, orientation):
self._forward_orientation = orientation
self._set_orientation()
def _set_up_orientation(self, orientation):
self._up_orientation = orientation
self._set_orientation()
def _set_orientation(self):
x, y, z = self._forward_orientation
ux, uy, uz = self._up_orientation
self._listener.SetOrientation(x, y, -z, ux, uy, -uz, lib.DS3D_IMMEDIATE)
def create_audio_driver():
global driver
driver = DirectSoundDriver()
return driver
# Global driver needed for access to worker thread and _dsound
driver = None
| bsd-3-clause | -6,089,822,034,599,535,000 | -6,258,645,161,318,649,000 | 31.730483 | 80 | 0.54312 | false |
JCBarahona/edX | common/djangoapps/monkey_patch/django_utils_translation.py | 128 | 2819 | """
Monkey-patch `django.utils.translation` to not dump header info
Modify Django's translation module, such that the *gettext functions
always return an empty string when attempting to translate an empty
string. This overrides the default behavior [0]:
> It is convention with GNU gettext to include meta-data as the
> translation for the empty string.
Affected Methods:
- gettext
- ugettext
Note: The *ngettext and *pgettext functions are intentionally omitted,
as they already behave as expected. The *_lazy functions are implicitly
patched, as they wrap their nonlazy equivalents.
Django's translation module contains a good deal of indirection. For us
to patch the module with our own functions, we have to patch
`django.utils.translation._trans`. This ensures that the patched
behavior will still be used, even if code elsewhere caches a reference
to one of the translation functions. If you're curious, check out
Django's source code [1].
[0] https://docs.python.org/2.7/library/gettext.html#the-gnutranslations-class
[1] https://github.com/django/django/blob/1.4.8/django/utils/translation/__init__.py#L66
"""
from django.utils.translation import _trans as translation
import monkey_patch
ATTRIBUTES = [
'gettext',
'ugettext',
]
def is_patched():
"""
Check if the translation module has been monkey-patched
"""
patched = True
for attribute in ATTRIBUTES:
if not monkey_patch.is_patched(translation, attribute):
patched = False
break
return patched
def patch():
"""
Monkey-patch the translation functions
Affected Methods:
- gettext
- ugettext
"""
def decorate(function, message_default=u''):
"""
Decorate a translation function
Default message is a unicode string, but gettext overrides this
value to return a UTF8 string.
"""
def dont_translate_empty_string(message):
"""
Return the empty string when passed a falsey message
"""
if message:
message = function(message)
else:
message = message_default
return message
return dont_translate_empty_string
gettext = decorate(translation.gettext, '')
ugettext = decorate(translation.ugettext)
monkey_patch.patch(translation, 'gettext', gettext)
monkey_patch.patch(translation, 'ugettext', ugettext)
return is_patched()
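# Example of the effect (illustrative only, not part of this module): with the
# stock functions, ugettext(u'') returns the catalog meta-data block (the
# 'Project-Id-Version: ...' header dump); after patch() has run, ugettext(u'')
# returns u'' and gettext('') returns '', while non-empty messages are still
# translated normally.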
def unpatch():
"""
Un-monkey-patch the translation functions
"""
was_patched = False
for name in ATTRIBUTES:
# was_patched must be the second half of the or-clause, to avoid
# short-circuiting the expression
was_patched = monkey_patch.unpatch(translation, name) or was_patched
return was_patched
| agpl-3.0 | -3,803,502,739,759,274,500 | -305,587,531,064,138,940 | 29.978022 | 88 | 0.683221 | false |
rmcgibbo/mdtraj | tests/test_xtc.py | 5 | 12672 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import sys
import numpy as np
from mdtraj import io
from mdtraj.formats import XTCTrajectoryFile
from mdtraj.testing import eq
import pytest
@pytest.fixture()
def fn_xtc(get_fn):
return get_fn('frame0.xtc')
@pytest.fixture()
def pdb(get_fn):
return get_fn('native.pdb')
strides = (1, 2, 3, 4, 5, 7, 10, 11)
def test_read_chunk1(get_fn, fn_xtc):
with XTCTrajectoryFile(fn_xtc, 'r', chunk_size_multiplier=0.5) as f:
xyz, time, step, box = f.read()
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
assert eq(xyz, iofile['xyz'])
assert eq(step, iofile['step'])
assert eq(box, iofile['box'])
assert eq(time, iofile['time'])
def test_read_stride(get_fn, fn_xtc):
# read xtc with stride
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
for s in strides:
with XTCTrajectoryFile(fn_xtc) as f:
xyz, time, step, box = f.read(stride=s)
assert eq(xyz, iofile['xyz'][::s])
assert eq(step, iofile['step'][::s])
assert eq(box, iofile['box'][::s])
assert eq(time, iofile['time'][::s])
def test_read_stride_n_frames(get_fn, fn_xtc):
# read xtc with stride with n_frames
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
for s in strides:
with XTCTrajectoryFile(fn_xtc) as f:
xyz, time, step, box = f.read(n_frames=1000, stride=s)
assert eq(xyz, iofile['xyz'][::s])
assert eq(step, iofile['step'][::s])
assert eq(box, iofile['box'][::s])
assert eq(time, iofile['time'][::s])
def test_read_stride_offsets(get_fn, fn_xtc):
# read xtc with stride and offsets
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
for s in strides:
with XTCTrajectoryFile(fn_xtc) as f:
f.offsets # pre-compute byte offsets between frames
xyz, time, step, box = f.read(stride=s)
assert eq(xyz, iofile['xyz'][::s])
assert eq(step, iofile['step'][::s])
assert eq(box, iofile['box'][::s])
assert eq(time, iofile['time'][::s])
def test_read_stride_n_frames_offsets(get_fn, fn_xtc):
# read xtc with stride with n_frames and offsets
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
for s in strides:
with XTCTrajectoryFile(fn_xtc) as f:
f.offsets # pre-compute byte offsets between frames
xyz, time, step, box = f.read(n_frames=1000, stride=s)
assert eq(xyz, iofile['xyz'][::s])
assert eq(step, iofile['step'][::s])
assert eq(box, iofile['box'][::s])
assert eq(time, iofile['time'][::s])
def test_read_stride_switching_offsets(get_fn, fn_xtc):
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
with XTCTrajectoryFile(fn_xtc) as f:
f.offsets # pre-compute byte offsets between frames
# read the first 10 frames with stride of 2
s = 2
n_frames = 10
xyz, time, step, box = f.read(n_frames=n_frames, stride=s)
assert eq(xyz, iofile['xyz'][:n_frames*s:s])
assert eq(step, iofile['step'][:n_frames*s:s])
assert eq(box, iofile['box'][:n_frames*s:s])
assert eq(time, iofile['time'][:n_frames*s:s])
        # now read the rest with stride 3; the file cursor should now be at
        # frame 20, e.g. np.arange(0, n_frames*s + 1, 2)[-1] == 20
offset = f.tell()
assert offset == 20
s = 3
xyz, time, step, box = f.read(n_frames=None, stride=s)
assert eq(xyz, iofile['xyz'][offset::s])
assert eq(step, iofile['step'][offset::s])
assert eq(box, iofile['box'][offset::s])
assert eq(time, iofile['time'][offset::s])
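# Worked check for the test above (illustrative only): the first read visits
# frames np.arange(0, 10 * 2, 2) == [0, 2, ..., 18], leaving the file cursor at
# frame 20, so the stride-3 read that follows compares against
# iofile['xyz'][20::3].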
def test_read_atomindices_1(get_fn, fn_xtc):
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
with XTCTrajectoryFile(fn_xtc) as f:
xyz, time, step, box = f.read(atom_indices=[0, 1, 2])
assert eq(xyz, iofile['xyz'][:, [0, 1, 2]])
assert eq(step, iofile['step'])
assert eq(box, iofile['box'])
assert eq(time, iofile['time'])
def test_read_atomindices_w_stride(get_fn, fn_xtc):
# test case for bug: https://github.com/mdtraj/mdtraj/issues/1394
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
for stride in strides:
with XTCTrajectoryFile(fn_xtc) as f:
xyz, time, step, box = f.read(atom_indices=[0, 1, 2], stride=stride)
assert eq(xyz, iofile['xyz'][:, [0, 1, 2]][::stride])
assert eq(step, iofile['step'][::stride])
assert eq(box, iofile['box'][::stride])
assert eq(time, iofile['time'][::stride])
def test_read_atomindices_2(get_fn, fn_xtc):
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
with XTCTrajectoryFile(fn_xtc) as f:
xyz, time, step, box = f.read(atom_indices=slice(None, None, 2))
assert eq(xyz, iofile['xyz'][:, ::2])
assert eq(step, iofile['step'])
assert eq(box, iofile['box'])
assert eq(time, iofile['time'])
def test_read_chunk2(get_fn, fn_xtc):
with XTCTrajectoryFile(fn_xtc, 'r', chunk_size_multiplier=1) as f:
xyz, time, step, box = f.read()
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
assert eq(xyz, iofile['xyz'])
assert eq(step, iofile['step'])
assert eq(box, iofile['box'])
assert eq(time, iofile['time'])
def test_read_chunk3(get_fn, fn_xtc):
with XTCTrajectoryFile(fn_xtc, chunk_size_multiplier=2) as f:
xyz, time, step, box = f.read(n_frames=100)
iofile = io.loadh(get_fn('frame0.xtc.h5'), deferred=False)
assert eq(xyz, iofile['xyz'][:100])
assert eq(step, iofile['step'][:100])
assert eq(box, iofile['box'][:100])
assert eq(time, iofile['time'][:100])
def test_write_0(tmpdir, fn_xtc):
with XTCTrajectoryFile(fn_xtc) as f:
xyz = f.read()[0]
tmpfn = '{}/traj.xtc'.format(tmpdir)
f = XTCTrajectoryFile(tmpfn, 'w')
f.write(xyz)
f.close()
with XTCTrajectoryFile(tmpfn) as f:
xyz2, time2, step2, box2 = f.read()
eq(xyz, xyz2)
def test_write_1(tmpdir):
xyz = np.asarray(np.around(np.random.randn(100, 10, 3), 3), dtype=np.float32)
time = np.asarray(np.random.randn(100), dtype=np.float32)
step = np.arange(100)
box = np.asarray(np.random.randn(100, 3, 3), dtype=np.float32)
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w') as f:
f.write(xyz, time=time, step=step, box=box)
with XTCTrajectoryFile(tmpfn) as f:
xyz2, time2, step2, box2 = f.read()
eq(xyz, xyz2)
eq(time, time2)
eq(step, step2)
eq(box, box2)
def test_write_2(tmpdir):
xyz = np.asarray(np.around(np.random.randn(100, 10, 3), 3), dtype=np.float32)
time = np.asarray(np.random.randn(100), dtype=np.float32)
step = np.arange(100)
box = np.asarray(np.random.randn(100, 3, 3), dtype=np.float32)
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w') as f:
for i in range(len(xyz)):
f.write(xyz[i], time=time[i], step=step[i], box=box[i])
with XTCTrajectoryFile(tmpfn) as f:
xyz2, time2, step2, box2 = f.read()
eq(xyz, xyz2)
eq(time, time2)
eq(step, step2)
eq(box, box2)
def test_read_error_0(tmpdir):
tmpfn = '{}/traj.xtc'.format(tmpdir)
with pytest.raises(IOError):
with XTCTrajectoryFile(tmpfn, 'r') as f:
f.read()
def test_write_error_0(tmpdir):
xyz = np.asarray(np.random.randn(100, 3, 3), dtype=np.float32)
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w') as f:
with pytest.raises(ValueError):
f.read(xyz)
def test_read_error_1():
with pytest.raises(IOError):
XTCTrajectoryFile('/tmp/sdfsdfsdf')
def test_read_error_2(get_fn):
with pytest.raises(IOError):
XTCTrajectoryFile(get_fn('frame0.dcd')).read()
def test_xtc_write_wierd_0(tmpdir):
x0 = np.asarray(np.random.randn(100, 3, 3), dtype=np.float32)
x1 = np.asarray(np.random.randn(100, 9, 3), dtype=np.float32)
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w') as f:
f.write(x0)
with pytest.raises(ValueError):
f.write(x1)
xr = XTCTrajectoryFile(tmpfn).read()[0]
print(xr.shape)
def test_tell(get_fn):
with XTCTrajectoryFile(get_fn('frame0.xtc')) as f:
eq(f.tell(), 0)
f.read(101)
eq(f.tell(), 101)
f.read(3)
eq(f.tell(), 104)
def test_seek(get_fn):
reference = XTCTrajectoryFile(get_fn('frame0.xtc')).read()[0]
with XTCTrajectoryFile(get_fn('frame0.xtc')) as f:
eq(f.tell(), 0)
eq(f.read(1)[0][0], reference[0])
eq(f.tell(), 1)
xyz = f.read(1)[0][0]
eq(xyz, reference[1])
eq(f.tell(), 2)
f.seek(0)
eq(f.tell(), 0)
xyz = f.read(1)[0][0]
eq(f.tell(), 1)
eq(xyz, reference[0])
f.seek(5) # offset array is going to be built
assert len(f.offsets) == len(reference)
eq(f.read(1)[0][0], reference[5])
eq(f.tell(), 6)
f.seek(-5, 1)
eq(f.tell(), 1)
eq(f.read(1)[0][0], reference[1])
def test_seek_natoms9(tmpdir, get_fn):
    # create an xtc file with 9 atoms and seek it.
with XTCTrajectoryFile(get_fn('frame0.xtc'), 'r') as fh:
xyz = fh.read()[0][:, :9, :]
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w', force_overwrite=True) as f:
f.write(xyz)
with XTCTrajectoryFile(tmpfn, 'r') as f:
eq(f.read(1)[0].shape, (1, 9, 3))
eq(f.tell(), 1)
f.seek(99)
eq(f.read(1)[0].squeeze(), xyz[99])
# seek relative
f.seek(-1, 1)
eq(f.read(1)[0].squeeze(), xyz[99])
f.seek(0, 0)
eq(f.read(1)[0].squeeze(), xyz[0])
def test_seek_out_of_bounds(get_fn):
with XTCTrajectoryFile(get_fn('frame0.xtc'), 'r') as fh:
with pytest.raises(IOError):
fh.seek(10000000)
def test_ragged_1(tmpdir):
    # try first writing no box vectors, and then adding some
xyz = np.random.randn(100, 5, 3)
time = np.random.randn(100)
box = np.random.randn(100, 3, 3)
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w', force_overwrite=True) as f:
f.write(xyz)
with pytest.raises(ValueError):
f.write(xyz, time, box)
def test_ragged_2(tmpdir):
    # try first writing box vectors, and then omitting them
xyz = np.random.randn(100, 5, 3)
time = np.random.randn(100)
box = np.random.randn(100, 3, 3)
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w', force_overwrite=True) as f:
f.write(xyz, time=time, box=box)
with pytest.raises(ValueError):
f.write(xyz)
def test_short_traj(tmpdir):
tmpfn = '{}/traj.xtc'.format(tmpdir)
with XTCTrajectoryFile(tmpfn, 'w') as f:
f.write(np.random.uniform(size=(5, 100000, 3)))
with XTCTrajectoryFile(tmpfn, 'r') as f:
assert len(f) == 5, len(f)
not_on_win = pytest.mark.skipif(sys.platform.startswith('win'),
reason='Can not open file being written again due to file locking.')
@not_on_win
def test_flush(tmpdir):
tmpfn = '{}/traj.xtc'.format(tmpdir)
data = np.random.random((5, 100, 3))
with XTCTrajectoryFile(tmpfn, 'w') as f:
f.write(data)
f.flush()
# note that f is still open, so we can now try to read the contents flushed to disk.
with XTCTrajectoryFile(tmpfn, 'r') as f2:
out = f2.read()
np.testing.assert_allclose(out[0], data, atol=1E-3)
| lgpl-2.1 | -3,628,377,180,759,574,500 | 4,678,537,293,633,391,000 | 31.914286 | 100 | 0.599195 | false |
mujiansu/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/encodings/mac_arabic.py | 593 | 36723 | """ Python Character Mapping Codec generated from 'VENDORS/APPLE/ARABIC.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-arabic',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
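### Usage sketch (illustrative only, not part of the generated file)
# The tables below drive codecs.charmap_encode/charmap_decode, e.g.
# Codec().decode('\xc7') returns (u'\u0627', 1) (ARABIC LETTER ALEF) and
# Codec().encode(u'\u0627') returns ('\xc7', 1).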
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x0081: 0x00a0, # NO-BREAK SPACE, right-left
0x0082: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0084: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x0087: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x0088: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0089: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x008b: 0x06ba, # ARABIC LETTER NOON GHUNNA
0x008c: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x008d: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x008f: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x0090: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0091: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x0092: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x0093: 0x2026, # HORIZONTAL ELLIPSIS, right-left
0x0094: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x0095: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x0096: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x0097: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x0098: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x009b: 0x00f7, # DIVISION SIGN, right-left
0x009c: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x009d: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x009e: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00a0: 0x0020, # SPACE, right-left
0x00a1: 0x0021, # EXCLAMATION MARK, right-left
0x00a2: 0x0022, # QUOTATION MARK, right-left
0x00a3: 0x0023, # NUMBER SIGN, right-left
0x00a4: 0x0024, # DOLLAR SIGN, right-left
0x00a5: 0x066a, # ARABIC PERCENT SIGN
0x00a6: 0x0026, # AMPERSAND, right-left
0x00a7: 0x0027, # APOSTROPHE, right-left
0x00a8: 0x0028, # LEFT PARENTHESIS, right-left
0x00a9: 0x0029, # RIGHT PARENTHESIS, right-left
0x00aa: 0x002a, # ASTERISK, right-left
0x00ab: 0x002b, # PLUS SIGN, right-left
0x00ac: 0x060c, # ARABIC COMMA
0x00ad: 0x002d, # HYPHEN-MINUS, right-left
0x00ae: 0x002e, # FULL STOP, right-left
0x00af: 0x002f, # SOLIDUS, right-left
0x00b0: 0x0660, # ARABIC-INDIC DIGIT ZERO, right-left (need override)
0x00b1: 0x0661, # ARABIC-INDIC DIGIT ONE, right-left (need override)
0x00b2: 0x0662, # ARABIC-INDIC DIGIT TWO, right-left (need override)
0x00b3: 0x0663, # ARABIC-INDIC DIGIT THREE, right-left (need override)
0x00b4: 0x0664, # ARABIC-INDIC DIGIT FOUR, right-left (need override)
0x00b5: 0x0665, # ARABIC-INDIC DIGIT FIVE, right-left (need override)
0x00b6: 0x0666, # ARABIC-INDIC DIGIT SIX, right-left (need override)
0x00b7: 0x0667, # ARABIC-INDIC DIGIT SEVEN, right-left (need override)
0x00b8: 0x0668, # ARABIC-INDIC DIGIT EIGHT, right-left (need override)
0x00b9: 0x0669, # ARABIC-INDIC DIGIT NINE, right-left (need override)
0x00ba: 0x003a, # COLON, right-left
0x00bb: 0x061b, # ARABIC SEMICOLON
0x00bc: 0x003c, # LESS-THAN SIGN, right-left
0x00bd: 0x003d, # EQUALS SIGN, right-left
0x00be: 0x003e, # GREATER-THAN SIGN, right-left
0x00bf: 0x061f, # ARABIC QUESTION MARK
0x00c0: 0x274a, # EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
0x00c1: 0x0621, # ARABIC LETTER HAMZA
0x00c2: 0x0622, # ARABIC LETTER ALEF WITH MADDA ABOVE
0x00c3: 0x0623, # ARABIC LETTER ALEF WITH HAMZA ABOVE
0x00c4: 0x0624, # ARABIC LETTER WAW WITH HAMZA ABOVE
0x00c5: 0x0625, # ARABIC LETTER ALEF WITH HAMZA BELOW
0x00c6: 0x0626, # ARABIC LETTER YEH WITH HAMZA ABOVE
0x00c7: 0x0627, # ARABIC LETTER ALEF
0x00c8: 0x0628, # ARABIC LETTER BEH
0x00c9: 0x0629, # ARABIC LETTER TEH MARBUTA
0x00ca: 0x062a, # ARABIC LETTER TEH
0x00cb: 0x062b, # ARABIC LETTER THEH
0x00cc: 0x062c, # ARABIC LETTER JEEM
0x00cd: 0x062d, # ARABIC LETTER HAH
0x00ce: 0x062e, # ARABIC LETTER KHAH
0x00cf: 0x062f, # ARABIC LETTER DAL
0x00d0: 0x0630, # ARABIC LETTER THAL
0x00d1: 0x0631, # ARABIC LETTER REH
0x00d2: 0x0632, # ARABIC LETTER ZAIN
0x00d3: 0x0633, # ARABIC LETTER SEEN
0x00d4: 0x0634, # ARABIC LETTER SHEEN
0x00d5: 0x0635, # ARABIC LETTER SAD
0x00d6: 0x0636, # ARABIC LETTER DAD
0x00d7: 0x0637, # ARABIC LETTER TAH
0x00d8: 0x0638, # ARABIC LETTER ZAH
0x00d9: 0x0639, # ARABIC LETTER AIN
0x00da: 0x063a, # ARABIC LETTER GHAIN
0x00db: 0x005b, # LEFT SQUARE BRACKET, right-left
0x00dc: 0x005c, # REVERSE SOLIDUS, right-left
0x00dd: 0x005d, # RIGHT SQUARE BRACKET, right-left
0x00de: 0x005e, # CIRCUMFLEX ACCENT, right-left
0x00df: 0x005f, # LOW LINE, right-left
0x00e0: 0x0640, # ARABIC TATWEEL
0x00e1: 0x0641, # ARABIC LETTER FEH
0x00e2: 0x0642, # ARABIC LETTER QAF
0x00e3: 0x0643, # ARABIC LETTER KAF
0x00e4: 0x0644, # ARABIC LETTER LAM
0x00e5: 0x0645, # ARABIC LETTER MEEM
0x00e6: 0x0646, # ARABIC LETTER NOON
0x00e7: 0x0647, # ARABIC LETTER HEH
0x00e8: 0x0648, # ARABIC LETTER WAW
0x00e9: 0x0649, # ARABIC LETTER ALEF MAKSURA
0x00ea: 0x064a, # ARABIC LETTER YEH
0x00eb: 0x064b, # ARABIC FATHATAN
0x00ec: 0x064c, # ARABIC DAMMATAN
0x00ed: 0x064d, # ARABIC KASRATAN
0x00ee: 0x064e, # ARABIC FATHA
0x00ef: 0x064f, # ARABIC DAMMA
0x00f0: 0x0650, # ARABIC KASRA
0x00f1: 0x0651, # ARABIC SHADDA
0x00f2: 0x0652, # ARABIC SUKUN
0x00f3: 0x067e, # ARABIC LETTER PEH
0x00f4: 0x0679, # ARABIC LETTER TTEH
0x00f5: 0x0686, # ARABIC LETTER TCHEH
0x00f6: 0x06d5, # ARABIC LETTER AE
0x00f7: 0x06a4, # ARABIC LETTER VEH
0x00f8: 0x06af, # ARABIC LETTER GAF
0x00f9: 0x0688, # ARABIC LETTER DDAL
0x00fa: 0x0691, # ARABIC LETTER RREH
0x00fb: 0x007b, # LEFT CURLY BRACKET, right-left
0x00fc: 0x007c, # VERTICAL LINE, right-left
0x00fd: 0x007d, # RIGHT CURLY BRACKET, right-left
0x00fe: 0x0698, # ARABIC LETTER JEH
0x00ff: 0x06d2, # ARABIC LETTER YEH BARREE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> CONTROL CHARACTER
u'\x01' # 0x0001 -> CONTROL CHARACTER
u'\x02' # 0x0002 -> CONTROL CHARACTER
u'\x03' # 0x0003 -> CONTROL CHARACTER
u'\x04' # 0x0004 -> CONTROL CHARACTER
u'\x05' # 0x0005 -> CONTROL CHARACTER
u'\x06' # 0x0006 -> CONTROL CHARACTER
u'\x07' # 0x0007 -> CONTROL CHARACTER
u'\x08' # 0x0008 -> CONTROL CHARACTER
u'\t' # 0x0009 -> CONTROL CHARACTER
u'\n' # 0x000a -> CONTROL CHARACTER
u'\x0b' # 0x000b -> CONTROL CHARACTER
u'\x0c' # 0x000c -> CONTROL CHARACTER
u'\r' # 0x000d -> CONTROL CHARACTER
u'\x0e' # 0x000e -> CONTROL CHARACTER
u'\x0f' # 0x000f -> CONTROL CHARACTER
u'\x10' # 0x0010 -> CONTROL CHARACTER
u'\x11' # 0x0011 -> CONTROL CHARACTER
u'\x12' # 0x0012 -> CONTROL CHARACTER
u'\x13' # 0x0013 -> CONTROL CHARACTER
u'\x14' # 0x0014 -> CONTROL CHARACTER
u'\x15' # 0x0015 -> CONTROL CHARACTER
u'\x16' # 0x0016 -> CONTROL CHARACTER
u'\x17' # 0x0017 -> CONTROL CHARACTER
u'\x18' # 0x0018 -> CONTROL CHARACTER
u'\x19' # 0x0019 -> CONTROL CHARACTER
u'\x1a' # 0x001a -> CONTROL CHARACTER
u'\x1b' # 0x001b -> CONTROL CHARACTER
u'\x1c' # 0x001c -> CONTROL CHARACTER
u'\x1d' # 0x001d -> CONTROL CHARACTER
u'\x1e' # 0x001e -> CONTROL CHARACTER
u'\x1f' # 0x001f -> CONTROL CHARACTER
u' ' # 0x0020 -> SPACE, left-right
u'!' # 0x0021 -> EXCLAMATION MARK, left-right
u'"' # 0x0022 -> QUOTATION MARK, left-right
u'#' # 0x0023 -> NUMBER SIGN, left-right
u'$' # 0x0024 -> DOLLAR SIGN, left-right
u'%' # 0x0025 -> PERCENT SIGN, left-right
u'&' # 0x0026 -> AMPERSAND, left-right
u"'" # 0x0027 -> APOSTROPHE, left-right
u'(' # 0x0028 -> LEFT PARENTHESIS, left-right
u')' # 0x0029 -> RIGHT PARENTHESIS, left-right
u'*' # 0x002a -> ASTERISK, left-right
u'+' # 0x002b -> PLUS SIGN, left-right
u',' # 0x002c -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
u'-' # 0x002d -> HYPHEN-MINUS, left-right
u'.' # 0x002e -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
u'/' # 0x002f -> SOLIDUS, left-right
u'0' # 0x0030 -> DIGIT ZERO; in Arabic-script context, displayed as 0x0660 ARABIC-INDIC DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE; in Arabic-script context, displayed as 0x0661 ARABIC-INDIC DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO; in Arabic-script context, displayed as 0x0662 ARABIC-INDIC DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE; in Arabic-script context, displayed as 0x0663 ARABIC-INDIC DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR; in Arabic-script context, displayed as 0x0664 ARABIC-INDIC DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE; in Arabic-script context, displayed as 0x0665 ARABIC-INDIC DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX; in Arabic-script context, displayed as 0x0666 ARABIC-INDIC DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x0667 ARABIC-INDIC DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x0668 ARABIC-INDIC DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE; in Arabic-script context, displayed as 0x0669 ARABIC-INDIC DIGIT NINE
u':' # 0x003a -> COLON, left-right
u';' # 0x003b -> SEMICOLON, left-right
u'<' # 0x003c -> LESS-THAN SIGN, left-right
u'=' # 0x003d -> EQUALS SIGN, left-right
u'>' # 0x003e -> GREATER-THAN SIGN, left-right
u'?' # 0x003f -> QUESTION MARK, left-right
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET, left-right
u'\\' # 0x005c -> REVERSE SOLIDUS, left-right
u']' # 0x005d -> RIGHT SQUARE BRACKET, left-right
u'^' # 0x005e -> CIRCUMFLEX ACCENT, left-right
u'_' # 0x005f -> LOW LINE, left-right
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET, left-right
u'|' # 0x007c -> VERTICAL LINE, left-right
u'}' # 0x007d -> RIGHT CURLY BRACKET, left-right
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> CONTROL CHARACTER
u'\xc4' # 0x0080 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xa0' # 0x0081 -> NO-BREAK SPACE, right-left
u'\xc7' # 0x0082 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x0083 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x0084 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x0085 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x0086 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x0087 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x0088 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x0089 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x008a -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u06ba' # 0x008b -> ARABIC LETTER NOON GHUNNA
u'\xab' # 0x008c -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
u'\xe7' # 0x008d -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x008e -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x008f -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x0090 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0091 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x0092 -> LATIN SMALL LETTER I WITH ACUTE
u'\u2026' # 0x0093 -> HORIZONTAL ELLIPSIS, right-left
u'\xee' # 0x0094 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x0095 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x0096 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x0097 -> LATIN SMALL LETTER O WITH ACUTE
u'\xbb' # 0x0098 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
u'\xf4' # 0x0099 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x009a -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0x009b -> DIVISION SIGN, right-left
u'\xfa' # 0x009c -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x009d -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x009e -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x009f -> LATIN SMALL LETTER U WITH DIAERESIS
u' ' # 0x00a0 -> SPACE, right-left
u'!' # 0x00a1 -> EXCLAMATION MARK, right-left
u'"' # 0x00a2 -> QUOTATION MARK, right-left
u'#' # 0x00a3 -> NUMBER SIGN, right-left
u'$' # 0x00a4 -> DOLLAR SIGN, right-left
u'\u066a' # 0x00a5 -> ARABIC PERCENT SIGN
u'&' # 0x00a6 -> AMPERSAND, right-left
u"'" # 0x00a7 -> APOSTROPHE, right-left
u'(' # 0x00a8 -> LEFT PARENTHESIS, right-left
u')' # 0x00a9 -> RIGHT PARENTHESIS, right-left
u'*' # 0x00aa -> ASTERISK, right-left
u'+' # 0x00ab -> PLUS SIGN, right-left
u'\u060c' # 0x00ac -> ARABIC COMMA
u'-' # 0x00ad -> HYPHEN-MINUS, right-left
u'.' # 0x00ae -> FULL STOP, right-left
u'/' # 0x00af -> SOLIDUS, right-left
u'\u0660' # 0x00b0 -> ARABIC-INDIC DIGIT ZERO, right-left (need override)
u'\u0661' # 0x00b1 -> ARABIC-INDIC DIGIT ONE, right-left (need override)
u'\u0662' # 0x00b2 -> ARABIC-INDIC DIGIT TWO, right-left (need override)
u'\u0663' # 0x00b3 -> ARABIC-INDIC DIGIT THREE, right-left (need override)
u'\u0664' # 0x00b4 -> ARABIC-INDIC DIGIT FOUR, right-left (need override)
u'\u0665' # 0x00b5 -> ARABIC-INDIC DIGIT FIVE, right-left (need override)
u'\u0666' # 0x00b6 -> ARABIC-INDIC DIGIT SIX, right-left (need override)
u'\u0667' # 0x00b7 -> ARABIC-INDIC DIGIT SEVEN, right-left (need override)
u'\u0668' # 0x00b8 -> ARABIC-INDIC DIGIT EIGHT, right-left (need override)
u'\u0669' # 0x00b9 -> ARABIC-INDIC DIGIT NINE, right-left (need override)
u':' # 0x00ba -> COLON, right-left
u'\u061b' # 0x00bb -> ARABIC SEMICOLON
u'<' # 0x00bc -> LESS-THAN SIGN, right-left
u'=' # 0x00bd -> EQUALS SIGN, right-left
u'>' # 0x00be -> GREATER-THAN SIGN, right-left
u'\u061f' # 0x00bf -> ARABIC QUESTION MARK
u'\u274a' # 0x00c0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
u'\u0621' # 0x00c1 -> ARABIC LETTER HAMZA
u'\u0622' # 0x00c2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
u'\u0623' # 0x00c3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
u'\u0624' # 0x00c4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
u'\u0625' # 0x00c5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
u'\u0626' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
u'\u0627' # 0x00c7 -> ARABIC LETTER ALEF
u'\u0628' # 0x00c8 -> ARABIC LETTER BEH
u'\u0629' # 0x00c9 -> ARABIC LETTER TEH MARBUTA
u'\u062a' # 0x00ca -> ARABIC LETTER TEH
u'\u062b' # 0x00cb -> ARABIC LETTER THEH
u'\u062c' # 0x00cc -> ARABIC LETTER JEEM
u'\u062d' # 0x00cd -> ARABIC LETTER HAH
u'\u062e' # 0x00ce -> ARABIC LETTER KHAH
u'\u062f' # 0x00cf -> ARABIC LETTER DAL
u'\u0630' # 0x00d0 -> ARABIC LETTER THAL
u'\u0631' # 0x00d1 -> ARABIC LETTER REH
u'\u0632' # 0x00d2 -> ARABIC LETTER ZAIN
u'\u0633' # 0x00d3 -> ARABIC LETTER SEEN
u'\u0634' # 0x00d4 -> ARABIC LETTER SHEEN
u'\u0635' # 0x00d5 -> ARABIC LETTER SAD
u'\u0636' # 0x00d6 -> ARABIC LETTER DAD
u'\u0637' # 0x00d7 -> ARABIC LETTER TAH
u'\u0638' # 0x00d8 -> ARABIC LETTER ZAH
u'\u0639' # 0x00d9 -> ARABIC LETTER AIN
u'\u063a' # 0x00da -> ARABIC LETTER GHAIN
u'[' # 0x00db -> LEFT SQUARE BRACKET, right-left
u'\\' # 0x00dc -> REVERSE SOLIDUS, right-left
u']' # 0x00dd -> RIGHT SQUARE BRACKET, right-left
u'^' # 0x00de -> CIRCUMFLEX ACCENT, right-left
u'_' # 0x00df -> LOW LINE, right-left
u'\u0640' # 0x00e0 -> ARABIC TATWEEL
u'\u0641' # 0x00e1 -> ARABIC LETTER FEH
u'\u0642' # 0x00e2 -> ARABIC LETTER QAF
u'\u0643' # 0x00e3 -> ARABIC LETTER KAF
u'\u0644' # 0x00e4 -> ARABIC LETTER LAM
u'\u0645' # 0x00e5 -> ARABIC LETTER MEEM
u'\u0646' # 0x00e6 -> ARABIC LETTER NOON
u'\u0647' # 0x00e7 -> ARABIC LETTER HEH
u'\u0648' # 0x00e8 -> ARABIC LETTER WAW
u'\u0649' # 0x00e9 -> ARABIC LETTER ALEF MAKSURA
u'\u064a' # 0x00ea -> ARABIC LETTER YEH
u'\u064b' # 0x00eb -> ARABIC FATHATAN
u'\u064c' # 0x00ec -> ARABIC DAMMATAN
u'\u064d' # 0x00ed -> ARABIC KASRATAN
u'\u064e' # 0x00ee -> ARABIC FATHA
u'\u064f' # 0x00ef -> ARABIC DAMMA
u'\u0650' # 0x00f0 -> ARABIC KASRA
u'\u0651' # 0x00f1 -> ARABIC SHADDA
u'\u0652' # 0x00f2 -> ARABIC SUKUN
u'\u067e' # 0x00f3 -> ARABIC LETTER PEH
u'\u0679' # 0x00f4 -> ARABIC LETTER TTEH
u'\u0686' # 0x00f5 -> ARABIC LETTER TCHEH
u'\u06d5' # 0x00f6 -> ARABIC LETTER AE
u'\u06a4' # 0x00f7 -> ARABIC LETTER VEH
u'\u06af' # 0x00f8 -> ARABIC LETTER GAF
u'\u0688' # 0x00f9 -> ARABIC LETTER DDAL
u'\u0691' # 0x00fa -> ARABIC LETTER RREH
u'{' # 0x00fb -> LEFT CURLY BRACKET, right-left
u'|' # 0x00fc -> VERTICAL LINE, right-left
u'}' # 0x00fd -> RIGHT CURLY BRACKET, right-left
u'\u0698' # 0x00fe -> ARABIC LETTER JEH
u'\u06d2' # 0x00ff -> ARABIC LETTER YEH BARREE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # CONTROL CHARACTER
0x0001: 0x0001, # CONTROL CHARACTER
0x0002: 0x0002, # CONTROL CHARACTER
0x0003: 0x0003, # CONTROL CHARACTER
0x0004: 0x0004, # CONTROL CHARACTER
0x0005: 0x0005, # CONTROL CHARACTER
0x0006: 0x0006, # CONTROL CHARACTER
0x0007: 0x0007, # CONTROL CHARACTER
0x0008: 0x0008, # CONTROL CHARACTER
0x0009: 0x0009, # CONTROL CHARACTER
0x000a: 0x000a, # CONTROL CHARACTER
0x000b: 0x000b, # CONTROL CHARACTER
0x000c: 0x000c, # CONTROL CHARACTER
0x000d: 0x000d, # CONTROL CHARACTER
0x000e: 0x000e, # CONTROL CHARACTER
0x000f: 0x000f, # CONTROL CHARACTER
0x0010: 0x0010, # CONTROL CHARACTER
0x0011: 0x0011, # CONTROL CHARACTER
0x0012: 0x0012, # CONTROL CHARACTER
0x0013: 0x0013, # CONTROL CHARACTER
0x0014: 0x0014, # CONTROL CHARACTER
0x0015: 0x0015, # CONTROL CHARACTER
0x0016: 0x0016, # CONTROL CHARACTER
0x0017: 0x0017, # CONTROL CHARACTER
0x0018: 0x0018, # CONTROL CHARACTER
0x0019: 0x0019, # CONTROL CHARACTER
0x001a: 0x001a, # CONTROL CHARACTER
0x001b: 0x001b, # CONTROL CHARACTER
0x001c: 0x001c, # CONTROL CHARACTER
0x001d: 0x001d, # CONTROL CHARACTER
0x001e: 0x001e, # CONTROL CHARACTER
0x001f: 0x001f, # CONTROL CHARACTER
0x0020: 0x0020, # SPACE, left-right
0x0020: 0x00a0, # SPACE, right-left
0x0021: 0x0021, # EXCLAMATION MARK, left-right
0x0021: 0x00a1, # EXCLAMATION MARK, right-left
0x0022: 0x0022, # QUOTATION MARK, left-right
0x0022: 0x00a2, # QUOTATION MARK, right-left
0x0023: 0x0023, # NUMBER SIGN, left-right
0x0023: 0x00a3, # NUMBER SIGN, right-left
0x0024: 0x0024, # DOLLAR SIGN, left-right
0x0024: 0x00a4, # DOLLAR SIGN, right-left
0x0025: 0x0025, # PERCENT SIGN, left-right
0x0026: 0x0026, # AMPERSAND, left-right
0x0026: 0x00a6, # AMPERSAND, right-left
0x0027: 0x0027, # APOSTROPHE, left-right
0x0027: 0x00a7, # APOSTROPHE, right-left
0x0028: 0x0028, # LEFT PARENTHESIS, left-right
0x0028: 0x00a8, # LEFT PARENTHESIS, right-left
0x0029: 0x0029, # RIGHT PARENTHESIS, left-right
0x0029: 0x00a9, # RIGHT PARENTHESIS, right-left
0x002a: 0x002a, # ASTERISK, left-right
0x002a: 0x00aa, # ASTERISK, right-left
0x002b: 0x002b, # PLUS SIGN, left-right
0x002b: 0x00ab, # PLUS SIGN, right-left
0x002c: 0x002c, # COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
0x002d: 0x002d, # HYPHEN-MINUS, left-right
0x002d: 0x00ad, # HYPHEN-MINUS, right-left
0x002e: 0x002e, # FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
0x002e: 0x00ae, # FULL STOP, right-left
0x002f: 0x002f, # SOLIDUS, left-right
0x002f: 0x00af, # SOLIDUS, right-left
0x0030: 0x0030, # DIGIT ZERO; in Arabic-script context, displayed as 0x0660 ARABIC-INDIC DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE; in Arabic-script context, displayed as 0x0661 ARABIC-INDIC DIGIT ONE
0x0032: 0x0032, # DIGIT TWO; in Arabic-script context, displayed as 0x0662 ARABIC-INDIC DIGIT TWO
0x0033: 0x0033, # DIGIT THREE; in Arabic-script context, displayed as 0x0663 ARABIC-INDIC DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR; in Arabic-script context, displayed as 0x0664 ARABIC-INDIC DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE; in Arabic-script context, displayed as 0x0665 ARABIC-INDIC DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX; in Arabic-script context, displayed as 0x0666 ARABIC-INDIC DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN; in Arabic-script context, displayed as 0x0667 ARABIC-INDIC DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT; in Arabic-script context, displayed as 0x0668 ARABIC-INDIC DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE; in Arabic-script context, displayed as 0x0669 ARABIC-INDIC DIGIT NINE
0x003a: 0x003a, # COLON, left-right
0x003a: 0x00ba, # COLON, right-left
0x003b: 0x003b, # SEMICOLON, left-right
0x003c: 0x003c, # LESS-THAN SIGN, left-right
0x003c: 0x00bc, # LESS-THAN SIGN, right-left
0x003d: 0x003d, # EQUALS SIGN, left-right
0x003d: 0x00bd, # EQUALS SIGN, right-left
0x003e: 0x003e, # GREATER-THAN SIGN, left-right
0x003e: 0x00be, # GREATER-THAN SIGN, right-left
0x003f: 0x003f, # QUESTION MARK, left-right
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET, left-right
0x005b: 0x00db, # LEFT SQUARE BRACKET, right-left
0x005c: 0x005c, # REVERSE SOLIDUS, left-right
0x005c: 0x00dc, # REVERSE SOLIDUS, right-left
0x005d: 0x005d, # RIGHT SQUARE BRACKET, left-right
0x005d: 0x00dd, # RIGHT SQUARE BRACKET, right-left
0x005e: 0x005e, # CIRCUMFLEX ACCENT, left-right
0x005e: 0x00de, # CIRCUMFLEX ACCENT, right-left
0x005f: 0x005f, # LOW LINE, left-right
0x005f: 0x00df, # LOW LINE, right-left
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET, left-right
0x007b: 0x00fb, # LEFT CURLY BRACKET, right-left
0x007c: 0x007c, # VERTICAL LINE, left-right
0x007c: 0x00fc, # VERTICAL LINE, right-left
0x007d: 0x007d, # RIGHT CURLY BRACKET, left-right
0x007d: 0x00fd, # RIGHT CURLY BRACKET, right-left
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # CONTROL CHARACTER
0x00a0: 0x0081, # NO-BREAK SPACE, right-left
0x00ab: 0x008c, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x00bb: 0x0098, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
0x00c4: 0x0080, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c7: 0x0082, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0083, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x0084, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0085, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00dc: 0x0086, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00e0: 0x0088, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x0087, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0089, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x008a, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e7: 0x008d, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008f, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x008e, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0090, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0091, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x0092, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x0094, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x0095, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x0096, # LATIN SMALL LETTER N WITH TILDE
0x00f3: 0x0097, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0099, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x009a, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x009b, # DIVISION SIGN, right-left
0x00f9: 0x009d, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x009c, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x009e, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x009f, # LATIN SMALL LETTER U WITH DIAERESIS
0x060c: 0x00ac, # ARABIC COMMA
0x061b: 0x00bb, # ARABIC SEMICOLON
0x061f: 0x00bf, # ARABIC QUESTION MARK
0x0621: 0x00c1, # ARABIC LETTER HAMZA
0x0622: 0x00c2, # ARABIC LETTER ALEF WITH MADDA ABOVE
0x0623: 0x00c3, # ARABIC LETTER ALEF WITH HAMZA ABOVE
0x0624: 0x00c4, # ARABIC LETTER WAW WITH HAMZA ABOVE
0x0625: 0x00c5, # ARABIC LETTER ALEF WITH HAMZA BELOW
0x0626: 0x00c6, # ARABIC LETTER YEH WITH HAMZA ABOVE
0x0627: 0x00c7, # ARABIC LETTER ALEF
0x0628: 0x00c8, # ARABIC LETTER BEH
0x0629: 0x00c9, # ARABIC LETTER TEH MARBUTA
0x062a: 0x00ca, # ARABIC LETTER TEH
0x062b: 0x00cb, # ARABIC LETTER THEH
0x062c: 0x00cc, # ARABIC LETTER JEEM
0x062d: 0x00cd, # ARABIC LETTER HAH
0x062e: 0x00ce, # ARABIC LETTER KHAH
0x062f: 0x00cf, # ARABIC LETTER DAL
0x0630: 0x00d0, # ARABIC LETTER THAL
0x0631: 0x00d1, # ARABIC LETTER REH
0x0632: 0x00d2, # ARABIC LETTER ZAIN
0x0633: 0x00d3, # ARABIC LETTER SEEN
0x0634: 0x00d4, # ARABIC LETTER SHEEN
0x0635: 0x00d5, # ARABIC LETTER SAD
0x0636: 0x00d6, # ARABIC LETTER DAD
0x0637: 0x00d7, # ARABIC LETTER TAH
0x0638: 0x00d8, # ARABIC LETTER ZAH
0x0639: 0x00d9, # ARABIC LETTER AIN
0x063a: 0x00da, # ARABIC LETTER GHAIN
0x0640: 0x00e0, # ARABIC TATWEEL
0x0641: 0x00e1, # ARABIC LETTER FEH
0x0642: 0x00e2, # ARABIC LETTER QAF
0x0643: 0x00e3, # ARABIC LETTER KAF
0x0644: 0x00e4, # ARABIC LETTER LAM
0x0645: 0x00e5, # ARABIC LETTER MEEM
0x0646: 0x00e6, # ARABIC LETTER NOON
0x0647: 0x00e7, # ARABIC LETTER HEH
0x0648: 0x00e8, # ARABIC LETTER WAW
0x0649: 0x00e9, # ARABIC LETTER ALEF MAKSURA
0x064a: 0x00ea, # ARABIC LETTER YEH
0x064b: 0x00eb, # ARABIC FATHATAN
0x064c: 0x00ec, # ARABIC DAMMATAN
0x064d: 0x00ed, # ARABIC KASRATAN
0x064e: 0x00ee, # ARABIC FATHA
0x064f: 0x00ef, # ARABIC DAMMA
0x0650: 0x00f0, # ARABIC KASRA
0x0651: 0x00f1, # ARABIC SHADDA
0x0652: 0x00f2, # ARABIC SUKUN
0x0660: 0x00b0, # ARABIC-INDIC DIGIT ZERO, right-left (need override)
0x0661: 0x00b1, # ARABIC-INDIC DIGIT ONE, right-left (need override)
0x0662: 0x00b2, # ARABIC-INDIC DIGIT TWO, right-left (need override)
0x0663: 0x00b3, # ARABIC-INDIC DIGIT THREE, right-left (need override)
0x0664: 0x00b4, # ARABIC-INDIC DIGIT FOUR, right-left (need override)
0x0665: 0x00b5, # ARABIC-INDIC DIGIT FIVE, right-left (need override)
0x0666: 0x00b6, # ARABIC-INDIC DIGIT SIX, right-left (need override)
0x0667: 0x00b7, # ARABIC-INDIC DIGIT SEVEN, right-left (need override)
0x0668: 0x00b8, # ARABIC-INDIC DIGIT EIGHT, right-left (need override)
0x0669: 0x00b9, # ARABIC-INDIC DIGIT NINE, right-left (need override)
0x066a: 0x00a5, # ARABIC PERCENT SIGN
0x0679: 0x00f4, # ARABIC LETTER TTEH
0x067e: 0x00f3, # ARABIC LETTER PEH
0x0686: 0x00f5, # ARABIC LETTER TCHEH
0x0688: 0x00f9, # ARABIC LETTER DDAL
0x0691: 0x00fa, # ARABIC LETTER RREH
0x0698: 0x00fe, # ARABIC LETTER JEH
0x06a4: 0x00f7, # ARABIC LETTER VEH
0x06af: 0x00f8, # ARABIC LETTER GAF
0x06ba: 0x008b, # ARABIC LETTER NOON GHUNNA
0x06d2: 0x00ff, # ARABIC LETTER YEH BARREE
0x06d5: 0x00f6, # ARABIC LETTER AE
0x2026: 0x0093, # HORIZONTAL ELLIPSIS, right-left
0x274a: 0x00c0, # EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
}
| apache-2.0 | 6,561,660,733,045,871,000 | 2,138,992,958,955,448,000 | 51.611748 | 122 | 0.611225 | false |
thomasrogers03/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/config/messages.py | 119 | 1677 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# These must be in sync with webkit-patch's AbstractQueue.
pass_status = "Pass"
fail_status = "Fail"
retry_status = "Retry"
error_status = "Error"
| bsd-3-clause | -4,463,235,067,037,396,000 | -7,047,661,985,475,286,000 | 49.818182 | 72 | 0.776983 | false |
hynnet/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/distutils/emxccompiler.py | 250 | 11931 | """distutils.emxccompiler
Provides the EMXCCompiler class, a subclass of UnixCCompiler that
handles the EMX port of the GNU C compiler to OS/2.
"""
# issues:
#
# * OS/2 insists that DLLs can have names no longer than 8 characters
# We put export_symbols in a def-file, as though the DLL can have
# an arbitrary length name, but truncate the output filename.
#
# * only use OMF objects and use LINK386 as the linker (-Zomf)
#
# * always build for multithreading (-Zmt) as the accompanying OS/2 port
# of Python is only distributed with threads enabled.
#
# tested configurations:
#
# * EMX gcc 2.81/EMX 0.9d fix03
__revision__ = "$Id$"
import os,sys,copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
class EMXCCompiler (UnixCCompiler):
compiler_type = 'emx'
obj_extension = ".obj"
static_lib_extension = ".lib"
shared_lib_extension = ".dll"
static_lib_format = "%s%s"
shared_lib_format = "%s%s"
res_extension = ".res" # compiled resource file
exe_extension = ".exe"
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
UnixCCompiler.__init__ (self, verbose, dry_run, force)
(status, details) = check_config_h()
self.debug_print("Python's GCC status: %s (details: %s)" %
(status, details))
if status is not CONFIG_H_OK:
self.warn(
"Python's pyconfig.h doesn't seem to support your compiler. " +
("Reason: %s." % details) +
"Compiling may fail because of undefined preprocessor macros.")
(self.gcc_version, self.ld_version) = \
get_versions()
self.debug_print(self.compiler_type + ": gcc %s, ld %s\n" %
(self.gcc_version,
self.ld_version) )
# Hard-code GCC because that's what this is all about.
# XXX optimization, warnings etc. should be customizable.
self.set_executables(compiler='gcc -Zomf -Zmt -O3 -fomit-frame-pointer -mprobe -Wall',
compiler_so='gcc -Zomf -Zmt -O3 -fomit-frame-pointer -mprobe -Wall',
linker_exe='gcc -Zomf -Zmt -Zcrtdll',
linker_so='gcc -Zomf -Zmt -Zcrtdll -Zdll')
# want the gcc library statically linked (so that we don't have
# to distribute a version dependent on the compiler we have)
self.dll_libraries=["gcc"]
# __init__ ()
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
if ext == '.rc':
# gcc requires '.rc' compiled to binary ('.res') files !!!
try:
self.spawn(["rc", "-r", src])
except DistutilsExecError, msg:
raise CompileError, msg
else: # for other files use the C-compiler
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError, msg
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# use separate copies, so we can modify the lists
extra_preargs = copy.copy(extra_preargs or [])
libraries = copy.copy(libraries or [])
objects = copy.copy(objects or [])
# Additional libraries
libraries.extend(self.dll_libraries)
# handle export symbols by creating a def-file
# with executables this only works with gcc/ld as linker
if ((export_symbols is not None) and
(target_desc != self.EXECUTABLE)):
# (The linker doesn't do anything if output is up-to-date.
# So it would probably better to check if we really need this,
# but for this we had to insert some unchanged parts of
# UnixCCompiler, and this is not what we want.)
# we want to put some files in the same directory as the
# object files are, build_temp doesn't help much
# where are the object files
temp_dir = os.path.dirname(objects[0])
# name of dll to give the helper files the same base name
(dll_name, dll_extension) = os.path.splitext(
os.path.basename(output_filename))
# generate the filenames for these files
def_file = os.path.join(temp_dir, dll_name + ".def")
# Generate .def file
contents = [
"LIBRARY %s INITINSTANCE TERMINSTANCE" % \
os.path.splitext(os.path.basename(output_filename))[0],
"DATA MULTIPLE NONSHARED",
"EXPORTS"]
for sym in export_symbols:
contents.append(' "%s"' % sym)
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# next add options for def-file and to creating import libraries
# for gcc/ld the def-file is specified as any other object files
objects.append(def_file)
#end: if ((export_symbols is not None) and
# (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
# who wants symbols and a many times larger output file
# should explicitly switch the debug mode on
# otherwise we let dllwrap/ld strip the output file
# (On my machine: 10KB < stripped_file < ??100KB
# unstripped_file = stripped_file + XXX KB
# ( XXX=254 for a typical python extension))
if not debug:
extra_preargs.append("-s")
UnixCCompiler.link(self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None, # export_symbols, we do this in our def-file
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang)
# link ()
# -- Miscellaneous methods -----------------------------------------
# override the object_filenames method from CCompiler to
# support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
# override the find_library_file method from UnixCCompiler
# to deal with file naming/searching differences
def find_library_file(self, dirs, lib, debug=0):
shortlib = '%s.lib' % lib
longlib = 'lib%s.lib' % lib # this form very rare
# get EMX's default library directory search path
try:
emx_dirs = os.environ['LIBRARY_PATH'].split(';')
except KeyError:
emx_dirs = []
for dir in dirs + emx_dirs:
shortlibp = os.path.join(dir, shortlib)
longlibp = os.path.join(dir, longlib)
if os.path.exists(shortlibp):
return shortlibp
elif os.path.exists(longlibp):
return longlibp
# Oops, didn't find it in *any* of 'dirs'
return None
# class EMXCCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using a unmodified
# version.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
"""Check if the current Python installation (specifically, pyconfig.h)
appears amenable to building extensions with GCC. Returns a tuple
(status, details), where 'status' is one of the following constants:
CONFIG_H_OK
all is well, go ahead and compile
CONFIG_H_NOTOK
doesn't look good
CONFIG_H_UNCERTAIN
not sure -- unable to read pyconfig.h
'details' is a human-readable string explaining the situation.
Note there are two ways to conclude "OK": either 'sys.version' contains
the string "GCC" (implying that this Python was built with GCC), or the
installed "pyconfig.h" contains the string "__GNUC__".
"""
# XXX since this function also checks sys.version, it's not strictly a
# "pyconfig.h" check -- should probably be renamed...
from distutils import sysconfig
import string
# if sys.version contains GCC then python was compiled with
# GCC, and the pyconfig.h file should be OK
if string.find(sys.version,"GCC") >= 0:
return (CONFIG_H_OK, "sys.version mentions 'GCC'")
fn = sysconfig.get_config_h_filename()
try:
        # It would probably be better to read single lines to search.
# But we do this only once, and it is fast enough
f = open(fn)
try:
s = f.read()
finally:
f.close()
except IOError, exc:
# if we can't read this file, we cannot say it is wrong
# the compiler will complain later about this file as missing
return (CONFIG_H_UNCERTAIN,
"couldn't read '%s': %s" % (fn, exc.strerror))
else:
# "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
if string.find(s,"__GNUC__") >= 0:
return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
else:
return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
""" Try to find out the versions of gcc and ld.
If not possible it returns None for it.
"""
from distutils.version import StrictVersion
from distutils.spawn import find_executable
import re
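    # A note on the parsing below: 'gcc -dumpversion' prints a bare version
    # string (e.g. "2.8.1" -- an illustrative value), which the regex captures.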
gcc_exe = find_executable('gcc')
if gcc_exe:
out = os.popen(gcc_exe + ' -dumpversion','r')
try:
out_string = out.read()
finally:
out.close()
result = re.search('(\d+\.\d+\.\d+)',out_string)
if result:
gcc_version = StrictVersion(result.group(1))
else:
gcc_version = None
else:
gcc_version = None
# EMX ld has no way of reporting version number, and we use GCC
# anyway - so we can link OMF DLLs
ld_version = None
return (gcc_version, ld_version)
| gpl-2.0 | 7,356,822,141,171,753,000 | -5,459,411,481,052,266,000 | 36.401254 | 97 | 0.558377 | false |
arenadata/ambari | ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py | 2 | 5041 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.exceptions import Fail
from resource_management.core.source import InlineTemplate, Template
from resource_management.core.resources.system import Directory, File
from resource_management.libraries.functions.decorator import retry
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions import solr_cloud_util
def setup_infra_solr(name = None):
import params
if name == 'server':
Directory([params.infra_solr_log_dir, params.infra_solr_piddir,
params.infra_solr_datadir, params.infra_solr_data_resources_dir],
mode=0755,
cd_access='a',
create_parents=True,
owner=params.infra_solr_user,
group=params.user_group
)
Directory([params.solr_dir, params.infra_solr_conf],
mode=0755,
cd_access='a',
owner=params.infra_solr_user,
group=params.user_group,
create_parents=True,
recursive_ownership=True
)
File(params.infra_solr_log,
mode=0644,
owner=params.infra_solr_user,
group=params.user_group,
content=''
)
File(format("{infra_solr_conf}/infra-solr-env.sh"),
content=InlineTemplate(params.solr_env_content),
mode=0755,
owner=params.infra_solr_user,
group=params.user_group
)
File(format("{infra_solr_datadir}/solr.xml"),
content=InlineTemplate(params.solr_xml_content),
owner=params.infra_solr_user,
group=params.user_group
)
File(format("{infra_solr_conf}/log4j.properties"),
content=InlineTemplate(params.solr_log4j_content),
owner=params.infra_solr_user,
group=params.user_group
)
custom_security_json_location = format("{infra_solr_conf}/custom-security.json")
File(custom_security_json_location,
content=InlineTemplate(params.infra_solr_security_json_content),
owner=params.infra_solr_user,
group=params.user_group,
mode=0640
)
jaas_file = params.infra_solr_jaas_file if params.security_enabled else None
url_scheme = 'https' if params.infra_solr_ssl_enabled else 'http'
create_ambari_solr_znode()
security_json_file_location = custom_security_json_location \
if params.infra_solr_security_json_content and str(params.infra_solr_security_json_content).strip() \
else format("{infra_solr_conf}/security.json") # security.json file to upload
if params.security_enabled:
File(format("{infra_solr_jaas_file}"),
content=Template("infra_solr_jaas.conf.j2"),
owner=params.infra_solr_user)
File(format("{infra_solr_conf}/security.json"),
content=Template("infra-solr-security.json.j2"),
owner=params.infra_solr_user,
group=params.user_group,
mode=0640)
solr_cloud_util.set_cluster_prop(
zookeeper_quorum=params.zookeeper_quorum,
solr_znode=params.infra_solr_znode,
java64_home=params.java64_home,
prop_name="urlScheme",
prop_value=url_scheme,
jaas_file=jaas_file
)
solr_cloud_util.setup_kerberos_plugin(
zookeeper_quorum=params.zookeeper_quorum,
solr_znode=params.infra_solr_znode,
jaas_file=jaas_file,
java64_home=params.java64_home,
secure=params.security_enabled,
security_json_location=security_json_file_location
)
if params.security_enabled:
solr_cloud_util.secure_solr_znode(
zookeeper_quorum=params.zookeeper_quorum,
solr_znode=params.infra_solr_znode,
jaas_file=jaas_file,
java64_home=params.java64_home,
sasl_users_str=params.infra_solr_sasl_user
)
elif name == 'client':
solr_cloud_util.setup_solr_client(params.config)
else :
raise Fail('Nor client or server were selected to install.')
@retry(times=30, sleep_time=5, err_class=Fail)
def create_ambari_solr_znode():
import params
solr_cloud_util.create_znode(
zookeeper_quorum=params.zookeeper_quorum,
solr_znode=params.infra_solr_znode,
java64_home=params.java64_home,
retry=30, interval=5) | apache-2.0 | -581,820,209,801,189,100 | -2,880,414,495,708,933,600 | 34.013889 | 107 | 0.676255 | false |
alqfahad/odoo | addons/fetchmail/__init__.py | 437 | 1120 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import fetchmail
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,563,658,449,658,689,500 | 7,184,539,857,826,001,000 | 42.076923 | 80 | 0.624107 | false |
MobinRanjbar/hue | desktop/core/ext-py/South-1.0.2/south/introspection_plugins/geodjango.py | 153 | 1286 | """
GeoDjango introspection rules
"""
import django
from django.conf import settings
from south.modelsinspector import add_introspection_rules
has_gis = "django.contrib.gis" in settings.INSTALLED_APPS
if has_gis:
# Alright,import the field
from django.contrib.gis.db.models.fields import GeometryField
# Make some introspection rules
if django.VERSION[0] == 1 and django.VERSION[1] >= 1:
# Django 1.1's gis module renamed these.
rules = [
(
(GeometryField, ),
[],
{
"srid": ["srid", {"default": 4326}],
"spatial_index": ["spatial_index", {"default": True}],
"dim": ["dim", {"default": 2}],
"geography": ["geography", {"default": False}],
},
),
]
else:
rules = [
(
(GeometryField, ),
[],
{
"srid": ["_srid", {"default": 4326}],
"spatial_index": ["_index", {"default": True}],
"dim": ["_dim", {"default": 2}],
},
),
]
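    # Each rule above is a (field classes, positional-arg specs, keyword-arg specs)
    # triple; every keyword entry maps a constructor argument to [the attribute
    # holding its value on the field instance, extra options such as a default].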
# Install them
add_introspection_rules(rules, ["^django\.contrib\.gis"]) | apache-2.0 | -7,300,738,519,964,498,000 | 148,479,085,157,115,260 | 27.6 | 74 | 0.457232 | false |
rcatwood/Savu | savu/plugins/loaders/image_loader.py | 1 | 3329 | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: temp_loader
:platform: Unix
:synopsis: A class for loading standard tomography data in a variety of
formats.
.. moduleauthor:: Nicola Wadeson <[email protected]>
"""
import h5py
import tempfile
import numpy as np
from savu.plugins.base_loader import BaseLoader
from savu.plugins.utils import register_plugin
@register_plugin
class ImageLoader(BaseLoader):
"""
A class to load tomography data from a Nexus file
:param image_type: Type of image. Choose from 'FabIO'. Default: 'FabIO'.
:param angles: A python statement to be evaluated or a file. Default: None.
:param frame_dim: Which dimension requires stitching? Default: 0.
"""
def __init__(self, name='ImageLoader'):
super(ImageLoader, self).__init__(name)
def setup(self):
exp = self.exp
data_obj = exp.create_data_object('in_data', 'tomo')
rot = 0
detY = 1
detX = 2
data_obj.set_axis_labels('rotation_angle.degrees',
'detector_y.pixel',
'detector_x.pixel')
data_obj.add_pattern('PROJECTION', core_dir=(detX, detY),
slice_dir=(rot,))
data_obj.add_pattern('SINOGRAM', core_dir=(detX, rot),
slice_dir=(detY,))
dtype = self.parameters['image_type']
mod = __import__('savu.data.data_structures.data_type', fromlist=dtype)
clazz = getattr(mod, dtype)
path = exp.meta_data.get_meta_data("data_file")
data_obj.data = clazz(path, data_obj, [self.parameters['frame_dim']])
self.set_rotation_angles(data_obj)
# dummy file
filename = path.split('/')[-1] + '.h5'
data_obj.backing_file = \
h5py.File(tempfile.mkdtemp() + '/' + filename, 'a')
data_obj.set_shape(data_obj.data.get_shape())
self.set_data_reduction_params(data_obj)
def set_rotation_angles(self, data_obj):
angles = self.parameters['angles']
if angles is None:
angles = np.linspace(0, 180, data_obj.data.get_shape()[0])
else:
try:
exec("angles = " + angles)
except:
try:
angles = np.loadtxt(angles)
except:
raise Exception('Cannot set angles in loader.')
n_angles = len(angles)
data_angles = data_obj.data.get_shape()[0]
        if data_angles != n_angles:
            raise Exception("The number of angles %s does not match the data "
                            "dimension length %s" % (n_angles, data_angles))
data_obj.meta_data.set_meta_data("rotation_angle", angles)
| gpl-3.0 | 5,046,864,489,229,754,000 | 7,959,879,910,903,683,000 | 33.319588 | 79 | 0.606789 | false |
courtarro/gnuradio | gr-channels/python/channels/__init__.py | 54 | 1350 | #
# Copyright 2012-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Blocks for channel models and related functions.
'''
import os
try:
from channels_swig import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "..", "..", "swig"))
from channels_swig import *
# Blocks for Hardware Impairments
from amp_bal import *
from conj_fs_iqcorr import *
from distortion_2_gen import *
from distortion_3_gen import *
from iqbal_gen import *
from impairments import *
from phase_bal import *
from phase_noise_gen import *
from quantizer import *
| gpl-3.0 | -7,279,704,678,111,954,000 | -5,538,865,342,100,534,000 | 29.681818 | 70 | 0.737037 | false |
suyashphadtare/test | erpnext/hr/doctype/appraisal/appraisal.py | 1 | 2344 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, getdate
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name
class Appraisal(Document):
def validate(self):
if not self.status:
self.status = "Draft"
set_employee_name(self)
self.validate_dates()
self.validate_existing_appraisal()
self.calculate_total()
def get_employee_name(self):
self.employee_name = frappe.db.get_value("Employee", self.employee, "employee_name")
return self.employee_name
def validate_dates(self):
if getdate(self.start_date) > getdate(self.end_date):
frappe.throw(_("End Date can not be less than Start Date"))
def validate_existing_appraisal(self):
chk = frappe.db.sql("""select name from tabAppraisal where employee=%s
and (status='Submitted' or status='Completed')
and ((start_date>=%s and start_date<=%s)
or (end_date>=%s and end_date<=%s))""",
(self.employee,self.start_date,self.end_date,self.start_date,self.end_date))
if chk:
frappe.throw(_("Appraisal {0} created for Employee {1} in the given date range").format(chk[0][0], self.employee_name))
def calculate_total(self):
total, total_w = 0, 0
for d in self.get('appraisal_details'):
if d.score:
d.score_earned = flt(d.score) * flt(d.per_weightage) / 100
total = total + d.score_earned
total_w += flt(d.per_weightage)
if int(total_w) != 100:
frappe.throw(_("Total weightage assigned should be 100%. It is {0}").format(str(total_w) + "%"))
if frappe.db.get_value("Employee", self.employee, "user_id") != \
frappe.session.user and total == 0:
frappe.throw(_("Total cannot be zero"))
self.total_score = total
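		# Worked example with illustrative numbers: two goals weighted 60 and 40
		# with scores 4 and 5 give 4*60/100 + 5*40/100 = 2.4 + 2.0 = 4.4 as the
		# total score, and the weightages sum to the required 100.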
def on_submit(self):
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
frappe.db.set(self, 'status', 'Cancelled')
@frappe.whitelist()
def fetch_appraisal_template(source_name, target_doc=None):
target_doc = get_mapped_doc("Appraisal Template", source_name, {
"Appraisal Template": {
"doctype": "Appraisal",
},
"Appraisal Template Goal": {
"doctype": "Appraisal Goal",
}
}, target_doc)
return target_doc
| agpl-3.0 | 2,421,168,567,169,664,000 | 3,035,326,484,753,892,400 | 30.253333 | 122 | 0.697099 | false |
chubbymaggie/miasm | example/disasm/callback.py | 2 | 1866 | from miasm2.core.bin_stream import bin_stream_str
from miasm2.core.asmblock import AsmLabel, AsmConstraint, expr_is_label
from miasm2.arch.x86.disasm import dis_x86_32, cb_x86_funcs
def cb_x86_callpop(cur_bloc, symbol_pool, *args, **kwargs):
"""
1000: call 1005
1005: pop
Will give:
1000: push 1005
1005: pop
"""
# Pattern matching
if len(cur_bloc.lines) < 1:
return
## We want to match a CALL, always the last line of a basic block
last_instr = cur_bloc.lines[-1]
if last_instr.name != 'CALL':
return
## The destination must be a label
dst = last_instr.args[0]
if not expr_is_label(dst):
return
## The destination must be the next instruction
if dst.name.offset != last_instr.offset + last_instr.l:
return
# Update instruction instance
last_instr.name = 'PUSH'
# Update next blocks to process in the disassembly engine
cur_bloc.bto.clear()
cur_bloc.add_cst(dst.name.offset, AsmConstraint.c_next, symbol_pool)
# Prepare a tiny shellcode
shellcode = ''.join(["\xe8\x00\x00\x00\x00", # CALL $
"X", # POP EAX
"\xc3", # RET
])
bin_stream = bin_stream_str(shellcode)
mdis = dis_x86_32(bin_stream)
print "Without callback:\n"
blocks = mdis.dis_multibloc(0)
print "\n".join(str(block) for block in blocks)
# Enable callback
cb_x86_funcs.append(cb_x86_callpop)
## Other method:
## mdis.dis_bloc_callback = cb_x86_callpop
# Clean disassembly cache
mdis.job_done.clear()
print "=" * 40
print "With callback:\n"
blocks_after = mdis.dis_multibloc(0)
print "\n".join(str(block) for block in blocks_after)
# Ensure the callback has been called
assert blocks.heads()[0].lines[0].name == "CALL"
assert blocks_after.heads()[0].lines[0].name == "PUSH"
| gpl-2.0 | -7,606,784,549,028,681,000 | -8,588,658,183,188,905,000 | 26.850746 | 72 | 0.63612 | false |
apache/airflow | airflow/providers/google/cloud/example_dags/example_gcs_to_bigquery.py | 10 | 2303 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example DAG using GCSToBigQueryOperator.
"""
import os
from airflow import models
from airflow.providers.google.cloud.operators.bigquery import (
BigQueryCreateEmptyDatasetOperator,
BigQueryDeleteDatasetOperator,
)
from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator
from airflow.utils.dates import days_ago
DATASET_NAME = os.environ.get("GCP_DATASET_NAME", 'airflow_test')
TABLE_NAME = os.environ.get("GCP_TABLE_NAME", 'gcs_to_bq_table')
dag = models.DAG(
dag_id='example_gcs_to_bigquery_operator',
start_date=days_ago(2),
schedule_interval=None,
tags=['example'],
)
create_test_dataset = BigQueryCreateEmptyDatasetOperator(
task_id='create_airflow_test_dataset', dataset_id=DATASET_NAME, dag=dag
)
# [START howto_operator_gcs_to_bigquery]
load_csv = GCSToBigQueryOperator(
task_id='gcs_to_bigquery_example',
bucket='cloud-samples-data',
source_objects=['bigquery/us-states/us-states.csv'],
destination_project_dataset_table=f"{DATASET_NAME}.{TABLE_NAME}",
schema_fields=[
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'post_abbr', 'type': 'STRING', 'mode': 'NULLABLE'},
],
write_disposition='WRITE_TRUNCATE',
dag=dag,
)
# [END howto_operator_gcs_to_bigquery]
delete_test_dataset = BigQueryDeleteDatasetOperator(
task_id='delete_airflow_test_dataset', dataset_id=DATASET_NAME, delete_contents=True, dag=dag
)
create_test_dataset >> load_csv >> delete_test_dataset
| apache-2.0 | -4,140,008,365,617,579,000 | -2,941,666,870,225,786,400 | 33.893939 | 97 | 0.736865 | false |
rabipanda/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 130 | 9577 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.ops import histogram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class Strict1dCumsumTest(test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = constant_op.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = constant_op.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = constant_op.constant([3], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)
scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)
score_range = [0, 1.]
auc, update_op = histogram_ops.auc_using_histogram(labels, scores,
score_range)
variables.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(
nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(
nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(
nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(
nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(
nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]
with self.test_session():
labels = array_ops.placeholder(dtypes.bool, shape=[num_records])
scores = array_ops.placeholder(dtypes.float32, shape=[num_records])
auc, update_op = histogram_ops.auc_using_histogram(
labels, scores, score_range, nbins=nbins)
variables.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
# also checked this against sklearn.metrics.roc_auc_curve.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
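  # Worked illustration of the formula above: desired_auc = 0.75 gives
  # x = 2 * 0.75 - 1 = 0.5, so True scores are drawn from U[0.5, 1] while False
  # scores come from U[0, 1] (both later rescaled to score_range).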
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
if __name__ == '__main__':
test.main()
| apache-2.0 | -8,774,506,751,284,465,000 | 5,770,849,766,390,128,000 | 36.556863 | 80 | 0.602067 | false |
dynaryu/inasafe | safe/gui/tools/help/needs_manager_help.py | 2 | 10496 | # coding=utf-8
"""Context help for minimum needs manager dialog."""
from safe.utilities.i18n import tr
from safe import messaging as m
from safe.messaging import styles
INFO_STYLE = styles.INFO_STYLE
__author__ = 'ismailsunni'
def needs_manager_helps():
"""Help message for Batch Dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message
def heading():
"""Helper method that returns just the header.
This method was added so that the text could be reused in the
other contexts.
.. versionadded:: 3.2.2
:returns: A heading object.
:rtype: safe.messaging.heading.Heading
"""
message = m.Heading(tr('Minimum needs manager help'), **INFO_STYLE)
return message
def content():
"""Helper method that returns just the content.
This method was added so that the text could be reused in the
dock_help module.
.. versionadded:: 3.2.2
:returns: A message object without brand element.
:rtype: safe.messaging.message.Message
"""
message = m.Message()
message.add(m.Paragraph(tr(
'During and after a disaster, providing for the basic human minimum '
'needs of food, water, hygiene and shelter is an important element of '
'your contingency plan. InaSAFE has a customisable minimum needs '
'system that allows you to define country or region specific '
'requirements for compiling a needs report where the exposure '
'layer represents population.'
)))
message.add(m.Paragraph(tr(
'By default InaSAFE uses minimum needs defined for Indonesia - '
'and ships with additional profiles for the Philippines and Tanzania. '
'You can customise these or add your own region-specific profiles too.'
)))
message.add(m.Paragraph(tr(
'Minimum needs are grouped into regional or linguistic \'profiles\'. '
        'The default profile is \'BNPB_en\' - the English profile for the '
        'national disaster agency in Indonesia. '
'You will see that this profile defines requirements for displaced '
'persons in terms of Rice, Drinking Water, Clean Water (for bathing '
'etc.), Family Kits (with personal hygiene items) and provision of '
'toilets.'
)))
message.add(m.Paragraph(tr(
'Each item in the profile can be customised or removed. For example '
'selecting the first item in the list and then clicking on the '
        '\'pencil\' icon will show the details of how it was defined. '
'If you scroll up and down in the panel you will see that for each '
'item, you can set a name, description, units (in singular, '
'plural and abbreviated forms), specify maxima and minima for the '
'quantity of item allowed, a default and a frequency. You would use '
'the maxima and minima to ensure that disaster managers never '
'allocate amounts that will not be sufficient for human livelihood, '
'and also that will not overtax the logistics operation for those '
'providing humanitarian relief.'
)))
message.add(m.Paragraph(tr(
'The final item in the item configuration is the \'readable sentence\''
'which bears special discussion. Using a simple system of tokens you '
'can construct a sentence that will be used in the generated needs '
'report.'
)))
message.add(m.Heading(tr('Minimum needs profiles'), **INFO_STYLE))
message.add(m.Paragraph(tr(
'A profile is a collection of resources that define the minimum needs '
'for a particular country or region. Typically a profile should be '
'based on a regional, national or international standard. The '
'actual definition of which resources are needed in a given '
'profile is dependent on the local conditions and customs for the '
'area where the contingency plan is being devised.'
)))
message.add(m.Paragraph(tr(
        'For example, in the Middle East, rice is a staple food whereas in '
'South Africa, maize meal is a staple food and thus the contingency '
'planning should take these localised needs into account.'
)))
message.add(m.Heading(tr('Minimum needs resources'), **INFO_STYLE))
message.add(m.Paragraph(tr(
'Each item in a minimum needs profile is a resource. Each resource '
'is described as a simple natural language sentence e.g.:'
)))
message.add(m.EmphasizedText(tr(
'Each person should be provided with 2.8 kilograms of Rice weekly.'
)))
message.add(m.Paragraph(tr(
'By clicking on a resource entry in the profile window, and then '
'clicking the black pencil icon you will be able to edit the '
'resource using the resource editor. Alternatively you can create a '
'new resource for a profile by clicking on the black + icon in '
'the profile manager. You can also remove any resource from a '
'profile using the - icon in the profile manager.')))
message.add(m.Heading(tr('Resource Editor'), **INFO_STYLE))
message.add(m.Paragraph(tr(
'When switching to edit or add resource mode, the minimum needs '
'manager will be updated to show the resource editor. Each '
'resource is described in terms of:'
)))
bullets = m.BulletedList()
bullets.add(m.Text(
m.ImportantText(tr(
'resource name')),
tr(' - e.g. Rice')))
bullets.add(m.Text(
m.ImportantText(tr(
'a description of the resource')),
tr(' - e.g. Basic food')))
bullets.add(m.Text(
m.ImportantText(tr(
'unit in which the resource is provided')),
tr(' - e.g. kilogram')))
bullets.add(m.Text(
m.ImportantText(tr(
'pluralised form of the units')),
tr(' - e.g. kilograms')))
bullets.add(m.Text(
m.ImportantText(tr(
'abbreviation for the unit')),
tr(' - e.g. kg')))
bullets.add(m.Text(
m.ImportantText(tr(
'the default allocation for the resource')),
tr(' - e.g. 2.8. This number can be overridden on a '
'per-analysis basis')))
bullets.add(m.Text(
m.ImportantText(tr(
'minimum allowed which is used to prevent allocating')),
tr(' - e.g. no drinking water to displaced persons')))
bullets.add(m.ImportantText(tr(
'maximum allowed which is used to set a sensible upper '
'limit for the resource')))
bullets.add(m.ImportantText(tr(
'a readable sentence which is used to compile the '
'sentence describing the resource in reports.')))
message.add(bullets)
message.add(m.Paragraph(tr(
'These parameters are probably all fairly self explanatory, but '
'the readable sentence probably needs further detail. The '
'sentence is compiled using a simple keyword token replacement '
'system. The following tokens can be used:')))
bullets = m.BulletedList()
bullets.add(m.Text('{{ Default }}'))
bullets.add(m.Text('{{ Unit }}'))
bullets.add(m.Text('{{ Units }}'))
bullets.add(m.Text('{{ Unit abbreviation }}'))
bullets.add(m.Text('{{ Resource name }}'))
bullets.add(m.Text('{{ Frequency }}'))
bullets.add(m.Text('{{ Minimum allowed }}'))
bullets.add(m.Text('{{ Maximum allowed }}'))
message.add(bullets)
message.add(m.Paragraph(tr(
'When the token is placed in the sentence it will be replaced with '
'the actual value at report generation time. This contrived example '
'shows a tokenised sentence that includes all possible keywords:'
)))
message.add(m.EmphasizedText(tr(
'A displaced person should be provided with {{ %s }} '
'{{ %s }}/{{ %s }}/{{ %s }} of {{ %s }}. Though no less than {{ %s }} '
'and no more than {{ %s }}. This should be provided {{ %s }}.' % (
'Default',
'Unit',
'Units',
'Unit abbreviation',
'Resource name',
'Minimum allowed',
'Maximum allowed',
'Frequency'
)
)))
message.add(m.Paragraph(tr(
'Would generate a human readable sentence like this:')))
message.add(m.ImportantText(tr(
'A displaced person should be provided with 2.8 kilogram/kilograms/kg '
'of rice. Though no less than 0 and no more than 100. This should '
'be provided daily.'
)))
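    # A minimal sketch of how the {{ token }} placeholders above could be filled
    # in at report time. This is an assumption for illustration only --
    # 'resource' and 'readable sentence' are hypothetical keys, not InaSAFE's
    # actual implementation:
    #
    #   sentence = resource['readable sentence']
    #   for token, value in resource.items():
    #       sentence = sentence.replace('{{ %s }}' % token, str(value))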
message.add(m.Paragraph(tr(
'Once you have populated the resource elements, click the Save '
'resource button to return to the profile view. You will see the '
'new resource added in the profile\'s resource list.'
)))
message.add(m.Heading(tr('Managing profiles'), **INFO_STYLE))
message.add(m.Paragraph(tr(
'In addition to the profiles that come as standard with InaSAFE, you '
'can create new ones, either from scratch, or based on an existing '
'one (which you can then modify).'
)))
message.add(m.Paragraph(tr(
'Use the New button to create new profile. When prompted, give your '
'profile a name e.g. \'JakartaProfile\'.'
)))
message.add(m.Paragraph(tr(
'Note: The profile must be saved in your home directory under '
'.qgis2/minimum_needs in order for InaSAFE to successfully detect it.'
)))
message.add(m.Paragraph(tr(
'An alternative way to create a new profile is to use the Save as to '
'clone an existing profile. The clone profile can then be edited '
'according to your specific needs.'
)))
message.add(m.Heading(tr('Active profile'), **INFO_STYLE))
message.add(m.Paragraph(tr(
        'It is important to note that whichever profile you select in the '
        'Profile pick list will be considered active and will be used as '
        'the basis for all minimum needs analysis. You need to restart '
        'QGIS before the changed profile becomes active.'
)))
return message
| gpl-3.0 | 2,377,081,921,634,443,300 | -4,610,287,854,190,074,400 | 40 | 79 | 0.639672 | false |
yjxtogo/horizon | horizon/utils/validators.py | 32 | 2470 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from django.core.exceptions import ValidationError # noqa
from django.core import validators # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import conf
def validate_port_range(port):
if port not in range(-1, 65536):
raise ValidationError(_("Not a valid port number"))
def validate_ip_protocol(ip_proto):
if ip_proto not in range(-1, 256):
raise ValidationError(_("Not a valid IP protocol number"))
def password_validator():
return conf.HORIZON_CONFIG["password_validator"]["regex"]
def password_validator_msg():
return conf.HORIZON_CONFIG["password_validator"]["help_text"]
def validate_port_or_colon_separated_port_range(port_range):
"""Accepts a port number or a single-colon separated range."""
if port_range.count(':') > 1:
raise ValidationError(_("One colon allowed in port range"))
ports = port_range.split(':')
for port in ports:
try:
if int(port) not in range(-1, 65536):
raise ValidationError(_("Not a valid port number"))
except ValueError:
raise ValidationError(_("Port number must be integer"))
def validate_metadata(value):
error_msg = _('Invalid metadata entry. Use comma-separated'
' key=value pairs')
if value:
specs = value.split(",")
for spec in specs:
keyval = spec.split("=")
# ensure both sides of "=" exist, but allow blank value
if not len(keyval) == 2 or not keyval[0]:
raise ValidationError(error_msg)
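# Illustrative inputs for validate_metadata (examples only, not from the
# original source):
#   "key1=value1,key2="   -> accepted (blank values are allowed)
#   "key1=value1,=value2" -> rejected (missing key)
#   "key1:value1"         -> rejected (not a key=value pair)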
# Same as POSIX [:print:]. Accordingly, diacritics are disallowed.
PRINT_REGEX = re.compile(r'^[\x20-\x7E]*$')
validate_printable_ascii = validators.RegexValidator(
PRINT_REGEX,
_("The string may only contain ASCII printable characters."),
"invalid_characters")
| apache-2.0 | 7,337,788,864,952,785,000 | 5,921,145,211,204,500,000 | 32.835616 | 78 | 0.669636 | false |
mm1ke/portage | pym/_emerge/resolver/circular_dependency.py | 8 | 9294 | # Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function, unicode_literals
from itertools import chain, product
import logging
from portage.dep import use_reduce, extract_affecting_use, check_required_use, get_required_use_flags
from portage.exception import InvalidDependString
from portage.output import colorize
from portage.util import writemsg_level
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
from _emerge.Package import Package
class circular_dependency_handler(object):
MAX_AFFECTING_USE = 10
def __init__(self, depgraph, graph):
self.depgraph = depgraph
self.graph = graph
self.all_parent_atoms = depgraph._dynamic_config._parent_atoms
if "--debug" in depgraph._frozen_config.myopts:
# Show this debug output before doing the calculations
# that follow, so at least we have this debug info
# if we happen to hit a bug later.
writemsg_level("\n\ncircular dependency graph:\n\n",
level=logging.DEBUG, noiselevel=-1)
self.debug_print()
self.cycles, self.shortest_cycle = self._find_cycles()
#Guess if it is a large cluster of cycles. This usually requires
#a global USE change.
self.large_cycle_count = len(self.cycles) > 3
self.merge_list = self._prepare_reduced_merge_list()
#The digraph dump
self.circular_dep_message = self._prepare_circular_dep_message()
#Suggestions, in machine and human readable form
self.solutions, self.suggestions = self._find_suggestions()
def _find_cycles(self):
shortest_cycle = None
cycles = self.graph.get_cycles(ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
for cycle in cycles:
if not shortest_cycle or len(cycle) < len(shortest_cycle):
shortest_cycle = cycle
return cycles, shortest_cycle
def _prepare_reduced_merge_list(self):
"""
Create a merge to be displayed by depgraph.display().
This merge list contains only packages involved in
the circular deps.
"""
display_order = []
tempgraph = self.graph.copy()
while tempgraph:
nodes = tempgraph.leaf_nodes()
if not nodes:
node = tempgraph.order[0]
else:
node = nodes[0]
display_order.append(node)
tempgraph.remove(node)
return tuple(display_order)
def _prepare_circular_dep_message(self):
"""
Like digraph.debug_print(), but prints only the shortest cycle.
"""
if not self.shortest_cycle:
return None
msg = []
indent = ""
for pos, pkg in enumerate(self.shortest_cycle):
parent = self.shortest_cycle[pos-1]
priorities = self.graph.nodes[parent][0][pkg]
if pos > 0:
msg.append(indent + "%s (%s)" % (pkg, priorities[-1],))
else:
msg.append(indent + "%s depends on" % pkg)
indent += " "
pkg = self.shortest_cycle[0]
parent = self.shortest_cycle[-1]
priorities = self.graph.nodes[parent][0][pkg]
msg.append(indent + "%s (%s)" % (pkg, priorities[-1],))
return "\n".join(msg)
def _get_use_mask_and_force(self, pkg):
return pkg.use.mask, pkg.use.force
def _get_autounmask_changes(self, pkg):
needed_use_config_change = self.depgraph._dynamic_config._needed_use_config_changes.get(pkg)
if needed_use_config_change is None:
return frozenset()
use, changes = needed_use_config_change
return frozenset(changes.keys())
def _find_suggestions(self):
if not self.shortest_cycle:
return None, None
suggestions = []
final_solutions = {}
for pos, pkg in enumerate(self.shortest_cycle):
parent = self.shortest_cycle[pos-1]
priorities = self.graph.nodes[parent][0][pkg]
parent_atoms = self.all_parent_atoms.get(pkg)
if priorities[-1].buildtime:
dep = " ".join(parent._metadata[k]
for k in Package._buildtime_keys)
elif priorities[-1].runtime:
dep = parent._metadata["RDEPEND"]
for ppkg, atom in parent_atoms:
if ppkg == parent:
changed_parent = ppkg
parent_atom = atom.unevaluated_atom
break
try:
affecting_use = extract_affecting_use(dep, parent_atom,
eapi=parent.eapi)
except InvalidDependString:
if not parent.installed:
raise
affecting_use = set()
# Make sure we don't want to change a flag that is
# a) in use.mask or use.force
# b) changed by autounmask
usemask, useforce = self._get_use_mask_and_force(parent)
autounmask_changes = self._get_autounmask_changes(parent)
untouchable_flags = frozenset(chain(usemask, useforce, autounmask_changes))
affecting_use.difference_update(untouchable_flags)
#If any of the flags we're going to touch is in REQUIRED_USE, add all
#other flags in REQUIRED_USE to affecting_use, to not lose any solution.
required_use_flags = get_required_use_flags(
parent._metadata.get("REQUIRED_USE", ""),
eapi=parent.eapi)
if affecting_use.intersection(required_use_flags):
# TODO: Find out exactly which REQUIRED_USE flags are
# entangled with affecting_use. We have to limit the
# number of flags since the number of loops is
# exponentially related (see bug #374397).
total_flags = set()
total_flags.update(affecting_use, required_use_flags)
total_flags.difference_update(untouchable_flags)
if len(total_flags) <= self.MAX_AFFECTING_USE:
affecting_use = total_flags
affecting_use = tuple(affecting_use)
if not affecting_use:
continue
if len(affecting_use) > self.MAX_AFFECTING_USE:
# Limit the number of combinations explored (bug #555698).
				# First, discard irrelevant flags that are not enabled.
# Since extract_affecting_use doesn't distinguish between
# positive and negative effects (flag? vs. !flag?), assume
# a positive relationship.
current_use = self.depgraph._pkg_use_enabled(parent)
affecting_use = tuple(flag for flag in affecting_use
if flag in current_use)
if len(affecting_use) > self.MAX_AFFECTING_USE:
# There are too many USE combinations to explore in
# a reasonable amount of time.
continue
#We iterate over all possible settings of these use flags and gather
#a set of possible changes
#TODO: Use the information encoded in REQUIRED_USE
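			# For example, with hypothetical flags affecting_use == ('ssl', 'gtk') the
			# product() call below tries ('disabled', 'disabled'), ('disabled', 'enabled'),
			# ('enabled', 'disabled') and ('enabled', 'enabled'), i.e. 2**len(affecting_use)
			# candidate USE settings.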
solutions = set()
for use_state in product(("disabled", "enabled"),
repeat=len(affecting_use)):
current_use = set(self.depgraph._pkg_use_enabled(parent))
for flag, state in zip(affecting_use, use_state):
if state == "enabled":
current_use.add(flag)
else:
current_use.discard(flag)
try:
reduced_dep = use_reduce(dep,
uselist=current_use, flat=True)
except InvalidDependString:
if not parent.installed:
raise
reduced_dep = None
if reduced_dep is not None and \
parent_atom not in reduced_dep:
#We found an assignment that removes the atom from 'dep'.
#Make sure it doesn't conflict with REQUIRED_USE.
required_use = parent._metadata.get("REQUIRED_USE", "")
if check_required_use(required_use, current_use,
parent.iuse.is_valid_flag,
eapi=parent.eapi):
use = self.depgraph._pkg_use_enabled(parent)
solution = set()
for flag, state in zip(affecting_use, use_state):
if state == "enabled" and \
flag not in use:
solution.add((flag, True))
elif state == "disabled" and \
flag in use:
solution.add((flag, False))
solutions.add(frozenset(solution))
for solution in solutions:
ignore_solution = False
for other_solution in solutions:
if solution is other_solution:
continue
if solution.issuperset(other_solution):
ignore_solution = True
if ignore_solution:
continue
#Check if a USE change conflicts with use requirements of the parents.
				#If a requirement is hard, ignore the suggestion.
				#If the requirement is conditional, warn the user that other changes might be needed.
followup_change = False
parent_parent_atoms = self.depgraph._dynamic_config._parent_atoms.get(changed_parent)
for ppkg, atom in parent_parent_atoms:
atom = atom.unevaluated_atom
if not atom.use:
continue
for flag, state in solution:
if flag in atom.use.enabled or flag in atom.use.disabled:
ignore_solution = True
break
elif atom.use.conditional:
for flags in atom.use.conditional.values():
if flag in flags:
followup_change = True
break
if ignore_solution:
break
if ignore_solution:
continue
changes = []
for flag, state in solution:
if state:
changes.append(colorize("red", "+"+flag))
else:
changes.append(colorize("blue", "-"+flag))
msg = "- %s (Change USE: %s)\n" \
% (parent.cpv, " ".join(changes))
if followup_change:
msg += " (This change might require USE changes on parent packages.)"
suggestions.append(msg)
final_solutions.setdefault(pkg, set()).add(solution)
return final_solutions, suggestions
def debug_print(self):
"""
Create a copy of the digraph, prune all root nodes,
and call the debug_print() method.
"""
graph = self.graph.copy()
while True:
root_nodes = graph.root_nodes(
ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
if not root_nodes:
break
graph.difference_update(root_nodes)
graph.debug_print()
| gpl-2.0 | 7,858,802,498,861,789,000 | -754,443,383,066,382,300 | 31.15917 | 101 | 0.688401 | false |
kou/zulip | scripts/lib/check_rabbitmq_queue.py | 3 | 6873 | import json
import os
import re
import subprocess
import time
from collections import defaultdict
from typing import Any, DefaultDict, Dict, List
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
normal_queues = [
'deferred_work',
'digest_emails',
'email_mirror',
'embed_links',
'embedded_bots',
'error_reports',
'invites',
'email_senders',
'missedmessage_emails',
'missedmessage_mobile_notifications',
'outgoing_webhooks',
'signups',
'user_activity',
'user_activity_interval',
'user_presence',
]
OK = 0
WARNING = 1
CRITICAL = 2
UNKNOWN = 3
states = {
0: "OK",
1: "WARNING",
2: "CRITICAL",
3: "UNKNOWN",
}
MAX_SECONDS_TO_CLEAR: DefaultDict[str, int] = defaultdict(
lambda: 30,
digest_emails=1200,
missedmessage_mobile_notifications=120,
embed_links=60,
)
CRITICAL_SECONDS_TO_CLEAR: DefaultDict[str, int] = defaultdict(
lambda: 60,
missedmessage_mobile_notifications=180,
digest_emails=1800,
embed_links=90,
)
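# Illustrative note (not part of the original script): queues without an
# explicit entry above fall back to the defaultdict default, e.g.
#   MAX_SECONDS_TO_CLEAR['user_activity']  -> 30
#   MAX_SECONDS_TO_CLEAR['embed_links']    -> 60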
def analyze_queue_stats(queue_name: str, stats: Dict[str, Any],
queue_count_rabbitmqctl: int) -> Dict[str, Any]:
now = int(time.time())
if stats == {}:
return dict(status=UNKNOWN,
name=queue_name,
message='invalid or no stats data')
if now - stats['update_time'] > 180 and queue_count_rabbitmqctl > 10:
# Queue isn't updating the stats file and has some events in
# the backlog, it's likely stuck.
#
# TODO: There's an unlikely race condition here - if the queue
# was fully emptied and was idle due to no new events coming
# for over 180 seconds, suddenly gets a burst of events and
# this code runs exactly in the very small time window between
# those events popping up and the queue beginning to process
# the first one (which will refresh the stats file at the very
# start), we'll incorrectly return the CRITICAL status. The
# chance of that happening should be negligible because the queue
# worker should wake up immediately and log statistics before
# starting to process the first event.
return dict(status=CRITICAL,
name=queue_name,
message='queue appears to be stuck, last update {}, queue size {}'.format(
stats['update_time'], queue_count_rabbitmqctl))
current_size = queue_count_rabbitmqctl
average_consume_time = stats['recent_average_consume_time']
if average_consume_time is None:
# Queue just started; we can't effectively estimate anything.
#
# If the queue is stuck in this state and not processing
# anything, eventually the `update_time` rule above will fire.
return dict(status=OK,
name=queue_name,
message='')
expected_time_to_clear_backlog = current_size * average_consume_time
if expected_time_to_clear_backlog > MAX_SECONDS_TO_CLEAR[queue_name]:
if expected_time_to_clear_backlog > CRITICAL_SECONDS_TO_CLEAR[queue_name]:
status = CRITICAL
else:
status = WARNING
return dict(status=status,
name=queue_name,
message=f'clearing the backlog will take too long: {expected_time_to_clear_backlog}s, size: {current_size}')
return dict(status=OK,
name=queue_name,
message='')
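# Illustrative sketch (not part of the original script): how analyze_queue_stats
# might be exercised.  The stats values below are made-up examples; only the
# field names mirror what the function reads.
def _example_analyze_queue_stats() -> Dict[str, Any]:
    fake_stats = {
        'update_time': int(time.time()),       # worker reported very recently
        'recent_average_consume_time': 0.5,    # assumed seconds per event
    }
    # 100 queued events * 0.5s = 50s backlog: above the 30s default threshold
    # but below the 60s critical threshold, so this returns a WARNING result.
    return analyze_queue_stats('user_activity', fake_stats, 100)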
WARN_COUNT_THRESHOLD_DEFAULT = 10
CRITICAL_COUNT_THRESHOLD_DEFAULT = 50
def check_other_queues(queue_counts_dict: Dict[str, int]) -> List[Dict[str, Any]]:
""" Do a simple queue size check for queues whose workers don't publish stats files."""
results = []
for queue, count in queue_counts_dict.items():
if queue in normal_queues:
continue
if count > CRITICAL_COUNT_THRESHOLD_DEFAULT:
results.append(dict(status=CRITICAL, name=queue,
message=f'count critical: {count}'))
elif count > WARN_COUNT_THRESHOLD_DEFAULT:
results.append(dict(status=WARNING, name=queue,
message=f'count warning: {count}'))
else:
results.append(dict(status=OK, name=queue, message=''))
return results
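# Illustrative usage (not part of the original script):
#   check_other_queues({'notify_tornado': 60, 'user_activity': 5})
# returns one CRITICAL entry for 'notify_tornado' (60 > 50) and skips
# 'user_activity' because it is listed in normal_queues.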
def check_rabbitmq_queues() -> None:
pattern = re.compile(r'(\w+)\t(\d+)\t(\d+)')
    if 'USER' in os.environ and os.environ['USER'] not in ['root', 'rabbitmq']:
print("This script must be run as the root or rabbitmq user")
list_queues_output = subprocess.check_output(['/usr/sbin/rabbitmqctl', 'list_queues',
'name', 'messages', 'consumers'],
universal_newlines=True)
queue_counts_rabbitmqctl = {}
queues_with_consumers = []
for line in list_queues_output.split("\n"):
line = line.strip()
m = pattern.match(line)
if m:
queue = m.group(1)
count = int(m.group(2))
consumers = int(m.group(3))
queue_counts_rabbitmqctl[queue] = count
if consumers > 0 and not queue.startswith("notify_tornado"):
queues_with_consumers.append(queue)
queue_stats_dir = subprocess.check_output([os.path.join(ZULIP_PATH, 'scripts/get-django-setting'),
'QUEUE_STATS_DIR'],
universal_newlines=True).strip()
queue_stats: Dict[str, Dict[str, Any]] = {}
queues_to_check = set(normal_queues).intersection(set(queues_with_consumers))
for queue in queues_to_check:
fn = queue + ".stats"
file_path = os.path.join(queue_stats_dir, fn)
if not os.path.exists(file_path):
queue_stats[queue] = {}
continue
with open(file_path) as f:
try:
queue_stats[queue] = json.load(f)
except json.decoder.JSONDecodeError:
queue_stats[queue] = {}
results = []
for queue_name, stats in queue_stats.items():
results.append(analyze_queue_stats(queue_name, stats, queue_counts_rabbitmqctl[queue_name]))
results.extend(check_other_queues(queue_counts_rabbitmqctl))
status = max(result['status'] for result in results)
now = int(time.time())
if status > 0:
queue_error_template = "queue {} problem: {}:{}"
error_message = '; '.join(
queue_error_template.format(result['name'], states[result['status']], result['message'])
for result in results if result['status'] > 0
)
print(f"{now}|{status}|{states[status]}|{error_message}")
else:
print(f"{now}|{status}|{states[status]}|queues normal")
| apache-2.0 | -5,226,767,660,429,202,000 | 3,912,488,566,632,752,000 | 36.151351 | 128 | 0.598138 | false |
iansf/engine | build/find_isolated_tests.py | 142 | 2261 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Scans build output directory for .isolated files, calculates their SHA1
hashes, stores final list in JSON document and then removes *.isolated files
found (to ensure no stale *.isolated stay around on the next build).
Used to figure out what tests were build in isolated mode to trigger these
tests to run on swarming.
For more info see:
https://sites.google.com/a/chromium.org/dev/developers/testing/isolated-testing
"""
import glob
import hashlib
import json
import optparse
import os
import re
import sys
def hash_file(filepath):
"""Calculates the hash of a file without reading it all in memory at once."""
digest = hashlib.sha1()
with open(filepath, 'rb') as f:
while True:
chunk = f.read(1024*1024)
if not chunk:
break
digest.update(chunk)
return digest.hexdigest()
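# Illustrative usage (not part of the original script); the path below is a
# made-up example:
#
#   print(hash_file('/tmp/example.isolated'))
#   # -> 'da39a3ee5e6b4b0d3255bfef95601890afd80709' if the file is empty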
def main():
parser = optparse.OptionParser(
usage='%prog --build-dir <path> --output-json <path>',
description=sys.modules[__name__].__doc__)
parser.add_option(
'--build-dir',
help='Path to a directory to search for *.isolated files.')
parser.add_option(
'--output-json',
help='File to dump JSON results into.')
options, _ = parser.parse_args()
if not options.build_dir:
parser.error('--build-dir option is required')
if not options.output_json:
parser.error('--output-json option is required')
result = {}
# Get the file hash values and output the pair.
pattern = os.path.join(options.build_dir, '*.isolated')
for filepath in sorted(glob.glob(pattern)):
test_name = os.path.splitext(os.path.basename(filepath))[0]
if re.match(r'^.+?\.\d$', test_name):
# It's a split .isolated file, e.g. foo.0.isolated. Ignore these.
continue
# TODO(csharp): Remove deletion once the isolate tracked dependencies are
# inputs for the isolated files.
sha1_hash = hash_file(filepath)
os.remove(filepath)
result[test_name] = sha1_hash
with open(options.output_json, 'wb') as f:
json.dump(result, f)
return 0
if __name__ == '__main__':
sys.exit(main())
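# Illustrative note (not part of the original script): given a build directory
# containing foo.isolated and bar.isolated, --output-json receives a mapping of
# test name to SHA1 hash, e.g. {"foo": "<sha1>", "bar": "<sha1>"}, and both
# .isolated files are deleted afterwards.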
| bsd-3-clause | 4,122,522,741,819,551,000 | -1,454,864,437,499,502,600 | 27.987179 | 79 | 0.684653 | false |
kryptxy/torrench | torrench/utilities/Config.py | 1 | 4151 | """ Config module."""
import logging
import os
from configparser import SafeConfigParser
from torrench.utilities.Common import Common
class Config(Common):
r"""
Config class.
    This class checks for the config file's presence.
    It also manages TPB/KAT proxies; that is, it obtains the TPB/KAT
    URLs and fetches proxies through those URLs.
    Proxies are stored as a list and returned.
    By default, the config file is looked for in $XDG_CONFIG_HOME/torrench/,
    falling back to the $HOME/.config/torrench/ directory (Linux).
    For Windows, the default location is ~\.config\torrench.
    This class inherits the Common class.
"""
def __init__(self):
"""Initialisations."""
Common.__init__(self)
self.config = SafeConfigParser()
self.config_dir = os.getenv('XDG_CONFIG_HOME', os.path.expanduser(os.path.join('~', '.config')))
self.full_config_dir = os.path.join(self.config_dir, 'torrench')
self.config_file_name = "config.ini"
self.config_file_name_new = "config.ini.new"
self.config_file = os.path.join(self.full_config_dir, self.config_file_name)
self.config_file_new = os.path.join(self.full_config_dir, self.config_file_name_new)
self.url = None
self.name = None
self.urllist = []
self.logger = logging.getLogger('log1')
def file_exists(self):
"""To check whether config.ini file exists and is enabled or not."""
if os.path.isfile(self.config_file):
self.config.read(self.config_file)
enable = self.config.get('Torrench-Config', 'enable')
if enable == '1':
self.logger.debug("Config file exists and enabled!")
return True
def update_file(self):
try:
# Get updated copy of config.ini file.
self.logger.debug("Downloading new config.ini file")
url = "https://pastebin.com/raw/reymRHSL"
self.logger.debug("Download complete. Saving file..")
soup = self.http_request(url)
res = soup.p.get_text()
with open(self.config_file, 'w', encoding="utf-8") as f:
f.write(res)
self.logger.debug("Saved new file as {}".format(self.config_file))
# Read file and set enable = 1
self.config.read(self.config_file)
self.logger.debug("Now enabling file")
self.config.set('Torrench-Config', 'enable', '1')
# Write changes to config.ini file (self.config_file)
with open(self.config_file, 'w', encoding="utf-8") as configfile:
self.config.write(configfile)
            self.logger.debug("File enabled successfully and saved.")
print("Config file updated!")
self.logger.debug("Config file updated successfully.")
except Exception as e:
print("Something went wrong. See logs for details.")
            self.logger.debug("Something went wrong while updating the config file.")
self.logger.exception(e)
# To get proxies for KAT/TPB/...
def get_proxies(self, name):
"""
Get Proxies.
Proxies are read from config.ini file.
"""
self.logger.debug("getting proxies for '%s'" % (name))
temp = []
self.config.read(self.config_file)
name = '{}_URL'.format(name.upper())
self.url = self.config.get('Torrench-Config', name)
self.urllist = self.url.split()
if name == 'TPB_URL':
soup = self.http_request(self.urllist[-1])
link = soup.find_all('td', class_='site')
del self.urllist[-1]
for i in link:
temp.append(i.a["href"])
self.urllist.extend(temp)
elif name == "1337X_URL":
soup = self.http_request(self.urllist[-1])
link = soup.findAll('td', class_='text-left')
del self.urllist[-1]
for i in link:
temp.append(i.a["href"])
self.urllist.extend(temp)
self.logger.debug("got %d proxies!" % (len(self.urllist)))
return self.urllist
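# Illustrative usage (not part of the original module); assumes a valid,
# enabled config.ini containing a TPB_URL entry:
#
#   config = Config()
#   if config.file_exists():
#       proxies = config.get_proxies('tpb')   # list of TPB proxy URLs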
| gpl-3.0 | -8,780,640,910,296,494,000 | 5,419,518,951,941,712,000 | 38.160377 | 104 | 0.58781 | false |
joshuaduffy/selenium | py/test/selenium/webdriver/common/children_finding_tests.py | 3 | 10379 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import InvalidSelectorException
from selenium.webdriver.common.by import By
class ChildrenFindingTests(unittest.TestCase):
def test_should_find_element_by_xpath(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
child = element.find_element_by_xpath("select")
self.assertEqual(child.get_attribute("id"), "2")
def test_should_not_find_element_by_xpath(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_element_by_xpath("select/x")
self.fail("Expected NoSuchElementException to have been thrown")
        except NoSuchElementException:
pass
def test_finding_dot_slash_elements_on_element_by_xpath_should_find_not_top_level_elements(self):
self._load_simple_page()
parent = self.driver.find_element_by_id("multiline")
children = parent.find_elements_by_xpath("./p")
self.assertEqual(1, len(children))
self.assertEqual("A div containing", children[0].text)
def test_should_find_elements_by_xpath(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
children = element.find_elements_by_xpath("select/option")
        self.assertEqual(len(children), 8)
self.assertEqual(children[0].text, "One")
self.assertEqual(children[1].text, "Two")
def test_should_not_find_elements_by_xpath(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
children = element.find_elements_by_xpath("select/x")
self.assertEqual(len(children), 0)
def test_finding_elements_on_element_by_xpath_should_find_top_level_elements(self):
self._load_simple_page()
parent = self.driver.find_element_by_id("multiline")
all_para_elements = self.driver.find_elements_by_xpath("//p")
children = parent.find_elements_by_xpath("//p")
self.assertEqual(len(all_para_elements), len(children))
def test_should_find_element_by_name(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
child = element.find_element_by_name("selectomatic")
self.assertEqual(child.get_attribute("id"), "2")
def test_should_find_elements_by_name(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
children = element.find_elements_by_name("selectomatic")
self.assertEqual(len(children), 2)
def test_should_find_element_by_id(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
child = element.find_element_by_id("2")
self.assertEqual(child.get_attribute("name"), "selectomatic")
def test_should_find_elements_by_id(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
child = element.find_elements_by_id("2")
self.assertEqual(len(child), 2)
def test_should_find_element_by_id_when_multiple_matches_exist(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_id("test_id_div")
child = element.find_element_by_id("test_id")
self.assertEqual(child.text, "inside")
def test_should_find_element_by_id_when_no_match_in_context(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_id("test_id_div")
try:
element.find_element_by_id("test_id_out")
self.fail("Expected NoSuchElementException to have been thrown")
        except NoSuchElementException:
pass
def test_should_find_element_by_link_text(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("div1")
child = element.find_element_by_link_text("hello world")
self.assertEqual(child.get_attribute("name"), "link1")
def test_should_find_elements_by_link_text(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("div1")
children = element.find_elements_by_link_text("hello world")
self.assertEqual(len(children), 2)
self.assertEqual("link1", children[0].get_attribute("name"))
self.assertEqual("link2", children[1].get_attribute("name"))
def test_should_find_element_by_class_name(self):
self._load_page("nestedElements")
parent = self.driver.find_element_by_name("classes")
element = parent.find_element_by_class_name("one")
self.assertEqual("Find me", element.text)
def test_should_find_elements_by_class_name(self):
self._load_page("nestedElements")
parent = self.driver.find_element_by_name("classes")
elements = parent.find_elements_by_class_name("one")
self.assertEqual(2, len(elements))
def test_should_find_element_by_tag_name(self):
self._load_page("nestedElements")
parent = self.driver.find_element_by_name("div1")
element = parent.find_element_by_tag_name("a")
self.assertEqual("link1", element.get_attribute("name"))
def test_should_find_elements_by_tag_name(self):
self._load_page("nestedElements")
parent = self.driver.find_element_by_name("div1")
elements = parent.find_elements_by_tag_name("a")
self.assertEqual(2, len(elements))
def test_should_be_able_to_find_an_element_by_css_selector(self):
self._load_page("nestedElements")
parent = self.driver.find_element_by_name("form2")
element = parent.find_element_by_css_selector('*[name="selectomatic"]')
self.assertEqual("2", element.get_attribute("id"))
def test_should_be_able_to_find_multiple_elements_by_css_selector(self):
self._load_page("nestedElements")
parent = self.driver.find_element_by_name("form2")
elements = parent.find_elements_by_css_selector(
'*[name="selectomatic"]')
self.assertEqual(2, len(elements))
def test_should_throw_an_error_if_user_passes_in_integer(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_element(By.ID, 333333)
            self.fail("Expected InvalidSelectorException to have been thrown")
        except InvalidSelectorException:
            pass  # this is expected
def test_should_throw_an_error_if_user_passes_in_tuple(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_element((By.ID, 333333))
            self.fail("Expected InvalidSelectorException to have been thrown")
        except InvalidSelectorException:
            pass  # this is expected
def test_should_throw_an_error_if_user_passes_inNone(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_element(By.ID, None)
            self.fail("Expected InvalidSelectorException to have been thrown")
        except InvalidSelectorException:
            pass  # this is expected
def test_should_throw_an_error_if_user_passes_in_invalid_by(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_element("css", "body")
            self.fail("Expected InvalidSelectorException to have been thrown")
        except InvalidSelectorException:
            pass  # this is expected
def test_should_throw_an_error_if_user_passes_in_integer_when_find_elements(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_elements(By.ID, 333333)
            self.fail("Expected InvalidSelectorException to have been thrown")
        except InvalidSelectorException:
            pass  # this is expected
def test_should_throw_an_error_if_user_passes_in_tuple_when_find_elements(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_elements((By.ID, 333333))
            self.fail("Expected InvalidSelectorException to have been thrown")
        except InvalidSelectorException:
            pass  # this is expected
def test_should_throw_an_error_if_user_passes_inNone_when_find_elements(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_elements(By.ID, None)
            self.fail("Expected InvalidSelectorException to have been thrown")
        except InvalidSelectorException:
            pass  # this is expected
def test_should_throw_an_error_if_user_passes_in_invalid_by_when_find_elements(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_elements("css", "body")
            self.fail("Expected InvalidSelectorException to have been thrown")
        except InvalidSelectorException:
            pass  # this is expected
def _page_url(self, name):
return self.webserver.where_is(name + '.html')
def _load_simple_page(self):
self._load_page("simpleTest")
def _load_page(self, name):
self.driver.get(self._page_url(name))
| apache-2.0 | -2,558,015,482,808,840,000 | -8,337,541,985,553,042,000 | 42.609244 | 101 | 0.659216 | false |
tayfun/django | django/views/debug.py | 44 | 46856 | from __future__ import unicode_literals
import re
import sys
import types
from django.conf import settings
from django.core.urlresolvers import Resolver404, resolve
from django.http import HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.utils import lru_cache, six, timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, smart_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting.
DEBUG_ENGINE = Engine(debug=True)
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE')
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p + 1
p = template_source.find('\n', p + 1)
yield len(template_source) + 1
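# Illustrative note on linebreak_iter (not part of the original module): it
# yields the offset of the first character of every line, plus a final offset
# just past the end of the source, e.g. list(linebreak_iter("ab\ncd")) -> [0, 3, 6].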
class CallableSettingWrapper(object):
""" Object to wrap callable appearing in settings
* Not to call in the debug page (#21345).
    * Not to break the debug page if the callable forbids setting attributes (#23070).
"""
def __init__(self, callable_setting):
self._wrapped = callable_setting
def __repr__(self):
return repr(self._wrapped)
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
if callable(cleansed):
# For fixing #21345 and #23070
cleansed = CallableSettingWrapper(cleansed)
return cleansed
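# Illustrative usage of cleanse_setting (not part of the original module):
#   cleanse_setting('SECRET_KEY', 'abc123')          -> '********************'
#   cleanse_setting('DATABASES', {'PASSWORD': 'x'})  -> {'PASSWORD': '********************'}
# Keys matching HIDDEN_SETTINGS are masked and nested dicts are cleansed
# recursively; callables are wrapped in CallableSettingWrapper.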
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponse(text, status=status_code, content_type='text/plain')
else:
html = reporter.get_traceback_html()
return HttpResponse(html, status=status_code, content_type='text/html')
@lru_cache.lru_cache()
def get_default_exception_reporter_filter():
# Instantiate the default filter for the first time and cache it.
return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
default_filter = get_default_exception_reporter_filter()
return getattr(request, 'exception_reporter_filter', default_filter)
class ExceptionReporterFilter(object):
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(tb_frame.f_locals.items())
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replaces the keys in a MultiValueDict marked as sensitive with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = CLEANSED_SUBSTITUTE
return multivaluedict
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def cleanse_special_types(self, request, value):
try:
# If value is lazy or a complex object of another kind, this check
# might raise an exception. isinstance checks that lazy
# MultiValueDicts will have a return value.
is_multivalue_dict = isinstance(value, MultiValueDict)
except Exception as e:
return '{!r} while evaluating {!r}'.format(e, value)
if is_multivalue_dict:
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in tb_frame.f_locals):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed['func_args'] = CLEANSED_SUBSTITUTE
cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
return cleansed.items()
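    # Illustrative note (not part of the original module): for a view wrapped
    # with @sensitive_variables('password'), a traceback frame for that view
    # would report password = '********************' while leaving its other
    # local variables intact (assuming DEBUG is False so this filter is active).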
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = getattr(self.exc_value, 'template_debug', None)
self.template_does_not_exist = False
self.postmortem = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, six.string_types):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
self.postmortem = self.exc_value.chain or [self.exc_value]
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame_vars = []
for k, v in frame['vars']:
v = pprint(v)
                    # The force_escape filter assumes unicode; make sure that works
if isinstance(v, six.binary_type):
v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input
# Trim large blobs of data
if len(v) > 4096:
v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v))
frame_vars.append((k, force_escape(v)))
frame['vars'] = frame_vars
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_text(
unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
'ascii', errors='replace'
)
from django import get_version
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'filtered_POST': self.filter.get_post_parameters(self.request),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': timezone.now(),
'django_version_info': get_version(),
'sys_path': sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'postmortem': self.postmortem,
}
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = smart_text(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"Return HTML version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEMPLATE)
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"Return plain text version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEXT_TEMPLATE)
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.read().splitlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], six.binary_type):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1:upper_bound]
return lower_bound, pre_context, context_line, post_context
def get_traceback_frames(self):
def explicit_or_implicit_cause(exc_value):
explicit = getattr(exc_value, '__cause__', None)
implicit = getattr(exc_value, '__context__', None)
return explicit or implicit
# Get the exception and all its causes
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = explicit_or_implicit_cause(exc_value)
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception (always in Python 2,
# sometimes in Python 3), take the traceback from self.tb (Python 2
# doesn't have a __traceback__ attribute on Exception)
exc_value = exceptions.pop()
tb = self.tb if six.PY2 or not exceptions else exc_value.__traceback__
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(
filename, lineno, 7, loader, module_name,
)
if pre_context_lineno is not None:
frames.append({
'exc_cause': explicit_or_implicit_cause(exc_value),
'exc_cause_explicit': getattr(exc_value, '__cause__', True),
'tb': tb,
'type': 'django' if module_name.startswith('django.') else 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
# If the traceback for current exception is consumed, try the
# other exception.
if six.PY2:
tb = tb.tb_next
elif not tb.tb_next and exceptions:
exc_value = exceptions.pop()
tb = exc_value.__traceback__
else:
tb = tb.tb_next
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
list = ['Traceback (most recent call last):\n']
list += traceback.format_list(tb)
list += traceback.format_exception_only(self.exc_type, self.exc_value)
return list
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
error_url = exception.args[0]['path']
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if (not tried # empty URLconf
or (request.path == '/'
and len(tried) == 1 # default URLconf
and len(tried[0]) == 1
and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')):
return default_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
caller = ''
try:
resolver_match = resolve(request.path)
except Resolver404:
pass
else:
obj = resolver_match.func
if hasattr(obj, '__name__'):
caller = obj.__name__
elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
caller = obj.__class__.__name__
if hasattr(obj, '__module__'):
module = obj.__module__
caller = '%s.%s' % (module, caller)
t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE)
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': error_url,
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
'raising_view_name': caller,
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
"Create an empty URLconf 404 error response."
t = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE)
c = Context({
"title": _("Welcome to Django"),
"heading": _("It worked!"),
"subheading": _("Congratulations on your first Django-powered page."),
"instructions": _("Of course, you haven't actually done any work yet. "
"Next, start your first app by running <code>python manage.py startapp [app_label]</code>."),
"explanation": _("You're seeing this message because you have <code>DEBUG = True</code> in your "
"Django settings file and you haven't configured any URLs. Get to work!"),
})
return HttpResponse(t.render(c), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = ("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; padding-left: 2px; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; padding: 3px 2px; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 10px 20px; }
#template-not-exist .postmortem-section { margin-bottom: 3px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
.append-bottom { margin-bottom: 10px; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block': 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML.trim() == s1 ? s2: s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">"""
"""{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}"""
"""</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if postmortem %}
<p class="append-bottom">Django tried loading these templates, in this order:</p>
{% for entry in postmortem %}
<p class="postmortem-section">Using engine <code>{{ entry.backend.name }}</code>:</p>
<ul>
{% if entry.tried %}
{% for attempt in entry.tried %}
<li><code>{{ attempt.0.loader_name }}</code>: {{ attempt.0.name }} ({{ attempt.1 }})</li>
{% endfor %}
</ul>
{% else %}
<li>This engine did not provide a list of tried templates.</li>
{% endif %}
</ul>
{% endfor %}
{% else %}
<p>No templates were found because your 'TEMPLATES' setting is not configured.</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}
{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}"""
"""<span class="specific">{{ template_info.during }}</span>"""
"""{{ template_info.after }}</td>
</tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">
Switch to copy-and-paste view</a></span>{% endif %}
</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}
<li><h3>
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
</h3></li>
{% endif %}{% endifchanged %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">
{% for line in frame.pre_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line">
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>
""" """{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">
{% for line in frame.post_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title"
value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %} * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}"""
"{% for source_line in template_info.source_lines %}"
"{% ifequal source_line.0 template_info.line %}"
" {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}"
"{% else %}"
" {{ source_line.0 }} : {{ source_line.1 }}"
"""{% endifequal %}{% endfor %}{% endif %}
Traceback:{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}{% endif %}{% endifchanged %}
File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard page generated by the handler for this status code.
</p>
</div>
{% endif %}
</body>
</html>
""")
TECHNICAL_500_TEXT_TEMPLATE = ("""{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %} * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}
{% for source_line in template_info.source_lines %}"""
"{% ifequal source_line.0 template_info.line %}"
" {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}"
"{% else %}"
" {{ source_line.0 }} : {{ source_line.1 }}"
"""{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:"""
"{% for frame in frames %}"
"{% ifchanged frame.exc_cause %}"
" {% if frame.exc_cause %}" """
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
{% endif %}
{% endifchanged %}
File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% if not is_email %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard page generated by the handler for this status code.
{% endif %}
""")
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% if raising_view_name %}
<tr>
<th>Raised by:</th>
<td>{{ raising_view_name }}</td>
</tr>
{% endif %}
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ heading }}</h1>
<h2>{{ subheading }}</h2>
</div>
<div id="instructions">
<p>
{{ instructions|safe }}
</p>
</div>
<div id="explanation">
<p>
{{ explanation|safe }}
</p>
</div>
</body></html>
"""
| bsd-3-clause | -5,783,865,119,494,672,000 | 2,720,295,253,107,130,000 | 36.42492 | 125 | 0.574996 | false |
SivilTaram/edx-platform | lms/envs/cms/microsite_test.py | 110 | 1675 | """
This is a localdev test for the Microsite processing pipeline
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
from .dev import *
from ..dev import ENV_ROOT, FEATURES
MICROSITE_CONFIGURATION = {
"openedx": {
"domain_prefix": "openedx",
"university": "openedx",
"platform_name": "Open edX",
"logo_image_url": "openedx/images/header-logo.png",
"email_from_address": "[email protected]",
"payment_support_email": "[email protected]",
"ENABLE_MKTG_SITE": False,
"SITE_NAME": "openedx.localhost",
"course_org_filter": "CDX",
"course_about_show_social_links": False,
"css_overrides_file": "openedx/css/openedx.css",
"show_partners": False,
"show_homepage_promo_video": False,
"course_index_overlay_text": "Explore free courses from leading universities.",
"course_index_overlay_logo_file": "openedx/images/header-logo.png",
"homepage_overlay_html": "<h1>Take an Open edX Course</h1>"
}
}
MICROSITE_ROOT_DIR = ENV_ROOT / 'edx-microsite'
# pretend we are behind some marketing site, we want to be able to assert that the Microsite config values override
# this global setting
FEATURES['ENABLE_MKTG_SITE'] = True
FEATURES['USE_MICROSITES'] = True
| agpl-3.0 | 2,615,944,887,565,239,000 | -2,293,570,911,349,018,600 | 37.953488 | 115 | 0.687164 | false |
sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/yaml/parser.py | 409 | 25542 |
# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
#
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
# block_node_or_indentless_sequence ::=
# ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# FIRST sets:
#
# stream: { STREAM-START }
# explicit_document: { DIRECTIVE DOCUMENT-START }
# implicit_document: FIRST(block_node)
# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_sequence: { BLOCK-SEQUENCE-START }
# block_mapping: { BLOCK-MAPPING-START }
# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
# indentless_sequence: { ENTRY }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_sequence: { FLOW-SEQUENCE-START }
# flow_mapping: { FLOW-MAPPING-START }
# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
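#
# A hedged illustration of the grammar above (an added comment, not part of
# the original module): the package-level ``yaml.parse()`` helper drives this
# Parser and yields one event per production. For the document
# "- a\n- b: c\n" the event stream is:
#
#     >>> import yaml
#     >>> for event in yaml.parse("- a\n- b: c\n"):
#     ...     print(type(event).__name__)
#     StreamStartEvent
#     DocumentStartEvent
#     SequenceStartEvent
#     ScalarEvent
#     MappingStartEvent
#     ScalarEvent
#     ScalarEvent
#     MappingEndEvent
#     SequenceEndEvent
#     DocumentEndEvent
#     StreamEndEvent
#
# i.e. a block_sequence whose second block_node is a block_mapping, matching
# the productions listed above.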
__all__ = ['Parser', 'ParserError']
from error import MarkedYAMLError
from tokens import *
from events import *
from scanner import *
class ParserError(MarkedYAMLError):
pass
class Parser(object):
    # Since writing a recursive descent parser is a straightforward task, we
    # do not give many comments here.
DEFAULT_TAGS = {
u'!': u'!',
u'!!': u'tag:yaml.org,2002:',
}
def __init__(self):
self.current_event = None
self.yaml_version = None
self.tag_handles = {}
self.states = []
self.marks = []
self.state = self.parse_stream_start
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def check_event(self, *choices):
# Check the type of the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
if self.current_event is not None:
if not choices:
return True
for choice in choices:
if isinstance(self.current_event, choice):
return True
return False
def peek_event(self):
# Get the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
return self.current_event
def get_event(self):
# Get the next event and proceed further.
if self.current_event is None:
if self.state:
self.current_event = self.state()
value = self.current_event
self.current_event = None
return value
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
def parse_stream_start(self):
# Parse the stream start.
token = self.get_token()
event = StreamStartEvent(token.start_mark, token.end_mark,
encoding=token.encoding)
# Prepare the next state.
self.state = self.parse_implicit_document_start
return event
def parse_implicit_document_start(self):
# Parse an implicit document.
if not self.check_token(DirectiveToken, DocumentStartToken,
StreamEndToken):
self.tag_handles = self.DEFAULT_TAGS
token = self.peek_token()
start_mark = end_mark = token.start_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=False)
# Prepare the next state.
self.states.append(self.parse_document_end)
self.state = self.parse_block_node
return event
else:
return self.parse_document_start()
def parse_document_start(self):
# Parse any extra document end indicators.
while self.check_token(DocumentEndToken):
self.get_token()
# Parse an explicit document.
if not self.check_token(StreamEndToken):
token = self.peek_token()
start_mark = token.start_mark
version, tags = self.process_directives()
if not self.check_token(DocumentStartToken):
raise ParserError(None, None,
"expected '<document start>', but found %r"
% self.peek_token().id,
self.peek_token().start_mark)
token = self.get_token()
end_mark = token.end_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=True, version=version, tags=tags)
self.states.append(self.parse_document_end)
self.state = self.parse_document_content
else:
# Parse the end of the stream.
token = self.get_token()
event = StreamEndEvent(token.start_mark, token.end_mark)
assert not self.states
assert not self.marks
self.state = None
return event
def parse_document_end(self):
# Parse the document end.
token = self.peek_token()
start_mark = end_mark = token.start_mark
explicit = False
if self.check_token(DocumentEndToken):
token = self.get_token()
end_mark = token.end_mark
explicit = True
event = DocumentEndEvent(start_mark, end_mark,
explicit=explicit)
# Prepare the next state.
self.state = self.parse_document_start
return event
def parse_document_content(self):
if self.check_token(DirectiveToken,
DocumentStartToken, DocumentEndToken, StreamEndToken):
event = self.process_empty_scalar(self.peek_token().start_mark)
self.state = self.states.pop()
return event
else:
return self.parse_block_node()
def process_directives(self):
self.yaml_version = None
self.tag_handles = {}
while self.check_token(DirectiveToken):
token = self.get_token()
if token.name == u'YAML':
if self.yaml_version is not None:
raise ParserError(None, None,
"found duplicate YAML directive", token.start_mark)
major, minor = token.value
if major != 1:
raise ParserError(None, None,
"found incompatible YAML document (version 1.* is required)",
token.start_mark)
self.yaml_version = token.value
elif token.name == u'TAG':
handle, prefix = token.value
if handle in self.tag_handles:
raise ParserError(None, None,
"duplicate tag handle %r" % handle.encode('utf-8'),
token.start_mark)
self.tag_handles[handle] = prefix
if self.tag_handles:
value = self.yaml_version, self.tag_handles.copy()
else:
value = self.yaml_version, None
for key in self.DEFAULT_TAGS:
if key not in self.tag_handles:
self.tag_handles[key] = self.DEFAULT_TAGS[key]
return value
# block_node_or_indentless_sequence ::= ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
def parse_block_node(self):
return self.parse_node(block=True)
def parse_flow_node(self):
return self.parse_node()
def parse_block_node_or_indentless_sequence(self):
return self.parse_node(block=True, indentless_sequence=True)
def parse_node(self, block=False, indentless_sequence=False):
if self.check_token(AliasToken):
token = self.get_token()
event = AliasEvent(token.value, token.start_mark, token.end_mark)
self.state = self.states.pop()
else:
anchor = None
tag = None
start_mark = end_mark = tag_mark = None
if self.check_token(AnchorToken):
token = self.get_token()
start_mark = token.start_mark
end_mark = token.end_mark
anchor = token.value
if self.check_token(TagToken):
token = self.get_token()
tag_mark = token.start_mark
end_mark = token.end_mark
tag = token.value
elif self.check_token(TagToken):
token = self.get_token()
start_mark = tag_mark = token.start_mark
end_mark = token.end_mark
tag = token.value
if self.check_token(AnchorToken):
token = self.get_token()
end_mark = token.end_mark
anchor = token.value
if tag is not None:
handle, suffix = tag
if handle is not None:
if handle not in self.tag_handles:
raise ParserError("while parsing a node", start_mark,
"found undefined tag handle %r" % handle.encode('utf-8'),
tag_mark)
tag = self.tag_handles[handle]+suffix
else:
tag = suffix
#if tag == u'!':
# raise ParserError("while parsing a node", start_mark,
# "found non-specific tag '!'", tag_mark,
# "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
if start_mark is None:
start_mark = end_mark = self.peek_token().start_mark
event = None
implicit = (tag is None or tag == u'!')
if indentless_sequence and self.check_token(BlockEntryToken):
end_mark = self.peek_token().end_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark)
self.state = self.parse_indentless_sequence_entry
else:
if self.check_token(ScalarToken):
token = self.get_token()
end_mark = token.end_mark
if (token.plain and tag is None) or tag == u'!':
implicit = (True, False)
elif tag is None:
implicit = (False, True)
else:
implicit = (False, False)
event = ScalarEvent(anchor, tag, implicit, token.value,
start_mark, end_mark, style=token.style)
self.state = self.states.pop()
elif self.check_token(FlowSequenceStartToken):
end_mark = self.peek_token().end_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=True)
self.state = self.parse_flow_sequence_first_entry
elif self.check_token(FlowMappingStartToken):
end_mark = self.peek_token().end_mark
event = MappingStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=True)
self.state = self.parse_flow_mapping_first_key
elif block and self.check_token(BlockSequenceStartToken):
end_mark = self.peek_token().start_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=False)
self.state = self.parse_block_sequence_first_entry
elif block and self.check_token(BlockMappingStartToken):
end_mark = self.peek_token().start_mark
event = MappingStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=False)
self.state = self.parse_block_mapping_first_key
elif anchor is not None or tag is not None:
# Empty scalars are allowed even if a tag or an anchor is
# specified.
event = ScalarEvent(anchor, tag, (implicit, False), u'',
start_mark, end_mark)
self.state = self.states.pop()
else:
if block:
node = 'block'
else:
node = 'flow'
token = self.peek_token()
raise ParserError("while parsing a %s node" % node, start_mark,
"expected the node content, but found %r" % token.id,
token.start_mark)
return event
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
def parse_block_sequence_first_entry(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_block_sequence_entry()
def parse_block_sequence_entry(self):
if self.check_token(BlockEntryToken):
token = self.get_token()
if not self.check_token(BlockEntryToken, BlockEndToken):
self.states.append(self.parse_block_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_block_sequence_entry
return self.process_empty_scalar(token.end_mark)
if not self.check_token(BlockEndToken):
token = self.peek_token()
raise ParserError("while parsing a block collection", self.marks[-1],
"expected <block end>, but found %r" % token.id, token.start_mark)
token = self.get_token()
event = SequenceEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
def parse_indentless_sequence_entry(self):
if self.check_token(BlockEntryToken):
token = self.get_token()
if not self.check_token(BlockEntryToken,
KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_indentless_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_indentless_sequence_entry
return self.process_empty_scalar(token.end_mark)
token = self.peek_token()
event = SequenceEndEvent(token.start_mark, token.start_mark)
self.state = self.states.pop()
return event
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
def parse_block_mapping_first_key(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_block_mapping_key()
def parse_block_mapping_key(self):
if self.check_token(KeyToken):
token = self.get_token()
if not self.check_token(KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_block_mapping_value)
return self.parse_block_node_or_indentless_sequence()
else:
self.state = self.parse_block_mapping_value
return self.process_empty_scalar(token.end_mark)
if not self.check_token(BlockEndToken):
token = self.peek_token()
raise ParserError("while parsing a block mapping", self.marks[-1],
"expected <block end>, but found %r" % token.id, token.start_mark)
token = self.get_token()
event = MappingEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_block_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_block_mapping_key)
return self.parse_block_node_or_indentless_sequence()
else:
self.state = self.parse_block_mapping_key
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_block_mapping_key
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# Note that while production rules for both flow_sequence_entry and
# flow_mapping_entry are equal, their interpretations are different.
# For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
    #   generates an inline mapping (set syntax).
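    #
    #   For example (an added illustration, not in the original source): the
    #   flow collection "[ a, b: c ]" parses as a two-entry sequence whose
    #   second entry is a single-pair mapping, i.e. the same data as
    #   "[ a, { b: c } ]".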
def parse_flow_sequence_first_entry(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_flow_sequence_entry(first=True)
def parse_flow_sequence_entry(self, first=False):
if not self.check_token(FlowSequenceEndToken):
if not first:
if self.check_token(FlowEntryToken):
self.get_token()
else:
token = self.peek_token()
raise ParserError("while parsing a flow sequence", self.marks[-1],
"expected ',' or ']', but got %r" % token.id, token.start_mark)
if self.check_token(KeyToken):
token = self.peek_token()
event = MappingStartEvent(None, None, True,
token.start_mark, token.end_mark,
flow_style=True)
self.state = self.parse_flow_sequence_entry_mapping_key
return event
elif not self.check_token(FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry)
return self.parse_flow_node()
token = self.get_token()
event = SequenceEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_flow_sequence_entry_mapping_key(self):
token = self.get_token()
if not self.check_token(ValueToken,
FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_value)
return self.parse_flow_node()
else:
self.state = self.parse_flow_sequence_entry_mapping_value
return self.process_empty_scalar(token.end_mark)
def parse_flow_sequence_entry_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_end)
return self.parse_flow_node()
else:
self.state = self.parse_flow_sequence_entry_mapping_end
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_flow_sequence_entry_mapping_end
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
def parse_flow_sequence_entry_mapping_end(self):
self.state = self.parse_flow_sequence_entry
token = self.peek_token()
return MappingEndEvent(token.start_mark, token.start_mark)
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
def parse_flow_mapping_first_key(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_flow_mapping_key(first=True)
def parse_flow_mapping_key(self, first=False):
if not self.check_token(FlowMappingEndToken):
if not first:
if self.check_token(FlowEntryToken):
self.get_token()
else:
token = self.peek_token()
raise ParserError("while parsing a flow mapping", self.marks[-1],
"expected ',' or '}', but got %r" % token.id, token.start_mark)
if self.check_token(KeyToken):
token = self.get_token()
if not self.check_token(ValueToken,
FlowEntryToken, FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_value)
return self.parse_flow_node()
else:
self.state = self.parse_flow_mapping_value
return self.process_empty_scalar(token.end_mark)
elif not self.check_token(FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_empty_value)
return self.parse_flow_node()
token = self.get_token()
event = MappingEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_flow_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(FlowEntryToken, FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_key)
return self.parse_flow_node()
else:
self.state = self.parse_flow_mapping_key
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_flow_mapping_key
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
def parse_flow_mapping_empty_value(self):
self.state = self.parse_flow_mapping_key
return self.process_empty_scalar(self.peek_token().start_mark)
def process_empty_scalar(self, mark):
return ScalarEvent(None, None, (True, False), u'', mark, mark)
| mit | 9,169,245,480,355,322,000 | -8,376,757,338,804,677,000 | 42.365025 | 156 | 0.561467 | false |
Azulinho/flocker | flocker/cli/functional/test_sshconfig.py | 15 | 9942 | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for ``flocker.cli._sshconfig``.
"""
from os.path import expanduser
from socket import socket
from subprocess import CalledProcessError
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath, Permissions
from twisted.internet.threads import deferToThread
from .. import configure_ssh
from .._sshconfig import OpenSSHConfiguration
from ...testtools.ssh import create_ssh_server, create_ssh_agent, if_conch
try:
from twisted.conch.ssh.keys import Key
except ImportError:
pass
def goodlines(path):
"""
Return a list of lines read from ``path`` excluding those that are blank
or begin with ``#``.
:param FilePath path: The path to the file to read.
:return: A ``list`` of ``bytes`` giving good lines from the file.
"""
return list(line for line in path.getContent().splitlines()
if line and not line.strip().startswith(b"#"))
class ConfigureSSHTests(TestCase):
"""
Tests for ``configure_ssh``.
"""
def setUp(self):
self.ssh_config = FilePath(self.mktemp())
self.server = create_ssh_server(self.ssh_config)
# Create a fake local keypair
self.addCleanup(self.server.restore)
self.flocker_config = FilePath(self.mktemp())
self.config = OpenSSHConfiguration(
ssh_config_path=self.ssh_config,
flocker_path=self.flocker_config)
self.config.create_keypair()
self.configure_ssh = self.config.configure_ssh
self.agent = create_ssh_agent(self.server.key_path, testcase=self)
def test_connection_failed(self):
"""
        If an SSH connection cannot be established to the given address, an
        exception explaining the failure is raised.
"""
# Bind a port and guarantee it is not accepting connections.
blocker = socket()
blocker.bind((b"127.0.0.1", 0))
port = blocker.getsockname()[1]
exc = self.assertRaises(CalledProcessError,
self.configure_ssh, b"127.0.0.1", port)
# There are different error messages on different platforms.
# On Linux the error may be:
# 'ssh: connect to host 127.0.0.1 port 34716: Connection refused\r\n'
# On OS X the error may be:
# 'ssh: connect to host 127.0.0.1 port 56711: Operation timed out\r\n'
self.assertTrue(b"refused" in exc.output or "Operation timed out" in
exc.output)
def test_authorized_keys(self):
"""
When the SSH connection is established, the ``~/.ssh/authorized_keys``
file has the public part of the generated key pair appended to it.
"""
configuring = deferToThread(
self.configure_ssh, self.server.ip, self.server.port)
def configured(ignored):
id_rsa_pub = self.ssh_config.child(b"id_rsa_flocker.pub")
keys = self.server.home.descendant([b".ssh", b"authorized_keys"])
# Compare the contents ignoring comments for ease.
self.assertEqual(goodlines(id_rsa_pub), goodlines(keys))
configuring.addCallback(configured)
return configuring
def test_authorized_keys_already_in_place(self):
"""
When the SSH connection is established, if the
``~/.ssh/authorized_keys`` file already has the public part of the key
pair then it is not appended again.
"""
configuring = deferToThread(
self.configure_ssh, self.server.ip, self.server.port)
configuring.addCallback(
lambda ignored:
deferToThread(
self.configure_ssh, self.server.ip, self.server.port))
def configured(ignored):
id_rsa_pub = self.ssh_config.child(b"id_rsa_flocker.pub")
keys = self.server.home.descendant([b".ssh", b"authorized_keys"])
self.assertEqual(goodlines(id_rsa_pub), goodlines(keys))
configuring.addCallback(configured)
return configuring
def test_existing_authorized_keys_preserved(self):
"""
Any unrelated content in the ``~/.ssh/authorized_keys`` file is left in
place by ``configure_ssh``.
"""
existing_keys = (
b"ssh-dss AAAAB3Nz1234567890 comment\n"
b"ssh-dss AAAAB3Nz0987654321 comment\n"
)
ssh_path = self.server.home.child(b".ssh")
ssh_path.makedirs()
authorized_keys = ssh_path.child(b"authorized_keys")
authorized_keys.setContent(existing_keys)
configuring = deferToThread(
self.configure_ssh, self.server.ip, self.server.port)
def configured(ignored):
self.assertIn(existing_keys, authorized_keys.getContent())
configuring.addCallback(configured)
return configuring
def test_flocker_keypair_written(self):
"""
``configure_ssh`` writes the keypair to ``id_rsa_flocker`` and
``id_rsa_flocker.pub`` remotely.
"""
configuring = deferToThread(
self.configure_ssh, self.server.ip, self.server.port)
def configured(ignored):
expected = (
self.ssh_config.child(b"id_rsa_flocker").getContent(),
self.ssh_config.child(b"id_rsa_flocker.pub").getContent()
)
actual = (
self.flocker_config.child(b"id_rsa_flocker").getContent(),
self.flocker_config.child(b"id_rsa_flocker.pub").getContent()
)
self.assertEqual(expected, actual)
configuring.addCallback(configured)
return configuring
def test_flocker_keypair_permissions(self):
"""
``configure_ssh`` writes the remote keypair with secure permissions.
"""
configuring = deferToThread(
self.configure_ssh, self.server.ip, self.server.port)
expected_private_key_permissions = Permissions(0600)
expected_public_key_permissions = Permissions(0644)
def configured(ignored):
expected = (
expected_private_key_permissions,
expected_public_key_permissions
)
actual = (
self.flocker_config.child(b"id_rsa_flocker").getPermissions(),
self.flocker_config.child(
b"id_rsa_flocker.pub").getPermissions()
)
self.assertEqual(expected, actual)
configuring.addCallback(configured)
return configuring
class CreateKeyPairTests(TestCase):
"""
Tests for ``create_keypair``.
"""
@if_conch
def test_key_generated(self):
"""
``create_keypair`` generates a new key pair and writes it locally to
``id_rsa_flocker`` and ``id_rsa_flocker.pub``.
"""
ssh_config = FilePath(self.mktemp())
configurator = OpenSSHConfiguration(
ssh_config_path=ssh_config, flocker_path=None)
configurator.create_keypair()
id_rsa = ssh_config.child(b"id_rsa_flocker")
id_rsa_pub = ssh_config.child(b"id_rsa_flocker.pub")
key = Key.fromFile(id_rsa.path)
self.assertEqual(
# Avoid comparing the comment
key.public().toString(
type="OPENSSH", extra='test comment').split(None, 2)[:2],
id_rsa_pub.getContent().split(None, 2)[:2])
@if_conch
def test_key_not_regenerated(self):
"""
``create_keypair`` does not generate a new key pair if one can
already be found in ``id_rsa_flocker`` and ``id_rsa_flocker.pub``.
"""
ssh_config = FilePath(self.mktemp())
configurator = OpenSSHConfiguration(
ssh_config_path=ssh_config, flocker_path=None)
id_rsa = ssh_config.child(b"id_rsa_flocker")
configurator.create_keypair()
expected_key = Key.fromFile(id_rsa.path)
configurator.create_keypair()
self.assertEqual(expected_key, Key.fromFile(id_rsa.path))
def test_key_permissions(self):
"""
``create_keypair`` sets secure permissions on
``id_rsa_flocker`` and ``id_rsa_flocker.pub``.
"""
ssh_config = FilePath(self.mktemp())
configurator = OpenSSHConfiguration(
ssh_config_path=ssh_config, flocker_path=None)
configurator.create_keypair()
expected_private_key_permissions = Permissions(0600)
expected_public_key_permissions = Permissions(0644)
id_rsa = ssh_config.child(b"id_rsa_flocker")
id_rsa_pub = ssh_config.child(b"id_rsa_flocker.pub")
self.assertEqual(
(expected_private_key_permissions,
expected_public_key_permissions),
(id_rsa.getPermissions(), id_rsa_pub.getPermissions()))
class OpenSSHDefaultsTests(TestCase):
"""
    Tests for ``OpenSSHConfiguration.defaults``.
"""
def test_flocker_path(self):
"""
``OpenSSHConfiguration.defaults`` creates an instance with
``/etc/flocker`` as the Flocker configuration path.
"""
self.assertEqual(
FilePath(b"/etc/flocker"),
OpenSSHConfiguration.defaults().flocker_path)
def test_ssh_config_path(self):
"""
``OpenSSHConfiguration.defaults`` creates an instance with the current
user's SSH configuration path as the SSH configuration path.
"""
expected = FilePath(expanduser(b"~")).child(b".ssh")
self.assertEqual(
expected, OpenSSHConfiguration.defaults().ssh_config_path)
def test_configure_ssh(self):
"""
``configure_ssh`` is taken from an ``OpenSSHConfiguration`` instance
created using the ``defaults`` method.
"""
self.assertEqual(
OpenSSHConfiguration.defaults().configure_ssh, configure_ssh)
| apache-2.0 | 4,790,035,385,579,631,000 | 6,643,523,372,443,656,000 | 34.634409 | 79 | 0.615671 | false |
vrenaville/OCB | addons/hr_evaluation/__openerp__.py | 53 | 3305 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Employee Appraisals',
'version': '0.1',
'author': 'OpenERP SA',
'category': 'Human Resources',
'sequence': 31,
'website': 'https://www.odoo.com/page/appraisal',
'summary': 'Periodical Evaluations, Appraisals, Surveys',
'images': ['images/hr_evaluation_analysis.jpeg',
'images/hr_evaluation.jpeg',
'images/hr_interview_requests.jpeg'],
'depends': ['hr', 'calendar', 'survey'],
'description': """
Periodic employee evaluations and appraisals
==============================================
This application lets you keep the motivational process going by running periodic evaluations of your employees' performance. Regular assessment of human resources benefits your people as well as your organization.
An evaluation plan can be assigned to each employee. These plans define the frequency and the way you manage your periodic personal evaluations. You will be able to define steps and attach interview forms to each step.
It manages several types of evaluations: bottom-up, top-down, self-evaluation and the final evaluation by the manager.
Key Features
------------
* Ability to create employee evaluations.
* An evaluation can be created by an employee for subordinates and juniors, as well as for his or her manager.
* The evaluation is done according to a plan in which various surveys can be created. Each survey can be answered by a particular level in the employee hierarchy. The final review and evaluation are done by the manager.
* Every evaluation filled in by employees can be viewed as a PDF form.
* Interview requests are generated automatically by OpenERP according to employees' evaluation plans. Each user receives automatic emails and requests to perform a periodic evaluation of their colleagues.
""",
"data": [
'security/ir.model.access.csv',
'security/hr_evaluation_security.xml',
'hr_evaluation_view.xml',
'report/hr_evaluation_report_view.xml',
'survey_data_appraisal.xml',
'hr_evaluation_data.xml',
'hr_evaluation_installer.xml',
],
"demo": ["hr_evaluation_demo.xml"],
# 'test': [
# 'test/test_hr_evaluation.yml',
# 'test/hr_evalution_demo.yml',
# ],
'auto_install': False,
'installable': True,
'application': True,
}
| agpl-3.0 | 1,222,431,746,683,419,400 | -9,071,671,415,077,970,000 | 46.898551 | 224 | 0.662935 | false |
anksp21/Community-Zenpacks | ZenPacks.ZenSystems.ApcUps/ZenPacks/ZenSystems/ApcUps/modeler/plugins/ApcUpsDeviceMap.py | 2 | 2850 | ##########################################################################
# Author: Jane Curry, [email protected]
# Date: March 28th, 2011
# Revised:
#
# ApcUpsDevice modler plugin
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
##########################################################################
__doc__ = """ApcUpsDeviceMap
Gather information from APC UPS devices.
"""
from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetMap
from Products.DataCollector.plugins.DataMaps import MultiArgs
import re
class ApcUpsDeviceMap(SnmpPlugin):
maptype = "ApcUpsDeviceMap"
snmpGetMap = GetMap({
'.1.3.6.1.4.1.318.1.1.1.1.1.1.0': 'setHWProductKey',
'.1.3.6.1.4.1.318.1.1.1.1.2.1.0': 'setOSProductKey',
'.1.3.6.1.4.1.318.1.1.1.1.2.3.0': 'setHWSerialNumber',
'.1.3.6.1.4.1.318.1.1.1.2.2.5.0': 'numBatteryPacks',
'.1.3.6.1.4.1.318.1.1.1.2.2.6.0': 'numBadBatteryPacks',
'.1.3.6.1.4.1.318.1.1.1.4.1.1.0': 'basicOutputStatus',
})
def condition(self, device, log):
"""only for boxes with proper object id
"""
return device.snmpOid.startswith(".1.3.6.1.4.1.318.1.3.2")
def process(self, device, results, log):
"""collect snmp information from this device"""
log.info('processing %s for device %s', self.name(), device.id)
getdata, tabledata = results
om = self.objectMap(getdata)
manufacturer = "American Power Conversion Corp."
om.setHWProductKey = MultiArgs(om.setHWProductKey, manufacturer)
# log.debug("HWProductKey=%s Manufacturer = %s" % (om.setHWProductKey, manufacturer))
om.setOSProductKey = MultiArgs(om.setOSProductKey, manufacturer)
# log.debug("OSProductKey=%s Manufacturer = %s" % (om.setOSProductKey, manufacturer))
if (om.basicOutputStatus < 1 or om.basicOutputStatus > 12):
om.basicOutputStatus = 1
index = om.basicOutputStatus
om.basicOutputStatusText = self.basicOutputStatusMap[index]
return om
basicOutputStatusMap = { 1: 'Unknown',
2: 'onLine',
3: 'onBattery',
4: 'onSmartBoost',
5: 'timedSleeping',
6: 'softwareBypass',
7: 'off',
8: 'rebooting',
9: 'switchedBypass',
10: 'hardwareFailureBypass',
11: 'sleepingUntilPowerReturn',
12: 'onSmartTrim',
}
| gpl-2.0 | -4,894,862,989,463,930,000 | 8,110,676,406,375,234,000 | 39.140845 | 92 | 0.522807 | false |
shepdelacreme/ansible | test/units/parsing/utils/test_jsonify.py | 113 | 1491 | # -*- coding: utf-8 -*-
# (c) 2016, James Cammarata <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from ansible.parsing.utils.jsonify import jsonify
class TestJsonify(unittest.TestCase):
def test_jsonify_simple(self):
self.assertEqual(jsonify(dict(a=1, b=2, c=3)), '{"a": 1, "b": 2, "c": 3}')
def test_jsonify_simple_format(self):
res = jsonify(dict(a=1, b=2, c=3), format=True)
cleaned = "".join([x.strip() for x in res.splitlines()])
self.assertEqual(cleaned, '{"a": 1,"b": 2,"c": 3}')
def test_jsonify_unicode(self):
self.assertEqual(jsonify(dict(toshio=u'くらとみ')), u'{"toshio": "くらとみ"}')
def test_jsonify_empty(self):
self.assertEqual(jsonify(None), '{}')
| gpl-3.0 | 2,522,996,440,571,399,700 | -4,369,476,091,601,852,400 | 36.820513 | 82 | 0.68678 | false |
sharthee/ProgrammingAssignment2 | labs/lab2/cs109style.py | 38 | 1293 | from __future__ import print_function
from IPython.core.display import HTML
from matplotlib import rcParams
#colorbrewer2 Dark2 qualitative color table
dark2_colors = [(0.10588235294117647, 0.6196078431372549, 0.4666666666666667),
(0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
(0.4588235294117647, 0.4392156862745098, 0.7019607843137254),
(0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
(0.4, 0.6509803921568628, 0.11764705882352941),
(0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
(0.6509803921568628, 0.4627450980392157, 0.11372549019607843),
(0.4, 0.4, 0.4)]
def customize_mpl():
"""Tweak matplotlib visual style"""
print("Setting custom matplotlib visual style")
rcParams['figure.figsize'] = (10, 6)
rcParams['figure.dpi'] = 150
rcParams['axes.color_cycle'] = dark2_colors
rcParams['lines.linewidth'] = 2
rcParams['axes.grid'] = True
rcParams['axes.facecolor'] = '#eeeeee'
rcParams['font.size'] = 14
rcParams['patch.edgecolor'] = 'none'
def customize_css():
print("Setting custom CSS for the IPython Notebook")
styles = open('custom.css', 'r').read()
return HTML(styles)
| mit | 5,128,059,927,308,924,000 | 1,168,207,053,216,665,600 | 38.181818 | 79 | 0.66512 | false |
40223240/cadb_g3_0420 | static/Brython3.1.1-20150328-091302/Lib/logging/__init__.py | 733 | 66279 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <[email protected]>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
if hasattr(sys, '_getframe'):
currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
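# For example (an added, illustrative comment):
#
#     >>> addLevelName(25, 'NOTICE')
#     >>> getLevelName(25)
#     'NOTICE'
#     >>> getLevelName(15)
#     'Level 15'
#
# Because addLevelName() registers the mapping in both directions, the string
# 'NOTICE' is also accepted wherever _checkLevel() resolves a level.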
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
_lock = threading.RLock()
else: #pragma: no cover
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None, sinfo=None, **kwargs):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.stack_info = sinfo
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - int(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and threading:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
else: # pragma: no cover
self.thread = None
self.threadName = None
if not logMultiprocessing: # pragma: no cover
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except Exception: #pragma: no cover
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
msg = str(self.msg)
if self.args:
msg = msg % self.args
return msg
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
"""
Set the factory to be used when instantiating a log record.
:param factory: A callable which will be called to instantiate
a log record.
"""
global _logRecordFactory
_logRecordFactory = factory
def getLogRecordFactory():
"""
Return the factory to be used when instantiating a log record.
"""
return _logRecordFactory
def makeLogRecord(dict):
"""
    Make a LogRecord whose attributes are defined by the specified dictionary.
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
default_format = '%(message)s'
asctime_format = '%(asctime)s'
asctime_search = '%(asctime)'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
def usesTime(self):
return self._fmt.find(self.asctime_search) >= 0
def format(self, record):
return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
default_format = '{message}'
asctime_format = '{asctime}'
asctime_search = '{asctime'
def format(self, record):
return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
default_format = '${message}'
asctime_format = '${asctime}'
asctime_search = '${asctime}'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
self._tpl = Template(self._fmt)
def usesTime(self):
fmt = self._fmt
return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
def format(self, record):
return self._tpl.substitute(**record.__dict__)
_STYLES = {
'%': PercentStyle,
'{': StrFormatStyle,
'$': StringTemplateStyle
}
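# Added illustration (not part of the module): the three styles express the
# same layout in different syntaxes, selected by the ``style`` argument of
# Formatter:
#
#     '%'  ->  '%(levelname)s:%(name)s:%(message)s'
#     '{'  ->  '{levelname}:{name}:{message}'
#     '$'  ->  '${levelname}:${name}:${message}'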
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
default value of "%s(message)" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None, style='%'):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
.. versionchanged: 3.2
Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style](fmt)
self._fmt = self._style._fmt
self.datefmt = datefmt
default_time_format = '%Y-%m-%d %H:%M:%S'
default_msec_format = '%s,%03d'
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime(self.default_time_format, ct)
s = self.default_msec_format % (t, record.msecs)
return s
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
def formatMessage(self, record):
return self._style.format(record)
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime()), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
s = s + record.exc_text
if record.stack_info:
if s[-1:] != "\n":
s = s + "\n"
s = s + self.formatStack(record.stack_info)
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
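# --- Editor's illustrative sketch (not part of the original module) ----------
# Minimal Formatter usage tying together the format-string table and the
# 'converter' hook documented in formatTime() above; the format and date
# strings are examples only ('time' is already imported by this module):
#
#     fmt = Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s',
#                     datefmt='%Y-%m-%dT%H:%M:%S')
#     fmt.converter = time.gmtime        # GMT for this instance only
#     Formatter.converter = time.gmtime  # GMT for every formatter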
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
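# --- Editor's illustrative sketch (not part of the original module) ----------
# A BufferingFormatter is typically subclassed to wrap a whole batch of
# records, e.g. for a buffered email handler; the header text is invented:
#
#     class BatchFormatter(BufferingFormatter):
#         def formatHeader(self, records):
#             return '--- %d record(s) ---\n' % len(records)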
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
elif self.name == record.name:
return True
elif record.name.find(self.name, 0, self.nlen) != 0:
return False
return (record.name[self.nlen] == ".")
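# --- Editor's illustrative sketch (not part of the original module) ----------
# The base Filter passes only the named logger and its children; the names
# below are hypothetical and 'handler' is assumed to exist:
#
#     f = Filter('A.B')
#     handler.addFilter(f)
#     # records from 'A.B' and 'A.B.C' pass; 'A.BB' and 'B' are dropped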
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
        .. versionchanged:: 3.2
Allow filters to be just callables.
"""
rv = True
for f in self.filters:
if hasattr(f, 'filter'):
result = f.filter(record)
else:
result = f(record) # assume callable - will raise if not
if not result:
rv = False
break
return rv
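# --- Editor's illustrative sketch (not part of the original module) ----------
# Since 3.2 a plain callable may be used as a filter; its return value decides
# whether the record survives ('some_logger' is assumed to exist):
#
#     def only_errors(record):
#         return record.levelno >= ERROR
#     some_logger.addFilter(only_errors)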
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. If _acquireLock is None, assume this is the case and do
# nothing.
if (_acquireLock is not None and _handlerList is not None and
_releaseLock is not None):
_acquireLock()
try:
if wr in _handlerList:
_handlerList.remove(wr)
finally:
_releaseLock()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if threading:
self.lock = threading.RLock()
else: #pragma: no cover
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler. level must be an int or a str.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError: #pragma: no cover
pass # see issue 5971
finally:
del ei
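# --- Editor's illustrative sketch (not part of the original module) ----------
# The smallest useful Handler subclass only overrides emit(); formatting and
# locking are inherited. This one just collects formatted records in a list:
#
#     class ListHandler(Handler):
#         def __init__(self, level=NOTSET):
#             Handler.__init__(self, level)
#             self.records = []
#         def emit(self, record):
#             self.records.append(self.format(record))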
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
terminator = '\n'
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
return open(self.baseFilename, self.mode, encoding=self.encoding)
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
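# --- Editor's illustrative sketch (not part of the original module) ----------
# With delay=True the file is only opened on the first emit(), so no empty
# log file is created; the path and format below are examples only:
#
#     fh = FileHandler('/tmp/app.log', mode='a', encoding='utf-8', delay=True)
#     fh.setFormatter(Formatter('%(asctime)s %(levelname)s %(message)s'))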
class _StderrHandler(StreamHandler):
"""
This class is like a StreamHandler using sys.stderr, but always uses
whatever sys.stderr is currently set to rather than the value of
sys.stderr at handler construction time.
"""
def __init__(self, level=NOTSET):
"""
Initialize the handler.
"""
Handler.__init__(self, level)
@property
def stream(self):
return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
if alogger not in self.loggerMap:
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
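# --- Editor's illustrative sketch (not part of the original module) ----------
# A replacement logger class must derive from Logger and needs no extra
# constructor arguments; the TRACE level and method below are invented:
#
#     TRACE = 5
#     class TraceLogger(Logger):
#         def trace(self, msg, *args, **kwargs):
#             if self.isEnabledFor(TRACE):
#                 self._log(TRACE, msg, args, **kwargs)
#     setLoggerClass(TraceLogger)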
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = False
self.loggerDict = {}
self.loggerClass = None
self.logRecordFactory = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def setLogRecordFactory(self, factory):
"""
Set the factory to be used when instantiating a log record with this
Manager.
"""
self.logRecordFactory = factory
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(name)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = True
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
sinfo = None
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, sinfo = self.findCaller(stack_info)
except ValueError: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, sinfo)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0):
if lastResort:
if record.levelno >= lastResort.level:
lastResort.handle(record)
elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
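# --- Editor's illustrative sketch (not part of the original module) ----------
# Typical Logger calls: lazy %-style arguments plus exc_info for tracebacks;
# the logger name is a placeholder:
#
#     log = getLogger('app.db')
#     log.debug('connected to %s in %.2fs', 'primary', 0.25)
#     try:
#         1 / 0
#     except ZeroDivisionError:
#         log.error('query failed', exc_info=True)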
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger.
"""
self.log(DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger.
"""
self.log(INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger.
"""
self.log(WARNING, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger.
"""
kwargs["exc_info"] = True
self.log(ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger.
"""
self.log(CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def setLevel(self, level):
"""
Set the specified level on the underlying logger.
"""
self.logger.setLevel(level)
def getEffectiveLevel(self):
"""
Get the effective level for the underlying logger.
"""
return self.logger.getEffectiveLevel()
def hasHandlers(self):
"""
See if the underlying logger has any handlers.
"""
return self.logger.hasHandlers()
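# --- Editor's illustrative sketch (not part of the original module) ----------
# A LoggerAdapter usually overrides process() to weave its 'extra' context
# into each message; the key name 'connid' is invented:
#
#     class ConnAdapter(LoggerAdapter):
#         def process(self, msg, kwargs):
#             return '[%s] %s' % (self.extra['connid'], msg), kwargs
#     adapter = ConnAdapter(getLogger('net'), {'connid': 'c42'})
#     adapter.info('handshake complete')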
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers If specified, this should be an iterable of already created
handlers, which will be added to the root handler. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
.. versionchanged:: 3.2
Added the ``style`` parameter.
.. versionchanged:: 3.3
Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
incompatible arguments (e.g. ``handlers`` specified together with
``filename``/``filemode``, or ``filename``/``filemode`` specified
together with ``stream``, or ``handlers`` specified together with
       ``stream``).
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
handlers = kwargs.get("handlers")
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
if handlers is None:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
h = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
h = StreamHandler(stream)
handlers = [h]
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
style = kwargs.get("style", '%')
fmt = Formatter(fs, dfs, style)
for h in handlers:
if h.formatter is None:
h.setFormatter(fmt)
root.addHandler(h)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
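# --- Editor's illustrative sketch (not part of the original module) ----------
# One-shot configuration for a simple script; filename and format are examples
# only, and combining 'stream' with 'filename' would raise ValueError:
#
#     basicConfig(filename='example.log', filemode='w', level=DEBUG,
#                 format='{asctime} {levelname} {message}', style='{')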
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger. If the logger
has no handlers, call basicConfig() to add a console handler with a
pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger, with exception
information. If the logger has no handlers, basicConfig() is called to add
a console handler with a pre-defined format.
"""
kwargs['exc_info'] = True
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger. If
the logger has no handlers, call basicConfig() to add a console handler
with a pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
"""Stub."""
def emit(self, record):
"""Stub."""
def createLock(self):
self.lock = None
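# --- Editor's illustrative sketch (not part of the original module) ----------
# Library code attaches a NullHandler to its top-level logger so that an
# unconfigured application never triggers the "no handlers" warning; 'mylib'
# is a placeholder package name:
#
#     getLogger('mylib').addHandler(NullHandler())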
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
Implementation of showwarnings which redirects to logging, which will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
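# --- Editor's illustrative sketch (not part of the original module) ----------
#     captureWarnings(True)   # route warnings.warn() messages to 'py.warnings'
#     captureWarnings(False)  # restore the original warnings behaviour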
| gpl-3.0 | -3,403,772,347,364,899,300 | 8,637,779,078,902,313,000 | 33.737421 | 89 | 0.585404 | false |
liosha2007/temporary-groupdocs-python-sdk | groupdocs/models/SignatureEnvelopeFieldLocationSettings.py | 2 | 1794 | #!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class SignatureEnvelopeFieldLocationSettings:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'page': 'int',
'locationX': 'float',
'locationY': 'float',
'locationWidth': 'float',
'locationHeight': 'float',
'fontName': 'str',
'fontColor': 'str',
'fontSize': 'float',
'fontBold': 'bool',
'fontItalic': 'bool',
'fontUnderline': 'bool',
'forceNewField': 'bool',
'align': 'int'
}
self.page = None # int
self.locationX = None # float
self.locationY = None # float
self.locationWidth = None # float
self.locationHeight = None # float
self.fontName = None # str
self.fontColor = None # str
self.fontSize = None # float
self.fontBold = None # bool
self.fontItalic = None # bool
self.fontUnderline = None # bool
self.forceNewField = None # bool
self.align = None # int
| apache-2.0 | 3,567,907,551,812,110,300 | 3,325,502,836,564,764,000 | 31.035714 | 77 | 0.593088 | false |
matthew-tucker/mne-python | mne/io/tests/test_reference.py | 10 | 13045 | # Authors: Marijn van Vliet <[email protected]>
# Alexandre Gramfort <[email protected]>
# Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
import warnings
import os.path as op
import numpy as np
from nose.tools import assert_true, assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_allclose
from mne import pick_types, Evoked, Epochs, read_events
from mne.io.constants import FIFF
from mne.io import (set_eeg_reference, set_bipolar_reference,
add_reference_channels)
from mne.io.proj import _has_eeg_average_ref_proj
from mne.io.reference import _apply_reference
from mne.datasets import testing
from mne.io import Raw
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
eve_fname = op.join(data_dir, 'sample_audvis_trunc_raw-eve.fif')
ave_fname = op.join(data_dir, 'sample_audvis_trunc-ave.fif')
def _test_reference(raw, reref, ref_data, ref_from):
"""Helper function to test whether a reference has been correctly
applied."""
# Separate EEG channels from other channel types
picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
picks_other = pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=True, exclude='bads')
    # Calculate indices of reference channels
picks_ref = [raw.ch_names.index(ch) for ch in ref_from]
# Get data
if isinstance(raw, Evoked):
_data = raw.data
_reref = reref.data
else:
_data = raw._data
_reref = reref._data
# Check that the ref has been properly computed
assert_array_equal(ref_data, _data[..., picks_ref, :].mean(-2))
# Get the raw EEG data and other channel data
raw_eeg_data = _data[..., picks_eeg, :]
raw_other_data = _data[..., picks_other, :]
# Get the rereferenced EEG data
reref_eeg_data = _reref[..., picks_eeg, :]
reref_other_data = _reref[..., picks_other, :]
# Undo rereferencing of EEG channels
if isinstance(raw, Epochs):
unref_eeg_data = reref_eeg_data + ref_data[:, np.newaxis, :]
else:
unref_eeg_data = reref_eeg_data + ref_data
# Check that both EEG data and other data is the same
assert_allclose(raw_eeg_data, unref_eeg_data, 1e-6, atol=1e-15)
assert_allclose(raw_other_data, reref_other_data, 1e-6, atol=1e-15)
@testing.requires_testing_data
def test_apply_reference():
"""Test base function for rereferencing"""
raw = Raw(fif_fname, preload=True)
# Rereference raw data by creating a copy of original data
reref, ref_data = _apply_reference(raw, ref_from=['EEG 001', 'EEG 002'],
copy=True)
assert_true(reref.info['custom_ref_applied'])
_test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])
# The CAR reference projection should have been removed by the function
assert_true(not _has_eeg_average_ref_proj(reref.info['projs']))
# Test that disabling the reference does not break anything
reref, ref_data = _apply_reference(raw, [])
assert_array_equal(raw._data, reref._data)
# Test that data is modified in place when copy=False
reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002'],
copy=False)
assert_true(raw is reref)
# Test re-referencing Epochs object
raw = Raw(fif_fname, preload=False, add_eeg_ref=False)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
reref, ref_data = _apply_reference(epochs, ref_from=['EEG 001', 'EEG 002'],
copy=True)
assert_true(reref.info['custom_ref_applied'])
_test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002'])
# Test re-referencing Evoked object
evoked = epochs.average()
reref, ref_data = _apply_reference(evoked, ref_from=['EEG 001', 'EEG 002'],
copy=True)
assert_true(reref.info['custom_ref_applied'])
_test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002'])
# Test invalid input
raw_np = Raw(fif_fname, preload=False)
assert_raises(RuntimeError, _apply_reference, raw_np, ['EEG 001'])
@testing.requires_testing_data
def test_set_eeg_reference():
"""Test rereference eeg data"""
raw = Raw(fif_fname, preload=True)
raw.info['projs'] = []
# Test setting an average reference
assert_true(not _has_eeg_average_ref_proj(raw.info['projs']))
reref, ref_data = set_eeg_reference(raw)
assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
assert_true(ref_data is None)
# Test setting an average reference when one was already present
reref, ref_data = set_eeg_reference(raw, copy=False)
assert_true(ref_data is None)
# Rereference raw data by creating a copy of original data
reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True)
assert_true(reref.info['custom_ref_applied'])
_test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])
# Test that data is modified in place when copy=False
reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],
copy=False)
assert_true(raw is reref)
@testing.requires_testing_data
def test_set_bipolar_reference():
"""Test bipolar referencing"""
raw = Raw(fif_fname, preload=True)
reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002', 'bipolar',
{'kind': FIFF.FIFFV_EOG_CH,
'extra': 'some extra value'})
assert_true(reref.info['custom_ref_applied'])
# Compare result to a manual calculation
a = raw.pick_channels(['EEG 001', 'EEG 002'], copy=True)
a = a._data[0, :] - a._data[1, :]
b = reref.pick_channels(['bipolar'], copy=True)._data[0, :]
assert_allclose(a, b)
# Original channels should be replaced by a virtual one
assert_true('EEG 001' not in reref.ch_names)
assert_true('EEG 002' not in reref.ch_names)
assert_true('bipolar' in reref.ch_names)
# Check channel information
bp_info = reref.info['chs'][reref.ch_names.index('bipolar')]
an_info = reref.info['chs'][raw.ch_names.index('EEG 001')]
for key in bp_info:
if key == 'loc' or key == 'eeg_loc':
assert_array_equal(bp_info[key], 0)
elif key == 'coil_type':
assert_equal(bp_info[key], FIFF.FIFFV_COIL_EEG_BIPOLAR)
elif key == 'kind':
assert_equal(bp_info[key], FIFF.FIFFV_EOG_CH)
else:
assert_equal(bp_info[key], an_info[key])
assert_equal(bp_info['extra'], 'some extra value')
# Minimalist call
reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002')
assert_true('EEG 001-EEG 002' in reref.ch_names)
# Test creating a bipolar reference that doesn't involve EEG channels:
# it should not set the custom_ref_applied flag
reref = set_bipolar_reference(raw, 'MEG 0111', 'MEG 0112',
ch_info={'kind': FIFF.FIFFV_MEG_CH})
assert_true(not reref.info['custom_ref_applied'])
assert_true('MEG 0111-MEG 0112' in reref.ch_names)
# Test a battery of invalid inputs
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', ['EEG 002', 'EEG 003'], 'bipolar')
assert_raises(ValueError, set_bipolar_reference, raw,
['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar')
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', 'EEG 002', ['bipolar1', 'bipolar2'])
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', 'EEG 002', 'bipolar',
ch_info=[{'foo': 'bar'}, {'foo': 'bar'}])
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', 'EEG 002', ch_name='EEG 003')
@testing.requires_testing_data
def test_add_reference():
raw = Raw(fif_fname, preload=True)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
# check if channel already exists
assert_raises(ValueError, add_reference_channels,
raw, raw.info['ch_names'][0])
# add reference channel to Raw
raw_ref = add_reference_channels(raw, 'Ref', copy=True)
assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
orig_nchan = raw.info['nchan']
raw = add_reference_channels(raw, 'Ref', copy=False)
assert_array_equal(raw._data, raw_ref._data)
assert_equal(raw.info['nchan'], orig_nchan + 1)
ref_idx = raw.ch_names.index('Ref')
ref_data, _ = raw[ref_idx]
assert_array_equal(ref_data, 0)
# add two reference channels to Raw
raw = Raw(fif_fname, preload=True)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
assert_raises(ValueError, add_reference_channels, raw,
raw.info['ch_names'][0])
raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)
assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)
assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
raw = add_reference_channels(raw, ['M1', 'M2'], copy=False)
ref_idx = raw.ch_names.index('M1')
ref_idy = raw.ch_names.index('M2')
ref_data, _ = raw[[ref_idx, ref_idy]]
assert_array_equal(ref_data, 0)
# add reference channel to epochs
raw = Raw(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)
assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)
ref_idx = epochs_ref.ch_names.index('Ref')
ref_data = epochs_ref.get_data()[:, ref_idx, :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
assert_array_equal(epochs.get_data()[:, picks_eeg, :],
epochs_ref.get_data()[:, picks_eeg, :])
# add two reference channels to epochs
raw = Raw(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)
assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)
ref_idx = epochs_ref.ch_names.index('M1')
ref_idy = epochs_ref.ch_names.index('M2')
ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
assert_array_equal(epochs.get_data()[:, picks_eeg, :],
epochs_ref.get_data()[:, picks_eeg, :])
# add reference channel to evoked
raw = Raw(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
evoked = epochs.average()
evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)
assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1)
ref_idx = evoked_ref.ch_names.index('Ref')
ref_data = evoked_ref.data[ref_idx, :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
assert_array_equal(evoked.data[picks_eeg, :],
evoked_ref.data[picks_eeg, :])
# add two reference channels to evoked
raw = Raw(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
evoked = epochs.average()
evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)
assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)
ref_idx = evoked_ref.ch_names.index('M1')
ref_idy = evoked_ref.ch_names.index('M2')
ref_data = evoked_ref.data[[ref_idx, ref_idy], :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
assert_array_equal(evoked.data[picks_eeg, :],
evoked_ref.data[picks_eeg, :])
# Test invalid inputs
raw_np = Raw(fif_fname, preload=False)
assert_raises(RuntimeError, add_reference_channels, raw_np, ['Ref'])
assert_raises(ValueError, add_reference_channels, raw, 1)
| bsd-3-clause | 5,364,216,937,736,114,000 | 2,384,127,057,814,831,600 | 41.491857 | 79 | 0.62913 | false |
valkjsaaa/sl4a | python/src/Lib/lib2to3/fixes/fix_intern.py | 49 | 1368 | # Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, Attr, touch_import
class FixIntern(fixer_base.BaseFix):
PATTERN = """
power< 'intern'
trailer< lpar='('
( not(arglist | argument<any '=' any>) obj=any
| obj=arglist<(not argument<any '=' any>) any ','> )
rpar=')' >
after=any*
>
"""
def transform(self, node, results):
syms = self.syms
obj = results["obj"].clone()
if obj.type == syms.arglist:
newarglist = obj.clone()
else:
newarglist = pytree.Node(syms.arglist, [obj.clone()])
after = results["after"]
if after:
after = [n.clone() for n in after]
new = pytree.Node(syms.power,
Attr(Name("sys"), Name("intern")) +
[pytree.Node(syms.trailer,
[results["lpar"].clone(),
newarglist,
results["rpar"].clone()])] + after)
new.set_prefix(node.get_prefix())
touch_import(None, 'sys', node)
return new
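# --- Editor's illustrative note (not part of the original fixer) -------------
# Effect of this fixer on 2.x source (touch_import also adds "import sys"):
#     intern(s)        ->  sys.intern(s)
#     intern(a + b)    ->  sys.intern(a + b)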
| apache-2.0 | -8,802,105,844,741,314,000 | 3,086,107,879,016,203,000 | 30.090909 | 75 | 0.483187 | false |
urrego093/proyecto_mv | applications/welcome/languages/pt-br.py | 88 | 7249 | # -*- coding: utf-8 -*-
{
'!langcode!': 'pt-br',
'!langname!': 'Português (do Brasil)',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "campo1=\'novovalor\'". Você não pode atualizar ou apagar os resultados de um JOIN',
'%s %%{row} deleted': '%s linhas apagadas',
'%s %%{row} updated': '%s linhas atualizadas',
'%s selected': '%s selecionado',
'%Y-%m-%d': '%d-%m-%Y',
'%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S',
'About': 'Sobre',
'Access Control': 'Controle de Acesso',
'Administrative Interface': 'Interface Administrativa',
'@markmin\x01An error occured, please [[reload %s]] the page': 'Ocorreu um erro, por favor [[reload %s]] a página',
'Administrative interface': 'Interface administrativa',
'Ajax Recipes': 'Receitas de Ajax',
'appadmin is disabled because insecure channel': 'Administração desativada porque o canal não é seguro',
'Are you sure you want to delete this object?': 'Você está certo que deseja apagar este objeto?',
'Available Databases and Tables': 'Bancos de dados e tabelas disponíveis',
'Buy this book': 'Compre o livro',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Chaves de cache',
'Cannot be empty': 'Não pode ser vazio',
'change password': 'modificar senha',
'Check to delete': 'Marque para apagar',
'Clear CACHE?': 'Limpar CACHE?',
'Clear DISK': 'Limpar DISCO',
'Clear RAM': 'Limpar memória RAM',
'Client IP': 'IP do cliente',
'Community': 'Comunidade',
'Components and Plugins': 'Componentes e Plugins',
'Controller': 'Controlador',
'Copyright': 'Copyright',
'Current request': 'Requisição atual',
'Current response': 'Resposta atual',
'Current session': 'Sessão atual',
'customize me!': 'Personalize-me!',
'data uploaded': 'dados enviados',
'Database': 'banco de dados',
'Database %s select': 'Selecionar banco de dados %s',
'db': 'bd',
'DB Model': 'Modelo BD',
'Delete:': 'Apagar:',
'Demo': 'Demo',
'Deployment Recipes': 'Receitas de deploy',
'Description': 'Descrição',
'design': 'projeto',
'DISK': 'DISK',
'Disk Cache Keys': 'Chaves do Cache de Disco',
'Disk Cleared': 'Disco Limpo',
'Documentation': 'Documentação',
"Don't know what to do?": "Não sabe o que fazer?",
'done!': 'concluído!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit': 'Editar',
'Edit current record': 'Editar o registro atual',
'edit profile': 'editar perfil',
'Edit This App': 'Editar esta aplicação',
'Email and SMS': 'Email e SMS',
'Errors': 'Erros',
'Enter an integer between %(min)g and %(max)g': 'Informe um valor inteiro entre %(min)g e %(max)g',
'export as csv file': 'exportar como um arquivo csv',
'FAQ': 'Perguntas frequentes',
'First name': 'Nome',
'Forms and Validators': 'Formulários e Validadores',
'Free Applications': 'Aplicações gratuitas',
'Group ID': 'ID do Grupo',
'Groups': 'Grupos',
'Hello World': 'Olá Mundo',
'Home': 'Principal',
'How did you get here?': 'Como você chegou aqui?',
'import': 'importar',
'Import/Export': 'Importar/Exportar',
'Index': 'Início',
'insert new': 'inserir novo',
'insert new %s': 'inserir novo %s',
'Internal State': 'Estado Interno',
'Introduction': 'Introdução',
'Invalid email': 'Email inválido',
'Invalid Query': 'Consulta Inválida',
'invalid request': 'requisição inválida',
'Key': 'Chave',
'Last name': 'Sobrenome',
'Layout': 'Layout',
'Layout Plugins': 'Plugins de Layout',
'Layouts': 'Layouts',
'Live chat': 'Chat ao vivo',
'Live Chat': 'Chat ao vivo',
'login': 'Entrar',
'Login': 'Autentique-se',
'logout': 'Sair',
'Lost Password': 'Esqueceu sua senha?',
'lost password?': 'esqueceu sua senha?',
'Main Menu': 'Menu Principal',
'Manage Cache': 'Gerenciar Cache',
'Menu Model': 'Modelo de Menu',
'My Sites': 'Meus sites',
'Name': 'Nome',
'New Record': 'Novo Registro',
'new record inserted': 'novo registro inserido',
'next 100 rows': 'próximas 100 linhas',
'No databases in this application': 'Não há bancos de dados nesta aplicação',
'Object or table name': 'Nome do objeto do da tabela',
'Online examples': 'Exemplos online',
'or import from csv file': 'ou importar de um arquivo csv',
'Origin': 'Origem',
'Other Plugins': 'Outros Plugins',
'Other Recipes': 'Outras Receitas',
'Overview': 'Visão Geral',
'Password': 'Senha',
'Plugins': 'Plugins',
'Powered by': 'Desenvolvido com',
'Preface': 'Prefácio',
'previous 100 rows': '100 linhas anteriores',
'Python': 'Python',
'Query:': 'Consulta:',
'Quick Examples': 'Exemplos rápidos',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Receitas',
'Record': 'Registro',
'record does not exist': 'registro não existe',
'Record ID': 'ID do Registro',
'Record id': 'id do registro',
'Register': 'Registre-se',
'register': 'Registre-se',
'Registration key': 'Chave de registro',
'Reset Password key': 'Resetar chave de senha',
'Resources': 'Recursos',
'Role': 'Papel',
'Registration identifier': 'Idenficador de registro',
'Rows in Table': 'Linhas na tabela',
'Rows selected': 'Linhas selecionadas',
'Semantic': 'Semântico',
'Services': 'Serviços',
'Size of cache:': 'Tamanho do cache:',
'state': 'estado',
'Statistics': 'Estatísticas',
'Stylesheet': 'Folha de estilo',
'submit': 'enviar',
'Support': 'Suporte',
'Sure you want to delete this object?': 'Está certo(a) que deseja apagar este objeto?',
'Table': 'Tabela',
'Table name': 'Nome da tabela',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'Uma "consulta" é uma condição como "db.tabela1.campo1==\'valor\'". Expressões como "db.tabela1.campo1==db.tabela2.campo2" resultam em um JOIN SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'A saída do arquivo é um dicionário que foi apresentado pela visão %s',
'The Views': 'As views',
'This App': 'Esta aplicação',
'This email already has an account': 'Este email já tem uma conta',
'This is a copy of the scaffolding application': 'Isto é uma cópia da aplicação modelo',
'Time in Cache (h:m:s)': 'Tempo em Cache (h:m:s)',
'Timestamp': 'Timestamp',
'Twitter': 'Twitter',
'unable to parse csv file': 'não foi possível analisar arquivo csv',
'Update:': 'Atualizar:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, e ~(...) para NOT para construir consultas mais complexas.',
'User ID': 'ID do Usuário',
'User Voice': 'Opinião dos usuários',
'Videos': 'Vídeos',
'View': 'Visualização',
'Web2py': 'Web2py',
'Welcome': 'Bem-vindo',
'Welcome %s': 'Bem-vindo %s',
'Welcome to web2py': 'Bem-vindo ao web2py',
'Welcome to web2py!': 'Bem-vindo ao web2py!',
'Which called the function %s located in the file %s': 'Que chamou a função %s localizada no arquivo %s',
'You are successfully running web2py': 'Você está executando o web2py com sucesso',
'You are successfully running web2py.': 'Você está executando o web2py com sucesso.',
'You can modify this application and adapt it to your needs': 'Você pode modificar esta aplicação e adaptá-la às suas necessidades',
'You visited the url %s': 'Você acessou a url %s',
'Working...': 'Trabalhando...',
}
| gpl-3.0 | -1,191,606,616,366,335,000 | 8,713,464,902,095,445,000 | 39.693182 | 290 | 0.683608 | false |
rabimba/p2pScrapper | BitTorrent-5.2.2/BitTorrent/Storage_threadpool.py | 6 | 14110 | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Bram Cohen and Greg Hazel
import os
import sys
import Queue
from bisect import bisect_right
from BTL.translation import _
from BTL.obsoletepythonsupport import set
from BitTorrent import BTFailure
from BTL.defer import Deferred, ThreadedDeferred, Failure, wrap_task
from BTL.yielddefer import launch_coroutine
from BitTorrent.platform import get_allocated_regions
from BTL.sparse_set import SparseSet
from BTL.DictWithLists import DictWithLists, DictWithSets
import BTL.stackthreading as threading
from BitTorrent.Storage_base import open_sparse_file, make_file_sparse
from BitTorrent.Storage_base import bad_libc_workaround, is_open_for_write
from BitTorrent.Storage_base import UnregisteredFileException
class FilePool(object):
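    """Caps how many file handles may be open at once and performs all
    disk reads/writes on a small pool of worker threads."""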
def __init__(self, doneflag, add_task, external_add_task,
max_files_open, num_disk_threads):
self.doneflag = doneflag
self.external_add_task = external_add_task
self.file_to_torrent = {}
self.free_handle_condition = threading.Condition()
self.active_file_to_handles = DictWithSets()
self.open_file_to_handles = DictWithLists()
self.set_max_files_open(max_files_open)
self.diskq = Queue.Queue()
for i in xrange(num_disk_threads):
t = threading.Thread(target=self._disk_thread,
name="disk_thread-%s" % (i+1))
t.start()
self.doneflag.addCallback(self.finalize)
def finalize(self, r=None):
# re-queue self so all threads die. we end up with one extra event on
# the queue, but who cares.
self._create_op(self.finalize)
def close_all(self):
failures = {}
self.free_handle_condition.acquire()
while self.get_open_file_count() > 0:
while len(self.open_file_to_handles) > 0:
filename, handle = self.open_file_to_handles.popitem()
try:
handle.close()
except Exception, e:
failures[self.file_to_torrent[filename]] = e
self.free_handle_condition.notify()
if self.get_open_file_count() > 0:
self.free_handle_condition.wait(1)
self.free_handle_condition.release()
for torrent, e in failures.iteritems():
torrent.got_exception(e)
def close_files(self, file_set):
failures = set()
self.free_handle_condition.acquire()
done = False
while not done:
filenames = list(self.open_file_to_handles.iterkeys())
for filename in filenames:
if filename not in file_set:
continue
handles = self.open_file_to_handles.poprow(filename)
for handle in handles:
try:
handle.close()
except Exception, e:
failures.add(e)
self.free_handle_condition.notify()
done = True
for filename in file_set.iterkeys():
if filename in self.active_file_to_handles:
done = False
break
if not done:
self.free_handle_condition.wait(0.5)
self.free_handle_condition.release()
if len(failures) > 0:
raise failures.pop()
def set_max_files_open(self, max_files_open):
if max_files_open <= 0:
max_files_open = 1e100
self.max_files_open = max_files_open
self.close_all()
def add_files(self, files, torrent):
for filename in files:
if filename in self.file_to_torrent:
raise BTFailure(_("File %s belongs to another running torrent")
% filename)
for filename in files:
self.file_to_torrent[filename] = torrent
def remove_files(self, files):
for filename in files:
del self.file_to_torrent[filename]
def _ensure_exists(self, filename, length=0):
if not os.path.exists(filename):
f = os.path.split(filename)[0]
if f != '' and not os.path.exists(f):
os.makedirs(f)
f = file(filename, 'wb')
make_file_sparse(filename, f, length)
f.close()
def get_open_file_count(self):
t = self.open_file_to_handles.total_length()
t += self.active_file_to_handles.total_length()
return t
def acquire_handle(self, filename, for_write, length=0):
# this will block until a new file handle can be made
self.free_handle_condition.acquire()
if filename not in self.file_to_torrent:
self.free_handle_condition.release()
raise UnregisteredFileException()
while self.active_file_to_handles.total_length() == self.max_files_open:
self.free_handle_condition.wait()
if filename in self.open_file_to_handles:
handle = self.open_file_to_handles.pop_from_row(filename)
if for_write and not is_open_for_write(handle.mode):
handle.close()
handle = open_sparse_file(filename, 'rb+', length=length)
#elif not for_write and is_open_for_write(handle.mode):
# handle.close()
# handle = open_sparse_file(filename, 'rb', length=length)
else:
if self.get_open_file_count() == self.max_files_open:
oldfname, oldhandle = self.open_file_to_handles.popitem()
oldhandle.close()
self._ensure_exists(filename, length)
if for_write:
handle = open_sparse_file(filename, 'rb+', length=length)
else:
handle = open_sparse_file(filename, 'rb', length=length)
self.active_file_to_handles.push_to_row(filename, handle)
self.free_handle_condition.release()
return handle
def release_handle(self, filename, handle):
self.free_handle_condition.acquire()
self.active_file_to_handles.remove_fom_row(filename, handle)
self.open_file_to_handles.push_to_row(filename, handle)
self.free_handle_condition.notify()
self.free_handle_condition.release()
def _create_op(self, _f, *args, **kwargs):
df = Deferred()
self.diskq.put((df, _f, args, kwargs))
return df
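    # read() and write() are plain aliases: both queue the supplied callable
    # for one of the disk threads and return a Deferred for its result.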
read = _create_op
write = _create_op
def _disk_thread(self):
while not self.doneflag.isSet():
df, func, args, kwargs = self.diskq.get(True)
try:
v = func(*args, **kwargs)
except:
self.external_add_task(0, df.errback, Failure())
else:
self.external_add_task(0, df.callback, v)
class Storage(object):
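    """Presents a torrent's files as one contiguous byte range; reads and
    writes are split per file and dispatched through the shared FilePool."""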
def __init__(self, config, filepool, save_path,
files, add_task,
external_add_task, doneflag):
self.filepool = filepool
self.config = config
self.doneflag = doneflag
self.add_task = add_task
self.external_add_task = external_add_task
self.initialize(save_path, files)
def initialize(self, save_path, files):
        # a list of byte ranges and filenames for window-based IO
self.ranges = []
# a dict of filename-to-ranges for piece priorities and filename lookup
self.range_by_name = {}
# a sparse set for smart allocation detection
self.allocated_regions = SparseSet()
# dict of filename-to-length on disk (for % complete in the file view)
self.undownloaded = {}
self.save_path = save_path
# Rather implement this as an ugly hack here than change all the
# individual calls. Affects all torrent instances using this module.
if self.config['bad_libc_workaround']:
bad_libc_workaround()
self.initialized = False
self.startup_df = ThreadedDeferred(wrap_task(self.external_add_task),
self._build_file_structs,
self.filepool, files)
return self.startup_df
def _build_file_structs(self, filepool, files):
total = 0
for filename, length in files:
# we're shutting down, abort.
if self.doneflag.isSet():
return False
self.undownloaded[filename] = length
if length > 0:
self.ranges.append((total, total + length, filename))
self.range_by_name[filename] = (total, total + length)
if os.path.exists(filename):
if not os.path.isfile(filename):
raise BTFailure(_("File %s already exists, but is not a "
"regular file") % filename)
l = os.path.getsize(filename)
if l > length:
# This is the truncation Bram was talking about that no one
# else thinks is a good idea.
#h = file(filename, 'rb+')
#make_file_sparse(filename, h, length)
#h.truncate(length)
#h.close()
l = length
a = get_allocated_regions(filename, begin=0, length=l)
if a is not None:
a.offset(total)
else:
a = SparseSet()
if l > 0:
a.add(total, total + l)
self.allocated_regions += a
total += length
self.total_length = total
self.initialized = True
return True
def get_byte_range_for_filename(self, filename):
if filename not in self.range_by_name:
filename = os.path.normpath(filename)
filename = os.path.join(self.save_path, filename)
return self.range_by_name[filename]
def was_preallocated(self, pos, length):
return self.allocated_regions.is_range_in(pos, pos+length)
def get_total_length(self):
return self.total_length
def _intervals(self, pos, amount):
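        # Map the byte range [pos, pos + amount) onto (filename, start, end)
        # triples whose offsets are relative to the start of each file.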
r = []
stop = pos + amount
p = max(bisect_right(self.ranges, (pos, 2 ** 500)) - 1, 0)
for begin, end, filename in self.ranges[p:]:
if begin >= stop:
break
r.append((filename, max(pos, begin) - begin, min(end, stop) - begin))
return r
def _read(self, filename, pos, amount):
begin, end = self.get_byte_range_for_filename(filename)
length = end - begin
h = self.filepool.acquire_handle(filename, for_write=False, length=length)
if h is None:
return
try:
h.seek(pos)
r = h.read(amount)
finally:
self.filepool.release_handle(filename, h)
return r
def _batch_read(self, pos, amount):
dfs = []
r = []
# queue all the reads
for filename, pos, end in self._intervals(pos, amount):
df = self.filepool.read(self._read, filename, pos, end - pos)
dfs.append(df)
# yield on all the reads in order - they complete in any order
exc = None
for df in dfs:
yield df
try:
r.append(df.getResult())
except:
exc = exc or sys.exc_info()
if exc:
raise exc[0], exc[1], exc[2]
r = ''.join(r)
if len(r) != amount:
raise BTFailure(_("Short read (%d of %d) - something truncated files?") %
(len(r), amount))
yield r
def read(self, pos, amount):
df = launch_coroutine(wrap_task(self.add_task),
self._batch_read, pos, amount)
return df
def _write(self, filename, pos, s):
begin, end = self.get_byte_range_for_filename(filename)
length = end - begin
h = self.filepool.acquire_handle(filename, for_write=True, length=length)
if h is None:
return
try:
h.seek(pos)
h.write(s)
finally:
self.filepool.release_handle(filename, h)
return len(s)
def _batch_write(self, pos, s):
dfs = []
total = 0
amount = len(s)
# queue all the writes
for filename, begin, end in self._intervals(pos, amount):
length = end - begin
d = buffer(s, total, length)
total += length
df = self.filepool.write(self._write, filename, begin, d)
dfs.append(df)
# yield on all the writes - they complete in any order
exc = None
for df in dfs:
yield df
try:
df.getResult()
except:
exc = exc or sys.exc_info()
if exc:
raise exc[0], exc[1], exc[2]
yield total
def write(self, pos, s):
df = launch_coroutine(wrap_task(self.add_task),
self._batch_write, pos, s)
return df
def close(self):
if not self.initialized:
self.startup_df.addCallback(lambda *a : self.filepool.close_files(self.range_by_name))
return self.startup_df
self.filepool.close_files(self.range_by_name)
def downloaded(self, pos, length):
for filename, begin, end in self._intervals(pos, length):
self.undownloaded[filename] -= end - begin
| mit | 7,257,356,898,381,349,000 | -1,695,401,481,199,220,000 | 34.721519 | 98 | 0.563643 | false |
jcoady9/python-for-android | python3-alpha/python3-src/Lib/encodings/iso8859_10.py | 272 | 13589 | """ Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-10',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\xa7' # 0xA7 -> SECTION SIGN
'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
'\xad' # 0xAD -> SOFT HYPHEN
'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
'\xb0' # 0xB0 -> DEGREE SIGN
'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
'\xb7' # 0xB7 -> MIDDLE DOT
'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
'\u2015' # 0xBD -> HORIZONTAL BAR
'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 | 2,227,053,026,805,854,500 | 2,470,534,571,366,441,000 | 43.263844 | 109 | 0.530871 | false |
xxhank/namebench | nb_third_party/jinja2/parser.py | 215 | 34717 | # -*- coding: utf-8 -*-
"""
jinja2.parser
~~~~~~~~~~~~~
Implements the template parser.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
from jinja2.utils import next
from jinja2.lexer import describe_token, describe_token_expr
#: statements that call into a dedicated parse_* method below
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
'macro', 'include', 'from', 'import',
'set'])
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
class Parser(object):
"""This is the central parsing class Jinja2 uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
def __init__(self, environment, source, name=None, filename=None,
state=None):
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
self.filename = filename
self.closed = False
self.extensions = {}
for extension in environment.iter_extensions():
for tag in extension.tags:
self.extensions[tag] = extension.parse
self._last_identifier = 0
self._tag_stack = []
self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
"""
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(self, name, end_token_stack, lineno):
expected = []
for exprs in end_token_stack:
expected.extend(map(describe_token_expr, exprs))
if end_token_stack:
currently_looking = ' or '.join(
"'%s'" % describe_token_expr(expr)
for expr in end_token_stack[-1])
else:
currently_looking = None
if name is None:
message = ['Unexpected end of template.']
else:
message = ['Encountered unknown tag \'%s\'.' % name]
if currently_looking:
if name is not None and name in expected:
message.append('You probably made a nesting mistake. Jinja '
'is expecting this tag, but currently looking '
'for %s.' % currently_looking)
else:
message.append('Jinja was looking for the following tags: '
'%s.' % currently_looking)
if self._tag_stack:
message.append('The innermost block that needs to be '
'closed is \'%s\'.' % self._tag_stack[-1])
self.fail(' '.join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
"""
return self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(self, end_tokens=None, lineno=None):
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
return self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
return rv
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
if token.type != 'name':
self.fail('tag name expected', token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
return getattr(self, 'parse_' + self.stream.current.value)()
if token.value == 'call':
return self.parse_call_block()
if token.value == 'filter':
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
return ext(self)
# did not work out, remove the token we pushed by accident
# from the stack so that the unknown tag fail function can
# produce a proper error message.
self._tag_stack.pop()
pop_tag = False
self.fail_unknown_tag(token.value, token.lineno)
finally:
if pop_tag:
self._tag_stack.pop()
def parse_statements(self, end_tokens, drop_needle=False):
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
current token is a colon and skips it if there is one. Then it checks
        for the block end and parses until one of the `end_tokens` is
reached. Per default the active token in the stream at the end of
the call is the matched end token. If this is not wanted `drop_needle`
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
self.stream.skip_if('colon')
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
self.stream.expect('block_end')
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
if self.stream.current.type == 'eof':
self.fail_eof(end_tokens)
if drop_needle:
next(self.stream)
return result
def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target()
self.stream.expect('assign')
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
lineno = self.stream.expect('name:for').lineno
target = self.parse_assign_target(extra_end_rules=('name:in',))
self.stream.expect('name:in')
iter = self.parse_tuple(with_condexpr=False,
extra_end_rules=('name:recursive',))
test = None
if self.stream.skip_if('name:if'):
test = self.parse_expression()
recursive = self.stream.skip_if('name:recursive')
body = self.parse_statements(('name:endfor', 'name:else'))
if next(self.stream).value == 'endfor':
else_ = []
else:
else_ = self.parse_statements(('name:endfor',), drop_needle=True)
return nodes.For(target, iter, body, else_, test,
recursive, lineno=lineno)
def parse_if(self):
"""Parse an if construct."""
node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(('name:elif', 'name:else',
'name:endif'))
token = next(self.stream)
if token.test('name:elif'):
new_node = nodes.If(lineno=self.stream.current.lineno)
node.else_ = [new_node]
node = new_node
continue
elif token.test('name:else'):
node.else_ = self.parse_statements(('name:endif',),
drop_needle=True)
else:
node.else_ = []
break
return result
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
node.name = self.stream.expect('name').value
node.scoped = self.stream.skip_if('name:scoped')
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
if self.stream.current.type == 'sub':
self.fail('Block names in Jinja have to be valid Python '
                      'identifiers and may not contain hyphens, use an '
'underscore instead.')
node.body = self.parse_statements(('name:endblock',), drop_needle=True)
self.stream.skip_if('name:' + node.name)
return node
def parse_extends(self):
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(self, node, default):
if self.stream.current.test_any('name:with', 'name:without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
else:
node.with_context = default
return node
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
if self.stream.current.test('name:ignore') and \
self.stream.look().test('name:missing'):
node.ignore_missing = True
self.stream.skip(2)
else:
node.ignore_missing = False
return self.parse_import_context(node, True)
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:as')
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:import')
node.names = []
def parse_context():
if self.stream.current.value in ('with', 'without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
return True
return False
while 1:
if node.names:
self.stream.expect('comma')
if self.stream.current.type == 'name':
if parse_context():
break
target = self.parse_assign_target(name_only=True)
if target.name.startswith('_'):
self.fail('names starting with an underline can not '
'be imported', target.lineno,
exc=TemplateAssertionError)
if self.stream.skip_if('name:as'):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
if parse_context() or self.stream.current.type != 'comma':
break
else:
break
if not hasattr(node, 'with_context'):
node.with_context = False
self.stream.skip_if('comma')
return node
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
self.stream.expect('lparen')
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
arg = self.parse_assign_target(name_only=True)
arg.set_ctx('param')
if self.stream.skip_if('assign'):
defaults.append(self.parse_expression())
args.append(arg)
self.stream.expect('rparen')
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
if self.stream.current.type == 'lparen':
self.parse_signature(node)
else:
node.args = []
node.defaults = []
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
self.fail('expected call', node.lineno)
node.body = self.parse_statements(('name:endcall',), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
node.body = self.parse_statements(('name:endfilter',),
drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(('name:endmacro',),
drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
def parse_assign_target(self, with_tuple=True, name_only=False,
extra_end_rules=None):
"""Parse an assignment target. As Jinja2 allows assignments to
tuples, this function can parse all allowed assignment targets. Per
        default assignments to tuples are parsed; this can be disabled by
        setting `with_tuple` to `False`. If only assignments to names are
wanted `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function.
"""
if name_only:
token = self.stream.expect('name')
target = nodes.Name(token.value, 'store', lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(simplified=True,
extra_end_rules=extra_end_rules)
else:
target = self.parse_primary(with_postfix=False)
target.set_ctx('store')
if not target.can_assign():
self.fail('can\'t assign to %r' % target.__class__.
__name__.lower(), target.lineno)
return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
while self.stream.skip_if('name:if'):
expr2 = self.parse_or()
if self.stream.skip_if('name:else'):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if('name:or'):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if('name:and'):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
def parse_compare(self):
lineno = self.stream.current.lineno
expr = self.parse_add()
ops = []
while 1:
token_type = self.stream.current.type
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_add()))
elif self.stream.skip_if('name:in'):
ops.append(nodes.Operand('in', self.parse_add()))
elif self.stream.current.test('name:not') and \
self.stream.look().test('name:in'):
self.stream.skip(2)
ops.append(nodes.Operand('notin', self.parse_add()))
else:
break
lineno = self.stream.current.lineno
if not ops:
return expr
return nodes.Compare(expr, ops, lineno=lineno)
def parse_add(self):
lineno = self.stream.current.lineno
left = self.parse_sub()
while self.stream.current.type == 'add':
next(self.stream)
right = self.parse_sub()
left = nodes.Add(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_sub(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type == 'sub':
next(self.stream)
right = self.parse_concat()
left = nodes.Sub(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_mul()]
while self.stream.current.type == 'tilde':
next(self.stream)
args.append(self.parse_mul())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
def parse_mul(self):
lineno = self.stream.current.lineno
left = self.parse_div()
while self.stream.current.type == 'mul':
next(self.stream)
right = self.parse_div()
left = nodes.Mul(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_div(self):
lineno = self.stream.current.lineno
left = self.parse_floordiv()
while self.stream.current.type == 'div':
next(self.stream)
right = self.parse_floordiv()
left = nodes.Div(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_floordiv(self):
lineno = self.stream.current.lineno
left = self.parse_mod()
while self.stream.current.type == 'floordiv':
next(self.stream)
right = self.parse_mod()
left = nodes.FloorDiv(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_mod(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type == 'mod':
next(self.stream)
right = self.parse_pow()
left = nodes.Mod(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == 'pow':
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_unary(self):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
if token_type == 'sub':
next(self.stream)
node = self.parse_unary()
return nodes.Neg(node, lineno=lineno)
if token_type == 'add':
next(self.stream)
node = self.parse_unary()
return nodes.Pos(node, lineno=lineno)
return self.parse_primary()
def parse_primary(self, with_postfix=True):
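        # Parse an atomic expression: a name, literal, parenthesized tuple,
        # list or dict, optionally followed by postfix expressions.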
token = self.stream.current
if token.type == 'name':
if token.value in ('true', 'false', 'True', 'False'):
node = nodes.Const(token.value in ('true', 'True'),
lineno=token.lineno)
elif token.value in ('none', 'None'):
node = nodes.Const(None, lineno=token.lineno)
else:
node = nodes.Name(token.value, 'load', lineno=token.lineno)
next(self.stream)
elif token.type == 'string':
next(self.stream)
buf = [token.value]
lineno = token.lineno
while self.stream.current.type == 'string':
buf.append(self.stream.current.value)
next(self.stream)
node = nodes.Const(''.join(buf), lineno=lineno)
elif token.type in ('integer', 'float'):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
elif token.type == 'lparen':
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
self.stream.expect('rparen')
elif token.type == 'lbracket':
node = self.parse_list()
elif token.type == 'lbrace':
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
if with_postfix:
node = self.parse_postfix(node)
return node
def parse_tuple(self, simplified=False, with_condexpr=True,
extra_end_rules=None, explicit_parentheses=False):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
        if no commas were found.
The default parsing mode is a full tuple. If `simplified` is `True`
only names and literals are parsed. The `no_condexpr` parameter is
forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = lambda: self.parse_primary(with_postfix=False)
elif with_condexpr:
parse = self.parse_expression
else:
parse = lambda: self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
self.stream.expect('comma')
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
self.fail('Expected an expression, got \'%s\'' %
describe_token(self.stream.current))
return nodes.Tuple(args, 'load', lineno=lineno)
def parse_list(self):
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
self.stream.expect('rbrace')
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'dot' or token_type == 'lbracket':
node = self.parse_subscript(node)
elif token_type == 'lparen':
node = self.parse_call(node)
elif token_type == 'pipe':
node = self.parse_filter(node)
elif token_type == 'name' and self.stream.current.value == 'is':
node = self.parse_test(node)
else:
break
return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
if attr_token.type == 'name':
return nodes.Getattr(node, attr_token.value, 'load',
lineno=token.lineno)
elif attr_token.type != 'integer':
self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
if token.type == 'lbracket':
priority_on_attribute = False
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
        self.fail('expected subscript expression', token.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
if self.stream.current.type == 'colon':
next(self.stream)
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != 'colon':
return node
next(self.stream)
args = [node]
if self.stream.current.type == 'colon':
args.append(None)
elif self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == 'colon':
next(self.stream)
if self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.Slice(lineno=lineno, *args)
def parse_call(self, node):
token = self.stream.expect('lparen')
args = []
kwargs = []
dyn_args = dyn_kwargs = None
require_comma = False
def ensure(expr):
if not expr:
self.fail('invalid syntax for function call expression',
token.lineno)
while self.stream.current.type != 'rparen':
if require_comma:
self.stream.expect('comma')
# support for trailing comma
if self.stream.current.type == 'rparen':
break
if self.stream.current.type == 'mul':
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
elif self.stream.current.type == 'pow':
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
ensure(dyn_args is None and dyn_kwargs is None)
if self.stream.current.type == 'name' and \
self.stream.look().type == 'assign':
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
kwargs.append(nodes.Keyword(key, value,
lineno=value.lineno))
else:
ensure(not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect('rparen')
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
lineno=token.lineno)
def parse_filter(self, node, start_inline=False):
while self.stream.current.type == 'pipe' or start_inline:
if not start_inline:
next(self.stream)
token = self.stream.expect('name')
name = token.value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
node = nodes.Filter(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
start_inline = False
return node
def parse_test(self, node):
token = next(self.stream)
if self.stream.current.test('name:not'):
next(self.stream)
negated = True
else:
negated = False
name = self.stream.expect('name').value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
dyn_args = dyn_kwargs = None
kwargs = []
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
elif self.stream.current.type in ('name', 'string', 'integer',
'float', 'lparen', 'lbracket',
'lbrace') and not \
self.stream.current.test_any('name:else', 'name:or',
'name:and'):
if self.stream.current.test('name:is'):
self.fail('You cannot chain multiple tests with is')
args = [self.parse_expression()]
else:
args = []
node = nodes.Test(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
def subparse(self, end_tokens=None):
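        # Parse template data and statements until one of `end_tokens` (or the
        # end of the stream) is reached and return the collected body nodes.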
body = []
data_buffer = []
add_data = data_buffer.append
if end_tokens is not None:
self._end_token_stack.append(end_tokens)
def flush_data():
if data_buffer:
lineno = data_buffer[0].lineno
body.append(nodes.Output(data_buffer[:], lineno=lineno))
del data_buffer[:]
try:
while self.stream:
token = self.stream.current
if token.type == 'data':
if token.value:
add_data(nodes.TemplateData(token.value,
lineno=token.lineno))
next(self.stream)
elif token.type == 'variable_begin':
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
self.stream.expect('variable_end')
elif token.type == 'block_begin':
flush_data()
next(self.stream)
if end_tokens is not None and \
self.stream.current.test_any(*end_tokens):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
self.stream.expect('block_end')
else:
raise AssertionError('internal parsing error')
flush_data()
finally:
if end_tokens is not None:
self._end_token_stack.pop()
return body
def parse(self):
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result
| apache-2.0 | 974,967,971,003,515,800 | -1,636,482,853,749,414,100 | 38.361678 | 82 | 0.545842 | false |
Tehsmash/ironic | ironic/tests/test_images.py | 1 | 20411 | # Vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import mock
from oslo.concurrency import processutils
from oslo.config import cfg
import six.moves.builtins as __builtin__
from ironic.common import exception
from ironic.common import image_service
from ironic.common import images
from ironic.common import utils
from ironic.openstack.common import imageutils
from ironic.tests import base
CONF = cfg.CONF
class IronicImagesTestCase(base.TestCase):
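    """Tests for the fetch/convert helpers in ironic.common.images."""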
class FakeImgInfo(object):
pass
@mock.patch.object(imageutils, 'QemuImgInfo')
@mock.patch.object(os.path, 'exists', return_value=False)
def test_qemu_img_info_path_doesnt_exist(self, path_exists_mock,
qemu_img_info_mock):
images.qemu_img_info('noimg')
path_exists_mock.assert_called_once_with('noimg')
qemu_img_info_mock.assert_called_once_with()
@mock.patch.object(utils, 'execute', return_value=('out', 'err'))
@mock.patch.object(imageutils, 'QemuImgInfo')
@mock.patch.object(os.path, 'exists', return_value=True)
def test_qemu_img_info_path_exists(self, path_exists_mock,
qemu_img_info_mock, execute_mock):
images.qemu_img_info('img')
path_exists_mock.assert_called_once_with('img')
execute_mock.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', 'img')
qemu_img_info_mock.assert_called_once_with('out')
@mock.patch.object(utils, 'execute')
def test_convert_image(self, execute_mock):
images.convert_image('source', 'dest', 'out_format')
execute_mock.assert_called_once_with('qemu-img', 'convert', '-O',
'out_format', 'source', 'dest',
run_as_root=False)
@mock.patch.object(image_service, 'Service')
@mock.patch.object(__builtin__, 'open')
def test_fetch_no_image_service(self, open_mock, image_service_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'file'
open_mock.return_value = mock_file_handle
images.fetch('context', 'image_href', 'path')
open_mock.assert_called_once_with('path', 'wb')
image_service_mock.assert_called_once_with(version=1,
context='context')
image_service_mock.return_value.download.assert_called_once_with(
'image_href', 'file')
@mock.patch.object(__builtin__, 'open')
def test_fetch_image_service(self, open_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'file'
open_mock.return_value = mock_file_handle
image_service_mock = mock.Mock()
images.fetch('context', 'image_href', 'path', image_service_mock)
open_mock.assert_called_once_with('path', 'wb')
image_service_mock.download.assert_called_once_with(
'image_href', 'file')
@mock.patch.object(images, 'image_to_raw')
@mock.patch.object(__builtin__, 'open')
def test_fetch_image_service_force_raw(self, open_mock, image_to_raw_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'file'
open_mock.return_value = mock_file_handle
image_service_mock = mock.Mock()
images.fetch('context', 'image_href', 'path', image_service_mock,
force_raw=True)
open_mock.assert_called_once_with('path', 'wb')
image_service_mock.download.assert_called_once_with(
'image_href', 'file')
image_to_raw_mock.assert_called_once_with(
'image_href', 'path', 'path.part')
@mock.patch.object(images, 'qemu_img_info')
def test_image_to_raw_no_file_format(self, qemu_img_info_mock):
info = self.FakeImgInfo()
info.file_format = None
qemu_img_info_mock.return_value = info
e = self.assertRaises(exception.ImageUnacceptable, images.image_to_raw,
'image_href', 'path', 'path_tmp')
qemu_img_info_mock.assert_called_once_with('path_tmp')
self.assertIn("'qemu-img info' parsing failed.", str(e))
@mock.patch.object(images, 'qemu_img_info')
def test_image_to_raw_backing_file_present(self, qemu_img_info_mock):
info = self.FakeImgInfo()
info.file_format = 'raw'
info.backing_file = 'backing_file'
qemu_img_info_mock.return_value = info
e = self.assertRaises(exception.ImageUnacceptable, images.image_to_raw,
'image_href', 'path', 'path_tmp')
qemu_img_info_mock.assert_called_once_with('path_tmp')
self.assertIn("fmt=raw backed by: backing_file", str(e))
@mock.patch.object(os, 'rename')
@mock.patch.object(os, 'unlink')
@mock.patch.object(images, 'convert_image')
@mock.patch.object(images, 'qemu_img_info')
def test_image_to_raw(self, qemu_img_info_mock, convert_image_mock,
unlink_mock, rename_mock):
CONF.set_override('force_raw_images', True)
info = self.FakeImgInfo()
info.file_format = 'fmt'
info.backing_file = None
qemu_img_info_mock.return_value = info
def convert_side_effect(source, dest, out_format):
info.file_format = 'raw'
convert_image_mock.side_effect = convert_side_effect
images.image_to_raw('image_href', 'path', 'path_tmp')
qemu_img_info_mock.assert_has_calls([mock.call('path_tmp'),
mock.call('path.converted')])
convert_image_mock.assert_called_once_with('path_tmp',
'path.converted', 'raw')
unlink_mock.assert_called_once_with('path_tmp')
rename_mock.assert_called_once_with('path.converted', 'path')
@mock.patch.object(os, 'unlink')
@mock.patch.object(images, 'convert_image')
@mock.patch.object(images, 'qemu_img_info')
def test_image_to_raw_not_raw_after_conversion(self, qemu_img_info_mock,
convert_image_mock,
unlink_mock):
CONF.set_override('force_raw_images', True)
info = self.FakeImgInfo()
info.file_format = 'fmt'
info.backing_file = None
qemu_img_info_mock.return_value = info
self.assertRaises(exception.ImageConvertFailed, images.image_to_raw,
'image_href', 'path', 'path_tmp')
qemu_img_info_mock.assert_has_calls([mock.call('path_tmp'),
mock.call('path.converted')])
convert_image_mock.assert_called_once_with('path_tmp',
'path.converted', 'raw')
unlink_mock.assert_called_once_with('path_tmp')
@mock.patch.object(os, 'rename')
@mock.patch.object(images, 'qemu_img_info')
def test_image_to_raw_already_raw_format(self, qemu_img_info_mock,
rename_mock):
info = self.FakeImgInfo()
info.file_format = 'raw'
info.backing_file = None
qemu_img_info_mock.return_value = info
images.image_to_raw('image_href', 'path', 'path_tmp')
qemu_img_info_mock.assert_called_once_with('path_tmp')
rename_mock.assert_called_once_with('path_tmp', 'path')
@mock.patch.object(image_service, 'Service')
def test_download_size_no_image_service(self, image_service_mock):
images.download_size('context', 'image_href')
image_service_mock.assert_called_once_with(version=1,
context='context')
image_service_mock.return_value.show.assert_called_once_with(
'image_href')
def test_download_size_image_service(self):
image_service_mock = mock.MagicMock()
images.download_size('context', 'image_href', image_service_mock)
image_service_mock.show.assert_called_once_with('image_href')
@mock.patch.object(images, 'qemu_img_info')
def test_converted_size(self, qemu_img_info_mock):
info = self.FakeImgInfo()
info.virtual_size = 1
qemu_img_info_mock.return_value = info
size = images.converted_size('path')
qemu_img_info_mock.assert_called_once_with('path')
self.assertEqual(1, size)
class FsImageTestCase(base.TestCase):
@mock.patch.object(shutil, 'copyfile')
@mock.patch.object(os, 'makedirs')
@mock.patch.object(os.path, 'dirname')
@mock.patch.object(os.path, 'exists')
def test__create_root_fs(self, path_exists_mock,
dirname_mock, mkdir_mock, cp_mock):
path_exists_mock_func = lambda path: path == 'root_dir'
files_info = {
'a1': 'b1',
'a2': 'b2',
'a3': 'sub_dir/b3'}
path_exists_mock.side_effect = path_exists_mock_func
dirname_mock.side_effect = ['root_dir', 'root_dir',
'root_dir/sub_dir', 'root_dir/sub_dir']
images._create_root_fs('root_dir', files_info)
cp_mock.assert_any_call('a1', 'root_dir/b1')
cp_mock.assert_any_call('a2', 'root_dir/b2')
cp_mock.assert_any_call('a3', 'root_dir/sub_dir/b3')
path_exists_mock.assert_any_call('root_dir/sub_dir')
dirname_mock.assert_any_call('root_dir/b1')
dirname_mock.assert_any_call('root_dir/b2')
dirname_mock.assert_any_call('root_dir/sub_dir/b3')
mkdir_mock.assert_called_once_with('root_dir/sub_dir')
@mock.patch.object(images, '_create_root_fs')
@mock.patch.object(utils, 'tempdir')
@mock.patch.object(utils, 'write_to_file')
@mock.patch.object(utils, 'dd')
@mock.patch.object(utils, 'umount')
@mock.patch.object(utils, 'mount')
@mock.patch.object(utils, 'mkfs')
def test_create_vfat_image(self, mkfs_mock, mount_mock, umount_mock,
dd_mock, write_mock, tempdir_mock, create_root_fs_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'tempdir'
tempdir_mock.return_value = mock_file_handle
parameters = {'p1': 'v1'}
files_info = {'a': 'b'}
images.create_vfat_image('tgt_file', parameters=parameters,
files_info=files_info, parameters_file='qwe',
fs_size_kib=1000)
dd_mock.assert_called_once_with('/dev/zero',
'tgt_file',
'count=1',
'bs=1000KiB')
mkfs_mock.assert_called_once_with('vfat', 'tgt_file')
mount_mock.assert_called_once_with('tgt_file', 'tempdir',
'-o', 'umask=0')
parameters_file_path = os.path.join('tempdir', 'qwe')
write_mock.assert_called_once_with(parameters_file_path, 'p1=v1')
create_root_fs_mock.assert_called_once_with('tempdir', files_info)
umount_mock.assert_called_once_with('tempdir')
@mock.patch.object(images, '_create_root_fs')
@mock.patch.object(utils, 'tempdir')
@mock.patch.object(utils, 'dd')
@mock.patch.object(utils, 'umount')
@mock.patch.object(utils, 'mount')
@mock.patch.object(utils, 'mkfs')
def test_create_vfat_image_always_umount(self, mkfs_mock, mount_mock,
umount_mock, dd_mock, tempdir_mock, create_root_fs_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'tempdir'
tempdir_mock.return_value = mock_file_handle
files_info = {'a': 'b'}
create_root_fs_mock.side_effect = OSError()
self.assertRaises(exception.ImageCreationFailed,
images.create_vfat_image, 'tgt_file',
files_info=files_info)
umount_mock.assert_called_once_with('tempdir')
@mock.patch.object(utils, 'dd')
def test_create_vfat_image_dd_fails(self, dd_mock):
dd_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.ImageCreationFailed,
images.create_vfat_image, 'tgt_file')
@mock.patch.object(utils, 'tempdir')
@mock.patch.object(utils, 'dd')
@mock.patch.object(utils, 'mkfs')
def test_create_vfat_image_mkfs_fails(self, mkfs_mock, dd_mock,
tempdir_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'tempdir'
tempdir_mock.return_value = mock_file_handle
mkfs_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.ImageCreationFailed,
images.create_vfat_image, 'tgt_file')
@mock.patch.object(images, '_create_root_fs')
@mock.patch.object(utils, 'tempdir')
@mock.patch.object(utils, 'dd')
@mock.patch.object(utils, 'umount')
@mock.patch.object(utils, 'mount')
@mock.patch.object(utils, 'mkfs')
def test_create_vfat_image_umount_fails(self, mkfs_mock, mount_mock,
umount_mock, dd_mock, tempdir_mock, create_root_fs_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'tempdir'
tempdir_mock.return_value = mock_file_handle
umount_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.ImageCreationFailed,
images.create_vfat_image, 'tgt_file')
def test__generate_isolinux_cfg(self):
kernel_params = ['key1=value1', 'key2']
expected_cfg = ("default boot\n"
"\n"
"label boot\n"
"kernel /vmlinuz\n"
"append initrd=/initrd text key1=value1 key2 --")
cfg = images._generate_isolinux_cfg(kernel_params)
self.assertEqual(expected_cfg, cfg)
@mock.patch.object(images, '_create_root_fs')
@mock.patch.object(utils, 'write_to_file')
@mock.patch.object(utils, 'tempdir')
@mock.patch.object(utils, 'execute')
@mock.patch.object(images, '_generate_isolinux_cfg')
def test_create_isolinux_image(self, gen_cfg_mock, utils_mock,
tempdir_mock, write_to_file_mock,
create_root_fs_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'tmpdir'
tempdir_mock.return_value = mock_file_handle
cfg = "cfg"
cfg_file = 'tmpdir/isolinux/isolinux.cfg'
gen_cfg_mock.return_value = cfg
params = ['a=b', 'c']
images.create_isolinux_image('tgt_file', 'path/to/kernel',
'path/to/ramdisk', kernel_params=params)
files_info = {
'path/to/kernel': 'vmlinuz',
'path/to/ramdisk': 'initrd',
CONF.isolinux_bin: 'isolinux/isolinux.bin'
}
create_root_fs_mock.assert_called_once_with('tmpdir', files_info)
gen_cfg_mock.assert_called_once_with(params)
write_to_file_mock.assert_called_once_with(cfg_file, cfg)
utils_mock.assert_called_once_with('mkisofs', '-r', '-V',
"BOOT IMAGE", '-cache-inodes', '-J', '-l',
'-no-emul-boot', '-boot-load-size',
'4', '-boot-info-table', '-b', 'isolinux/isolinux.bin',
'-o', 'tgt_file', 'tmpdir')
@mock.patch.object(images, '_create_root_fs')
@mock.patch.object(utils, 'tempdir')
@mock.patch.object(utils, 'execute')
def test_create_isolinux_image_rootfs_fails(self, utils_mock,
tempdir_mock,
create_root_fs_mock):
create_root_fs_mock.side_effect = IOError
self.assertRaises(exception.ImageCreationFailed,
images.create_isolinux_image,
'tgt_file', 'path/to/kernel',
'path/to/ramdisk')
@mock.patch.object(images, '_create_root_fs')
@mock.patch.object(utils, 'write_to_file')
@mock.patch.object(utils, 'tempdir')
@mock.patch.object(utils, 'execute')
@mock.patch.object(images, '_generate_isolinux_cfg')
def test_create_isolinux_image_mkisofs_fails(self, gen_cfg_mock,
utils_mock,
tempdir_mock,
write_to_file_mock,
create_root_fs_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'tmpdir'
tempdir_mock.return_value = mock_file_handle
utils_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.ImageCreationFailed,
images.create_isolinux_image,
'tgt_file', 'path/to/kernel',
'path/to/ramdisk')
@mock.patch.object(images, 'create_isolinux_image')
@mock.patch.object(images, 'fetch')
@mock.patch.object(utils, 'tempdir')
def test_create_boot_iso(self, tempdir_mock, fetch_images_mock,
create_isolinux_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'tmpdir'
tempdir_mock.return_value = mock_file_handle
images.create_boot_iso('ctx', 'output_file', 'kernel-uuid',
'ramdisk-uuid', 'root-uuid', 'kernel-params')
fetch_images_mock.assert_any_call('ctx', 'kernel-uuid',
'tmpdir/kernel-uuid', True)
fetch_images_mock.assert_any_call('ctx', 'ramdisk-uuid',
'tmpdir/ramdisk-uuid', True)
params = ['root=UUID=root-uuid', 'kernel-params']
create_isolinux_mock.assert_called_once_with('output_file',
'tmpdir/kernel-uuid', 'tmpdir/ramdisk-uuid', params)
@mock.patch.object(image_service, 'Service')
def test_get_glance_image_property(self, image_service_mock):
prop_dict = {'properties': {'prop1': 'val1'}}
image_service_obj_mock = image_service_mock.return_value
image_service_obj_mock.show.return_value = prop_dict
ret_val = images.get_glance_image_property('con', 'uuid', 'prop1')
image_service_mock.assert_called_once_with(version=1, context='con')
image_service_obj_mock.show.assert_called_once_with('uuid')
self.assertEqual('val1', ret_val)
ret_val = images.get_glance_image_property('con', 'uuid', 'prop2')
self.assertIsNone(ret_val)
@mock.patch.object(image_service, 'Service')
def test_get_temp_url_for_glance_image(self, image_service_mock):
direct_url = 'swift+http://host/v1/AUTH_xx/con/obj'
image_info = {'id': 'qwe', 'properties': {'direct_url': direct_url}}
glance_service_mock = image_service_mock.return_value
glance_service_mock.swift_temp_url.return_value = 'temp-url'
glance_service_mock.show.return_value = image_info
temp_url = images.get_temp_url_for_glance_image('context',
'glance_uuid')
glance_service_mock.show.assert_called_once_with('glance_uuid')
self.assertEqual('temp-url', temp_url)
| apache-2.0 | -7,991,514,417,433,597,000 | -4,632,912,837,886,406,000 | 42.335456 | 79 | 0.589535 | false |
mgpyh/django-fluent-comments | setup.py | 1 | 2800 | #!/usr/bin/env python
from __future__ import print_function
from setuptools import setup, find_packages
from os import path
import codecs
import os
import re
import sys
# When creating the sdist, make sure the django.mo file also exists:
if 'sdist' in sys.argv or 'develop' in sys.argv:
try:
os.chdir('fluent_comments')
# from django.core.management.commands.compilemessages import Command
# Command.compile_messages(sys.stderr)
finally:
os.chdir('..')
def read(*parts):
file_path = path.join(path.dirname(__file__), *parts)
return codecs.open(file_path, encoding='utf-8').read()
def find_version(*parts):
version_file = read(*parts)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
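# Illustrative note (not part of the original file): find_version('fluent_comments',
# '__init__.py') returns the string assigned to __version__ in that package's
# __init__.py; the concrete version value depends on the checked-out source.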
if sys.version_info[0] >= 3:
# Akismet 0.2 does not support Python 3.
install_requires=[
'django-crispy-forms>=1.1.1',
]
if 'install' in sys.argv:
print("\nwarning: skipped Akismet as dependency because it does not have a Python 3 version.")
else:
install_requires=[
'django-crispy-forms>=1.1.1',
'akismet>=0.2',
]
setup(
name='django-fluent-comments',
version=find_version('fluent_comments', '__init__.py'),
license='Apache License, Version 2.0',
install_requires=install_requires,
requires=[
'Django (>=1.3)', # Using staticfiles
],
extras_require = {
'threadedcomments': ['django-threadedcomments>=0.9'],
},
description='A modern, ajax-based appearance for django_comments',
long_description=read('README.rst'),
author='Diederik van der Boor',
author_email='[email protected]',
url='https://github.com/edoburu/django-fluent-comments',
download_url='https://github.com/edoburu/django-fluent-comments/zipball/master',
packages=find_packages(exclude=('example*',)),
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| apache-2.0 | 716,790,686,812,453,900 | 827,704,632,069,284,500 | 30.818182 | 102 | 0.633929 | false |
andyzsf/edx | common/lib/chem/chem/miller.py | 182 | 9162 | """ Calculation of Miller indices """
import numpy as np
import math
import fractions as fr
import decimal
import json
def lcm(a, b):
"""
Returns least common multiple of a, b
Args:
a, b: floats
Returns:
float
"""
return a * b / fr.gcd(a, b)
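# Illustrative check (not part of the original module): lcm(4, 6) == 12 and
# lcm(3, 5) == 15, since fr.gcd(4, 6) == 2 and fr.gcd(3, 5) == 1.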
def segment_to_fraction(distance):
"""
Converts lengths of which the plane cuts the axes to fraction.
    Tries to convert the distance to the closest nice fraction with a
    denominator less than or equal to 10. This is purely for simplicity and
    clarity for learning purposes. Jenny: 'In typical courses students usually
    do not encounter indices any higher than 6'.
    If distance is not a number (numpy nan), the plane is parallel to the axis
    or contains it. The inverted fraction of nan (nan is 1/0), i.e. 0 / 1, is
    returned.
Generally (special cases):
    a) if distance is smaller than some constant, e.g. 0.01011,
    then the fraction's denominator is usually much greater than 10.
    b) Also, if a student sets a point on 0.66 -> 1/3, it is the (3,3,3) plane,
    but a slight move of the mouse to 0.65 would give the (16,15,16) plane.
    That's why point coordinates are adjusted to the closest tick or
    tick + tick / 2 value, and the UI now sends to the server only values that
    are multiples of 0.05 (half of a tick). The same rounding is implemented
    for unittests.
    But if one wants to calculate Miller indices with exact coordinates and
    with nice fractions (which produce small Miller indices), one may want to
    shift to a new origin if the segments are like S = (0.015, >0.05, >0.05),
    i.e. close to zero in one coordinate. Updating S to (0, >0.05, >0.05) and
    shifting the origin then yields nice small fractions. There is also a
    degenerate case such as S = (0.015, 0.012, >0.05): updating S to
    (0, 0, >0.05) gives a line, and this case should be considered separately.
    Small nice Miller numbers and the possibility to create very small segments
    cannot be supported at the same time.
Args:
distance: float distance that plane cuts on axis, it must not be 0.
Distance is multiple of 0.05.
Returns:
Inverted fraction.
0 / 1 if distance is nan
"""
if np.isnan(distance):
return fr.Fraction(0, 1)
else:
fract = fr.Fraction(distance).limit_denominator(10)
return fr.Fraction(fract.denominator, fract.numerator)
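# Illustrative values (assumed, not from the original module):
# segment_to_fraction(0.5) returns Fraction(2, 1) -- the inverted intercept 1/2 --
# and segment_to_fraction(np.nan) returns Fraction(0, 1) because the plane never
# cuts that axis.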
def sub_miller(segments):
'''
Calculates Miller indices from segments.
Algorithm:
1. Obtain inverted fraction from segments
2. Find common denominator of inverted fractions
3. Lead fractions to common denominator and throws denominator away.
4. Return obtained values.
Args:
List of 3 floats, meaning distances that plane cuts on x, y, z axes.
        None of the floats equals zero, which means that the plane does not
        intersect the origin, i.e. the shift of origin has already been done.
Returns:
String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2)
'''
fracts = [segment_to_fraction(segment) for segment in segments]
common_denominator = reduce(lcm, [fract.denominator for fract in fracts])
miller_indices = ([
fract.numerator * math.fabs(common_denominator) / fract.denominator
for fract in fracts
])
    return '(' + ','.join(map(str, map(decimal.Decimal, miller_indices))) + ')'
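# Illustrative call (segment values assumed): sub_miller([0.5, 1.0, np.nan])
# inverts the intercepts to 2/1, 1/1 and 0/1, finds the common denominator 1
# and returns the string '(2,1,0)'.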
def miller(points):
"""
Calculates Miller indices from points.
Algorithm:
1. Calculate normal vector to a plane that goes trough all points.
2. Set origin.
3. Create Cartesian coordinate system (Ccs).
4. Find the lengths of segments of which the plane cuts the axes. Equation
of a line for axes: Origin + (Coordinate_vector - Origin) * parameter.
5. If plane goes trough Origin:
a) Find new random origin: find unit cube vertex, not crossed by a plane.
b) Repeat 2-4.
c) Fix signs of segments after Origin shift. This means to consider
    original directions of axes. E.g.: Origin was 0,0,0 and became
new_origin. If new_origin has same Y coordinate as Origin, then segment
does not change its sign. But if new_origin has another Y coordinate than
    origin (was 0, became 1), then the segment has to change its sign (it now
lies on negative side of Y axis). New Origin 0 value of X or Y or Z
coordinate means that segment does not change sign, 1 value -> does
change. So new sign is (1 - 2 * new_origin): 0 -> 1, 1 -> -1
6. Run function that calculates miller indices from segments.
Args:
List of points. Each point is list of float coordinates. Order of
coordinates in point's list: x, y, z. Points are different!
Returns:
String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2)
"""
N = np.cross(points[1] - points[0], points[2] - points[0])
O = np.array([0, 0, 0])
P = points[0] # point of plane
Ccs = map(np.array, [[1.0, 0, 0], [0, 1.0, 0], [0, 0, 1.0]])
segments = ([
np.dot(P - O, N) / np.dot(ort, N) if np.dot(ort, N) != 0
else np.nan for ort in Ccs
])
if any(x == 0 for x in segments): # Plane goes through origin.
vertices = [
# top:
np.array([1.0, 1.0, 1.0]),
np.array([0.0, 0.0, 1.0]),
np.array([1.0, 0.0, 1.0]),
np.array([0.0, 1.0, 1.0]),
# bottom, except 0,0,0:
np.array([1.0, 0.0, 0.0]),
np.array([0.0, 1.0, 0.0]),
np.array([1.0, 1.0, 1.0]),
]
for vertex in vertices:
if np.dot(vertex - O, N) != 0: # vertex not in plane
new_origin = vertex
break
# obtain new axes with center in new origin
X = np.array([1 - new_origin[0], new_origin[1], new_origin[2]])
Y = np.array([new_origin[0], 1 - new_origin[1], new_origin[2]])
Z = np.array([new_origin[0], new_origin[1], 1 - new_origin[2]])
new_Ccs = [X - new_origin, Y - new_origin, Z - new_origin]
segments = ([np.dot(P - new_origin, N) / np.dot(ort, N) if
np.dot(ort, N) != 0 else np.nan for ort in new_Ccs])
        # fix signs of indices: 0 -> 1, 1 -> -1
segments = (1 - 2 * new_origin) * segments
return sub_miller(segments)
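# Illustrative call (points assumed): for the unit-intercept plane through
# (1,0,0), (0,1,0) and (0,0,1), i.e.
# miller([np.array([1.0, 0, 0]), np.array([0, 1.0, 0]), np.array([0, 0, 1.0])]),
# the normal is (1,1,1), every axis is cut at 1 and the result is '(1,1,1)'.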
def grade(user_input, correct_answer):
'''
Grade crystallography problem.
Returns true if lattices are the same and Miller indices are same or minus
same. E.g. (2,2,2) = (2, 2, 2) or (-2, -2, -2). Because sign depends only
on student's selection of origin.
Args:
user_input, correct_answer: json. Format:
user_input: {"lattice":"sc","points":[["0.77","0.00","1.00"],
["0.78","1.00","0.00"],["0.00","1.00","0.72"]]}
correct_answer: {'miller': '(00-1)', 'lattice': 'bcc'}
"lattice" is one of: "", "sc", "bcc", "fcc"
Returns:
True or false.
'''
def negative(m):
"""
Change sign of Miller indices.
Args:
m: string with meaning of Miller indices. E.g.:
(-6,3,-6) -> (6, -3, 6)
Returns:
String with changed signs.
"""
output = ''
i = 1
while i in range(1, len(m) - 1):
if m[i] in (',', ' '):
output += m[i]
elif m[i] not in ('-', '0'):
output += '-' + m[i]
elif m[i] == '0':
output += m[i]
else:
i += 1
output += m[i]
i += 1
return '(' + output + ')'
def round0_25(point):
"""
        Rounds point coordinates to the closest 0.05 value.
Args:
point: list of float coordinates. Order of coordinates: x, y, z.
Returns:
            list of coordinates rounded to the closest 0.05 value
"""
rounded_points = []
for coord in point:
base = math.floor(coord * 10)
fractional_part = (coord * 10 - base)
aliquot0_25 = math.floor(fractional_part / 0.25)
if aliquot0_25 == 0.0:
rounded_points.append(base / 10)
if aliquot0_25 in (1.0, 2.0):
rounded_points.append(base / 10 + 0.05)
if aliquot0_25 == 3.0:
rounded_points.append(base / 10 + 0.1)
return rounded_points
user_answer = json.loads(user_input)
if user_answer['lattice'] != correct_answer['lattice']:
return False
points = [map(float, p) for p in user_answer['points']]
if len(points) < 3:
return False
    # round point to closest 0.05 value
points = [round0_25(point) for point in points]
points = [np.array(point) for point in points]
# print miller(points), (correct_answer['miller'].replace(' ', ''),
# negative(correct_answer['miller']).replace(' ', ''))
if miller(points) in (correct_answer['miller'].replace(' ', ''), negative(correct_answer['miller']).replace(' ', '')):
return True
return False
| agpl-3.0 | 431,166,509,700,144,450 | -4,681,532,763,781,096,000 | 32.683824 | 122 | 0.583715 | false |
RockySteveJobs/python-for-android | python-build/python-libs/gdata/build/lib/gdata/tlslite/utils/OpenSSL_TripleDES.py | 359 | 1666 | """OpenSSL/M2Crypto 3DES implementation."""
from cryptomath import *
from TripleDES import *
if m2cryptoLoaded:
def new(key, mode, IV):
return OpenSSL_TripleDES(key, mode, IV)
class OpenSSL_TripleDES(TripleDES):
def __init__(self, key, mode, IV):
TripleDES.__init__(self, key, mode, IV, "openssl")
self.key = key
self.IV = IV
def _createContext(self, encrypt):
context = m2.cipher_ctx_new()
cipherType = m2.des_ede3_cbc()
m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
return context
def encrypt(self, plaintext):
TripleDES.encrypt(self, plaintext)
context = self._createContext(1)
ciphertext = m2.cipher_update(context, plaintext)
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return ciphertext
def decrypt(self, ciphertext):
TripleDES.decrypt(self, ciphertext)
context = self._createContext(0)
#I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.
#To work around this, we append sixteen zeros to the string, below:
plaintext = m2.cipher_update(context, ciphertext+('\0'*16))
#If this bug is ever fixed, then plaintext will end up having a garbage
#plaintext block on the end. That's okay - the below code will ignore it.
plaintext = plaintext[:len(ciphertext)]
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
        return plaintext
| apache-2.0 | 8,079,756,952,782,045,000 | 704,948,145,582,644,600 | 36.886364 | 98 | 0.59964 | false |
apache/flink | flink-python/pyflink/table/tests/test_correlate.py | 5 | 4123 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.table import expressions as expr
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase
class CorrelateTests(PyFlinkStreamTableTestCase):
def test_join_lateral(self):
t_env = self.t_env
t_env.create_java_temporary_system_function("split",
"org.apache.flink.table.legacyutils.TableFunc1")
source = t_env.from_elements([("1", "1#3#5#7"), ("2", "2#4#6#8")], ["id", "words"])
result = source.join_lateral("split(words) as (word)")
query_operation = result._j_table.getQueryOperation()
self.assertEqual('INNER', query_operation.getJoinType().toString())
self.assertTrue(query_operation.isCorrelated())
self.assertEqual('true', query_operation.getCondition().toString())
def test_join_lateral_with_join_predicate(self):
t_env = self.t_env
t_env.create_java_temporary_system_function("split",
"org.apache.flink.table.legacyutils.TableFunc1")
source = t_env.from_elements([("1", "1#3#5#7"), ("2", "2#4#6#8")], ["id", "words"])
result = source.join_lateral(expr.call('split', source.words).alias('word'),
expr.col('id') == expr.col('word'))
query_operation = result._j_table.getQueryOperation()
self.assertEqual('INNER', query_operation.getJoinType().toString())
self.assertTrue(query_operation.isCorrelated())
self.assertEqual('equals(id, word)',
query_operation.getCondition().toString())
def test_left_outer_join_lateral(self):
t_env = self.t_env
t_env.create_java_temporary_system_function("split",
"org.apache.flink.table.legacyutils.TableFunc1")
source = t_env.from_elements([("1", "1#3#5#7"), ("2", "2#4#6#8")], ["id", "words"])
result = source.left_outer_join_lateral(expr.call('split', source.words).alias('word'))
query_operation = result._j_table.getQueryOperation()
self.assertEqual('LEFT_OUTER', query_operation.getJoinType().toString())
self.assertTrue(query_operation.isCorrelated())
self.assertEqual('true', query_operation.getCondition().toString())
def test_left_outer_join_lateral_with_join_predicate(self):
t_env = self.t_env
t_env.create_java_temporary_system_function("split",
"org.apache.flink.table.legacyutils.TableFunc1")
source = t_env.from_elements([("1", "1#3#5#7"), ("2", "2#4#6#8")], ["id", "words"])
# only support "true" as the join predicate currently
result = source.left_outer_join_lateral(expr.call('split', source.words).alias('word'),
expr.lit(True))
query_operation = result._j_table.getQueryOperation()
self.assertEqual('LEFT_OUTER', query_operation.getJoinType().toString())
self.assertTrue(query_operation.isCorrelated())
self.assertEqual('true', query_operation.getCondition().toString())
| apache-2.0 | 4,647,608,940,733,014,000 | 657,732,824,891,107,500 | 51.858974 | 100 | 0.605384 | false |
peterbarkley/SurfGeckos | djangosite/snippets/migrations/0012_auto_20170917_2131.py | 1 | 2390 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-18 07:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('snippets', '0011_auto_20170917_2103'),
]
operations = [
migrations.AlterField(
model_name='actionlevel',
name='aquatic_ecotoxicity',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='actionlevel',
name='direct_exposure',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='actionlevel',
name='dw_toxicity',
field=models.FloatField(blank=True, null=True, verbose_name='Drinking Water Toxicity'),
),
migrations.AlterField(
model_name='actionlevel',
name='gw_gross_contamination',
field=models.FloatField(blank=True, null=True, verbose_name='Groundwater Gross Contamination'),
),
migrations.AlterField(
model_name='actionlevel',
name='gw_vapor_emissions',
field=models.FloatField(blank=True, null=True, verbose_name='Groundwater Vapor Emissions'),
),
migrations.AlterField(
model_name='actionlevel',
name='indoor_air',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='actionlevel',
name='leaching',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='actionlevel',
name='shallow_soil_vapor',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='actionlevel',
name='soil_gross_contamination',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='actionlevel',
name='soil_vapor_emissions',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='actionlevel',
name='terrestrial_ecotoxicity',
field=models.FloatField(blank=True, null=True),
),
]
| mit | 440,914,125,301,500,300 | 3,053,190,839,366,344,700 | 33.142857 | 107 | 0.575732 | false |
anthraxx/diffoscope | diffoscope/changes.py | 2 | 11351 | # -*- coding: utf-8 -*-
#
# changes.py — .changes file handling class
#
# This file was originally part of debexpo
# https://alioth.debian.org/projects/debexpo/
#
# Copyright © 2008 Jonny Lamb <[email protected]>
# Copyright © 2010 Jan Dittberner <[email protected]>
# Copyright © 2012 Arno Töll <[email protected]>
# Copyright © 2012 Paul Tagliamonte <[email protected]>
# Copyright © 2014 Jérémy Bobbio <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
This code deals with the reading and processing of Debian .changes files. This
code is copyright (c) Jonny Lamb, and is used by dput, rather than created as
a result of it. Thank you Jonny.
"""
__author__ = 'Jonny Lamb'
__copyright__ = 'Copyright © 2008 Jonny Lamb, Copyright © 2010 Jan Dittberner'
__license__ = 'MIT'
import os.path
import hashlib
import logging
import subprocess
from debian import deb822
from .tools import tool_required
logger = logging.getLogger(__name__)
class ChangesFileException(Exception):
pass
class Changes(object):
"""
Changes object to help process and store information regarding Debian
.changes files, used in the upload process.
"""
def __init__(self, filename=None, string=None):
"""
Object constructor. The object allows the user to specify **either**:
#. a path to a *changes* file to parse
#. a string with the *changes* file contents.
::
a = Changes(filename='/tmp/packagename_version.changes')
b = Changes(string='Source: packagename\\nMaintainer: ...')
``filename``
Path to *changes* file to parse.
``string``
*changes* file in a string to parse.
"""
if (filename and string) or (not filename and not string):
raise TypeError
if filename:
self._absfile = os.path.abspath(filename)
self._directory = os.path.dirname(self._absfile)
self._data = deb822.Changes(open(filename, encoding='utf-8'))
self.basename = os.path.basename(filename)
else:
self._data = deb822.Changes(string)
if len(self._data) == 0:
raise ChangesFileException('Changes file could not be parsed.')
def get_filename(self):
"""
Returns the filename from which the changes file was generated from.
Please do note this is just the basename, not the entire full path, or
even a relative path. For the absolute path to the changes file, please
see :meth:`get_changes_file`.
"""
return self.basename
def get_changes_file(self):
"""
Return the full, absolute path to the changes file. For just the
filename, please see :meth:`get_filename`.
"""
return os.path.join(self._directory, self.get_filename())
def get_path(self, filename):
"""
Return the full, absolute path to a file referenced by the changes
file.
"""
return os.path.join(self._directory, filename)
def get_files(self):
"""
Returns a list of files referenced in the changes file, such as
the .dsc, .deb(s), .orig.tar.gz, and .diff.gz or .debian.tar.gz.
All strings in the array will be absolute paths to the files.
"""
return [os.path.join(self._directory, z['name'])
for z in self._data['Files']]
def keys(self):
return self._data.keys()
def __getitem__(self, key):
"""
Returns the value of the rfc822 key specified.
``key``
Key of data to request.
"""
return self._data[key]
def __contains__(self, key):
"""
Returns whether the specified RFC822 key exists.
``key``
Key of data to check for existence.
"""
return key in self._data
def get(self, key, default=None):
"""
Returns the value of the rfc822 key specified, but defaults
to a specific value if not found in the rfc822 file.
``key``
Key of data to request.
``default``
Default return value if ``key`` does not exist.
"""
return self._data.get(key, default)
def get_as_string(self, key):
"""
Returns the value of the rfc822 key specified as the original string.
``key``
Key of data to request.
"""
return self._data.get_as_string(key)
def get_component(self):
"""
Returns the component of the package.
"""
return self._parse_section(self._data['Files'][0]['section'])[0]
def get_priority(self):
"""
Returns the priority of the package.
"""
return self._parse_section(self._data['Files'][0]['priority'])[1]
def get_section(self):
"""
Returns the section of the package.
"""
return self._parse_section(self._data['Files'][0]['section'])[1]
def get_dsc(self):
"""
Returns the name of the .dsc file.
"""
for item in self.get_files():
if item.endswith('.dsc'):
return item
def get_pool_path(self):
"""
Returns the path the changes file would be
"""
return self._data.get_pool_path()
def get_package_name(self):
"""
Returns the source package name
"""
return self.get("Source")
def _parse_section(self, section):
"""
Works out the component and section from the "Section" field.
Sections like `python` or `libdevel` are in main.
Sections with a prefix, separated with a forward-slash also show the
component.
It returns a list of strings in the form [component, section].
For example, `non-free/python` has component `non-free` and section
`python`.
``section``
Section name to parse.
"""
if '/' in section:
return section.split('/')
else:
return ['main', section]
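        # Illustrative values (assumed): _parse_section('non-free/python') gives
        # ['non-free', 'python'], while _parse_section('python') gives
        # ['main', 'python'].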
def set_directory(self, directory):
if directory:
self._directory = directory
else:
self._directory = ""
def validate(self, check_hash="sha1", check_signature=True):
"""
See :meth:`validate_checksums` for ``check_hash``, and
:meth:`validate_signature` if ``check_signature`` is True.
"""
self.validate_checksums(check_hash)
if check_signature:
self.validate_signature(check_signature)
else:
logger.info("Not checking signature")
@tool_required('gpg')
def validate_signature(self, check_signature=True):
"""
Validate the GPG signature of a .changes file.
Throws a :class:`dput.exceptions.ChangesFileException` if there's
an issue with the GPG signature. Returns the GPG key ID.
"""
pipe = subprocess.Popen(
["gpg", "--status-fd", "1", "--verify", "--batch",
self.get_changes_file()],
shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gpg_output, gpg_output_stderr = pipe.communicate()
print(gpg_output)
if pipe.returncode != 0:
raise ChangesFileException(
"Unknown problem while verifying signature")
# contains verbose human readable GPG information
gpg_output_stderr = str(gpg_output_stderr, encoding='utf8')
print(gpg_output_stderr)
gpg_output = gpg_output.decode(encoding='UTF-8')
if gpg_output.count('[GNUPG:] GOODSIG'):
pass
elif gpg_output.count('[GNUPG:] BADSIG'):
raise ChangesFileException("Bad signature")
elif gpg_output.count('[GNUPG:] ERRSIG'):
raise ChangesFileException("Error verifying signature")
elif gpg_output.count('[GNUPG:] NODATA'):
raise ChangesFileException("No signature on")
else:
raise ChangesFileException(
"Unknown problem while verifying signature"
)
key = None
for line in gpg_output.split("\n"):
if line.startswith('[GNUPG:] VALIDSIG'):
key = line.split()[2]
return key
def validate_checksums(self, check_hash="sha1"):
"""
        Validate checksums for a package, using ``check_hash``'s type
to validate the package.
Valid ``check_hash`` types:
* sha1
* sha256
* md5
* md5sum
"""
logger.debug("validating %s checksums", check_hash)
for filename in self.get_files():
if check_hash == "sha1":
hash_type = hashlib.sha1()
checksums = self.get("Checksums-Sha1")
field_name = "sha1"
elif check_hash == "sha256":
hash_type = hashlib.sha256()
checksums = self.get("Checksums-Sha256")
field_name = "sha256"
elif check_hash == "md5":
hash_type = hashlib.md5()
checksums = self.get("Files")
field_name = "md5sum"
changed_files = None # appease pylint
for changed_files in checksums:
if changed_files['name'] == os.path.basename(filename):
break
else:
assert(
"get_files() returns different files than Files: knows?!")
with open(os.path.join(self._directory, filename), "rb") as fc:
while True:
chunk = fc.read(131072)
if not chunk:
break
hash_type.update(chunk)
fc.close()
if not hash_type.hexdigest() == changed_files[field_name]:
raise ChangesFileException(
"Checksum mismatch for file %s: %s != %s" % (
filename,
hash_type.hexdigest(),
changed_files[field_name]
))
else:
logger.debug("%s Checksum for file %s matches",
field_name, filename)
| gpl-3.0 | -1,668,908,580,463,681,000 | -5,437,947,124,454,223,000 | 31.962209 | 79 | 0.579063 | false |
venicegeo/eventkit-cloud | eventkit_cloud/utils/tests/test_coordinate_converter.py | 1 | 2036 | import json
import logging
import requests_mock
from django.conf import settings
from django.test import TestCase, override_settings
from eventkit_cloud.utils.geocoding.coordinate_converter import CoordinateConverter
logger = logging.getLogger(__name__)
mockURL = "http://test.test"
@override_settings(GEOCODING_AUTH_URL=None)
class TestConvert(TestCase):
def setUp(self):
self.mock_requests = requests_mock.Mocker()
self.mock_requests.start()
self.addCleanup(self.mock_requests.stop)
settings.CONVERT_API_URL = mockURL
def test_convert_success(self):
convert_response_success = {
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
-112.61869345019069,
50.00105275281522
]
},
"properties": {
"name": "12UUA8440",
"from": "mgrs",
"to": "decdeg"
}
}
self.mock_requests.get(mockURL, text=json.dumps(convert_response_success), status_code=200)
convert = CoordinateConverter()
result = convert.get("18S TJ 97100 03003")
self.assertIsNotNone(result.get("geometry"))
self.assertEqual(result.get("type"), "Feature")
properties = result.get("properties")
geometry = result.get("geometry")
self.assertIsInstance(properties, dict)
self.assertIsInstance(geometry, dict)
self.assertEqual(geometry.get("type"), "Point")
self.assertIsInstance(geometry.get("coordinates"), list)
def test_convert_fail(self):
convert_response_fail = {
"properties": {
"name": "12UUA844",
"from": "mgrs",
"to": "decdeg"
}
}
with self.assertRaises(Exception):
self.mock_requests.get(mockURL, text=json.dumps(convert_response_fail), status_code=500)
CoordinateConverter().get_data()
| bsd-3-clause | 940,525,073,404,339,800 | 4,287,498,070,791,378,000 | 31.31746 | 100 | 0.583497 | false |
tinkerthaler/odoo | addons/resource/__init__.py | 448 | 1086 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import resource
import faces
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 1,812,246,887,518,950,400 | 7,730,775,782,779,239,000 | 39.222222 | 79 | 0.611418 | false |
Cenditel/cenditel.comunidades.cynin | products/ATRatings/RatingsTool.py | 4 | 13993 | import os, sys
import urllib
import Globals
from AccessControl import ClassSecurityInfo
from DateTime import DateTime
from OFS.SimpleItem import SimpleItem
from Acquisition import aq_base
from Products.Archetypes.Referenceable import Referenceable
from OFS.PropertyManager import PropertyManager
from Products.CMFCore.utils import UniqueObject, getToolByName
from Products.CMFPlone.PloneBaseTool import PloneBaseTool
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
# lazy way of configuring this tool
from config import MIN_RATING_VALUE, MAX_RATING_VALUE, STORAGE_CLASS, STORAGE_ARGS, NEUTRAL_RATING_VALUE
from Permissions import ADD_RATING_PERMISSION
from Products.CMFCore.permissions import ManagePortal
from ZODBStorage import HITS_SUMMARY_ID, RATINGS_SUMMARY_ID
# ##############################################################################
class RatingsTool(PloneBaseTool, UniqueObject, SimpleItem, Referenceable, PropertyManager):
""" """
id = 'portal_ratings'
meta_type= 'Ratings Tool'
# toolicon = 'skins/plone_images/favorite_icon.gif'
security = ClassSecurityInfo()
isPrincipiaFolderish = 0
storage = None
__implements__ = (PloneBaseTool.__implements__, SimpleItem.__implements__, )
manage_options = ( ({'label':'Overview', 'action':'manage_overview'},) +
PropertyManager.manage_options + SimpleItem.manage_options)
security.declareProtected(ManagePortal, 'manage_overview')
manage_overview = PageTemplateFile('www/portal_ratings_manage_overview', globals())
manage_overview.__name__ = 'manage_overview'
manage_overview._need__name__ = 0
manage_main = manage_overview
_properties = PropertyManager._properties + (
{'id':'allowed_rating_types', 'type': 'lines', 'mode':'w',
'label':'Allowed raing types'},
{'id':'allowed_counting_types', 'type': 'lines', 'mode':'w',
'label':'Allowed hit counting types'},
)
allowed_rating_types = ['Document', 'News Item', 'File', 'Image', 'Link', ]
allowed_counting_types = ['Document', 'News Item', 'File', 'Image', 'Link', ]
def isRatingAllowedFor(self, content):
""" do content allow rating?
Add a 'allowRatings' boolean property to the context to enable it"""
allowRatings = getattr(content, 'enableRatings', 1)
if not allowRatings:
return 0
if content.getPortalTypeName() not in self.allowed_rating_types:
return 0
return hasattr(aq_base(content), 'UID')
def isCountingAllowedFor(self, content):
""" do the content allow hit count
Add a 'allowCountings' boolean property to the context to enable it"""
allowCountings = getattr(content, 'enableCountings', 0)
if not allowCountings:
return 0
if content.getPortalTypeName() not in self.allowed_counting_types:
return 0
return hasattr(aq_base(content), 'UID')
security.declarePublic('getCyninRating')
def getCyninRating(self,uid):
cyninrating = self._getCyninRating(uid)
if cyninrating is None:
return None
else:
return cyninrating
security.declarePublic('getCyninRatingCount')
def getCyninRatingCount(self,uid):
return self._getCyninRatingCount(uid)
security.declarePublic('getTopRatingsAll')
def getTopRatingsAll(self,brains):
""" get top n hot contents from catalog brains """
results = []
for brain in brains:
value = self._getCyninRating(brain.UID)
if value <> None:
ratecount = self.getRatingCount(brain.UID)
cyninratingcount = self._getCyninRatingCount(brain.UID)
results.append( (value, brain, ratecount, cyninratingcount))
def sortlist(x,y):
if cmp(y[0],x[0]) != 0:
return cmp(y[0],x[0])
else:
return cmp(y[2],x[2])
results.sort(lambda x,y:sortlist(x,y));
return results
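        # Each entry of the returned list is a tuple of
        # (net rating, catalog brain, total rating count, positive/negative breakdown),
        # sorted by net rating and then by total rating count, both descending.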
security.declarePublic('getTopRatings')
def getTopRatings(self, brains, limit=5):
""" get top n hot contents from catalog brains """
results = []
results = self.getTopRatingsAll(brains)
return results[:limit]
security.declarePublic('getBadRatings')
def getBadRatings(self, brains, limit=5):
""" get bad ratings from catalog brains """
results = []
for brain in brains:
value = self.getRatingMean(brain.UID)
if value:
results.append((value, brain))
results.sort(lambda x,y:cmp(x[0], y[0]))
return results[:limit]
security.declarePublic('getTopCountings')
def getTopCountings(self, brains, limit=5):
""" get top n hot contents from catalog brains """
results = []
for brain in brains:
count = self.getHitCount(brain.UID)
if count:
results.append((count, brain))
results.sort(lambda x,y:cmp(y[0], x[0]))
return results[:limit]
security.declarePublic('getBadCountings')
def getBadCountings(self, brains, limit=5):
""" get top n cold contents from catalog brains """
results = []
for brain in brains:
count = self.getHitCount(brain.UID)
if count:
results.append((count, brain))
results.sort(lambda x,y:cmp(x[0], y[0]))
return results[:limit]
def addRating(self, rating, uid):
mt = getToolByName(self, 'portal_membership')
if mt.isAnonymousUser():
raise ValueError, 'Anonymous user cannot rate content'
# check permission
reference_catalog = getToolByName(self, 'reference_catalog')
object = reference_catalog.lookupObject(uid)
mt.checkPermission(ADD_RATING_PERMISSION, object)
member = mt.getAuthenticatedMember()
username = member.getUserName()
old_rating = self._getUserRating(uid, username)
if old_rating is not None:
self._deleteRating(uid, username)
return self._addRating(rating, uid, username)
def getUserRating(self, uid, username=None):
if username is None:
mt = getToolByName(self, 'portal_membership')
if mt.isAnonymousUser():
raise ValueError, 'Anonymous user cannot rate content'
member = mt.getAuthenticatedMember()
username = member.getUserName()
return self._getUserRating(uid, username)
def addHit(self, uid):
self._getStorage().addHit(uid)
# Summary statistics: HITS
# hits for individual item
def getHitCount(self, uid):
return self._getStorage().getHitCount(uid) or 0
# hits for all items
def getTotalHitCount(self):
return self._getHitsSummary().getCount()
def getHitRateTimeInterval(self):
return HIT_RATE_TIME_INTERVAL
def getHitRate(self, uid):
return self._getStorage().getHitRate(uid)
# Summary statistics: RATINGS
def getMinRating(self):
return MIN_RATING_VALUE
def getMaxRating(self):
return MAX_RATING_VALUE
# rating stats for individual items
def getRatingCount(self, uid):
return self._getStorage().getRatingCount(uid)
def getRatingSum(self, uid):
return self._getStorage().getSum(uid)
def getRatingSumSquared(self, uid):
return self._getStorage().getSumSquared(uid)
def getRatingMean(self, uid):
ratingMean = self._getStorage().getMean(uid)
if ratingMean == None:
return 0
else:
return ratingMean
def getRatingStdDev(self, uid):
return self._getStorage().getStdDev(uid)
def getRatingVariance(self, uid):
return self._getStorage().getVariance(uid)
# rating stats for all items
def getTotalRatingCount(self):
"""a count of rating means."""
return self._getStorage().getTotalRatingCount()
def getRatingMeanCount(self):
"""a count of rating means."""
return self._getStorage().getRatingMeanCount()
def getRatingMeanSum(self):
"""return a sum of rating means."""
return self._getStorage().getRatingMeanSum()
def getRatingMeanSumSquared(self):
"""a sum of rating means squared."""
return self._getStorage().getRatingMeanSumSquared()
def getRatingMeanMean(self):
"""a mean of rating means."""
return self._getStorage().getRatingMeanMean()
def getRatingMeanStdDev(self):
"""a standard deviation of rating means."""
return self._getStorage().getRatingMeanStdDev()
def getRatingMeanVariance(self):
"""a standard deviation of rating means"""
return self._getStorage().getRatingMeanVariance()
def getNoiseVariance(self):
return self._getStorage().getNoiseVariance()
def getEstimatedRating(self, uid):
"""Use a Bayesian MMSE estimator for DC in white Gaussian noise to
estimate the true rating for an item.
Motivation: a small number of very positive or very negative ratings
can make an item look much better or worse than it actually is. We
use a statistical technique to reduce this kind of small number bias.
Essentially we assume that true ratings have a Gaussian distribution.
Most true ratings are somewhere in the middle, with small numbers
very high and small numbers very low. User ratings for an item are
the item's true rating + some Gaussian noise. User ratings are
mostly close to the true rating, with a few much higher and a few
much lower.
We estimate a prior distribution of true means and the noise level
from all the data. We then use this prior info for the Bayesian
estimator. See _Fundamentals of Statistical Signal Processing_, by
        Steven Kay, pp. 316 - 321 for details.
"""
priorMean = self.getRatingMeanMean()
noiseVariance = self.getNoiseVariance()
itemMean = self.getRatingMean(uid)
if priorMean is None or noiseVariance is None:
# not enough information to compute a prior -- just return the mean
if itemMean is None:
# no data for computing a mean -- return the middle rating
return 0.5 * (float(self.getMinRating()) + float(self.getMaxRating()))
return itemMean
if itemMean is None:
return priorMean
priorVariance = self.getRatingMeanVariance()
if priorVariance == 0.0 and noiseVariance == 0.0:
return itemMean
itemRatings = self.getRatingCount(uid)
alpha = priorVariance / (priorVariance + noiseVariance/itemRatings)
return alpha * itemMean + (1.0 - alpha) * priorMean
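        # Numeric illustration (values assumed): with priorMean=3.0,
        # priorVariance=0.5 and noiseVariance=2.0, an item rated twice with
        # itemMean=5.0 gives alpha = 0.5 / (0.5 + 2.0/2) = 1/3, so the estimate
        # is (1/3)*5.0 + (2/3)*3.0 ~= 3.67 -- pulled toward the prior mean.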
# private interface
def _getStorage(self):
if self.storage is None:
self.storage = STORAGE_CLASS(**STORAGE_ARGS)
return self.storage
def _addRating(self, rating, uid, username):
# delegate to storage
self._getStorage().addRating(rating, uid, username)
def _deleteRating(self, uid, username):
# delegate to storage
self._getStorage().deleteRating(uid, username)
def _getUserRating(self, uid, username):
# delegate to storage
return self._getStorage().getUserRating(uid, username)
def _deleteRatingsFor(self, uid):
# delegate to storage
return self._getStorage().deleteRatingsFor(uid)
def _getCyninRating(self,uid):
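        # Net score for the object: the sum of (vote - NEUTRAL_RATING_VALUE) over
        # all user ratings, skipping the summary entries; neutral votes are
        # discarded, and None is returned when nobody has rated the object.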
returnvalue = None
objRating = self._getStorage()._getObjectRatings(uid,0)
if objRating:
repository = objRating.repository
keyslist = [k for k in repository.keys() if k not in (HITS_SUMMARY_ID,RATINGS_SUMMARY_ID)]
if len(keyslist) == 0:
returnvalue = None
else:
returnvalue = 0
for eachkey in keyslist:
value = repository.get(eachkey,None)
if value and isinstance(value,int):
if value == NEUTRAL_RATING_VALUE:
self._deleteRating(uid,eachkey)
else:
returnvalue = returnvalue + (value - NEUTRAL_RATING_VALUE)
return returnvalue
def _getCyninRatingCount(self,uid):
result = {'positive':0,'negative':0,'positivescore':0,'negativescore':0}
objRating = self._getStorage()._getObjectRatings(uid,0)
if objRating:
repository = objRating.repository
keyslist = [k for k in repository.keys() if k not in (HITS_SUMMARY_ID,RATINGS_SUMMARY_ID)]
for eachkey in keyslist:
value = repository.get(eachkey,None)
if value and isinstance(value,int):
if value > NEUTRAL_RATING_VALUE:
result['positive'] = result['positive'] + 1
result['positivescore'] = result['positivescore'] + (value - NEUTRAL_RATING_VALUE)
elif value < NEUTRAL_RATING_VALUE:
result['negative'] = result['negative'] + 1
result['negativescore'] = result['negativescore'] + (value - NEUTRAL_RATING_VALUE)
return result
Globals.InitializeClass(RatingsTool)
| gpl-3.0 | -8,640,348,015,954,864,000 | -775,400,911,032,559,900 | 36.024457 | 106 | 0.604731 | false |
pchauncey/ansible | contrib/inventory/apstra_aos.py | 14 | 20398 | #!/usr/bin/env python
#
# (c) 2017 Apstra Inc, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
Apstra AOS external inventory script
====================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this:
- copy this file over /etc/ansible/hosts and chmod +x the file.
- Copy both files (.py and .ini) in your preferred directory
More information about Ansible Dynamic Inventory here
http://unix.stackexchange.com/questions/205479/in-ansible-dynamic-inventory-json-can-i-render-hostvars-based-on-the-hostname
2 modes are currently supported: **device based** or **blueprint based**:
 - For **Device based**, the list of devices is taken from the global device list;
   the serial ID will be used as the inventory_hostname
 - For **Blueprint based**, the list of devices is taken from the given blueprint;
   the Node name will be used as the inventory_hostname
Input parameters can be provided either with the ini file or by using Environment Variables:
The following Environment Variables are supported: AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
The config file takes precedence over the Environment Variables
Tested with Apstra AOS 1.1
This script has been inspired by the cobbler.py inventory. Thanks!
Author: Damien Garros (@dgarros)
Version: 0.2.0
"""
import json
import os
import re
import sys
try:
import argparse
HAS_ARGPARSE = True
except ImportError:
HAS_ARGPARSE = False
try:
from apstra.aosom.session import Session
HAS_AOS_PYEZ = True
except ImportError:
HAS_AOS_PYEZ = False
from ansible.module_utils.six.moves import configparser
"""
##
Expected output format in Device mode
{
"Cumulus": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"EOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
},
"Generic Model": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"Ubuntu GNU/Linux": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"VX": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"_meta": {
"hostvars": {
"5254001CAFD8": {
"agent_start_time": "2017-02-03T00:49:16.000000Z",
"ansible_ssh_host": "172.20.52.6",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:58.454480Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.6",
"mgmt_macaddr": "52:54:00:1C:AF:D8",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "5254001CAFD8",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"52540022211A": {
"agent_start_time": "2017-02-03T00:45:22.000000Z",
"ansible_ssh_host": "172.20.52.7",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.019189Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.7",
"mgmt_macaddr": "52:54:00:22:21:1a",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540022211A",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"52540073956E": {
"agent_start_time": "2017-02-03T00:45:19.000000Z",
"ansible_ssh_host": "172.20.52.8",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.030113Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.8",
"mgmt_macaddr": "52:54:00:73:95:6e",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540073956E",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"525400DDDF72": {
"agent_start_time": "2017-02-03T00:49:07.000000Z",
"ansible_ssh_host": "172.20.52.5",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:46.929921Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.5",
"mgmt_macaddr": "52:54:00:DD:DF:72",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "525400DDDF72",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"525400E5486D": {
"agent_start_time": "2017-02-02T18:44:42.000000Z",
"ansible_ssh_host": "172.20.52.4",
"aos_hcl_model": "Generic_Server_1RU_1x10G",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-02T21:11:25.188734Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "Generic Model",
"hw_version": "pc-i440fx-trusty",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.4",
"mgmt_macaddr": "52:54:00:e5:48:6d",
"os_arch": "x86_64",
"os_family": "Ubuntu GNU/Linux",
"os_version": "14.04 LTS",
"os_version_info": {
"build": "",
"major": "14",
"minor": "04"
},
"serial_number": "525400E5486D",
"state": "OOS-QUARANTINED",
"vendor": "Generic Manufacturer"
}
}
},
"all": {
"hosts": [
"5254001CAFD8",
"52540073956E",
"525400DDDF72",
"525400E5486D",
"52540022211A"
],
"vars": {}
},
"vEOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
}
}
"""
def fail(msg):
sys.stderr.write("%s\n" % msg)
sys.exit(1)
class AosInventory(object):
def __init__(self):
""" Main execution path """
if not HAS_AOS_PYEZ:
raise Exception('aos-pyez is not installed. Please see details here: https://github.com/Apstra/aos-pyez')
if not HAS_ARGPARSE:
raise Exception('argparse is not installed. Please install the argparse library or upgrade to python-2.7')
# Initialize inventory
self.inventory = dict() # A list of groups and the hosts in that group
self.inventory['_meta'] = dict()
self.inventory['_meta']['hostvars'] = dict()
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# ----------------------------------------------------
# Open session to AOS
# ----------------------------------------------------
aos = Session(server=self.aos_server,
port=self.aos_server_port,
user=self.aos_username,
passwd=self.aos_password)
aos.login()
# Save session information in variables of group all
self.add_var_to_group('all', 'aos_session', aos.session)
# Add the AOS server itself in the inventory
self.add_host_to_group("all", 'aos')
self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server)
self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password)
self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username)
# ----------------------------------------------------
# Build the inventory
# 2 modes are supported: device based or blueprint based
        # - For device based, the list of devices is taken from the global device list
        #   and the serial ID will be used as the inventory_hostname
        # - For Blueprint based, the list of devices is taken from the given blueprint
        #   and the Node name will be used as the inventory_hostname
# ----------------------------------------------------
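        # Illustrative only (the serial comes from the sample output in the
        # module docstring, the node name is hypothetical):
        #   device based    -> inventory_hostname "5254001CAFD8" (serial number)
        #   blueprint based -> inventory_hostname "spine_1" (blueprint node name)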
if self.aos_blueprint:
bp = aos.Blueprints[self.aos_blueprint]
if bp.exists is False:
fail("Unable to find the Blueprint: %s" % self.aos_blueprint)
for dev_name, dev_id in bp.params['devices'].value.items():
self.add_host_to_group('all', dev_name)
device = aos.Devices.find(uid=dev_id)
if 'facts' in device.value.keys():
self.add_device_facts_to_var(dev_name, device)
# Define admin State and Status
if 'user_config' in device.value.keys():
if 'admin_state' in device.value['user_config'].keys():
self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state'])
self.add_device_status_to_var(dev_name, device)
# Go over the contents data structure
for node in bp.contents['system']['nodes']:
if node['display_name'] == dev_name:
self.add_host_to_group(node['role'], dev_name)
                        # Check for additional attributes to import
attributes_to_import = [
'loopback_ip',
'asn',
'role',
'position',
]
for attr in attributes_to_import:
if attr in node.keys():
self.add_var_to_host(dev_name, attr, node[attr])
# if blueprint_interface is enabled in the configuration
# Collect links information
if self.aos_blueprint_int:
interfaces = dict()
for link in bp.contents['system']['links']:
                        # each link has 2 sides [0, 1], and it is unknown which one
                        # matches this device; at first we assume the first side (0)
                        # matches and the peer is (1)
peer_id = 1
for side in link['endpoints']:
if side['display_name'] == dev_name:
# import local information first
int_name = side['interface']
# init dict
interfaces[int_name] = dict()
if 'ip' in side.keys():
interfaces[int_name]['ip'] = side['ip']
if 'interface' in side.keys():
interfaces[int_name]['name'] = side['interface']
if 'display_name' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer'] = link['endpoints'][peer_id]['display_name']
if 'ip' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer_ip'] = link['endpoints'][peer_id]['ip']
if 'type' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer_type'] = link['endpoints'][peer_id]['type']
else:
                                # if we haven't matched the first time, prepare the peer_id
                                # for the second loop iteration
peer_id = 0
self.add_var_to_host(dev_name, 'interfaces', interfaces)
else:
for device in aos.Devices:
                # If not reachable, create by key and
                # if reachable, create by hostname
self.add_host_to_group('all', device.name)
# populate information for this host
self.add_device_status_to_var(device.name, device)
if 'user_config' in device.value.keys():
for key, value in device.value['user_config'].items():
self.add_var_to_host(device.name, key, value)
# Based on device status online|offline, collect facts as well
if device.value['status']['comm_state'] == 'on':
if 'facts' in device.value.keys():
self.add_device_facts_to_var(device.name, device)
# Check if device is associated with a blueprint
                # if it is, create a new group
if 'blueprint_active' in device.value['status'].keys():
if 'blueprint_id' in device.value['status'].keys():
bp = aos.Blueprints.find(uid=device.value['status']['blueprint_id'])
if bp:
self.add_host_to_group(bp.name, device.name)
# ----------------------------------------------------
# Convert the inventory and return a JSON String
# ----------------------------------------------------
data_to_print = ""
data_to_print += self.json_format_dict(self.inventory, True)
print(data_to_print)
def read_settings(self):
""" Reads the settings from the apstra_aos.ini file """
config = configparser.ConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/apstra_aos.ini')
# Default Values
self.aos_blueprint = False
self.aos_blueprint_int = True
self.aos_username = 'admin'
self.aos_password = 'admin'
self.aos_server_port = 8888
        # Try to read all parameters from the file; if not available, try from ENV
try:
self.aos_server = config.get('aos', 'aos_server')
except:
if 'AOS_SERVER' in os.environ.keys():
self.aos_server = os.environ['AOS_SERVER']
try:
self.aos_server_port = config.get('aos', 'port')
except:
if 'AOS_PORT' in os.environ.keys():
self.aos_server_port = os.environ['AOS_PORT']
try:
self.aos_username = config.get('aos', 'username')
except:
if 'AOS_USERNAME' in os.environ.keys():
self.aos_username = os.environ['AOS_USERNAME']
try:
self.aos_password = config.get('aos', 'password')
except:
if 'AOS_PASSWORD' in os.environ.keys():
self.aos_password = os.environ['AOS_PASSWORD']
try:
self.aos_blueprint = config.get('aos', 'blueprint')
except:
if 'AOS_BLUEPRINT' in os.environ.keys():
self.aos_blueprint = os.environ['AOS_BLUEPRINT']
try:
if config.get('aos', 'blueprint_interface') in ['false', 'no']:
self.aos_blueprint_int = False
except:
pass
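    # Sketch of the apstra_aos.ini file read above. The [aos] section and option
    # names come from read_settings(); the values are purely illustrative:
    #
    #     [aos]
    #     aos_server = 172.20.52.3
    #     port = 8888
    #     username = admin
    #     password = admin
    #     blueprint = my-blueprint
    #     blueprint_interface = true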
def parse_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Apstra AOS')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def json_format_dict(self, data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def add_host_to_group(self, group, host):
# Cleanup group name first
clean_group = self.cleanup_group_name(group)
        # Check if the group exists; if not, initialize it
if clean_group not in self.inventory.keys():
self.inventory[clean_group] = {}
self.inventory[clean_group]['hosts'] = []
self.inventory[clean_group]['vars'] = {}
self.inventory[clean_group]['hosts'].append(host)
def add_var_to_host(self, host, var, value):
        # Check if the host exists; if not, initialize it
if host not in self.inventory['_meta']['hostvars'].keys():
self.inventory['_meta']['hostvars'][host] = {}
self.inventory['_meta']['hostvars'][host][var] = value
def add_var_to_group(self, group, var, value):
# Cleanup group name first
clean_group = self.cleanup_group_name(group)
        # Check if the group exists; if not, initialize it
if clean_group not in self.inventory.keys():
self.inventory[clean_group] = {}
self.inventory[clean_group]['hosts'] = []
self.inventory[clean_group]['vars'] = {}
self.inventory[clean_group]['vars'][var] = value
def add_device_facts_to_var(self, device_name, device):
# Populate variables for this host
self.add_var_to_host(device_name,
'ansible_ssh_host',
device.value['facts']['mgmt_ipaddr'])
self.add_var_to_host(device_name, 'id', device.id)
# self.add_host_to_group('all', device.name)
for key, value in device.value['facts'].items():
self.add_var_to_host(device_name, key, value)
if key == 'os_family':
self.add_host_to_group(value, device_name)
elif key == 'hw_model':
self.add_host_to_group(value, device_name)
def cleanup_group_name(self, group_name):
"""
        Clean up group name by:
          - Replacing all non-alphanumeric characters with underscores
          - Converting to lowercase
"""
        rx = re.compile(r'\W+')
clean_group = rx.sub('_', group_name).lower()
return clean_group
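    # For example, with the sample facts shown in the module docstring, a device
    # whose os_family is "Ubuntu GNU/Linux" lands in group "ubuntu_gnu_linux" and
    # one whose hw_model is "Generic Model" lands in group "generic_model".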
def add_device_status_to_var(self, device_name, device):
if 'status' in device.value.keys():
for key, value in device.value['status'].items():
                self.add_var_to_host(device_name, key, value)
# Run the script
if __name__ == '__main__':
AosInventory()
| gpl-3.0 | 1,580,839,553,426,549,800 | -8,085,190,948,347,439,000 | 33.690476 | 124 | 0.525002 | false |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/numpy/lib/__init__.py | 16 | 1122 | from __future__ import division, absolute_import, print_function
import math
from .info import __doc__
from numpy.version import version as __version__
from .type_check import *
from .index_tricks import *
from .function_base import *
from .nanfunctions import *
from .shape_base import *
from .stride_tricks import *
from .twodim_base import *
from .ufunclike import *
from . import scimath as emath
from .polynomial import *
#import convertcode
from .utils import *
from .arraysetops import *
from .npyio import *
from .financial import *
from .arrayterator import *
from .arraypad import *
__all__ = ['emath', 'math']
__all__ += type_check.__all__
__all__ += index_tricks.__all__
__all__ += function_base.__all__
__all__ += shape_base.__all__
__all__ += stride_tricks.__all__
__all__ += twodim_base.__all__
__all__ += ufunclike.__all__
__all__ += arraypad.__all__
__all__ += polynomial.__all__
__all__ += utils.__all__
__all__ += arraysetops.__all__
__all__ += npyio.__all__
__all__ += financial.__all__
__all__ += nanfunctions.__all__
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
| gpl-3.0 | -3,422,766,056,477,535,000 | -4,162,553,018,914,423,000 | 23.933333 | 64 | 0.656863 | false |
kingvuplus/EGAMI-E | lib/python/Plugins/SystemPlugins/OSDPositionSetup/plugin.py | 41 | 4968 | from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import config, ConfigSubsection, ConfigInteger, ConfigSlider, getConfigListEntry
config.plugins.OSDPositionSetup = ConfigSubsection()
config.plugins.OSDPositionSetup.dst_left = ConfigInteger(default = 0)
config.plugins.OSDPositionSetup.dst_width = ConfigInteger(default = 720)
config.plugins.OSDPositionSetup.dst_top = ConfigInteger(default = 0)
config.plugins.OSDPositionSetup.dst_height = ConfigInteger(default = 576)
class OSDScreenPosition(Screen, ConfigListScreen):
skin = """
<screen position="0,0" size="e,e" title="OSD position setup" backgroundColor="blue">
<widget name="config" position="c-175,c-75" size="350,150" foregroundColor="black" backgroundColor="blue" />
<ePixmap pixmap="buttons/green.png" position="c-145,e-100" zPosition="0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/red.png" position="c+5,e-100" zPosition="0" size="140,40" alphatest="on" />
<widget name="ok" position="c-145,e-100" size="140,40" valign="center" halign="center" zPosition="1" font="Regular;20" transparent="1" backgroundColor="green" />
<widget name="cancel" position="c+5,e-100" size="140,40" valign="center" halign="center" zPosition="1" font="Regular;20" transparent="1" backgroundColor="red" />
</screen>"""
def __init__(self, session):
self.skin = OSDScreenPosition.skin
Screen.__init__(self, session)
from Components.ActionMap import ActionMap
from Components.Button import Button
self["ok"] = Button(_("OK"))
self["cancel"] = Button(_("Cancel"))
self["actions"] = ActionMap(["SetupActions", "ColorActions", "MenuActions"],
{
"ok": self.keyGo,
"save": self.keyGo,
"cancel": self.keyCancel,
"green": self.keyGo,
"red": self.keyCancel,
"menu": self.closeRecursive,
}, -2)
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session)
left = config.plugins.OSDPositionSetup.dst_left.value
width = config.plugins.OSDPositionSetup.dst_width.value
top = config.plugins.OSDPositionSetup.dst_top.value
height = config.plugins.OSDPositionSetup.dst_height.value
self.dst_left = ConfigSlider(default = left, increment = 1, limits = (0, 720))
self.dst_width = ConfigSlider(default = width, increment = 1, limits = (0, 720))
self.dst_top = ConfigSlider(default = top, increment = 1, limits = (0, 576))
self.dst_height = ConfigSlider(default = height, increment = 1, limits = (0, 576))
self.list.append(getConfigListEntry(_("left"), self.dst_left))
self.list.append(getConfigListEntry(_("width"), self.dst_width))
self.list.append(getConfigListEntry(_("top"), self.dst_top))
self.list.append(getConfigListEntry(_("height"), self.dst_height))
self["config"].list = self.list
self["config"].l.setList(self.list)
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.setPreviewPosition()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.setPreviewPosition()
def setPreviewPosition(self):
setPosition(int(self.dst_left.value), int(self.dst_width.value), int(self.dst_top.value), int(self.dst_height.value))
def keyGo(self):
config.plugins.OSDPositionSetup.dst_left.value = self.dst_left.value
config.plugins.OSDPositionSetup.dst_width.value = self.dst_width.value
config.plugins.OSDPositionSetup.dst_top.value = self.dst_top.value
config.plugins.OSDPositionSetup.dst_height.value = self.dst_height.value
config.plugins.OSDPositionSetup.save()
self.close()
def keyCancel(self):
setConfiguredPosition()
self.close()
def setPosition(dst_left, dst_width, dst_top, dst_height):
if dst_left + dst_width > 720:
dst_width = 720 - dst_left
if dst_top + dst_height > 576:
dst_height = 576 - dst_top
try:
file = open("/proc/stb/vmpeg/0/dst_left", "w")
file.write('%X' % dst_left)
file.close()
file = open("/proc/stb/vmpeg/0/dst_width", "w")
file.write('%X' % dst_width)
file.close()
file = open("/proc/stb/vmpeg/0/dst_top", "w")
file.write('%X' % dst_top)
file.close()
file = open("/proc/stb/vmpeg/0/dst_height", "w")
file.write('%X' % dst_height)
file.close()
except:
return
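# Note: setPosition() writes the values to the /proc/stb/vmpeg/0/dst_* nodes as
# hexadecimal strings ('%X'), e.g. a left offset of 20 pixels is written as "14".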
def setConfiguredPosition():
setPosition(int(config.plugins.OSDPositionSetup.dst_left.value), int(config.plugins.OSDPositionSetup.dst_width.value), int(config.plugins.OSDPositionSetup.dst_top.value), int(config.plugins.OSDPositionSetup.dst_height.value))
def main(session, **kwargs):
session.open(OSDScreenPosition)
def startup(reason, **kwargs):
setConfiguredPosition()
def Plugins(**kwargs):
from os import path
if path.exists("/proc/stb/vmpeg/0/dst_left"):
from Plugins.Plugin import PluginDescriptor
return [PluginDescriptor(name = "OSD position setup", description = "Compensate for overscan", where = PluginDescriptor.WHERE_PLUGINMENU, fnc = main),
PluginDescriptor(name = "OSD position setup", description = "", where = PluginDescriptor.WHERE_SESSIONSTART, fnc = startup)]
return []
| gpl-2.0 | -8,980,599,858,485,519,000 | -7,589,227,863,710,772 | 40.747899 | 226 | 0.72504 | false |
onceuponatimeforever/oh-mainline | vendor/packages/Django/django/contrib/gis/db/backends/spatialite/creation.py | 100 | 5765 | import os
from django.conf import settings
from django.core.cache import get_cache
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.creation import DatabaseCreation
class SpatiaLiteCreation(DatabaseCreation):
def create_test_db(self, verbosity=1, autoclobber=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
This method is overloaded to load up the SpatiaLite initialization
SQL prior to calling the `syncdb` command.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
test_database_name = self._get_test_db_name()
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print("Creating test database for alias '%s'%s..." % (self.connection.alias, test_db_repr))
self._create_test_db(verbosity, autoclobber)
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
self.connection.ops.confirm_spatial_components_versions()
# Need to load the SpatiaLite initialization SQL before running `syncdb`.
self.load_spatialite_sql()
# Report syncdb messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded)
call_command('syncdb',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
load_initial_data=False)
# We need to then do a flush to ensure that any data installed by
# custom SQL has been removed. The only test data should come from
# test fixtures, or autogenerated from post_syncdb triggers.
# This has the side effect of loading initial data (which was
# intentionally skipped in the syncdb).
call_command('flush',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias)
for cache_alias in settings.CACHES:
cache = get_cache(cache_alias)
if isinstance(cache, BaseDatabaseCache):
call_command('createcachetable', cache._table, database=self.connection.alias)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
cursor = self.connection.cursor()
return test_database_name
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
output = super(SpatiaLiteCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('AddGeometryColumn') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ', ' +
style.SQL_FIELD(str(f.srid)) + ', ' +
style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
style.SQL_KEYWORD(str(f.dim)) + ', ' +
style.SQL_KEYWORD(str(int(not f.null))) +
');')
if f.spatial_index:
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('CreateSpatialIndex') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ');')
return output
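    # For a geometry column this emits SQL along these lines (table/column names,
    # SRID, type and dimension below are illustrative only):
    #
    #   SELECT AddGeometryColumn('myapp_city', 'point', 4326, 'POINT', 2, 1);
    #   SELECT CreateSpatialIndex('myapp_city', 'point');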
def load_spatialite_sql(self):
"""
This routine loads up the SpatiaLite SQL file.
"""
if self.connection.ops.spatial_version[:2] >= (2, 4):
# Spatialite >= 2.4 -- No need to load any SQL file, calling
# InitSpatialMetaData() transparently creates the spatial metadata
# tables
cur = self.connection._cursor()
cur.execute("SELECT InitSpatialMetaData()")
else:
# Spatialite < 2.4 -- Load the initial SQL
# Getting the location of the SpatiaLite SQL file, and confirming
# it exists.
spatialite_sql = self.spatialite_init_file()
if not os.path.isfile(spatialite_sql):
raise ImproperlyConfigured('Could not find the required SpatiaLite initialization '
'SQL file (necessary for testing): %s' % spatialite_sql)
# Opening up the SpatiaLite SQL initialization file and executing
# as a script.
with open(spatialite_sql, 'r') as sql_fh:
cur = self.connection._cursor()
cur.executescript(sql_fh.read())
def spatialite_init_file(self):
# SPATIALITE_SQL may be placed in settings to tell GeoDjango
        # to use a specific path to the SpatiaLite initialization SQL.
return getattr(settings, 'SPATIALITE_SQL',
'init_spatialite-%s.%s.sql' %
self.connection.ops.spatial_version[:2])
| agpl-3.0 | -8,276,387,942,045,109,000 | 2,960,915,482,374,309,000 | 43.346154 | 103 | 0.595663 | false |
bleib1dj/boto | boto/s3/key.py | 22 | 82475 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import email.utils
import errno
import hashlib
import mimetypes
import os
import re
import base64
import binascii
import math
from hashlib import md5
import boto.utils
from boto.compat import BytesIO, six, urllib, encodebytes
from boto.exception import BotoClientError
from boto.exception import StorageDataError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5, compute_hash
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
DefaultContentType = 'application/octet-stream'
RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Days>%s</Days>
</RestoreRequest>"""
BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
# The object metadata fields a user can set, other than custom metadata
# fields (i.e., those beginning with a provider-specific prefix like
# x-amz-meta).
base_user_settable_fields = set(["cache-control", "content-disposition",
"content-encoding", "content-language",
"content-md5", "content-type",
"x-robots-tag", "expires"])
_underscore_base_user_settable_fields = set()
for f in base_user_settable_fields:
_underscore_base_user_settable_fields.add(f.replace('-', '_'))
# Metadata fields, whether user-settable or not, other than custom
# metadata fields (i.e., those beginning with a provider specific prefix
# like x-amz-meta).
base_fields = (base_user_settable_fields |
set(["last-modified", "content-length", "date", "etag"]))
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.metadata = {}
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
self.content_disposition = None
self.content_language = None
self.filename = None
self.etag = None
self.is_latest = False
self.last_modified = None
self.owner = None
self._storage_class = None
self.path = None
self.resp = None
self.mode = None
self.size = None
self.version_id = None
self.source_version_id = None
self.delete_marker = False
self.encrypted = None
# If the object is being restored, this attribute will be set to True.
# If the object is restored, it will be set to False. Otherwise this
# value will be None. If the restore is completed (ongoing_restore =
# False), the expiry_date will be populated with the expiry date of the
# restored object.
self.ongoing_restore = None
self.expiry_date = None
self.local_hashes = {}
def __repr__(self):
if self.bucket:
name = u'<Key: %s,%s>' % (self.bucket.name, self.name)
else:
name = u'<Key: None,%s>' % self.name
# Encode to bytes for Python 2 to prevent display decoding issues
if not isinstance(name, str):
name = name.encode('utf-8')
return name
def __iter__(self):
return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def _get_key(self):
return self.name
def _set_key(self, value):
self.name = value
    key = property(_get_key, _set_key)
def _get_md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_hex(self.local_hashes['md5'])
def _set_md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_hex(value)
elif 'md5' in self.local_hashes:
self.local_hashes.pop('md5', None)
    md5 = property(_get_md5, _set_md5)
def _get_base64md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
md5 = self.local_hashes['md5']
if not isinstance(md5, bytes):
md5 = md5.encode('utf-8')
return binascii.b2a_base64(md5).decode('utf-8').rstrip('\n')
def _set_base64md5(self, value):
if value:
if not isinstance(value, six.string_types):
value = value.decode('utf-8')
self.local_hashes['md5'] = binascii.a2b_base64(value)
elif 'md5' in self.local_hashes:
del self.local_hashes['md5']
    base64md5 = property(_get_base64md5, _set_base64md5)
def _get_storage_class(self):
if self._storage_class is None and self.bucket:
# Attempt to fetch storage class
list_items = list(self.bucket.list(self.name.encode('utf-8')))
if len(list_items) and getattr(list_items[0], '_storage_class',
None):
self._storage_class = list_items[0]._storage_class
else:
# Key is not yet saved? Just use default...
self._storage_class = 'STANDARD'
return self._storage_class
def _set_storage_class(self, value):
self._storage_class = value
storage_class = property(_get_storage_class, _set_storage_class)
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
from just having a precalculated md5_hexdigest.
"""
digest = binascii.unhexlify(md5_hexdigest)
base64md5 = encodebytes(digest)
if base64md5[-1] == '\n':
base64md5 = base64md5[0:-1]
return (md5_hexdigest, base64md5)
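    # Illustrative use -- when the hex digest is already known (say, from a
    # manifest), attach it to the key without re-reading the data; the digest
    # below is just an example value:
    #
    #     k.md5, k.base64md5 = k.get_md5_from_hexdigest('79054025255fb1a26e4bc422aef54eb4')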
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
self.encrypted = resp.getheader(
provider.server_side_encryption_header, None)
else:
self.encrypted = None
def handle_version_headers(self, resp, force=False):
provider = self.bucket.connection.provider
# If the Key object already has a version_id attribute value, it
# means that it represents an explicit version and the user is
# doing a get_contents_*(version_id=<foo>) to retrieve another
# version of the Key. In that case, we don't really want to
# overwrite the version_id in this Key object. Comprende?
if self.version_id is None or force:
self.version_id = resp.getheader(provider.version_id, None)
self.source_version_id = resp.getheader(provider.copy_source_version_id,
None)
if resp.getheader(provider.delete_marker, 'false') == 'true':
self.delete_marker = True
else:
self.delete_marker = False
def handle_restore_headers(self, response):
provider = self.bucket.connection.provider
header = response.getheader(provider.restore_header)
if header is None:
return
parts = header.split(',', 1)
for part in parts:
key, val = [i.strip() for i in part.split('=')]
val = val.replace('"', '')
if key == 'ongoing-request':
self.ongoing_restore = True if val.lower() == 'true' else False
elif key == 'expiry-date':
self.expiry_date = val
def handle_addl_headers(self, headers):
"""
Used by Key subclasses to do additional, provider-specific
processing of response headers. No-op for this base class.
"""
pass
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
if self.resp is None:
self.mode = 'r'
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries)
if self.resp.status < 199 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name, value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() in Key.base_fields:
self.__dict__[name.lower().replace('-', '_')] = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
self.handle_restore_headers(self.resp)
self.handle_addl_headers(self.resp.getheaders())
def open_write(self, headers=None, override_num_retries=None):
"""
Open this key for writing.
Not yet implemented
:type headers: dict
:param headers: Headers to pass in the write request
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying PUT.
"""
raise BotoClientError('Not Implemented')
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries)
else:
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
def close(self, fast=False):
"""
Close this key.
:type fast: bool
:param fast: True if you want the connection to be closed without first
reading the content. This should only be used in cases where subsequent
calls don't need to return the content from the open HTTP connection.
Note: As explained at
http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
callers must read the whole response before sending a new request to the
server. Calling Key.close(fast=True) and making a subsequent request to
the server will work because boto will get an httplib exception and
close/reopen the connection.
"""
if self.resp and not fast:
self.resp.read()
self.resp = None
self.mode = None
self.closed = True
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
self.open_read()
data = self.resp.read(self.BufferSize)
if not data:
self.close()
raise StopIteration
return data
# Python 3 iterator support
__next__ = next
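    # A concrete version of the iteration example above; bucket contents and
    # file names are hypothetical:
    #
    #     key = bucket.get_key('logs/2017-02-03.gz')
    #     with open('local-copy.gz', 'wb') as fp:
    #         for chunk in key:
    #             fp.write(chunk)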
def read(self, size=0):
self.open_read()
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
return data
def change_storage_class(self, new_storage_class, dst_bucket=None,
validate_dst_bucket=True):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
        all metadata and ACL info while changing the storage class, or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key will be used.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
"""
bucket_name = dst_bucket or self.bucket.name
if new_storage_class == 'STANDARD':
return self.copy(bucket_name, self.name,
reduced_redundancy=False, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(bucket_name, self.name,
reduced_redundancy=True, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
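    # Illustrative: key.change_storage_class('REDUCED_REDUNDANCY') re-copies the
    # object onto RRS in place; passing dst_bucket copies it to another bucket
    # instead, preserving metadata and ACLs as described above.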
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False, validate_dst_bucket=True):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the
storage class of the new Key to be REDUCED_REDUNDANCY
regardless of the storage class of the key being copied.
The Reduced Redundancy Storage (RRS) feature of S3,
provides lower redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to S3, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL, a value of False will be
significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket,
validate_dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key,
src_version_id=self.version_id)
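    # Illustrative (destination bucket name is hypothetical): copy this key into
    # another bucket, keeping its ACL and encrypting the copy at rest:
    #
    #     key.copy('backup-bucket', key.name, preserve_acl=True, encrypt_key=True)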
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self, headers=None):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name, headers=headers))
def delete(self, headers=None):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id,
headers=headers)
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
# Ensure that metadata that is vital to signing is in the correct
# case. Applies to ``Content-Type`` & ``Content-MD5``.
if name.lower() == 'content-type':
self.metadata['Content-Type'] = value
elif name.lower() == 'content-md5':
self.metadata['Content-MD5'] = value
else:
self.metadata[name] = value
if name.lower() in Key.base_user_settable_fields:
self.__dict__[name.lower().replace('-', '_')] = value
def update_metadata(self, d):
self.metadata.update(d)
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket is not None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def get_redirect(self):
"""Return the redirect location configured for this key.
If no redirect is configured (via set_redirect), then None
will be returned.
"""
response = self.bucket.connection.make_request(
'HEAD', self.bucket.name, self.name)
if response.status == 200:
return response.getheader('x-amz-website-redirect-location')
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def set_redirect(self, redirect_location, headers=None):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
"""
if headers is None:
headers = {}
else:
headers = headers.copy()
headers['x-amz-website-redirect-location'] = redirect_location
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
return True
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
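    # Illustrative website-redirect round trip (the target URL is hypothetical):
    #
    #     key.set_redirect('http://example.com/new-home.html')
    #     key.get_redirect()    # -> 'http://example.com/new-home.html'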
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None,
policy=None, reduced_redundancy=False, encrypt_key=False):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds.
:type method: string
:param method: The method to use for retrieving the file
(default is GET).
:type headers: dict
:param headers: Any headers to pass along in the request.
:type query_auth: bool
:param query_auth: If True, signs the request in the URL.
:type force_http: bool
:param force_http: If True, http will be used instead of https.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type expires_in_absolute: bool
:param expires_in_absolute:
:type version_id: string
:param version_id: The version_id of the object to GET. If specified
this overrides any value in the key.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:rtype: string
:return: The URL to access the key
"""
provider = self.bucket.connection.provider
version_id = version_id or self.version_id
if headers is None:
headers = {}
else:
headers = headers.copy()
# add headers accordingly (usually PUT case)
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers,
expires_in_absolute,
version_id)
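    # Illustrative: a pre-signed GET URL valid for one hour, and a plain
    # unsigned HTTP URL with no credentials embedded:
    #
    #     signed_url = key.generate_url(3600)
    #     public_url = key.generate_url(0, query_auth=False, force_http=True)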
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
            point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: (optional) Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except IOError:
spos = None
self.read_from_stream = False
# If hash_algs is unset and the MD5 hasn't already been computed,
# default to an MD5 hash_alg to hash the data on-the-fly.
if hash_algs is None and not self.md5:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
# If the caller explicitly specified host header, tell putrequest
# not to add a second host header. Similarly for accept-encoding.
skips = {}
if boto.utils.find_matching_headers('host', headers):
skips['skip_host'] = 1
if boto.utils.find_matching_headers('accept-encoding', headers):
skips['skip_accept_encoding'] = 1
http_conn.putrequest(method, path, **skips)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 4 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 4:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(
math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
for alg in digesters:
digesters[alg].update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
self.size = data_len
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if chunked_transfer:
http_conn.send('0\r\n')
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
response = http_conn.getresponse()
body = response.read()
if not self.should_retry(response, chunked_transfer):
raise provider.storage_response_error(
response.status, response.reason, body)
return response
if not headers:
headers = {}
else:
headers = headers.copy()
# Overwrite user-supplied user-agent.
for header in find_matching_headers('User-Agent', headers):
del headers[header]
headers['User-Agent'] = UserAgent
# If storage_class is None, then a user has not explicitly requested
# a storage class, so we can assume STANDARD here
if self._storage_class not in [None, 'STANDARD']:
headers[provider.storage_class_header] = self.storage_class
if find_matching_headers('Content-Encoding', headers):
self.content_encoding = merge_headers_by_name(
'Content-Encoding', headers)
if find_matching_headers('Content-Language', headers):
self.content_language = merge_headers_by_name(
'Content-Language', headers)
content_type_headers = find_matching_headers('Content-Type', headers)
if content_type_headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if (len(content_type_headers) == 1 and
headers[content_type_headers[0]] is None):
# Delete null Content-Type value to skip sending that header.
del headers[content_type_headers[0]]
else:
self.content_type = merge_headers_by_name(
'Content-Type', headers)
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type is None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
# This is terrible. We need a SHA256 of the body for SigV4, but to do
# the chunked ``sender`` behavior above, the ``fp`` isn't available to
# the auth mechanism (because closures). Detect if it's SigV4 & embelish
# while we can before the auth calculations occur.
if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability():
kwargs = {'fp': fp, 'hash_algorithm': hashlib.sha256}
if size is not None:
kwargs['size'] = size
headers['_sha256'] = compute_hash(**kwargs)[0]
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request(
'PUT',
self.bucket.name,
self.name,
headers,
sender=sender,
query_args=query_args
)
self.handle_version_headers(resp, force=True)
self.handle_addl_headers(resp.getheaders())
def should_retry(self, response, chunked_transfer=False):
provider = self.bucket.connection.provider
if not chunked_transfer:
if response.status in [500, 503]:
# 500 & 503 can be plain retries.
return True
if response.getheader('location'):
# If there's a redirect, plain retry.
return True
if 200 <= response.status <= 299:
self.etag = response.getheader('etag')
md5 = self.md5
if isinstance(md5, bytes):
md5 = md5.decode('utf-8')
# If you use customer-provided encryption keys, the ETag value that
# Amazon S3 returns in the response will not be the MD5 of the
# object.
server_side_encryption_customer_algorithm = response.getheader(
'x-amz-server-side-encryption-customer-algorithm', None)
if server_side_encryption_customer_algorithm is None:
if self.etag != '"%s"' % md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5. '
'%s vs. %s' % (self.etag, self.md5))
return True
if response.status == 400:
# The 400 must be trapped so the retry handler can check to
# see if it was a timeout.
# If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
# out.
body = response.read()
err = provider.storage_response_error(
response.status,
response.reason,
body
)
if err.error_code in ['RequestTimeout']:
raise PleaseRetryException(
"Saw %s, retrying" % err.error_code,
response=response
)
return False
def compute_md5(self, fp, size=None):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file
pointer will be reset to the same position before the
method returns.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
in place into different parts. Less bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
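    # Illustrative: hash once, then reuse the (hexdigest, base64) tuple so the
    # upload does not have to recompute it:
    #
    #     md5_tuple = key.compute_md5(fp)
    #     key.set_contents_from_file(fp, md5=md5_tuple, rewind=True)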
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None,
size=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
Content-Size and Content-MD5 in the header. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket is not None:
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, size=size)
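    # Illustrative: upload from a non-seekable stream (a pipe here); chunked
    # transfer is used, so no Content-Length or Content-MD5 can be sent:
    #
    #     proc = subprocess.Popen(['tar', 'cz', 'data/'], stdout=subprocess.PIPE)
    #     key.set_contents_from_stream(proc.stdout)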
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
encrypt_key=False, size=None, rewind=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
contents. The data is read from 'fp' from its current position until
'size' bytes have been read or EOF.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will
first check to see if an object exists in the bucket with
the same key. If it does, it won't overwrite it. The
default value is True which will overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
        Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type size: int
    :param size: (optional) The maximum number of bytes to read
        from the file pointer (fp). This is useful when uploading
        a file in multiple parts where you are splitting the file
        up into different ranges to be uploaded. If not specified,
        the default behaviour is to read all bytes from the file
        pointer. Fewer bytes may be available.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will
be rewound to the start before any bytes are read from
it. The default behaviour is False which reads from the
current position of the file pointer (fp).
:rtype: int
:return: The number of bytes written to the key.
"""
provider = self.bucket.connection.provider
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
                # TODO - What if provider doesn't support reduced redundancy?
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if not md5 and provider.supports_chunked_transfer():
                # Defer md5 calculation so it happens on the fly during the
                # upload; we don't know anything about the size yet.
chunked_transfer = True
self.size = None
else:
chunked_transfer = False
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if not md5:
                    # compute_md5() also sets self.size to the actual
                    # number of bytes read while computing the md5.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
                    # If md5 is provided, we still need the size, so
                    # calculate it based on the bytes to the end of content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
# return number of bytes written.
return self.size
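    # Illustrative usage sketch (added for clarity, not part of the original
    # module; `conn`, 'my-bucket' and 'report.csv' are hypothetical names):
    #
    #   key = conn.get_bucket('my-bucket').new_key('report.csv')
    #   def progress(transmitted, total):
    #       print '%d of %d bytes transmitted' % (transmitted, total)
    #   with open('report.csv', 'rb') as fp:
    #       key.set_contents_from_file(fp, cb=progress, num_cb=20)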
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
        the second representing the size of the object to be
        transmitted.
    :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
        Redundancy Storage (RRS) feature of S3 provides lower
        redundancy at lower storage cost.
    :type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object
will be encrypted on the server-side by S3 and will be
stored in an encrypted form while at rest in S3.
:rtype: int
:return: The number of bytes written to the key.
"""
with open(filename, 'rb') as fp:
return self.set_contents_from_file(fp, headers, replace, cb,
num_cb, policy, md5,
reduced_redundancy,
encrypt_key=encrypt_key)
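    # Illustrative sketch (hypothetical names): the filename variant opens and
    # closes the file itself, so a reduced-redundancy, server-side encrypted
    # upload reduces to a single call.
    #
    #   key.set_contents_from_filename('backup.tar.gz',
    #                                  reduced_redundancy=True,
    #                                  encrypt_key=True)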
def set_contents_from_string(self, string_data, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
        the second representing the size of the object to be
        transmitted.
    :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
        Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
"""
if not isinstance(string_data, bytes):
string_data = string_data.encode("utf-8")
fp = BytesIO(string_data)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
return r
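    # Illustrative sketch (hypothetical names): both bytes and unicode are
    # accepted; unicode is encoded as UTF-8 before being wrapped in a BytesIO
    # and handed to set_contents_from_file().
    #
    #   key.set_contents_from_string(u'{"status": "ok"}',
    #                                headers={'Content-Type': 'application/json'})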
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
    :type headers: dict
    :param headers: headers to send when retrieving the file
:type cb: function
:param cb: a callback function that will be called to report
        progress on the download. The callback should accept two
        integer parameters, the first representing the number of
        bytes that have been successfully transmitted from S3 and
        the second representing the total size of the object.
    :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=None,
query_args=None)
def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None, query_args=None):
if headers is None:
headers = {}
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = query_args or []
if torrent:
query_args.append('torrent')
if hash_algs is None and not torrent:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (
key, urllib.parse.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
data_len = 0
if cb:
if self.size is None:
cb_size = 0
else:
cb_size = self.size
if self.size is None and num_cb != -1:
# If size is not available due to chunked transfer for example,
# we'll call the cb for every 1MB of data transferred.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
try:
for bytes in self:
fp.write(bytes)
data_len += len(bytes)
for alg in digesters:
digesters[alg].update(bytes)
if cb:
if cb_size > 0 and data_len >= cb_size:
break
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
except IOError as e:
if e.errno == errno.ENOSPC:
raise StorageDataError('Out of space for destination file '
'%s' % fp.name)
raise
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if self.size is None and not torrent and "Range" not in headers:
self.size = data_len
self.close()
self.bucket.connection.debug = save_debug
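    # Worked example of the callback granularity above (assuming boto's
    # default 8 KB BufferSize): for an 8 MB object with num_cb=10,
    # cb_size/BufferSize is 1024 buffers, so cb_count = ceil(1024 / 9) = 114
    # and the callback fires roughly every 114 buffers (~0.9 MB), in addition
    # to the initial and final calls.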
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
"""
        Get a torrent file (see get_file)
:type fp: file
:param fp: The file pointer of where to put the torrent
:type headers: dict
:param headers: Headers to be passed
:type cb: function
:param cb: a callback function that will be called to report
        progress on the download. The callback should accept two
        integer parameters, the first representing the number of
        bytes that have been successfully transmitted from S3 and
        the second representing the total size of the object.
    :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
    :type fp: file-like object
    :param fp: the file pointer to which the object's contents are written
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
        progress on the download. The callback should accept two
        integer parameters, the first representing the number of
        bytes that have been successfully transmitted from S3 and
        the second representing the total size of the object.
    :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
    :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
        progress on the download. The callback should accept two
        integer parameters, the first representing the number of
        bytes that have been successfully transmitted from S3 and
        the second representing the total size of the object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
    :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
try:
with open(filename, 'wb') as fp:
self.get_contents_to_file(fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers)
except Exception:
os.remove(filename)
raise
# if last_modified date was sent from s3, try to set file's timestamp
if self.last_modified is not None:
try:
modified_tuple = email.utils.parsedate_tz(self.last_modified)
modified_stamp = int(email.utils.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception:
pass
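    # Illustrative sketch (hypothetical names): downloading a key to a local
    # path; on failure the partially written file is removed and the exception
    # re-raised, and on success the file's mtime is set from Last-Modified.
    #
    #   key = bucket.get_key('logs/2015-06-01.gz')
    #   key.get_contents_to_filename('/tmp/2015-06-01.gz')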
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None, encoding=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
        progress on the download. The callback should accept two
        integer parameters, the first representing the number of
        bytes that have been successfully transmitted from S3 and
        the second representing the total size of the object.
    :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
:type encoding: str
:param encoding: The text encoding to use, such as ``utf-8``
or ``iso-8859-1``. If set, then a string will be returned.
Defaults to ``None`` and returns bytes.
:rtype: bytes or str
:returns: The contents of the file as bytes or a string
"""
fp = BytesIO()
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
value = fp.getvalue()
if encoding is not None:
value = value.decode(encoding)
return value
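    # Illustrative sketch (hypothetical names): without `encoding` the raw
    # bytes are returned; passing an encoding decodes them to text.
    #
    #   raw = key.get_contents_as_string()
    #   text = key.get_contents_as_string(encoding='utf-8')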
def add_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
        account you are granting the permission to.
:type recursive: boolean
    :param recursive: A boolean value that controls whether the
command will apply the grant to all keys within the bucket
or not. The default value is False. By passing a True
value, the call will iterate through all keys in the
bucket and apply the same grant to each key. CAUTION: If
you have a lot of keys, this could take a long time!
"""
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers)
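    # Illustrative sketch (hypothetical address): granting another AWS account
    # read access by email address re-PUTs the modified ACL.
    #
    #   key.add_email_grant('READ', 'colleague@example.com')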
def add_user_grant(self, permission, user_id, headers=None,
display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a key. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
:param user_id: The canonical user id associated with the AWS
        account you are granting the permission to.
:type display_name: string
    :param display_name: An optional string containing the user's
Display Name. Only required on Walrus.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers)
def _normalize_metadata(self, metadata):
if type(metadata) == set:
norm_metadata = set()
for k in metadata:
norm_metadata.add(k.lower())
else:
norm_metadata = {}
for k in metadata:
norm_metadata[k.lower()] = metadata[k]
return norm_metadata
def _get_remote_metadata(self, headers=None):
"""
Extracts metadata from existing URI into a dict, so we can
overwrite/delete from it to form the new set of metadata to apply to a
key.
"""
metadata = {}
for underscore_name in self._underscore_base_user_settable_fields:
if hasattr(self, underscore_name):
value = getattr(self, underscore_name)
if value:
# Generate HTTP field name corresponding to "_" named field.
field_name = underscore_name.replace('_', '-')
metadata[field_name.lower()] = value
# self.metadata contains custom metadata, which are all user-settable.
prefix = self.provider.metadata_prefix
for underscore_name in self.metadata:
field_name = underscore_name.replace('_', '-')
metadata['%s%s' % (prefix, field_name.lower())] = (
self.metadata[underscore_name])
return metadata
def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
headers=None):
metadata_plus = self._normalize_metadata(metadata_plus)
metadata_minus = self._normalize_metadata(metadata_minus)
metadata = self._get_remote_metadata()
metadata.update(metadata_plus)
for h in metadata_minus:
if h in metadata:
del metadata[h]
src_bucket = self.bucket
# Boto prepends the meta prefix when adding headers, so strip prefix in
# metadata before sending back in to copy_key() call.
rewritten_metadata = {}
for h in metadata:
if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
rewritten_h = (h.replace('x-goog-meta-', '')
.replace('x-amz-meta-', ''))
else:
rewritten_h = h
rewritten_metadata[rewritten_h] = metadata[h]
metadata = rewritten_metadata
src_bucket.copy_key(self.name, self.bucket.name, self.name,
metadata=metadata, preserve_acl=preserve_acl,
headers=headers)
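    # For example (hypothetical values), a stored header of
    # 'x-amz-meta-color: blue' is passed back to copy_key() as
    # metadata={'color': 'blue'}, because boto re-adds the provider's
    # metadata prefix when it builds the request headers.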
def restore(self, days, headers=None):
"""Restore an object from an archive.
:type days: int
:param days: The lifetime of the restored object (must
be at least 1 day). If the object is already restored
then this parameter can be used to readjust the lifetime
of the restored object. In this case, the days
param is with respect to the initial time of the request.
If the object has not been restored, this param is with
respect to the completion time of the request.
"""
response = self.bucket.connection.make_request(
'POST', self.bucket.name, self.name,
data=self.RestoreBody % days,
headers=headers, query_args='restore')
if response.status not in (200, 202):
provider = self.bucket.connection.provider
raise provider.storage_response_error(response.status,
response.reason,
response.read())
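    # Illustrative sketch (hypothetical names): restoring an archived object
    # for two days issues a POST with the 'restore' query argument.
    #
    #   key = bucket.get_key('archive/2013.tar')
    #   key.restore(days=2)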
| mit | 8,866,681,215,161,481,000 | -6,628,844,785,022,669,000 | 41.933368 | 89 | 0.580018 | false |
pubnub/Zopkio | examples/zookeeper/test_suites/zookeeper_test_faulttolerance.py | 4 | 1959 | # Copyright 2015 LinkedIn Corp.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from kazoo.client import KazooClient
from multiprocessing import Process
import time
import zopkio.runtime as runtime
import zopkio.test_utils as testutilities
import zopkio.adhoc_deployer as adhoc_deployer
zookeper_deployer = None
test_phase = 2
def test_zookeeper_fault_tolerance():
"""
Kill zookeeper1 and see if other zookeeper instances are in quorum
"""
zookeper_deployer = runtime.get_deployer("zookeeper")
kazoo_connection_url = str(runtime.get_active_config('zookeeper_host') + ':2181')
zkclient = KazooClient(hosts=kazoo_connection_url)
zkclient.start()
zkclient.ensure_path("/my/zookeeper_errorinjection")
# kill the Zookeeper1 instance
print "killing zoookeeper instance1"
zookeper_deployer.kill("zookeeper1")
time.sleep(20)
zkclient.stop()
def validate_zookeeper_fault_tolerance():
"""
Validate that we can still connect to zookeeper instance 2 to read the node
"""
zk2 = KazooClient(hosts=str(runtime.get_active_config('zookeeper_host') + ':2182'))
zk2.start()
assert zk2.exists("/my/zookeeper_errorinjection/"), "zookeeper_errorinjection node not found"
zk2.stop()
| apache-2.0 | -1,074,735,724,925,213,300 | -6,341,134,196,469,463,000 | 32.20339 | 95 | 0.761103 | false |
Champii/runtime | deps/v8/tools/release/common_includes.py | 21 | 29230 | #!/usr/bin/env python
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import datetime
import httplib
import glob
import imp
import json
import os
import re
import shutil
import subprocess
import sys
import textwrap
import time
import urllib
import urllib2
from git_recipes import GitRecipesMixin
from git_recipes import GitFailedException
CHANGELOG_FILE = "ChangeLog"
DAY_IN_SECONDS = 24 * 60 * 60
PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
PUSH_MSG_NEW_RE = re.compile(r"^Version \d+\.\d+\.\d+$")
VERSION_FILE = os.path.join("include", "v8-version.h")
VERSION_RE = re.compile(r"^\d+\.\d+\.\d+(?:\.\d+)?$")
# V8 base directory.
V8_BASE = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def TextToFile(text, file_name):
with open(file_name, "w") as f:
f.write(text)
def AppendToFile(text, file_name):
with open(file_name, "a") as f:
f.write(text)
def LinesInFile(file_name):
with open(file_name) as f:
for line in f:
yield line
def FileToText(file_name):
with open(file_name) as f:
return f.read()
def MSub(rexp, replacement, text):
return re.sub(rexp, replacement, text, flags=re.MULTILINE)
def Fill80(line):
# Replace tabs and remove surrounding space.
line = re.sub(r"\t", r" ", line.strip())
# Format with 8 characters indentation and line width 80.
return textwrap.fill(line, width=80, initial_indent=" ",
subsequent_indent=" ")
def MakeComment(text):
return MSub(r"^( ?)", "#", text)
def StripComments(text):
# Use split not splitlines to keep terminal newlines.
return "\n".join(filter(lambda x: not x.startswith("#"), text.split("\n")))
def MakeChangeLogBody(commit_messages, auto_format=False):
result = ""
added_titles = set()
for (title, body, author) in commit_messages:
# TODO(machenbach): Better check for reverts. A revert should remove the
# original CL from the actual log entry.
title = title.strip()
if auto_format:
# Only add commits that set the LOG flag correctly.
log_exp = r"^[ \t]*LOG[ \t]*=[ \t]*(?:(?:Y(?:ES)?)|TRUE)"
if not re.search(log_exp, body, flags=re.I | re.M):
continue
# Never include reverts.
if title.startswith("Revert "):
continue
# Don't include duplicates.
if title in added_titles:
continue
# Add and format the commit's title and bug reference. Move dot to the end.
added_titles.add(title)
raw_title = re.sub(r"(\.|\?|!)$", "", title)
bug_reference = MakeChangeLogBugReference(body)
space = " " if bug_reference else ""
result += "%s\n" % Fill80("%s%s%s." % (raw_title, space, bug_reference))
# Append the commit's author for reference if not in auto-format mode.
if not auto_format:
result += "%s\n" % Fill80("(%s)" % author.strip())
result += "\n"
return result
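# Example (hypothetical commit): in auto-format mode the message
# ("Fix foo.", "BUG=v8:123\nLOG=Y", "dev@chromium.org") is rendered as
# "        Fix foo (issue 123).\n\n"; reverts, duplicate titles and commits
# without a LOG flag are skipped.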
def MakeChangeLogBugReference(body):
"""Grep for "BUG=xxxx" lines in the commit message and convert them to
"(issue xxxx)".
"""
crbugs = []
v8bugs = []
def AddIssues(text):
ref = re.match(r"^BUG[ \t]*=[ \t]*(.+)$", text.strip())
if not ref:
return
for bug in ref.group(1).split(","):
bug = bug.strip()
match = re.match(r"^v8:(\d+)$", bug)
if match: v8bugs.append(int(match.group(1)))
else:
match = re.match(r"^(?:chromium:)?(\d+)$", bug)
if match: crbugs.append(int(match.group(1)))
# Add issues to crbugs and v8bugs.
map(AddIssues, body.splitlines())
# Filter duplicates, sort, stringify.
crbugs = map(str, sorted(set(crbugs)))
v8bugs = map(str, sorted(set(v8bugs)))
bug_groups = []
def FormatIssues(prefix, bugs):
if len(bugs) > 0:
plural = "s" if len(bugs) > 1 else ""
bug_groups.append("%sissue%s %s" % (prefix, plural, ", ".join(bugs)))
FormatIssues("", v8bugs)
FormatIssues("Chromium ", crbugs)
if len(bug_groups) > 0:
return "(%s)" % ", ".join(bug_groups)
else:
return ""
def SortingKey(version):
"""Key for sorting version number strings: '3.11' > '3.2.1.1'"""
version_keys = map(int, version.split("."))
# Fill up to full version numbers to normalize comparison.
while len(version_keys) < 4: # pragma: no cover
version_keys.append(0)
# Fill digits.
return ".".join(map("{0:04d}".format, version_keys))
# Some commands don't like the pipe, e.g. calling vi from within the script or
# from subscripts like git cl upload.
def Command(cmd, args="", prefix="", pipe=True, cwd=None):
cwd = cwd or os.getcwd()
# TODO(machenbach): Use timeout.
cmd_line = "%s %s %s" % (prefix, cmd, args)
print "Command: %s" % cmd_line
print "in %s" % cwd
sys.stdout.flush()
try:
if pipe:
return subprocess.check_output(cmd_line, shell=True, cwd=cwd)
else:
return subprocess.check_call(cmd_line, shell=True, cwd=cwd)
except subprocess.CalledProcessError:
return None
finally:
sys.stdout.flush()
sys.stderr.flush()
# Wrapper for side effects.
class SideEffectHandler(object): # pragma: no cover
def Call(self, fun, *args, **kwargs):
return fun(*args, **kwargs)
def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
return Command(cmd, args, prefix, pipe, cwd=cwd)
def ReadLine(self):
return sys.stdin.readline().strip()
def ReadURL(self, url, params=None):
# pylint: disable=E1121
url_fh = urllib2.urlopen(url, params, 60)
try:
return url_fh.read()
finally:
url_fh.close()
def ReadClusterFuzzAPI(self, api_key, **params):
params["api_key"] = api_key.strip()
params = urllib.urlencode(params)
headers = {"Content-type": "application/x-www-form-urlencoded"}
conn = httplib.HTTPSConnection("backend-dot-cluster-fuzz.appspot.com")
conn.request("POST", "/_api/", params, headers)
response = conn.getresponse()
data = response.read()
try:
return json.loads(data)
except:
print data
print "ERROR: Could not read response. Is your key valid?"
raise
def Sleep(self, seconds):
time.sleep(seconds)
def GetDate(self):
return datetime.date.today().strftime("%Y-%m-%d")
def GetUTCStamp(self):
return time.mktime(datetime.datetime.utcnow().timetuple())
DEFAULT_SIDE_EFFECT_HANDLER = SideEffectHandler()
class NoRetryException(Exception):
pass
class VCInterface(object):
def InjectStep(self, step):
self.step=step
def Pull(self):
raise NotImplementedError()
def Fetch(self):
raise NotImplementedError()
def GetTags(self):
raise NotImplementedError()
def GetBranches(self):
raise NotImplementedError()
def MasterBranch(self):
raise NotImplementedError()
def CandidateBranch(self):
raise NotImplementedError()
def RemoteMasterBranch(self):
raise NotImplementedError()
def RemoteCandidateBranch(self):
raise NotImplementedError()
def RemoteBranch(self, name):
raise NotImplementedError()
def CLLand(self):
raise NotImplementedError()
def Tag(self, tag, remote, message):
"""Sets a tag for the current commit.
Assumptions: The commit already landed and the commit message is unique.
"""
raise NotImplementedError()
class GitInterface(VCInterface):
def Pull(self):
self.step.GitPull()
def Fetch(self):
self.step.Git("fetch")
def GetTags(self):
return self.step.Git("tag").strip().splitlines()
def GetBranches(self):
# Get relevant remote branches, e.g. "branch-heads/3.25".
branches = filter(
lambda s: re.match(r"^branch\-heads/\d+\.\d+$", s),
self.step.GitRemotes())
# Remove 'branch-heads/' prefix.
return map(lambda s: s[13:], branches)
def MasterBranch(self):
return "master"
def CandidateBranch(self):
return "candidates"
def RemoteMasterBranch(self):
return "origin/master"
def RemoteCandidateBranch(self):
return "origin/candidates"
def RemoteBranch(self, name):
# Assume that if someone "fully qualified" the ref, they know what they
# want.
if name.startswith('refs/'):
return name
if name in ["candidates", "master"]:
return "refs/remotes/origin/%s" % name
try:
# Check if branch is in heads.
if self.step.Git("show-ref refs/remotes/origin/%s" % name).strip():
return "refs/remotes/origin/%s" % name
except GitFailedException:
pass
try:
# Check if branch is in branch-heads.
if self.step.Git("show-ref refs/remotes/branch-heads/%s" % name).strip():
return "refs/remotes/branch-heads/%s" % name
except GitFailedException:
pass
self.Die("Can't find remote of %s" % name)
def Tag(self, tag, remote, message):
# Wait for the commit to appear. Assumes unique commit message titles (this
# is the case for all automated merge and push commits - also no title is
# the prefix of another title).
commit = None
for wait_interval in [3, 7, 15, 35, 45, 60]:
self.step.Git("fetch")
commit = self.step.GitLog(n=1, format="%H", grep=message, branch=remote)
if commit:
break
print("The commit has not replicated to git. Waiting for %s seconds." %
wait_interval)
self.step._side_effect_handler.Sleep(wait_interval)
else:
self.step.Die("Couldn't determine commit for setting the tag. Maybe the "
"git updater is lagging behind?")
self.step.Git("tag %s %s" % (tag, commit))
self.step.Git("push origin %s" % tag)
def CLLand(self):
self.step.GitCLLand()
class Step(GitRecipesMixin):
def __init__(self, text, number, config, state, options, handler):
self._text = text
self._number = number
self._config = config
self._state = state
self._options = options
self._side_effect_handler = handler
self.vc = GitInterface()
self.vc.InjectStep(self)
# The testing configuration might set a different default cwd.
self.default_cwd = (self._config.get("DEFAULT_CWD") or
os.path.join(self._options.work_dir, "v8"))
assert self._number >= 0
assert self._config is not None
assert self._state is not None
assert self._side_effect_handler is not None
def __getitem__(self, key):
# Convenience method to allow direct [] access on step classes for
# manipulating the backed state dict.
return self._state.get(key)
def __setitem__(self, key, value):
# Convenience method to allow direct [] access on step classes for
# manipulating the backed state dict.
self._state[key] = value
def Config(self, key):
return self._config[key]
def Run(self):
# Restore state.
state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
if not self._state and os.path.exists(state_file):
self._state.update(json.loads(FileToText(state_file)))
print ">>> Step %d: %s" % (self._number, self._text)
try:
return self.RunStep()
finally:
# Persist state.
TextToFile(json.dumps(self._state), state_file)
def RunStep(self): # pragma: no cover
raise NotImplementedError
def Retry(self, cb, retry_on=None, wait_plan=None):
""" Retry a function.
Params:
cb: The function to retry.
retry_on: A callback that takes the result of the function and returns
True if the function should be retried. A function throwing an
exception is always retried.
wait_plan: A list of waiting delays between retries in seconds. The
maximum number of retries is len(wait_plan).
"""
retry_on = retry_on or (lambda x: False)
wait_plan = list(wait_plan or [])
wait_plan.reverse()
while True:
got_exception = False
try:
result = cb()
except NoRetryException as e:
raise e
except Exception as e:
got_exception = e
if got_exception or retry_on(result):
if not wait_plan: # pragma: no cover
raise Exception("Retried too often. Giving up. Reason: %s" %
str(got_exception))
wait_time = wait_plan.pop()
print "Waiting for %f seconds." % wait_time
self._side_effect_handler.Sleep(wait_time)
print "Retrying..."
else:
return result
def ReadLine(self, default=None):
# Don't prompt in forced mode.
if self._options.force_readline_defaults and default is not None:
print "%s (forced)" % default
return default
else:
return self._side_effect_handler.ReadLine()
def Command(self, name, args, cwd=None):
cmd = lambda: self._side_effect_handler.Command(
name, args, "", True, cwd=cwd or self.default_cwd)
return self.Retry(cmd, None, [5])
def Git(self, args="", prefix="", pipe=True, retry_on=None, cwd=None):
cmd = lambda: self._side_effect_handler.Command(
"git", args, prefix, pipe, cwd=cwd or self.default_cwd)
result = self.Retry(cmd, retry_on, [5, 30])
if result is None:
raise GitFailedException("'git %s' failed." % args)
return result
def Editor(self, args):
if self._options.requires_editor:
return self._side_effect_handler.Command(
os.environ["EDITOR"],
args,
pipe=False,
cwd=self.default_cwd)
def ReadURL(self, url, params=None, retry_on=None, wait_plan=None):
wait_plan = wait_plan or [3, 60, 600]
cmd = lambda: self._side_effect_handler.ReadURL(url, params)
return self.Retry(cmd, retry_on, wait_plan)
def GetDate(self):
return self._side_effect_handler.GetDate()
def Die(self, msg=""):
if msg != "":
print "Error: %s" % msg
print "Exiting"
raise Exception(msg)
def DieNoManualMode(self, msg=""):
if not self._options.manual: # pragma: no cover
msg = msg or "Only available in manual mode."
self.Die(msg)
def Confirm(self, msg):
print "%s [Y/n] " % msg,
answer = self.ReadLine(default="Y")
return answer == "" or answer == "Y" or answer == "y"
def DeleteBranch(self, name, cwd=None):
for line in self.GitBranch(cwd=cwd).splitlines():
if re.match(r"\*?\s*%s$" % re.escape(name), line):
msg = "Branch %s exists, do you want to delete it?" % name
if self.Confirm(msg):
self.GitDeleteBranch(name, cwd=cwd)
print "Branch %s deleted." % name
else:
msg = "Can't continue. Please delete branch %s and try again." % name
self.Die(msg)
def InitialEnvironmentChecks(self, cwd):
# Cancel if this is not a git checkout.
if not os.path.exists(os.path.join(cwd, ".git")): # pragma: no cover
self.Die("This is not a git checkout, this script won't work for you.")
# Cancel if EDITOR is unset or not executable.
if (self._options.requires_editor and (not os.environ.get("EDITOR") or
self.Command(
"which", os.environ["EDITOR"]) is None)): # pragma: no cover
self.Die("Please set your EDITOR environment variable, you'll need it.")
def CommonPrepare(self):
# Check for a clean workdir.
if not self.GitIsWorkdirClean(): # pragma: no cover
self.Die("Workspace is not clean. Please commit or undo your changes.")
# Checkout master in case the script was left on a work branch.
self.GitCheckout('origin/master')
# Fetch unfetched revisions.
self.vc.Fetch()
def PrepareBranch(self):
# Delete the branch that will be created later if it exists already.
self.DeleteBranch(self._config["BRANCHNAME"])
def CommonCleanup(self):
self.GitCheckout('origin/master')
self.GitDeleteBranch(self._config["BRANCHNAME"])
# Clean up all temporary files.
for f in glob.iglob("%s*" % self._config["PERSISTFILE_BASENAME"]):
if os.path.isfile(f):
os.remove(f)
if os.path.isdir(f):
shutil.rmtree(f)
def ReadAndPersistVersion(self, prefix=""):
def ReadAndPersist(var_name, def_name):
match = re.match(r"^#define %s\s+(\d*)" % def_name, line)
if match:
value = match.group(1)
self["%s%s" % (prefix, var_name)] = value
for line in LinesInFile(os.path.join(self.default_cwd, VERSION_FILE)):
for (var_name, def_name) in [("major", "V8_MAJOR_VERSION"),
("minor", "V8_MINOR_VERSION"),
("build", "V8_BUILD_NUMBER"),
("patch", "V8_PATCH_LEVEL")]:
ReadAndPersist(var_name, def_name)
def WaitForLGTM(self):
print ("Please wait for an LGTM, then type \"LGTM<Return>\" to commit "
"your change. (If you need to iterate on the patch or double check "
"that it's sane, do so in another shell, but remember to not "
"change the headline of the uploaded CL.")
answer = ""
while answer != "LGTM":
print "> ",
answer = self.ReadLine(None if self._options.wait_for_lgtm else "LGTM")
if answer != "LGTM":
print "That was not 'LGTM'."
def WaitForResolvingConflicts(self, patch_file):
print("Applying the patch \"%s\" failed. Either type \"ABORT<Return>\", "
"or resolve the conflicts, stage *all* touched files with "
"'git add', and type \"RESOLVED<Return>\"")
self.DieNoManualMode()
answer = ""
while answer != "RESOLVED":
if answer == "ABORT":
self.Die("Applying the patch failed.")
if answer != "":
print "That was not 'RESOLVED' or 'ABORT'."
print "> ",
answer = self.ReadLine()
# Takes a file containing the patch to apply as first argument.
def ApplyPatch(self, patch_file, revert=False):
try:
self.GitApplyPatch(patch_file, revert)
except GitFailedException:
self.WaitForResolvingConflicts(patch_file)
def GetVersionTag(self, revision):
tag = self.Git("describe --tags %s" % revision).strip()
if VERSION_RE.match(tag):
return tag
else:
return None
def GetRecentReleases(self, max_age):
# Make sure tags are fetched.
self.Git("fetch origin +refs/tags/*:refs/tags/*")
# Current timestamp.
time_now = int(self._side_effect_handler.GetUTCStamp())
# List every tag from a given period.
revisions = self.Git("rev-list --max-age=%d --tags" %
int(time_now - max_age)).strip()
# Filter out revisions who's tag is off by one or more commits.
return filter(lambda r: self.GetVersionTag(r), revisions.splitlines())
def GetLatestVersion(self):
# Use cached version if available.
if self["latest_version"]:
return self["latest_version"]
# Make sure tags are fetched.
self.Git("fetch origin +refs/tags/*:refs/tags/*")
version = sorted(filter(VERSION_RE.match, self.vc.GetTags()),
key=SortingKey, reverse=True)[0]
self["latest_version"] = version
return version
def GetLatestRelease(self):
"""The latest release is the git hash of the latest tagged version.
This revision should be rolled into chromium.
"""
latest_version = self.GetLatestVersion()
# The latest release.
latest_hash = self.GitLog(n=1, format="%H", branch=latest_version)
assert latest_hash
return latest_hash
def GetLatestReleaseBase(self, version=None):
"""The latest release base is the latest revision that is covered in the
last change log file. It doesn't include cherry-picked patches.
"""
latest_version = version or self.GetLatestVersion()
# Strip patch level if it exists.
latest_version = ".".join(latest_version.split(".")[:3])
# The latest release base.
latest_hash = self.GitLog(n=1, format="%H", branch=latest_version)
assert latest_hash
title = self.GitLog(n=1, format="%s", git_hash=latest_hash)
match = PUSH_MSG_GIT_RE.match(title)
if match:
# Legacy: In the old process there's one level of indirection. The
# version is on the candidates branch and points to the real release
# base on master through the commit message.
return match.group("git_rev")
match = PUSH_MSG_NEW_RE.match(title)
if match:
# This is a new-style v8 version branched from master. The commit
# "latest_hash" is the version-file change. Its parent is the release
# base on master.
return self.GitLog(n=1, format="%H", git_hash="%s^" % latest_hash)
self.Die("Unknown latest release: %s" % latest_hash)
def ArrayToVersion(self, prefix):
return ".".join([self[prefix + "major"],
self[prefix + "minor"],
self[prefix + "build"],
self[prefix + "patch"]])
def StoreVersion(self, version, prefix):
version_parts = version.split(".")
if len(version_parts) == 3:
version_parts.append("0")
major, minor, build, patch = version_parts
self[prefix + "major"] = major
self[prefix + "minor"] = minor
self[prefix + "build"] = build
self[prefix + "patch"] = patch
def SetVersion(self, version_file, prefix):
output = ""
for line in FileToText(version_file).splitlines():
if line.startswith("#define V8_MAJOR_VERSION"):
line = re.sub("\d+$", self[prefix + "major"], line)
elif line.startswith("#define V8_MINOR_VERSION"):
line = re.sub("\d+$", self[prefix + "minor"], line)
elif line.startswith("#define V8_BUILD_NUMBER"):
line = re.sub("\d+$", self[prefix + "build"], line)
elif line.startswith("#define V8_PATCH_LEVEL"):
line = re.sub("\d+$", self[prefix + "patch"], line)
elif (self[prefix + "candidate"] and
line.startswith("#define V8_IS_CANDIDATE_VERSION")):
line = re.sub("\d+$", self[prefix + "candidate"], line)
output += "%s\n" % line
TextToFile(output, version_file)
class BootstrapStep(Step):
MESSAGE = "Bootstapping v8 checkout."
def RunStep(self):
if os.path.realpath(self.default_cwd) == os.path.realpath(V8_BASE):
self.Die("Can't use v8 checkout with calling script as work checkout.")
# Directory containing the working v8 checkout.
if not os.path.exists(self._options.work_dir):
os.makedirs(self._options.work_dir)
if not os.path.exists(self.default_cwd):
self.Command("fetch", "v8", cwd=self._options.work_dir)
class UploadStep(Step):
MESSAGE = "Upload for code review."
def RunStep(self):
if self._options.reviewer:
print "Using account %s for review." % self._options.reviewer
reviewer = self._options.reviewer
else:
print "Please enter the email address of a V8 reviewer for your patch: ",
self.DieNoManualMode("A reviewer must be specified in forced mode.")
reviewer = self.ReadLine()
self.GitUpload(reviewer, self._options.author, self._options.force_upload,
bypass_hooks=self._options.bypass_upload_hooks,
cc=self._options.cc)
class DetermineV8Sheriff(Step):
MESSAGE = "Determine the V8 sheriff for code review."
def RunStep(self):
self["sheriff"] = None
if not self._options.sheriff: # pragma: no cover
return
# The sheriff determined by the rotation on the waterfall has a
# @google.com account.
url = "https://chromium-build.appspot.com/p/chromium/sheriff_v8.js"
match = re.match(r"document\.write\('(\w+)'\)", self.ReadURL(url))
# If "channel is sheriff", we can't match an account.
if match:
g_name = match.group(1)
# Optimistically assume that google and chromium account name are the
# same.
self["sheriff"] = g_name + "@chromium.org"
self._options.reviewer = ("%s,%s" %
(self["sheriff"], self._options.reviewer))
print "Found active sheriff: %s" % self["sheriff"]
else:
print "No active sheriff found."
def MakeStep(step_class=Step, number=0, state=None, config=None,
options=None, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
# Allow to pass in empty dictionaries.
state = state if state is not None else {}
config = config if config is not None else {}
try:
message = step_class.MESSAGE
except AttributeError:
message = step_class.__name__
return step_class(message, number=number, config=config,
state=state, options=options,
handler=side_effect_handler)
class ScriptsBase(object):
def __init__(self,
config=None,
side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER,
state=None):
self._config = config or self._Config()
self._side_effect_handler = side_effect_handler
self._state = state if state is not None else {}
def _Description(self):
return None
def _PrepareOptions(self, parser):
pass
def _ProcessOptions(self, options):
return True
def _Steps(self): # pragma: no cover
raise Exception("Not implemented.")
def _Config(self):
return {}
def MakeOptions(self, args=None):
parser = argparse.ArgumentParser(description=self._Description())
parser.add_argument("-a", "--author", default="",
help="The author email used for rietveld.")
parser.add_argument("--dry-run", default=False, action="store_true",
help="Perform only read-only actions.")
parser.add_argument("-r", "--reviewer", default="",
help="The account name to be used for reviews.")
parser.add_argument("--sheriff", default=False, action="store_true",
help=("Determine current sheriff to review CLs. On "
"success, this will overwrite the reviewer "
"option."))
parser.add_argument("-s", "--step",
help="Specify the step where to start work. Default: 0.",
default=0, type=int)
parser.add_argument("--work-dir",
help=("Location where to bootstrap a working v8 "
"checkout."))
self._PrepareOptions(parser)
if args is None: # pragma: no cover
options = parser.parse_args()
else:
options = parser.parse_args(args)
# Process common options.
if options.step < 0: # pragma: no cover
print "Bad step number %d" % options.step
parser.print_help()
return None
# Defaults for options, common to all scripts.
options.manual = getattr(options, "manual", True)
options.force = getattr(options, "force", False)
options.bypass_upload_hooks = False
# Derived options.
options.requires_editor = not options.force
options.wait_for_lgtm = not options.force
options.force_readline_defaults = not options.manual
options.force_upload = not options.manual
# Process script specific options.
if not self._ProcessOptions(options):
parser.print_help()
return None
if not options.work_dir:
options.work_dir = "/tmp/v8-release-scripts-work-dir"
return options
def RunSteps(self, step_classes, args=None):
options = self.MakeOptions(args)
if not options:
return 1
state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
if options.step == 0 and os.path.exists(state_file):
os.remove(state_file)
steps = []
for (number, step_class) in enumerate([BootstrapStep] + step_classes):
steps.append(MakeStep(step_class, number, self._state, self._config,
options, self._side_effect_handler))
for step in steps[options.step:]:
if step.Run():
return 0
return 0
def Run(self, args=None):
return self.RunSteps(self._Steps(), args)
| apache-2.0 | -377,460,586,589,952,100 | -8,062,655,725,383,334,000 | 32.178207 | 79 | 0.637906 | false |
daasbank/swift | swift/common/splice.py | 36 | 5500 | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Bindings to the `tee` and `splice` system calls
'''
import os
import operator
import six
import ctypes
import ctypes.util
__all__ = ['tee', 'splice']
c_loff_t = ctypes.c_long
# python 2.6 doesn't have c_ssize_t
c_ssize_t = getattr(ctypes, 'c_ssize_t', ctypes.c_long)
class Tee(object):
'''Binding to `tee`'''
__slots__ = '_c_tee',
def __init__(self):
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
try:
c_tee = libc.tee
except AttributeError:
self._c_tee = None
return
c_tee.argtypes = [
ctypes.c_int,
ctypes.c_int,
ctypes.c_size_t,
ctypes.c_uint
]
c_tee.restype = c_ssize_t
def errcheck(result, func, arguments):
if result == -1:
errno = ctypes.set_errno(0)
raise IOError(errno, 'tee: %s' % os.strerror(errno))
else:
return result
c_tee.errcheck = errcheck
self._c_tee = c_tee
def __call__(self, fd_in, fd_out, len_, flags):
'''See `man 2 tee`
File-descriptors can be file-like objects with a `fileno` method, or
integers.
Flags can be an integer value, or a list of flags (exposed on
`splice`).
This function returns the number of bytes transferred (i.e. the actual
result of the call to `tee`).
Upon other errors, an `IOError` is raised with the proper `errno` set.
'''
if not self.available:
raise EnvironmentError('tee not available')
if not isinstance(flags, six.integer_types):
c_flags = six.moves.reduce(operator.or_, flags, 0)
else:
c_flags = flags
c_fd_in = getattr(fd_in, 'fileno', lambda: fd_in)()
c_fd_out = getattr(fd_out, 'fileno', lambda: fd_out)()
return self._c_tee(c_fd_in, c_fd_out, len_, c_flags)
@property
def available(self):
'''Availability of `tee`'''
return self._c_tee is not None
tee = Tee()
del Tee
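# Illustrative sketch (hypothetical descriptors): duplicating up to 4 KiB from
# one pipe into another without consuming the input, when the syscall is
# available:
#
#   if tee.available:
#       copied = tee(pipe_a_read_fd, pipe_b_write_fd, 4096, 0)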
class Splice(object):
'''Binding to `splice`'''
# From `bits/fcntl-linux.h`
SPLICE_F_MOVE = 1
SPLICE_F_NONBLOCK = 2
SPLICE_F_MORE = 4
SPLICE_F_GIFT = 8
__slots__ = '_c_splice',
def __init__(self):
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
try:
c_splice = libc.splice
except AttributeError:
self._c_splice = None
return
c_loff_t_p = ctypes.POINTER(c_loff_t)
c_splice.argtypes = [
ctypes.c_int, c_loff_t_p,
ctypes.c_int, c_loff_t_p,
ctypes.c_size_t,
ctypes.c_uint
]
c_splice.restype = c_ssize_t
def errcheck(result, func, arguments):
if result == -1:
errno = ctypes.set_errno(0)
raise IOError(errno, 'splice: %s' % os.strerror(errno))
else:
off_in = arguments[1]
off_out = arguments[3]
return (
result,
off_in.contents.value if off_in is not None else None,
off_out.contents.value if off_out is not None else None)
c_splice.errcheck = errcheck
self._c_splice = c_splice
def __call__(self, fd_in, off_in, fd_out, off_out, len_, flags):
'''See `man 2 splice`
File-descriptors can be file-like objects with a `fileno` method, or
integers.
Flags can be an integer value, or a list of flags (exposed on this
object).
Returns a tuple of the result of the `splice` call, the output value of
`off_in` and the output value of `off_out` (or `None` for any of these
output values, if applicable).
Upon other errors, an `IOError` is raised with the proper `errno` set.
Note: if you want to pass `NULL` as value for `off_in` or `off_out` to
the system call, you must pass `None`, *not* 0!
'''
if not self.available:
raise EnvironmentError('splice not available')
if not isinstance(flags, six.integer_types):
c_flags = six.moves.reduce(operator.or_, flags, 0)
else:
c_flags = flags
c_fd_in = getattr(fd_in, 'fileno', lambda: fd_in)()
c_fd_out = getattr(fd_out, 'fileno', lambda: fd_out)()
c_off_in = \
ctypes.pointer(c_loff_t(off_in)) if off_in is not None else None
c_off_out = \
ctypes.pointer(c_loff_t(off_out)) if off_out is not None else None
return self._c_splice(
c_fd_in, c_off_in, c_fd_out, c_off_out, len_, c_flags)
@property
def available(self):
'''Availability of `splice`'''
return self._c_splice is not None
splice = Splice()
del Splice
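# Illustrative usage sketch (not part of the original module; the file path is
# an assumption for demonstration only). `splice` moves data between a file
# descriptor and a pipe without copying it through user space:
#
#   import os
#   rpipe, wpipe = os.pipe()
#   fd = os.open('/tmp/example.dat', os.O_RDONLY)
#   if splice.available:
#       moved, new_off_in, _ = splice(fd, 0, wpipe, None, 4096,
#                                     splice.SPLICE_F_MOVE)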
| apache-2.0 | -7,589,059,495,466,492,000 | -8,096,559,537,756,905,000 | 26.227723 | 79 | 0.566909 | false |
omondiy/foursquared.eclair | mock_server/playfoursquare.py | 127 | 1999 | #!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
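# Usage note (assumes the ../captures/api/v1/*.xml files exist relative to the
# working directory): run `python playfoursquare.py` and point a client at
# http://localhost:8080/v1/venues (or any other path handled below).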
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Handle playfoursquare.com requests, for testing."""
def do_GET(self):
logging.warn('do_GET: %s, %s', self.command, self.path)
url = urlparse.urlparse(self.path)
logging.warn('do_GET: %s', url)
query = urlparse.parse_qs(url.query)
query_keys = [pair[0] for pair in query]
response = self.handle_url(url)
if response != None:
self.send_200()
shutil.copyfileobj(response, self.wfile)
self.wfile.close()
do_POST = do_GET
def handle_url(self, url):
path = None
if url.path == '/v1/venue':
path = '../captures/api/v1/venue.xml'
elif url.path == '/v1/venues':
path = '../captures/api/v1/venues.xml'
elif url.path == '/v1/user':
path = '../captures/api/v1/user.xml'
elif url.path == '/v1/checkcity':
path = '../captures/api/v1/checkcity.xml'
elif url.path == '/v1/checkins':
path = '../captures/api/v1/checkins.xml'
elif url.path == '/v1/cities':
path = '../captures/api/v1/cities.xml'
elif url.path == '/v1/switchcity':
path = '../captures/api/v1/switchcity.xml'
elif url.path == '/v1/tips':
path = '../captures/api/v1/tips.xml'
elif url.path == '/v1/checkin':
path = '../captures/api/v1/checkin.xml'
if path is None:
self.send_error(404)
else:
logging.warn('Using: %s' % path)
return open(path)
def send_200(self):
self.send_response(200)
self.send_header('Content-type', 'text/xml')
self.end_headers()
def main():
server_address = ('0.0.0.0', 8080)
httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
main()
| apache-2.0 | -5,893,191,982,023,590,000 | 5,449,696,134,608,997,000 | 25.302632 | 67 | 0.624812 | false |
IV-GII/SocialCookies | ENV1/lib/python2.7/site-packages/django/contrib/sites/management.py | 232 | 1587 | """
Creates the default Site object.
"""
from django.db.models import signals
from django.db import connections
from django.db import router
from django.contrib.sites.models import Site
from django.contrib.sites import models as site_app
from django.core.management.color import no_style
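# Connected to the post_syncdb signal below: right after the sites table is
# first created, insert the default Site row (pk=1, domain/name "example.com")
# and reset the database sequence so later inserts do not collide with pk=1.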
def create_default_site(app, created_models, verbosity, db, **kwargs):
# Only create the default sites in databases where Django created the table
if Site in created_models and router.allow_syncdb(db, Site) :
# The default settings set SITE_ID = 1, and some tests in Django's test
# suite rely on this value. However, if database sequences are reused
# (e.g. in the test suite after flush/syncdb), it isn't guaranteed that
# the next id will be 1, so we coerce it. See #15573 and #16353. This
# can also crop up outside of tests - see #15346.
if verbosity >= 2:
print("Creating example.com Site object")
Site(pk=1, domain="example.com", name="example.com").save(using=db)
# We set an explicit pk instead of relying on auto-incrementation,
# so we need to reset the database sequence. See #17415.
sequence_sql = connections[db].ops.sequence_reset_sql(no_style(), [Site])
if sequence_sql:
if verbosity >= 2:
print("Resetting sequence")
cursor = connections[db].cursor()
for command in sequence_sql:
cursor.execute(command)
Site.objects.clear_cache()
signals.post_syncdb.connect(create_default_site, sender=site_app)
| gpl-2.0 | -2,607,519,493,348,028,000 | 562,865,427,759,013,700 | 43.083333 | 81 | 0.674858 | false |
jaggu303619/asylum | openerp/addons/hr_holidays/wizard/hr_holidays_summary_employees.py | 52 | 2187 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class hr_holidays_summary_employee(osv.osv_memory):
_name = 'hr.holidays.summary.employee'
_description = 'HR Leaves Summary Report By Employee'
_columns = {
'date_from': fields.date('From', required=True),
'emp': fields.many2many('hr.employee', 'summary_emp_rel', 'sum_id', 'emp_id', 'Employee(s)'),
'holiday_type': fields.selection([('Approved','Approved'),('Confirmed','Confirmed'),('both','Both Approved and Confirmed')], 'Select Leave Type', required=True)
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-%m-01'),
'holiday_type': 'Approved',
}
def print_report(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, [], context=context)[0]
data['emp'] = context['active_ids']
datas = {
'ids': [],
'model': 'hr.employee',
'form': data
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'holidays.summary',
'datas': datas,
}
hr_holidays_summary_employee()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,452,828,558,203,905,000 | -7,500,045,337,396,497,000 | 38.763636 | 168 | 0.584819 | false |
rsharris/jun2015lgl | create_script_insert_length_sparse.py | 1 | 17318 | #!/usr/bin/env python
"""
Create a cluster job file to create an average insert length sparsity
discriminator track.
"""
from sys import argv,stdin,stdout,stderr,exit
def usage(s=None):
message = """
usage: create_script_insert_length_sparse [options] > insert_length_sparse.sh
<sub>_<samp>_<type> (required) run descriptor; for example, CS_NORM_PE
means subject "CS", sample "NORM", and type "PE";
other filenames can use "{run}" to refer to this
string
--control=<filename> read control values from a file (see list below)
--base=<path> path prefix; other filenames can use "{base}" to
refer to this path
--chromosomes=<filename> read chromosome names and lengths from a file
(default is {base}/data/hg19.chrom_lengths)
--blacklist=<filename> (cumulative) track file of blacklist intervals
--input=<filename> (required) track file to process
--track=<filename> (required) track file to create
(default is {base}/tracks/{run}.insert_length.sparse)
--tempinput=<filename> temporary file to hold input track file, if
needed; only needed if the input track was gzipped
(default is {base}/tracks/{run}.insert_length.sparse.scratch)
--temp=<filename> temporary file to hold track file, if needed; only
needed if --gzip and --bigwig are both used
(default is {base}/tracks/{run}.insert_length.sparse.temp)
--gzip compress track file
--undated don't include today's date in the track name
--bigwig[=<filename>] create bigwig file in addition to track file
--bigwigchroms=<filename> chromosomes file for bedGraphToBigWig
(default is {base}/temp/ucsc.hg19.chrom_lengths)
--bigwigurl=<url> url for the bigwig file; this can use {bigwig}
for the bigwig filename
--bigwiglink=<filename> path at which to create a symbolic link to the
bigwig and info files; this can use {bigwig}
for the bigwig filename
  --bigwigposition=<interval>  initial UCSC browser interval for bigwig track
--initialize=<text> (cumulative) shell command to add to job beginning
"shebang:bash" is mapped "#!/usr/bin/env bash"
other commands are copied "as is"
values read from control file:
avgInsertLen.{run}
insert_length_sparse.maskLevel
insert_length_sparse.windowLength
insert_length_sparse.minLength
insert_length_sparse.density
insert_length_sparse.densityClip
insert_length_sparse.samplingStep"""
if (s == None): exit (message)
else: exit ("%s%s" % (s,message))
def main():
global basePath,runName
global debug
bashShebang = "#!/usr/bin/env bash"
# parse args
runName = None
controlFilename = None
basePath = None
blacklistFilenames = []
inputFilename = None
chromsFilename = None
trackName = None
tempInputFilename = None
tempFilename = None
gzipOutput = False
dateInTrackname = True
bigWigFilename = None
bigWigChromsFilename = None
bigWigUrl = None
bigWigLink = None
bigWigPosition = None
bashInitializers = ["set -eu"]
debug = []
for arg in argv[1:]:
if ("=" in arg):
argVal = arg.split("=",1)[1].strip()
if (arg.startswith("--control=")):
controlFilename = argVal
elif (arg.startswith("--base=")) or (arg.startswith("--basepath=")) or (arg.startswith("--path=")):
basePath = argVal
elif (arg.startswith("--blacklist=")):
blacklistFilenames += [argVal]
elif (arg.startswith("--input=")):
inputFilename = argVal
elif (arg.startswith("--chromosomes=")) or (arg.startswith("--chroms=")):
chromsFilename = argVal
elif (arg.startswith("--track=")):
trackName = argVal
elif (arg.startswith("--tempinput=")):
tempInputFilename = argVal
elif (arg.startswith("--temp=")):
tempFilename = argVal
elif (arg == "--gzip"):
gzipOutput = True
elif (arg == "--undated"):
dateInTrackname = False
elif (arg == "--bigwig"):
bigWigFilename = "{track}.bw"
elif (arg.startswith("--bigwig=")):
bigWigFilename = argVal
elif (arg.startswith("--bigwigchromosomes=")) or (arg.startswith("--bigwigchroms=")):
bigWigChromsFilename = argVal
elif (arg.startswith("--bigwigurl=")) or (arg.startswith("--url=")):
bigWigUrl = argVal
elif (arg.startswith("--bigwiglink=")) or (arg.startswith("--link=")):
bigWigLink = argVal
elif (arg.startswith("--bigwigposition=")) or (arg.startswith("--bigwigpos=")):
bigWigPosition = argVal
elif (arg.startswith("--initialize=")) or (arg.startswith("--init=")):
if (argVal == "shebang:bash"):
argVal = bashShebang
if (argVal == "set -eu"):
bashInitializers = [x for x in bashInitializers if (x != "set -eu")]
bashInitializers += [argVal]
elif (arg == "--debug"):
debug += ["debug"]
elif (arg.startswith("--debug=")):
debug += argVal.split(",")
elif (arg.startswith("--")):
usage("unrecognized option: %s" % arg)
elif (runName == None):
fields = arg.split(":",2)
if (len(fields) != 3):
fields = arg.split("_")
if (len(fields) < 3) or (fields[-1] not in ["PE","MP"]):
usage("\"%s\" is not a valid run descriptor" % arg)
runName = "_".join(fields)
else:
usage("unrecognized option: %s" % arg)
if (runName == None):
usage("you have to give me a run descriptor")
if (controlFilename == None):
usage("you have to give me a control filename")
if (inputFilename == None):
usage("you have to give me an input track filename")
if (chromsFilename == None):
chromsFilename = "{base}/data/hg19.chrom_lengths"
if (trackName == None):
trackName = "{base}/tracks/{run}.insert_length.sparse"
if (tempInputFilename == None) and (inputFilename.endswith(".gz")):
tempInputFilename = trackName + ".scratch"
if (tempFilename == None) and (bigWigFilename != None) and (gzipOutput):
tempFilename = trackName + ".temp"
if (bigWigFilename != None):
if (bigWigChromsFilename == None):
bigWigChromsFilename = "{base}/temp/ucsc.hg19.chrom_lengths"
if (bigWigUrl == None):
usage("you have to give me a url for the bigwig file")
trackId = "%s.insert_length.sparse" % runName
##########
# perform filename substitution
##########
if (basePath == None): basePath = "."
elif (basePath.endswith("/")): basePath = basePath[:-1]
controlFilename = do_filename_substitutition(controlFilename)
chromsFilename = do_filename_substitutition(chromsFilename)
# blacklist track names
for (ix,blacklistFilename) in enumerate(blacklistFilenames):
blacklistFilename = do_filename_substitutition(blacklistFilename)
assert (not blacklistFilename.endswith(".gz"))
assert (not blacklistFilename.endswith(".gzip"))
if (not blacklistFilename.endswith(".dat")): blacklistFilename += ".dat"
blacklistFilenames[ix] = blacklistFilename
# input track name
inputFilename = do_filename_substitutition(inputFilename)
gzipInput = False
if (inputFilename.endswith(".gz")): gzipInput = True
elif (inputFilename.endswith(".gzip")): gzipInput = True
elif (not inputFilename.endswith(".dat")): inputFilename += ".dat"
# track name
trackName = do_filename_substitutition(trackName)
trackFilename = trackName
if (gzipOutput):
if (not trackFilename.endswith(".gz")): trackFilename += ".gz"
else:
if (not trackFilename.endswith(".dat")): trackFilename += ".dat"
if (tempInputFilename != None):
tempInputFilename = do_filename_substitutition(tempInputFilename)
if (tempFilename != None):
tempFilename = do_filename_substitutition(tempFilename)
# big wig name
if (bigWigFilename != None):
bigWigFilename = do_filename_substitutition(bigWigFilename)
if ("{track}" in bigWigFilename):
trackTemp = trackName
if (trackTemp.endswith(".gz")): trackTemp = trackTemp[:-3]
elif (trackTemp.endswith(".dat")): trackTemp = trackTemp[:-4]
bigWigFilename = bigWigFilename.replace("{track}",trackTemp)
if (bigWigFilename.endswith(".bw")): infoFilename = bigWigFilename[:-3] + ".info"
else: infoFilename = bigWigFilename + ".info"
if (bigWigChromsFilename != None):
bigWigChromsFilename = do_filename_substitutition(bigWigChromsFilename)
if (bigWigUrl != None):
bigWigTemp = bigWigFilename
slashIx = bigWigTemp.rfind("/")
if (slashIx >= 0): bigWigTemp = bigWigTemp[slashIx+1:]
bigWigUrl = bigWigUrl.replace("{bigwig}",bigWigTemp)
if (bigWigLink != None):
bigWigSave = bigWigLink
bigWigTemp = bigWigFilename
slashIx = bigWigTemp.rfind("/")
if (slashIx >= 0): bigWigTemp = bigWigTemp[slashIx+1:]
bigWigLink = bigWigLink.replace("{bigwig}",bigWigTemp)
infoTemp = infoFilename
slashIx = infoTemp.rfind("/")
if (slashIx >= 0): infoTemp = infoTemp[slashIx+1:]
infoLink = bigWigSave.replace("{bigwig}",infoTemp)
##########
# get values from the control file
##########
avgInsertLen = None
maskLevel = None
windowLength = None
minLength = None
densityThreshold = None
densityClip = None
samplingStep = None
f = file(controlFilename,"rt")
lineNumber = 0
for line in f:
lineNumber += 1
line = line.strip()
if (line == ""): continue
if (line.startswith("#")): continue
fields = line.split()
assert (len(fields) >= 3), \
"not enough fields at control file line %d (%d, expected at least 3)" \
% (lineNumber,len(fields))
assert (fields[1] == "="), \
"can't understand control file line %d:\n%s" \
% (lineNumber,line)
(name,_,val) = fields[:3]
if (name == "avgInsertLen." + runName): avgInsertLen = int(val)
if (name == "insert_length_sparse.maskLevel"): maskLevel = val
if (name == "insert_length_sparse.windowLength"): windowLength = int(val)
if (name == "insert_length_sparse.minLength"): minLength = int(val)
if (name == "insert_length_sparse.density"): densityThreshold = val
if (name == "insert_length_sparse.densityClip"): densityClip = val
if (name == "insert_length_sparse.samplingStep"): samplingStep = int(val)
f.close()
if (windowLength == None): windowLength = minLength
if (avgInsertLen == None): assert(False), "control file lacks avgInsertLen"
if (windowLength == None): assert(False), "control file lacks windowLength"
if (minLength == None): assert(False), "control file lacks minLength"
if (densityThreshold == None): assert(False), "control file lacks density"
if (densityClip == None): assert(False), "control file lacks density clip"
	if (samplingStep == None):     assert(False), "control file lacks samplingStep"
if (blacklistFilenames != []):
if (maskLevel == None): assert(False), "control file lacks maskLevel"
if ("." in maskLevel):
while (maskLevel.endswith("0")):
maskLevel = maskLevel[:-1]
if (maskLevel.endswith(".")):
maskLevel = maskLevel[:-1]
if ("." in densityThreshold):
while (densityThreshold.endswith("0")):
densityThreshold = densityThreshold[:-1]
if (densityThreshold.endswith(".")):
densityThreshold = densityThreshold[:-1]
if ("." in densityClip):
while (densityClip.endswith("0")):
densityClip = densityClip[:-1]
if (densityClip.endswith(".")):
densityClip = densityClip[:-1]
##########
# create the job's shell script
##########
# write bash intitializers
if (bashInitializers != None):
for (ix,bashInitializer) in enumerate(bashInitializers):
if (bashInitializer != bashShebang): continue
print bashInitializer
bashInitializers[ix] = None
for (ix,bashInitializer) in enumerate(bashInitializers):
if (bashInitializer != "set -eu"): continue
print bashInitializer
bashInitializers[ix] = None
for bashInitializer in bashInitializers:
if (bashInitializer == None): continue
print do_filename_substitutition(bashInitializer)
print
if (dateInTrackname):
print "today=`today {mmm}/{d}/{yyyy}`"
# write commands describing the files the script will create
if (tempInputFilename != None):
print "echo \"will use %s as a temporary input file\"" % tempInputFilename
if (tempFilename != None):
print "echo \"will write temporary files to %s\"" % tempFilename
print "echo \"will write track file to %s\"" % trackFilename
if (bigWigFilename != None):
print "echo \"will write bigwig file to %s\"" % bigWigFilename
# write command(s) to create track file
print
print "echo \"=== creating track %s ===\"" % trackId
if (gzipInput):
commands = []
command = ["time gzip -dc %s" % inputFilename]
commands += [command]
command = ["> %s" % tempInputFilename]
commands += [command]
print
print commands_to_pipeline(commands)
trackSourceFilename = tempInputFilename
else:
trackSourceFilename = inputFilename
commands = []
command = ["time genodsp"]
command += ["--chromosomes=%s" % chromsFilename]
command += ["--show:uncovered"]
command += ["= input %s --missing=-inf" % trackSourceFilename]
command += ["= addconst %s" % avgInsertLen]
command += ["= slidingsum W=%d D=W" % windowLength]
if (blacklistFilenames != []):
command += ["= percentile %s W=%d --min=1/inf --quiet" % (maskLevel,samplingStep)]
command += ["= input %s --missing=-inf" % trackSourceFilename]
command += ["= addconst %s" % avgInsertLen]
command += ["= slidingsum W=%d D=W" % windowLength]
command += ["= percentile %s,%s W=%d --min=1/inf --quiet" \
% (densityClip,densityThreshold,samplingStep)]
if (gzipInput): command += ["= input %s --missing=-inf --destroy" % trackSourceFilename]
else: command += ["= input %s --missing=-inf" % trackSourceFilename]
command += ["= addconst %s" % avgInsertLen]
command += ["= clip --min=percentile%s" % densityClip]
for blacklistFilename in blacklistFilenames:
command += ["= mask %s --mask=percentile%s" % (blacklistFilename,maskLevel)]
command += ["= anticlump --average=percentile%s L=%d" \
% (densityThreshold,minLength)]
for blacklistFilename in blacklistFilenames:
command += ["= mask %s --mask=0" % blacklistFilename]
commands += [command]
if (gzipOutput):
if (tempFilename != None):
command = ["tee %s" % tempFilename]
commands += [command]
command = ["gzip"]
commands += [command]
command = ["> %s" % trackFilename]
commands += [command]
print
print commands_to_pipeline(commands)
# write command(s) to convert track file to bigwig
if (bigWigFilename != None):
print
print "echo \"=== converting track %s to bigwig ===\"" % trackId
if (tempFilename != None): trackInput = tempFilename
else: trackInput = trackFilename
commands = []
command = ["time bedGraphToBigWig"]
command += [trackInput]
command += [bigWigChromsFilename]
command += [bigWigFilename]
commands += [command]
print
print commands_to_pipeline(commands)
if (tempFilename != None):
commands = []
command = ["rm %s" % tempFilename]
commands += [command]
print
print commands_to_pipeline(commands)
description = "sparse intervals in average insert lengths"
if (dateInTrackname): description += " (${today})"
commands = []
command = ["make_bigwig_info"]
command += ["--url=%s" % bigWigUrl]
command += ["--name=\"%s insert lengths sparse\"" % runName]
command += ["--desc=\"%s %s\"" % (runName,description)]
command += ["--autoscale=\"on\""]
command += ["--alwayszero=\"on\""]
command += ["--maxheight=\"10:10:10\""]
command += ["--color=250,30,100"]
if (bigWigPosition != None): command += ["--pos=\"%s\"" % bigWigPosition]
command += ["> %s" % infoFilename]
commands += [command]
print
print commands_to_pipeline(commands)
if (bigWigLink != None):
print
print "rm -f %s" % infoLink
print "ln -s %s %s" % (infoFilename,infoLink)
print "rm -f %s" % bigWigLink
print "ln -s %s %s" % (bigWigFilename,bigWigLink)
infoUrl = bigWigUrl
slashIx = infoUrl.rfind("/")
if (slashIx >= 0): infoUrl = infoUrl[:slashIx]
infoTemp = infoFilename
slashIx = infoTemp.rfind("/")
if (slashIx >= 0): infoTemp = infoTemp[slashIx+1:]
infoUrl = infoUrl + "/" + infoTemp
print >>stderr, infoUrl
print
print "echo \"track URL is %s\"" % (infoUrl)
def commands_to_pipeline(commands):
pipeline = []
for (cmdNum,cmd) in enumerate(commands):
if (cmdNum == 0): prefix = ""
else: prefix = " | "
if (cmd[0].startswith(">")):
assert (cmdNum != 0)
assert (len(cmd) == 1)
prefix = " "
pipeline += [prefix + cmd[0]]
for line in cmd[1:]:
pipeline += [" " + line]
return " \\\n".join(pipeline)
def do_filename_substitutition(s):
if ("{base}" in s):
assert (basePath != None)
s = s.replace("{base}",basePath)
if ("{run}" in s):
assert (runName != None)
s = s.replace("{run}",runName)
return s
if __name__ == "__main__": main()
| gpl-3.0 | 2,705,729,055,650,629,600 | -3,758,025,450,139,788,000 | 33.090551 | 101 | 0.636332 | false |
gaddman/ansible | lib/ansible/modules/network/aci/aci_contract_subject_to_filter.py | 2 | 9063 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_contract_subject_to_filter
short_description: Bind Contract Subjects to Filters (vz:RsSubjFiltAtt)
description:
- Bind Contract Subjects to Filters on Cisco ACI fabrics.
notes:
- The C(tenant), C(contract), C(subject), and C(filter_name) must exist before using this module in your playbook.
The M(aci_tenant), M(aci_contract), M(aci_contract_subject), and M(aci_filter) modules can be used for these.
seealso:
- module: aci_tenant
- module: aci_contract
- module: aci_contract_subject
- module: aci_filter
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(vz:RsSubjFiltAtt).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
contract:
description:
- The name of the contract.
aliases: [ contract_name ]
filter:
description:
- The name of the Filter to bind to the Subject.
aliases: [ filter_name ]
log:
description:
- Determines if the binding should be set to log.
- The APIC defaults to C(none) when unset during creation.
choices: [ log, none ]
aliases: [ directive ]
subject:
description:
- The name of the Contract Subject.
aliases: [ contract_subject, subject_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new contract subject to filer binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
log: '{{ log }}'
state: present
delegate_to: localhost
- name: Remove an existing contract subject to filter binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
log: '{{ log }}'
state: present
delegate_to: localhost
- name: Query a specific contract subject to filter binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
state: query
delegate_to: localhost
register: query_result
- name: Query all contract subject to filter bindings
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
contract=dict(type='str', aliases=['contract_name']), # Not required for querying all objects
filter=dict(type='str', aliases=['filter_name']), # Not required for querying all objects
        log=dict(type='str', choices=['log', 'none'], aliases=['directive']),
subject=dict(type='str', aliases=['contract_subject', 'subject_name']), # Not required for querying all objects
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['contract', 'filter', 'subject', 'tenant']],
['state', 'present', ['contract', 'filter', 'subject', 'tenant']],
],
)
contract = module.params['contract']
filter_name = module.params['filter']
log = module.params['log']
subject = module.params['subject']
tenant = module.params['tenant']
state = module.params['state']
    # Add subject_filter key to module.params for building the URL
module.params['subject_filter'] = filter_name
    # Convert log to an empty string if 'none', as that is what the API expects. An empty string is not a good option to present to the user.
if log == 'none':
log = ''
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='vzBrCP',
aci_rn='brc-{0}'.format(contract),
module_object=contract,
target_filter={'name': contract},
),
subclass_2=dict(
aci_class='vzSubj',
aci_rn='subj-{0}'.format(subject),
module_object=subject,
target_filter={'name': subject},
),
subclass_3=dict(
aci_class='vzRsSubjFiltAtt',
aci_rn='rssubjFiltAtt-{0}'.format(filter_name),
module_object=filter_name,
target_filter={'tnVzFilterName': filter_name},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='vzRsSubjFiltAtt',
class_config=dict(
tnVzFilterName=filter_name,
directives=log,
),
)
aci.get_diff(aci_class='vzRsSubjFiltAtt')
aci.post_config()
elif state == 'absent':
aci.delete_config()
# Remove subject_filter used to build URL from module.params
module.params.pop('subject_filter')
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 | -5,069,233,169,852,420,000 | -2,406,569,308,089,352,000 | 27.771429 | 141 | 0.609842 | false |
alistair-broomhead/robotframework-selenium2library | test/lib/mockito/spying.py | 70 | 1062 | #!/usr/bin/env python
# coding: utf-8
'''Spying on real objects.'''
from invocation import RememberedProxyInvocation, VerifiableInvocation
from mocking import TestDouble
__author__ = "Serhiy Oplakanets <[email protected]>"
__copyright__ = "Copyright 2009-2010, Mockito Contributors"
__license__ = "MIT"
__maintainer__ = "Mockito Maintainers"
__email__ = "[email protected]"
__all__ = ['spy']
def spy(original_object):
return Spy(original_object)
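# Illustrative usage sketch (the wrapped object is an assumption, not part of
# this module):
#
#   real_list = [1, 2, 3]
#   spied = spy(real_list)
#   spied.append(4)   # forwarded to the real list and remembered for verification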
class Spy(TestDouble):
strict = True # spies always have to check if method exists
def __init__(self, original_object):
self.original_object = original_object
self.invocations = []
self.verification = None
def __getattr__(self, name):
if self.verification:
return VerifiableInvocation(self, name)
else:
return RememberedProxyInvocation(self, name)
def remember(self, invocation):
self.invocations.insert(0, invocation)
def pull_verification(self):
v = self.verification
self.verification = None
return v
| apache-2.0 | 3,299,838,120,141,017,600 | -3,963,492,023,984,240,600 | 24.926829 | 70 | 0.69209 | false |
michaelliao/learn-python3 | samples/commonlib/use_urllib.py | 20 | 2195 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from urllib import request, parse
# get:
with request.urlopen('https://api.douban.com/v2/book/2129650') as f:
data = f.read()
print('Status:', f.status, f.reason)
for k, v in f.getheaders():
print('%s: %s' % (k, v))
print('Data:', data.decode('utf-8'))
# advanced get:
req = request.Request('http://www.douban.com/')
req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
with request.urlopen(req) as f:
print('Status:', f.status, f.reason)
for k, v in f.getheaders():
print('%s: %s' % (k, v))
print('Data:', f.read().decode('utf-8'))
# post:
print('Login to weibo.cn...')
email = input('Email: ')
passwd = input('Password: ')
login_data = parse.urlencode([
('username', email),
('password', passwd),
('entry', 'mweibo'),
('client_id', ''),
('savestate', '1'),
('ec', ''),
('pagerefer', 'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F%3Fjumpfrom%3Dweibocom&jumpfrom=weibocom')
])
req = request.Request('https://passport.weibo.cn/sso/login')
req.add_header('Origin', 'https://passport.weibo.cn')
req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
req.add_header('Referer', 'https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=http%3A%2F%2Fm.weibo.cn%2F%3Fjumpfrom%3Dweibocom')
with request.urlopen(req, data=login_data.encode('utf-8')) as f:
print('Status:', f.status, f.reason)
for k, v in f.getheaders():
print('%s: %s' % (k, v))
print('Data:', f.read().decode('utf-8'))
# with proxy and proxy auth:
proxy_handler = request.ProxyHandler({'http': 'http://www.example.com:3128/'})
proxy_auth_handler = request.ProxyBasicAuthHandler()
proxy_auth_handler.add_password('realm', 'host', 'username', 'password')
opener = request.build_opener(proxy_handler, proxy_auth_handler)
with opener.open('http://www.example.com/login.html') as f:
pass
| gpl-2.0 | 3,295,583,119,486,425,000 | -6,548,455,805,084,263,000 | 36.844828 | 167 | 0.656948 | false |
dongritengfei/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/model/activeworkitems.py | 140 | 3975 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import db
from datetime import timedelta, datetime
import time
from model.queuepropertymixin import QueuePropertyMixin
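# item_ids and item_dates are parallel lists holding (work item id, activation
# timestamp) pairs; deactivate_expired() drops pairs older than one hour and
# next_item() only hands out ids that are not already active.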
class ActiveWorkItems(db.Model, QueuePropertyMixin):
queue_name = db.StringProperty()
item_ids = db.ListProperty(int)
item_dates = db.ListProperty(float)
date = db.DateTimeProperty(auto_now_add=True)
# The id/date pairs should probably just be their own class.
def _item_time_pairs(self):
return zip(self.item_ids, self.item_dates)
def _set_item_time_pairs(self, pairs):
if pairs:
# The * operator raises on an empty list.
            # db.Model does not support tuples, so we have to make lists.
self.item_ids, self.item_dates = map(list, zip(*pairs))
else:
self.item_ids = []
self.item_dates = []
def _append_item_time_pair(self, pair):
self.item_ids.append(pair[0])
self.item_dates.append(pair[1])
def _remove_item(self, item_id):
nonexpired_pairs = [pair for pair in self._item_time_pairs() if pair[0] != item_id]
self._set_item_time_pairs(nonexpired_pairs)
@classmethod
def key_for_queue(cls, queue_name):
return "active-work-items-%s" % (queue_name)
@classmethod
def lookup_by_queue(cls, queue_name):
return cls.get_or_insert(key_name=cls.key_for_queue(queue_name), queue_name=queue_name)
@staticmethod
def _expire_item(key, item_id):
active_work_items = db.get(key)
active_work_items._remove_item(item_id)
active_work_items.put()
def expire_item(self, item_id):
return db.run_in_transaction(self._expire_item, self.key(), item_id)
def deactivate_expired(self, now):
one_hour_ago = time.mktime((now - timedelta(minutes=60)).timetuple())
nonexpired_pairs = [pair for pair in self._item_time_pairs() if pair[1] > one_hour_ago]
self._set_item_time_pairs(nonexpired_pairs)
def next_item(self, work_item_ids, now):
for item_id in work_item_ids:
if item_id not in self.item_ids:
self._append_item_time_pair([item_id, time.mktime(now.timetuple())])
return item_id
return None
def time_for_item(self, item_id):
for active_item_id, time in self._item_time_pairs():
if active_item_id == item_id:
return datetime.fromtimestamp(time)
return None
| bsd-3-clause | 8,061,052,847,270,476,000 | 2,621,520,128,611,689,000 | 39.979381 | 95 | 0.689308 | false |
ritzk/ansible-modules-core | utilities/logic/fail.py | 198 | 1458 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: fail
short_description: Fail with custom message
description:
- This module fails the progress with a custom message. It can be
useful for bailing out when a certain condition is met using C(when).
version_added: "0.8"
options:
msg:
description:
- The customized message used for failing execution. If omitted,
        fail will simply bail out with a generic message.
required: false
default: "'Failed as requested from task'"
author: "Dag Wieers (@dagwieers)"
'''
EXAMPLES = '''
# Example playbook using fail and when together
- fail: msg="The system may not be provisioned according to the CMDB status."
when: cmdb_status != "to-be-staged"
'''
| gpl-3.0 | -42,441,157,028,369,100 | 1,961,506,380,490,106,000 | 32.136364 | 77 | 0.72428 | false |
vivisect/synapse | synapse/tools/pushfile.py | 1 | 1334 | import os
import sys
import argparse
import synapse.telepath as s_telepath
import synapse.lib.output as s_output
def getArgParser():
p = argparse.ArgumentParser()
p.add_argument('cortex', help='telepath URL for a target cortex')
p.add_argument('filenames', nargs='+', help='files to upload')
p.add_argument('--tags', help='comma separated list of tags to add to the nodes')
return p
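# Usage sketch (URL and file names are placeholders, not defaults):
#   python -m synapse.tools.pushfile <cortex-telepath-url> file1.bin file2.bin --tags=foo.bar,baz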
def main(argv, outp=None):
if outp is None: # pragma: no cover
outp = s_output.OutPut()
p = getArgParser()
opts = p.parse_args(argv)
core = s_telepath.openurl(opts.cortex)
tags = []
if opts.tags:
for tag in opts.tags.split(','):
tags.append(tag)
if tags:
outp.printf('adding tags: %r' % (tags,))
for path in opts.filenames:
with open(path, 'rb') as fd:
base = os.path.basename(path)
node = core.formNodeByFd(fd, name=base)
core.addTufoTags(node, tags)
iden = node[1].get('file:bytes')
size = node[1].get('file:bytes:size')
name = node[1].get('file:bytes:name')
outp.printf('file: %s (%d) added (%s) as %s' % (base, size, iden, name))
core.fini() # Shut down the proxy
if __name__ == '__main__': # pragma: no cover
sys.exit(main(sys.argv[1:]))
| apache-2.0 | 7,002,713,295,952,166,000 | 7,364,963,159,218,336,000 | 24.653846 | 85 | 0.587706 | false |
StefanRijnhart/odoomrp-wip | account_treasury_forecast/wizard/wiz_create_invoice.py | 31 | 2577 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import openerp.addons.decimal_precision as dp
from openerp import models, fields, api
class WizCreateInvoice(models.TransientModel):
_name = 'wiz.create.invoice'
_description = 'Wizard to create invoices'
partner_id = fields.Many2one("res.partner", string="Partner")
journal_id = fields.Many2one("account.journal", string="Journal",
domain=[("type", "=", "purchase")])
description = fields.Char(string="Description")
amount = fields.Float(string="Amount",
digits_compute=dp.get_precision('Account'))
line_id = fields.Many2one("account.treasury.forecast.line.template",
string="Payment")
@api.one
def button_create_inv(self):
invoice_obj = self.env['account.invoice']
res_inv = invoice_obj.onchange_partner_id('in_invoice',
self.partner_id.id)
values = res_inv['value']
values['name'] = ('Treasury: ' + self.description + '/ Amount: ' +
str(self.amount))
values['reference'] = ('Treasury: ' + self.description + '/ Amount: ' +
str(self.amount))
values['partner_id'] = self.partner_id.id
values['journal_id'] = self.journal_id.id
values['type'] = 'in_invoice'
invoice_id = invoice_obj.create(values)
self.line_id.write({'invoice_id': invoice_id.id, 'paid': 1,
'journal_id': self.journal_id.id,
'partner_id': self.partner_id.id,
'amount': self.amount})
return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 | -2,845,852,556,418,254,000 | -5,822,488,278,300,775,000 | 46.722222 | 79 | 0.563834 | false |
FedoraScientific/salome-paravis | src/PV_SWIG/paravis.py | 1 | 3020 | # Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
# File : paravis.py
# Module : PARAVIS
#
import os, new
import PARAVIS
import SALOME
import SALOME_Session_idl
import SALOMEDS
import SALOME_ModuleCatalog
from omniORB import CORBA
from time import sleep
from salome import *
myORB = None
myNamingService = None
myLifeCycleCORBA = None
myNamingService = None
myLocalStudyManager = None
myLocalStudy = None
myLocalParavis = None
myDelay = None
mySession = None
## Initialization of paravis server
def Initialize(theORB, theNamingService, theLifeCycleCORBA, theStudyManager, theStudy, theDelay) :
global myORB, myNamingService, myLifeCycleCORBA, myLocalStudyManager, myLocalStudy
global mySession, myDelay
myDelay = theDelay
myORB = theORB
myNamingService = theNamingService
myLifeCycleCORBA = theLifeCycleCORBA
myLocalStudyManager = theStudyManager
while mySession == None:
mySession = myNamingService.Resolve("/Kernel/Session")
mySession = mySession._narrow(SALOME.Session)
mySession.GetInterface()
myDelay = theDelay
sleep(myDelay)
myLocalParavis = myLifeCycleCORBA.FindOrLoadComponent("FactoryServer", "PARAVIS")
myLocalStudy = theStudy
myLocalParavis.SetCurrentStudy(myLocalStudy)
myLocalParavis.ActivateModule()
return myLocalParavis
def ImportFile(theFileName):
"Import a file of any format supported by ParaView"
myParavis.ImportFile(theFileName)
def createFunction(theName):
"Create function - constructor of Paravis object"
def MyFunction():
return myParavis.CreateClass(theName)
return MyFunction
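# createConstructors() below asks the PARAVIS engine for its list of class
# names and publishes one module-level constructor function per class, so the
# classes can be instantiated directly from this module after initialization.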
def createConstructors():
"Create constructor functions according to list of extracted classes"
g = globals()
aClassNames = myParavis.GetClassesList();
for aName in aClassNames:
g[aName] = createFunction(aName)
## Initialize of a PARAVIS interface
myParavis = Initialize(orb, naming_service,lcc,myStudyManager,myStudy, 2)
## Initialize constructor functions
createConstructors()
## Initialize Paravis static objects
vtkSMObject = vtkSMObject()
vtkProcessModule = vtkProcessModule()
vtkPVPythonModule = vtkPVPythonModule()
vtkSMProxyManager = vtkSMProxyManager()
| lgpl-2.1 | -9,219,009,834,608,368,000 | 1,275,376,687,966,188,000 | 30.134021 | 98 | 0.765232 | false |
viktorradnai/screenwarp | utils/create_chessboard.py | 1 | 2170 | #!/usr/bin/python
import cv2
import wand.image
import wand.color
import wand.drawing
import sys
import logging
import argparse
logger = logging.getLogger(__name__)
def parse_cmdline():
parser = argparse.ArgumentParser(description='''
TODO: insert description.'''
)
parser.add_argument('-v', '--verbose', action='store_true', help="Enable verbose output")
parser.add_argument('-q', '--quiet', action='store_true', help="Output errors only")
parser.add_argument('-W', '--width', type=int, help="Target screen width. This will be the width of the output image.", default=1920)
parser.add_argument('-H', '--height', type=int, help="Target screen height. This will be the height of the output image.", default=1080)
parser.add_argument('-c', '--cols', type=int, help="Number of squares per column", default=16)
parser.add_argument('-r', '--rows', type=int, help="Number of squares per row", default=9)
parser.add_argument('filename', help="Image file")
args = parser.parse_args()
if args.verbose: loglevel = logging.DEBUG
elif args.quiet: loglevel = logging.ERROR
else: loglevel = logging.INFO
logging.basicConfig(level=loglevel, format='%(asctime)s %(levelname)s %(message)s')
return args
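# Example invocation (illustrative): `create_chessboard.py -W 1920 -H 1080 -c 16 -r 9 board.png`
# writes a 1920x1080 image containing a 16x9 grid of alternating black/white squares.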
def main():
args = parse_cmdline()
screen_width = args.width
screen_height = args.height
square_width = screen_width / args.cols
square_height = screen_height / args.rows
image = wand.image.Image(width=screen_width, height=screen_height, background=wand.color.Color('#fff'))
with wand.drawing.Drawing() as draw:
draw.fill_color = wand.color.Color('#000')
for r in range(args.rows):
for c in range(args.cols):
if not (c + r) % 2:
continue
x = square_width * c
y = square_height * r
logger.debug("%s %s %s %s", x, y, square_width, square_height)
draw.rectangle(x, y, width=square_width, height=square_height)
draw.draw(image)
image.save(filename=args.filename)
exit(0)
# call main()
if __name__ == '__main__':
main()
| gpl-3.0 | -5,322,884,914,486,194,000 | -256,864,284,461,352,000 | 32.90625 | 140 | 0.632258 | false |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/IPython/utils/dir2.py | 3 | 2232 | # encoding: utf-8
"""A fancy version of Python's builtin :func:`dir` function.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import inspect
import types
def safe_hasattr(obj, attr):
"""In recent versions of Python, hasattr() only catches AttributeError.
This catches all errors.
"""
try:
getattr(obj, attr)
return True
except:
return False
def dir2(obj):
"""dir2(obj) -> list of strings
Extended version of the Python builtin dir(), which does a few extra
checks.
This version is guaranteed to return only a list of true strings, whereas
dir() returns anything that objects inject into themselves, even if they
are later not really valid for attribute access (many extension libraries
have such bugs).
"""
# Start building the attribute list via dir(), and then complete it
# with a few extra special-purpose calls.
try:
words = set(dir(obj))
except Exception:
# TypeError: dir(obj) does not return a list
words = set()
if safe_hasattr(obj, '__class__'):
words |= set(dir(obj.__class__))
# filter out non-string attributes which may be stuffed by dir() calls
# and poor coding in third-party modules
words = [w for w in words if isinstance(w, str)]
return sorted(words)
def get_real_method(obj, name):
"""Like getattr, but with a few extra sanity checks:
- If obj is a class, ignore everything except class methods
- Check if obj is a proxy that claims to have all attributes
- Catch attribute access failing with any exception
- Check that the attribute is a callable object
Returns the method or None.
"""
try:
canary = getattr(obj, '_ipython_canary_method_should_not_exist_', None)
except Exception:
return None
if canary is not None:
# It claimed to have an attribute it should never have
return None
try:
m = getattr(obj, name, None)
except Exception:
return None
if inspect.isclass(obj) and not isinstance(m, types.MethodType):
return None
if callable(m):
return m
return None
| mit | 1,925,970,740,440,449,800 | -9,162,947,593,640,590,000 | 25.571429 | 79 | 0.655466 | false |
Symphonia/Searcher | ImageView.py | 1 | 4861 | from PySide import QtGui, QtCore
import sys, os
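# Controls implemented in eventFilter/mouse handlers below (frameless window):
# Left/Right cycle through the image list, W resets the window to 500x400,
# R resizes the window to the image size (scaled down to the screen height if
# needed), E fits the window to the available screen height, Esc or Ctrl+W
# closes the viewer; left-dragging moves the window unless the press is within
# 20 px of the right edge, which switches to resize mode.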
class ImageView(QtGui.QWidget):
def __init__(self,imagelist,parent = None):
super(ImageView,self).__init__(parent)
self.imagesize = None
self.mode = ''
self.imageList = imagelist[0]
self.index = imagelist[1]
self.title_label = QtGui.QLabel(self)
self.imagesizelabel = QtGui.QLabel(self)
self.cursizelabel = QtGui.QLabel(self)
self.image_label = QtGui.QLabel(self)
self.image_label.setBackgroundRole(QtGui.QPalette.Base)
self.image_label.setSizePolicy(QtGui.QSizePolicy.Ignored,QtGui.QSizePolicy.Ignored)
self.image_label.setScaledContents(True)
self.installEventFilter(self)
CloseWindowAction = QtGui.QAction(self)
CloseWindowAction.setShortcut("Ctrl+W")
CloseWindowAction.triggered.connect(self.close)
self.addAction(CloseWindowAction)
self.scrollarea = QtGui.QScrollArea(self)
self.scrollarea.setBackgroundRole(QtGui.QPalette.Dark)
self.scrollarea.setWidget(self.image_label)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.imagesizelabel)
hbox.addWidget(self.title_label)
hbox.addWidget(self.cursizelabel)
hbox.setContentsMargins(3,0,3,0)
qbox = QtGui.QVBoxLayout(self)
qbox.addLayout(hbox)
qbox.addWidget(self.scrollarea)
qbox.setContentsMargins(0,5,0,0)
info = QtCore.QFileInfo(self.imageList[self.index])
self.title_label.setText(info.fileName())
self.title_label.setAlignment(QtCore.Qt.AlignCenter)
self.imagesizelabel.setAlignment(QtCore.Qt.AlignLeft)
self.cursizelabel.setAlignment(QtCore.Qt.AlignRight)
self.setMinimumHeight(10)
self.setMinimumWidth(10)
self.setWindowTitle('Image Viewer')
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setGeometry(10,10,500,400)
if self.index ==0:
self.open(self.imageList[0])
self.show()
def open(self,image_path):
if image_path:
image = QtGui.QImage(image_path)
if image.isNull():
QtGui.QMessageBox.information(self,"Image View","Cannot load %s." %image_path)
return
self.image_label.setPixmap(QtGui.QPixmap.fromImage(image))
self.scrollarea.setWidgetResizable(True)
self.imagesize = image.size()
self.updateTitle()
def eventFilter(self,object,event):
if event.type() == QtCore.QEvent.KeyRelease:
if event.key() == QtCore.Qt.Key_Left:
if self.index ==0: self.index = len(self.imageList)
self.index-=1
self.open(self.imageList[self.index])
if event.key() == QtCore.Qt.Key_Right:
if self.index>=len(self.imageList)-1: self.index =0
self.index+=1
self.open(self.imageList[self.index])
if event.key() == QtCore.Qt.Key_W:
self.resize(500,400)
if event.key() == QtCore.Qt.Key_R:
if self.imagesize.height() > QtGui.QDesktopWidget().availableGeometry().height():
ratio = QtGui.QDesktopWidget().availableGeometry().height() / self.imagesize.height()
self.resize(int(self.imagesize.width()*ratio),int(self.imagesize.height()*ratio))
else:
self.resize(self.imagesize.width(),self.imagesize.height())
if event.key() == QtCore.Qt.Key_E:
self.move(self.pos().x(),0)
ratio = QtGui.QDesktopWidget().availableGeometry().height() / self.imagesize.height()
self.resize(int(self.imagesize.width()*ratio),int(self.imagesize.height()*ratio))
self.updateTitle()
if event.key() == QtCore.Qt.Key_Escape:
self.close()
if event.type() == QtCore.QEvent.MouseButtonPress:
if event.pos().x() < self.size().width() -20:
self.diff = event.globalPos() - self.frameGeometry().topLeft()
self.mode = 'drag'
else:
self.mode = 'resize'
if event.type() == QtCore.QEvent.MouseMove:
if self.mode == 'drag':
self.move(event.globalPos()-self.diff)
else:
self.resize(event.pos().x(),event.pos().y())
self.updateTitle()
return False
def updateTitle(self):
info = QtCore.QFileInfo(self.imageList[self.index])
self.imagesizelabel.setText(str(self.imagesize.width()) +','+ str(self.imagesize.height()) + ' ->')
self.title_label.setText(info.fileName())
self.cursizelabel.setText('<- ' + str(self.size().width()) + ',' + str(self.size().height()))
def mousePressEvent(self,event):
print(event.buttons())
if event.buttons() == QtCore.Qt.LeftButton:
if event.pos().x() < self.size().width() -20:
self.diff = event.globalPos() - self.frameGeometry().topLeft()
self.mode = 'drag'
else:
self.mode = 'resize'
def mouseMoveEvent(self,event):
if event.buttons() == QtCore.Qt.LeftButton:
if self.mode == 'drag':
self.move(event.globalPos()-self.diff)
else:
self.resize(event.pos().x(),event.pos().y())
# def main():
# app = QtGui.QApplication(sys.argv)
# imageview = ImageView()
# sys.exit(app.exec_())
# if __name__ == '__main__':
# main()
| mit | 4,769,587,025,275,831,000 | 5,212,855,346,009,674,000 | 32.763889 | 104 | 0.687513 | false |