repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
alistairlow/tensorflow | tensorflow/python/framework/dtypes_test.py | 10 | 14160 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.dtypes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
def _is_numeric_dtype_enum(datatype_enum):
non_numeric_dtypes = [types_pb2.DT_VARIANT,
types_pb2.DT_VARIANT_REF,
types_pb2.DT_INVALID,
types_pb2.DT_RESOURCE,
types_pb2.DT_RESOURCE_REF]
return datatype_enum not in non_numeric_dtypes
class TypesTest(test_util.TensorFlowTestCase):
def testAllTypesConstructible(self):
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
self.assertEqual(datatype_enum,
dtypes.DType(datatype_enum).as_datatype_enum)
def testAllTypesConvertibleToDType(self):
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
dt = dtypes.as_dtype(datatype_enum)
self.assertEqual(datatype_enum, dt.as_datatype_enum)
def testAllTypesConvertibleToNumpyDtype(self):
for datatype_enum in types_pb2.DataType.values():
if not _is_numeric_dtype_enum(datatype_enum):
continue
dtype = dtypes.as_dtype(datatype_enum)
numpy_dtype = dtype.as_numpy_dtype
_ = np.empty((1, 1, 1, 1), dtype=numpy_dtype)
if dtype.base_dtype != dtypes.bfloat16:
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
self.assertEqual(
dtypes.as_dtype(datatype_enum).base_dtype,
dtypes.as_dtype(numpy_dtype))
def testInvalid(self):
with self.assertRaises(TypeError):
dtypes.DType(types_pb2.DT_INVALID)
with self.assertRaises(TypeError):
dtypes.as_dtype(types_pb2.DT_INVALID)
def testNumpyConversion(self):
self.assertIs(dtypes.float32, dtypes.as_dtype(np.float32))
self.assertIs(dtypes.float64, dtypes.as_dtype(np.float64))
self.assertIs(dtypes.int32, dtypes.as_dtype(np.int32))
self.assertIs(dtypes.int64, dtypes.as_dtype(np.int64))
self.assertIs(dtypes.uint8, dtypes.as_dtype(np.uint8))
self.assertIs(dtypes.uint16, dtypes.as_dtype(np.uint16))
self.assertIs(dtypes.int16, dtypes.as_dtype(np.int16))
self.assertIs(dtypes.int8, dtypes.as_dtype(np.int8))
self.assertIs(dtypes.complex64, dtypes.as_dtype(np.complex64))
self.assertIs(dtypes.complex128, dtypes.as_dtype(np.complex128))
self.assertIs(dtypes.string, dtypes.as_dtype(np.object))
self.assertIs(dtypes.string,
dtypes.as_dtype(np.array(["foo", "bar"]).dtype))
self.assertIs(dtypes.bool, dtypes.as_dtype(np.bool))
with self.assertRaises(TypeError):
dtypes.as_dtype(np.dtype([("f1", np.uint), ("f2", np.int32)]))
def testRealDtype(self):
for dtype in [
dtypes.float32, dtypes.float64, dtypes.bool, dtypes.uint8, dtypes.int8,
dtypes.int16, dtypes.int32, dtypes.int64
]:
self.assertIs(dtype.real_dtype, dtype)
self.assertIs(dtypes.complex64.real_dtype, dtypes.float32)
self.assertIs(dtypes.complex128.real_dtype, dtypes.float64)
def testStringConversion(self):
self.assertIs(dtypes.float32, dtypes.as_dtype("float32"))
self.assertIs(dtypes.float64, dtypes.as_dtype("float64"))
self.assertIs(dtypes.int32, dtypes.as_dtype("int32"))
self.assertIs(dtypes.uint8, dtypes.as_dtype("uint8"))
self.assertIs(dtypes.uint16, dtypes.as_dtype("uint16"))
self.assertIs(dtypes.int16, dtypes.as_dtype("int16"))
self.assertIs(dtypes.int8, dtypes.as_dtype("int8"))
self.assertIs(dtypes.string, dtypes.as_dtype("string"))
self.assertIs(dtypes.complex64, dtypes.as_dtype("complex64"))
self.assertIs(dtypes.complex128, dtypes.as_dtype("complex128"))
self.assertIs(dtypes.int64, dtypes.as_dtype("int64"))
self.assertIs(dtypes.bool, dtypes.as_dtype("bool"))
self.assertIs(dtypes.qint8, dtypes.as_dtype("qint8"))
self.assertIs(dtypes.quint8, dtypes.as_dtype("quint8"))
self.assertIs(dtypes.qint32, dtypes.as_dtype("qint32"))
self.assertIs(dtypes.bfloat16, dtypes.as_dtype("bfloat16"))
self.assertIs(dtypes.float32_ref, dtypes.as_dtype("float32_ref"))
self.assertIs(dtypes.float64_ref, dtypes.as_dtype("float64_ref"))
self.assertIs(dtypes.int32_ref, dtypes.as_dtype("int32_ref"))
self.assertIs(dtypes.uint8_ref, dtypes.as_dtype("uint8_ref"))
self.assertIs(dtypes.int16_ref, dtypes.as_dtype("int16_ref"))
self.assertIs(dtypes.int8_ref, dtypes.as_dtype("int8_ref"))
self.assertIs(dtypes.string_ref, dtypes.as_dtype("string_ref"))
self.assertIs(dtypes.complex64_ref, dtypes.as_dtype("complex64_ref"))
self.assertIs(dtypes.complex128_ref, dtypes.as_dtype("complex128_ref"))
self.assertIs(dtypes.int64_ref, dtypes.as_dtype("int64_ref"))
self.assertIs(dtypes.bool_ref, dtypes.as_dtype("bool_ref"))
self.assertIs(dtypes.qint8_ref, dtypes.as_dtype("qint8_ref"))
self.assertIs(dtypes.quint8_ref, dtypes.as_dtype("quint8_ref"))
self.assertIs(dtypes.qint32_ref, dtypes.as_dtype("qint32_ref"))
self.assertIs(dtypes.bfloat16_ref, dtypes.as_dtype("bfloat16_ref"))
with self.assertRaises(TypeError):
dtypes.as_dtype("not_a_type")
def testDTypesHaveUniqueNames(self):
dtypez = []
names = set()
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
dtype = dtypes.as_dtype(datatype_enum)
dtypez.append(dtype)
names.add(dtype.name)
self.assertEqual(len(dtypez), len(names))
def testIsInteger(self):
self.assertEqual(dtypes.as_dtype("int8").is_integer, True)
self.assertEqual(dtypes.as_dtype("int16").is_integer, True)
self.assertEqual(dtypes.as_dtype("int32").is_integer, True)
self.assertEqual(dtypes.as_dtype("int64").is_integer, True)
self.assertEqual(dtypes.as_dtype("uint8").is_integer, True)
self.assertEqual(dtypes.as_dtype("uint16").is_integer, True)
self.assertEqual(dtypes.as_dtype("complex64").is_integer, False)
self.assertEqual(dtypes.as_dtype("complex128").is_integer, False)
self.assertEqual(dtypes.as_dtype("float").is_integer, False)
self.assertEqual(dtypes.as_dtype("double").is_integer, False)
self.assertEqual(dtypes.as_dtype("string").is_integer, False)
self.assertEqual(dtypes.as_dtype("bool").is_integer, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_integer, False)
self.assertEqual(dtypes.as_dtype("qint8").is_integer, False)
self.assertEqual(dtypes.as_dtype("qint16").is_integer, False)
self.assertEqual(dtypes.as_dtype("qint32").is_integer, False)
self.assertEqual(dtypes.as_dtype("quint8").is_integer, False)
self.assertEqual(dtypes.as_dtype("quint16").is_integer, False)
def testIsFloating(self):
self.assertEqual(dtypes.as_dtype("int8").is_floating, False)
self.assertEqual(dtypes.as_dtype("int16").is_floating, False)
self.assertEqual(dtypes.as_dtype("int32").is_floating, False)
self.assertEqual(dtypes.as_dtype("int64").is_floating, False)
self.assertEqual(dtypes.as_dtype("uint8").is_floating, False)
self.assertEqual(dtypes.as_dtype("uint16").is_floating, False)
self.assertEqual(dtypes.as_dtype("complex64").is_floating, False)
self.assertEqual(dtypes.as_dtype("complex128").is_floating, False)
self.assertEqual(dtypes.as_dtype("float32").is_floating, True)
self.assertEqual(dtypes.as_dtype("float64").is_floating, True)
self.assertEqual(dtypes.as_dtype("string").is_floating, False)
self.assertEqual(dtypes.as_dtype("bool").is_floating, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_integer, False)
self.assertEqual(dtypes.as_dtype("qint8").is_floating, False)
self.assertEqual(dtypes.as_dtype("qint16").is_floating, False)
self.assertEqual(dtypes.as_dtype("qint32").is_floating, False)
self.assertEqual(dtypes.as_dtype("quint8").is_floating, False)
self.assertEqual(dtypes.as_dtype("quint16").is_floating, False)
def testIsComplex(self):
self.assertEqual(dtypes.as_dtype("int8").is_complex, False)
self.assertEqual(dtypes.as_dtype("int16").is_complex, False)
self.assertEqual(dtypes.as_dtype("int32").is_complex, False)
self.assertEqual(dtypes.as_dtype("int64").is_complex, False)
self.assertEqual(dtypes.as_dtype("uint8").is_complex, False)
self.assertEqual(dtypes.as_dtype("uint16").is_complex, False)
self.assertEqual(dtypes.as_dtype("complex64").is_complex, True)
self.assertEqual(dtypes.as_dtype("complex128").is_complex, True)
self.assertEqual(dtypes.as_dtype("float32").is_complex, False)
self.assertEqual(dtypes.as_dtype("float64").is_complex, False)
self.assertEqual(dtypes.as_dtype("string").is_complex, False)
self.assertEqual(dtypes.as_dtype("bool").is_complex, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_complex, False)
self.assertEqual(dtypes.as_dtype("qint8").is_complex, False)
self.assertEqual(dtypes.as_dtype("qint16").is_complex, False)
self.assertEqual(dtypes.as_dtype("qint32").is_complex, False)
self.assertEqual(dtypes.as_dtype("quint8").is_complex, False)
self.assertEqual(dtypes.as_dtype("quint16").is_complex, False)
def testIsUnsigned(self):
self.assertEqual(dtypes.as_dtype("int8").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("int16").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("int32").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("int64").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("uint8").is_unsigned, True)
self.assertEqual(dtypes.as_dtype("uint16").is_unsigned, True)
self.assertEqual(dtypes.as_dtype("float32").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("float64").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("bool").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("string").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("complex64").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("complex128").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("bfloat16").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("qint8").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("qint16").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("qint32").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("quint8").is_unsigned, False)
self.assertEqual(dtypes.as_dtype("quint16").is_unsigned, False)
def testMinMax(self):
# make sure min/max evaluates for all data types that have min/max
for datatype_enum in types_pb2.DataType.values():
if not _is_numeric_dtype_enum(datatype_enum):
continue
dtype = dtypes.as_dtype(datatype_enum)
numpy_dtype = dtype.as_numpy_dtype
# ignore types for which there are no minimum/maximum (or we cannot
# compute it, such as for the q* types)
if (dtype.is_quantized or dtype.base_dtype == dtypes.bool or
dtype.base_dtype == dtypes.string or
dtype.base_dtype == dtypes.complex64 or
dtype.base_dtype == dtypes.complex128):
continue
print("%s: %s - %s" % (dtype, dtype.min, dtype.max))
# check some values that are known
if numpy_dtype == np.bool_:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 1)
if numpy_dtype == np.int8:
self.assertEquals(dtype.min, -128)
self.assertEquals(dtype.max, 127)
if numpy_dtype == np.int16:
self.assertEquals(dtype.min, -32768)
self.assertEquals(dtype.max, 32767)
if numpy_dtype == np.int32:
self.assertEquals(dtype.min, -2147483648)
self.assertEquals(dtype.max, 2147483647)
if numpy_dtype == np.int64:
self.assertEquals(dtype.min, -9223372036854775808)
self.assertEquals(dtype.max, 9223372036854775807)
if numpy_dtype == np.uint8:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 255)
if numpy_dtype == np.uint16:
if dtype == dtypes.uint16:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 65535)
elif dtype == dtypes.bfloat16:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 4294967295)
if numpy_dtype == np.uint32:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 4294967295)
if numpy_dtype == np.uint64:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 18446744073709551615)
if numpy_dtype in (np.float16, np.float32, np.float64):
self.assertEquals(dtype.min, np.finfo(numpy_dtype).min)
self.assertEquals(dtype.max, np.finfo(numpy_dtype).max)
def testRepr(self):
for enum, name in dtypes._TYPE_TO_STRING.items():
if enum > 100:
continue
dtype = dtypes.DType(enum)
self.assertEquals(repr(dtype), "tf." + name)
import tensorflow as tf
dtype2 = eval(repr(dtype))
self.assertEquals(type(dtype2), dtypes.DType)
self.assertEquals(dtype, dtype2)
def testEqWithNonTFTypes(self):
self.assertNotEqual(dtypes.int32, int)
self.assertNotEqual(dtypes.float64, 2.1)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
basicthinker/Sexain-MemController | gem5-stable/src/dev/BadDevice.py | 69 | 1789 | # Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.params import *
from Device import BasicPioDevice
class BadDevice(BasicPioDevice):
type = 'BadDevice'
cxx_header = "dev/baddev.hh"
devicename = Param.String("Name of device to error on")
| apache-2.0 |
isolver/MarkWrite | distribution/MarkWrite/runapp.py | 2 | 1605 | # -*- coding: utf-8 -*-
from __future__ import division
#
# This file is part of the open-source MarkWrite application.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import markwrite
import sys
if sys.platform == 'win32':
# Work around so that MarkWrite app icon is also used as task bar icon.
# http://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105
import ctypes
myappid = u'isolver.markwrite.editor.version' # arbitrary string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
import pyqtgraph as pg
from markwrite.gui.mainwin import MarkWriteMainWindow
# Set the plot view background and foreground colors from MarkWrite settings
pg.setConfigOption('background', markwrite.SETTINGS['plotviews_background_color'])
pg.setConfigOption('foreground', markwrite.SETTINGS['plotviews_foreground_color'])
wmwin = MarkWriteMainWindow(markwrite.app)
MarkWriteMainWindow._appdirs = markwrite.appdirs
wmwin.show()
status = markwrite.app.exec_()
| gpl-3.0 |
kmmartins/xbmc | lib/libUPnP/Neptune/Extras/Tools/Logging/NeptuneLogConsole.py | 265 | 2923 | #!/usr/bin/env python
from socket import *
from optparse import OptionParser
UDP_ADDR = "0.0.0.0"
UDP_PORT = 7724
BUFFER_SIZE = 65536
#HEADER_KEYS = ['Logger', 'Level', 'Source-File', 'Source-Function', 'Source-Line', 'TimeStamp']
HEADER_KEYS = {
'mini': ('Level',),
'standard': ('Logger', 'Level', 'Source-Function'),
'long': ('Logger', 'Level', 'Source-File', 'Source-Line', 'Source-Function'),
'all': ('Logger', 'Level', 'Source-File', 'Source-Line', 'Source-Function', 'TimeStamp'),
'custom': ()
}
Senders = {}
class LogRecord:
def __init__(self, data):
offset = 0
self.headers = {}
for line in data.split("\r\n"):
offset += len(line)+2
if ':' not in line: break
key,value=line.split(":",1)
self.headers[key] = value.strip()
self.body = data[offset:]
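# Example (hypothetical datagram): a payload such as
#   "Logger: audio\r\nLevel: INFO\r\nSource-Line: 42\r\n\r\ndecoder started"
# parses into headers {'Logger': 'audio', 'Level': 'INFO', 'Source-Line': '42'}
# and body "decoder started"; the empty line ends the header block.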
def __getitem__(self, index):
return self.headers[index]
def format(self, sender_index, keys):
parts = ['['+str(sender_index)+']']
if 'Level' in keys:
parts.append('['+self.headers['Level']+']')
if 'Logger' in keys:
parts.append(self.headers['Logger'])
if 'TimeStamp' in keys:
parts.append(self.headers['TimeStamp'])
if 'Source-File' in keys:
if 'Source-Line' in keys:
parts.append(self.headers['Source-File']+':'+self.headers['Source-Line'])
else:
parts.append(self.headers['Source-File'])
if 'Source-Function' in keys:
parts.append(self.headers['Source-Function'])
parts.append(self.body)
return ' '.join(parts)
class Listener:
def __init__(self, format='standard', port=UDP_PORT):
self.socket = socket(AF_INET,SOCK_DGRAM)
self.socket.bind((UDP_ADDR, port))
self.format_keys = HEADER_KEYS[format]
def listen(self):
while True:
data,addr = self.socket.recvfrom(BUFFER_SIZE)
sender_index = len(Senders.keys())
if addr in Senders:
sender_index = Senders[addr]
else:
print "### NEW SENDER:", addr
Senders[addr] = sender_index
record = LogRecord(data)
print record.format(sender_index, self.format_keys)
### main
parser = OptionParser(usage="%prog [options]")
parser.add_option("-p", "--port", dest="port", help="port number to listen on", type="int", default=UDP_PORT)
parser.add_option("-f", "--format", dest="format", help="log format (mini, standard, long, or all)", choices=('mini', 'standard', 'long', 'all'), default='standard')
(options, args) = parser.parse_args()
print "Listening on port", options.port
l = Listener(format=options.format, port=options.port)
l.listen()
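# Example invocation (assumed file name): listen on a custom port using the
# 'long' header format:
#   python NeptuneLogConsole.py --port 9999 --format long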
| gpl-2.0 |
imgmix/django-registration | registration/backends/simple/urls.py | 18 | 1601 | """
URLconf for registration and activation, using django-registration's
one-step backend.
If the default behavior of these views is acceptable to you, simply
use a line like this in your root URLconf to set up the default URLs
for registration::
(r'^accounts/', include('registration.backends.simple.urls')),
This will also automatically set up the views in
``django.contrib.auth`` at sensible default locations.
If you'd like to customize registration behavior, feel free to set up
your own URL patterns for these views instead.
"""
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls import url
from django.conf import settings
from django.views.generic.base import TemplateView
from .views import RegistrationView
urlpatterns = patterns('',
url(r'^register/closed/$',
TemplateView.as_view(template_name='registration/registration_closed.html'),
name='registration_disallowed'),
url(r'^register/complete/$',
TemplateView.as_view(template_name='registration/registration_complete.html'),
name='registration_complete'),
)
if getattr(settings, 'INCLUDE_REGISTER_URL', True):
urlpatterns += patterns('',
url(r'^register/$',
RegistrationView.as_view(),
name='registration_register'),
)
if getattr(settings, 'INCLUDE_AUTH_URLS', True):
urlpatterns += patterns('',
(r'', include('registration.auth_urls')),
)
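# A minimal customization sketch (assumed, not part of this module): instead of
# including these patterns, a project can route straight to its own subclass of
# RegistrationView.
#
#   class MyRegistrationView(RegistrationView):
#       def get_success_url(self, request, user):
#           return '/welcome/'  # hypothetical landing page
#
#   urlpatterns = patterns('',
#       url(r'^register/$', MyRegistrationView.as_view(),
#           name='registration_register'),
#   )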
| bsd-3-clause |
ThoriumGroup/thorium | setup.py | 1 | 7980 | #!/usr/bin/env python
"""
setup.py
========
This is as generic a setup.py template as possible. The goal is to retrieve almost
all of the information from the main module file, rather than relying on values
explicitly entered here.
## Usage
This setup.py script needs to modified in the following ways:
- `MAIN_FILE` needs to be pointed at the main metadata file; this can be done
easily by modifying the second arg.
- `setup` kwargs need to be modified:
- `classifiers` needs to be modified to suit your project.
- `keywords` needs to be modified to suit your project.
- If you have files that need to be included (such as `LICENSE`), you need to
create a MANIFEST.in file and `include FILENAME` them.
Other than that, all the metadata should live in your main file, just like
the example below.
## Metadata Example
The following should be placed in your project module's __init__.py file:
::
__author__ = "Ivan Busquets"
__author_email__ = "[email protected]"
__copyright__ = "Copyright 2011, Ivan Busquets"
__credits__ = ["Ivan Busquets", "Sean Wallitsch", ]
__license__ = "MIT"
__version__ = "1.2"
__maintainer__ = "Sean Wallitsch"
__maintainer_email__ = "[email protected]"
__module_name__ = "animatedSnap3D"
__short_desc__ = "An extension to Nuke's 'snap' options for animated verts"
__status__ = "Development"
__url__ = 'http://github.com/ThoriumGroup/animatedSnap3D'
Note: At this time `credits` is unused.
"""
# ==============================================================================
# IMPORTS
# ==============================================================================
from setuptools import setup, find_packages
import codecs
import os
import re
# ==============================================================================
# GLOBALS
# ==============================================================================
HERE = os.path.abspath(os.path.dirname(__file__))
MAIN_FILE = os.path.join(HERE, 'thorium', '__init__.py')
# Get the long description from the relevant file
with codecs.open('README.rst', encoding='utf-8') as readme_file:
LONG_DESCRIPTION = readme_file.read()
# ==============================================================================
# PRIVATE FUNCTIONS
# ==============================================================================
def _find_metadata(filepath):
"""Reads all the metadata from a source file by opening manually.
Why open and read it and not import?
https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion
Args:
filepath : (str)
Filepath to the file containing the metadata.
Returns:
{str: str}
Dictionary with metadata keys and values.
Raises:
RuntimeError
Cannot proceed if version or module_name not found
"""
# Open in Latin-1 so that we avoid encoding errors.
# Use codecs.open for Python 2 compatibility
with codecs.open(filepath, 'r', 'latin1') as meta_file:
metadata_file = meta_file.read()
metadata = {}
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
author_match = re.search(r"^__author__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
author_email_match = re.search(r"^__author_email__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
copyright_match = re.search(r"^__copyright__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
credits_match = re.search(r"^__credits__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
license_match = re.search(r"^__license__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
maint_match = re.search(r"^__maintainer__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
maint_email_match = re.search(r"^__maintainer_email__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
module_name_match = re.search(r"^__module_name__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
short_desc_match = re.search(r"^__short_desc__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
status_match = re.search(r"^__status__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
url_match = re.search(r"^__url__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
if not version_match or not module_name_match:
raise RuntimeError("Unable to find version or module_name string.")
if author_match:
metadata['author'] = author_match.group(1)
if author_email_match:
metadata['author_email'] = author_email_match.group(1)
if copyright_match:
metadata['copyright'] = copyright_match.group(1)
if credits_match:
metadata['credits'] = credits_match.group(1)
if license_match:
metadata['license'] = license_match.group(1)
if maint_match:
metadata['maintainer'] = maint_match.group(1)
if maint_email_match:
metadata['maintainer_email'] = maint_email_match.group(1)
if module_name_match:
metadata['module_name'] = module_name_match.group(1)
if short_desc_match:
metadata['short_desc'] = short_desc_match.group(1)
if status_match:
metadata['status'] = status_match.group(1)
if version_match:
metadata['version'] = version_match.group(1)
if url_match:
metadata['url'] = url_match.group(1)
return metadata
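# Example (hypothetical input): a module file containing only
#   __module_name__ = "thorium"
#   __version__ = "1.0"
# would make _find_metadata() return {'module_name': 'thorium', 'version': '1.0'}.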
# ==============================================================================
# MAIN
# ==============================================================================
metadata = _find_metadata(MAIN_FILE)
setup(
name=metadata['module_name'],
version=metadata['version'],
description=metadata.get('short_desc', ''),
long_description=LONG_DESCRIPTION,
# The project URL.
url=metadata.get('url', ''),
# Author & Maintainer details
author=metadata.get('author', ''),
author_email=metadata.get('author_email', ''),
maintainer=metadata.get('maintainer', ''),
maintainer_email=metadata.get('maintainer_email', ''),
# Choose your license
license=metadata.get('license', ''),
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Topic :: Multimedia :: Graphics',
'Topic :: Multimedia :: Video',
'Topic :: Software Development :: Libraries :: Python Modules',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython',
# OS
'Operating System :: OS Independent',
# Language
'Natural Language :: English',
],
# What does your project relate to?
keywords='film tv color vfx nuke',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=find_packages(exclude=['tests']),
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={},
include_package_data=True,
# Targeted OS
platforms='any',
)
| mit |
Ironarcher/casso-backend | lib/flask/module.py | 850 | 1363 | # -*- coding: utf-8 -*-
"""
flask.module
~~~~~~~~~~~~
Implements a class that represents module blueprints.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
from .blueprints import Blueprint
def blueprint_is_module(bp):
"""Used to figure out if something is actually a module"""
return isinstance(bp, Module)
class Module(Blueprint):
"""Deprecated module support. Until Flask 0.6 modules were a different
name of the concept now available as blueprints in Flask. They are
essentially doing the same but have some bad semantics for templates and
static files that were fixed with blueprints.
.. versionchanged:: 0.7
Modules were deprecated in favor for blueprints.
"""
def __init__(self, import_name, name=None, url_prefix=None,
static_path=None, subdomain=None):
if name is None:
assert '.' in import_name, 'name required if package name ' \
'does not point to a submodule'
name = import_name.rsplit('.', 1)[1]
Blueprint.__init__(self, name, import_name, url_prefix=url_prefix,
subdomain=subdomain, template_folder='templates')
if os.path.isdir(os.path.join(self.root_path, 'static')):
self._static_folder = 'static'
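# A minimal usage sketch (assumed, pre-0.7 style API): a Module was created and
# registered much like a blueprint is today.
#
#   admin = Module('myapp.admin', name='admin', url_prefix='/admin')
#
#   @admin.route('/')
#   def index():
#       return 'admin index'
#
#   app.register_module(admin)  # register_module on the app is assumed here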
| apache-2.0 |
geekboxzone/lollipop_external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/layout_tests/port/config.py | 68 | 2993 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# FIXME: Remove this file altogether. It's useless in a Blink checkout.
import logging
from webkitpy.common import webkit_finder
_log = logging.getLogger(__name__)
class Config(object):
_FLAGS_FROM_CONFIGURATIONS = {
"Debug": "--debug",
"Release": "--release",
}
def __init__(self, executive, filesystem, port_implementation=None):
self._executive = executive
self._filesystem = filesystem
self._webkit_finder = webkit_finder.WebKitFinder(self._filesystem)
self._default_configuration = None
self._build_directories = {}
self._port_implementation = port_implementation
def build_directory(self, configuration):
"""Returns the path to the build directory for the configuration."""
if configuration:
flags = ["--configuration", self.flag_for_configuration(configuration)]
else:
configuration = ""
flags = []
if self._port_implementation:
flags.append('--' + self._port_implementation)
if not self._build_directories.get(configuration):
self._build_directories[configuration] = self._webkit_finder.path_from_webkit_base('out', configuration)
return self._build_directories[configuration]
def flag_for_configuration(self, configuration):
return self._FLAGS_FROM_CONFIGURATIONS[configuration]
def default_configuration(self):
return 'Release'
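# Example (assumed paths): with the defaults above,
#   Config(executive, filesystem).build_directory('Release')
# resolves to '<webkit-base>/out/Release', and
#   Config(executive, filesystem).flag_for_configuration('Debug')
# returns '--debug'.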
| bsd-3-clause |
vadimtk/chrome4sdp | build/android/gyp/util/build_device.py | 2 | 3283 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" A simple device interface for build steps.
"""
import logging
import os
import re
import sys
from util import build_utils
BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
sys.path.append(BUILD_ANDROID_DIR)
from pylib.device import adb_wrapper
from pylib.device import device_errors
from pylib.device import device_utils
def GetAttachedDevices():
return [a.GetDeviceSerial()
for a in adb_wrapper.AdbWrapper.Devices()]
class BuildDevice(object):
def __init__(self, configuration):
self.id = configuration['id']
self.description = configuration['description']
self.install_metadata = configuration['install_metadata']
self.device = device_utils.DeviceUtils(self.id)
def RunShellCommand(self, *args, **kwargs):
return self.device.RunShellCommand(*args, **kwargs)
def PushChangedFiles(self, *args, **kwargs):
return self.device.PushChangedFiles(*args, **kwargs)
def GetSerialNumber(self):
return self.id
def Install(self, *args, **kwargs):
return self.device.Install(*args, **kwargs)
def InstallSplitApk(self, *args, **kwargs):
return self.device.InstallSplitApk(*args, **kwargs)
def GetInstallMetadata(self, apk_package):
"""Gets the metadata on the device for the apk_package apk."""
# Matches lines like:
# -rw-r--r-- system system 7376582 2013-04-19 16:34 \
# org.chromium.chrome.shell.apk
# -rw-r--r-- system system 7376582 2013-04-19 16:34 \
# org.chromium.chrome.shell-1.apk
apk_matcher = lambda s: re.match('.*%s(-[0-9]*)?.apk$' % apk_package, s)
matches = filter(apk_matcher, self.install_metadata)
return matches[0] if matches else None
def GetConfigurationForDevice(device_id):
device = device_utils.DeviceUtils(device_id)
configuration = None
has_root = False
is_online = device.IsOnline()
if is_online:
cmd = 'ls -l /data/app; getprop ro.build.description'
cmd_output = device.RunShellCommand(cmd)
has_root = not 'Permission denied' in cmd_output[0]
if not has_root:
# Disable warning log messages from EnableRoot()
logging.getLogger().disabled = True
try:
device.EnableRoot()
has_root = True
except device_errors.CommandFailedError:
has_root = False
finally:
logging.getLogger().disabled = False
cmd_output = device.RunShellCommand(cmd)
configuration = {
'id': device_id,
'description': cmd_output[-1],
'install_metadata': cmd_output[:-1],
}
return configuration, is_online, has_root
def WriteConfigurations(configurations, path):
# Currently we only support installing to the first device.
build_utils.WriteJson(configurations[:1], path, only_if_changed=True)
def ReadConfigurations(path):
return build_utils.ReadJson(path)
def GetBuildDevice(configurations):
assert len(configurations) == 1
return BuildDevice(configurations[0])
def GetBuildDeviceFromPath(path):
configurations = ReadConfigurations(path)
if len(configurations) > 0:
return GetBuildDevice(ReadConfigurations(path))
return None
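# Typical build-step flow (sketch; the serial number and file name are
# hypothetical):
#   config, is_online, has_root = GetConfigurationForDevice('0123456789abcdef')
#   WriteConfigurations([config], 'build_device_config.json')
#   device = GetBuildDeviceFromPath('build_device_config.json')
#   if device:
#       device.RunShellCommand(['getprop', 'ro.build.description'])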
| bsd-3-clause |
Fudan-University/sakai | reference/library/src/webapp/editor/FCKeditor/editor/filemanager/connectors/py/connector.py | 126 | 4239 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorConnector( FCKeditorConnectorBase,
GetFoldersCommandMixin,
GetFoldersAndFilesCommandMixin,
CreateFolderCommandMixin,
UploadFileCommandMixin,
BaseHttpMixin, BaseXmlMixin, BaseHtmlMixin ):
"The Standard connector class."
def doResponse(self):
"Main function. Process the request, set headers and return a string as response."
s = ""
# Check if this connector is disabled
if not(Config.Enabled):
return self.sendError(1, "This connector is disabled. Please check the connector configurations in \"editor/filemanager/connectors/py/config.py\" and try again.")
# Make sure we have valid inputs
for key in ("Command","Type","CurrentFolder"):
if not self.request.has_key (key):
return
# Get command, resource type and current folder
command = self.request.get("Command")
resourceType = self.request.get("Type")
currentFolder = getCurrentFolder(self.request.get("CurrentFolder"))
# Check for invalid paths
if currentFolder is None:
if (command == "FileUpload"):
return self.sendUploadResults( errorNo = 102, customMsg = "" )
else:
return self.sendError(102, "")
# Check if it is an allowed command
if ( not command in Config.ConfigAllowedCommands ):
return self.sendError( 1, 'The %s command isn\'t allowed' % command )
if ( not resourceType in Config.ConfigAllowedTypes ):
return self.sendError( 1, 'Invalid type specified' )
# Setup paths
if command == "QuickUpload":
self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
else:
self.userFilesFolder = Config.FileTypesAbsolutePath[resourceType]
self.webUserFilesFolder = Config.FileTypesPath[resourceType]
if not self.userFilesFolder: # no absolute path given (dangerous...)
self.userFilesFolder = mapServerPath(self.environ,
self.webUserFilesFolder)
# Ensure that the directory exists.
if not os.path.exists(self.userFilesFolder):
try:
self.createServerFolder( self.userFilesFolder )
except:
return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
# File upload doesn't have to return XML, so intercept here
if (command == "FileUpload"):
return self.uploadFile(resourceType, currentFolder)
# Create Url
url = combinePaths( self.webUserFilesFolder, currentFolder )
# Begin XML
s += self.createXmlHeader(command, resourceType, currentFolder, url)
# Execute the command
selector = {"GetFolders": self.getFolders,
"GetFoldersAndFiles": self.getFoldersAndFiles,
"CreateFolder": self.createFolder,
}
s += selector[command](resourceType, currentFolder)
s += self.createXmlFooter()
return s
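# Example request (assumed query string): a GET such as
#   connector.py?Command=GetFoldersAndFiles&Type=Image&CurrentFolder=/
# returns an XML folder/file listing, while Command=FileUpload is intercepted
# above and returns an HTML upload result instead of XML.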
# Running from command line (plain old CGI)
if __name__ == '__main__':
try:
# Create a Connector Instance
conn = FCKeditorConnector()
data = conn.doResponse()
for header in conn.headers:
print '%s: %s' % header
print
print data
except:
print "Content-Type: text/plain"
print
import cgi
cgi.print_exception()
| apache-2.0 |
ChakshuGautam/coursera-dl | coursera/test/test_parsing.py | 15 | 3627 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test functionality of coursera module.
"""
import json
import os.path
import pytest
from six import iteritems
from mock import patch, Mock, mock_open
from coursera import coursera_dl
# JSon Handling
@pytest.fixture
def get_page(monkeypatch):
monkeypatch.setattr(coursera_dl, 'get_page', Mock())
@pytest.fixture
def json_path():
return os.path.join(os.path.dirname(__file__), "fixtures", "json")
def test_that_should_not_dl_if_file_exist(get_page, json_path):
coursera_dl.get_page = Mock()
coursera_dl.download_about(object(), "matrix-002", json_path)
assert coursera_dl.get_page.called is False
def test_that_we_parse_and_write_json_correctly(get_page, json_path):
unprocessed_json = os.path.join(os.path.dirname(__file__),
"fixtures", "json", "unprocessed.json")
raw_data = open(unprocessed_json).read()
coursera_dl.get_page = lambda x, y: raw_data
open_mock = mock_open()
with patch('coursera.coursera_dl.open', open_mock, create=True):
coursera_dl.download_about(object(), "networksonline-002", json_path)
about_json = os.path.join(json_path, 'networksonline-002-about.json')
open_mock.assert_called_once_with(about_json, 'w')
data = json.loads(open_mock().write.call_args[0][0])
assert data['id'] == 394
assert data['shortName'] == 'networksonline'
# Test Syllabus Parsing
@pytest.fixture
def get_video(monkeypatch):
"""
Mock some methods that would otherwise repeatedly create many web
requests.
More specifically, we mock:
* the search for hidden videos
* the actual download of videos
"""
# Mock coursera_dl.grab_hidden_video_url
monkeypatch.setattr(coursera_dl, 'grab_hidden_video_url',
lambda session, href: None)
# Mock coursera_dl.get_video
monkeypatch.setattr(coursera_dl, 'get_video',
lambda session, href: None)
@pytest.mark.parametrize(
"filename,num_sections,num_lectures,num_resources,num_videos", [
("regular-syllabus.html", 23, 102, 502, 102),
("links-to-wikipedia.html", 5, 37, 158, 36),
("preview.html", 20, 106, 106, 106),
("sections-not-to-be-missed.html", 9, 61, 224, 61),
("sections-not-to-be-missed-2.html", 20, 121, 397, 121),
("parsing-datasci-001-with-bs4.html", 10, 97, 358, 97), # issue 134
("parsing-startup-001-with-bs4.html", 4, 44, 136, 44), # issue 137
("parsing-wealthofnations-001-with-bs4.html", 8, 74, 296, 74), # issue 131
("parsing-malsoftware-001-with-bs4.html", 3, 18, 56, 16), # issue 148
("multiple-resources-with-the-same-format.html", 18, 97, 478, 97),
]
)
def test_parse(get_video, filename, num_sections, num_lectures, num_resources, num_videos):
filename = os.path.join(os.path.dirname(__file__), "fixtures", "html",
filename)
with open(filename) as syllabus:
syllabus_page = syllabus.read()
sections = coursera_dl.parse_syllabus(None, syllabus_page, None)
# section count
assert len(sections) == num_sections
# lecture count
lectures = [lec for sec in sections for lec in sec[1]]
assert len(lectures) == num_lectures
# resource count
resources = [(res[0], len(res[1]))
for lec in lectures for res in iteritems(lec[1])]
assert sum(r for f, r in resources) == num_resources
# mp4 count
assert sum(r for f, r in resources if f == "mp4") == num_videos
| lgpl-3.0 |
paulscherrerinstitute/snapshot | snapshot/gui/snapshot_gui.py | 1 | 15841 | #!/usr/bin/env python
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
import datetime
import json
import os
import sys
from PyQt5 import QtCore
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtWidgets import (
QAction,
QApplication,
QCheckBox,
QDialog,
QFormLayout,
QLabel,
QMainWindow,
QMenu,
QMessageBox,
QPlainTextEdit,
QSplitter,
QStatusBar,
QVBoxLayout,
QWidget,
)
from snapshot.ca_core import Snapshot
from snapshot.core import (
SnapshotError,
background_workers,
enable_tracing,
global_thread_pool,
since_start,
)
from snapshot.parser import ReqParseError, get_save_files, initialize_config
from .compare import SnapshotCompareWidget
from .restore import SnapshotRestoreWidget
from .save import SnapshotSaveWidget
from .utils import DetailedMsgBox, SnapshotConfigureDialog, make_separator
class SnapshotGui(QMainWindow):
"""
Main GUI class for the Snapshot application. It needs a separate working
thread where the core of the application is running.
"""
def __init__(self, config: dict = {}, parent=None):
"""
:param config: application settings
:param parent: parent QtObject
:return:
"""
QMainWindow.__init__(self, parent)
self.resize(1500, 850)
if not config or config['config_ok'] is False:
msg = "Loading configuration file failed! " \
"Do you want to continue without it?\n"
msg_window = DetailedMsgBox(msg, config['config_error'], 'Warning')
reply = msg_window.exec_()
if reply == QMessageBox.No:
QTimer.singleShot(0, lambda: self.close())
return
self.common_settings = config
if not config['req_file_path'] or not config['macros_ok']:
req_file_macros = config['req_file_macros']
req_file_path = config['req_file_path']
init_path = config['init_path']
configure_dialog = \
SnapshotConfigureDialog(self,
init_macros=req_file_macros,
init_path=os.path.join(init_path,
req_file_path))
configure_dialog.accepted.connect(self.set_request_file)
if configure_dialog.exec_() == QDialog.Rejected:
QTimer.singleShot(0, lambda: self.close())
return
# Before creating GUI, snapshot must be initialized.
self.snapshot = Snapshot()
# Create main GUI components:
# menu bar
# ______________________________
# | save_widget | restore_widget |
# | | |
# | autorefresh | |
# --------------------------------
# | compare_widget |
# --------------------------------
# | sts_log |
# ______________________________
# status_bar
#
# menu bar
menu_bar = self.menuBar()
file_menu = QMenu("File", menu_bar)
open_new_req_file_action = QAction("Open", file_menu)
open_new_req_file_action.setMenuRole(QAction.NoRole)
open_new_req_file_action.triggered.connect(self.open_new_req_file)
file_menu.addAction(open_new_req_file_action)
quit_action = QAction("Quit", file_menu)
quit_action.setMenuRole(QAction.NoRole)
quit_action.triggered.connect(self.close)
file_menu.addAction(quit_action)
menu_bar.addMenu(file_menu)
# Status components are needed by other GUI elements
self.status_log = SnapshotStatusLog(self)
self.common_settings["sts_log"] = self.status_log
self.status_bar = SnapshotStatus(self.common_settings, self)
self.common_settings["sts_info"] = self.status_bar
# Create status log show/hide control and add it to status bar
self.show_log_control = QCheckBox("Show status log")
self.show_log_control.setStyleSheet("background-color: transparent")
self.show_log_control.stateChanged.connect(self.status_log.setVisible)
self.status_log.setVisible(False)
self.status_bar.addPermanentWidget(self.show_log_control)
# Creating main layout
# Compare widget. Must be updated in case of file selection
self.compare_widget = SnapshotCompareWidget(self.snapshot,
self.common_settings, self)
self.compare_widget.pvs_filtered.connect(self.handle_pvs_filtered)
self.compare_widget.restore_requested.connect(
self._handle_restore_request)
self.save_widget = SnapshotSaveWidget(self.snapshot,
self.common_settings, self)
self.restore_widget = SnapshotRestoreWidget(self.snapshot,
self.common_settings, self)
self.restore_widget.files_updated.connect(self.handle_files_updated)
self.restore_widget.files_selected.connect(self.handle_selected_files)
self.save_widget.saved.connect(self.restore_widget.rebuild_file_list)
self.autorefresh = QCheckBox("Periodic PV update")
self.autorefresh.setChecked(True)
self.autorefresh.toggled.connect(self.toggle_autorefresh)
left_layout = QVBoxLayout()
left_layout.addWidget(self.save_widget)
left_layout.addStretch()
left_layout.addWidget(make_separator(self, 'horizontal'))
left_layout.addWidget(self.autorefresh)
left_widget = QWidget()
left_widget.setLayout(left_layout)
sr_splitter = QSplitter(self)
sr_splitter.addWidget(left_widget)
sr_splitter.addWidget(self.restore_widget)
sr_splitter.setStretchFactor(0, 1)
sr_splitter.setStretchFactor(1, 2)
main_splitter = QSplitter(self)
main_splitter.addWidget(sr_splitter)
main_splitter.addWidget(self.compare_widget)
main_splitter.addWidget(self.status_log)
main_splitter.setOrientation(Qt.Vertical)
main_splitter.setStretchFactor(0, 1)
main_splitter.setStretchFactor(1, 3)
# Set default widget and add status bar
self.setCentralWidget(main_splitter)
self.setStatusBar(self.status_bar)
# Show GUI and manage window properties
self.show()
self.setWindowTitle(
os.path.basename(self.common_settings["req_file_path"]) + ' - Snapshot')
# Status log default height should be 100px; set it with splitter methods
widgets_sizes = main_splitter.sizes()
widgets_sizes[main_splitter.indexOf(self.status_log)] = 100
main_splitter.setSizes(widgets_sizes)
# Schedule opening the request file for after the GUI is shown.
QTimer.singleShot(
100,
lambda: self.change_req_file(
self.common_settings['req_file_path'],
self.common_settings['req_file_macros'],))
def toggle_autorefresh(self, checked):
if checked:
background_workers.resume_one('pv_updater')
else:
background_workers.suspend_one('pv_updater')
def open_new_req_file(self):
configure_dialog = SnapshotConfigureDialog(self, init_path=self.common_settings['req_file_path'],
init_macros=self.common_settings['req_file_macros'])
configure_dialog.accepted.connect(self.change_req_file)
configure_dialog.exec_() # Do not act on rejected
def change_req_file(self, req_file_path, macros):
background_workers.suspend()
self.status_bar.set_status("Loading new request file ...", 0, "orange")
self.set_request_file(req_file_path, macros)
save_dir = self.common_settings['save_dir']
# Read snapshots and instantiate PVs in parallel
def getfiles(*args):
return get_save_files(*args)
future_files = global_thread_pool.submit(getfiles, save_dir,
req_file_path)
self.init_snapshot(req_file_path, macros)
if self.common_settings['save_dir'] == save_dir:
already_parsed_files = future_files.result()
else:
# Apparently init_snapshot() found that the request file was
# invalid, the save_dir changed, and we need to junk the
# already read snapfiles.
future_files.cancel()
already_parsed_files = get_save_files(
self.common_settings['save_dir'],
self.common_settings['req_file_path'])
# handle all gui components
self.restore_widget.handle_new_snapshot_instance(self.snapshot,
already_parsed_files)
self.save_widget.handle_new_snapshot_instance(self.snapshot)
self.compare_widget.handle_new_snapshot_instance(self.snapshot)
self.setWindowTitle(os.path.basename(req_file_path) + ' - Snapshot')
self.status_bar.set_status("New request file loaded.", 3000, "#64C864")
background_workers.resume()
since_start("GUI processing finished")
def set_request_file(self, path: str, macros: dict):
self.common_settings["req_file_path"] = path
self.common_settings["req_file_macros"] = macros
if not self.common_settings['save_dir']:
self.common_settings['save_dir'] = os.path.dirname(path)
def init_snapshot(self, req_file_path, req_macros=None):
self.snapshot.clear_pvs()
req_macros = req_macros or {}
reopen_config = False
try:
self.snapshot = Snapshot(req_file_path, req_macros)
self.set_request_file(req_file_path, req_macros)
except (ReqParseError, OSError) as e:
msg = 'Request file cannot be loaded. ' \
'See details for type of error.'
msg_window = DetailedMsgBox(msg, str(e), 'Warning', self,
QMessageBox.Ok)
msg_window.exec_()
reopen_config = True
except SnapshotError as e:
QMessageBox.warning(
self,
"Warning",
str(e),
QMessageBox.Ok,
QMessageBox.NoButton)
reopen_config = True
if reopen_config:
configure_dialog = SnapshotConfigureDialog(
self, init_path=req_file_path, init_macros=req_macros)
configure_dialog.accepted.connect(self.init_snapshot)
if configure_dialog.exec_() == QDialog.Rejected:
self.close()
# Merge request file metadata into common settings, replacing existing
# settings.
# TODO Labels and filters are only overridden if given in the request
# file, for backwards compatibility with config files. After config
# files are out of use, change this to always override old values.
req_labels = self.snapshot.req_file_metadata.get('labels', {})
if req_labels:
self.common_settings['force_default_labels'] = \
req_labels.get('force_default_labels', False)
self.common_settings['default_labels'] = \
req_labels.get('labels', [])
req_filters = self.snapshot.req_file_metadata.get('filters', {})
if req_filters:
filters = self.common_settings['predefined_filters']
for fltype in ('filters', 'rgx-filters'):
filters[fltype] = req_filters.get(fltype, [])
self.common_settings['machine_params'] = \
self.snapshot.req_file_metadata.get('machine_params', {})
# Metadata to be filled from snapshot files.
self.common_settings['existing_labels'] = []
self.common_settings['existing_params'] = []
def handle_files_updated(self):
self.save_widget.update_labels()
self.compare_widget.clear_snap_files()
def handle_selected_files(self, selected_files):
# selected_files is a dict() with file names as keywords and
# dict() of pv data as value
self.compare_widget.new_selected_files(selected_files)
def _handle_restore_request(self, pvs_list):
self.restore_widget.do_restore(pvs_list)
def handle_pvs_filtered(self, pv_names_set):
# Yes, this merely sets the reference to the set of names, so
# technically, it needn't be done every time. But good luck tracking
# down who updated the list without this ;)
self.restore_widget.filtered_pvs = pv_names_set
# -------- Status widgets -----------
class SnapshotStatusLog(QWidget):
""" Command line like logger widget """
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.sts_log = QPlainTextEdit(self)
self.sts_log.setReadOnly(True)
layout = QVBoxLayout()
layout.setContentsMargins(10, 10, 10, 10)
layout.addWidget(self.sts_log)
self.setLayout(layout)
def log_msgs(self, msgs, msg_times):
if not isinstance(msgs, list):
msgs = [msgs]
if not isinstance(msg_times, list):
msg_times = [msg_times] * len(msgs)
msg_times = (datetime.datetime.fromtimestamp(
t).strftime('%H:%M:%S.%f') for t in msg_times)
self.sts_log.insertPlainText(
"\n".join(
"[{}] {}".format(
*
t) for t in zip(
msg_times,
msgs)) +
"\n")
self.sts_log.ensureCursorVisible()
class SnapshotStatus(QStatusBar):
def __init__(self, common_settings, parent=None):
QStatusBar.__init__(self, parent)
self.common_settings = common_settings
self.setSizeGripEnabled(False)
self.timer = QtCore.QTimer(self)
self.timer.timeout.connect(self.clear_status)
self.status_txt = QLabel()
self.status_txt.setStyleSheet("background-color: transparent")
self.addWidget(self.status_txt)
self.set_status()
def set_status(self, text="Ready", duration=0,
background="rgba(0, 0, 0, 30)"):
# Stop any existing timers
self.timer.stop()
if self.common_settings["force"]:
text = "[force mode] " + text
self.status_txt.setText(text)
style = "background-color : " + background
self.setStyleSheet(style)
# Force GUI updates to show status
QtCore.QCoreApplication.processEvents()
if duration:
self.timer.start(duration)
def clear_status(self):
self.set_status("Ready", 0, "rgba(0, 0, 0, 30)")
# This function should be called from outside, to start the gui
def start_gui(*args, **kwargs):
if kwargs.get('trace_execution'):
enable_tracing()
since_start("Interpreter started")
config = initialize_config(**kwargs)
app = QApplication(sys.argv)
# Load an application style
default_style_path = os.path.dirname(os.path.realpath(__file__))
default_style_path = os.path.join(default_style_path, "qss/default.qss")
app.setStyleSheet("file:///" + default_style_path)
# IMPORTANT: the reference to the SnapshotGui object needs to be retrieved,
# otherwise the GUI will not show up
_ = SnapshotGui(config)
since_start("GUI constructed")
sys.exit(app.exec_())
| gpl-3.0 |
bzloink/psi4 | conda/_conda_vers.py | 3 | 1480 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Dummy setup.py file solely for the purposes of getting an on-the-fly
computed version number into the conda recipe.
"""
import sys
from distutils.core import setup
def version_func():
import subprocess
command = 'python psi4/versioner.py --formatonly --format={versionlong}'
process = subprocess.Popen(command.split(), shell=False, stdout=subprocess.PIPE)
(out, err) = process.communicate()
if sys.version_info >= (3, 0):
return out.decode('utf-8').strip()
else:
return out.strip()
setup(
version=version_func(),
)
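# Example (assumed usage): run from the repository root so the versioner path
# resolves, then distutils' built-in handling prints the computed version:
#   python conda/_conda_vers.py --version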
| lgpl-3.0 |
sslattery/Chimera | doc/spn/fuel_assembly/sp3/fs_azilut02.py | 1 | 16598 | ###############################################################################
## fs.py
## 9te [angband.ornl.gov]
## Wed Jan 12 10:37:50 2011
###############################################################################
## Copyright (C) 2008 Oak Ridge National Laboratory, UT-Battelle, LLC.
##---------------------------------------------------------------------------##
## generated by /data/denovo/production/head/setup/bin/pygen built on 20110112
###############################################################################
import os, sys, math, string
# pyspn equation type
from spn_fv import *
print_it = False
##---------------------------------------------------------------------------##
## MAIN
##---------------------------------------------------------------------------##
initialize(sys.argv)
if node() == 0:
print "Denovo - pyspn Python Front-End"
print "-------------------------------"
print "Release : %16s" % (release())
print "Release Date : %16s" % (release_date())
print "Build Date : %16s" % (build_date())
print
timer = Timer()
timer.start()
##---------------------------------------------------------------------------##
## XS DATA
####### UO2 Fuel-Clad Macroscopic Cross Sections ##########
## Transport-corrected Total Cross Sections
T_UO2 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
T_UO2[0] = 1.77949e-1
T_UO2[1] = 3.29805e-1
T_UO2[2] = 4.80388e-1
T_UO2[3] = 5.54367e-1
T_UO2[4] = 3.11801e-1
T_UO2[5] = 3.95168e-1
T_UO2[6] = 5.64406e-1
## Fission Cross Section
F_UO2 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
F_UO2[0] = 7.21206e-3
F_UO2[1] = 8.19301e-4
F_UO2[2] = 6.45320e-3
F_UO2[3] = 1.85648e-2
F_UO2[4] = 1.78084e-2
F_UO2[5] = 8.30348e-2
F_UO2[6] = 2.16004e-1
## Nu
N_UO2 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
N_UO2[0] = 2.78145
N_UO2[1] = 2.47443
N_UO2[2] = 2.43383
N_UO2[3] = 2.43380
N_UO2[4] = 2.43380
N_UO2[5] = 2.43380
N_UO2[6] = 2.43380
## Chi
C_UO2 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
C_UO2[0] = 5.87910e-1
C_UO2[1] = 4.11760e-1
C_UO2[2] = 3.39060e-4
C_UO2[3] = 1.17610e-7
C_UO2[4] = 0.00000000
C_UO2[5] = 0.00000000
C_UO2[6] = 0.00000000
## Scattering Matrix for UO2 Fuel-Clad (Macroscopic)
S_UO2 = [ [[]], [[]], [[]], [[]], [[]], [[]], [[]]]
S_UO2[0] = [[1.27537e-1]]
S_UO2[1] = [[4.23780e-2], [3.24456e-1]]
S_UO2[2] = [[9.43740e-6], [1.63140e-3], [4.50940e-1]]
S_UO2[3] = [[5.51630e-9], [3.14270e-9], [2.67920e-3], [4.52565e-1], [1.25250e-4]]
S_UO2[4] = [[0.00000000], [0.00000000], [0.00000000], [5.56640e-3], [2.71401e-1], [1.29680e-3]]
S_UO2[5] = [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [1.02550e-2], [2.65802e-1], [8.54580e-3]]
S_UO2[6] = [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [1.00210e-8], [1.68090e-2], [2.73080e-1]]
## Upscattering Matrix
U_UO2 = [ [], [], [], [], [], [], [] ]
U_UO2[0] = []
U_UO2[1] = []
U_UO2[2] = []
U_UO2[3] = [4]
U_UO2[4] = [5]
U_UO2[5] = [6]
U_UO2[6] = []
######## 4.3% MOX Fuel-Clad Macroscopic Cross-Sections ############
## Transport-corrected Total Cross Sections
T_MOX43 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
T_MOX43[0] = 1.78731e-1
T_MOX43[1] = 3.30849e-1
T_MOX43[2] = 4.83772e-1
T_MOX43[3] = 5.66922e-1
T_MOX43[4] = 4.26227e-1
T_MOX43[5] = 6.78997e-1
T_MOX43[6] = 6.82852e-1
## Fission Cross-Sections
F_MOX43 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
F_MOX43[0] = 7.62704e-3
F_MOX43[1] = 8.76898e-4
F_MOX43[2] = 5.69835e-3
F_MOX43[3] = 2.28872e-2
F_MOX43[4] = 1.07635e-2
F_MOX43[5] = 2.32757e-1
F_MOX43[6] = 2.48968e-1
## Nu Cross-Sections
N_MOX43 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
N_MOX43[0] = 2.85209
N_MOX43[1] = 2.89099
N_MOX43[2] = 2.85486
N_MOX43[3] = 2.86073
N_MOX43[4] = 2.85447
N_MOX43[5] = 2.86415
N_MOX43[6] = 2.86780
## Chi Cross-Sections
C_MOX43 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
C_MOX43[0] = 5.87910e-1
C_MOX43[1] = 4.11760e-1
C_MOX43[2] = 3.39060e-4
C_MOX43[3] = 1.17610e-7
C_MOX43[4] = 0.00000000
C_MOX43[5] = 0.00000000
C_MOX43[6] = 0.00000000
## Scattering Matrix for 4.3% MOX Fuel-Clad (Macroscopic)
S_MOX43 = [ [[]], [[]], [[]], [[]], [[]], [[]], [[]] ]
S_MOX43[0] = [[1.28876e-1]]
S_MOX43[1] = [[4.14130e-2], [3.25452e-1]]
S_MOX43[2] = [[8.22900e-6], [1.63950e-3], [4.53188e-1]]
S_MOX43[3] = [[5.04050e-9], [1.59820e-9], [2.61420e-3], [4.57173e-1], [1.60460e-4]]
S_MOX43[4] = [[0.00000000], [0.00000000], [0.00000000], [5.53940e-3], [2.76814e-1], [2.00510e-3]]
S_MOX43[5] = [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [9.31270e-3], [2.52962e-1], [8.49480e-3]]
S_MOX43[6] = [[0.00000000], [0.00000000], [0.00000000], [0.00000000], [9.16560e-9], [1.48500e-2], [2.65007e-1]]
## Upscattering Matrix
U_MOX43 = [ [], [], [], [], [], [], [] ]
U_MOX43[0] = []
U_MOX43[1] = []
U_MOX43[2] = []
U_MOX43[3] = [4]
U_MOX43[4] = [5]
U_MOX43[5] = [6]
U_MOX43[6] = []
############### Moderator 1 Macroscopic Cross-Sections ################
## Transport-corrected Total Cross Section
T_MOD1 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
T_MOD1[0] = 1.59206e-1
T_MOD1[1] = 4.12970e-1
T_MOD1[2] = 5.90310e-1
T_MOD1[3] = 5.84350e-1
T_MOD1[4] = 7.18000e-1
T_MOD1[5] = 1.25445
T_MOD1[6] = 2.65038
## Scattering Matrix for Moderator (Macroscopic)
S_MOD1 = [ [[]], [[]], [[]], [[]], [[]], [[]], [[]] ]
S_MOD1[0] = [[4.44777e-2]]
S_MOD1[1] = [[1.13400e-1], [2.82334e-1]]
S_MOD1[2] = [[7.23470e-4], [1.29940e-1], [3.45256e-1]]
S_MOD1[3] = [[3.74990e-6], [6.23400e-4], [2.24570e-1], [9.10284e-2], [7.14370e-5]]
S_MOD1[4] = [[5.31840e-8], [4.80020e-5], [1.69990e-2], [4.15510e-1], [1.39138e-1], [2.21570e-3]]
S_MOD1[5] = [[0.00000000], [7.44860e-6], [2.64430e-3], [6.37320e-2], [5.11820e-1], [6.99913e-1], [1.32440e-1]]
S_MOD1[6] = [[0.00000000], [1.04550e-6], [5.03440e-4], [1.21390e-2], [6.12290e-2], [5.37320e-1], [2.48070 ]]
## Upscattering Matrix
U_MOD1 = [ [], [], [], [], [], [], [] ]
U_MOD1[0] = []
U_MOD1[1] = []
U_MOD1[2] = []
U_MOD1[3] = [4]
U_MOD1[4] = [5]
U_MOD1[5] = [6]
U_MOD1[6] = []
################### Create nuf vectors
NUF_UO2 = []
NUF_MOX43 = []
for i in range(0, 7):
NUF_UO2.append( N_UO2[i] * F_UO2[i] )
NUF_MOX43.append( N_MOX43[i] * F_MOX43[i] )
##---------------------------------------------------------------------------##
## BUILD MESH
def build_mesh(N):
# vacuum = 0
# UO2 = 1
# MOX = 2
# moderator = 3
# UO2 pins
uo2_pin = Pincell()
uo2_ids = [1]
uo2_r = [0.4759]
uo2_pin.set_shells(uo2_ids, uo2_r, 3)
# MOX pins
mox_pin = Pincell()
mox_ids = [2]
mox_r = [0.4759]
mox_pin.set_shells(mox_ids, mox_r, 3)
# Make a 2x2 uo2 lattice and a 2x2 mox lattice
uo2_lat = Lattice(2)
mox_lat = Lattice(2)
# lattices are uniform
layout = [0, 0, 0, 0]
uo2_lat.set_pins(layout)
mox_lat.set_pins(layout)
# assign the pins in the lattices
uo2_lat.assign_pin(uo2_pin, 0)
mox_lat.assign_pin(mox_pin, 0)
# build the lattice
uo2_lat.build_lattice(N)
mox_lat.build_lattice(N)
# print out mixing tables
if print_it:
print "UO2 Lattice"
for m in xrange(uo2_lat.num_mixtures()):
vf = uo2_lat.f(m)
print "%4i" % (m),
for f in vf:
print "%9.6f" % (f),
print
print "MOX Lattice"
for m in xrange(mox_lat.num_mixtures()):
vf = mox_lat.f(m)
print "%4i" % (m),
for f in vf:
print "%9.6f" % (f),
print
# make the mixtable for the combined lattices by appending the mox table
# to the UO2 table (don't include the clean mixtures at the front of the
# table)
num_mixtures = uo2_lat.num_mixtures() + mox_lat.num_mixtures() - 4
table = Vec_Dbl(num_mixtures * 4)
ctr = 0
mox_offset = uo2_lat.num_mixtures()
# add UO2 mixtures
for m in xrange(uo2_lat.num_mixtures()):
vf = uo2_lat.f(m)
for f in vf:
table[ctr] = f
ctr = ctr + 1
# add MOX mixtures, skipping the clean mixes
for m in xrange(4, mox_lat.num_mixtures()):
vf = mox_lat.f(m)
for f in vf:
table[ctr] = f
ctr = ctr + 1
# make the cleanids
cleanids = [0, 1, 2, 3]
# the total core is 3x3 assemblies (2x2 fuel surrounded by water)
xylat = uo2_lat.xy_planes()
Nr = len(xylat) - 1
delta = Vec_Dbl(Nr, 0.0)
for i in xrange(Nr):
delta[i] = xylat[i+1] - xylat[i]
if Nr % 2 != 0:
print "Non-even lattices cells."
sys.exit(1)
# build the core planes
xycore = Vec_Dbl(int(2.5*Nr) + 1, 0.0)
for n in xrange(2):
for i in xrange(Nr):
index = i + n * Nr
xycore[index + 1] = xycore[index] + delta[i]
for i in xrange(Nr/2):
index = i + 2 * Nr
xycore[index + 1] = xycore[index] + delta[i]
# z-planes (14 in each assembly)
height = 14.28 * 1.5
Nz = 21
z = [0.0] * (Nz + 1)
dz = height / float(Nz)
for k in xrange(Nz):
z[k+1] = z[k] + dz
# get matids for each lattice
uo2ids = Vec_Int(uo2_lat.mixids())
moxids = Vec_Int(mox_lat.mixids())
# update the mox mixtures (leave clean zones alone)
for m in xrange(len(moxids)):
if moxids[m] > 3:
moxids[m] = moxids[m] + mox_offset - 4
# assign the matids
Nx = len(xycore) - 1
Ny = len(xycore) - 1
# arrangement
# |-----|-----|-----|
# | | | |
# | mod | mod | mod |
# | | | |
# |-----|-----|-----|
# | | | |
# | mox | uo2 | mod | y
# | | | |
# |-----|-----|-----|
# | | | |
# | uo2 | mox | mod |
# | | | |
# |-----|-----|-----|
# x
mixids = Vec_Int(Nx * Ny * Nz, 3)
kend = Nz / 2
# (0, 0) lattice
for k in xrange(kend):
for j in xrange(Nr):
for i in xrange(Nr):
lat_cell = i + j * Nr
cell = i + j * Ny + k * Nx * Ny
mixids[cell] = uo2ids[lat_cell]
# (1, 0) lattice
for k in xrange(kend):
for j in xrange(Nr):
for i in xrange(Nr):
lat_cell = i + j * Nr
cell = (i + Nr) + j * Ny + k * Nx * Ny
mixids[cell] = moxids[lat_cell]
# (0, 1) lattice
for k in xrange(kend):
for j in xrange(Nr):
for i in xrange(Nr):
lat_cell = i + j * Nr
cell = i + (j + Nr) * Ny + k * Nx * Ny
mixids[cell] = moxids[lat_cell]
# (1, 1) lattice
for k in xrange(kend):
for j in xrange(Nr):
for i in xrange(Nr):
lat_cell = i + j * Nr
cell = (i + Nr) + (j + Nr) * Ny + k * Nx * Ny
mixids[cell] = uo2ids[lat_cell]
return (xycore, z, mixids, cleanids, table)
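##---------------------------------------------------------------------------##
## The lattice-assignment loops above flatten the (i, j, k) mesh indices into
## a single cell number with the convention cell = i + j*Ny + k*Nx*Ny, which
## coincides with the usual i + j*Nx + k*Nx*Ny here because Nx == Ny.  A
## minimal sketch of that convention (the helper below is a plain-Python
## illustration only, not part of the Denovo/pyspn API):
def _example_cell_index(i, j, k, Nx, Ny):
    # x varies fastest, then y, then z
    return i + j * Ny + k * Nx * Ny
## e.g. _example_cell_index(1, 0, 0, 5, 5) == 1,
##      _example_cell_index(0, 1, 0, 5, 5) == 5,
##      _example_cell_index(0, 0, 1, 5, 5) == 25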
##---------------------------------------------------------------------------##
## DB
##---------------------------------------------------------------------------##
entries = {
"problem_type" : "FIXED_SOURCE",
"num_groups" : 7,
"downscatter" : False,
"Pn_order" : 0,
"tolerance" : 1.0e-3,
"max_itr" : 400,
"linear_solver_xml_file" : "azilut02.xml",
"boundary" : "reflect",
"boundary_db" : {"reflect" : [1, 0, 1, 0, 1, 0]},
"SPN_order" : 3
}
db = DB.from_dict(entries)
# decomposition
if nodes() == 1:
db.insert("num_blocks_i", 1)
db.insert("num_blocks_j", 1)
elif nodes() == 2:
db.insert("num_blocks_i", 2)
db.insert("num_blocks_j", 1)
elif nodes() == 16:
db.insert("num_blocks_i", 4)
db.insert("num_blocks_j", 4)
# Mesh
(r, z, mixids, cleanids, table) = build_mesh(10)
db.insert("x_edges", r)
db.insert("y_edges", r)
db.insert("z_edges", z)
##---------------------------------------------------------------------------##
## MANAGER
##---------------------------------------------------------------------------##
# make manager, material, and angles
manager = Manager()
mat = Mat()
# partition the problem
manager.partition(db, mat)
# get mapping and mesh objects
mapp = manager.get_map()
indexer = manager.get_indexer()
mesh = manager.get_mesh()
# global and local cell numbers
Gx = indexer.num_global(X)
Gy = indexer.num_global(Y)
Gz = mesh.num_cells_dim(Z)
Nx = mesh.num_cells_dim(X)
Ny = mesh.num_cells_dim(Y)
Nz = mesh.num_cells_dim(Z)
if node() == 0:
print ">>> Partitioned global mesh with %i x %i x %i cells" \
% (Gx, Gy, Gz)
##---------------------------------------------------------------------------##
## MATERIAL SETUP
##---------------------------------------------------------------------------##
# vacuum = 0
# UO2 = 1
# MOX = 2
# moderator = 3
# set database
xsdb = XS_DB(db)
xsdb.set_num(4)
xsdb.assign_zero(0)
for g in xrange(0, xsdb.num_groups()):
xsdb.assign_upscatter(1, g, T_UO2[g], U_UO2[g], S_UO2[g])
xsdb.assign_upscatter(2, g, T_MOX43[g], U_MOX43[g], S_MOX43[g])
xsdb.assign_upscatter(3, g, T_MOD1[g], U_MOD1[g], S_MOD1[g])
## Assign fission data
xsdb.assign_fission(1, NUF_UO2, C_UO2)
xsdb.assign_fission(2, NUF_MOX43, C_MOX43)
# make macro mixer
mixer = Macro_Mixer(xsdb)
mixer.set(cleanids, table)
# make the material database
mixer.mix_with_global_ids(mixids, mat)
##---------------------------------------------------------------------------##
## ENERGY PARTITIONING
##---------------------------------------------------------------------------##
manager.partition_energy(mat)
##---------------------------------------------------------------------------##
## SOURCE SETUP
##---------------------------------------------------------------------------##
# allocate source and problem state
source = Isotropic_Source()
manager.setup(source)
total = Gx * Gy * Gz
Ng = mat.num_groups()
srcids = Vec_Int(total, 0)
srcstr = Vec_Dbl(total, 0.0)
num_shapes = 2
shapes = Vec_Dbl(2 * mat.num_groups(), 0.0)
chi0 = xsdb.fission_data(1, 0, CHI)
chi1 = xsdb.fission_data(2, 0, CHI)
# source 0 spectrum -> UO2 Chi
# source 1 spectrum -> MOX Chi
# make shapes
ctr = 0
for g in xrange(Ng):
shapes[ctr] = xsdb.fission_data(1, g, CHI)
ctr += 1
for g in xrange(Ng):
shapes[ctr] = xsdb.fission_data(2, g, CHI)
ctr += 1
# assign ids and strengths
for cell in xrange(total):
matid = mixids[cell]
if mat.assigned_fission(matid):
for g in xrange(Ng):
srcstr[cell] += mat.fission_data(matid, g, NU_SIGMA_F)
if mat.fission_data(matid, 0, CHI) == chi1:
srcids[cell] = 1
# set the source
source.set(num_shapes, shapes, srcids, srcstr)
##---------------------------------------------------------------------------##
## SOLVE
##---------------------------------------------------------------------------##
if node() == 0:
print ">>> Setup complete"
print ">>> Solving with %s differencing" % (manager.spatial_descriptor())
# solve the problem
manager.solve(source)
##---------------------------------------------------------------------------##
## OUTPUT
##---------------------------------------------------------------------------##
# make SILO output
silo = SILO()
silo.add_mixer(mixer)
silo.open("fs")
phi = Vec_Dbl(mesh.num_cells(), 0.0)
for g in xrange(Ng):
flux = manager.moments(g)
for cell in xrange(mesh.num_cells()):
phi[cell] = phi[cell] + flux.scalar_flux(cell)
silo.add("phi", phi)
silo.close()
##---------------------------------------------------------------------------##
## TIMING
##---------------------------------------------------------------------------##
# output final database (has class-dependent defaults)
db.output()
timer.stop()
time = timer.wall_clock()
keys = timer_keys()
if len(keys) > 0 and node() == 0:
print "\n"
print "TIMING : Problem ran in %16.6e seconds." % (time)
print "------------------------------------------------------------------"
for key in keys:
print "%30s : %16.6e %16.6e" % (key, timer_value(key) / time, timer_value(key))
print "------------------------------------------------------------------"
##---------------------------------------------------------------------------##
manager.close()
finalize()
###############################################################################
## end of fs.py
###############################################################################
| bsd-3-clause |
hejunbok/paparazzi | sw/ground_segment/python/udp_link/udp_link.py | 21 | 8628 | #!/usr/bin/env python
from ivy.std_api import *
import socket
import struct
import os
import logging
import sys
import threading
import time
sys.path.append(os.getenv("PAPARAZZI_HOME") + "/sw/lib/python")
import messages_xml_map
PING_PERIOD = 5.0
STATUS_PERIOD = 1.0
STX = 0x99
STX_TS = 0x98
DATALINK_PORT = 4243
DOWNLINK_PORT = 4242
class DownLinkStatus():
def __init__(self, ac_id, address):
self.ac_id = ac_id
self.address = address
self.rx_bytes = 0
self.rx_msgs = 0
self.run_time = 0
self.last_rx_bytes = 0
self.last_rx_msgs = 0
self.last_ping_time = 0
self.last_pong_time = 0
class IvyUdpLink():
def __init__(self):
self.InitIvy()
self.status_timer = threading.Timer(STATUS_PERIOD, self.sendStatus)
self.ping_timer = threading.Timer(STATUS_PERIOD, self.sendPing)
self.ac_downlink_status = {}
self.rx_err = 0
messages_xml_map.ParseMessages()
self.data_types = {'float': ['f', 4],
'uint8': ['B', 1],
'uint16': ['H', 2],
'uint32': ['L', 4],
'int8': ['b', 1],
'int16': ['h', 2],
'int32': ['l', 4]
}
def Unpack(self, data_fields, type, start, length):
return struct.unpack(type, "".join(data_fields[start:start + length]))[0]
def InitIvy(self):
# initialising the bus
IvyInit("Link", # application name for Ivy
"READY", # ready message
0, # main loop is local (ie. using IvyMainloop)
lambda x, y: y, # handler called on connection/deconnection
lambda x, y: y # handler called when a diemessage is received
)
# starting the bus
logging.getLogger('Ivy').setLevel(logging.WARN)
IvyStart("")
IvyBindMsg(self.OnSettingMsg, "(^.* SETTING .*)")
def calculate_checksum(self, msg):
ck_a = 0
ck_b = 0
# start char not included in checksum for pprz protocol
for c in msg[1:]:
ck_a = (ck_a + ord(c)) % 256
ck_b = (ck_b + ck_a) % 256
return (ck_a, ck_b)
def buildPprzMsg(self, msg_id, *args):
stx = STX
length = 6
sender = 0
msg_fields = messages_xml_map.message_dictionary_types["datalink"][msg_id]
struct_string = "=BBBB"
typed_args = []
idx = 0
for msg_type in msg_fields:
struct_string += self.data_types[msg_type][0]
length += self.data_types[msg_type][1]
if (msg_type == "float"):
typed_args.append(float(args[idx]))
else:
typed_args.append(int(args[idx]))
idx += 1
msg = struct.pack(struct_string, stx, length, sender, msg_id, *typed_args)
(ck_a, ck_b) = self.calculate_checksum(msg)
msg = msg + struct.pack('=BB', ck_a, ck_b)
return msg
def OnSettingMsg(self, agent, *larg):
list = larg[0].split(' ')
sender = list[0]
msg_name = list[1]
ac_id = list[3]
args = list[2:]
msg_id = messages_xml_map.message_dictionary_name_id["datalink"][msg_name]
if self.ac_downlink_status.has_key(int(ac_id)):
msgbuf = self.buildPprzMsg(msg_id, *args)
address = (self.ac_downlink_status[int(ac_id)].address[0], DATALINK_PORT)
self.server.sendto(msgbuf, address)
def sendPing(self):
for (ac_id, value) in self.ac_downlink_status.items():
msg_id = messages_xml_map.message_dictionary_name_id["datalink"]["PING"]
msgbuf = self.buildPprzMsg(msg_id)
address = (self.ac_downlink_status[int(ac_id)].address[0], DATALINK_PORT)
self.server.sendto(msgbuf, address)
value.last_ping_time = time.clock()
self.ping_timer = threading.Timer(STATUS_PERIOD, self.sendPing)
self.ping_timer.start()
def sendStatus(self):
for (key, value) in self.ac_downlink_status.items():
IvySendMsg("%i DOWNLINK_STATUS %i %i %i %i %i %i %i" % (
value.ac_id,
value.run_time,
value.rx_bytes,
value.rx_msgs,
self.rx_err,
value.rx_bytes - value.last_rx_bytes,
value.rx_msgs - value.last_rx_msgs,
1000 * value.last_pong_time))
value.last_rx_bytes = value.rx_bytes
value.last_rx_msgs = value.rx_msgs
value.run_time = value.run_time + 1
self.status_timer = threading.Timer(STATUS_PERIOD, self.sendStatus)
self.status_timer.start()
def updateStatus(self, ac_id, length, address, isPong):
if not self.ac_downlink_status.has_key(ac_id):
self.ac_downlink_status[ac_id] = DownLinkStatus(ac_id, address)
self.ac_downlink_status[ac_id].rx_msgs += 1
self.ac_downlink_status[ac_id].rx_bytes += length
if isPong:
self.ac_downlink_status[ac_id].last_pong_time = time.clock() - self.ac_downlink_status[ac_id].last_ping_time
def ProcessPacket(self, msg, address):
if len(msg) < 4:
self.rx_err = self.rx_err + 1
return
msg_offset = 0
while msg_offset < len(msg):
start_byte = ord(msg[msg_offset])
msg_start_idx = msg_offset
msg_offset = msg_offset + 1
if start_byte != STX and start_byte != STX_TS:
self.rx_err = self.rx_err + 1
return
msg_length = ord(msg[msg_offset])
msg_offset = msg_offset + 1
if (start_byte == STX_TS):
timestamp = int(self.Unpack(msg, 'L', msg_offset, 4))
msg_offset = msg_offset + 4
ac_id = ord(msg[msg_offset])
msg_offset = msg_offset + 1
msg_id = ord(msg[msg_offset])
msg_offset = msg_offset + 1
msg_name = messages_xml_map.message_dictionary_id_name["telemetry"][msg_id]
msg_fields = messages_xml_map.message_dictionary_types["telemetry"][msg_id]
ivy_msg = "%i %s " % (ac_id, msg_name)
for field in msg_fields:
if field[-2:] == "[]":
baseType = field[:-2]
array_length = int(self.Unpack(msg, 'B', msg_offset, 1))
msg_offset = msg_offset + 1
for count in range(0, array_length):
array_value = str(
self.Unpack(msg, self.data_types[baseType][0], msg_offset, self.data_types[baseType][1]))
msg_offset = msg_offset + self.data_types[baseType][1]
if (count == array_length - 1):
ivy_msg += array_value + " "
else:
ivy_msg += array_value + ","
else:
ivy_msg += str(
self.Unpack(msg, self.data_types[field][0], msg_offset, self.data_types[field][1])) + " "
msg_offset = msg_offset + self.data_types[field][1]
if (msg_offset > len(msg)):
print "finished without parsing %s" % field
break
(ck_a, ck_b) = self.calculate_checksum(msg[msg_start_idx:msg_offset])
msg_ck_a = int(self.Unpack(msg, 'B', msg_offset, 1))
msg_offset += 1
msg_ck_b = int(self.Unpack(msg, 'B', msg_offset, 1))
msg_offset += 1
# check for valid checksum
if (ck_a, ck_b) == (msg_ck_a, msg_ck_b):
self.updateStatus(ac_id, msg_length, address,
msg_id == messages_xml_map.message_dictionary_name_id["telemetry"]["PONG"])
# strip off trailing whitespace
ivy_msg = ivy_msg[:-1]
IvySendMsg(ivy_msg)
def Run(self):
self.server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.server.bind(('0.0.0.0', DOWNLINK_PORT))
self.status_timer.start()
self.ping_timer.start()
while True:
(msg, address) = self.server.recvfrom(2048)
self.ProcessPacket(msg, address)
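# The frames assembled by buildPprzMsg() and parsed by ProcessPacket() follow
# the paparazzi datalink layout
#   STX | length | sender | msg_id | payload ... | ck_a | ck_b
# where "length" counts every byte of the frame (STX and both checksum bytes
# included) and (ck_a, ck_b) is the running 8-bit sum / sum-of-sums over
# everything except STX.  A minimal sketch; the frame content below is
# hypothetical, not a real telemetry message:
def _example_frame():
    # STX, length=7 (5 header/payload bytes + 2 checksum bytes),
    # sender=0, msg_id=5, one uint8 payload byte of value 42
    frame = struct.pack('=BBBBB', STX, 7, 0, 5, 42)
    ck_a = 0
    ck_b = 0
    for c in frame[1:]:  # STX excluded, as in calculate_checksum()
        ck_a = (ck_a + ord(c)) % 256
        ck_b = (ck_b + ck_a) % 256
    return frame + struct.pack('=BB', ck_a, ck_b)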
def main():
udp_interface = IvyUdpLink()
udp_interface.Run()
if __name__ == '__main__':
main()
| gpl-2.0 |
gurneyalex/odoo | addons/website/tests/test_qweb.py | 3 | 6800 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
from odoo import http, tools
from odoo.addons.website.tools import MockRequest
from odoo.modules.module import get_module_resource
from odoo.tests.common import TransactionCase
class TestQweb(TransactionCase):
def _load(self, module, *args):
tools.convert_file(self.cr, 'website',
get_module_resource(module, *args),
{}, 'init', False, 'test', self.registry._assertion_report)
def test_qweb_cdn(self):
self._load('website', 'tests', 'template_qweb_test.xml')
website = self.env['website'].browse(1)
website.write({
"cdn_activated": True,
"cdn_url": "http://test.cdn"
})
demo = self.env['res.users'].search([('login', '=', 'demo')])[0]
demo.write({"signature": '''<span class="toto">
span<span class="fa"></span><img src="/web/image/1"/>
</span>'''})
demo_env = self.env(user=demo)
        html = demo_env['ir.qweb'].render('website.test_template', {"user": demo}, website_id=website.id)
html = html.strip().decode('utf8')
html = re.sub(r'\?unique=[^"]+', '', html).encode('utf8')
attachments = demo_env['ir.attachment'].search([('url', '=like', '/web/content/%-%/website.test_bundle.%')])
self.assertEqual(len(attachments), 2)
self.assertEqual(html, ("""<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="http://test.external.link/style1.css"/>
<link rel="stylesheet" href="http://test.external.link/style2.css"/>
<link type="text/css" rel="stylesheet" href="http://test.cdn%(css)s"/>
<meta/>
<script type="text/javascript" src="http://test.external.link/javascript1.js"></script>
<script type="text/javascript" src="http://test.external.link/javascript2.js"></script>
<script type="text/javascript" src="http://test.cdn%(js)s"></script>
</head>
<body>
<img src="http://test.external.link/img.png"/>
<img src="http://test.cdn/website/static/img.png"/>
<a href="http://test.external.link/link">x</a>
<a href="http://test.cdn/web/content/local_link">x</a>
<span style="background-image: url('http://test.cdn/web/image/2')">xxx</span>
<div widget="html"><span class="toto">
span<span class="fa"></span><img src="http://test.cdn/web/image/1">
</span></div>
<div widget="image"><img src="http://test.cdn/web/image/res.users/%(user_id)s/image_1920/%(filename)s" class="img img-fluid" alt="%(alt)s"/></div>
</body>
</html>""" % {
"js": attachments[0].url,
"css": attachments[1].url,
"user_id": demo.id,
"filename": "Marc%20Demo",
"alt": "Marc Demo",
}).encode('utf8'))
class TestQwebProcessAtt(TransactionCase):
def setUp(self):
super(TestQwebProcessAtt, self).setUp()
self.website = self.env['website'].browse(1)
self.env.ref('base.lang_fr').active = True
self.website.language_ids = self.env.ref('base.lang_en') + self.env.ref('base.lang_fr')
self.website.default_lang_id = self.env.ref('base.lang_en')
self.website.cdn_activated = True
self.website.cdn_url = "http://test.cdn"
self.website.cdn_filters = "\n".join(["^(/[a-z]{2}_[A-Z]{2})?/a$", "^(/[a-z]{2})?/a$", "^/b$"])
def _test_att(self, url, expect, tag='a', attribute='href'):
self.assertEqual(
self.env['ir.qweb']._post_processing_att(tag, {attribute: url}, {}),
expect
)
def test_process_att_no_request(self):
# no request so no URL rewriting
self._test_att('/', {'href': '/'})
self._test_att('/en/', {'href': '/en/'})
self._test_att('/fr/', {'href': '/fr/'})
        # no URL rewriting for CDN
self._test_att('/a', {'href': '/a'})
def test_process_att_no_website(self):
with MockRequest(self.env):
            # no website so no URL rewriting
self._test_att('/', {'href': '/'})
self._test_att('/en/', {'href': '/en/'})
self._test_att('/fr/', {'href': '/fr/'})
            # no URL rewriting for CDN
self._test_att('/a', {'href': '/a'})
def test_process_att_monolang_route(self):
with MockRequest(self.env, website=self.website, multilang=False):
# lang not changed in URL but CDN enabled
self._test_att('/a', {'href': 'http://test.cdn/a'})
self._test_att('/en/a', {'href': 'http://test.cdn/en/a'})
self._test_att('/b', {'href': 'http://test.cdn/b'})
self._test_att('/en/b', {'href': '/en/b'})
def test_process_att_no_request_lang(self):
with MockRequest(self.env, website=self.website):
self._test_att('/', {'href': '/'})
self._test_att('/en/', {'href': '/'})
self._test_att('/fr/', {'href': '/fr/'})
def test_process_att_with_request_lang(self):
with MockRequest(self.env, website=self.website, context={'lang': 'fr_FR'}):
self._test_att('/', {'href': '/fr/'})
self._test_att('/en/', {'href': '/'})
self._test_att('/fr/', {'href': '/fr/'})
def test_process_att_matching_cdn_and_lang(self):
with MockRequest(self.env, website=self.website):
# lang prefix is added before CDN
self._test_att('/a', {'href': 'http://test.cdn/a'})
self._test_att('/en/a', {'href': 'http://test.cdn/a'})
self._test_att('/fr/a', {'href': 'http://test.cdn/fr/a'})
self._test_att('/b', {'href': 'http://test.cdn/b'})
self._test_att('/en/b', {'href': 'http://test.cdn/b'})
self._test_att('/fr/b', {'href': '/fr/b'})
def test_process_att_no_route(self):
with MockRequest(self.env, website=self.website, context={'lang': 'fr_FR'}, routing=False):
# default on multilang=True if route is not /{module}/static/
self._test_att('/web/static/hi', {'href': '/web/static/hi'})
self._test_att('/my-page', {'href': '/fr/my-page'})
def test_process_att_url_crap(self):
with MockRequest(self.env, website=self.website):
match = http.root.get_db_router.return_value.bind.return_value.match
# #{fragment} is stripped from URL when testing route
self._test_att('/x#y?z', {'href': '/x#y?z'})
match.assert_called_with('/x', method='POST', query_args=None)
            match.reset_mock()
self._test_att('/x?y#z', {'href': '/x?y#z'})
match.assert_called_with('/x', method='POST', query_args='y')
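# The cdn_filters configured in setUp() make "/a", "/en/a", "/fr_FR/a" and
# "/b" eligible for CDN rewriting, while "/fr/b" matches none of the three
# patterns and stays on the local host, which is what
# test_process_att_matching_cdn_and_lang() asserts above.  A standalone sketch
# of that matching, using plain re and independent of the website model:
#
#   import re
#   filters = [r"^(/[a-z]{2}_[A-Z]{2})?/a$", r"^(/[a-z]{2})?/a$", r"^/b$"]
#   assert any(re.match(f, "/fr_FR/a") for f in filters)
#   assert not any(re.match(f, "/fr/b") for f in filters)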
| agpl-3.0 |
jjmleiro/hue | desktop/core/ext-py/Django-1.6.10/django/forms/extras/widgets.py | 117 | 4978 | """
Extra HTML Widget classes
"""
from __future__ import unicode_literals
import datetime
import re
from django.forms.widgets import Widget, Select
from django.utils import datetime_safe
from django.utils.dates import MONTHS
from django.utils.encoding import force_str
from django.utils.safestring import mark_safe
from django.utils.formats import get_format
from django.utils import six
from django.conf import settings
__all__ = ('SelectDateWidget',)
RE_DATE = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$')
def _parse_date_fmt():
fmt = get_format('DATE_FORMAT')
escaped = False
output = []
for char in fmt:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
output.append('year')
#if not self.first_select: self.first_select = 'year'
elif char in 'bEFMmNn':
output.append('month')
#if not self.first_select: self.first_select = 'month'
elif char in 'dj':
output.append('day')
#if not self.first_select: self.first_select = 'day'
return output
class SelectDateWidget(Widget):
"""
A Widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = (0, '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
def __init__(self, attrs=None, years=None, required=True):
# years is an optional list/tuple of years to use in the "year" select box.
self.attrs = attrs or {}
self.required = required
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year+10)
def render(self, name, value, attrs=None):
try:
year_val, month_val, day_val = value.year, value.month, value.day
except AttributeError:
year_val = month_val = day_val = None
if isinstance(value, six.string_types):
if settings.USE_L10N:
try:
input_format = get_format('DATE_INPUT_FORMATS')[0]
v = datetime.datetime.strptime(force_str(value), input_format)
year_val, month_val, day_val = v.year, v.month, v.day
except ValueError:
pass
else:
match = RE_DATE.match(value)
if match:
year_val, month_val, day_val = [int(v) for v in match.groups()]
choices = [(i, i) for i in self.years]
year_html = self.create_select(name, self.year_field, value, year_val, choices)
choices = list(six.iteritems(MONTHS))
month_html = self.create_select(name, self.month_field, value, month_val, choices)
choices = [(i, i) for i in range(1, 32)]
day_html = self.create_select(name, self.day_field, value, day_val, choices)
output = []
for field in _parse_date_fmt():
if field == 'year':
output.append(year_html)
elif field == 'month':
output.append(month_html)
elif field == 'day':
output.append(day_html)
return mark_safe('\n'.join(output))
def id_for_label(self, id_):
first_select = None
field_list = _parse_date_fmt()
if field_list:
first_select = field_list[0]
if first_select is not None:
return '%s_%s' % (id_, first_select)
else:
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == "0":
return None
if y and m and d:
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
return '%s-%s-%s' % (y, m, d)
else:
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
else:
return '%s-%s-%s' % (y, m, d)
return data.get(name, None)
def create_select(self, name, field, value, val, choices):
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
if not (self.required and val):
choices.insert(0, self.none_value)
local_attrs = self.build_attrs(id=field % id_)
s = Select(choices=choices)
select_html = s.render(field % name, val, local_attrs)
return select_html
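# SelectDateWidget is normally attached to a DateField; the three <select>
# boxes rendered above are recombined by value_from_datadict().  A minimal
# usage sketch (the form and field names are illustrative only):
#
#     from django import forms
#     from django.forms.extras.widgets import SelectDateWidget
#
#     class BirthdayForm(forms.Form):
#         birthday = forms.DateField(widget=SelectDateWidget(years=range(1990, 2000)))
#
# The rendered selects are named birthday_month / birthday_day / birthday_year,
# and POST data such as {'birthday_year': '1994', 'birthday_month': '7',
# 'birthday_day': '9'} is recombined into a single date value by the field.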
| apache-2.0 |
camptocamp/odoo | addons/account/project/wizard/account_analytic_journal_report.py | 378 | 3164 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_journal_report(osv.osv_memory):
_name = 'account.analytic.journal.report'
_description = 'Account Analytic Journal'
_columns = {
'date1': fields.date('Start of period', required=True),
'date2': fields.date('End of period', required=True),
'analytic_account_journal_id': fields.many2many('account.analytic.journal', 'account_analytic_journal_name', 'journal_line_id', 'journal_print_id', 'Analytic Journals', required=True),
}
_defaults = {
'date1': lambda *a: time.strftime('%Y-01-01'),
'date2': lambda *a: time.strftime('%Y-%m-%d')
}
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids)[0]
ids_list = []
if context.get('active_id',False):
ids_list.append(context.get('active_id',False))
else:
record = self.browse(cr,uid,ids[0],context=context)
for analytic_record in record.analytic_account_journal_id:
ids_list.append(analytic_record.id)
datas = {
'ids': ids_list,
'model': 'account.analytic.journal',
'form': data
}
context2 = context.copy()
context2['active_model'] = 'account.analytic.journal'
context2['active_ids'] = ids_list
return self.pool['report'].get_action(cr, uid, [], 'account.report_analyticjournal', data=datas, context=context2)
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(account_analytic_journal_report, self).default_get(cr, uid, fields, context=context)
if not context.has_key('active_ids'):
journal_ids = self.pool.get('account.analytic.journal').search(cr, uid, [], context=context)
else:
journal_ids = context.get('active_ids')
if 'analytic_account_journal_id' in fields:
res.update({'analytic_account_journal_id': journal_ids})
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
HailStorm32/Q.bo_stacks | qbo_arduqbo/src/qbo_arduqbo/srv/_TorqueEnable.py | 1 | 6300 | """autogenerated by genpy from qbo_arduqbo/TorqueEnableRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class TorqueEnableRequest(genpy.Message):
_md5sum = "e44dc96db32bd58b5a896c2c5bf316d0"
_type = "qbo_arduqbo/TorqueEnableRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """bool torque_enable
"""
__slots__ = ['torque_enable']
_slot_types = ['bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
torque_enable
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(TorqueEnableRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.torque_enable is None:
self.torque_enable = False
else:
self.torque_enable = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_struct_B.pack(self.torque_enable))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.torque_enable,) = _struct_B.unpack(str[start:end])
self.torque_enable = bool(self.torque_enable)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_struct_B.pack(self.torque_enable))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.torque_enable,) = _struct_B.unpack(str[start:end])
self.torque_enable = bool(self.torque_enable)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_B = struct.Struct("<B")
"""autogenerated by genpy from qbo_arduqbo/TorqueEnableResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class TorqueEnableResponse(genpy.Message):
_md5sum = "d41d8cd98f00b204e9800998ecf8427e"
_type = "qbo_arduqbo/TorqueEnableResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
"""
__slots__ = []
_slot_types = []
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(TorqueEnableResponse, self).__init__(*args, **kwds)
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
pass
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
pass
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
class TorqueEnable(object):
_type = 'qbo_arduqbo/TorqueEnable'
_md5sum = 'e44dc96db32bd58b5a896c2c5bf316d0'
_request_class = TorqueEnableRequest
_response_class = TorqueEnableResponse
| lgpl-2.1 |
priyaganti/rockstor-core | src/rockstor/smart_manager/views/detail_views.py | 2 | 2971 | """
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from smart_manager.models import (ReplicaShare, ReplicaTrail, Replica,
ReceiveTrail,)
from smart_manager.serializers import (ReplicaShareSerializer,
ReplicaTrailSerializer,
ReplicaSerializer,
ReceiveTrailSerializer,)
import rest_framework_custom as rfc
from rest_framework.response import Response
class ReplicaShareDetailView(rfc.GenericView):
serializer_class = ReplicaShareSerializer
def get(self, *args, **kwargs):
try:
if ('sname' in self.kwargs):
data = ReplicaShare.objects.get(share=self.kwargs['sname'])
else:
data = ReplicaShare.objects.get(id=self.kwargs['rid'])
serialized_data = ReplicaShareSerializer(data)
return Response(serialized_data.data)
except:
return Response()
class ReplicaTrailDetailView(rfc.GenericView):
serializer_class = ReplicaTrailSerializer
def get(self, *args, **kwargs):
if ('rtid' in self.kwargs):
try:
return ReplicaTrail.objects.get(id=self.kwargs['rtid'])
except:
return Response()
class ReplicaDetailView(rfc.GenericView):
serializer_class = ReplicaSerializer
def get(self, *args, **kwargs):
if ('sname' in self.kwargs):
try:
data = Replica.objects.get(share=self.kwargs['sname'])
serialized_data = ReplicaSerializer(data)
return Response(serialized_data.data)
except:
return Response()
elif ('rid' in self.kwargs):
try:
data = Replica.objects.get(id=self.kwargs['rid'])
serialized_data = ReplicaSerializer(data)
return Response(serialized_data.data)
except:
return Response()
class ReceiveTrailDetailView(rfc.GenericView):
serializer_class = ReceiveTrailSerializer
def get(self, request, *args, **kwargs):
if ('rtid' in self.kwargs):
with self._handle_exception(request):
return ReceiveTrail.objects.get(id=self.kwargs['rtid'])
| gpl-3.0 |
HydrelioxGitHub/home-assistant | homeassistant/components/intent_script/__init__.py | 14 | 2945 | """Handle intents with scripts."""
import copy
import logging
import voluptuous as vol
from homeassistant.helpers import (
intent, template, script, config_validation as cv)
DOMAIN = 'intent_script'
CONF_INTENTS = 'intents'
CONF_SPEECH = 'speech'
CONF_ACTION = 'action'
CONF_CARD = 'card'
CONF_TYPE = 'type'
CONF_TITLE = 'title'
CONF_CONTENT = 'content'
CONF_TEXT = 'text'
CONF_ASYNC_ACTION = 'async_action'
DEFAULT_CONF_ASYNC_ACTION = False
CONFIG_SCHEMA = vol.Schema({
DOMAIN: {
cv.string: {
vol.Optional(CONF_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_ASYNC_ACTION,
default=DEFAULT_CONF_ASYNC_ACTION): cv.boolean,
vol.Optional(CONF_CARD): {
vol.Optional(CONF_TYPE, default='simple'): cv.string,
vol.Required(CONF_TITLE): cv.template,
vol.Required(CONF_CONTENT): cv.template,
},
vol.Optional(CONF_SPEECH): {
vol.Optional(CONF_TYPE, default='plain'): cv.string,
vol.Required(CONF_TEXT): cv.template,
}
}
}
}, extra=vol.ALLOW_EXTRA)
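# With the schema above, a configuration.yaml entry for this component looks
# like the following sketch (the intent name, speech template and action are
# illustrative placeholders, not values required by the component):
#
#   intent_script:
#     GetTemperature:
#       speech:
#         text: "It is {{ states.sensor.temperature.state }} degrees"
#       action:
#         service: notify.notify
#         data_template:
#           message: "Someone asked for the temperature"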
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
"""Activate Alexa component."""
intents = copy.deepcopy(config[DOMAIN])
template.attach(hass, intents)
for intent_type, conf in intents.items():
if CONF_ACTION in conf:
conf[CONF_ACTION] = script.Script(
hass, conf[CONF_ACTION],
"Intent Script {}".format(intent_type))
intent.async_register(hass, ScriptIntentHandler(intent_type, conf))
return True
class ScriptIntentHandler(intent.IntentHandler):
"""Respond to an intent with a script."""
def __init__(self, intent_type, config):
"""Initialize the script intent handler."""
self.intent_type = intent_type
self.config = config
async def async_handle(self, intent_obj):
"""Handle the intent."""
speech = self.config.get(CONF_SPEECH)
card = self.config.get(CONF_CARD)
action = self.config.get(CONF_ACTION)
is_async_action = self.config.get(CONF_ASYNC_ACTION)
slots = {key: value['value'] for key, value
in intent_obj.slots.items()}
if action is not None:
if is_async_action:
intent_obj.hass.async_create_task(action.async_run(slots))
else:
await action.async_run(slots)
response = intent_obj.create_response()
if speech is not None:
response.async_set_speech(speech[CONF_TEXT].async_render(slots),
speech[CONF_TYPE])
if card is not None:
response.async_set_card(
card[CONF_TITLE].async_render(slots),
card[CONF_CONTENT].async_render(slots),
card[CONF_TYPE])
return response
| apache-2.0 |
Jollytown/Garuda | server/garuda/lib/python2.7/site-packages/pip/download.py | 61 | 30557 | from __future__ import absolute_import
import cgi
import email.utils
import hashlib
import getpass
import json
import logging
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
import pip
from pip.exceptions import InstallationError, HashMismatch
from pip.models import PyPI
from pip.utils import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file)
from pip.utils.filesystem import check_path_owner
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
from pip.locations import write_delete_marker_file
from pip.vcs import vcs
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import Response
from pip._vendor.requests.structures import CaseInsensitiveDict
from pip._vendor.requests.packages import urllib3
from pip._vendor.cachecontrol import CacheControlAdapter
from pip._vendor.cachecontrol.caches import FileCache
from pip._vendor.lockfile import LockError
from pip._vendor.six.moves import xmlrpc_client
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url',
'unpack_http_url', 'unpack_url']
logger = logging.getLogger(__name__)
def user_agent():
"""
Return a string representing the user agent.
"""
data = {
"installer": {"name": "pip", "version": pip.__version__},
"python": platform.python_version(),
"implementation": {
"name": platform.python_implementation(),
},
}
if data["implementation"]["name"] == 'CPython':
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'PyPy':
if sys.pypy_version_info.releaselevel == 'final':
pypy_version_info = sys.pypy_version_info[:3]
else:
pypy_version_info = sys.pypy_version_info
data["implementation"]["version"] = ".".join(
[str(x) for x in pypy_version_info]
)
elif data["implementation"]["name"] == 'Jython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'IronPython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
if sys.platform.startswith("linux"):
distro = dict(filter(
lambda x: x[1],
zip(["name", "version", "id"], platform.linux_distribution()),
))
libc = dict(filter(
lambda x: x[1],
zip(["lib", "version"], platform.libc_ver()),
))
if libc:
distro["libc"] = libc
if distro:
data["distro"] = distro
if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
data["distro"] = {"name": "OS X", "version": platform.mac_ver()[0]}
if platform.system():
data.setdefault("system", {})["name"] = platform.system()
if platform.release():
data.setdefault("system", {})["release"] = platform.release()
if platform.machine():
data["cpu"] = platform.machine()
return "{data[installer][name]}/{data[installer][version]} {json}".format(
data=data,
json=json.dumps(data, separators=(",", ":"), sort_keys=True),
)
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True):
self.prompting = prompting
self.passwords = {}
def __call__(self, req):
parsed = urllib_parse.urlparse(req.url)
# Get the netloc without any embedded credentials
netloc = parsed.netloc.rsplit("@", 1)[-1]
# Set the url of the request to the url without any credentials
req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
# Extract credentials embedded in the url if we have none stored
if username is None:
username, password = self.parse_credentials(parsed.netloc)
if username or password:
# Store the username and password
self.passwords[netloc] = (username, password)
# Send the basic auth with this request
req = HTTPBasicAuth(username or "", password or "")(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
def handle_401(self, resp, **kwargs):
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
        # We are not able to prompt the user so simply return the response
if not self.prompting:
return resp
parsed = urllib_parse.urlparse(resp.url)
# Prompt the user for a new username and password
username = six.moves.input("User for %s: " % parsed.netloc)
password = getpass.getpass("Password: ")
# Store the new username and password to use for future requests
if username or password:
self.passwords[parsed.netloc] = (username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
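# parse_credentials() above only understands netlocs of the form
# "user:password@host": for example "alice:s3cret@example.com" yields
# ("alice", "s3cret"), "alice@example.com" yields ("alice", None) and a bare
# "example.com" yields (None, None).  __call__() then strips those credentials
# from the request URL and replays them as HTTP Basic auth instead.
# (The host and credentials here are illustrative values only.)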
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
pathname = url_to_path(request.url)
resp = Response()
resp.status_code = 200
resp.url = request.url
try:
stats = os.stat(pathname)
except OSError as exc:
resp.status_code = 404
resp.raw = exc
else:
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
resp.headers = CaseInsensitiveDict({
"Content-Type": content_type,
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = open(pathname, "rb")
resp.close = resp.raw.close
return resp
def close(self):
pass
class SafeFileCache(FileCache):
"""
A file based cache which is safe to use even when the target directory may
not be accessible or writable.
"""
def __init__(self, *args, **kwargs):
super(SafeFileCache, self).__init__(*args, **kwargs)
# Check to ensure that the directory containing our cache directory
        # is owned by the user currently executing pip. If it does not exist
# we will check the parent directory until we find one that does exist.
# If it is not owned by the user executing pip then we will disable
# the cache and log a warning.
if not check_path_owner(self.directory):
logger.warning(
"The directory '%s' or its parent directory is not owned by "
"the current user and the cache has been disabled. Please "
"check the permissions and owner of that directory. If "
"executing pip with sudo, you may want the -H flag.",
self.directory,
)
# Set our directory to None to disable the Cache
self.directory = None
def get(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).get(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
def set(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).set(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
def delete(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).delete(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
class InsecureHTTPAdapter(HTTPAdapter):
def cert_verify(self, conn, url, verify, cert):
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
class PipSession(requests.Session):
timeout = None
def __init__(self, *args, **kwargs):
retries = kwargs.pop("retries", 0)
cache = kwargs.pop("cache", None)
insecure_hosts = kwargs.pop("insecure_hosts", [])
super(PipSession, self).__init__(*args, **kwargs)
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth()
# Create our urllib3.Retry instance which will allow us to customize
# how we handle retries.
retries = urllib3.Retry(
# Set the total number of retries that a particular request can
# have.
total=retries,
# A 503 error from PyPI typically means that the Fastly -> Origin
            # connection got interrupted in some way. A 503 error in general
# is typically considered a transient error so we'll go ahead and
# retry it.
status_forcelist=[503],
# Add a small amount of back off between failed requests in
# order to prevent hammering the service.
backoff_factor=0.25,
)
# We want to _only_ cache responses on securely fetched origins. We do
# this because we can't validate the response of an insecurely fetched
# origin, and we don't want someone to be able to poison the cache and
        # require manual eviction from the cache to fix it.
if cache:
secure_adapter = CacheControlAdapter(
cache=SafeFileCache(cache),
max_retries=retries,
)
else:
secure_adapter = HTTPAdapter(max_retries=retries)
# Our Insecure HTTPAdapter disables HTTPS validation. It does not
# support caching (see above) so we'll use it for all http:// URLs as
# well as any https:// host that we've marked as ignoring TLS errors
# for.
insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
self.mount("https://", secure_adapter)
self.mount("http://", insecure_adapter)
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
# We want to use a non-validating adapter for any requests which are
# deemed insecure.
for host in insecure_hosts:
self.mount("https://{0}/".format(host), insecure_adapter)
def request(self, method, url, *args, **kwargs):
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super(PipSession, self).request(method, url, *args, **kwargs)
def get_file_content(url, comes_from=None, session=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
if session is None:
raise TypeError(
"get_file_content() missing 1 required keyword argument: 'session'"
)
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from
and comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib_parse.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
# FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
if six.PY3:
return resp.url, resp.text
else:
return resp.url, resp.content
try:
with open(url) as f:
content = f.read()
except IOError as exc:
raise InstallationError(
'Could not open requirements file: %s' % str(exc)
)
return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
_, netloc, path, _, _ = urllib_parse.urlsplit(url)
# if we have a UNC path, prepend UNC share notation
if netloc:
netloc = '\\\\' + netloc
path = urllib_request.url2pathname(netloc + path)
return path
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
return url
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
archives = (
'.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.whl'
)
ext = splitext(name)[1].lower()
if ext in archives:
return True
return False
def unpack_vcs_link(link, location, only_download=False):
vcs_backend = _get_used_vcs_backend(link)
if only_download:
vcs_backend.export(location)
else:
vcs_backend.unpack(location)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
return link.url.lower().startswith('file:')
def _check_hash(download_hash, link):
if download_hash.digest_size != hashlib.new(link.hash_name).digest_size:
logger.critical(
"Hash digest size of the package %d (%s) doesn't match the "
"expected hash name %s!",
download_hash.digest_size, link, link.hash_name,
)
raise HashMismatch('Hash name mismatch for package %s' % link)
if download_hash.hexdigest() != link.hash:
logger.critical(
"Hash of the package %s (%s) doesn't match the expected hash %s!",
link, download_hash.hexdigest(), link.hash,
)
raise HashMismatch(
'Bad %s hash for package %s' % (link.hash_name, link)
)
def _get_hash_from_file(target_file, link):
try:
download_hash = hashlib.new(link.hash_name)
except (ValueError, TypeError):
logger.warning(
"Unsupported hash name %s for package %s", link.hash_name, link,
)
return None
with open(target_file, 'rb') as fp:
while True:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
return download_hash
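# Illustrative note (not part of the original module): for a link whose
# hash_name is "sha256", _get_hash_from_file() streams the file in 4096-byte
# chunks, so large downloads are hashed without being read into memory; the
# returned object's hexdigest() is what _check_hash() compares to link.hash.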
def _download_url(resp, link, content_file):
download_hash = None
if link.hash and link.hash_name:
try:
download_hash = hashlib.new(link.hash_name)
except ValueError:
logger.warning(
"Unsupported hash name %s for package %s",
link.hash_name, link,
)
try:
total_length = int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
cached_resp = getattr(resp, "from_cache", False)
if logger.getEffectiveLevel() > logging.INFO:
show_progress = False
elif cached_resp:
show_progress = False
elif total_length > (40 * 1000):
show_progress = True
elif not total_length:
show_progress = True
else:
show_progress = False
show_url = link.show_url
def resp_read(chunk_size):
try:
# Special case for urllib3.
for chunk in resp.raw.stream(
chunk_size,
                    # We use decode_content=False here because we do not
                    # want urllib3 to mess with the raw bytes we get
# from the server. If we decompress inside of
# urllib3 then we cannot verify the checksum
# because the checksum will be of the compressed
# file. This breakage will only occur if the
# server adds a Content-Encoding header, which
# depends on how the server was configured:
# - Some servers will notice that the file isn't a
# compressible file and will leave the file alone
# and with an empty Content-Encoding
# - Some servers will notice that the file is
# already compressed and will leave the file
# alone and will add a Content-Encoding: gzip
# header
# - Some servers won't notice anything at all and
# will take a file that's already been compressed
# and compress it again and set the
# Content-Encoding: gzip header
#
# By setting this not to decode automatically we
# hope to eliminate problems with the second case.
decode_content=False):
yield chunk
except AttributeError:
# Standard file-like object.
while True:
chunk = resp.raw.read(chunk_size)
if not chunk:
break
yield chunk
progress_indicator = lambda x, *a, **k: x
if link.netloc == PyPI.netloc:
url = show_url
else:
url = link.url_without_fragment
if show_progress: # We don't show progress on cached responses
if total_length:
logger.info(
"Downloading %s (%s)", url, format_size(total_length),
)
progress_indicator = DownloadProgressBar(
max=total_length,
).iter
else:
logger.info("Downloading %s", url)
progress_indicator = DownloadProgressSpinner().iter
elif cached_resp:
logger.info("Using cached %s", url)
else:
logger.info("Downloading %s", url)
logger.debug('Downloading from URL %s', link)
for chunk in progress_indicator(resp_read(4096), 4096):
if download_hash is not None:
download_hash.update(chunk)
content_file.write(chunk)
if link.hash and link.hash_name:
_check_hash(download_hash, link)
return download_hash
def _copy_file(filename, location, content_type, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(download_location), ('i', 'w', 'b'))
if response == 'i':
copy = False
elif response == 'w':
logger.warning('Deleting %s', display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warning(
'Backing up %s to %s',
display_path(download_location),
display_path(dest_file),
)
shutil.move(download_location, dest_file)
if copy:
shutil.copy(filename, download_location)
logger.info('Saved %s', display_path(download_location))
def unpack_http_url(link, location, download_dir=None, session=None):
if session is None:
raise TypeError(
"unpack_http_url() missing 1 required keyword argument: 'session'"
)
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
# If a download dir is specified, is the file already downloaded there?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link, download_dir)
if already_downloaded_path:
from_path = already_downloaded_path
content_type = mimetypes.guess_type(from_path)[0]
else:
# let's download to a tmp dir
from_path, content_type = _download_http_url(link, session, temp_dir)
    # unpack the archive to the build dir location. Even when only downloading
    # archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified; let's copy the archive there
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, content_type, link)
if not already_downloaded_path:
os.unlink(from_path)
rmtree(temp_dir)
def unpack_file_url(link, location, download_dir=None):
"""Unpack link into location.
If download_dir is provided and link points to a file, make a copy
of the link file inside download_dir."""
link_path = url_to_path(link.url_without_fragment)
# If it's a url to a local directory
if os.path.isdir(link_path):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
if download_dir:
logger.info('Link is a directory, ignoring download_dir')
return
# if link has a hash, let's confirm it matches
if link.hash:
link_path_hash = _get_hash_from_file(link_path, link)
_check_hash(link_path_hash, link)
# If a download dir is specified, is the file already there and valid?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link, download_dir)
if already_downloaded_path:
from_path = already_downloaded_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
    # unpack the archive to the build dir location. Even when only downloading
    # archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified and not already downloaded
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, content_type, link)
class PipXmlrpcTransport(xmlrpc_client.Transport):
"""Provide a `xmlrpclib.Transport` implementation via a `PipSession`
object.
"""
def __init__(self, index_url, session, use_datetime=False):
xmlrpc_client.Transport.__init__(self, use_datetime)
index_parts = urllib_parse.urlparse(index_url)
self._scheme = index_parts.scheme
self._session = session
def request(self, host, handler, request_body, verbose=False):
parts = (self._scheme, host, handler, None, None, None)
url = urllib_parse.urlunparse(parts)
try:
headers = {'Content-Type': 'text/xml'}
response = self._session.post(url, data=request_body,
headers=headers, stream=True)
response.raise_for_status()
self.verbose = verbose
return self.parse_response(response.raw)
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s",
exc.response.status_code, url,
)
raise
def unpack_url(link, location, download_dir=None,
only_download=False, session=None):
"""Unpack link.
If link is a VCS link:
if only_download, export into download_dir and ignore location
else unpack into location
for other types of link:
- unpack into location
- if download_dir, copy the file into download_dir
- if only_download, mark location for deletion
"""
# non-editable vcs urls
if is_vcs_url(link):
unpack_vcs_link(link, location, only_download)
# file urls
elif is_file_url(link):
unpack_file_url(link, location, download_dir)
if only_download:
write_delete_marker_file(location)
# http urls
else:
if session is None:
session = PipSession()
unpack_http_url(
link,
location,
download_dir,
session,
)
if only_download:
write_delete_marker_file(location)
def _download_http_url(link, session, temp_dir):
"""Download link url into temp_dir using provided session"""
target_url = link.url.split('#', 1)[0]
try:
resp = session.get(
target_url,
# We use Accept-Encoding: identity here because requests
# defaults to accepting compressed responses. This breaks in
# a variety of ways depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible
# file and will leave the file alone and with an empty
# Content-Encoding
# - Some servers will notice that the file is already
# compressed and will leave the file alone and will add a
# Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take
# a file that's already been compressed and compress it again
# and set the Content-Encoding: gzip header
            # By setting this to request only the identity encoding we're
# hoping to eliminate the third case. Hopefully there does not
# exist a server which when given a file will notice it is
# already compressed and that you're not asking for a
# compressed file and will then decompress it before sending
# because if that's the case I don't think it'll ever be
# possible to make this work.
headers={"Accept-Encoding": "identity"},
stream=True,
)
resp.raise_for_status()
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s", exc.response.status_code, link,
)
raise
content_type = resp.headers.get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
file_path = os.path.join(temp_dir, filename)
with open(file_path, 'wb') as content_file:
_download_url(resp, link, content_file)
return file_path, content_type
def _check_download_dir(link, download_dir):
""" Check download_dir for previously downloaded file with correct hash
If a correct file is found return its path else None
"""
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
# If already downloaded, does its hash match?
logger.info('File was already downloaded %s', download_path)
if link.hash:
download_hash = _get_hash_from_file(download_path, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warning(
'Previously-downloaded file %s has bad hash, '
're-downloading.',
download_path
)
os.unlink(download_path)
return None
return download_path
return None
| mit |
MwanzanFelipe/rockletonfortune | lib/django/conf/urls/__init__.py | 264 | 4592 | import warnings
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import (
LocaleRegexURLResolver, RegexURLPattern, RegexURLResolver,
)
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
__all__ = ['handler400', 'handler403', 'handler404', 'handler500', 'include', 'patterns', 'url']
handler400 = 'django.views.defaults.bad_request'
handler403 = 'django.views.defaults.permission_denied'
handler404 = 'django.views.defaults.page_not_found'
handler500 = 'django.views.defaults.server_error'
def include(arg, namespace=None, app_name=None):
if app_name and not namespace:
raise ValueError('Must specify a namespace if specifying app_name.')
if app_name:
warnings.warn(
'The app_name argument to django.conf.urls.include() is deprecated. '
'Set the app_name in the included URLconf instead.',
RemovedInDjango20Warning, stacklevel=2
)
if isinstance(arg, tuple):
# callable returning a namespace hint
try:
urlconf_module, app_name = arg
except ValueError:
if namespace:
raise ImproperlyConfigured(
'Cannot override the namespace for a dynamic module that provides a namespace'
)
warnings.warn(
'Passing a 3-tuple to django.conf.urls.include() is deprecated. '
'Pass a 2-tuple containing the list of patterns and app_name, '
'and provide the namespace argument to include() instead.',
RemovedInDjango20Warning, stacklevel=2
)
urlconf_module, app_name, namespace = arg
else:
# No namespace hint - use manually provided namespace
urlconf_module = arg
if isinstance(urlconf_module, six.string_types):
urlconf_module = import_module(urlconf_module)
patterns = getattr(urlconf_module, 'urlpatterns', urlconf_module)
app_name = getattr(urlconf_module, 'app_name', app_name)
if namespace and not app_name:
warnings.warn(
'Specifying a namespace in django.conf.urls.include() without '
'providing an app_name is deprecated. Set the app_name attribute '
'in the included module, or pass a 2-tuple containing the list of '
'patterns and app_name instead.',
RemovedInDjango20Warning, stacklevel=2
)
namespace = namespace or app_name
# Make sure we can iterate through the patterns (without this, some
# testcases will break).
if isinstance(patterns, (list, tuple)):
for url_pattern in patterns:
# Test if the LocaleRegexURLResolver is used within the include;
# this should throw an error since this is not allowed!
if isinstance(url_pattern, LocaleRegexURLResolver):
raise ImproperlyConfigured(
'Using i18n_patterns in an included URLconf is not allowed.')
return (urlconf_module, app_name, namespace)
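# Illustrative usage in a URLconf (hypothetical module and namespace names):
#   url(r'^blog/', include('blog.urls', namespace='blog'))
# include() returns the (urlconf_module, app_name, namespace) triple, which
# url() below unpacks into a RegexURLResolver.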
def patterns(prefix, *args):
warnings.warn(
'django.conf.urls.patterns() is deprecated and will be removed in '
'Django 1.10. Update your urlpatterns to be a list of '
'django.conf.urls.url() instances instead.',
RemovedInDjango110Warning, stacklevel=2
)
pattern_list = []
for t in args:
if isinstance(t, (list, tuple)):
t = url(prefix=prefix, *t)
elif isinstance(t, RegexURLPattern):
t.add_prefix(prefix)
pattern_list.append(t)
return pattern_list
def url(regex, view, kwargs=None, name=None, prefix=''):
if isinstance(view, (list, tuple)):
# For include(...) processing.
urlconf_module, app_name, namespace = view
return RegexURLResolver(regex, urlconf_module, kwargs, app_name=app_name, namespace=namespace)
else:
if isinstance(view, six.string_types):
warnings.warn(
'Support for string view arguments to url() is deprecated and '
'will be removed in Django 1.10 (got %s). Pass the callable '
'instead.' % view,
RemovedInDjango110Warning, stacklevel=2
)
if not view:
raise ImproperlyConfigured('Empty URL pattern view name not permitted (for pattern %r)' % regex)
if prefix:
view = prefix + '.' + view
return RegexURLPattern(regex, view, kwargs, name)
| bsd-3-clause |
Mariaanisimova/pythonintask | INBa/2015/Shemenev_A_V/task_100_30.py | 1 | 3036 | #ะะฐะดะฐัะฐ โ10, ะะฐัะธะฐะฝั 30
#ะะฐะฟะธัะธัะต ะฟัะพะณัะฐะผะผั "ะะตะฝะตัะฐัะพั ะฟะตััะพะฝะฐะถะตะน" ะดะปั ะธะณัั.ะะพะปัะทะพะฒะฐัะตะปั ะดะพะปะถะฝะพ ะฑััั ะฟัะตะดะพััะฐะฒะปะตะฝะพ 30 ะฟัะฝะบัะพะฒ,
#ะบะพัะพััะต ะผะพะถะฝะพ ัะฐัะฟัะตะดะตะปะธัั ะผะตะถะดั ัะตััััะผั ั
ะฐัะฐะบัะตัะธััะธะบะฐะผะธ: ะกะธะปะฐ, ะะดะพัะพะฒัะต, ะัะดัะพััั ะธ ะะพะฒะบะพััั.
#ะะฐะดะพ ัะดะตะปะฐัั ัะฐะบ, ััะพะฑั ะฟะพะปัะทะพะฒะฐัะตะปั ะผะพะณ ะฝะต ัะพะปัะบะพ ะฑัะฐัั ััะธ ะฟัะฝะบัั ะธะท ะพะฑัะตะณะพ "ะฟัะปะฐ", ะฝะพ ะธ ะฒะพะทะฒัะฐัะฐัั ะธั
ััะดะฐ ะธะท ั
ะฐัะฐะบัะตัะธััะธะบ,
#ะบะพัะพััะผ ะพะฝ ัะตัะธะป ะฟัะธัะฒะพะธัั ะดััะณะธะต ะทะฝะฐัะตะฝะธั.
#ะจะตะผะตะฝะตะฒ ะ.ะ
#28.04.2016
print ("""
ะะพะฑัะพ ะฟะพะถะฐะปะพะฒะฐัั ะฒ "ะะตะฝะตัะฐัะพั ะฟะตััะพะฝะฐะถะตะน".
ะั ะผะพะถะตัะต ัะฐัะฟัะตะดะตะปะธัั 30 ะพัะบะพะฒ ะผะตะถะดั 4 ั
ะฐัะฐะบัะตัะธััะธะบะฐะผะธ:
ะกะธะปะฐ, ะะดะพัะพะฒัะต, ะัะดัะพััั ะธ ะะพะฒะบะพััั. ะั ะผะพะถะตัะต ะบะฐะบ ะธ ะฑัะฐัั ะธะท ะพะฑัะตะณะพ
ัะธัะปะฐ ะฟัะฝะบะพัะฒ, ัะฐะบ ะธ ะฒะพะทะฒัะฐัะฐัั. ะ ะฐัะฟัะตะดะตะปัะนัะต ั
ะฐัะฐะบัะตัะธััะธะบะธ ั ัะผะพะผ. ะฃะดะฐัะธ!
""")
STR=0
HP=0
INT=0
AGL=0
point=30
number=0
print("ะัะปะธ ั
ะพัะธัะต ะธะทะผะตะฝะธัั ะกะธะปั, ัะพ ะฝะฐะฟะธัะธัะต 'ะกะธะปะฐ'. ะัะปะธ ะะดะพัะพะฒัะต, ัะพ 'ะะดะพัะพะฒัะต'. ะัะปะธ ะัะดัะพััั, ัะพ 'ะัะดัะพััั'. ะัะปะธ ะบ ะะพะฒะบะพััั, ัะพ 'ะะพะฒะบะพััั'.")
while True:
if STR<0 or HP<0 or INT<0 or AGL<0 or point>30:
print("ะัะธะฑะบะฐ")
break
#number=int(input("ะะฐะฟะธัะธัะต ัะฝะพะฒะฐ"))
elif point==0:
print("ะั ัะฐัะฟัะตะดะตะปะธะปะธ ะพัะบะธ. ะั
ัะฐัะฟัะตะดะตะปะตะฝะธะต:\nะกะธะปะฐ:",STR,"\nะะดะพัะพะฒัะต:",HP,"\nะัะดัะพััั:",INT,"\nะะพะฒะบะพััั:",AGL)
break
print("ะะฐัะธ ะพัะบะธ:\nะกะธะปะฐ:",STR,"\nะะดะพัะพะฒัะต:",HP,"\nะัะดัะพััั:",INT,"\nะะพะฒะบะพััั:",AGL,"\nะะตัะฐัะฟัะตะดะตะปัะฝะฝัะต ะพัะบะธ:",point)
user_input=input("")
if user_input=="ะกะธะปะฐ" :
number=int(input("ะกะบะพะปัะบะพ ั
ะพัะธัะต ะฟัะธะฑะฐะฒะธัั (ะพัะฑะฐะฒะธัั)?"))
if chislo <= point :
STR+=number
point-=number
else :
print('ะกะปะธัะบะพะผ ะผะฝะพะณะพ')
elif user_input=="ะะดะพัะพะฒัะต":
number=int(input("ะกะบะพะปัะบะพ ั
ะพัะธัะต ะฟัะธะฑะฐะฒะธัั (ะพัะฑะฐะฒะธัั)?"))
if chislo <= point :
HP+=number
point-=number
else :
print('ะกะปะธัะบะพะผ ะผะฝะพะณะพ')
elif user_input=="ะัะดัะพััั":
number=int(input("ะกะบะพะปัะบะพ ั
ะพัะธัะต ะฟัะธะฑะฐะฒะธัั (ะพัะฑะฐะฒะธัั)?"))
if number <= point :
INT+=number
point-=number
else :
print('ะกะปะธัะบะพะผ ะผะฝะพะณะพ')
elif user_input=="ะะพะฒะบะพััั":
number=int(input("ะกะบะพะปัะบะพ ั
ะพัะธัะต ะฟัะธะฑะฐะฒะธัั (ะพัะฑะฐะฒะธัั)?"))
if chislo <= point :
AGL+=number
point-=number
else :
print('ะกะปะธัะบะพะผ ะผะฝะพะณะพ')
input("ะะฐะถะผะธัะต Enter ะดะปั ะฒัั
ะพะดะฐ.")
| apache-2.0 |
SangramChavan/Ubuntu-16.04-new-installation | GreenHat.py | 5 | 1211 | # Copyright (c) 2015 Angus H. (4148)
# Distributed under the GNU General Public License v3.0 (GPLv3).
from datetime import date, timedelta
from random import randint
from time import sleep
import sys
import subprocess
import os
# returns a date string for the date that is N days before STARTDATE
def get_date_string(n, startdate):
d = startdate - timedelta(days=n)
rtn = d.strftime("%a %b %d %X %Y %z -0400")
return rtn
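# Illustrative call (hypothetical start date, not part of the original script):
# with startdate = date(2015, 6, 10), get_date_string(1, startdate) formats
# 2015-06-09 roughly as "Tue Jun 09 00:00:00 2015  -0400" (the %z field is
# empty for a plain date), which is the form GIT_AUTHOR_DATE/GIT_COMMITTER_DATE
# receive below.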
# main app
def main(argv):
if len(argv) < 1 or len(argv) > 2:
print "Error: Bad input."
sys.exit(1)
n = int(argv[0])
if len(argv) == 1:
startdate = date.today()
if len(argv) == 2:
startdate = date(int(argv[1][0:4]), int(argv[1][5:7]), int(argv[1][8:10]))
i = 0
while i <= n:
curdate = get_date_string(i, startdate)
num_commits = randint(1, 10)
for commit in range(0, num_commits):
subprocess.call("echo '" + curdate + str(randint(0, 1000000)) +"' > realwork.txt; git add realwork.txt; GIT_AUTHOR_DATE='" + curdate + "' GIT_COMMITTER_DATE='" + curdate + "' git commit -m 'update'; git push;", shell=True)
sleep(.5)
i += 1
subprocess.call("git rm realwork.txt; git commit -m 'delete'; git push;", shell=True)
if __name__ == "__main__":
main(sys.argv[1:])
| mit |
jereze/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
kanpol/eli | 2012/plugins_python/htmlize/core.py | 13 | 2324 | #-------------------------------------------------------------------------------
# htmlize: htmlize/core.py
#
# The core functionality of htmlize.
#
# Eli Bendersky ([email protected])
# This code is in the public domain
#-------------------------------------------------------------------------------
from collections import namedtuple
import re
# Regex for matching/capturing role text.
# E.g. :name:`text` - first capture group is "name", second group is "text"
#
ROLE_REGEX = re.compile(r':(\w+):`([^`]*)`')
RoleMatch = namedtuple('RoleMatch', 'name contents')
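# For example, ":code:`print(42)`" matches with group(1) == "code" and
# group(2) == "print(42)", i.e. RoleMatch(name="code", contents="print(42)").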
def htmlize(post, db, plugins=[]):
""" pass
"""
contents = post.contents
# Plugins are classes - we need to instantiate them to get objects.
plugins = [P(post, db) for P in plugins]
# Split the contents to paragraphs
paragraphs = re.split(r'\n\n+', contents)
for i, p in enumerate(paragraphs):
paragraphs[i] = '<p>' + p.replace('\n', ' ') + '</p>'
contents = '\n\n'.join(paragraphs)
# Find roles in the contents. Create a list of parts, where each
# part is either text that has no roles in it, or a RoleMatch
# object.
pos = 0
parts = []
while True:
match = ROLE_REGEX.search(contents, pos)
if match is None:
parts.append(contents[pos:])
break
parts.append(contents[pos:match.start()])
parts.append(RoleMatch(match.group(1), match.group(2)))
pos = match.end()
# Ask plugins to act on roles
for i, part in enumerate(parts):
if isinstance(part, RoleMatch):
parts[i] = _plugin_replace_role(
part.name, part.contents, plugins)
# Build full contents back again, and ask plugins to act on
# contents.
contents = ''.join(parts)
for p in plugins:
contents_hook = p.get_contents_hook()
if contents_hook:
contents = contents_hook(contents)
return contents
def _plugin_replace_role(name, contents, plugins):
""" The first plugin that handles this role is used.
"""
for p in plugins:
role_hook = p.get_role_hook(name)
if role_hook:
return role_hook(contents)
# If no plugin handling this role is found, return its original form
return ':{0}:`{1}`'.format(name, contents)
| unlicense |
jeremiedecock/snippets | python/matplotlib/hist_logscale_x.py | 1 | 1804 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Make a histogram using a logarithmic scale on X axis
See:
- http://stackoverflow.com/questions/6855710/how-to-have-logarithmic-bins-in-a-python-histogram
"""
import numpy as np
import matplotlib.pyplot as plt
# SETUP #######################################################################
# histtype : [โbarโ | โbarstackedโ | โstepโ | โstepfilledโ]
HIST_TYPE='bar'
ALPHA=0.5
# MAKE DATA ###################################################################
data = np.random.exponential(size=1000000)
#data = np.abs(np.random.normal(size=1000000) * 10000.)
#data = np.random.chisquare(10, size=1000000)
# INIT FIGURE #################################################################
fig = plt.figure(figsize=(8.0, 6.0))
# AX1 #########################################################################
ax1 = fig.add_subplot(211)
res_tuple = ax1.hist(data,
bins=50,
histtype=HIST_TYPE,
alpha=ALPHA)
ax1.set_title("Normal scale")
ax1.set_xlabel("Value")
ax1.set_ylabel("Count")
# AX2 #########################################################################
ax2 = fig.add_subplot(212)
vmin = np.log10(data.min())
vmax = np.log10(data.max())
bins = np.logspace(vmin, vmax, 50) # <- make a range from 10**vmin to 10**vmax
print(bins)
res_tuple = ax2.hist(data,
bins=bins,
histtype=HIST_TYPE,
alpha=ALPHA)
ax2.set_xscale("log") # <- Activate log scale on X axis
ax2.set_title("Log scale")
ax2.set_xlabel("Value")
ax2.set_ylabel("Count")
# SHOW AND SAVE FILE ##########################################################
plt.tight_layout()
plt.savefig("hist_logscale_x.png")
plt.show()
| mit |
kekivelez/DjangoSkeleton | tcshealth/users/views.py | 1 | 7243 | from django.shortcuts import render
# Create your views here.
import uuid
from rest_framework import status
from rest_framework.response import Response
from rest_framework import generics
from rest_framework import serializers as DRFserializers
from rest_framework import permissions
from rest_framework import renderers
from rest_framework_jwt.views import ObtainJSONWebToken
from django.shortcuts import get_object_or_404
from . import serializers
from .models import User
from ..utils.views import PutUpdateAPIView
class AccessTokenView(ObtainJSONWebToken):
renderer_classes = (renderers.JSONRenderer, renderers.BrowsableAPIRenderer)
def post(self, request):
"""
Returns a Access Token that can be used for authenticated requests.
---
type:
email:
required: true
type: string
password:
required: true
type: string
omit_serializer: true
parameters:
- name: body
              description: Credentials to get an API access token.
required: true
type: AccessTokenPostResponse
paramType: body
"""
return super(AccessTokenView, self).post(request)
class RegisterView(generics.CreateAPIView):
authentication_classes = ()
permission_classes = ()
serializer_class = serializers.RegisterUserSerializer
def perform_create(self, serializer):
user = serializer.save(token_version=str(uuid.uuid4()))
user.send_account_activation_email()
def post(self, request, *args, **kwargs):
"""
Creates a new User and sends an email with the activation url.
"""
return super(RegisterView, self).post(request, *args, **kwargs)
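# Illustrative request flow (hypothetical route, not part of the original
# module): POSTing {"email": ..., "password": ...} to the URL wired to
# RegisterView validates the payload with RegisterUserSerializer, and
# perform_create() then stamps a random token_version and sends the account
# activation email.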
class ChangePasswordView(PutUpdateAPIView):
model = User
permission_classes = (permissions.IsAuthenticated,)
serializer_class = serializers.ChangePasswordSerializer
def get_object(self):
return self.request.user
def put(self, request, *args, **kwargs):
"""
Changes the password of the current user.
---
omit_serializer: true
parameters:
- name: Authorization
description: The authorization token. The format for the header value is 'Bearer (token)'.
required: true
type: string
paramType: header
"""
super(ChangePasswordView, self).put(request, *args, **kwargs)
return Response(
data={"change_password": True}, status=status.HTTP_200_OK)
class UsersView(generics.ListAPIView):
model = User
serializer_class = serializers.UserSerializer
queryset = User.objects.all()
authentication_classes = ()
permission_classes = ()
filter_fields = ('email', 'gender', 'height')
search_fields = ('email', 'first_name', 'last_name')
ordering_fields = ('email', 'first_name', 'last_name')
def get(self, request, *args, **kwargs):
"""
Returns a list of all users.
---
parameters:
- name: search
description: Text to search for.
required: false
type: string
paramType: query
- name: email
description: Value to filter by. Example, [email protected]
required: false
type: string
paramType: query
- name: ordering
description: Values to order by. Example, order_by=email,country
required: false
type: string
paramType: query
"""
return super(UsersView, self).get(request, *args, **kwargs)
class CurrentUserView(generics.RetrieveAPIView):
model = User
serializer_class = serializers.UserSerializer
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
return self.request.user
def get(self, request, *args, **kwargs):
"""
Returns the information of the current user.
---
parameters:
- name: Authorization
description: The authorization token. The format for the header value is 'Bearer (token)'.
required: true
type: string
paramType: header
"""
return super(CurrentUserView, self).get(request, *args, **kwargs)
class ForgotPasswordView(generics.CreateAPIView):
authentication_classes = ()
permission_classes = ()
serializer_class = serializers.ForgotPasswordSerializer
def post(self, request, *args, **kwargs):
"""
Sends the forgot password email for the user with the given email.
"""
serializer = self.get_serializer(data=request.DATA)
serializer.is_valid(raise_exception=True)
serializer.send_password_reset_email()
return Response(serializer.validated_data)
class ResetPasswordView(generics.CreateAPIView):
authentication_classes = ()
permission_classes = ()
serializer_class = serializers.ResetPasswordSerializer
def post(self, request, *args, **kwargs):
"""
Resets the password for the current user.
---
omit_serializer: true
parameters:
- name: token
description: Password Reset token.
required: true
type: string
paramType: form
"""
serializer = self.get_serializer(data=request.DATA)
serializer.is_valid(raise_exception=True)
return Response(serializer.validated_data)
class UserSettingsView(generics.UpdateAPIView):
model = User
permission_classes = (permissions.IsAuthenticated,)
serializer_class = serializers.UserSerializer
def get_object(self):
return self.request.user
def put(self, request, *args, **kwargs):
"""
Updates the information of the current user.
---
parameters:
- name: Authorization
description: The authorization token. The format for the header value is 'Bearer (token)'.
required: true
type: string
paramType: header
"""
return super(UserSettingsView, self).put(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
"""
Updates the information of the current user partially.
---
parameters:
- name: Authorization
description: The authorization token. The format for the header value is 'Bearer (token)'.
required: true
type: string
paramType: header
"""
return super(UserSettingsView, self).patch(request, *args, **kwargs)
class SpecificUserView(generics.RetrieveAPIView):
model = User
serializer_class = serializers.UserSerializer
authentication_classes = ()
permission_classes = ()
def get_object(self):
return get_object_or_404(User, pk=self.kwargs['pk'])
def get(self, request, *args, **kwargs):
"""
Returns the public information of a user with the given id.
"""
return super(SpecificUserView, self).get(request, *args, **kwargs) | gpl-2.0 |
terryyin/linkchecker | third_party/dnspython/dns/rdtypes/ANY/NSEC.py | 100 | 4812 | # Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import dns.exception
import dns.rdata
import dns.rdatatype
import dns.name
class NSEC(dns.rdata.Rdata):
"""NSEC record
@ivar next: the next name
@type next: dns.name.Name object
@ivar windows: the windowed bitmap list
@type windows: list of (window number, string) tuples"""
__slots__ = ['next', 'windows']
def __init__(self, rdclass, rdtype, next, windows):
super(NSEC, self).__init__(rdclass, rdtype)
self.next = next
self.windows = windows
def to_text(self, origin=None, relativize=True, **kw):
next = self.next.choose_relativity(origin, relativize)
text = ''
for (window, bitmap) in self.windows:
bits = []
for i in xrange(0, len(bitmap)):
byte = ord(bitmap[i])
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns.rdatatype.to_text(window * 256 + \
i * 8 + j))
text += (' ' + ' '.join(bits))
return '%s%s' % (next, text)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
next = tok.get_name()
next = next.choose_relativity(origin, relativize)
rdtypes = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
nrdtype = dns.rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns.exception.SyntaxError("NSEC with bit 0")
if nrdtype > 65535:
raise dns.exception.SyntaxError("NSEC with bit > 65535")
rdtypes.append(nrdtype)
rdtypes.sort()
window = 0
octets = 0
prior_rdtype = 0
bitmap = ['\0'] * 32
windows = []
for nrdtype in rdtypes:
if nrdtype == prior_rdtype:
continue
prior_rdtype = nrdtype
new_window = nrdtype // 256
if new_window != window:
windows.append((window, ''.join(bitmap[0:octets])))
bitmap = ['\0'] * 32
window = new_window
offset = nrdtype % 256
byte = offset // 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = chr(ord(bitmap[byte]) | (0x80 >> bit))
windows.append((window, ''.join(bitmap[0:octets])))
return cls(rdclass, rdtype, next, windows)
from_text = classmethod(from_text)
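    # Illustrative encoding (not part of the original module): the types A (1)
    # and MX (15) both fall into window 0, so from_text() sets bit 1 and bit 15
    # of the window-0 bitmap, yielding the single window (0, '\x40\x01'),
    # i.e. two octets with 0x80 >> 1 and 0x80 >> 7 set.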
def to_wire(self, file, compress = None, origin = None):
self.next.to_wire(file, None, origin)
for (window, bitmap) in self.windows:
file.write(chr(window))
file.write(chr(len(bitmap)))
file.write(bitmap)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(next, cused) = dns.name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
windows = []
while rdlen > 0:
if rdlen < 3:
raise dns.exception.FormError("NSEC too short")
window = ord(wire[current])
octets = ord(wire[current + 1])
if octets == 0 or octets > 32:
raise dns.exception.FormError("bad NSEC octets")
current += 2
rdlen -= 2
if rdlen < octets:
raise dns.exception.FormError("bad NSEC bitmap length")
bitmap = wire[current : current + octets].unwrap()
current += octets
rdlen -= octets
windows.append((window, bitmap))
if not origin is None:
next = next.relativize(origin)
return cls(rdclass, rdtype, next, windows)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.next = self.next.choose_relativity(origin, relativize)
def _cmp(self, other):
return self._wire_cmp(other)
| gpl-2.0 |
purushothamc/myibitsolutions | strings/justified_text.py | 1 | 1674 | def fullJustify(strings_list, number):
result = []
if not strings_list or number <= 0:
return result
current_length, idx, firstWord = 0, 0, True
for word in strings_list:
if firstWord:
result.append(word)
current_length += len(result[-1])
firstWord = False
else:
next_word = " " + word
current_length += len(next_word)
if current_length <= number:
result[-1] += next_word
else:
current_length = len(word)
result.append(word)
result_len = len(result)
for idx in xrange(result_len):
string = result[idx]
space_count = string.count(" ")
string_len = len(string)
difference = number - string_len
if (difference > 0 and space_count == 0) or idx == result_len - 1:
string += " "*difference
result[idx] = string
else:
extra_left = difference % space_count
to_pad = difference / space_count
temp_list = []
for char in string:
if char != " ":
temp_list.append(char)
else:
spaced_char = ""
if extra_left:
spaced_char = " "
extra_left -= 1
spaced_char += " " + to_pad*" "
temp_list.append(spaced_char)
result[idx] = "".join(temp_list)
print result
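# Illustrative expectation (hedged; spacing follows the greedy packing above):
# with number=16, ["This", "is", "an", "example", "of", "text",
# "justification."] should print as fully justified 16-character lines such as
# "This    is    an", "example  of text" and "justification.  ".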
A = ["This", "is", "an", "example", "of", "text", "justification."]
A = [ "What", "must", "be", "shall", "be." ]
B = 12
fullJustify(A, B) | gpl-3.0 |
lesh1k/beatport-verifier | venv/lib/python2.7/site-packages/wheel/pep425tags.py | 220 | 2861 | """Generate and work with PEP 425 Compatibility Tags."""
import sys
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import distutils.util
def get_abbr_impl():
"""Return abbreviated implementation name."""
if hasattr(sys, 'pypy_version_info'):
pyimpl = 'pp'
elif sys.platform.startswith('java'):
pyimpl = 'jy'
elif sys.platform == 'cli':
pyimpl = 'ip'
else:
pyimpl = 'cp'
return pyimpl
def get_impl_ver():
"""Return implementation version."""
impl_ver = sysconfig.get_config_var("py_version_nodot")
if not impl_ver:
impl_ver = ''.join(map(str, sys.version_info[:2]))
return impl_ver
def get_platform():
"""Return our platform name 'win32', 'linux_x86_64'"""
# XXX remove distutils dependency
return distutils.util.get_platform().replace('.', '_').replace('-', '_')
def get_supported(versions=None):
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
"""
supported = []
# Versions must be given with respect to the preference
if versions is None:
versions = []
major = sys.version_info[0]
# Support all previous minor Python versions.
for minor in range(sys.version_info[1], -1, -1):
versions.append(''.join(map(str, (major, minor))))
impl = get_abbr_impl()
abis = []
soabi = sysconfig.get_config_var('SOABI')
if soabi and soabi.startswith('cpython-'):
abis[0:0] = ['cp' + soabi.split('-', 1)[-1]]
abi3s = set()
import imp
for suffix in imp.get_suffixes():
if suffix[0].startswith('.abi'):
abi3s.add(suffix[0].split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
arch = get_platform()
# Current version, current API (built specifically for our Python):
for abi in abis:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# No abi / arch, but requires our implementation:
for i, version in enumerate(versions):
supported.append(('%s%s' % (impl, version), 'none', 'any'))
if i == 0:
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported
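# Illustrative output (exact tags depend on the interpreter and platform; shown
# for a hypothetical CPython 3.3 on linux_x86_64): get_supported() would start
# with tags such as ('cp33', 'cp33m', 'linux_x86_64'), ('cp33', 'none', 'any'),
# ('py33', 'none', 'any') and ('py3', 'none', 'any').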
| cc0-1.0 |
cjerdonek/pip | pip/exceptions.py | 123 | 1125 | """Exceptions used throughout package"""
from __future__ import absolute_import
class PipError(Exception):
"""Base pip exception"""
class InstallationError(PipError):
"""General exception during installation"""
class UninstallationError(PipError):
"""General exception during uninstallation"""
class DistributionNotFound(InstallationError):
"""Raised when a distribution cannot be found to satisfy a requirement"""
class BestVersionAlreadyInstalled(PipError):
"""Raised when the most up-to-date version of a package is already
installed. """
class BadCommand(PipError):
"""Raised when virtualenv or a command is not found"""
class CommandError(PipError):
"""Raised when there is an error in command-line arguments"""
class PreviousBuildDirError(PipError):
"""Raised when there's a previous conflicting build directory"""
class HashMismatch(InstallationError):
"""Distribution file hash values don't match."""
class InvalidWheelFilename(InstallationError):
"""Invalid wheel filename."""
class UnsupportedWheel(InstallationError):
"""Unsupported wheel."""
| mit |
daodaoliang/bokeh | bokeh/charts/builder/tests/test_line_builder.py | 33 | 2376 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Line
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestLine(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
xyvaluesdf = pd.DataFrame(xyvalues)
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Line, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_pypy'], y_pypy)
assert_array_equal(builder._data['y_jython'], y_jython)
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Line, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
| bsd-3-clause |
opencord/voltha | voltha/adapters/adtran_olt/test/codec/test_olt_state.py | 1 | 4156 | # Copyright 2017-present Adtran, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from resources.sample_json import olt_state_json
from voltha.adapters.adtran_olt.codec.olt_state import OltState
import pytest
@pytest.fixture()
def olt_state_object():
return OltState(olt_state_json)
@pytest.fixture()
def pon_object():
return OltState.Pon(olt_state_json["pon"][0])
@pytest.fixture()
def onu_object():
return OltState.Pon.Onu(olt_state_json["pon"][0]["onu"][0])
@pytest.fixture()
def gem_object():
return OltState.Pon.Gem(olt_state_json["pon"][0]["gem"][0])
def test_olt_to_string(olt_state_object):
assert str(olt_state_object) == "OltState: ngpon2_agent-13.0.32-1.657.815547"
def test_olt_state_software_version(olt_state_object):
assert olt_state_object.software_version == "ngpon2_agent-13.0.32-1.657.815547"
def test_olt_state_pons(olt_state_object):
assert str(olt_state_object.pons[0]) == "OltState.Pon: pon-id: 0"
def test_olt_state_len(olt_state_object):
assert len(olt_state_object) == 16
def test_olt_state_get_item(olt_state_object):
assert str(olt_state_object[1]) == "OltState.Pon: pon-id: 1"
def test_olt_state_get_item_not_int(olt_state_object):
with pytest.raises(TypeError):
olt_state_object["something"]
def test_olt_state_get_item_out_of_bounds(olt_state_object):
with pytest.raises(KeyError):
olt_state_object[16]
def test_olt_state_iter(olt_state_object):
with pytest.raises(NotImplementedError):
for _ in olt_state_object:
pass
def test_olt_state_contains(olt_state_object):
assert 5 in olt_state_object
def test_olt_state_contains_does_not_contain(olt_state_object):
assert not 16 in olt_state_object
def test_olt_state_contains_not_int(olt_state_object):
with pytest.raises(TypeError):
"something" in olt_state_object
def test_pon_to_string(pon_object):
assert str(pon_object) == "OltState.Pon: pon-id: 0"
def test_pon_properties(pon_object):
assert pon_object.pon_id == 0
assert pon_object.downstream_wavelength == 0
assert pon_object.upstream_wavelength == 0
assert pon_object.downstream_channel_id == 15
assert pon_object.rx_packets == 1625773517
assert pon_object.tx_packets == 761098346
assert pon_object.rx_bytes == 145149613233620
assert pon_object.tx_bytes == 141303797318481
assert pon_object.tx_bip_errors == 0
assert pon_object.ont_los == []
assert pon_object.discovered_onu == frozenset()
assert pon_object.wm_tuned_out_onus == "AAAAAAAAAAAAAAAAAAAAAA=="
def test_pon_gems(pon_object):
assert str(pon_object.gems[2176]) == "OltState.Pon.Gem: onu-id: 0, gem-id: 2176"
def test_pon_gems_existing(pon_object):
pon_object._gems = "existing"
assert pon_object.gems == "existing"
def test_pon_onus(pon_object):
assert str(pon_object.onus[0]) == "OltState.Pon.Onu: onu-id: 0"
def test_pon_onus_existing(pon_object):
pon_object._onus = "existing"
assert pon_object.onus == "existing"
def test_onu_properties(onu_object):
assert onu_object.onu_id == 0
assert onu_object.oper_status == "unknown"
assert onu_object.reported_password == "redacted"
assert onu_object.rssi == -207
assert onu_object.equalization_delay == 620952
assert onu_object.fiber_length == 47
def test_gem_properties(gem_object):
assert gem_object.onu_id == 0
assert gem_object.alloc_id == 1024
assert gem_object.gem_id == 2176
assert gem_object.tx_packets == 65405
assert gem_object.tx_bytes == 5420931
assert gem_object.rx_packets == 13859
assert gem_object.rx_bytes == 3242784
| apache-2.0 |
MRigal/django | django/contrib/contenttypes/migrations/0001_initial.py | 585 | 1227 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.contenttypes.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ContentType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('app_label', models.CharField(max_length=100)),
('model', models.CharField(max_length=100, verbose_name='python model class name')),
],
options={
'ordering': ('name',),
'db_table': 'django_content_type',
'verbose_name': 'content type',
'verbose_name_plural': 'content types',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.contenttypes.models.ContentTypeManager()),
],
),
migrations.AlterUniqueTogether(
name='contenttype',
unique_together=set([('app_label', 'model')]),
),
]
| bsd-3-clause |
marinho/geraldo | site/newsite/site-geraldo/django/contrib/localflavor/is_/is_postalcodes.py | 438 | 4913 | # -*- coding: utf-8 -*-
IS_POSTALCODES = (
    ('101', u'101 Reykjavík'),
    ('103', u'103 Reykjavík'),
    ('104', u'104 Reykjavík'),
    ('105', u'105 Reykjavík'),
    ('107', u'107 Reykjavík'),
    ('108', u'108 Reykjavík'),
    ('109', u'109 Reykjavík'),
    ('110', u'110 Reykjavík'),
    ('111', u'111 Reykjavík'),
    ('112', u'112 Reykjavík'),
    ('113', u'113 Reykjavík'),
    ('116', u'116 Kjalarnes'),
    ('121', u'121 Reykjavík'),
    ('123', u'123 Reykjavík'),
    ('124', u'124 Reykjavík'),
    ('125', u'125 Reykjavík'),
    ('127', u'127 Reykjavík'),
    ('128', u'128 Reykjavík'),
    ('129', u'129 Reykjavík'),
    ('130', u'130 Reykjavík'),
    ('132', u'132 Reykjavík'),
    ('150', u'150 Reykjavík'),
    ('155', u'155 Reykjavík'),
    ('170', u'170 Seltjarnarnes'),
    ('172', u'172 Seltjarnarnes'),
    ('190', u'190 Vogar'),
    ('200', u'200 Kópavogur'),
    ('201', u'201 Kópavogur'),
    ('202', u'202 Kópavogur'),
    ('203', u'203 Kópavogur'),
    ('210', u'210 Garðabær'),
    ('212', u'212 Garðabær'),
    ('220', u'220 Hafnarfjörður'),
    ('221', u'221 Hafnarfjörður'),
    ('222', u'222 Hafnarfjörður'),
    ('225', u'225 Álftanes'),
    ('230', u'230 Reykjanesbær'),
    ('232', u'232 Reykjanesbær'),
    ('233', u'233 Reykjanesbær'),
    ('235', u'235 Keflavíkurflugvöllur'),
    ('240', u'240 Grindavík'),
    ('245', u'245 Sandgerði'),
    ('250', u'250 Garður'),
    ('260', u'260 Reykjanesbær'),
    ('270', u'270 Mosfellsbær'),
    ('300', u'300 Akranes'),
    ('301', u'301 Akranes'),
    ('302', u'302 Akranes'),
    ('310', u'310 Borgarnes'),
    ('311', u'311 Borgarnes'),
    ('320', u'320 Reykholt í Borgarfirði'),
    ('340', u'340 Stykkishólmur'),
    ('345', u'345 Flatey á Breiðafirði'),
    ('350', u'350 Grundarfjörður'),
    ('355', u'355 Ólafsvík'),
    ('356', u'356 Snæfellsbær'),
    ('360', u'360 Hellissandur'),
    ('370', u'370 Búðardalur'),
    ('371', u'371 Búðardalur'),
    ('380', u'380 Reykhólahreppur'),
    ('400', u'400 Ísafjörður'),
    ('401', u'401 Ísafjörður'),
    ('410', u'410 Hnífsdalur'),
    ('415', u'415 Bolungarvík'),
    ('420', u'420 Súðavík'),
    ('425', u'425 Flateyri'),
    ('430', u'430 Suðureyri'),
    ('450', u'450 Patreksfjörður'),
    ('451', u'451 Patreksfjörður'),
    ('460', u'460 Tálknafjörður'),
    ('465', u'465 Bíldudalur'),
    ('470', u'470 Þingeyri'),
    ('471', u'471 Þingeyri'),
    ('500', u'500 Staður'),
    ('510', u'510 Hólmavík'),
    ('512', u'512 Hólmavík'),
    ('520', u'520 Drangsnes'),
    ('522', u'522 Kjörvogur'),
    ('523', u'523 Bær'),
    ('524', u'524 Norðurfjörður'),
    ('530', u'530 Hvammstangi'),
    ('531', u'531 Hvammstangi'),
    ('540', u'540 Blönduós'),
    ('541', u'541 Blönduós'),
    ('545', u'545 Skagaströnd'),
    ('550', u'550 Sauðárkrókur'),
    ('551', u'551 Sauðárkrókur'),
    ('560', u'560 Varmahlíð'),
    ('565', u'565 Hofsós'),
    ('566', u'566 Hofsós'),
    ('570', u'570 Fljót'),
    ('580', u'580 Siglufjörður'),
    ('600', u'600 Akureyri'),
    ('601', u'601 Akureyri'),
    ('602', u'602 Akureyri'),
    ('603', u'603 Akureyri'),
    ('610', u'610 Grenivík'),
    ('611', u'611 Grímsey'),
    ('620', u'620 Dalvík'),
    ('621', u'621 Dalvík'),
    ('625', u'625 Ólafsfjörður'),
    ('630', u'630 Hrísey'),
    ('640', u'640 Húsavík'),
    ('641', u'641 Húsavík'),
    ('645', u'645 Fosshóll'),
    ('650', u'650 Laugar'),
    ('660', u'660 Mývatn'),
    ('670', u'670 Kópasker'),
    ('671', u'671 Kópasker'),
    ('675', u'675 Raufarhöfn'),
    ('680', u'680 Þórshöfn'),
    ('681', u'681 Þórshöfn'),
    ('685', u'685 Bakkafjörður'),
    ('690', u'690 Vopnafjörður'),
    ('700', u'700 Egilsstaðir'),
    ('701', u'701 Egilsstaðir'),
    ('710', u'710 Seyðisfjörður'),
    ('715', u'715 Mjóifjörður'),
    ('720', u'720 Borgarfjörður eystri'),
    ('730', u'730 Reyðarfjörður'),
    ('735', u'735 Eskifjörður'),
    ('740', u'740 Neskaupstaður'),
    ('750', u'750 Fáskrúðsfjörður'),
    ('755', u'755 Stöðvarfjörður'),
    ('760', u'760 Breiðdalsvík'),
    ('765', u'765 Djúpivogur'),
    ('780', u'780 Höfn í Hornafirði'),
    ('781', u'781 Höfn í Hornafirði'),
    ('785', u'785 Öræfi'),
    ('800', u'800 Selfoss'),
    ('801', u'801 Selfoss'),
    ('802', u'802 Selfoss'),
    ('810', u'810 Hveragerði'),
    ('815', u'815 Þorlákshöfn'),
    ('820', u'820 Eyrarbakki'),
    ('825', u'825 Stokkseyri'),
    ('840', u'840 Laugarvatn'),
    ('845', u'845 Flúðir'),
    ('850', u'850 Hella'),
    ('851', u'851 Hella'),
    ('860', u'860 Hvolsvöllur'),
    ('861', u'861 Hvolsvöllur'),
    ('870', u'870 Vík'),
    ('871', u'871 Vík'),
    ('880', u'880 Kirkjubæjarklaustur'),
    ('900', u'900 Vestmannaeyjar'),
    ('902', u'902 Vestmannaeyjar')
)
| lgpl-3.0 |
pypot/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
phlax/translate | translate/convert/factory.py | 3 | 7615 | # -*- coding: utf-8 -*-
#
# Copyright 2010 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Factory methods to convert supported input files to supported translatable files."""
import os
import six
#from translate.convert import prop2po, po2prop, odf2xliff, xliff2odf
__all__ = ('converters', 'UnknownExtensionError', 'UnsupportedConversionError')
# Turn into property to support lazy loading of things?
converters = {}
#for module in (prop2po, po2prop, odf2xliff, xliff2odf):
# if not hasattr(module, 'formats'):
# continue
# for extension in module.formats:
# if extension not in converters:
# converters[extension] = []
# converters[extension].append(module.formats[extension])
@six.python_2_unicode_compatible
class UnknownExtensionError(Exception):
def __init__(self, afile):
self.file = afile
def __str__(self):
return 'Unable to find extension for file: %s' % (self.file)
@six.python_2_unicode_compatible
class UnsupportedConversionError(Exception):
def __init__(self, in_ext=None, out_ext=None, templ_ext=None):
self.in_ext = in_ext
self.out_ext = out_ext
self.templ_ext = templ_ext
def __str__(self):
msg = "Unsupported conversion from %s to %s" % (self.in_ext, self.out_ext)
if self.templ_ext:
msg += ' with template %s' % (self.templ_ext)
return msg
def get_extension(filename):
path, fname = os.path.split(filename)
ext = fname.split(os.extsep)[-1]
if ext == fname:
return None
return ext
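# For example, get_extension('docs/readme.txt') returns 'txt', while a name
# with no extension such as 'README' returns None.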
def get_converter(in_ext, out_ext=None, templ_ext=None):
convert_candidates = None
if templ_ext:
if (in_ext, templ_ext) in converters:
convert_candidates = converters[(in_ext, templ_ext)]
else:
raise UnsupportedConversionError(in_ext, out_ext, templ_ext)
else:
if in_ext in converters:
convert_candidates = converters[in_ext]
elif (in_ext,) in converters:
convert_candidates = converters[(in_ext,)]
else:
raise UnsupportedConversionError(in_ext, out_ext)
convert_fn = None
if not out_ext:
out_ext, convert_fn = convert_candidates[0]
else:
for ext, func in convert_candidates:
if ext == out_ext:
convert_fn = func
break
if not convert_fn:
raise UnsupportedConversionError(in_ext, out_ext, templ_ext)
return convert_fn
def get_output_extensions(ext):
"""Compiles a list of possible output extensions for the given input extension."""
out_exts = []
for key in converters:
in_ext = key
if isinstance(key, tuple):
in_ext = key[0]
if in_ext == ext:
for out_ext, convert_fn in converters[key]:
out_exts.append(out_ext)
return out_exts
def convert(inputfile, template=None, options=None, convert_options=None):
"""Convert the given input file to an appropriate output format, optionally
using the given template file and further options.
If the output extension (format) cannot be inferred the first converter
that can handle the input file (and the format/extension it gives as
output) is used.
:type inputfile: file
:param inputfile: The input file to be converted
:type template: file
:param template: Template file to use during conversion
:type options: dict (default: None)
:param options: Valid options are:
- in_ext: The extension (format) of the input file.
- out_ext: The extension (format) to use for the output file.
- templ_ext: The extension (format) of the template file.
- in_fname: File name of the input file; used only to determine
the input file extension (format).
- templ_fname: File name of the template file; used only to
determine the template file extension (format).
:returns: a 2-tuple: The new output file (in a temporary directory) and
the extension (format) of the output file. The caller is
responsible for deleting the (temporary) output file.
"""
in_ext, out_ext, templ_ext = None, None, None
# Get extensions from options
if options is None:
options = {}
else:
if 'in_ext' in options:
in_ext = options['in_ext']
if 'out_ext' in options:
out_ext = options['out_ext']
if template and 'templ_ext' in options:
templ_ext = options['templ_ext']
# If we still do not have extensions, try and get it from the *_fname options
if not in_ext and 'in_fname' in options:
in_ext = get_extension(options['in_fname'])
if template and not templ_ext and 'templ_fname' in options:
        templ_ext = get_extension(options['templ_fname'])
# If we still do not have extensions, get it from the file names
if not in_ext and hasattr(inputfile, 'name'):
in_ext = get_extension(inputfile.name)
if template and not templ_ext and hasattr(template, 'name'):
templ_ext = get_extension(template.name)
if not in_ext:
raise UnknownExtensionError(inputfile)
if template and not templ_ext:
raise UnknownExtensionError(template)
out_ext_candidates = get_output_extensions(in_ext)
if not out_ext_candidates:
# No converters registered for the in_ext we have
raise UnsupportedConversionError(in_ext=in_ext, templ_ext=templ_ext)
if out_ext and out_ext not in out_ext_candidates:
# If out_ext has a value at this point, it was given in options, so
# we just take a second to make sure that the conversion is supported.
raise UnsupportedConversionError(in_ext, out_ext, templ_ext)
if not out_ext and templ_ext in out_ext_candidates:
# If we're using a template, chances are (pretty damn) good that the
# output file will be of the same type
out_ext = templ_ext
else:
# As a last resort, we'll just use the first possible output format
out_ext = out_ext_candidates[0]
# XXX: We are abusing tempfile.mkstemp() below: we are only using it to
# obtain a temporary file name to use the normal open() with. This is
# done because a tempfile.NamedTemporaryFile simply gave too many
# issues when being closed (and deleted) by the rest of the toolkit
# (eg. TranslationStore.savefile()). Therefore none of mkstemp()'s
# security features are being utilised.
import tempfile
tempfd, tempfname = tempfile.mkstemp(prefix='ttk_convert', suffix=os.extsep + out_ext)
os.close(tempfd)
if convert_options is None:
convert_options = {}
with open(tempfname, 'w') as output_file:
get_converter(in_ext, out_ext, templ_ext)(inputfile, output_file, template, **convert_options)
return output_file, out_ext
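# Illustrative sketch (not part of the original module). With the converter
# registration above commented out, ``converters`` is empty, so this only
# demonstrates the intended call pattern; the file name used is hypothetical.
if __name__ == '__main__':
    ext = get_extension('messages.prop')  # -> 'prop'
    print("possible output formats for %s: %s" % (ext, get_output_extensions(ext)))
    # Once converter modules are registered, a caller would continue with e.g.:
    #   with open('messages.prop') as input_file:
    #       output_file, out_ext = convert(input_file,
    #                                      options={'in_ext': ext, 'out_ext': 'po'})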
| gpl-2.0 |
Thraxis/pymedusa | tests/issue_submitter_tests.py | 1 | 2025 | # coding=UTF-8
# Author: Dennis Lutter <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
"""
Test exception logging
"""
import os.path
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from sickbeard import logger, ex
def exception_generator():
"""
Dummy function to raise a fake exception and log it
"""
try:
raise Exception('FAKE EXCEPTION')
except Exception as error:
logger.log(u"FAKE ERROR: " + ex(error), logger.ERROR) # pylint: disable=no-member
logger.submit_errors() # pylint: disable=no-member
raise
class IssueSubmitterBasicTests(unittest.TestCase):
"""
Tests logging of exceptions
"""
def test_submitter(self):
"""
Test that an exception is raised
"""
self.assertRaises(Exception, exception_generator)
if __name__ == "__main__":
print("""
==================
STARTING - ISSUE SUBMITTER TESTS
==================
######################################################################
""")
SUITE = unittest.TestLoader().loadTestsFromTestCase(IssueSubmitterBasicTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| gpl-3.0 |
jimdial/azure-quickstart-templates | splunk-on-ubuntu/scripts/config.py | 119 | 1408 | #     The MIT License (MIT)
#
#     Copyright (c) 2016 Microsoft. All rights reserved.
#
#     Permission is hereby granted, free of charge, to any person obtaining a copy
#     of this software and associated documentation files (the "Software"), to deal
#     in the Software without restriction, including without limitation the rights
#     to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#     copies of the Software, and to permit persons to whom the Software is
#     furnished to do so, subject to the following conditions:
#
#     The above copyright notice and this permission notice shall be included in
#     all copies or substantial portions of the Software.
#
#     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#     IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#     FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#     AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#     LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#     OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#     THE SOFTWARE.
STORAGE_ACCOUNT_NAME = 'YOUR_STORAGE_ACCOUNT_NAME'
STORAGE_ACCOUNT_KEY = 'YOUR_STORAGE_ACCOUNT_KEY'
| mit |
jonashaag/django-nonrel-nohistory | django/contrib/sessions/models.py | 231 | 2034 | import base64
import cPickle as pickle
from django.db import models
from django.utils.translation import ugettext_lazy as _
class SessionManager(models.Manager):
def encode(self, session_dict):
"""
Returns the given session dictionary pickled and encoded as a string.
"""
return SessionStore().encode(session_dict)
def save(self, session_key, session_dict, expire_date):
s = self.model(session_key, self.encode(session_dict), expire_date)
if session_dict:
s.save()
else:
s.delete() # Clear sessions with no data.
return s
class Session(models.Model):
"""
Django provides full support for anonymous sessions. The session
framework lets you store and retrieve arbitrary data on a
per-site-visitor basis. It stores data on the server side and
abstracts the sending and receiving of cookies. Cookies contain a
session ID -- not the data itself.
The Django sessions framework is entirely cookie-based. It does
not fall back to putting session IDs in URLs. This is an intentional
design decision. Not only does that behavior make URLs ugly, it makes
your site vulnerable to session-ID theft via the "Referer" header.
For complete documentation on using Sessions in your code, consult
the sessions documentation that is shipped with Django (also available
on the Django Web site).
"""
session_key = models.CharField(_('session key'), max_length=40,
primary_key=True)
session_data = models.TextField(_('session data'))
expire_date = models.DateTimeField(_('expire date'), db_index=True)
objects = SessionManager()
class Meta:
db_table = 'django_session'
verbose_name = _('session')
verbose_name_plural = _('sessions')
def get_decoded(self):
return SessionStore().decode(self.session_data)
# At bottom to avoid circular import
from django.contrib.sessions.backends.db import SessionStore
| bsd-3-clause |
sparkslabs/kamaelia_ | Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/JMB/WSGI/_WSGIHandler.py | 3 | 15264 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""
WSGI Handler
=============
NOTE: This is experimental software.
This is the WSGI handler for ServerCore. It will wait on the
HTTPParser to transmit the body in full before proceeding. Thus, it is probably
not a good idea to use any WSGI apps requiring a lot of large file uploads (although
it could theoretically function fairly well for that purpose as long as the concurrency
level is relatively low).
For more information on WSGI, what it is, and to get a general overview of what
this component is intended to adapt the ServerCore to do, see one of the following
links:
* http://www.python.org/dev/peps/pep-0333/ (PEP 333)
* http://www.wsgi.org/wsgi/ (WsgiStart wiki)
* http://en.wikipedia.org/wiki/Web_Server_Gateway_Interface (Wikipedia article on WSGI)
-------------
Dependencies
-------------
This component depends on the wsgiref module, which is included with python 2.5.
Thus if you're using an older version, you will need to install it before using
this component.
The easiest way to install wsgiref is to use easy_install, which may be downloaded
from http://peak.telecommunity.com/DevCenter/EasyInstall . You may then install
wsgiref using the command "sudo easy_install wsgiref" (without the quotes of course).
Please note that Kamaelia Publish includes wsgiref.
-----------------------------
How do I use this component?
-----------------------------
The easiest way to use this component is to use the WsgiHandler factory function
that is included in Factory.py in this package. That method has URL handling that
will route a URL to the proper place. There is also a SimpleWsgiHandler that may
be used if you only want to support one application object. For more information
on how to use these functions, please see Factory.py. Also please note that both
of these factory functions are made to work with ServerCore/SimpleServer. Here is
an example of how to create a simple WSGI server:
from Kamaelia.Protocol.HTTP import HTTPProtocol
from Kamaelia.Experimental.Wsgi.Factory import WsgiFactory # FIXME: Docs are broken :-(
WsgiConfig = {
'wsgi_ver' : (1, 0),
'server_admin' : 'Jason Baker',
'server_software' : 'Kamaelia Publish'
}
url_list = [ #Note that this is a list of dictionaries. Order is important.
{
'kp.regex' : 'simple',
'kp.import_path' : 'Kamaelia.Apps.Wsgi.Apps.Simple',
'kp.app_obj' : 'simple_app',
}
{
'kp.regex' : '.*', #The .* means that this is a 404 handler
'kp.import_path' : 'Kamaelia.Apps.Wsgi.Apps.ErrorHandler',
'kp.app_obj' : 'application',
}
]
routing = [['/', WsgiFactory(WsgiConfig, url_list)]]
ServerCore(
protocol=HTTPProtocol(routing),
port=8080,
socketOptions=(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)).run()
------------------
Internal overview
------------------
request object
~~~~~~~~~~~~~~~
Note that certain WSGI applications will require configuration
data from the urls file. If you use the WsgiFactory to run this
handler, all options specified in the urls file will be put into
the environment variable with a kp. in front of them.
For example, the 'regex' entry in a urls file would go into the
environ dictionary like this if it was set to 'simple':
{
...
'kp.regex' : 'simple',
...
}
wsgi.input
~~~~~~~~~~~
PEP 333 requires that the WSGI environ dictionary also contain a file-like object
that holds the body of the request. Currently, the WsgiHandler will wait for the
full request before starting the application (which is not optimal behavior). If
the method is not PUT or POST, the handler will use a pre-made null-file object that
will always return empty data. This is an optimization to lower peak memory usage
and to speed things up.
WsgiConfig
~~~~~~~~~~~
The WsgiHandler requires a WsgiConfig dictionary for general configuration info. The
following items are required to be defined:
* wsgi_ver: the WSGI version as a Tuple. You want to use (1, 0)
* server_admin: the name and/or email address of the server's administrator
* server_software: The software and/or software version that runs the server
FIXME: It would be nice if the WsgiConfig were made into an object rather than a
dictionary.
"""
from pprint import pprint, pformat
import sys, os, cStringIO, cgitb, traceback, logging, copy
from datetime import datetime
from wsgiref.util import is_hop_by_hop
import Axon
from Axon.ThreadedComponent import threadedcomponent
from Axon.Ipc import producerFinished
import Kamaelia.Protocol.HTTP.ErrorPages as ErrorPages
from xml.sax.saxutils import unescape
class NullFileLike (object):
"""
This is a file-like object that is meant to represent an empty file.
"""
def read(self, number=0):
return ''
def readlines(self, number=0):
return[]
def readline(self):
return ''
def close(self):
pass
    def next(self):
raise StopIteration()
class ErrorLogger(object):
"""This is the file-like object intended to be used for wsgi.errors."""
def __init__(self, logger):
self.logger = logger
def write(self, data):
self.logger.error(data)
def writelines(self, seq):
data = '\n'.join(seq)
self.logger.error(data)
def flush(self):
pass
_null_fl = NullFileLike()
class _WsgiHandler(threadedcomponent):
"""
This is a WSGI handler that is used to serve WSGI applications. Typically,
URL routing is to be done in the factory method that creates this. Thus,
the handler must be passed the application object. You probably don't need
to instantiate this class directly.
It will respond to the following signals:
producerFinished - This is used by the HTTPServer to indicate that the full
body has been transmitted. This will not shut down this component, and in
fact will make it BEGIN processing the request. If the request is not a
POST or PUT request, the Handler will ignore this signal.
Any other signals that this component may receive may result in undefined
behavior, but this component will most likely ignore them.
"""
Inboxes = {
'inbox' : 'Used to receive the body of requests from the HTTPParser',
'control' : 'NOT USED',
}
Outboxes = {
'outbox' : 'used to send page fragments',
'signal' : 'send producerFinished messages',
}
Debug = False
def __init__(self, app, request, WsgiConfig, **argd):
"""
app - The WSGI application to run
request - the request object that is generated by HTTPParser
log_writable - a LogWritable object to be passed as a wsgi.errors object.
WsgiConfig - General configuration about the WSGI server.
"""
super(_WsgiHandler, self).__init__(**argd)
self.environ = request
batch_str = self.environ.get('batch', '')
if batch_str:
batch_str = 'batch ' + batch_str
print 'request received for [%s] %s' % \
(self.environ['REQUEST_URI'], batch_str)
self.app = app
self.response_dict = {}
self.wsgi_config = WsgiConfig
self.write_called = False
self.pf_received = False #Have we received a producerFinished signal?
self.logger = logging.getLogger('kp')
self.log = ErrorLogger(self.logger)
def main(self):
if self.environ['REQUEST_METHOD'] == 'POST' or self.environ['REQUEST_METHOD'] == 'PUT':
try:
body = self.waitForBody()
except:
self._error(503, sys.exc_info())
self.send(producerFinished(self), 'signal')
return
self.memfile = cStringIO.StringIO(body)
else:
self.memfile = _null_fl
self.initWSGIVars(self.wsgi_config)
#pprint(self.environ)
not_done = True
try:
#PEP 333 specifies that we're not supposed to buffer output here,
#so pulling the iterator out of the app object
app_return = self.app(self.environ, self.start_response)
if isinstance(app_return, (list)):
response = app_return.pop(0)
self.write(response)
[self.sendFragment(x) for x in app_return]
else:
app_iter = iter(app_return)
                response = app_iter.next()
while not response:
response = app_iter.next()
self.write(response)
[self.sendFragment(x) for x in app_iter if x]
app_iter.close()
if hasattr(app_iter, 'close'):
app_iter.close()
except:
self._error(503, sys.exc_info()) #Catch any errors and log them and print
#either an error message or a stack
#trace (depending if debug is set)
self.memfile.close()
#The Kamaelia Publish Peer depends on the producerFinished signal being sent
#AFTER this handler has received a producerFinished signal. Thus, we wait
#until we get a signal before finishing up.
if not self.pf_received:
while not self.dataReady('control'):
self.pause()
self.send(Axon.Ipc.producerFinished(self), "signal")
def start_response(self, status, response_headers, exc_info=None):
"""
Method to be passed to WSGI application object to start the response.
"""
if exc_info:
try:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
elif self.response_dict:
#Will be caught by _error
raise WsgiAppError('start_response called a second time without exc_info! See PEP 333.')
#PEP 333 requires that an application NOT send any hop-by-hop headers.
#Therefore, we check for any of them in the headers the application
#returns. If so, an exception is raised to be caught by _error.
for key,value in response_headers:
if is_hop_by_hop(key):
raise WsgiAppError('Hop by hop header specified')
self.response_dict['headers'] = copy.copy(response_headers)
self.response_dict['statuscode'] = status
return self.write
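    # A WSGI application calls start_response() to obtain the write callable
    # below, for example:
    #   write = start_response('200 OK', [('Content-Type', 'text/html')])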
def write(self, body_data):
"""
Write method to be passed to WSGI application object. Used to write
unbuffered output to the page. You probably don't want to use this
unless you have good reason to.
"""
if self.response_dict and not self.write_called:
self.response_dict['data'] = body_data
self.send(self.response_dict, 'outbox')
self.write_called = True
elif self.write_called:
self.sendFragment(body_data)
#the following errors will be caught and sent to _error
elif not self.response_dict and not self.write_called:
raise WsgiError("write() called before start_response()!")
else:
raise WsgiError('Unkown error in write.')
def _error(self, status=500, body_data=('', '', '')):
"""
This is an internal method used to print an error to the browser and log
it in the wsgi log.
"""
if self.Debug:
resource = {
'statuscode' : status,
'type' : 'text/html',
'data' : cgitb.html(body_data),
}
self.send(resource, 'outbox')
else:
self.send(ErrorPages.getErrorPage(status, 'An internal error has occurred.'), 'outbox')
self.log.write(''.join(traceback.format_exception(body_data[0], body_data[1], body_data[2], '\n')))
def waitForBody(self):
"""
This internal method is used to make the WSGI Handler wait for the body
of an HTTP request before proceeding.
FIXME: We should really begin executing the Application and pull the
body as needed rather than pulling it all up front.
"""
buffer = []
not_done = True
while not_done:
for msg in self.Inbox('control'):
#print msg
if isinstance(msg, producerFinished):
not_done = False
self.pf_received = True
for msg in self.Inbox('inbox'):
if isinstance(msg, str):
text = msg
elif isinstance(msg, dict):
text = msg.get('body', '')
text = unescape(text)
else:
text = ''
if not isinstance(text, str):
text = str(text)
buffer.append(text)
if not_done and not self.anyReady():
self.pause()
return ''.join(buffer)
def sendFragment(self, fragment):
"""
This is a pretty simple method. It's used to send a fragment if an app
yields a value beyond the first.
"""
page = {
'data' : fragment,
}
#print 'FRAGMENT'
#pprint(page)
self.send(page, 'outbox')
def initWSGIVars(self, wsgi_config):
"""
This method initializes all variables that are required to be present
(including ones that could possibly be empty).
"""
#==================================
#WSGI variables
#==================================
self.environ["wsgi.version"] = wsgi_config['wsgi_ver']
self.environ["wsgi.errors"] = self.log
self.environ['wsgi.input'] = self.memfile
self.environ["wsgi.multithread"] = True
self.environ["wsgi.multiprocess"] = False
self.environ["wsgi.run_once"] = False
class WsgiError(Exception):
"""
This is used to indicate an internal error of some kind. It is thrown if the
write() callable is called without start_response being called.
"""
pass
class WsgiAppError(Exception):
"""
    This is an exception that is used if a WSGI application does something it shouldn't.
"""
pass
| apache-2.0 |
riklaunim/django-custom-multisite | setup.py | 1 | 4043 | from distutils.core import setup
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
import os
import sys
class osx_install_data(install_data):
# On MacOS, the platform-specific lib dir is /System/Library/Framework/Python/.../
# which is wrong. Python 2.5 supplied with MacOS 10.5 has an Apple-specific fix
# for this in distutils.command.install_data#306. It fixes install_lib but not
# install_data, which is why we roll our own install_data class.
def finalize_options(self):
# By the time finalize_options is called, install.install_lib is set to the
# fixed directory, so we set the installdir to install_lib. The
# install_data class uses ('install_data', 'install_dir') instead.
self.set_undefined_options('install', ('install_lib', 'install_dir'))
install_data.finalize_options(self)
if sys.platform == "darwin":
cmdclasses = {'install_data': osx_install_data}
else:
cmdclasses = {'install_data': install_data}
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
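# For example, on a POSIX system fullsplit('django/contrib/admin') returns
# ['django', 'contrib', 'admin'].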
# Tell distutils not to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
django_dir = 'django'
for dirpath, dirnames, filenames in os.walk(django_dir):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)))
elif filenames:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
# Small hack for working with bdist_wininst.
# See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html
if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst':
for file_info in data_files:
file_info[0] = '\\PURELIB\\%s' % file_info[0]
# Dynamically calculate the version based on django.VERSION.
version = __import__('django').get_version()
setup(
name = "Django",
version = '1.4.18-multisite-pozytywnie2',
url = 'http://www.djangoproject.com/',
author = 'Django Software Foundation',
author_email = '[email protected]',
description = 'A high-level Python Web framework that encourages rapid development and clean, pragmatic design.',
download_url = 'https://www.djangoproject.com/m/releases/1.4/Django-1.4.18.tar.gz',
packages = packages,
cmdclass = cmdclasses,
data_files = data_files,
scripts = ['django/bin/django-admin.py'],
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| bsd-3-clause |
wakatime/wakadump | setup.py | 1 | 1515 | from setuptools import setup
about = {}
with open('wakadump/__about__.py') as f:
exec(f.read(), about)
packages = [
about['__title__'],
]
install_requires = [x.strip() for x in open('requirements.txt').readlines()]
setup(
name=about['__title__'],
version=about['__version__'],
license=about['__license__'],
description=about['__description__'],
long_description=open('README.rst').read(),
author=about['__author__'],
author_email=about['__author_email__'],
url=about['__url__'],
packages=packages,
package_dir={about['__title__']: about['__title__']},
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=install_requires,
entry_points={
'console_scripts': ['wakadump = wakadump.cli:main'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Text Editors',
],
)
| bsd-3-clause |
Autodesk/molecular-design-toolkit | moldesign/integrators/verlet.py | 1 | 2603 | from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import units as u
from ..molecules import Trajectory
from ..utils import exports
from .base import IntegratorBase
@exports
class VelocityVerlet(IntegratorBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# TODO: raise exception if any constraints are requested ...
def run(self, run_for):
"""
Users won't call this directly - instead, use mol.run
Propagate position, momentum by a single timestep using velocity verlet
:param run_for: number of timesteps OR amount of time to run for
"""
if not self._prepped:
self.prep()
nsteps = self.time_to_steps(run_for, self.params.timestep)
# Set up trajectory and record the first frame
self.mol.time = 0.0 * u.default.time
self.traj = Trajectory(self.mol)
self.mol.calculate()
self.traj.new_frame()
next_trajectory_frame = self.params.frame_interval
# Dynamics loop
for istep in range(nsteps):
self.step()
if istep + 1 >= next_trajectory_frame:
self.traj.new_frame()
next_trajectory_frame += self.params.frame_interval
return self.traj
def prep(self):
self.time = 0.0 * self.params.timestep
self._prepped = True
def step(self):
# Move momenta from t-dt to t-dt/2
phalf = self.mol.momenta + 0.5 * self.params.timestep * self.mol.calc_forces(wait=True)
# Move positions from t-dt to t
self.mol.positions += phalf * self.params.timestep / self.mol.dim_masses
# Move momenta from t-dt/2 to t - triggers recomputed forces
self.mol.momenta = phalf + 0.5 * self.params.timestep * self.mol.calc_forces(wait=True)
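        # Taken together, the two half-step momentum updates and the full-step
        # position update implement standard velocity Verlet:
        #   p(t+dt/2) = p(t) + (dt/2) F(t)
        #   x(t+dt)   = x(t) + dt * p(t+dt/2) / m
        #   p(t+dt)   = p(t+dt/2) + (dt/2) F(t+dt)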
self.time += self.params.timestep
self.mol.time = self.time
| apache-2.0 |
sajeeshcs/nested_projects_keystone | keystone/tests/mapping_fixtures.py | 6 | 12218 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures for Federation Mapping."""
EMPLOYEE_GROUP_ID = "0cd5e9"
CONTRACTOR_GROUP_ID = "85a868"
TESTER_GROUP_ID = "123"
DEVELOPER_GROUP_ID = "xyz"
# Mapping summary:
# LastName Smith & Not Contractor or SubContractor -> group 0cd5e9
# FirstName Jill & Contractor or SubContractor -> to group 85a868
MAPPING_SMALL = {
"rules": [
{
"local": [
{
"group": {
"id": EMPLOYEE_GROUP_ID
}
},
{
"user": {
"name": "{0}"
}
}
],
"remote": [
{
"type": "UserName"
},
{
"type": "orgPersonType",
"not_any_of": [
"Contractor",
"SubContractor"
]
},
{
"type": "LastName",
"any_one_of": [
"Bo"
]
}
]
},
{
"local": [
{
"group": {
"id": CONTRACTOR_GROUP_ID
}
},
{
"user": {
"name": "{0}"
}
}
],
"remote": [
{
"type": "UserName"
},
{
"type": "orgPersonType",
"any_one_of": [
"Contractor",
"SubContractor"
]
},
{
"type": "FirstName",
"any_one_of": [
"Jill"
]
}
]
}
]
}
# Mapping summary:
# orgPersonType Admin or Big Cheese -> name {0} {1} email {2} and group 0cd5e9
# orgPersonType Customer -> user name {0} email {1}
# orgPersonType Test and email ^@example.com$ -> group 123 and xyz
MAPPING_LARGE = {
"rules": [
{
"local": [
{
"user": {
"name": "{0} {1}",
"email": "{2}"
},
"group": {
"id": EMPLOYEE_GROUP_ID
}
}
],
"remote": [
{
"type": "FirstName"
},
{
"type": "LastName"
},
{
"type": "Email"
},
{
"type": "orgPersonType",
"any_one_of": [
"Admin",
"Big Cheese"
]
}
]
},
{
"local": [
{
"user": {
"name": "{0}",
"email": "{1}"
}
}
],
"remote": [
{
"type": "UserName"
},
{
"type": "Email"
},
{
"type": "orgPersonType",
"not_any_of": [
"Admin",
"Employee",
"Contractor",
"Tester"
]
}
]
},
{
"local": [
{
"group": {
"id": TESTER_GROUP_ID
}
},
{
"group": {
"id": DEVELOPER_GROUP_ID
}
},
{
"user": {
"name": "{0}"
}
}
],
"remote": [
{
"type": "UserName"
},
{
"type": "orgPersonType",
"any_one_of": [
"Tester"
]
},
{
"type": "Email",
"any_one_of": [
".*@example.com$"
],
"regex": True
}
]
}
]
}
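# In MAPPING_LARGE the local "{0} {1}" and "{2}" placeholders are filled
# positionally from the remote attributes listed in the same rule
# (FirstName, LastName and Email respectively).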
MAPPING_BAD_REQ = {
"rules": [
{
"local": [
{
"user": "name"
}
],
"remote": [
{
"type": "UserName",
"bad_requirement": [
"Young"
]
}
]
}
]
}
MAPPING_BAD_VALUE = {
"rules": [
{
"local": [
{
"user": "name"
}
],
"remote": [
{
"type": "UserName",
"any_one_of": "should_be_list"
}
]
}
]
}
MAPPING_NO_RULES = {
'rules': []
}
MAPPING_NO_REMOTE = {
"rules": [
{
"local": [
{
"user": "name"
}
],
"remote": []
}
]
}
MAPPING_MISSING_LOCAL = {
"rules": [
{
"remote": [
{
"type": "UserName",
"any_one_of": "should_be_list"
}
]
}
]
}
MAPPING_WRONG_TYPE = {
"rules": [
{
"local": [
{
"user": "{1}"
}
],
"remote": [
{
"not_type": "UserName"
}
]
}
]
}
MAPPING_MISSING_TYPE = {
"rules": [
{
"local": [
{
"user": "{1}"
}
],
"remote": [
{}
]
}
]
}
MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF = {
"rules": [
{
"local": [
{
"group": {
"id": "0cd5e9"
}
},
{
"user": {
"name": "{0}"
}
}
],
"remote": [
{
"type": "UserName"
},
{
"type": "orgPersonType",
"not_any_of": [
"SubContractor"
],
"invalid_type": "xyz"
}
]
}
]
}
MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF = {
"rules": [
{
"local": [
{
"group": {
"id": "0cd5e9"
}
},
{
"user": {
"name": "{0}"
}
}
],
"remote": [
{
"type": "UserName"
},
{
"type": "orgPersonType",
"any_one_of": [
"SubContractor"
],
"invalid_type": "xyz"
}
]
}
]
}
MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE = {
"rules": [
{
"local": [
{
"group": {
"id": "0cd5e9"
}
},
{
"user": {
"name": "{0}"
}
}
],
"remote": [
{
"type": "UserName"
},
{
"type": "orgPersonType",
"invalid_type": "xyz"
}
]
}
]
}
MAPPING_EXTRA_RULES_PROPS = {
"rules": [
{
"local": [
{
"group": {
"id": "0cd5e9"
}
},
{
"user": {
"name": "{0}"
}
}
],
"invalid_type": {
"id": "xyz",
},
"remote": [
{
"type": "UserName"
},
{
"type": "orgPersonType",
"not_any_of": [
"SubContractor"
]
}
]
}
]
}
MAPPING_TESTER_REGEX = {
"rules": [
{
"local": [
{
"user": {
"name": "{0}",
}
}
],
"remote": [
{
"type": "UserName"
}
]
},
{
"local": [
{
"group": {
"id": TESTER_GROUP_ID
}
}
],
"remote": [
{
"type": "orgPersonType",
"any_one_of": [
".*Tester*"
],
"regex": True
}
]
}
]
}
EMPLOYEE_ASSERTION = {
'Email': '[email protected]',
'UserName': 'tbo',
'FirstName': 'Tim',
'LastName': 'Bo',
'orgPersonType': 'Employee;BuildingX;'
}
EMPLOYEE_ASSERTION_PREFIXED = {
'PREFIX_Email': '[email protected]',
'PREFIX_UserName': 'tbo',
'PREFIX_FirstName': 'Tim',
'PREFIX_LastName': 'Bo',
'PREFIX_orgPersonType': 'SuperEmployee;BuildingX;'
}
CONTRACTOR_ASSERTION = {
'Email': '[email protected]',
'UserName': 'jsmith',
'FirstName': 'Jill',
'LastName': 'Smith',
'orgPersonType': 'Contractor;Non-Dev;'
}
ADMIN_ASSERTION = {
'Email': '[email protected]',
'UserName': 'bob',
'FirstName': 'Bob',
'LastName': 'Thompson',
'orgPersonType': 'Admin;Chief;'
}
CUSTOMER_ASSERTION = {
'Email': '[email protected]',
'UserName': 'bwilliams',
'FirstName': 'Beth',
'LastName': 'Williams',
'orgPersonType': 'Customer;'
}
TESTER_ASSERTION = {
'Email': '[email protected]',
'UserName': 'testacct',
'FirstName': 'Test',
'LastName': 'Account',
'orgPersonType': 'MadeupGroup;Tester;GroupX'
}
BAD_TESTER_ASSERTION = {
'Email': '[email protected]',
'UserName': 'Evil',
'FirstName': 'Test',
'LastName': 'Account',
'orgPersonType': 'Tester;'
}
MALFORMED_TESTER_ASSERTION = {
'Email': '[email protected]',
'UserName': 'testacct',
'FirstName': 'Test',
'LastName': 'Account',
'orgPersonType': 'Tester;',
'object': object(),
'dictionary': dict(zip('teststring', xrange(10))),
'tuple': tuple(xrange(5))
}
CONTRACTOR_MALFORMED_ASSERTION = {
'UserName': 'user',
'FirstName': object(),
'orgPersonType': 'Contractor'
}
| apache-2.0 |
sktjdgns1189/android_kernel_pantech_ef63l | scripts/build-all.py | 24 | 10334 | #! /usr/bin/env python
# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import re
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'KCONFIG_NOTIMESTAMP': 'true' })
make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-')
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
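# For example, update_config('arch/arm/configs/msm8974_defconfig',
#                            'CONFIG_USE_THING=y') appends that option line.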
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
r'msmkrypton*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
names[os.path.basename(n)[:-10]] = n
return names
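# For example, 'arch/arm/configs/msm8974_defconfig' is stored under the key
# 'msm8974' (the trailing '_defconfig' is stripped by the [:-10] slice).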
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
    perf_defconfig = 'msm8974_pantech_perf_defconfig'
print 'Perf defconfig : %s' % perf_defconfig
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'PERF_DEFCONFIG=%s' % perf_defconfig,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of previous
# build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
for t in build_targets:
build = Builder(log_name)
result = build.run(cmd_line + [t])
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" %
(target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
mgohde/nudge-tools | tools/dbtool.py | 1 | 4655 | #!/usr/bin/python
import os
import sys
# Because of course Python has XML parsing built in.
import xml.etree.ElementTree as ET
# dbtool.py -- Quick and dirty tool to insert XML formatted storylines into the database.
def nodesearch(nodename, node):
for n in node:
if n.tag==nodename:
return n
return None
def manynodesearch(nodename, node):
nlist=[]
for n in node:
if n.tag==nodename:
nlist.append(n)
return nlist
def sanitize(text):
"""Replaces all special characters in text with escaped characters."""
newstr=''
for c in text:
if c=='"' or c=="'" or c=='%' or c=='_':
newstr=newstr+'\\'
newstr=newstr+c
return newstr
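# For example, sanitize("100% of O'Brien") returns "100\% of O\'Brien",
# keeping the generated SQL string literals intact.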
def readstory(infile):
contents=infile.read()
curid=1
position=0;
resid=1
root=ET.fromstring(contents)
storytitle=root.attrib['title']
for subnode in root:
answerchoice='A'
snid=subnode.attrib['id']
textnode=nodesearch("text", subnode)
answernode=nodesearch("answers", subnode)
print "-- Generated statements for node: %s" % snid
# Check all destinations to see if there is an END node, and if there is, insert it into the rewards table.
for a in answernode:
destsearch=manynodesearch("dest", a)
for d in destsearch:
# The way nudge is currently implemented, rewards cannot be given out per user action, however
# this may change in the future, so it is beneficial to be able to include per-decision
# rewards now:
try:
numpoints=int(d.attrib['points'])
rewardname=d.attrib['reward']
rewardtext=d.attrib['rewardtext']
endname="e%d" % curid
print "INSERT INTO rewardss (reward, statement, points, end_id, end, storytitle) VALUES ('%s', '%s', %d, %d, '%s', '%s');" % (rewardname, rewardtext, numpoints, curid, endname, storytitle)
except:
pass
if(d.text=="END"):
position=1
print "INSERT INTO storytable VALUES (%d,'%s','%s','%s',%d);" % (curid, storytitle, snid, sanitize(textnode.text), position)
# This ensures that the story will have valid entry and exit points.
position=2
curid+=1
for a in answernode:
optiontextnode=nodesearch("text", a)
destsearch=manynodesearch("dest", a)
minprob=0
print "INSERT INTO answers VALUES ('%s','%s','%s','%s');" % (storytitle, snid, answerchoice, sanitize(optiontextnode.text))
for d in destsearch:
maxprob=minprob+int(d.attrib['p'])
print "INSERT INTO results VALUES (%d,'%s','%s','%s',%d,%d,'%s');" % (resid, storytitle, snid, answerchoice, minprob, maxprob, d.text)
minprob=minprob+int(d.attrib['p'])
resid+=1
answerchoice=chr(ord(answerchoice)+1)
def delstory(infile):
contents=infile.read()
curid=1
resid=1
root=ET.fromstring(contents)
storytitle=root.attrib['title']
print "-- Generated statements for story: %s" % storytitle
print "DELETE FROM storytable WHERE storytitle='%s';" % storytitle
print "DELETE FROM answers WHERE storytitle='%s';" % storytitle
print "DELETE FROM results WHERE storytitle='%s';" % storytitle
print "DELETE FROM rewardss WHERE storytitle='%s';" % storytitle
def printusage(progname):
print "Usage: %s [-d] [input filename]" % progname
print "Generate SQL statements to install or delete a storyline from a Nudge SQL database"
print "Generates statements to install if -d is not specified."
print ""
print "Arguments:"
print " -d\tGenerate statements to delete a storyline."
print "[input filename] may be blank. In this case, %s attempts to read a story from standard input." % progname
def main(args):
infile=sys.stdin
delete=False
# Insert arg handling fanciness here
if len(args)!=1:
if args[1]=='-d':
delete=True
            if len(args)>2:
infile=open(args[2], 'r')
elif args[1]=='-h' or args[1]=='--h':
printusage(args[0])
return
else:
infile=open(args[1], 'r')
if not delete:
readstory(infile)
else:
delstory(infile)
if __name__=="__main__":
main(sys.argv)
| gpl-2.0 |
elgambitero/FreeCAD_sf_master | src/Mod/Path/InitGui.py | 16 | 6342 | #***************************************************************************
#* (c) Yorik van Havre ([email protected]) 2014 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************/
class PathWorkbench ( Workbench ):
"Path workbench"
Icon = """
/* XPM */
static char * Path_xpm[] = {
"16 16 9 1",
" c None",
". c #262623",
"+ c #452F16",
"@ c #525451",
"# c #7E5629",
"$ c #838582",
"% c #BE823B",
"& c #989A97",
"* c #CFD1CE",
" .@@@@@@@@@@. ",
" $**********$ ",
" @$$$&&&&$$$@ ",
" .$&&&&$. ",
" @******. ",
" @******. ",
" ...@@... ",
" .&&@. ",
" .@. . ",
" .&&. ",
" .$*$. ",
" .$. . ",
"+###+ .@&.+###+",
"+%%%+ .$$. +%%%+",
"+%%%%#.. .#%%%%+",
".++++++..++++++."};
"""
MenuText = "Path"
ToolTip = "Path workbench"
def Initialize(self):
# load the builtin modules
import Path
import PathGui
# load python modules
from PathScripts import PathProfile
from PathScripts import PathPocket
from PathScripts import PathDrilling
from PathScripts import PathDressup
from PathScripts import PathHop
from PathScripts import PathCopy
from PathScripts import PathFixture
from PathScripts import PathCompoundExtended
from PathScripts import PathProject
from PathScripts import PathToolTableEdit
from PathScripts import PathStock
from PathScripts import PathPlane
from PathScripts import PathPost
from PathScripts import PathToolLenOffset
from PathScripts import PathLoadTool
from PathScripts import PathComment
from PathScripts import PathStop
from PathScripts import PathMachine
from PathScripts import PathFromShape
from PathScripts import PathKurve
# build commands list
commands =["Path_Stock","Path_Plane","Path_Fixture","Path_ToolTableEdit","Path_Profile","Path_Kurve","Path_Pocket","Path_Drilling",\
"Path_Dressup","Path_Hop","Path_Shape","Path_Copy","Path_CompoundExtended","Path_Project"]
projcmdlist = ["Path_Project", "Path_ToolTableEdit","Path_Post"]
prepcmdlist = ["Path_Plane","Path_Fixture","Path_LoadTool","Path_ToolLenOffset","Path_Comment","Path_Stop"]
opcmdlist = ["Path_Profile","Path_Kurve","Path_Pocket","Path_Drilling","Path_FromShape"]
modcmdlist = ["Path_Copy","Path_CompoundExtended","Path_Dressup","Path_Hop"]
# Add commands to menu and toolbar
def QT_TRANSLATE_NOOP(scope, text): return text
# self.appendToolbar(QT_TRANSLATE_NOOP("PathWorkbench","Path"),commands)
self.appendToolbar(QT_TRANSLATE_NOOP("PathWorkbench","Commands for setting up Project"),projcmdlist)
self.appendToolbar(QT_TRANSLATE_NOOP("PathWorkbench","Prepatory Commands"),prepcmdlist)
self.appendToolbar(QT_TRANSLATE_NOOP("PathWorkbench","Operations"),opcmdlist)
self.appendToolbar(QT_TRANSLATE_NOOP("PathWorkbench","Commands for grouping,copying, and organizing operations"),modcmdlist)
# self.appendMenu(QT_TRANSLATE_NOOP("PathWorkbench","Path"),commands)
self.appendMenu([QT_TRANSLATE_NOOP("PathWorkbench","Path"),QT_TRANSLATE_NOOP("Path","Project Setup")],projcmdlist)
self.appendMenu([QT_TRANSLATE_NOOP("PathWorkbench","Path"),QT_TRANSLATE_NOOP("Path","Prepatory Commands")],prepcmdlist)
self.appendMenu([QT_TRANSLATE_NOOP("PathWorkbench","Path"),QT_TRANSLATE_NOOP("Path","New Operation")],opcmdlist)
self.appendMenu([QT_TRANSLATE_NOOP("PathWorkbench","Path"),QT_TRANSLATE_NOOP("Path","Path Modification")],modcmdlist)
# Add preferences pages
import os
FreeCADGui.addPreferencePage(FreeCAD.getHomePath()+os.sep+"Mod"+os.sep+"Path"+os.sep+"PathScripts"+os.sep+"DlgSettingsPath.ui","Path")
Log ('Loading Path workbench... done\n')
def GetClassName(self):
return "Gui::PythonWorkbench"
def Activated(self):
Msg("Path workbench activated\n")
def Deactivated(self):
Msg("Path workbench deactivated\n")
Gui.addWorkbench(PathWorkbench())
FreeCAD.addImportType("GCode (*.nc *.gc *.ncc *.ngc *.cnc *.tap)","PathGui")
FreeCAD.addExportType("GCode (*.nc *.gc *.ncc *.ngc *.cnc *.tap)","PathGui")
| lgpl-2.1 |
percy-g2/Novathor_xperia_u8500 | 6.2.A.1.100/external/webkit/Tools/Scripts/webkitpy/common/system/ospath_unittest.py | 15 | 2518 | # Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for ospath.py."""
import os
import unittest
from webkitpy.common.system.ospath import relpath
# Make sure the tests in this class are platform independent.
class RelPathTest(unittest.TestCase):
"""Tests relpath()."""
os_path_abspath = lambda self, path: path
def _rel_path(self, path, abs_start_path):
return relpath(path, abs_start_path, self.os_path_abspath)
def test_same_path(self):
rel_path = self._rel_path("WebKit", "WebKit")
self.assertEquals(rel_path, "")
def test_long_rel_path(self):
start_path = "WebKit"
expected_rel_path = os.path.join("test", "Foo.txt")
path = os.path.join(start_path, expected_rel_path)
rel_path = self._rel_path(path, start_path)
self.assertEquals(expected_rel_path, rel_path)
def test_none_rel_path(self):
"""Test _rel_path() with None return value."""
start_path = "WebKit"
path = os.path.join("other_dir", "foo.txt")
rel_path = self._rel_path(path, start_path)
self.assertTrue(rel_path is None)
rel_path = self._rel_path("Tools", "WebKit")
self.assertTrue(rel_path is None)
| gpl-2.0 |
haxwithaxe/qutebrowser | qutebrowser/browser/history.py | 2 | 7443 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Simple history which gets written to disk."""
import time
import collections
from PyQt5.QtCore import pyqtSignal, QUrl
from PyQt5.QtWebKit import QWebHistoryInterface
from qutebrowser.utils import utils, objreg, standarddir, log
from qutebrowser.config import config
from qutebrowser.misc import lineparser
class HistoryEntry:
"""A single entry in the web history.
Attributes:
atime: The time the page was accessed.
url: The URL which was accessed as QUrl.
url_string: The URL which was accessed as string.
"""
def __init__(self, atime, url):
self.atime = float(atime)
self.url = QUrl(url)
self.url_string = url
def __repr__(self):
return utils.get_repr(self, constructor=True, atime=self.atime,
url=self.url.toDisplayString())
def __str__(self):
return '{} {}'.format(int(self.atime), self.url_string)
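# Serialized form: __str__ renders an entry as "<atime> <url>", e.g.
# "1416161111 https://www.example.com/" (values illustrative); this is the
# line format that WebHistory.async_read() parses back with a single
# whitespace split.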
class WebHistory(QWebHistoryInterface):
"""A QWebHistoryInterface which supports being written to disk.
Attributes:
_lineparser: The AppendLineParser used to save the history.
_history_dict: An OrderedDict of URLs read from the on-disk history.
_new_history: A list of HistoryEntry items of the current session.
_saved_count: How many HistoryEntries have been written to disk.
_initial_read_started: Whether async_read was called.
_initial_read_done: Whether async_read has completed.
_temp_history: OrderedDict of temporary history entries before
async_read was called.
Signals:
add_completion_item: Emitted before a new HistoryEntry is added.
arg: The new HistoryEntry.
item_added: Emitted after a new HistoryEntry is added.
arg: The new HistoryEntry.
"""
add_completion_item = pyqtSignal(HistoryEntry)
item_added = pyqtSignal(HistoryEntry)
async_read_done = pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent)
self._initial_read_started = False
self._initial_read_done = False
self._lineparser = lineparser.AppendLineParser(
standarddir.data(), 'history', parent=self)
self._history_dict = collections.OrderedDict()
self._temp_history = collections.OrderedDict()
self._new_history = []
self._saved_count = 0
objreg.get('save-manager').add_saveable(
'history', self.save, self.item_added)
def __repr__(self):
return utils.get_repr(self, length=len(self))
def __getitem__(self, key):
return self._new_history[key]
def __iter__(self):
return iter(self._history_dict.values())
def __len__(self):
return len(self._history_dict)
def async_read(self):
"""Read the initial history."""
if self._initial_read_started:
log.init.debug("Ignoring async_read() because reading is started.")
return
self._initial_read_started = True
if standarddir.data() is None:
self._initial_read_done = True
self.async_read_done.emit()
return
with self._lineparser.open():
for line in self._lineparser:
yield
data = line.rstrip().split(maxsplit=1)
if not data:
# empty line
continue
elif len(data) != 2:
# other malformed line
log.init.warning("Invalid history entry {!r}!".format(
line))
continue
atime, url = data
if atime.startswith('\0'):
log.init.warning(
"Removing NUL bytes from entry {!r} - see "
"https://github.com/The-Compiler/qutebrowser/issues/"
"670".format(data))
atime = atime.lstrip('\0')
# This de-duplicates history entries; only the latest
# entry for each URL is kept. If you want to keep
# information about previous hits change the items in
# old_urls to be lists or change HistoryEntry to have a
# list of atimes.
entry = HistoryEntry(atime, url)
self._add_entry(entry)
self._initial_read_done = True
self.async_read_done.emit()
for url, entry in self._temp_history.items():
self._new_history.append(entry)
self._add_entry(entry)
self.add_completion_item.emit(entry)
def _add_entry(self, entry, target=None):
"""Add an entry to self._history_dict or another given OrderedDict."""
if target is None:
target = self._history_dict
target[entry.url_string] = entry
target.move_to_end(entry.url_string)
def get_recent(self):
"""Get the most recent history entries."""
old = self._lineparser.get_recent()
return old + [str(e) for e in self._new_history]
def save(self):
"""Save the history to disk."""
new = (str(e) for e in self._new_history[self._saved_count:])
self._lineparser.new_data = new
self._lineparser.save()
self._saved_count = len(self._new_history)
def addHistoryEntry(self, url_string):
"""Called by WebKit when an URL should be added to the history.
Args:
url_string: An url as string to add to the history.
"""
if not url_string:
return
if config.get('general', 'private-browsing'):
return
entry = HistoryEntry(time.time(), url_string)
if self._initial_read_done:
self.add_completion_item.emit(entry)
self._new_history.append(entry)
self._add_entry(entry)
self.item_added.emit(entry)
else:
self._add_entry(entry, target=self._temp_history)
def historyContains(self, url_string):
"""Called by WebKit to determine if an URL is contained in the history.
Args:
url_string: The URL (as string) to check for.
Return:
True if the url is in the history, False otherwise.
"""
return url_string in self._history_dict
def init(parent=None):
"""Initialize the web history.
Args:
parent: The parent to use for WebHistory.
"""
history = WebHistory(parent)
objreg.register('web-history', history)
QWebHistoryInterface.setDefaultInterface(history)
| gpl-3.0 |
parinporecha/backend_gtgonline | GTG/tests/test_urlregex.py | 1 | 1375 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2012 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
""" Tests for URL regex """
import unittest
from GTG.tools.urlregex import match
class TestURLRegex(unittest.TestCase):
""" Test extractor of URL from text """
def test_anchor_amperstand(self):
""" Reproducer for bug #1023555 """
url = "http://test.com/#hi&there"
self.assertEqual(match(url).group(0), url)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
| gpl-3.0 |
Nolski/airmozilla | airmozilla/manage/forms.py | 1 | 29803 | import re
import datetime
from collections import defaultdict
import dateutil.parser
import pytz
from django import forms
from django.db.models import Count
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.utils.timezone import utc
from django.utils.safestring import mark_safe
from funfactory.urlresolvers import reverse
from slugify import slugify
from airmozilla.base.forms import BaseModelForm, BaseForm
from airmozilla.manage import url_transformer
from airmozilla.main.models import (
Approval,
Event,
EventTweet,
Location,
Region,
Tag,
Template,
Channel,
SuggestedEvent,
SuggestedEventComment,
URLMatch,
EventAssignment,
LocationDefaultEnvironment,
RecruitmentMessage,
Picture,
Topic,
)
from airmozilla.comments.models import Discussion, Comment
from airmozilla.surveys.models import Question, Survey
from airmozilla.staticpages.models import StaticPage
from .widgets import PictureWidget
TIMEZONE_CHOICES = [(tz, tz.replace('_', ' ')) for tz in pytz.common_timezones]
ONE_HOUR = 60 * 60
class UserEditForm(BaseModelForm):
class Meta:
model = User
fields = ('is_active', 'is_staff', 'is_superuser', 'groups')
def clean(self):
cleaned_data = super(UserEditForm, self).clean()
is_active = cleaned_data.get('is_active')
is_staff = cleaned_data.get('is_staff')
is_superuser = cleaned_data.get('is_superuser')
groups = cleaned_data.get('groups')
if is_superuser and not is_staff:
raise forms.ValidationError('Superusers must be staff.')
if is_staff and not is_active:
raise forms.ValidationError('Staff must be active.')
if is_staff and not is_superuser and not groups:
raise forms.ValidationError(
'Non-superuser staff must belong to a group.'
)
return cleaned_data
class GroupEditForm(BaseModelForm):
def __init__(self, *args, **kwargs):
super(GroupEditForm, self).__init__(*args, **kwargs)
self.fields['name'].required = True
choices = self.fields['permissions'].choices
self.fields['permissions'] = forms.MultipleChoiceField(
choices=choices,
widget=forms.CheckboxSelectMultiple,
required=False
)
class Meta:
model = Group
class EventRequestForm(BaseModelForm):
tags = forms.CharField(required=False)
class Meta:
model = Event
widgets = {
'description': forms.Textarea(attrs={'rows': 4}),
'short_description': forms.Textarea(attrs={'rows': 2}),
'call_info': forms.Textarea(attrs={'rows': 3}),
'additional_links': forms.Textarea(attrs={'rows': 3}),
'template_environment': forms.Textarea(attrs={'rows': 3}),
'remote_presenters': forms.Textarea(attrs={'rows': 3}),
'start_time': forms.DateTimeInput(format='%Y-%m-%d %H:%M'),
'estimated_duration': forms.widgets.Select(
choices=Event.ESTIMATED_DURATION_CHOICES
),
}
exclude = ('featured', 'status', 'archive_time', 'slug')
# Fields specified to enforce order
fields = (
'title', 'placeholder_img', 'picture',
'description',
'short_description', 'location', 'start_time',
'estimated_duration',
'channels', 'tags', 'call_info',
'remote_presenters',
'additional_links', 'privacy', 'popcorn_url'
)
def __init__(self, *args, **kwargs):
super(EventRequestForm, self).__init__(*args, **kwargs)
self.fields['channels'].help_text = (
'<a href="%s" class="btn btn-default" target="_blank">'
'<i class="glyphicon glyphicon-plus-sign"></i>'
'New channel'
'</a>' % reverse('manage:channel_new'))
self.fields['placeholder_img'].label = 'Placeholder image'
if 'instance' in kwargs:
event = kwargs['instance']
approvals = event.approval_set.all()
self.initial['approvals'] = [app.group for app in approvals]
if event.location:
self.fields['start_time'].help_text = (
'Time zone of this date is that of {0}.'.format(
event.location.timezone
)
)
# when the django forms present the start_time form field,
# it's going to first change it to UTC, then strftime it
self.initial['start_time'] = (
event.location_time.replace(tzinfo=utc)
)
else:
self.fields['start_time'].help_text = (
'Since there is no location, time zone of this date '
' is UTC.'
)
if event.pk:
tags_formatted = ','.join(x.name for x in event.tags.all())
self.initial['tags'] = tags_formatted
def clean_tags(self):
tags = self.cleaned_data['tags']
split_tags = [t.strip() for t in tags.split(',') if t.strip()]
final_tags = []
for tag_name in split_tags:
try:
t = Tag.objects.get(name=tag_name)
except Tag.DoesNotExist:
try:
t = Tag.objects.get(name__iexact=tag_name)
except Tag.DoesNotExist:
t = Tag.objects.create(name=tag_name)
final_tags.append(t)
return final_tags
def clean_slug(self):
"""Enforce unique slug across current slugs and old slugs."""
slug = self.cleaned_data['slug']
if Event.objects.filter(slug=slug).exclude(pk=self.instance.id):
raise forms.ValidationError('This slug is already in use.')
return slug
@staticmethod
def _check_staticpage_slug(slug):
if StaticPage.objects.filter(url__startswith='/%s' % slug).count():
raise forms.ValidationError(
"The default slug for event would clash with an existing "
"static page with the same URL. It might destroy existing "
"URLs that people depend on."
)
def clean(self):
data = super(EventRequestForm, self).clean()
if data.get('title') and not data.get('slug'):
# this means you have submitted a form without being explicit
# about what the slug will be
self._check_staticpage_slug(slugify(data.get('title')).lower())
elif data.get('slug'):
# are you trying to change it?
if self.instance.slug != data['slug']:
# apparently, you want to change to a new slug
self._check_staticpage_slug(data['slug'])
return data
class EventEditForm(EventRequestForm):
approvals = forms.ModelMultipleChoiceField(
queryset=Group.objects.filter(permissions__codename='change_approval'),
required=False,
widget=forms.CheckboxSelectMultiple()
)
curated_groups = forms.CharField(
required=False,
help_text='Curated groups only matter if the event is open to'
' "%s".' % [x[1] for x in Event.PRIVACY_CHOICES
if x[0] == Event.PRIVACY_CONTRIBUTORS][0]
)
class Meta(EventRequestForm.Meta):
exclude = ('archive_time',)
# Fields specified to enforce order
fields = (
'title', 'slug', 'status', 'privacy', 'featured', 'template',
'template_environment', 'placeholder_img', 'picture',
'location',
'description', 'short_description', 'start_time',
'estimated_duration',
'archive_time',
'channels', 'tags',
'call_info', 'additional_links', 'remote_presenters',
'approvals',
'popcorn_url',
'pin',
'recruitmentmessage',
)
def __init__(self, *args, **kwargs):
super(EventEditForm, self).__init__(*args, **kwargs)
if 'pin' in self.fields:
self.fields['pin'].help_text = (
"Use of pins is deprecated. Use Curated groups instead."
)
self.fields['popcorn_url'].label = 'Popcorn URL'
if 'recruitmentmessage' in self.fields:
self.fields['recruitmentmessage'].required = False
self.fields['recruitmentmessage'].label = 'Recruitment message'
self.fields.keyOrder.pop(
self.fields.keyOrder.index('curated_groups')
)
self.fields.keyOrder.insert(
self.fields.keyOrder.index('privacy') + 1,
'curated_groups'
)
self.fields['location'].queryset = (
Location.objects.filter(is_active=True).order_by('name')
)
if self.instance and self.instance.id:
# Checking for id because it might be an instance but never
# been saved before.
self.fields['picture'].widget = PictureWidget(self.instance)
# make the list of approval objects depend on requested approvals
# print Group.approval_set.filter(event=self.instance)
group_ids = [
x[0] for x in
Approval.objects
.filter(event=self.instance).values_list('group')
]
self.fields['approvals'].queryset = Group.objects.filter(
id__in=group_ids
)
# If the event has a duration, it doesn't make sense to
# show the estimated_duration widget.
if self.instance.duration:
del self.fields['estimated_duration']
elif self.initial.get('picture'):
self.fields['picture'].widget = PictureWidget(
Picture.objects.get(id=self.initial['picture']),
editable=False
)
else:
# too early to associate with a picture
del self.fields['picture']
def clean_pin(self):
value = self.cleaned_data['pin']
if value and len(value) < 4:
raise forms.ValidationError("Pin too short to be safe")
return value
def clean(self):
cleaned_data = super(EventEditForm, self).clean()
if not (
cleaned_data.get('placeholder_img') or cleaned_data.get('picture')
):
raise forms.ValidationError("Must have a placeholder or a Picture")
return cleaned_data
class EventExperiencedRequestForm(EventEditForm):
class Meta(EventEditForm.Meta):
exclude = ('featured', 'archive_time', 'slug')
# Fields specified to enforce order
fields = (
'title', 'status', 'privacy', 'template',
'template_environment', 'placeholder_img', 'picture',
'description',
'short_description', 'location', 'start_time',
'estimated_duration',
'channels', 'tags', 'call_info',
'additional_links', 'remote_presenters',
'approvals', 'pin', 'popcorn_url', 'recruitmentmessage'
)
class EventArchiveForm(BaseModelForm):
class Meta(EventRequestForm.Meta):
exclude = ()
fields = ('template', 'template_environment')
class EventArchiveTimeForm(BaseModelForm):
class Meta(EventRequestForm.Meta):
exclude = ()
fields = ('archive_time',)
def __init__(self, *args, **kwargs):
super(EventArchiveTimeForm, self).__init__(*args, **kwargs)
self.fields['archive_time'].help_text = (
"Input timezone is <b>UTC</b>"
)
if self.initial['archive_time']:
# Force it to a UTC string so Django doesn't convert it
# to a timezone-less string in the settings.TIME_ZONE timezone.
self.initial['archive_time'] = (
self.initial['archive_time'].strftime('%Y-%m-%d %H:%M:%S')
)
def clean_archive_time(self):
value = self.cleaned_data['archive_time']
# force it back to UTC
if value:
value = value.replace(tzinfo=utc)
return value
class EventTweetForm(BaseModelForm):
class Meta:
model = EventTweet
fields = (
'text',
'include_placeholder',
'send_date',
)
widgets = {
'text': forms.Textarea(attrs={
'autocomplete': 'off',
'data-maxlength': 140,
'rows': 2,
})
}
def __init__(self, event, *args, **kwargs):
super(EventTweetForm, self).__init__(*args, **kwargs)
self.fields['text'].help_text = (
'<b class="char-counter">140</b> characters left. '
'<span class="char-counter-warning"><b>Note!</b> Sometimes '
'Twitter can count it as longer than it appears if you '
'include a URL. '
'It\'s usually best to leave a little room.</span>'
)
# it's a NOT NULL field but it defaults to NOW()
# in the views code
self.fields['send_date'].required = False
if event.tags.all():
def pack_tags(tags):
return '[%s]' % (','.join('"%s"' % x for x in tags))
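# e.g. pack_tags(["gis", "flood"]) -> '["gis","flood"]' -- a JSON-style list
# embedded in the data-tags attribute of the link below (tag names illustrative)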
self.fields['text'].help_text += (
'<br><a href="#" class="include-event-tags" '
'data-tags=\'%s\'>include all event tags</a>'
% pack_tags([x.name for x in event.tags.all()])
)
if event.placeholder_img or event.picture:
from airmozilla.main.helpers import thumbnail
if event.picture:
pic = event.picture.file
else:
pic = event.placeholder_img
thumb = thumbnail(pic, '160x90', crop='center')
self.fields['include_placeholder'].help_text = (
'<img src="%(url)s" alt="placeholder" class="thumbnail" '
'width="%(width)s" width="%(height)s">' %
{
'url': thumb.url,
'width': thumb.width,
'height': thumb.height
}
)
else:
del self.fields['include_placeholder']
if event.location:
self.fields['send_date'].help_text = (
'Timezone is %s' % event.location.timezone
)
class ChannelForm(BaseModelForm):
class Meta:
model = Channel
exclude = ('created',)
def __init__(self, *args, **kwargs):
super(ChannelForm, self).__init__(*args, **kwargs)
self.fields['parent'].required = False
if kwargs.get('instance'):
self.fields['parent'].choices = [
(x, y) for (x, y)
in self.fields['parent'].choices
if x != kwargs['instance'].pk
]
def clean(self):
cleaned_data = super(ChannelForm, self).clean()
if 'always_show' in cleaned_data and 'never_show' in cleaned_data:
# if one is true, the other one can't be
if cleaned_data['always_show'] and cleaned_data['never_show']:
raise forms.ValidationError(
"Can't both be on always and never shown"
)
return cleaned_data
class TemplateEditForm(BaseModelForm):
class Meta:
model = Template
widgets = {
'content': forms.Textarea(attrs={'rows': 20})
}
class TemplateMigrateForm(BaseForm):
template = forms.ModelChoiceField(
widget=forms.widgets.RadioSelect(),
queryset=Template.objects.all()
)
def __init__(self, *args, **kwargs):
self.instance = kwargs.pop('instance')
super(TemplateMigrateForm, self).__init__(*args, **kwargs)
scheduled = defaultdict(int)
removed = defaultdict(int)
events = Event.objects.all()
for each in events.values('template').annotate(Count('template')):
scheduled[each['template']] = each['template__count']
events = events.filter(status=Event.STATUS_REMOVED)
for each in events.values('template').annotate(Count('template')):
removed[each['template']] = each['template__count']
choices = [('', '---------')]
other_templates = Template.objects.exclude(id=self.instance.id)
for template in other_templates.order_by('name'):
choices.append((
template.id,
'{0} ({1} events, {2} removed)'.format(
template.name,
scheduled[template.id],
removed[template.id],
)
))
self.fields['template'].choices = choices
class RecruitmentMessageEditForm(BaseModelForm):
class Meta:
model = RecruitmentMessage
widgets = {
'notes': forms.Textarea(attrs={'rows': 3})
}
exclude = ('modified_user', 'created')
class SurveyEditForm(BaseModelForm):
class Meta:
model = Survey
exclude = ('created', 'modified')
def __init__(self, *args, **kwargs):
super(SurveyEditForm, self).__init__(*args, **kwargs)
self.fields['active'].validators.append(self.validate_active)
self.fields['events'].required = False
self.fields['events'].queryset = (
self.fields['events'].queryset.order_by('title')
)
def validate_active(self, value):
if value and not self.instance.question_set.count():
raise forms.ValidationError(
"Survey must have at least one question in order to be active"
)
class SurveyNewForm(BaseModelForm):
class Meta:
model = Survey
fields = ('name', )
class LocationEditForm(BaseModelForm):
timezone = forms.ChoiceField(choices=TIMEZONE_CHOICES)
def __init__(self, *args, **kwargs):
super(LocationEditForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs:
initial = kwargs['instance'].timezone
else:
initial = settings.TIME_ZONE
self.initial['timezone'] = initial
class Meta:
model = Location
class LocationDefaultEnvironmentForm(BaseModelForm):
class Meta:
model = LocationDefaultEnvironment
fields = ('privacy', 'template', 'template_environment')
class RegionEditForm(BaseModelForm):
class Meta:
model = Region
class TopicEditForm(BaseModelForm):
class Meta:
model = Topic
def __init__(self, *args, **kwargs):
super(TopicEditForm, self).__init__(*args, **kwargs)
self.fields['topic'].widget = forms.widgets.TextInput(attrs={
'placeholder': 'for example Partners for Firefox OS'
})
class ApprovalForm(BaseModelForm):
class Meta:
model = Approval
fields = ('comment',)
widgets = {
'comment': forms.Textarea(attrs={'rows': 3})
}
class HeadersField(forms.CharField):
widget = forms.widgets.Textarea
def __init__(self, *args, **kwargs):
super(HeadersField, self).__init__(*args, **kwargs)
self.help_text = self.help_text or mark_safe(
"For example <code>Content-Type: text/xml</code>"
)
def to_python(self, value):
if not value:
return {}
headers = {}
for line in [x.strip() for x in value.splitlines() if x.strip()]:
try:
key, value = line.split(':', 1)
except ValueError:
raise forms.ValidationError(line)
headers[key.strip()] = value.strip()
return headers
def prepare_value(self, value):
if isinstance(value, basestring):
# already prepared
return value
elif value is None:
return ''
out = []
for key in sorted(value):
out.append('%s: %s' % (key, value[key]))
return '\n'.join(out)
def widget_attrs(self, widget):
attrs = super(HeadersField, self).widget_attrs(widget)
if 'rows' not in attrs:
attrs['rows'] = 3
return attrs
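# Example round-trip for HeadersField (header values illustrative):
#   to_python("Cache-Control: no-cache\nContent-Type: text/xml")
#   -> {"Cache-Control": "no-cache", "Content-Type": "text/xml"}
# prepare_value() renders the dict back as one "Key: Value" line per header,
# sorted by key; a line without a ":" raises a ValidationError.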
class StaticPageEditForm(BaseModelForm):
headers = HeadersField(required=False)
class Meta:
model = StaticPage
fields = (
'url',
'title',
'content',
'privacy',
'template_name',
'allow_querystring_variables',
'headers',
)
def __init__(self, *args, **kwargs):
super(StaticPageEditForm, self).__init__(*args, **kwargs)
self.fields['url'].label = 'URL'
self.fields['template_name'].label = 'Template'
choices = (
('', 'Default'),
('staticpages/nosidebar.html', 'Default (but no sidebar)'),
('staticpages/blank.html', 'Blank (no template wrapping)'),
)
self.fields['template_name'].widget = forms.widgets.Select(
choices=choices
)
def clean_url(self):
value = self.cleaned_data['url']
if value.startswith('sidebar'):
# expect it to be something like
# 'sidebar_bottom_how-tos'
try:
__, __, channel_slug = value.split('_', 2)
except ValueError:
raise forms.ValidationError(
"Must be format like `sidebar_bottom_channel-slug`"
)
try:
Channel.objects.get(slug=channel_slug)
except Channel.DoesNotExist:
raise forms.ValidationError(
"No channel slug found called `%s`" % channel_slug
)
return value
def clean(self):
cleaned_data = super(StaticPageEditForm, self).clean()
if 'url' in cleaned_data and 'privacy' in cleaned_data:
if cleaned_data['url'].startswith('sidebar_'):
if cleaned_data['privacy'] != Event.PRIVACY_PUBLIC:
raise forms.ValidationError(
"If a sidebar the privacy must be public"
)
return cleaned_data
class VidlyURLForm(forms.Form):
url = forms.CharField(
required=True,
label='URL',
widget=forms.widgets.TextInput(attrs={
'placeholder': 'E.g. http://videos.mozilla.org/.../file.flv',
'class': 'input-xxlarge',
})
)
token_protection = forms.BooleanField(required=False)
hd = forms.BooleanField(required=False, label='HD')
def __init__(self, *args, **kwargs):
disable_token_protection = kwargs.pop(
'disable_token_protection',
False
)
super(VidlyURLForm, self).__init__(*args, **kwargs)
if disable_token_protection:
self.fields['token_protection'].widget.attrs['disabled'] = (
'disabled'
)
self.fields['token_protection'].required = True
self.fields['token_protection'].help_text = (
'Required for non-public events'
)
def clean_url(self):
# annoyingly, we can't use forms.URLField since it barfs on
# Basic Auth urls. Instead, let's just make some basic validation
# here
value = self.cleaned_data['url']
if ' ' in value or '://' not in value:
raise forms.ValidationError('Not a valid URL')
value, error = url_transformer.run(value)
if error:
raise forms.ValidationError(error)
return value
class EventsAutocompleteForm(BaseForm):
q = forms.CharField(required=True, max_length=200)
max = forms.IntegerField(required=False, min_value=1, max_value=20)
class AcceptSuggestedEventForm(BaseModelForm):
class Meta:
model = SuggestedEvent
fields = ('review_comments',)
widgets = {
'review_comments': forms.Textarea(attrs={'rows': 3})
}
class TagEditForm(BaseModelForm):
class Meta:
model = Tag
class TagMergeForm(BaseForm):
name = forms.ChoiceField(
label='Name to keep',
widget=forms.widgets.RadioSelect()
)
def __init__(self, name, *args, **kwargs):
super(TagMergeForm, self).__init__(*args, **kwargs)
def describe_tag(tag):
count = Event.objects.filter(tags=tag).count()
if count == 1:
tmpl = '%s (%d time)'
else:
tmpl = '%s (%d times)'
return tmpl % (tag.name, count)
self.fields['name'].choices = [
(x.name, describe_tag(x))
for x in Tag.objects.filter(name__iexact=name)
]
class VidlyResubmitForm(VidlyURLForm):
id = forms.IntegerField(widget=forms.widgets.HiddenInput())
class URLMatchForm(BaseModelForm):
class Meta:
model = URLMatch
exclude = ('use_count',)
def clean_name(self):
name = self.cleaned_data['name'].strip()
if URLMatch.objects.filter(name__iexact=name):
raise forms.ValidationError("URL matcher name already in use")
return name
def clean_string(self):
string = self.cleaned_data['string']
try:
re.compile(string)
except Exception as e:
raise forms.ValidationError(e)
return string
class SuggestedEventCommentForm(BaseModelForm):
class Meta:
model = SuggestedEventComment
fields = ('comment',)
widgets = {
'comment': forms.Textarea(attrs={'rows': 3})
}
class DiscussionForm(BaseModelForm):
class Meta:
model = Discussion
fields = ('enabled', 'closed', 'moderate_all', 'notify_all',
'moderators')
class CommentEditForm(BaseModelForm):
class Meta:
model = Comment
fields = ('status', 'comment', 'flagged')
class CommentsFilterForm(BaseForm):
user = forms.CharField(required=False)
comment = forms.CharField(required=False)
status = forms.ChoiceField(
required=False,
choices=(
(('', 'ALL'),) + Comment.STATUS_CHOICES + (('flagged', 'Flagged'),)
)
)
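# Note: the class statement below subclasses the CommentsFilterForm defined
# just above and rebinds the same name, so the final CommentsFilterForm has
# the user/comment/status fields plus an extra "event" filter field.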
class CommentsFilterForm(CommentsFilterForm):
event = forms.CharField(required=False)
class EventAssignmentForm(BaseModelForm):
class Meta:
model = EventAssignment
fields = ('locations', 'users')
def __init__(self, *args, **kwargs):
super(EventAssignmentForm, self).__init__(*args, **kwargs)
users = (
User.objects
.extra(select={
'email_lower': 'LOWER(email)'
})
.filter(is_active=True, is_staff=True)
.order_by('email_lower')
)
def describe_user(user):
ret = user.email
if user.first_name or user.last_name:
name = (user.first_name + ' ' + user.last_name).strip()
ret += ' (%s)' % name
return ret
self.fields['users'].choices = [
(x.pk, describe_user(x)) for x in users
]
self.fields['users'].required = False
self.fields['users'].help_text = 'Start typing to find users.'
locations = (
Location.objects.filter(is_active=True)
.order_by('name')
)
if self.instance.event.location:
locations = locations.exclude(pk=self.instance.event.location.pk)
self.fields['locations'].choices = [
(x.pk, x.name) for x in locations
]
self.fields['locations'].required = False
self.fields['locations'].help_text = 'Start typing to find locations.'
class EventTranscriptForm(BaseModelForm):
class Meta:
model = Event
fields = ('transcript', )
class QuestionForm(BaseModelForm):
class Meta:
model = Question
fields = ('question',)
class EventSurveyForm(BaseForm):
survey = forms.ChoiceField(
widget=forms.widgets.RadioSelect()
)
def __init__(self, *args, **kwargs):
super(EventSurveyForm, self).__init__(*args, **kwargs)
def describe_survey(survey):
output = survey.name
if not survey.active:
output += ' (not active)'
count_questions = Question.objects.filter(survey=survey).count()
if count_questions == 1:
output += ' (1 question)'
else:
output += ' (%d questions)' % count_questions
return output
self.fields['survey'].choices = [
('0', 'none')
] + [
(x.id, describe_survey(x)) for x in Survey.objects.all()
]
class PictureForm(BaseModelForm):
class Meta:
model = Picture
fields = ('file', 'notes', 'default_placeholder')
class AutocompeterUpdateForm(BaseForm):
verbose = forms.BooleanField(required=False)
max_ = forms.IntegerField(required=False)
all = forms.BooleanField(required=False)
flush_first = forms.BooleanField(required=False)
since = forms.IntegerField(
required=False,
help_text="Minutes since last modified"
)
def clean_since(self):
value = self.cleaned_data['since']
if value:
print "Minutes", int(value)
value = datetime.timedelta(minutes=int(value))
return value
class ISODateTimeField(forms.DateTimeField):
def strptime(self, value, __):
return dateutil.parser.parse(value)
class EventsDataForm(BaseForm):
since = ISODateTimeField(required=False)
| bsd-3-clause |
catapult-project/catapult | third_party/google-endpoints/future/types/newlist.py | 82 | 2284 | """
A list subclass for Python 2 that behaves like Python 3's list.
The primary difference is that lists have a .copy() method in Py3.
Example use:
>>> from builtins import list
>>> l1 = list() # instead of [] for an empty list
>>> l1.append('hello')
>>> l2 = l1.copy()
"""
import sys
import copy
from future.utils import with_metaclass
from future.types.newobject import newobject
_builtin_list = list
ver = sys.version_info[:2]
class BaseNewList(type):
def __instancecheck__(cls, instance):
if cls == newlist:
return isinstance(instance, _builtin_list)
else:
return issubclass(instance.__class__, cls)
class newlist(with_metaclass(BaseNewList, _builtin_list)):
"""
A backport of the Python 3 list object to Py2
"""
def copy(self):
"""
L.copy() -> list -- a shallow copy of L
"""
return copy.copy(self)
def clear(self):
"""L.clear() -> None -- remove all items from L"""
for i in range(len(self)):
self.pop()
def __new__(cls, *args, **kwargs):
"""
list() -> new empty list
list(iterable) -> new list initialized from iterable's items
"""
if len(args) == 0:
return super(newlist, cls).__new__(cls)
else:
# any first argument, including another newlist, is passed through unchanged
value = args[0]
return super(newlist, cls).__new__(cls, value)
def __add__(self, value):
return newlist(super(newlist, self).__add__(value))
def __radd__(self, left):
" left + self "
try:
return newlist(left) + self
except:
return NotImplemented
def __getitem__(self, y):
"""
x.__getitem__(y) <==> x[y]
Warning: a bug in Python 2.x prevents indexing via a slice from
returning a newlist object.
"""
if isinstance(y, slice):
return newlist(super(newlist, self).__getitem__(y))
else:
return super(newlist, self).__getitem__(y)
def __native__(self):
"""
Hook for the future.utils.native() function
"""
return list(self)
def __nonzero__(self):
return len(self) > 0
__all__ = ['newlist']
| bsd-3-clause |
michaelhowden/eden | controllers/vulnerability.py | 6 | 86050 | # -*- coding: utf-8 -*-
"""
Sahana Eden Vulnerability Controller
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# @ToDo: deployment_setting
#countries = ["TL", "VN"]
countries = ["VN"]
#DEBUG = True
#s3_debug = s3base.s3_debug
# -----------------------------------------------------------------------------
def init():
"""
Create the static GeoJSONs that the app needs
"""
gis.export_admin_areas(countries)
return "complete"
# -----------------------------------------------------------------------------
def index():
""" Module Home Page: Map """
# This module uses it's own Theme
settings.base.theme = "Vulnerability"
# Additional scripts
append = s3.scripts.append
append("/%s/static/scripts/yepnope.1.5.4-min.js" % appname)
append("/%s/static/scripts/jit/jit-yc.js" % appname)
append("/%s/static/scripts/S3/s3.gis.loader.js" % appname)
if s3.debug:
append("/%s/static/themes/Vulnerability/js/jquery.ui.fnselectmenu.js" % appname)
append("/%s/static/themes/Vulnerability/js/TypeHelpers.js" % appname)
#append("/%s/static/scripts/ui/progressbar.js" % appname)
append("/%s/static/themes/Vulnerability/js/s3.vulnerability.js" % appname)
append("/%s/static/themes/Vulnerability/js/s3.reports.js" % appname)
append("/%s/static/themes/Vulnerability/js/s3.analysis.js" % appname)
append("/%s/static/themes/Vulnerability/js/s3.treemap.js" % appname)
append("/%s/static/scripts/jquery.dataTables.js" % appname)
append("/%s/static/scripts/jquery.dataTables.fnSetFilteringDelay.js" % appname)
append("/%s/static/scripts/S3/s3.dataTables.js" % appname)
append("/%s/static/scripts/flot/jquery.flot.js" % appname)
append("/%s/static/scripts/flot/jquery.flot.fillbetween.js" % appname)
append("/%s/static/scripts/flot/jquery.flot.crosshair.js" % appname)
else:
append("/%s/static/themes/Vulnerability/js/s3.vulnerability.min.js" % appname)
append("/%s/static/scripts/S3/s3.dataTables.min.js" % appname)
append("/%s/static/scripts/flot/jquery.flot.min.js" % appname)
append("/%s/static/scripts/flot/jquery.flot.fillbetween.min.js" % appname)
append("/%s/static/scripts/flot/jquery.flot.crosshair.min.js" % appname)
js_global = []
append = js_global.append
# i18n
i18n = "\n".join((
"i18n.all='%s'" % T("All"),
"i18n.gis_requires_login='%s'" % T("Requires Login"),
"i18n.no_matching_result='%s'" % T("No matching result"),
"i18n.no_entries_found='%s'" % T("No Entries Found"),
"i18n.loading_report_details='%s'" % T("Loading report details"),
"i18n.choose='%s'" % T("Choose"),
"i18n.population='%s'" % T("Population"),
"i18n.reported='%s'" % T("Reported"),
"i18n.country='%s'" % COUNTRY,
"i18n.country_in='%s'" % T("Country in"),
"i18n.select_country='%s'" % T("Select a Country"),
"i18n.show_more='%s'" % T("Show more"),
"i18n.show_less='%s'" % T("Show less"),
"i18n.submit_data='%s'" % T("Submit Data"),
"i18n.analysis='%s'" % T("Analysis"),
"i18n.reports='%s'" % T("Reports"),
"i18n.all_reports='%s'" % T("All reports"),
"i18n.my_reports='%s'" % T("My reports"),
"i18n.approval_request_submitted='%s'" % T("Approval request submitted"),
"i18n.thankyou_for_your_approval='%s'" % T("Thank you for your approval"),
"i18n.reject_request_submitted='%s'" % T("Reject request submitted"),
"i18n.submission_has_been_declined='%s'" % T("Thank you, the submission%(br)shas been declined") % dict(br="<br />"),
"i18n.last_data_collected_on='%s'" % T("Last Data Collected on"),
"i18n.by='%s'" % T("by"),
"i18n.in_='%s'" % T("in"),
"i18n.in_this='%s'" % T("in this"),
"i18n.of='%s'" % T("of"),
"i18n.out_of='%s'" % T("out of"),
"i18n.review='%s'" % T("Review"),
"i18n.submitted_by='%s'" % T("submitted by"),
"i18n.go_to_the='%s'" % T("Go to the"),
"i18n.select_data_type='%s'" % T("Select data type"),
"i18n.about_to_submit_indicator_ratings='%s'" % T("You are about to submit indicator ratings for"),
"i18n.poor='%s'" % T("poor"),
"i18n.fair='%s'" % T("fair"),
"i18n.moderate='%s'" % T("moderate"),
"i18n.strong='%s'" % T("strong"),
"i18n.data_quality='%s'" % T("Data Quality"),
"i18n.of_total_data_reported='%s'" % T("of total data reported"),
"i18n.uploading_report_details='%s'" % T("Uploading report details"),
"i18n.upload_successful='%s'" % T("Upload successful"),
"i18n.no_data='%s'" % T("No Data"),
"i18n.extrapolated='%s'" % T("Extrapolated"),
"\n",
))
append(i18n)
append(s3base.S3DataTable.i18n())
# Save data in the session for later
table = s3db.vulnerability_aggregated_indicator
query = (table.uuid == "Resilience")
result = db(query).select(table.parameter_id, limitby=(0, 1))
if result:
session.s3.resilience_id = result.first().parameter_id
dtable = s3db.stats_demographic
query = (dtable.name == "Population")
result = db(query).select(dtable.parameter_id, limitby=(0, 1))
if result:
session.s3.population_id = result.first().parameter_id
# Get the list of indicators
itable = db.vulnerability_indicator
rows = db(itable.deleted == False).select(itable.name,
itable.description,
itable.parameter_id,
orderby=itable.posn)
pids = []
pappend = pids.append
indicators = OrderedDict()
count = 1
for row in rows:
pappend(row.parameter_id)
indicators[count] = dict(i=row.parameter_id,
n=row.name,
d=row.description)
count += 1
append('''\nidata=%s''' % json.dumps(indicators))
session.s3.indicator_pids = pids
# Get the L0 hdata & summary vdata
hdata, vdata = l0()
# Get the default location to open the map
bounds = None
root_org = auth.root_org()
start = False
if root_org:
otable = s3db.org_organisation
ttable = s3db.gis_location_tag
gtable = db.gis_location
query = (otable.id == root_org) & \
(ttable.tag == "ISO2") & \
(ttable.value == otable.country)
r = db(query).select(ttable.location_id,
limitby=(0, 1)).first()
if r and r.location_id in countries:
start = True
append('''\nstart=%s''' % r.location_id)
# Add the child L1 summary vdata
l1(r.location_id, vdata)
if not start:
append('''\nstart=""''')
dumps = json.dumps
script = '''
hdata=%s
vdata=%s''' % (dumps(hdata), dumps(vdata))
append(script)
s3.js_global.append("".join(js_global))
# Reports
# These get pulled-in via AJAX
# from s3.s3data import S3DataTable
# resource = s3db.resource("vulnerability_document")
# list_fields = ["id",
# "date",
# "location_id",
# "location_id$L2",
# "source_id"
# "document_type",
# "created_by",
# "approved_by",
# ]
# rfields = resource.resolve_selectors(list_fields)[0]
# filteredrows = resource.count()
# dt = S3DataTable(rfields, [], orderby=~s3db.vulnerability_document.date)
# level_1_titles = [["Approval pending", T("Approval pending")],
# ["VCA Report", T("VCA Report")],
# ["Report", T("Report")],
# ]
# report = dt.html(filteredrows,
# filteredrows,
# "report",
# dt_pagination = "false",
# dt_searching = "false",
# dt_dom = "t",
# dt_group = [4, 3],
# dt_group_totals = [level_1_titles],
# dt_ajax_url = URL(c="vulnerability",
# f="report",
# extension="aadata",
# vars={"id": "report"},
# ),
# dt_action_col = -1,
# dt_group_space = "true",
# dt_shrink_groups = "accordion",
# dt_group_types = ["text", "none"],
# )
# s3.report = report
# TreeMap
s3.stylesheets.append("jit/base.css")
user = auth.user
if user:
user_name = "%s %s" % (user.first_name, user.last_name)
else:
user_name = ""
today = request.utcnow.strftime("%d-%b-%y")
response.view = "vulnerability/map.html"
return dict(indicators=indicators,
user_name = user_name,
today = today,
COUNTRY = COUNTRY.upper(),
CHOOSE_COUNTRY = T("Choose Country"))
# -----------------------------------------------------------------------------
def l0():
"""
Return hdata (Hierarchy Labels) & summary vdata (Resilience) for all Countries
- used only by the initial map load
"""
gtable = db.gis_location
ttable = s3db.gis_location_tag
htable = s3db.gis_hierarchy
query = (gtable.id == ttable.location_id) & \
(ttable.tag == "ISO2") & \
(ttable.value.belongs(countries)) & \
(gtable.id == htable.location_id)
atable = s3db.vulnerability_aggregate
lquery = (atable.parameter_id == session.s3.resilience_id) & \
(atable.agg_type == 4) & \
(atable.location_id == gtable.id)
left = atable.on(lquery)
hdata = {}
vdata = {}
ids = []
append = ids.append
rows = db(query).select(gtable.id,
gtable.name,
htable.L1,
htable.L2,
htable.L3,
#htable.L4,
#atable.date,
atable.median,
orderby=~atable.date,
left=left)
for row in rows:
id = row[gtable].id
if id in ids:
# We're only interested in the most recent data per location
continue
append(id)
_grow = row[gtable]
_hrow = row[htable]
hdata[id] = dict(l1 = _hrow.L1,
l2 = _hrow.L2,
l3 = _hrow.L3,
#l4 = _hrow.L4,
)
median = row[atable].median
if median is None:
resilience = 0
else:
resilience = int(round(median, 0))
vdata[id] = dict(r = resilience,
n = _grow.name,
l = 0,
)
return hdata, vdata
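# Returned shapes (ids and names illustrative):
#   hdata = {123: {"l1": "Province", "l2": "District", "l3": "Commune"}}
#   vdata = {123: {"r": 3, "n": "Vietnam", "l": 0}}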
# -----------------------------------------------------------------------------
def l1(id, vdata):
"""
Update summary vdata (Resilience) for all child L1s of the start country
- used only by the initial map load
"""
gtable = db.gis_location
# @ToDo: Filter by Date not just filter-out old locations
query = (gtable.parent == id) & \
(gtable.level == "L1") & \
(gtable.end_date == None)
aitable = db.vulnerability_aggregated_indicator
atable = db.vulnerability_aggregate
rquery = (aitable.name == "Resilience") & \
(atable.parameter_id == aitable.parameter_id) & \
(atable.agg_type == 4)
rows = db(query).select(gtable.id,
gtable.name,
)
for row in rows:
query = rquery & (atable.location_id == row.id)
_row = db(query).select(#atable.date,
atable.median,
orderby=~atable.date).first()
resilience = 0
if _row and _row.median is not None:
resilience = int(round(_row.median, 0))
vdata[row.id] = dict(r = resilience,
n = row.name,
l = 1,
f = id,
)
return
# -----------------------------------------------------------------------------
def vdata():
"""
Return JSON of the Vulnerability data for a location
- for display in Map Popups and the Drawer
vdata = { id : {
'n' : name,
'l' : level,
'f' : parent,
'r' : resilience,
'i' : indicator data,
'c' : count (how many L3s reported in this region),
't' : count (how many L3s total in this region),
'q' : quality,
'p' : population,
's' : source (for population),
'b' : population breakdown (for L3s),
'd' : date last collected (for L3s),
'w' : collected by (for L3s),
'm' : images (for L3s),
}
}
"""
try:
id = request.args[0]
except:
raise HTTP(400)
#if DEBUG:
# start = datetime.datetime.now()
gtable = s3db.gis_location
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - start
# duration = "{:.2f}".format(duration.total_seconds())
# s3_debug("LocationModel load completed in %s seconds" % duration)
# start = datetime.datetime.now()
query = (gtable.id == id)
location = db(query).select(gtable.name,
gtable.level,
gtable.parent,
gtable.L0,
gtable.L1,
gtable.L2,
#gtable.L3,
limitby=(0, 1)).first()
if not location or not location.level:
return ""
script = ""
level = location.level
data = dict(n = location.name,
l = int(level[1]),
f = location.parent,
)
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - start
# duration = "{:.2f}".format(duration.total_seconds())
# s3_debug("Query 1 (location lookup) completed in %s seconds" % duration)
# start = datetime.datetime.now()
# Represent numbers in the correct format
nrepresent = IS_INT_AMOUNT().represent
vdata = {}
atable = s3db.vulnerability_aggregate
resilience_id = session.s3.resilience_id
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - start
# duration = "{:.2f}".format(duration.total_seconds())
# s3_debug("StatsModel load completed in %s seconds" % duration)
# start = datetime.datetime.now()
if level != "L3":
# We need to read the ids, names & resiliences of the next level down for the selectmenu styling of the dropdown
_level = int(level[1]) + 1
# @ToDo: Filter by Date not just filter-out old locations
query = (gtable.parent == id) & \
(gtable.level == "L%s" % _level) & \
(gtable.deleted == False) & \
(gtable.end_date == None)
lquery = (atable.parameter_id == resilience_id) & \
(atable.agg_type == 4) & \
(atable.end_date == None) & \
(atable.location_id == gtable.id)
left = atable.on(lquery)
rows = db(query).select(gtable.id,
gtable.name,
#atable.date,
atable.median,
#atable.ward_count,
#atable.reported_count,
left=left)
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - start
# duration = "{:.2f}".format(duration.total_seconds())
# s3_debug("Query 2 (next level down) completed in %s seconds" % duration)
# start = datetime.datetime.now()
for row in rows:
grow = row[gtable]
median = row[atable].median
if median is None:
resilience = 0
else:
resilience = int(round(median, 0))
vdata[grow.id] = dict(r = resilience,
n = grow.name,
l = _level,
f = id,
)
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - start
# duration = "{:.2f}".format(duration.total_seconds())
# s3_debug("Query 2 (row in rows) completed in %s seconds" % duration)
# start = datetime.datetime.now()
else:
# We are an L3 already
# Last Data Collected on d by w
utable = auth.settings.table_user
vtable = s3db.vulnerability_data
query = (vtable.location_id == id)
left = utable.on(utable.id == vtable.created_by)
row = db(query).select(vtable.date,
utable.first_name,
utable.last_name,
orderby=~vtable.date,
left=left,
limitby=(0, 1)).first()
if row:
data["d"] = row[vtable].date.isoformat()
user = row[utable]
data["w"] = "%s %s" % (user.first_name, user.last_name)
else:
data["d"] = ""
data["w"] = ""
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - start
# duration = "{:.2f}".format(duration.total_seconds())
# s3_debug("Query 2 (last data collection) completed in %s seconds" % duration)
# start = datetime.datetime.now()
# Get the Resilience
query = (atable.parameter_id == resilience_id) & \
(atable.agg_type == 4) & \
(atable.end_date == None) & \
(atable.location_id == id)
r = db(query).select(atable.date,
atable.median,
atable.ward_count,
atable.reported_count,
# Should be only one with end_date == None
#orderby=~atable.date,
limitby=(0, 1)).first()
if not r or r.median is None:
data["r"] = 0
if level != "L3":
data["c"] = 0
data["q"] = "p"
# Total number of L3s in this region
data["t"] = nrepresent(len(gis.get_children(id, level="L3")))
else:
data["r"] = int(round(r.median, 0))
# How many L3s have reported?
reported_count = r.reported_count
data["c"] = nrepresent(reported_count)
# Total number of L3s in this region
ward_count = r.ward_count
data["t"] = nrepresent(ward_count)
if level != "L3":
# Calculate Quality
if reported_count == 0 or ward_count == 0:
q = "p"
else:
# use true division so partial coverage is not truncated to 0 on Python 2
q = float(reported_count) / ward_count
if q < 0.25:
q = "p"
elif q < 0.50:
q = "f"
elif q < 0.75:
q = "m"
else:
q = "s"
data["q"] = q
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - start
# duration = "{:.2f}".format(duration.total_seconds())
# s3_debug("Query 3 (resilience) completed in %s seconds" % duration)
# start = datetime.datetime.now()
# Get the aggregated data for this location for all indicators
query = (atable.location_id == id) & \
(atable.parameter_id.belongs(session.s3.indicator_pids))
rows = db(query).select(atable.parameter_id,
atable.min,
atable.max,
atable.median,
orderby=~atable.date,
)
indicator_data = {}
pids = []
pappend = pids.append
for row in rows:
pid = row.parameter_id
if pid in pids:
# We're only interested in the most recent data per indicator
continue
pappend(pid)
indicator_data[pid] = dict(min = row.min,
max = row.max,
med = row.median,
)
data["i"] = indicator_data
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - start
# duration = "{:.2f}".format(duration.total_seconds())
# s3_debug("Query 4 (indicators) completed in %s seconds" % duration)
# start = datetime.datetime.now()
# Get the Demographic data for the location
ddtable = s3db.stats_demographic_data
if level != "L3":
# Just Population
p = None
if level != "L2":
# Lookup direct
query = (ddtable.location_id == id) & \
(ddtable.parameter_id == session.s3.population_id)
row = db(query).select(ddtable.value,
orderby=~ddtable.date,
limitby=(0, 1)).first()
if row:
p = row.value
if not p:
# Fallback to an aggregate
# @ToDo: mark this in some way - either '> p' or else '~p' by averaging from the data that we do have
atable = s3db.stats_demographic_aggregate
query = (atable.agg_type == 2) & \
(atable.location_id == id) & \
(atable.parameter_id == session.s3.population_id) & \
(atable.end_date == None)
row = db(query).select(atable.sum,
# Should be only one with end_date == None
#orderby=~atable.date,
limitby=(0, 1)).first()
if row:
p = row.sum
data["p"] = nrepresent(p) if p else ""
else:
# L3: Population, Breakdowns & Source
# Add all available breakdowns to the output
b = {}
dtable = s3db.stats_demographic
query = (dtable.deleted != True) & \
(dtable.name != "Population")
demos = db(query).select(dtable.id,
dtable.name)
for d in demos:
b[d.id] = dict(n = s3_unicode(T(d.name)),
v = "",
s = "")
srctable = s3db.stats_source
query = (ddtable.location_id == id) & \
(ddtable.parameter_id == dtable.parameter_id) & \
(ddtable.source_id == srctable.id)
rows = db(query).select(dtable.id,
dtable.name,
ddtable.value,
srctable.name,
#ddtable.date,
orderby=~ddtable.date
)
ids = []
append = ids.append
for row in rows:
_id = row[dtable].id
if _id in ids:
# We're only interested in the most recent data per demographic
continue
append(_id)
d = row[dtable]
if d.name == "Population":
data["p"] = nrepresent(row[ddtable].value)
data["s"] = row[srctable].name
else:
# Breakdown
b[_id]["v"] = nrepresent(row[ddtable].value)
b[_id]["s"] = row[srctable].name
data["b"] = b
# Images
itable = s3db.doc_image
ttable = s3db.pr_image_library
vdoc_table = s3db.vulnerability_document
query = (vdoc_table.location_id == id) & \
(vdoc_table.approved_by != None) & \
(vdoc_table.document_type.belongs(("image", "map"))) & \
(vdoc_table.doc_id == itable.doc_id) & \
(ttable.original_name == itable.file)
left = utable.on(utable.id == itable.created_by)
images = db(query).select(itable.file,
itable.comments,
ttable.new_name,
utable.first_name,
utable.last_name,
left=left,
orderby=~itable.date)
m = []
mappend = m.append
for image in images:
i = image[itable]
user = image[utable]
mappend([image[ttable].new_name, i.file, i.comments,
"%s %s" % (user.first_name, user.last_name)])
data["m"] = m
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - start
# duration = "{:.2f}".format(duration.total_seconds())
# s3_debug("Query 5 (demographics) completed in %s seconds" % duration)
# start = datetime.datetime.now()
vdata[id] = data
script = '''n=%s\n''' % json.dumps(vdata)
response.headers["Content-Type"] = "application/json"
return script
# -----------------------------------------------------------------------------
def rdata():
"""
Controller to extract data for resilience analysis line graph
returns a JavaScript assignment like:
r={"location_id":
{"year":
{"indicator_index": [value, deviation]}
}
}
where indicator_index is 0 for the overall resilience (median), or
1-10 for the individual indicators (=index in the list + 1).
Any data which are not available from the db will be omitted (to
save bandwidth) - the client-side script must detect any missing
keys itself.
@todo: this controller must make sure that there is always a median
(overall resilience) in each set => calculate if not present.
"""
response.headers["Content-Type"] = "application/json"
if not len(request.args):
return '''n={}'''
else:
locations = list(set([a for a in request.args if a.isdigit()]))
fyear = None
lyear = None
if "after" in get_vars:
try:
fyear = int(get_vars["after"])
except ValueError:
pass
if "before" in get_vars:
try:
lyear = int(get_vars["before"])
except ValueError:
pass
if lyear and fyear and lyear > fyear:
lyear, fyear = fyear, lyear
if fyear:
fdate = datetime.datetime(fyear, 1, 1)
else:
fdate = None
if lyear:
ldate = datetime.datetime(lyear + 1, 1, 1)
else:
ldate = request.utcnow
resilience_id = session.s3.resilience_id
indicator_pids = session.s3.indicator_pids
pos = Storage([(indicator_pids[i], i + 1)
for i in xrange(len(indicator_pids))])
pos[resilience_id] = 0
atable = s3db.vulnerability_aggregate
query = ((atable.parameter_id == resilience_id) & \
(atable.agg_type == 4)) | \
(atable.parameter_id.belongs(indicator_pids))
if len(locations) == 1:
query &= (atable.location_id == locations[0])
else:
query &= (atable.location_id.belongs(locations))
if fyear:
query &= (atable.date >= fdate)
if lyear is None or lyear == request.utcnow.year:
query &= ((atable.end_date < ldate) | (atable.end_date == None))
else:
query &= (atable.end_date < ldate)
rows = db(query).select(atable.location_id,
atable.parameter_id,
atable.date,
atable.mean,
atable.median,
atable.mad,
orderby=~atable.date)
keys = []
seen = keys.append
data = dict()
for row in rows:
l = row.location_id
y = row.date.year
p = pos[row.parameter_id]
if (l, y, p) in keys:
continue
seen((l, y, p))
if p == pos[resilience_id]:
val = int(round(row.median, 0))
else:
val = row.median
dev = row.mad
if l not in data:
ldata = data[l] = dict()
else:
ldata = data[l]
if y not in ldata:
ydata = ldata[y] = dict()
else:
ydata = ldata[y]
ydata[p] = (val, dev)
script = '''r=%s\n''' % json.dumps(data)
return script
# -----------------------------------------------------------------------------
def tmdata():
""" Controller to extract tree map data """
MAX_LEVEL = 3 # the lowest level for child lookups
# Requested locations
if not len(request.args):
response.headers["Content-Type"] = "application/json"
return '''sdata={}'''
else:
locations = list(set([int(a) for a in request.args if a.isdigit()]))
sdata = Storage()
# Vulnerability Indicators
indicator_pids = session.s3.indicator_pids
idefaults = [(i, 0) for i in indicator_pids]
# Locations Hierarchy
ltable = s3db.gis_location
parents = list(locations)
children = list(locations)
while parents or children:
query = None
if children:
query = (ltable.id.belongs(children))
if parents:
q = (ltable.parent.belongs(parents))
if query is None:
query = q
else:
query |= q
if query is None:
break
rows = db(query).select(ltable.id,
ltable.name,
ltable.level,
ltable.parent)
next_parents = []
next_children = []
for row in rows:
this = row.id
level = int(row.level[1])
parent = row.parent
if this not in sdata:
sdata[this] = {}
data = sdata[this]
data["n"] = row.name
data["l"] = level
data["f"] = parent
data["p"] = 0
data["i"] = dict(idefaults)
data["x"] = this not in locations
if level > 0 and parent:
if parent in parents and \
level < MAX_LEVEL and \
parent in locations:
pass
#next_parents.append(this)
elif this in children and parent not in sdata:
next_children.append(parent)
parents = next_parents
children = next_children
# Population
if level in ("L0", "L1"):
# Lookup direct
ddtable = s3db.stats_demographic_data
query = (ddtable.location_id.belongs(sdata.keys())) & \
(ddtable.parameter_id == session.s3.population_id)
rows = db(query).select(ddtable.location_id,
ddtable.value,
orderby=~ddtable.date)
location_ids = []
seen = location_ids.append
for row in rows:
location_id = row.location_id
if location_id not in location_ids:
seen(location_id)
sdata[location_id]["p"] = row.value
# Look up aggregates
atable = s3db.vulnerability_aggregate
query = (atable.location_id.belongs(sdata.keys())) & \
(atable.parameter_id == session.s3.population_id)
rows = db(query).select(atable.location_id,
atable.sum,
atable.ward_count,
atable.reported_count,
orderby=~atable.date)
location_ids = []
seen = location_ids.append
for row in rows:
location_id = row.location_id
if location_id not in location_ids:
seen(location_id)
data = sdata[location_id]
if not data["p"]:
data["p"] = row.sum
data["t"] = row.ward_count
data["r"] = row.reported_count
# Calculate ward_count manually for Lx without aggregates
#commune_level = "L%s" % MAX_LEVEL
#for location_id in sdata.keys():
# data = sdata[location_id]
# if "t" not in data:
# data["r"] = 0
# # @ToDo: optimise this to do in-bulk rather than per-record
# data["t"] = len(gis.get_children(location_id, level=commune_level))
# Indicators
query = (atable.location_id.belongs(sdata.keys())) & \
(atable.parameter_id.belongs(indicator_pids))
rows = db(query).select(atable.location_id,
atable.parameter_id,
atable.median)
for row in rows:
location_id = row.location_id
location_data = sdata[location_id]
if "i" not in location_data:
location_data["i"] = dict(idefaults)
location_data["i"][row.parameter_id] = row.median
# Return as script
script = '''sdata=%s\n''' % json.dumps(sdata)
response.headers["Content-Type"] = "application/json"
return script
# -----------------------------------------------------------------------------
def filter_report(filter_request, loc_id, loc_level):
"""
Helper function to extract the selections from the side panel
and generate a resource filter
"""
vdoc_table = db.vulnerability_document
gtable = db.gis_location
query = (vdoc_table.deleted != True) & \
(vdoc_table.location_id == gtable.id)
if loc_id != -1:
# Don't filter to just next level
#next_loc_level = "L%s" % (int(loc_level[1:]) + 1)
#child_locations = gis.get_children(loc_id, next_loc_level)
child_locations = gis.get_children(loc_id)
if len(child_locations) == 0:
query &= (vdoc_table.location_id == loc_id)
else:
child_ids = [row.id for row in child_locations]
child_ids.append(loc_id) # include the selected location
query &= (vdoc_table.location_id.belongs(child_ids))
else:
# Show the country-level
query &= (gtable.level == "L0")
if filter_request["from_date"]:
query &= (vdoc_table.date >= filter_request["from_date"])
if filter_request["to_date"]:
query &= (vdoc_table.date <= filter_request["to_date"])
document_types = ["vca"]
indicator = (vdoc_table.document_type == "vca")
if "indicator" in filter_request:
document_types.append("indicator")
if "demographics" in filter_request:
document_types.append("demographic")
if "map" in filter_request:
document_types.append("map")
if "images" in filter_request:
document_types.append("image")
if "reports" in filter_request:
document_types.append("other")
if len(document_types) == 1:
query &= (vdoc_table.document_type == "vca")
else:
query &= (vdoc_table.document_type.belongs(document_types))
if "myReports" in filter_request:
user_id = auth.user.id
query &= ((vdoc_table.approved_by == user_id)
| (vdoc_table.created_by == user_id))
if "text" in filter_request and filter_request["text"] != "":
utable = auth.settings.table_user
text = "%%%s%%" % filter_request["text"].lower()
query &= (vdoc_table.location_id == gtable.id)
query &= (vdoc_table.created_by == utable.id)
query &= ((gtable.name.lower().like(text))
| (utable.first_name.lower().like(text))
| (utable.last_name.lower().like(text)))
# Now ensure that all unapproved records are added to the return list
query = ((vdoc_table.deleted != True) & \
(vdoc_table.approved_by == None) & \
(vdoc_table.location_id == gtable.id)
) | (query)
return query
# -------------------------------------------------------------------------
def report_group(row):
"""
Virtual field to show the group that the report belongs to
used by vulnerability/report
"""
if "vulnerability_document" in row:
row = row["vulnerability_document"]
# These get i18n later
if row.approved_by is None:
return "Approval pending"
elif row.document_type == "vca":
return "VCA Report"
else:
return "Report"
# -----------------------------------------------------------------------------
def reportDataTable():
"""
Return a dataTable using the selected filter options
"""
from s3.s3data import S3DataTable
vdoc_table = s3db.vulnerability_document
vdoc_table.group = Field.Method("group", report_group)
gtable = db.gis_location
# -------------------------------------------------------------------------
# Set up custom represents
# -------------------------------------------------------------------------
def location_repr(id):
"""
Return the location name (level) wrapped in a span
"""
if not id:
repr_text = messages["NONE"]
else:
row = locations.get(id, None)
if not row:
repr_text = messages.UNKNOWN_OPT
else:
level = loc_labels[row["level"]]
repr_text = "%s (%s)" % (row["name"], level)
return SPAN(repr_text, _class="communeCell")
# -------------------------------------------------------------------------
def submitted_repr(id):
"""
Return the initial of the first name and the complete last name
"""
if not id:
repr_text = T("Imported data")
else:
row = users.get(id, None)
if row:
repr_text = "%s. %s" % (row["first_name"][0], row["last_name"])
else:
repr_text = messages.UNKNOWN_OPT
return repr_text
# -------------------------------------------------------------------------
def approved_repr(id):
"""
Return the initials of the first and the last name
"""
if id is None:
repr_text = APPROVAL_PENDING
elif id == 0:
repr_text = APPROVED
else:
row = users.get(id, None)
if row:
repr_text = T("Approved by %(first_name)s.%(last_name)s") % \
dict(first_name = row["first_name"][0],
last_name = row["last_name"][0])
else:
repr_text = messages.UNKNOWN_OPT
return repr_text
# -------------------------------------------------------------------------
def action_repr(id):
"""
Return the action button for this row
"""
approved = approvals.get(id, None)
if approved != None:
repr_text = A(VIEW,
_id = id,
_class = "viewButton",
_href = "javascript:viewReportDetails(%s)" % id
)
else:
repr_text = A(REVIEW,
_id = id,
_class = "reviewButton",
_href = "javascript:showReportDetails(%s)" % id
)
repr_text.append(A(CLOSE,
_class = "closeReviewButton",
_href = "javascript:hideReportDetails(%s)" % id
))
return repr_text
filter_request = request.post_vars
loc_level = -1
if filter_request:
loc_id = filter_request.get("location_id", -1)
if loc_id == "-1":
loc_id = -1
if loc_id:
row = db(gtable.id == loc_id).select(gtable.level,
gtable.path,
limitby=(0, 1)
).first()
try:
loc_level = row.level
except:
# Invalid location ID
loc_id = -1
else:
if loc_level == "L0":
L0 = loc_id
else:
L0 = row.path.split("/")[0]
filter = filter_report(filter_request, loc_id, loc_level)
if loc_id == -1:
loc_labels = gis.get_location_hierarchy()
else:
loc_labels = gis.get_location_hierarchy(location=L0)
#############################################################
# Note if list_fields are changed here then they also need
# to be changed in index, where the table is initialised
#############################################################
if loc_level == -1:
loc_list_field = "location_id$L0"
loc_group_field = "gis_location.L0"
elif loc_level == "L0":
loc_list_field = "location_id$L1"
loc_group_field = "gis_location.L1"
elif loc_level == "L1":
loc_list_field = "location_id$L2"
loc_group_field = "gis_location.L2"
elif loc_level == "L2":
loc_list_field = "location_id$L3"
loc_group_field = "gis_location.L3"
elif loc_level == "L3":
loc_list_field = "location_id$L3"
loc_group_field = "gis_location.L3"
# @ToDo: Support countries with L4s/L5s
#elif loc_level == "L4":
# loc_list_field = "location_id$L4"
# loc_group_field = "gis_location.L4"
list_fields = [(T("Action"), "id"),
(T("Date"), "date"),
(T("Location"), "location_id"),
# Field.Method
"group",
loc_list_field,
"document_type",
(T("Submitted by"), "created_by"),
(T("Status"), "approved_by"),
]
    # Ensure that we also get the records awaiting approval
resource = s3db.resource("vulnerability_document", unapproved=True)
if filter_request:
resource.add_filter(filter)
totalrows = resource.count()
data = resource.select(list_fields,
orderby=~vdoc_table.date,
limit=None,
count=True,
represent=False,
#raw_data=True
)
filteredrows = data["numrows"]
if filteredrows > 0:
# Do represents in-bulk
# @ToDo: Replace with S3Represents & define before select
approvals = {}
locations = []
lappend = locations.append
users = []
uappend = users.append
rows = data["rows"]
for row in rows:
#raw = row["_row"]
location_id = row["vulnerability_document.location_id"]
if location_id and location_id not in locations:
lappend(location_id)
user_id = row["vulnerability_document.created_by"]
if user_id and user_id not in users:
uappend(user_id)
user_id = row["vulnerability_document.approved_by"]
if user_id:
approvals[row["vulnerability_document.id"]] = user_id
if user_id not in users:
uappend(user_id)
lrows = db(gtable.id.belongs(locations)).select(gtable.id,
gtable.name,
gtable.level,
gtable.L1,
gtable.L2)
locations = lrows.as_dict()
utable = auth.settings.table_user
urows = db(utable.id.belongs(users)).select(utable.id,
utable.first_name,
utable.last_name)
users = urows.as_dict()
APPROVED = T("Approved")
APPROVAL_PENDING = T("Approval pending")
CLOSE = T("Close")
REVIEW = T("Review")
VIEW = T("View")
# Apply represents
date_repr = vdoc_table.date.represent
doc_type_repr = vdoc_table.document_type.represent
for row in rows:
v = row["vulnerability_document.id"]
row["vulnerability_document.id"] = action_repr(v)
v = row["vulnerability_document.date"]
row["vulnerability_document.date"] = date_repr(v)
v = row["vulnerability_document.location_id"]
row["vulnerability_document.location_id"] = location_repr(v)
v = row["vulnerability_document.document_type"]
row["vulnerability_document.document_type"] = doc_type_repr(v)
v = row["vulnerability_document.created_by"]
row["vulnerability_document.created_by"] = submitted_repr(v)
v = row["vulnerability_document.approved_by"]
row["vulnerability_document.approved_by"] = approved_repr(v)
# The types are fixed and will always be displayed (even if empty)
type_totals = {"Approval pending" : 0,
"VCA Report" : 0,
"Report" : 0
}
# Calculate the report group totals
location_totals = {}
if loc_level != -1:
loc_level = int(loc_level[1:])
if loc_level < 3:
loc_label = loc_labels["L%s" % (loc_level + 1)]
else:
loc_label = ""
for row in rows:
# Collect the type totals
group = row["vulnerability_document.group"]
if not group:
group = "Report"
type_totals[group] += 1
# Collect the Location sub totals
if row[loc_group_field] == "None":
# If the group field is none then use the location for the group
# This will happen for any report for the selected location
#location = row["vulnerability_document.location_id"].components[0]
# This gives invalid Unicode conversion & anyway doesn't seem useful
continue
else:
if loc_level != -1:
location = "%s (%s)" % (row[loc_group_field], loc_label)
else:
location = row[loc_group_field]
# Represent the field
row[loc_group_field] = location
# Populate the groupTotals to be read by dataTables
loc_code = "%s_%s" % (group, s3_unicode(location))
if loc_code in location_totals:
location_totals[loc_code] += 1
else:
location_totals[loc_code] = 1
group_totals = {
unicode(T("Approval pending")) : type_totals["Approval pending"],
unicode(T("VCA Reports")) : type_totals["VCA Report"],
unicode(T("Reports")) : type_totals["Report"]
}
rfields = data["rfields"]
dt = S3DataTable(rfields,
rows,
orderby=~vdoc_table.date
)
# No need as hidden when used for Grouping
#if loc_level != -1:
# # Amend the column label
# dt.heading[loc_group_field] = loc_label
dt.defaultActionButtons(resource)
if request.extension == "html":
level_1_titles = [["Approval pending", T("Approval pending")],
["VCA Report", T("VCA Reports")],
["Report", T("Reports")],
]
report = dt.html(totalrows,
filteredrows,
"report",
dt_action_col = -1,
# Pagination done client-side currently!
dt_ajax_url = None,
#dt_ajax_url = URL(c="vulnerability",
# f="report",
# extension="aadata",
# vars={"id": "report"},
# ),
dt_dom = "t",
# No server-side pagination
dt_pagination = "false",
dt_pageLength = filteredrows,
dt_searching = "false",
dt_group = [3, 4],
dt_group_totals = [group_totals, location_totals],
dt_group_titles = [level_1_titles],
dt_group_types = ["text", "none"],
dt_group_space = "true",
dt_shrink_groups = "accordion",
)
reportCount = T("%(count)s Entries Found") % dict(count=filteredrows)
report.append(INPUT(_type="hidden",
_id="reportCount",
_name="config",
_value=reportCount))
return str(report)
elif request.extension == "aadata":
# Unsupported
        raise NotImplementedError("aadata requests are not supported here")
else:
return ""
# -----------------------------------------------------------------------------
def getReportDetails():
"""
Method to get the details of a report from the vulnerability_document id
        It will build the custom display, which is essentially a form
        wrapped around a table. If buttons are required, they will be added,
        allowing the report to be approved or rejected.
"""
_id = get_vars.id
vdoc_table = s3db.vulnerability_document
vdoc = db(vdoc_table.id == _id).select(vdoc_table.name,
vdoc_table.document_type,
vdoc_table.doc_id,
vdoc_table.source_id,
limitby=(0, 1)).first()
document_type = vdoc.document_type
valid = True
if document_type == "indicator":
# Get the data for this report
vdtable = db.vulnerability_data
vitable = db.vulnerability_indicator
query = (vdtable.deleted == False) & \
(vdtable.source_id == vdoc.source_id) & \
(vitable.parameter_id == vdtable.parameter_id)
rows = db(query).select(vdtable.value,
vitable.name,
orderby=vitable.posn)
# Build the custom table
table = TABLE(TR(TH(_class="indicatorLabels"),
TH(DIV(1), _class="indicator1"),
TH(DIV(2), _class="indicator2"),
TH(DIV(3), _class="indicator3"),
TH(DIV(4), _class="indicator4"),
TH(DIV(5), _class="indicator5"),
),
TR(TH(),
TH(SPAN(XML("←"), _class="arrow"),
" %s" % T("LOW RESILIENCE"),
_colspan=2),
TH(" %s" % T("HIGH RESILIENCE"),
SPAN(XML("→"), _class="arrow"),
_class="highResilienceLabel",
_colspan=3)
),
_class="indicatorsTable")
mark = XML("<mark>*</mark>")
tr_class = "white"
for row in rows:
tr_class = "gray" if tr_class == "white" else "white"
tr = TR(_class=tr_class)
name = row.vulnerability_indicator.name
td = TD(mark, _class="indicatorLabels")
td.append(name)
tr.append(td)
value = int(row.vulnerability_data.value)
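            # Render the 1-5 scale as disabled radio buttons, pre-selected at the stored value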
for i in range(5):
option = INPUT(_type = "radio",
_name = name,
_value = i + 1,
value = value,
_disabled = "disabled",
)
tr.append(option)
table.append(tr)
elif document_type == "demographic":
# Get the data for this report
ddtable = s3db.stats_demographic_data
sdtable = db.stats_demographic
query = (ddtable.deleted == False) & \
(ddtable.source_id == vdoc.source_id) & \
(sdtable.parameter_id == ddtable.parameter_id)
rows = db(query).select(ddtable.value,
ddtable.location_id,
sdtable.name,
orderby = sdtable.name)
# Build the custom table
table = TABLE(_class = "demographicsTable")
table.append(TR(TD(vdoc.name, _colspan=3)))
tr_class = "grey"
location_represent = s3db.gis_LocationRepresent()
for row in rows:
tr_class = "grey" if tr_class == "white" else "white"
tr = TR(_class = tr_class)
name = row.stats_demographic.name
tr.append(TD(name, _class = "demoLabel"))
value = IS_INT_AMOUNT().represent(row.stats_demographic_data.value)
tr.append(TD(value, _class = "demoStatistic"))
location = location_represent(row.stats_demographic_data.location_id)
tr.append(TD(location, _class = "demoSource"))
table.append(tr)
elif document_type in ("map", "image"):
ditable = s3db.doc_image
record = db(ditable.doc_id == vdoc.doc_id).select(ditable.id,
ditable.name,
ditable.file,
ditable.comments,
limitby=(0, 1)
).first()
if record:
size = (250, 250)
image = s3db.pr_image_represent(record.file, size=size)
size = s3db.pr_image_size(image, size)
desc = DIV(record.comments, _class="imageDesc")
filename = record.name
url_small = URL(c="default", f="download", args=image)
alt = record.comments if record.comments else filename
thumb = IMG(_src=url_small,
_alt=alt,
_width=size[0],
_height=size[1]
)
url_full = URL(c="default", f="download", args=record.file)
download = A(T("Download"), _class="download", _href=url_full)
view = A(T("View full size"),
_class="download",
_href=URL(c="vulnerability", f="view_image",
args=record.id),
_target="blank")
table = TABLE(_class = "imageTable")
table.append(TR(TD(thumb, _colspan=4)))
table.append(TR(TD(desc),
TD(download),
TD(DIV(" | ", _class="divider")),
TD(view),
_class="mapRow"))
else:
valid = False
elif document_type in ("other", "vca"):
doctable = s3db.doc_document
record = db(doctable.doc_id == vdoc.doc_id).select(doctable.id,
doctable.file,
doctable.name,
limitby=(0, 1)
).first()
if record:
desc = DIV(record.name, _class="imageDesc")
url = URL(c="default", f="download", args=record.file)
download = A(T("Download"), _class="download", _href=url)
table = TABLE(_class="imageTable")
table.append(TR(TD(desc),
TD(download),
_class="mapRow"))
else:
valid = False
else:
valid = False
# Place the table in a form and attach the buttons (if required)
form = FORM(_id="form%s" % _id)
if valid:
form.append(table)
else:
form.append(DIV(T("No data available"), _class="mapRow"))
if request.args(0) == "review":
if valid:
form.append(INPUT(_type="button", _name="Approve%s" % _id,
_value=T("Approve"), _class="approveButton"))
form.append(INPUT(_type="button", _name="Decline%s" % _id,
_value=T("Decline"), _class="declineButton"))
return str(form)
# -----------------------------------------------------------------------------
def view_image():
"""
View a Fullscreen version of an Image - called from Reports
"""
try:
_id = request.args[0]
except:
return "Need to provide the id of the Image"
table = s3db.doc_image
record = db(table.id == _id).select(table.name,
table.file,
table.comments,
limitby=(0, 1)).first()
desc = DIV(record.comments, _class="imageDesc")
filename = record.name
url = URL(c="default", f="download", args=record.file)
alt = record.comments if record.comments else filename
image = IMG(_src=url, _alt=alt)
output = Storage(image = image,
desc = desc,
)
return output
# -----------------------------------------------------------------------------
def approve_report(id):
"""
Function to approve a report
"""
# Approve the vulnerability_document record
resource = s3db.resource("vulnerability_document", id=id, unapproved=True)
resource.approve()
# Read the record details
vdoc_table = db.vulnerability_document
record = db(vdoc_table.id == id).select(vdoc_table.document_type,
vdoc_table.doc_id,
vdoc_table.source_id,
limitby=(0, 1)).first()
# Approve the linked records
document_type = record.document_type
if document_type == "indicator":
tablename = "vulnerability_data"
table = s3db[tablename]
query = (table.source_id == record.source_id)
agg_function = "vulnerability_update_aggregates"
elif document_type == "demographic":
tablename = "stats_demographic_data"
table = s3db[tablename]
query = (table.source_id == record.source_id)
agg_function = "stats_demographic_update_aggregates"
elif document_type in ("map", "image"):
tablename = "doc_image"
query = (s3db[tablename].doc_id == record.doc_id)
elif document_type in ("vca", "other"):
tablename = "doc_document"
query = (s3db[tablename].doc_id == record.doc_id)
else:
current.log.error("Report not Approved as unknown type", document_type)
return False
resource = s3db.resource(tablename, filter=query, unapproved=True)
resource.approve()
if document_type in ("indicator", "demographic"):
# Rebuild the relevant aggregates
rows = resource.select(fields=["data_id",
"parameter_id",
"date",
"location_id",
"value"],
as_rows=True)
s3task.async(agg_function,
vars=dict(records=rows.json()))
return True
# -----------------------------------------------------------------------------
def decline_report(id):
"""
Function to Decline a report
"""
# Find the type of report that we have
vdoc_table = s3db.vulnerability_document
record = db(vdoc_table.id == id).select(vdoc_table.document_type,
vdoc_table.doc_id,
vdoc_table.source_id,
limitby=(0, 1)).first()
document_type = record.document_type
# Now that we have the necessary data, reject the report
resource = s3db.resource("vulnerability_document", id=id, unapproved=True)
resource.reject()
# Reject the linked data
if document_type in ("indicator", "demographic"):
source_id = record.source_id
# Reject the stats_data records
query = (db.stats_data.source_id == source_id)
resource = s3db.resource("stats_data", filter=query, unapproved=True)
resource.reject()
# Reject the instance records
if document_type == "indicator":
query = (s3db.vulnerability_data.source_id == source_id)
resource = s3db.resource("vulnerability_data", filter=query,
unapproved=True)
resource.reject()
elif document_type == "demographic":
query = (s3db.stats_demographic_data.source_id == source_id)
resource = s3db.resource("stats_demographic_data", filter=query,
unapproved=True)
resource.reject()
elif document_type in ("image", "map"):
query = (s3db.doc_image.doc_id == record.doc_id)
resource = s3db.resource("doc_image", filter=query, unapproved=True)
resource.reject()
elif document_type in ("other", "vca"):
query = (s3db.doc_document.doc_id == record.doc_id)
resource = s3db.resource("doc_document", filter=query, unapproved=True)
resource.reject()
else:
return False
return True
# -----------------------------------------------------------------------------
def report():
"""
Controller to list/view/approve/reject Reports.
- list uses a suitably-filtered dataTable
"""
s3.no_formats = True
arg = request.args(0)
if arg == "filter":
data = reportDataTable()
elif arg == "review" or arg == "view":
data = getReportDetails()
elif arg == "approve":
# Check authorization
permitted = auth.s3_has_permission("approve", "vulnerability_document")
if not permitted:
data = s3_unicode(T("You are not permitted to approve documents"))
else:
id = request.post_vars.id
if approve_report(id):
data = reportDataTable()
else:
data = s3_unicode(T("Failed to approve"))
elif arg == "decline":
# Check authorization
permitted = auth.s3_has_permission("approve", "vulnerability_document")
if not permitted:
data = s3_unicode(T("You are not permitted to approve documents"))
else:
id = request.post_vars.id
if decline_report(id):
data = reportDataTable()
else:
data = s3_unicode(T("Decline failed"))
else:
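        # Initial load: return the date filter widgets along with the report HTML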
date_widget = S3DateWidget(format="yy-mm-dd", future=0)
to_date = Field("to_date")
to_date.tablename = to_date._tablename = ""
from_date = Field("from_date")
from_date.tablename = from_date._tablename = ""
report = reportDataTable()
data = {"filter" : {"to_date" : str(date_widget(to_date, None)),
"from_date" : str(date_widget(from_date, None)),
},
"report" : report
}
response.headers["Content-Type"] = "application/json"
return json.dumps(data)
# -----------------------------------------------------------------------------
def submitData():
""" Controller to manage the AJAX import of vulnerability data """
# Get the action to be performed
action = request.post_vars.action
if action == "vulnerability":
return import_vul_ui()
elif action == "vulnerability_part1":
return import_vul_csv_part1()
elif action == "vulnerability_part2":
return import_vul_csv_part2()
elif action in ("map", "image", "other", "vca"):
return import_document(action)
elif action == "demographics":
return import_demo_ui()
elif action == "demographics_part1":
return import_demo_csv_part1()
elif action == "demographics_part2":
return import_demo_csv_part2()
# -----------------------------------------------------------------------------
def import_vul_ui():
"""
Controller to add a new set of vulnerability indicators
        which have been input directly into the GUI
"""
date = request.utcnow
post_vars = request.post_vars
location_id = post_vars.location
update_super = s3db.update_super
# First create the stats_source
# NB This is direct to SE, no E here!
ss_table = s3db.stats_source
source_id = ss_table.insert(name = "Vulnerability indicators submitted through UI")
# Next create the vulnerability_document
vdoc_table = s3db.vulnerability_document
id = vdoc_table.insert(document_type = "indicator",
date = date,
location_id = location_id,
source_id = source_id,
)
update_super(vdoc_table, dict(id=id))
# Get the list of indicators
itable = s3db.vulnerability_indicator
rows = db(itable.deleted == False).select(itable.posn,
itable.parameter_id,
orderby=itable.posn)
vd_table = db.vulnerability_data
for row in rows:
id = vd_table.insert(parameter_id = row.parameter_id,
location_id = location_id,
value = post_vars[str(row.posn)],
date = date,
source_id = source_id,
)
update_super(vd_table, dict(id=id))
# -----------------------------------------------------------------------------
def import_vul_csv_part1():
"""
Controller to manage the first phase of the import of vulnerability
indicators from CSV
"""
from gluon.serializers import json as jsons
try:
file = request.post_vars.file.file
except:
response.headers["Content-Type"] = "application/json"
return jsons({"Error": s3_unicode(T("File missing"))})
# Check authorization
authorised = auth.s3_has_permission("create", "vulnerability_data")
if not authorised:
response.headers["Content-Type"] = "application/json"
return jsons({"Error": s3_unicode(T("You are not permitted to upload files"))})
# Do a normal CSV import
output = s3_rest_controller("vulnerability", "data",
csv_stylesheet="data.xsl")
if "Error" in output:
response.headers["Content-Type"] = "application/json"
return jsons({"Error": s3_unicode(output["Error"])})
upload_id = output[0]
item_ids = output[1]
data = output[2]
# Loop through all the vulnerability_data & group by source_id
from lxml import etree
loc_labels = {}
ele_dict = {}
for value in data:
if value["s3_import_item.error"]:
response.headers["Content-Type"] = "application/json"
return jsons({"Error": value["s3_import_item.error"]})
ele = value["s3_import_item.element"]
ele = s3xml.xml_decode(ele)
try:
element = etree.fromstring(ele)
except:
return T("No valid data in the file")
data_dict = {}
data = element.findall("data")
for item in data:
f = item.get("field", None)
v = item.get("value", None)
data_dict[f] = v
references = element.findall("reference")
for reference in references:
f = reference.get("field", None)
if f == "source_id":
source_tuid = reference.get("tuid", None)
# tuid: stats_source//Level/Country/L1/L2/L3//Date
try:
# Extract the Location
loc_parts = source_tuid.split("//")[1].split("/")
data_dict["location"] = loc_parts[-1]
level = loc_parts[0]
country_code = loc_parts[1]
if country_code not in loc_labels:
country_name = gis.get_country(country_code, key_type="code")
table = s3db.gis_location
country_id = db(table.name == country_name).select(table.id,
limitby=(0, 1)).first().id
lx_labels = gis.get_location_hierarchy(location=country_id)
loc_labels[country_code] = lx_labels
else:
lx_labels = loc_labels[country_code]
data_dict["loc_label"] = lx_labels[level]
except:
# Invalid source_tuid
continue
elif f == "parameter_id":
t = reference.get("tuid", None)
try:
indicator = t.split("/")[1]
data_dict[f] = indicator
except:
# We can't do anything with a data element not linked to an Indicator
continue
if source_tuid in ele_dict:
ele_dict[source_tuid].append(data_dict)
else:
ele_dict[source_tuid] = [data_dict]
# Now prepare the data for display in the UI
from datetime import datetime
data_list = []
for (key, group) in ele_dict.items():
row = group[0]
group_dict = dict(
group = key,
date = datetime.strptime(row["date"], "%Y-%m-%d").strftime("%d-%b-%y"),
location = "%s %s" % (row["location"], row["loc_label"])
)
indicator_dict = {}
param_len = len(row["parameter_id"][0]) + 1 # include the separator
for row in group:
param = row["parameter_id"]
indicator_dict[param] = row["value"]
group_dict["data"] = indicator_dict
data_list.append(group_dict)
# Return the output
response.headers["Content-Type"] = "application/json"
return jsons({"upload_id" : upload_id,
"items" : item_ids,
"data" : data_list
})
# -----------------------------------------------------------------------------
def import_vul_csv_part2():
"""
Controller to manage the second phase of the import of vulnerability
indicators from CSV
"""
job_id = request.post_vars.job
if not job_id:
return "Error No Job ID's provided"
output = s3_rest_controller("vulnerability", "data",
csv_stylesheet="data.xsl")
totalRecords = output[0]
totalErrors = output[1]
totalIgnored = output[2]
from gluon.serializers import json as jsons
response.headers["Content-Type"] = "application/json"
return jsons({"totalRecords" : totalRecords,
"totalErrors" : totalErrors,
"totalIgnored" : totalIgnored
})
# -----------------------------------------------------------------------------
def import_document(document_type):
"""
Controller to store a document
"""
if document_type in ("map", "image"):
image = True
doc_table = s3db.doc_image
else:
image = False
doc_table = s3db.doc_document
post_vars = request.post_vars
file = post_vars.file
real_filename = file.filename
new_filename = doc_table.file.store(file, real_filename)
date = request.utcnow
location_id = post_vars.location
# Create the vulnerability_document
vdoc_table = s3db.vulnerability_document
id = vdoc_table.insert(document_type = document_type,
date = date,
location_id = location_id,
)
record = dict(id=id)
s3db.update_super(vdoc_table, record)
# Create the doc_document or doc_image
doc_table.insert(doc_id = record["doc_id"],
file = new_filename,
name = real_filename,
date = date,
comments = post_vars.desc,
location_id = location_id,
)
if image:
# Create a thumbnail of the image
s3db.pr_image_resize(file.file,
new_filename,
real_filename,
(250, 250),
)
# -----------------------------------------------------------------------------
def import_demo_ui():
"""
Controller to store a new set of demographic data which has been input
        directly into the GUI
"""
vdoc_table = s3db.vulnerability_document
ss_table = db.stats_source
update_super = s3db.update_super
post_vars = request.post_vars
location_id = post_vars.location
date_submitted = post_vars.reportDate
# First create the demographic_documents (one per source)
last_source = ""
source_list = {} # the sources
seen_source = [] # the sources that have already been seen
data = []
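    # The UI submits 7 demographic values (one per entry in demo_string_list below), each with an optional source and date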
for x in range(7):
value = post_vars["demoField%s" % x]
source = post_vars["sourceField%s" % x]
if source == "":
            # Allow the user to enter the source in just one field and reuse it for all subsequent fields
source = last_source
else:
last_source = source
date = post_vars["reportDate%s" % x]
data.append((value, source, date))
if source != "" and value != "":
# Add the source if we have a value
if source not in seen_source:
seen_source.append(source)
# Create the stats_source
# - note that this means we'll get multiple copies of the same sources
# - however approval is done by vulnerability_document, so each vulnerability_document needs a unique source :/
source_id = ss_table.insert(name = source)
# Now create the vulnerability_document
id = vdoc_table.insert(name = source,
date = date_submitted,
location_id = location_id,
document_type = "demographic",
source_id = source_id,
)
update_super(vdoc_table, dict(id=id))
source_list[source] = source_id
# Now get the Demographic parameter_ids
demo_string_list = ["Population",
"Male",
"Female",
"Over 60",
"Under 5",
"Households",
"Households below poverty line"
]
sd_table = s3db.stats_demographic
    rows = db(sd_table.name.belongs(demo_string_list)).select(sd_table.name,
                                                               sd_table.parameter_id)
# Sort these into the order of the UI
demo_recs = {}
for record in rows:
demo_recs[record.name] = record.parameter_id
demographics_list = []
for demo_string in demo_string_list:
if demo_string in demo_recs:
demographics_list.append(demo_recs[demo_string])
else:
demographics_list.append(None) # Should never have this
# Create the demographic_data records
sdd_table = db.stats_demographic_data
for x in range(7):
_data = data[x]
if _data[0] != "":
id = sdd_table.insert(parameter_id = demographics_list[x],
location_id = location_id,
value = _data[0],
date = _data[2],
source_id = source_list[_data[1]],
)
update_super(sdd_table, dict(id=id))
# -----------------------------------------------------------------------------
def import_demo_csv_part1():
"""
Controller to manage the first phase of the import of demographic data
from CSV
"""
from gluon.serializers import json as jsons
try:
file = request.post_vars.file.file
except:
response.headers["Content-Type"] = "application/json"
return jsons({"Error": s3_unicode(T("File missing"))})
# Check authorization
permitted = auth.s3_has_permission
authorised = permitted("create", "stats_demographic_data")
if not authorised:
response.headers["Content-Type"] = "application/json"
return jsons({"Error": s3_unicode(T("You are not permitted to upload files"))})
request.controller = "stats" # Need to set the controller to stats
output = s3_rest_controller("stats", "demographic_data",
csv_stylesheet="demographic_data.xsl")
if "Error" in output:
response.headers["Content-Type"] = "application/json"
return jsons({"Error": s3_unicode(output["Error"])})
upload_id = output[0]
item_ids = output[1]
data = output[2]
# Loop through all the stats_demographic_data & group by source_id
from lxml import etree
loc_labels = {}
ele_dict = {}
for value in data:
if value["s3_import_item.error"]:
response.headers["Content-Type"] = "application/json"
return jsons({"Error": value["s3_import_item.error"]})
ele = value["s3_import_item.element"]
ele = s3xml.xml_decode(ele)
try:
element = etree.fromstring(ele)
except:
return T("No valid data in the file")
data_dict = {}
data = element.findall("data")
for item in data:
f = item.get("field", None)
v = item.get("value", None)
data_dict[f] = v
references = element.findall("reference")
for reference in references:
f = reference.get("field", None)
if f == "source_id":
source_tuid = reference.get("tuid", None)
elif f == "location_id":
# tuid: Level/Country/L1/L2/L3
tuid = reference.get("tuid", None)
if tuid:
try:
# Extract the Location
loc_parts = tuid.split("/")
data_dict["location"] = loc_parts[-1]
level = loc_parts[0]
country_code = loc_parts[1]
if country_code not in loc_labels:
country_name = gis.get_country(country_code, key_type="code")
table = s3db.gis_location
country_id = db(table.name == country_name).select(table.id,
limitby=(0, 1)).first().id
lx_labels = gis.get_location_hierarchy(location=country_id)
loc_labels[country_code] = lx_labels
else:
lx_labels = loc_labels[country_code]
data_dict["loc_label"] = lx_labels[level]
except:
# Invalid location_tuid
continue
else:
uuid = reference.get("uuid", None)
if uuid:
data_dict["loc_label"] = COUNTRY
country_code = uuid.split(":")[-1]
data_dict["location"] = gis.get_country(country_code, key_type="code")
elif f == "parameter_id":
t = reference.get("tuid", None)
try:
demographic = t.split("/")[1]
data_dict[f] = demographic
except:
# We can't do anything with a data element not linked to a Demographic
continue
if source_tuid in ele_dict:
ele_dict[source_tuid].append(data_dict)
else:
ele_dict[source_tuid] = [data_dict]
# Now prepare the data for display in the UI
from datetime import datetime
data_list = []
for (key, group) in ele_dict.items():
row = group[0]
group_dict = dict(
group = key,
date = datetime.strptime(row["date"], "%Y-%m-%d").strftime("%d-%b-%y"),
location = "%s %s" % (row["location"], row["loc_label"])
)
indicator_dict = {}
param_len = len(row["parameter_id"][0]) + 1 # include the separator
for row in group:
param = row["parameter_id"]
indicator_dict[param] = row["value"]
group_dict["data"] = indicator_dict
data_list.append(group_dict)
# Return the output
response.headers["Content-Type"] = "application/json"
return jsons({"upload_id" : upload_id,
"items" : item_ids,
"data" : data_list
})
# -----------------------------------------------------------------------------
def import_demo_csv_part2():
"""
Controller to manage the second phase of the import of demographic data
from CSV
"""
job_id = request.post_vars.job
if not job_id:
return "Error No Job ID's provided"
# Fake the controller for the import
request.controller = "stats"
output = s3_rest_controller("stats", "demographic_data",
csv_stylesheet="demographic_data.xsl")
totalRecords = output[0]
totalErrors = output[1]
totalIgnored = output[2]
from gluon.serializers import json as jsons
response.headers["Content-Type"] = "application/json"
return jsons({"totalRecords" : totalRecords,
"totalErrors" : totalErrors,
"totalIgnored" : totalIgnored
})
# -----------------------------------------------------------------------------
def indicator():
""" REST Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def aggregated_indicator():
""" REST Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def data():
""" REST Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def document():
""" REST Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def aggregate():
""" REST Controller """
def clear_aggregates(r, **attr):
if not s3_has_role(ADMIN):
auth.permission.fail()
s3db.stats_demographic_rebuild_all_aggregates()
redirect(URL(c="vulnerability",
f="aggregate",
args="",
))
s3db.set_method("vulnerability", "aggregate",
method="clear",
                    action=clear_aggregates)
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def handdrawn():
""" REST Controller for Hand-drawn Maps """
table = s3db.vulnerability_document
s3.filter = (s3db.doc_image.doc_id == table.doc_id) & \
(table.document_type == "map")
return s3_rest_controller("doc", "image")
# -----------------------------------------------------------------------------
def hazard():
""" REST Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def risk():
""" REST Controller """
return s3_rest_controller(rheader=s3db.vulnerability_rheader)
# -----------------------------------------------------------------------------
def evac_route():
""" REST Controller """
return s3_rest_controller()
# END =========================================================================
| mit |
uruz/django-rest-framework | runtests.py | 63 | 3080 | #! /usr/bin/env python
from __future__ import print_function
import os
import subprocess
import sys
import pytest
PYTEST_ARGS = {
'default': ['tests', '--tb=short', '-s'],
'fast': ['tests', '--tb=short', '-q', '-s'],
}
FLAKE8_ARGS = ['rest_framework', 'tests', '--ignore=E501']
ISORT_ARGS = ['--recursive', '--check-only', 'rest_framework', 'tests']
sys.path.append(os.path.dirname(__file__))
def exit_on_failure(ret, message=None):
if ret:
sys.exit(ret)
def flake8_main(args):
print('Running flake8 code linting')
ret = subprocess.call(['flake8'] + args)
print('flake8 failed' if ret else 'flake8 passed')
return ret
def isort_main(args):
print('Running isort code checking')
ret = subprocess.call(['isort'] + args)
if ret:
print('isort failed: Some modules have incorrectly ordered imports. Fix by running `isort --recursive .`')
else:
print('isort passed')
return ret
def split_class_and_function(string):
class_string, function_string = string.split('.', 1)
return "%s and %s" % (class_string, function_string)
def is_function(string):
# `True` if it looks like a test function is included in the string.
return string.startswith('test_') or '.test_' in string
def is_class(string):
# `True` if first character is uppercase - assume it's a class name.
return string[0] == string[0].upper()
if __name__ == "__main__":
try:
sys.argv.remove('--nolint')
except ValueError:
run_flake8 = True
run_isort = True
else:
run_flake8 = False
run_isort = False
try:
sys.argv.remove('--lintonly')
except ValueError:
run_tests = True
else:
run_tests = False
try:
sys.argv.remove('--fast')
except ValueError:
style = 'default'
else:
style = 'fast'
run_flake8 = False
run_isort = False
if len(sys.argv) > 1:
pytest_args = sys.argv[1:]
first_arg = pytest_args[0]
try:
pytest_args.remove('--coverage')
except ValueError:
pass
else:
pytest_args = ['--cov', 'rest_framework'] + pytest_args
if first_arg.startswith('-'):
# `runtests.py [flags]`
pytest_args = ['tests'] + pytest_args
elif is_class(first_arg) and is_function(first_arg):
# `runtests.py TestCase.test_function [flags]`
expression = split_class_and_function(first_arg)
pytest_args = ['tests', '-k', expression] + pytest_args[1:]
elif is_class(first_arg) or is_function(first_arg):
# `runtests.py TestCase [flags]`
# `runtests.py test_function [flags]`
pytest_args = ['tests', '-k', pytest_args[0]] + pytest_args[1:]
else:
pytest_args = PYTEST_ARGS[style]
if run_tests:
exit_on_failure(pytest.main(pytest_args))
if run_flake8:
exit_on_failure(flake8_main(FLAKE8_ARGS))
if run_isort:
exit_on_failure(isort_main(ISORT_ARGS))
| bsd-2-clause |
hyperized/ansible | lib/ansible/module_utils/network/frr/providers/module.py | 20 | 2106 | #
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.frr.providers import providers
from ansible.module_utils._text import to_text
class NetworkModule(AnsibleModule):
fail_on_missing_provider = True
def __init__(self, connection=None, *args, **kwargs):
super(NetworkModule, self).__init__(*args, **kwargs)
if connection is None:
connection = Connection(self._socket_path)
self.connection = connection
@property
def provider(self):
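        # Lazily resolve and cache the provider implementation matching the device's network_os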
if not hasattr(self, '_provider'):
capabilities = self.from_json(self.connection.get_capabilities())
network_os = capabilities['device_info']['network_os']
network_api = capabilities['network_api']
if network_api == 'cliconf':
connection_type = 'network_cli'
cls = providers.get(network_os, self._name.split('.')[-1], connection_type)
if not cls:
msg = 'unable to find suitable provider for network os %s' % network_os
if self.fail_on_missing_provider:
self.fail_json(msg=msg)
else:
self.warn(msg)
obj = cls(self.params, self.connection, self.check_mode)
setattr(self, '_provider', obj)
return getattr(self, '_provider')
def get_facts(self, subset=None):
try:
self.provider.get_facts(subset)
except Exception as exc:
self.fail_json(msg=to_text(exc))
def edit_config(self, config_filter=None):
current_config = self.connection.get_config(flags=config_filter)
try:
commands = self.provider.edit_config(current_config)
changed = bool(commands)
return {'commands': commands, 'changed': changed}
except Exception as exc:
self.fail_json(msg=to_text(exc))
| gpl-3.0 |
CydarLtd/ansible | lib/ansible/modules/network/panos/panos_interface.py | 78 | 5736 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_interface
short_description: configure data-port network interface for DHCP
description:
- Configure data-port (DP) network interface for DHCP. By default DP interfaces are static.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python can be obtained from PyPi U(https://pypi.python.org/pypi/pan-python)
notes:
- Checkmode is not supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
username:
description:
- Username credentials to use for auth.
default: "admin"
password:
description:
- Password credentials to use for auth.
required: true
if_name:
description:
- Name of the interface to configure.
required: true
zone_name:
description: >
Name of the zone for the interface. If the zone does not exist it is created but if the zone exists and
it is not of the layer3 type the operation will fail.
required: true
create_default_route:
description:
- Whether or not to add default route with router learned via DHCP.
default: "false"
commit:
description:
- Commit if changed
default: true
'''
EXAMPLES = '''
- name: enable DHCP client on ethernet1/1 in zone public
  panos_interface:
password: "admin"
ip_address: "192.168.1.1"
if_name: "ethernet1/1"
zone_name: "public"
create_default_route: "yes"
'''
RETURN='''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
import pan.xapi
from pan.xapi import PanXapiError
HAS_LIB = True
except ImportError:
HAS_LIB = False
_IF_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/network/interface/ethernet/entry[@name='%s']"
_ZONE_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/vsys/entry/zone/entry"
_ZONE_XPATH_QUERY = _ZONE_XPATH+"[network/layer3/member/text()='%s']"
_ZONE_XPATH_IF = _ZONE_XPATH+"[@name='%s']/network/layer3/member[text()='%s']"
_VR_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/network/virtual-router/entry"
def add_dhcp_if(xapi, if_name, zone_name, create_default_route):
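    # Build the DHCP-client config for the interface, then attach it to the zone and the default virtual router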
if_xml = [
'<entry name="%s">',
'<layer3>',
'<dhcp-client>',
'<create-default-route>%s</create-default-route>',
'</dhcp-client>'
'</layer3>'
'</entry>'
]
cdr = 'yes'
if not create_default_route:
cdr = 'no'
if_xml = (''.join(if_xml)) % (if_name, cdr)
xapi.edit(xpath=_IF_XPATH % if_name, element=if_xml)
xapi.set(xpath=_ZONE_XPATH+"[@name='%s']/network/layer3" % zone_name,
element='<member>%s</member>' % if_name)
xapi.set(xpath=_VR_XPATH+"[@name='default']/interface",
element='<member>%s</member>' % if_name)
return True
def if_exists(xapi, if_name):
xpath = _IF_XPATH % if_name
xapi.get(xpath=xpath)
network = xapi.element_root.find('.//layer3')
return (network is not None)
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
username=dict(default='admin'),
if_name=dict(required=True),
zone_name=dict(required=True),
create_default_route=dict(type='bool', default=False),
commit=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
if_name = module.params['if_name']
zone_name = module.params['zone_name']
create_default_route = module.params['create_default_route']
commit = module.params['commit']
ifexists = if_exists(xapi, if_name)
if ifexists:
module.exit_json(changed=False, msg="interface exists, not changed")
try:
changed = add_dhcp_if(xapi, if_name, zone_name, create_default_route)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
if changed and commit:
xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
module.exit_json(changed=changed, msg="okey dokey")
if __name__ == '__main__':
main()
| gpl-3.0 |
MonicaHsu/truvaluation | venv/lib/python2.7/site-packages/gunicorn/util.py | 24 | 15402 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import fcntl
import io
import os
import pkg_resources
import random
import resource
import socket
import sys
import textwrap
import time
import traceback
import inspect
import errno
import warnings
from gunicorn.errors import AppImportError
from gunicorn.six import text_type, string_types
MAXFD = 1024
REDIRECT_TO = getattr(os, 'devnull', '/dev/null')
timeout_default = object()
CHUNK_SIZE = (16 * 1024)
MAX_BODY = 1024 * 132
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
# Server and Date aren't technically hop-by-hop
# headers, but they are in the purview of the
# origin server which the WSGI spec says we should
# act like. So we drop them and add our own.
#
# In the future, concatenation server header values
# might be better, but nothing else does it and
# dropping them is easier.
hop_headers = set("""
connection keep-alive proxy-authenticate proxy-authorization
te trailers transfer-encoding upgrade
server date
""".split())
try:
from setproctitle import setproctitle
def _setproctitle(title):
setproctitle("gunicorn: %s" % title)
except ImportError:
def _setproctitle(title):
return
try:
from importlib import import_module
except ImportError:
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in range(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
def load_class(uri, default="sync", section="gunicorn.workers"):
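    # Accepts an already-imported class, an "egg:<dist>#<name>" entry-point URI,
    # a bare entry-point name, or a dotted "module.Class" path.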
if inspect.isclass(uri):
return uri
if uri.startswith("egg:"):
# uses entry points
entry_str = uri.split("egg:")[1]
try:
dist, name = entry_str.rsplit("#", 1)
except ValueError:
dist = entry_str
name = default
try:
return pkg_resources.load_entry_point(dist, section, name)
except:
exc = traceback.format_exc()
raise RuntimeError("class uri %r invalid or not found: \n\n[%s]" % (uri,
exc))
else:
components = uri.split('.')
if len(components) == 1:
try:
if uri.startswith("#"):
uri = uri[1:]
return pkg_resources.load_entry_point("gunicorn",
section, uri)
except:
exc = traceback.format_exc()
raise RuntimeError("class uri %r invalid or not found: \n\n[%s]" % (uri,
exc))
klass = components.pop(-1)
try:
mod = __import__('.'.join(components))
except:
exc = traceback.format_exc()
raise RuntimeError("class uri %r invalid or not found: \n\n[%s]" % (uri,
exc))
for comp in components[1:]:
mod = getattr(mod, comp)
return getattr(mod, klass)
def set_owner_process(uid, gid):
""" set user and group of workers processes """
if gid:
# versions of python < 2.6.2 don't manage unsigned int for
# groups like on osx or fedora
gid = abs(gid) & 0x7FFFFFFF
os.setgid(gid)
if uid:
os.setuid(uid)
def chown(path, uid, gid):
gid = abs(gid) & 0x7FFFFFFF # see note above.
os.chown(path, uid, gid)
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
# Peform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
        # Testing on an i7@4.3GHz shows that usually only 1 iteration is
# required when contention occurs.
timeout = 0.001
while timeout < 1.0:
        # Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
else:
_unlink = os.unlink
def unlink(filename):
try:
_unlink(filename)
except OSError as error:
# The filename need not exist.
if error.errno not in (errno.ENOENT, errno.ENOTDIR):
raise
def is_ipv6(addr):
try:
socket.inet_pton(socket.AF_INET6, addr)
except socket.error: # not a valid address
return False
except ValueError: # ipv6 not supported on this platform
return False
return True
def parse_address(netloc, default_port=8000):
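    # Examples:
    #   "unix:/tmp/gunicorn.sock" -> "/tmp/gunicorn.sock"
    #   "127.0.0.1:9000"          -> ("127.0.0.1", 9000)
    #   "[::1]:9000"              -> ("::1", 9000)
    #   ""                        -> ("0.0.0.0", 8000)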
if netloc.startswith("unix://"):
return netloc.split("unix://")[1]
if netloc.startswith("unix:"):
return netloc.split("unix:")[1]
if netloc.startswith("tcp://"):
netloc = netloc.split("tcp://")[1]
# get host
if '[' in netloc and ']' in netloc:
host = netloc.split(']')[0][1:].lower()
elif ':' in netloc:
host = netloc.split(':')[0].lower()
elif netloc == "":
host = "0.0.0.0"
else:
host = netloc.lower()
#get port
netloc = netloc.split(']')[-1]
if ":" in netloc:
port = netloc.split(':', 1)[1]
if not port.isdigit():
raise RuntimeError("%r is not a valid port number." % port)
port = int(port)
else:
port = default_port
return (host, port)
def get_maxfd():
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = MAXFD
return maxfd
def close_on_exec(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
def set_non_blocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def close(sock):
try:
sock.close()
except socket.error:
pass
try:
from os import closerange
except ImportError:
def closerange(fd_low, fd_high):
# Iterate through and close all file descriptors.
for fd in range(fd_low, fd_high):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
def write_chunk(sock, data):
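    # Emit a single HTTP/1.1 chunk: "<hex length>\r\n<payload>\r\n"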
if isinstance(data, text_type):
data = data.encode('utf-8')
chunk_size = "%X\r\n" % len(data)
chunk = b"".join([chunk_size.encode('utf-8'), data, b"\r\n"])
sock.sendall(chunk)
def write(sock, data, chunked=False):
if chunked:
return write_chunk(sock, data)
sock.sendall(data)
def write_nonblock(sock, data, chunked=False):
timeout = sock.gettimeout()
if timeout != 0.0:
try:
sock.setblocking(0)
return write(sock, data, chunked)
finally:
sock.setblocking(1)
else:
return write(sock, data, chunked)
def writelines(sock, lines, chunked=False):
for line in list(lines):
write(sock, line, chunked)
def write_error(sock, status_int, reason, mesg):
html = textwrap.dedent("""\
<html>
<head>
<title>%(reason)s</title>
</head>
<body>
<h1>%(reason)s</h1>
%(mesg)s
</body>
</html>
""") % {"reason": reason, "mesg": mesg}
http = textwrap.dedent("""\
HTTP/1.1 %s %s\r
Connection: close\r
Content-Type: text/html\r
Content-Length: %d\r
\r
%s
""") % (str(status_int), reason, len(html), html)
write_nonblock(sock, http.encode('latin1'))
def normalize_name(name):
return "-".join([w.lower().capitalize() for w in name.split("-")])
def import_app(module):
parts = module.split(":", 1)
if len(parts) == 1:
module, obj = module, "application"
else:
module, obj = parts[0], parts[1]
try:
__import__(module)
except ImportError:
if module.endswith(".py") and os.path.exists(module):
raise ImportError("Failed to find application, did "
"you mean '%s:%s'?" % (module.rsplit(".", 1)[0], obj))
else:
raise
mod = sys.modules[module]
try:
app = eval(obj, mod.__dict__)
except NameError:
raise AppImportError("Failed to find application: %r" % module)
if app is None:
raise AppImportError("Failed to find application object: %r" % obj)
if not callable(app):
raise AppImportError("Application object must be callable.")
return app
def getcwd():
# get current path, try to use PWD env first
try:
a = os.stat(os.environ['PWD'])
b = os.stat(os.getcwd())
if a.st_ino == b.st_ino and a.st_dev == b.st_dev:
cwd = os.environ['PWD']
else:
cwd = os.getcwd()
except:
cwd = os.getcwd()
return cwd
def http_date(timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
weekdayname[wd],
day, monthname[month], year,
hh, mm, ss)
return s
def is_hoppish(header):
return header.lower().strip() in hop_headers
def daemonize(enable_stdio_inheritance=False):
"""\
Standard daemonization of a process.
http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16
"""
if not 'GUNICORN_FD' in os.environ:
if os.fork():
os._exit(0)
os.setsid()
if os.fork():
os._exit(0)
os.umask(0)
# In both the following any file descriptors above stdin
    # stdout and stderr are left untouched. The inheritance
    # option simply allows one to have output go to a file
    # specified by way of shell redirection when not wanting
    # to use the --error-log option.
if not enable_stdio_inheritance:
# Remap all of stdin, stdout and stderr on to
# /dev/null. The expectation is that users have
# specified the --error-log option.
closerange(0, 3)
fd_null = os.open(REDIRECT_TO, os.O_RDWR)
if fd_null != 0:
os.dup2(fd_null, 0)
os.dup2(fd_null, 1)
os.dup2(fd_null, 2)
else:
fd_null = os.open(REDIRECT_TO, os.O_RDWR)
# Always redirect stdin to /dev/null as we would
# never expect to need to read interactive input.
if fd_null != 0:
os.close(0)
os.dup2(fd_null, 0)
# If stdout and stderr are still connected to
# their original file descriptors we check to see
# if they are associated with terminal devices.
            # When they are we map them to /dev/null so that they
            # are still detached from any controlling terminal
# properly. If not we preserve them as they are.
#
# If stdin and stdout were not hooked up to the
# original file descriptors, then all bets are
# off and all we can really do is leave them as
# they were.
#
# This will allow 'gunicorn ... > output.log 2>&1'
# to work with stdout/stderr going to the file
# as expected.
#
# Note that if using --error-log option, the log
# file specified through shell redirection will
# only be used up until the log file specified
# by the option takes over. As it replaces stdout
# and stderr at the file descriptor level, then
# anything using stdout or stderr, including having
# cached a reference to them, will still work.
def redirect(stream, fd_expect):
try:
fd = stream.fileno()
if fd == fd_expect and stream.isatty():
os.close(fd)
os.dup2(fd_null, fd)
except AttributeError:
pass
redirect(sys.stdout, 1)
redirect(sys.stderr, 2)
def seed():
try:
random.seed(os.urandom(64))
except NotImplementedError:
random.seed('%s.%s' % (time.time(), os.getpid()))
def check_is_writeable(path):
try:
f = open(path, 'a')
except IOError as e:
raise RuntimeError("Error: '%s' isn't writable [%r]" % (path, e))
f.close()
def to_bytestring(value):
"""Converts a string argument to a byte string"""
if isinstance(value, bytes):
return value
assert isinstance(value, text_type)
return value.encode("utf-8")
def is_fileobject(obj):
if not hasattr(obj, "tell") or not hasattr(obj, "fileno"):
return False
# check BytesIO case and maybe others
try:
obj.fileno()
except io.UnsupportedOperation:
return False
return True
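# Illustrative example (not part of the original module): is_fileobject() is
# True only for streams backed by a real file descriptor, e.g.
#   is_fileobject(open("/tmp/example.txt", "rb"))  -> True
#   is_fileobject(io.BytesIO(b"data"))             -> False  (fileno() raises
#                                                    io.UnsupportedOperation)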
def warn(msg):
sys.stderr.write("!!!\n")
lines = msg.splitlines()
for i, line in enumerate(lines):
if i == 0:
line = "WARNING: %s" % line
sys.stderr.write("!!! %s\n" % line)
sys.stderr.write("!!!\n\n")
sys.stderr.flush()
| mit |
marissazhou/django | django/template/backends/dummy.py | 480 | 2037 | # Since this package contains a "django" module, this is required on Python 2.
from __future__ import absolute_import
import errno
import io
import string
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template import Origin, TemplateDoesNotExist
from django.utils.html import conditional_escape
from .base import BaseEngine
from .utils import csrf_input_lazy, csrf_token_lazy
class TemplateStrings(BaseEngine):
app_dirname = 'template_strings'
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
if options:
raise ImproperlyConfigured(
"Unknown options: {}".format(", ".join(options)))
super(TemplateStrings, self).__init__(params)
def from_string(self, template_code):
return Template(template_code)
def get_template(self, template_name):
tried = []
for template_file in self.iter_template_filenames(template_name):
try:
with io.open(template_file, encoding=settings.FILE_CHARSET) as fp:
template_code = fp.read()
except IOError as e:
if e.errno == errno.ENOENT:
tried.append((
Origin(template_file, template_name, self),
'Source does not exist',
))
continue
raise
return Template(template_code)
else:
raise TemplateDoesNotExist(template_name, tried=tried, backend=self)
class Template(string.Template):
def render(self, context=None, request=None):
if context is None:
context = {}
else:
context = {k: conditional_escape(v) for k, v in context.items()}
if request is not None:
context['csrf_input'] = csrf_input_lazy(request)
context['csrf_token'] = csrf_token_lazy(request)
return self.safe_substitute(context)
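# Illustrative sketch (not part of the original file): the dummy backend only
# performs string.Template substitution. A template "Hello, $name!" rendered
# with {"name": "<b>World</b>"} yields "Hello, &lt;b&gt;World&lt;/b&gt;!",
# because every context value goes through conditional_escape() before
# safe_substitute() fills in the placeholders.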
| bsd-3-clause |
izhukov/ansible | v2/ansible/plugins/inventory/__init__.py | 8 | 2702 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import ABCMeta, abstractmethod
from six import add_metaclass
@add_metaclass(ABCMeta)
class InventoryParser:
'''Abstract Base Class for retrieving inventory information
    Any InventoryParser works by taking an inven_source. The caller then
    calls the parse() method. Once parse() has been called, the caller can
    access InventoryParser.hosts for a mapping of Host objects and
    InventoryParser.groups for a mapping of Group objects.
'''
def __init__(self, inven_source):
'''
        InventoryParser constructors take a source of inventory information
that they will parse the host and group information from.
'''
self.inven_source = inven_source
self.reset_parser()
@abstractmethod
def reset_parser(self):
'''
        InventoryParsers generally cache their data once parse() is
        called. This method initializes any parser state before calling
        parse() again.
'''
self.hosts = dict()
self.groups = dict()
self.parsed = False
def _merge(self, target, addition):
'''
This method is provided to InventoryParsers to merge host or group
dicts since it may take several passes to get all of the data
Example usage:
self.hosts = self.from_ini(filename)
new_hosts = self.from_script(scriptname)
self._merge(self.hosts, new_hosts)
'''
for i in addition:
if i in target:
target[i].merge(addition[i])
else:
target[i] = addition[i]
@abstractmethod
def parse(self, refresh=False):
if refresh:
self.reset_parser()
if self.parsed:
return self.parsed
# Parse self.inven_sources here
pass
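# Illustrative sketch (hypothetical, not part of Ansible): a minimal concrete
# parser only has to implement reset_parser() and parse(), e.g.:
#
#   class StaticInventoryParser(InventoryParser):
#       def reset_parser(self):
#           super(StaticInventoryParser, self).reset_parser()
#       def parse(self, refresh=False):
#           if refresh:
#               self.reset_parser()
#           if not self.parsed:
#               self.hosts = {"web01": None}  # would normally hold Host objects
#               self.parsed = True
#           return self.parsed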
| gpl-3.0 |
mdg/pygrate | test/migration_test.py | 1 | 5291 | import unittest
import os.path
import types
from pygration.migration import VersionNumber, Loader
import pygration
@pygration.step_class
class TestStep(object):
ADD_FILE = 'add.sql'
class StepTest(unittest.TestCase):
def test_class_decorator(self):
self.assertEqual("test.migration_test", TestStep.version)
self.assertEqual("TestStep", TestStep.step_name)
self.assertEqual("TestStep", TestStep.step_id)
class VersionComponentCompare(unittest.TestCase):
"""Test results for the component comparison function in Version."""
def test_numeric_comparison(self):
v = VersionNumber("v0")
self.assertTrue( v._component_compare("1","2") < 0 )
def test_numeric_comparison_double_digits(self):
"""Test that double digit numbers compare later than single digits."""
v = VersionNumber("v0")
self.assertTrue( v._component_compare("2","12") < 0 )
class VersionNumberTest(unittest.TestCase):
"""Tests for the pygration Version class."""
def test_underscore_is_pygration(self):
"""Check that v0_0_0 is reported as a pygration version."""
v = VersionNumber("v1_2_13")
self.assertTrue( v.is_pygration() )
self.assertEqual(v._component(0), "1")
self.assertEqual(v._component(1), "2")
self.assertEqual(v._component(2), "13")
def test_dash_is_pygration(self):
"""Check that v0-0-0 is reported as a pygration version."""
v = VersionNumber("v1-2-3")
self.assertTrue( v.is_pygration() )
self.assertEqual(v._component(0), "1")
self.assertEqual(v._component(1), "2")
self.assertEqual(v._component(2), "3")
def test_dot_is_pygration(self):
"""Check that v0.0.0 is reported as a pygration version."""
v = VersionNumber("v1.2.3")
self.assertTrue( v.is_pygration() )
self.assertEqual(v._component(0), "1")
self.assertEqual(v._component(1), "2")
self.assertEqual(v._component(2), "3")
def test_asdf_is_not_pygration(self):
"""Assert that asdf is reported as not a pygration version."""
v = VersionNumber("asdf")
self.assertFalse( v.is_pygration() )
def test_extended_version(self):
"""Test that a version with a sub-build number is compared later"""
v1 = VersionNumber("v1")
v2 = VersionNumber("v1-2")
self.assertTrue( cmp(v1, v2) < 0 )
self.assertTrue( cmp(v2, v1) > 0 )
def test_numeric_compare(self):
"""Test that a numeric version is compared as a number."""
v1 = VersionNumber("v1-2")
v2 = VersionNumber("v1-12")
self.assertTrue( cmp(v1, v2) < 0 )
self.assertTrue( cmp(v2, v1) > 0 )
def test_underscore_comparison(self):
v1 = VersionNumber("v0_1_2")
v2 = VersionNumber("v0_2_2")
self.assertTrue( cmp(v1, v2) < 0 )
self.assertTrue( cmp(v2, v1) > 0 )
def test_dash_comparison(self):
v1 = VersionNumber("v0-1-2")
v2 = VersionNumber("v0-2-2")
self.assertTrue( cmp(v1, v2) < 0 )
self.assertTrue( cmp(v2, v1) > 0 )
def test_dot_comparison(self):
v1 = VersionNumber("v0.1.2")
v2 = VersionNumber("v0.2.2")
self.assertTrue( cmp(v1, v2) < 0 )
self.assertTrue( cmp(v2, v1) > 0 )
def test_self_comparison(self):
v = VersionNumber("v0.1.2")
self.assertTrue( cmp(v, v) == 0 )
def test_equality_comparison(self):
vA = VersionNumber("v001")
vB = VersionNumber("v001")
self.assertTrue(vA == vB)
class MigrationSetTest(unittest.TestCase):
pass
class LoaderTest(unittest.TestCase):
def setUp( self ):
test_dir = os.path.join( os.path.dirname( __file__ ), "test1" )
self._loader = Loader(test_dir)
def test_find_versions(self):
v001 = VersionNumber('v001')
v002 = VersionNumber('v002')
v07 = VersionNumber('v0-7')
self._loader._find_files()
self.assertEqual([v07, v001, v002], self._loader._find_versions())
def test_load_migration_module(self):
self._loader._load_migration_module('v001')
m = self._loader._modules
self.assertEqual( 1, len(m) )
self.assertEqual( types.ModuleType, type(m[0]) )
class MigrationLoadTest(unittest.TestCase):
def setUp( self ):
self._test_dir = os.path.join( os.path.dirname( __file__ ), "test1" )
def test_load(self):
"""Test that the migration loader loads correctly."""
migset = pygration.migration.load(self._test_dir)
migs = migset.migrations()
self.assertEqual(3, len(migs))
self.assertEqual("v0-7", migs[0].version())
self.assertEqual("v001", migs[1].version())
self.assertEqual("v002", migs[2].version())
v07 = migs[0]
self.assertEqual(2, len(v07.steps()))
self.assertEqual("EmployeeTable", v07.step(0).step_name)
v001 = migs[1]
self.assertEqual(2, len(v001.steps()))
self.assertEqual("SalaryTable", v001.step(0).step_name)
self.assertEqual("EmployeeTable", v001.step(1).step_name)
v002 = migs[2]
self.assertEqual(1, len(v002.steps()))
self.assertEqual("AccountTable", v002.step(0).step_name)
| apache-2.0 |
guerrerocarlos/odoo | openerp/tools/import_email.py | 337 | 6376 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os, sys
import re
import smtplib
import email, mimetypes
from email.header import decode_header
from email.mime.text import MIMEText
import xmlrpclib
warn_msg = """
Bonjour,
Le message avec le sujet "%s" n'a pu être archivé dans l'ERP.
""".decode('utf-8')
class EmailParser(object):
def __init__(self, headers, dispatcher):
self.headers = headers
self.dispatcher = dispatcher
def parse(self, msg):
dispatcher((self.headers, msg))
class CommandDispatcher(object):
def __init__(self, receiver):
self.receiver = receiver
def __call__(self, request):
return self.receiver(request)
class RPCProxy(object):
def __init__(self, uid, passwd, host='localhost', port=8069, path='object'):
self.rpc = xmlrpclib.ServerProxy('http://%s:%s/%s' % (host, port, path))
self.user_id = uid
self.passwd = passwd
def __call__(self, request):
return self.rpc.execute(self.user_id, self.passwd, *request)
class ReceiverEmail2Event(object):
email_re = re.compile(r"""
([a-zA-Z][\w\.-]*[a-zA-Z0-9] # username part
@ # mandatory @ sign
         [a-zA-Z0-9][\w\.-]* # domain must start with a letter or digit
\.
[a-z]{2,3} # TLD
)
""", re.VERBOSE)
project_re = re.compile(r"^ *\[?(\d{4}\.?\d{0,3})\]?", re.UNICODE)
def __init__(self, rpc):
self.rpc = rpc
def get_addresses(self, headers, msg):
hcontent = ''
for header in [h for h in headers if msg.has_key(h)]:
hcontent += msg[header]
return self.email_re.findall(hcontent)
def get_partners(self, headers, msg):
alladdresses = self.get_addresses(headers, msg)
address_ids = self.rpc(('res.partner', 'search', [('email', 'in', alladdresses)]))
addresses = self.rpc(('res.partner', 'read', address_ids))
return [x['partner_id'][0] for x in addresses]
def __call__(self, request):
headers, msg = request
partners = self.get_partners(headers, msg)
subject = u''
for string, charset in decode_header(msg['Subject']):
if charset:
subject += string.decode(charset)
else:
subject += unicode(string)
if partners:
self.save_mail(msg, subject, partners)
else:
warning = MIMEText((warn_msg % (subject,)).encode('utf-8'), 'plain', 'utf-8')
warning['Subject'] = 'Message de OpenERP'
warning['From'] = '[email protected]'
warning['To'] = msg['From']
s = smtplib.SMTP()
s.connect()
s.sendmail('[email protected]', self.email_re.findall(msg['From']), warning.as_string())
s.close()
if msg.is_multipart():
for message in [m for m in msg.get_payload() if m.get_content_type() == 'message/rfc822']:
self((headers, message.get_payload()[0]))
def save_mail(self, msg, subject, partners):
counter, description = 1, u''
if msg.is_multipart():
for part in msg.get_payload():
stockdir = os.path.join('emails', msg['Message-Id'][1:-1])
newdir = os.path.join('/tmp', stockdir)
filename = part.get_filename()
if not filename:
ext = mimetypes.guess_extension(part.get_type())
if not ext:
ext = '.bin'
filename = 'part-%03d%s' % (counter, ext)
if part.get_content_maintype() == 'multipart':
continue
elif part.get_content_maintype() == 'text':
if part.get_content_subtype() == 'plain':
description += part.get_payload(decode=1).decode(part.get_charsets()[0])
                        description += u'\n\nVous trouverez les éventuels fichiers dans le répertoire: %s' % stockdir
continue
else:
                        description += u'\n\nCe message est en "%s", vous trouverez ce texte dans le répertoire: %s' % (part.get_content_type(), stockdir)
elif part.get_content_type() == 'message/rfc822':
continue
if not os.path.isdir(newdir):
os.mkdir(newdir)
counter += 1
fd = file(os.path.join(newdir, filename), 'w')
fd.write(part.get_payload(decode=1))
fd.close()
else:
description = msg.get_payload(decode=1).decode(msg.get_charsets()[0])
project = self.project_re.search(subject)
if project:
project = project.groups()[0]
else:
project = ''
for partner in partners:
self.rpc(('res.partner.event', 'create', {'name' : subject, 'partner_id' : partner, 'description' : description, 'project' : project}))
if __name__ == '__main__':
rpc_dispatcher = CommandDispatcher(RPCProxy(4, 'admin'))
dispatcher = CommandDispatcher(ReceiverEmail2Event(rpc_dispatcher))
parser = EmailParser(['To', 'Cc', 'From'], dispatcher)
parser.parse(email.message_from_file(sys.stdin))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
iuliat/nova | nova/objects/host_mapping.py | 29 | 5690 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import base as ovo
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova import exception
from nova.objects import base
from nova.objects import cell_mapping
from nova.objects import fields
def _cell_id_in_updates(updates):
cell_mapping_obj = updates.pop("cell_mapping", None)
if cell_mapping_obj:
updates["cell_id"] = cell_mapping_obj.id
# NOTE(danms): Maintain Dict compatibility because of ovo bug 1474952
@base.NovaObjectRegistry.register
class HostMapping(base.NovaTimestampObject, base.NovaObject,
ovo.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.IntegerField(read_only=True),
'host': fields.StringField(),
'cell_mapping': fields.ObjectField('CellMapping'),
}
obj_relationships = {
'cell_mapping': [('1.0', '1.0')]
}
def _get_cell_mapping(self):
session = db_api.get_api_session()
with session.begin():
cell_map = (session.query(api_models.CellMapping)
.join(api_models.HostMapping)
.filter(api_models.HostMapping.host == self.host)
.first())
if cell_map is not None:
return cell_mapping.CellMapping._from_db_object(
self._context, cell_mapping.CellMapping(), cell_map)
def _load_cell_mapping(self):
self.cell_mapping = self._get_cell_mapping()
def obj_load_attr(self, attrname):
if attrname == 'cell_mapping':
self._load_cell_mapping()
@staticmethod
def _from_db_object(context, host_mapping, db_host_mapping):
for key in host_mapping.fields:
db_value = db_host_mapping.get(key)
if key == "cell_mapping":
# NOTE(dheeraj): If cell_mapping is stashed in db object
# we load it here. Otherwise, lazy loading will happen
                # when .cell_mapping is accessed later
if not db_value:
continue
db_value = cell_mapping.CellMapping._from_db_object(
host_mapping._context, cell_mapping.CellMapping(),
db_value)
setattr(host_mapping, key, db_value)
host_mapping.obj_reset_changes()
host_mapping._context = context
return host_mapping
@staticmethod
def _get_by_host_from_db(context, host):
session = db_api.get_api_session()
with session.begin():
db_mapping = (session.query(api_models.HostMapping)
.join(api_models.CellMapping)
.with_entities(api_models.HostMapping,
api_models.CellMapping)
.filter(api_models.HostMapping.host == host)).first()
if not db_mapping:
raise exception.HostMappingNotFound(name=host)
host_mapping = db_mapping[0]
host_mapping["cell_mapping"] = db_mapping[1]
return host_mapping
@base.remotable_classmethod
def get_by_host(cls, context, host):
db_mapping = cls._get_by_host_from_db(context, host)
return cls._from_db_object(context, cls(), db_mapping)
@staticmethod
def _create_in_db(context, updates):
session = db_api.get_api_session()
db_mapping = api_models.HostMapping()
db_mapping.update(updates)
db_mapping.save(session)
return db_mapping
@base.remotable
def create(self):
changes = self.obj_get_changes()
# cell_mapping must be mapped to cell_id for create
_cell_id_in_updates(changes)
db_mapping = self._create_in_db(self._context, changes)
self._from_db_object(self._context, self, db_mapping)
@staticmethod
def _save_in_db(context, obj, updates):
session = db_api.get_api_session()
with session.begin():
db_mapping = session.query(
api_models.HostMapping).filter_by(
id=obj.id).first()
if not db_mapping:
raise exception.HostMappingNotFound(name=obj.host)
db_mapping.update(updates)
return db_mapping
@base.remotable
def save(self):
changes = self.obj_get_changes()
# cell_mapping must be mapped to cell_id for updates
_cell_id_in_updates(changes)
        db_mapping = self._save_in_db(self._context, self, changes)
self._from_db_object(self._context, self, db_mapping)
self.obj_reset_changes()
@staticmethod
def _destroy_in_db(context, host):
session = db_api.get_api_session()
with session.begin():
result = session.query(api_models.HostMapping).filter_by(
host=host).delete()
if not result:
raise exception.HostMappingNotFound(name=host)
@base.remotable
def destroy(self):
self._destroy_in_db(self._context, self.host)
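# Illustrative sketch (hypothetical, not part of Nova): the typical lifecycle
# of a HostMapping, assuming a CellMapping already exists in the API database:
#
#   cell = cell_mapping.CellMapping.get_by_uuid(ctxt, cell_uuid)
#   hm = HostMapping(ctxt, host='compute1', cell_mapping=cell)
#   hm.create()                                 # inserts the mapping row
#   hm = HostMapping.get_by_host(ctxt, 'compute1')
#   hm.destroy()                                # raises HostMappingNotFound if absent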
| apache-2.0 |
ThePirateWhoSmellsOfSunflowers/Empire | lib/modules/powershell/situational_awareness/network/smbscanner.py | 10 | 5259 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-SMBScanner',
'Author': ['@obscuresec', '@harmj0y'],
'Description': ('Tests a username/password combination across a number of machines.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : False,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://gist.github.com/obscuresec/df5f652c7e7088e2412c'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'CredID' : {
'Description' : 'CredID from the store to use.',
'Required' : False,
'Value' : ''
},
'ComputerName' : {
'Description' : 'Comma-separated hostnames to try username/password combinations against. Otherwise enumerate the domain for machines.',
'Required' : False,
'Value' : ''
},
'Password' : {
'Description' : 'Password to test.',
'Required' : True,
'Value' : ''
},
'UserName' : {
'Description' : '[domain\]username to test.',
'Required' : True,
'Value' : ''
},
'NoPing' : {
'Description' : 'Switch. Don\'t ping hosts before enumeration.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/Invoke-SmbScanner.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode + "\n"
scriptEnd = ""
# if a credential ID is specified, try to parse
credID = self.options["CredID"]['Value']
if credID != "":
if not self.mainMenu.credentials.is_credential_valid(credID):
print helpers.color("[!] CredID is invalid!")
return ""
(credID, credType, domainName, userName, password, host, os, sid, notes) = self.mainMenu.credentials.get_credentials(credID)[0]
if domainName != "":
self.options["UserName"]['Value'] = str(domainName) + "\\" + str(userName)
else:
self.options["UserName"]['Value'] = str(userName)
if password != "":
self.options["Password"]['Value'] = password
if self.options["UserName"]['Value'] == "" or self.options["Password"]['Value'] == "":
print helpers.color("[!] Username and password must be specified.")
if (self.options['ComputerName']['Value'] != ''):
usernames = "\"" + "\",\"".join(self.options['ComputerName']['Value'].split(",")) + "\""
scriptEnd += usernames + " | "
scriptEnd += "Invoke-SMBScanner "
for option,values in self.options.iteritems():
if option.lower() != "agent" and option.lower() != "computername" and option.lower() != "credid":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
scriptEnd += " -" + str(option)
else:
scriptEnd += " -" + str(option) + " '" + str(values['Value']) + "'"
scriptEnd += "| Out-String | %{$_ + \"`n\"};"
scriptEnd += "'Invoke-SMBScanner completed'"
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
return script
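# Illustrative sketch (hypothetical values, not part of Empire): with
# ComputerName 'WIN7A,WIN7B', UserName 'CORP\backup' and Password 'Winter2016',
# the appended scriptEnd resembles the following (option order may vary with
# dict iteration order):
#   "WIN7A","WIN7B" | Invoke-SMBScanner -UserName 'CORP\backup' -Password 'Winter2016' | Out-String | %{$_ + "`n"};'Invoke-SMBScanner completed'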
| bsd-3-clause |
gram526/VTK | Rendering/Annotation/Testing/Python/bore.py | 20 | 2855 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create arc plots
# get the interactor ui
camera = vtk.vtkCamera()
# read the bore
bore = vtk.vtkPolyDataReader()
bore.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/bore.vtk")
tuber = vtk.vtkTubeFilter()
tuber.SetInputConnection(bore.GetOutputPort())
tuber.SetNumberOfSides(6)
tuber.SetRadius(15)
mapBore = vtk.vtkPolyDataMapper()
mapBore.SetInputConnection(tuber.GetOutputPort())
mapBore.ScalarVisibilityOff()
boreActor = vtk.vtkActor()
boreActor.SetMapper(mapBore)
boreActor.GetProperty().SetColor(0,0,0)
# create the arc plots
#
track1 = vtk.vtkPolyDataReader()
track1.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/track1.binary.vtk")
ap = vtk.vtkArcPlotter()
ap.SetInputConnection(track1.GetOutputPort())
ap.SetCamera(camera)
ap.SetRadius(250.0)
ap.SetHeight(200.0)
ap.UseDefaultNormalOn()
ap.SetDefaultNormal(1,1,0)
mapArc = vtk.vtkPolyDataMapper()
mapArc.SetInputConnection(ap.GetOutputPort())
arcActor = vtk.vtkActor()
arcActor.SetMapper(mapArc)
arcActor.GetProperty().SetColor(0,1,0)
track2 = vtk.vtkPolyDataReader()
track2.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/track2.binary.vtk")
ap2 = vtk.vtkArcPlotter()
ap2.SetInputConnection(track2.GetOutputPort())
ap2.SetCamera(camera)
ap2.SetRadius(450.0)
ap2.SetHeight(200.0)
ap2.UseDefaultNormalOn()
ap2.SetDefaultNormal(1,1,0)
mapArc2 = vtk.vtkPolyDataMapper()
mapArc2.SetInputConnection(ap2.GetOutputPort())
arcActor2 = vtk.vtkActor()
arcActor2.SetMapper(mapArc2)
arcActor2.GetProperty().SetColor(0,0,1)
track3 = vtk.vtkPolyDataReader()
track3.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/track3.binary.vtk")
ap3 = vtk.vtkArcPlotter()
ap3.SetInputConnection(track3.GetOutputPort())
ap3.SetCamera(camera)
ap3.SetRadius(250.0)
ap3.SetHeight(50.0)
ap3.SetDefaultNormal(1,1,0)
mapArc3 = vtk.vtkPolyDataMapper()
mapArc3.SetInputConnection(ap3.GetOutputPort())
arcActor3 = vtk.vtkActor()
arcActor3.SetMapper(mapArc3)
arcActor3.GetProperty().SetColor(1,0,1)
# Create graphics objects
# Create the rendering window renderer and interactive renderer
ren1 = vtk.vtkRenderer()
ren1.SetActiveCamera(camera)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer set the background and size
ren1.AddActor(boreActor)
ren1.AddActor(arcActor)
ren1.AddActor(arcActor2)
ren1.AddActor(arcActor3)
ren1.SetBackground(1,1,1)
renWin.SetSize(235,500)
camera.SetClippingRange(14144,32817)
camera.SetFocalPoint(-1023,680,5812)
camera.SetPosition(15551,-2426,19820)
camera.SetViewUp(-0.651889,-0.07576,0.754521)
camera.SetViewAngle(20)
renWin.Render()
# render the image
#
iren.Initialize()
# prevent the tk window from showing up then start the event loop
# --- end of script --
| bsd-3-clause |
40223136/w17test2 | static/Brython3.1.3-20150514-095342/Lib/unittest/case.py | 743 | 48873 | """Test case implementation"""
import sys
import functools
import difflib
import pprint
import re
import warnings
import collections
from . import result
from .util import (strclass, safe_repr, _count_diff_all_purpose,
_count_diff_hashable)
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
Usually you can use TestCase.skipTest() or one of the skipping decorators
instead of raising this directly.
"""
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
super(_ExpectedFailure, self).__init__()
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
class _Outcome(object):
def __init__(self):
self.success = True
self.skipped = None
self.unexpectedSuccess = None
self.expectedFailure = None
self.errors = []
self.failures = []
def _id(obj):
return obj
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not isinstance(test_item, type):
@functools.wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIf(condition, reason):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
def skipUnless(condition, reason):
"""
Skip a test unless the condition is true.
"""
if not condition:
return skip(reason)
return _id
def expectedFailure(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
raise _ExpectedFailure(sys.exc_info())
raise _UnexpectedSuccess
return wrapper
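# Illustrative example (not part of the original module): how the skipping and
# expected-failure decorators are typically applied to TestCase methods:
#
#   class MyTest(TestCase):
#       @skipIf(sys.platform == "win32", "POSIX-only behaviour")
#       def test_posix_paths(self):
#           ...
#       @expectedFailure
#       def test_known_bug(self):
#           self.assertEqual(1, 2)  # reported as an expected failure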
class _AssertRaisesBaseContext(object):
def __init__(self, expected, test_case, callable_obj=None,
expected_regex=None):
self.expected = expected
self.test_case = test_case
if callable_obj is not None:
try:
self.obj_name = callable_obj.__name__
except AttributeError:
self.obj_name = str(callable_obj)
else:
self.obj_name = None
if isinstance(expected_regex, (bytes, str)):
expected_regex = re.compile(expected_regex)
self.expected_regex = expected_regex
self.msg = None
def _raiseFailure(self, standardMsg):
msg = self.test_case._formatMessage(self.msg, standardMsg)
raise self.test_case.failureException(msg)
def handle(self, name, callable_obj, args, kwargs):
"""
If callable_obj is None, assertRaises/Warns is being used as a
context manager, so check for a 'msg' kwarg and return self.
If callable_obj is not None, call it passing args and kwargs.
"""
if callable_obj is None:
self.msg = kwargs.pop('msg', None)
return self
with self:
callable_obj(*args, **kwargs)
class _AssertRaisesContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
if self.obj_name:
self._raiseFailure("{} not raised by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
# store exception, without traceback, for later retrieval
self.exception = exc_value.with_traceback(None)
if self.expected_regex is None:
return True
expected_regex = self.expected_regex
if not expected_regex.search(str(exc_value)):
self._raiseFailure('"{}" does not match "{}"'.format(
expected_regex.pattern, str(exc_value)))
return True
class _AssertWarnsContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertWarns* methods."""
def __enter__(self):
# The __warningregistry__'s need to be in a pristine state for tests
# to work properly.
for v in sys.modules.values():
if getattr(v, '__warningregistry__', None):
v.__warningregistry__ = {}
self.warnings_manager = warnings.catch_warnings(record=True)
self.warnings = self.warnings_manager.__enter__()
warnings.simplefilter("always", self.expected)
return self
def __exit__(self, exc_type, exc_value, tb):
self.warnings_manager.__exit__(exc_type, exc_value, tb)
if exc_type is not None:
# let unexpected exceptions pass through
return
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
first_matching = None
for m in self.warnings:
w = m.message
if not isinstance(w, self.expected):
continue
if first_matching is None:
first_matching = w
if (self.expected_regex is not None and
not self.expected_regex.search(str(w))):
continue
# store warning for later retrieval
self.warning = w
self.filename = m.filename
self.lineno = m.lineno
return
# Now we simply try to choose a helpful failure message
if first_matching is not None:
self._raiseFailure('"{}" does not match "{}"'.format(
self.expected_regex.pattern, str(first_matching)))
if self.obj_name:
self._raiseFailure("{} not triggered by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not triggered".format(exc_name))
class TestCase(object):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
When subclassing TestCase, you can set these attributes:
* failureException: determines which exception will be raised when
the instance's assertion methods fail; test methods raising this
exception will be deemed to have 'failed' rather than 'errored'.
* longMessage: determines whether long messages (including repr of
objects used in assert methods) will be printed on failure in *addition*
to any explicit message passed.
* maxDiff: sets the maximum length of a diff in failure messages
by assert methods using difflib. It is looked up as an instance
attribute so can be configured by individual tests if required.
"""
failureException = AssertionError
longMessage = True
maxDiff = 80*8
# If a string is longer than _diffThreshold, use normal comparison instead
# of difflib. See #11763.
_diffThreshold = 2**16
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._outcomeForDoCleanups = None
self._testMethodDoc = 'No test'
try:
testMethod = getattr(self, methodName)
except AttributeError:
if methodName != 'runTest':
# we allow instantiation with no explicit method name
# but not an *incorrect* or missing method name
raise ValueError("no such test method in %s: %s" %
(self.__class__, methodName))
else:
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = {}
self.addTypeEqualityFunc(dict, 'assertDictEqual')
self.addTypeEqualityFunc(list, 'assertListEqual')
self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
self.addTypeEqualityFunc(set, 'assertSetEqual')
self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
self.addTypeEqualityFunc(str, 'assertMultiLineEqual')
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
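    # Illustrative sketch (not part of the original module): a subclass can
    # register a custom comparison for its own type, e.g.:
    #
    #   class PointTest(TestCase):
    #       def __init__(self, methodName='runTest'):
    #           super().__init__(methodName)
    #           self.addTypeEqualityFunc(Point, self.assertPointEqual)
    #       def assertPointEqual(self, first, second, msg=None):
    #           if (first.x, first.y) != (second.x, second.y):
    #               self.fail(self._formatMessage(msg, "Points differ"))
    #
    # assertEqual(Point(1, 2), Point(1, 2)) then routes through
    # assertPointEqual whenever both arguments are exactly of type Point,
    # where Point is a hypothetical user-defined class.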
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
pass
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
pass
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn("TestResult has no addSkip method, skips not reported",
RuntimeWarning, 2)
result.addSuccess(self)
def _executeTestPart(self, function, outcome, isTest=False):
try:
function()
except KeyboardInterrupt:
raise
except SkipTest as e:
outcome.success = False
outcome.skipped = str(e)
except _UnexpectedSuccess:
exc_info = sys.exc_info()
outcome.success = False
if isTest:
outcome.unexpectedSuccess = exc_info
else:
outcome.errors.append(exc_info)
except _ExpectedFailure:
outcome.success = False
exc_info = sys.exc_info()
if isTest:
outcome.expectedFailure = exc_info
else:
outcome.errors.append(exc_info)
except self.failureException:
outcome.success = False
outcome.failures.append(sys.exc_info())
exc_info = sys.exc_info()
except:
outcome.success = False
outcome.errors.append(sys.exc_info())
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
outcome = _Outcome()
self._outcomeForDoCleanups = outcome
self._executeTestPart(self.setUp, outcome)
if outcome.success:
self._executeTestPart(testMethod, outcome, isTest=True)
self._executeTestPart(self.tearDown, outcome)
self.doCleanups()
if outcome.success:
result.addSuccess(self)
else:
if outcome.skipped is not None:
self._addSkip(result, outcome.skipped)
for exc_info in outcome.errors:
result.addError(self, exc_info)
for exc_info in outcome.failures:
result.addFailure(self, exc_info)
if outcome.unexpectedSuccess is not None:
addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failures",
RuntimeWarning)
result.addFailure(self, outcome.unexpectedSuccess)
if outcome.expectedFailure is not None:
addExpectedFailure = getattr(result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, outcome.expectedFailure)
else:
warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
RuntimeWarning)
result.addSuccess(self)
return result
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
outcome = self._outcomeForDoCleanups or _Outcome()
while self._cleanups:
function, args, kwargs = self._cleanups.pop()
part = lambda: function(*args, **kwargs)
self._executeTestPart(part, outcome)
# return this for backwards compatibility
# even though we no longer us it internally
return outcome.success
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"""Check that the expression is false."""
if expr:
msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Check that the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
* If an explicit message is provided, plus ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
# don't switch to '{}' formatting in Python 2.X
# it changes the way unicode input is handled
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is raised
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
raised, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
An optional keyword argument 'msg' can be provided when assertRaises
is used as a context object.
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
context = _AssertRaisesContext(excClass, self, callableObj)
return context.handle('assertRaises', callableObj, args, kwargs)
def assertWarns(self, expected_warning, callable_obj=None, *args, **kwargs):
"""Fail unless a warning of class warnClass is triggered
by callable_obj when invoked with arguments args and keyword
arguments kwargs. If a different type of warning is
triggered, it will not be handled: depending on the other
warning filtering rules in effect, it might be silenced, printed
out, or raised as an exception.
If called with callable_obj omitted or None, will return a
context object used like this::
with self.assertWarns(SomeWarning):
do_something()
An optional keyword argument 'msg' can be provided when assertWarns
is used as a context object.
The context manager keeps a reference to the first matching
warning as the 'warning' attribute; similarly, the 'filename'
and 'lineno' attributes give you information about the line
of Python code from which the warning was triggered.
This allows you to inspect the warning after the assertion::
with self.assertWarns(SomeWarning) as cm:
do_something()
the_warning = cm.warning
self.assertEqual(the_warning.some_attribute, 147)
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj)
return context.handle('assertWarns', callable_obj, args, kwargs)
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
if isinstance(asserter, str):
asserter = getattr(self, asserter)
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '!='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
def assertAlmostEqual(self, first, second, places=None, msg=None,
delta=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
           difference between the two objects is more than the given delta.
Note that decimal places (from zero) are usually not the same
           as significant digits (measured from the most significant digit).
If the two objects compare equal then they will automatically
compare almost equal.
"""
if first == second:
# shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(first - second) <= delta:
return
standardMsg = '%s != %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if round(abs(second-first), places) == 0:
return
standardMsg = '%s != %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertNotAlmostEqual(self, first, second, places=None, msg=None,
delta=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
           difference between the two objects is less than the given delta.
Note that decimal places (from zero) are usually not the same
           as significant digits (measured from the most significant digit).
Objects that are equal automatically fail.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (first == second) and abs(first - second) > delta:
return
standardMsg = '%s == %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if not (first == second) and round(abs(second-first), places) != 0:
return
standardMsg = '%s == %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = safe_repr(seq1)
seq2_repr = safe_repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in range(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses ducktyping to support different types of sets, and
is optimized for sets specifically (parameters must support a
difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1),
safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, subset, dictionary, msg=None):
"""Checks whether dictionary is a superset of subset."""
warnings.warn('assertDictContainsSubset is deprecated',
DeprecationWarning)
missing = []
mismatched = []
for key, value in subset.items():
if key not in dictionary:
missing.append(key)
elif value != dictionary[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(dictionary[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
def assertCountEqual(self, first, second, msg=None):
"""An unordered sequence comparison asserting that the same elements,
regardless of order. If the same element occurs more than once,
it verifies that the elements occur the same number of times.
self.assertEqual(Counter(list(first)),
Counter(list(second)))
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
first_seq, second_seq = list(first), list(second)
try:
first = collections.Counter(first_seq)
second = collections.Counter(second_seq)
except TypeError:
# Handle case with unhashable elements
differences = _count_diff_all_purpose(first_seq, second_seq)
else:
if first == second:
return
differences = _count_diff_hashable(first_seq, second_seq)
if differences:
standardMsg = 'Element counts were not equal:\n'
lines = ['First has %d, Second has %d: %r' % diff for diff in differences]
diffMsg = '\n'.join(lines)
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def assertMultiLineEqual(self, first, second, msg=None):
"""Assert that two multi-line strings are equal."""
self.assertIsInstance(first, str, 'First argument is not a string')
self.assertIsInstance(second, str, 'Second argument is not a string')
if first != second:
# don't use difflib if the strings are too long
if (len(first) > self._diffThreshold or
len(second) > self._diffThreshold):
self._baseAssertEqual(first, second, msg)
firstlines = first.splitlines(keepends=True)
secondlines = second.splitlines(keepends=True)
if len(firstlines) == 1 and first.strip('\r\n') == first:
firstlines = [first + '\n']
secondlines = [second + '\n']
standardMsg = '%s != %s' % (safe_repr(first, True),
safe_repr(second, True))
diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertRaisesRegex(self, expected_exception, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regex.
Args:
expected_exception: Exception class expected to be raised.
expected_regex: Regex (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
msg: Optional message used in case of failure. Can only be used
when assertRaisesRegex is used as a context manager.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertRaisesContext(expected_exception, self, callable_obj,
expected_regex)
return context.handle('assertRaisesRegex', callable_obj, args, kwargs)
def assertWarnsRegex(self, expected_warning, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a triggered warning matches a regexp.
Basic functioning is similar to assertWarns() with the addition
that only warnings whose messages also match the regular expression
are considered successful matches.
Args:
expected_warning: Warning class expected to be triggered.
expected_regex: Regex (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
msg: Optional message used in case of failure. Can only be used
when assertWarnsRegex is used as a context manager.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj,
expected_regex)
return context.handle('assertWarnsRegex', callable_obj, args, kwargs)
def assertRegex(self, text, expected_regex, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regex, (str, bytes)):
assert expected_regex, "expected_regex must not be empty."
expected_regex = re.compile(expected_regex)
if not expected_regex.search(text):
msg = msg or "Regex didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
raise self.failureException(msg)
def assertNotRegex(self, text, unexpected_regex, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regex, (str, bytes)):
unexpected_regex = re.compile(unexpected_regex)
match = unexpected_regex.search(text)
if match:
msg = msg or "Regex matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regex.pattern,
text)
raise self.failureException(msg)
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
'Please use {0} instead.'.format(original_func.__name__),
DeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
# see #9424
failUnlessEqual = assertEquals = _deprecate(assertEqual)
failIfEqual = assertNotEquals = _deprecate(assertNotEqual)
failUnlessAlmostEqual = assertAlmostEquals = _deprecate(assertAlmostEqual)
failIfAlmostEqual = assertNotAlmostEquals = _deprecate(assertNotAlmostEqual)
failUnless = assert_ = _deprecate(assertTrue)
failUnlessRaises = _deprecate(assertRaises)
failIf = _deprecate(assertFalse)
assertRaisesRegexp = _deprecate(assertRaisesRegex)
assertRegexpMatches = _deprecate(assertRegex)
class FunctionTestCase(TestCase):
"""A test case that wraps a test function.
This is useful for slipping pre-existing test functions into the
unittest framework. Optionally, set-up and tidy-up functions can be
supplied. As with TestCase, the tidy-up ('tearDown') function will
always be called if the set-up ('setUp') function ran successfully.
"""
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
super(FunctionTestCase, self).__init__()
self._setUpFunc = setUp
self._tearDownFunc = tearDown
self._testFunc = testFunc
self._description = description
def setUp(self):
if self._setUpFunc is not None:
self._setUpFunc()
def tearDown(self):
if self._tearDownFunc is not None:
self._tearDownFunc()
def runTest(self):
self._testFunc()
def id(self):
return self._testFunc.__name__
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._setUpFunc == other._setUpFunc and \
self._tearDownFunc == other._tearDownFunc and \
self._testFunc == other._testFunc and \
self._description == other._description
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._setUpFunc, self._tearDownFunc,
self._testFunc, self._description))
def __str__(self):
return "%s (%s)" % (strclass(self.__class__),
self._testFunc.__name__)
def __repr__(self):
return "<%s tec=%s>" % (strclass(self.__class__),
self._testFunc)
def shortDescription(self):
if self._description is not None:
return self._description
doc = self._testFunc.__doc__
return doc and doc.split("\n")[0].strip() or None
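# --- illustrative usage sketch (added by the editor, not part of the original file) ---
# FunctionTestCase above wraps a plain function as a runnable test case.  The
# helper names below are made up for the example; only FunctionTestCase itself
# comes from this module.
def _function_testcase_demo():
    def check_addition():
        assert 1 + 1 == 2
    # Wrap the bare function; setUp/tearDown/description are all optional.
    case = FunctionTestCase(check_addition, description="addition sanity check")
    return case.shortDescription()  # -> "addition sanity check"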
| gpl-3.0 |
jtyuan/racetrack | src/arch/x86/isa/insts/general_purpose/flags/set_and_clear.py | 91 | 2816 | # Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop CLD {
ruflags t1
limm t2, "~((uint64_t)DFBit)", dataSize=8
and t1, t1, t2
wruflags t1, t0
};
def macroop STD {
ruflags t1
limm t2, "DFBit", dataSize=8
or t1, t1, t2
wruflags t1, t0
};
def macroop CLC {
ruflags t1
andi t2, t1, "CFBit"
wruflags t1, t2
};
def macroop STC {
ruflags t1
ori t1, t1, "CFBit"
wruflags t1, t0
};
def macroop CMC {
ruflags t1
wruflagsi t1, "CFBit"
};
def macroop STI {
rflags t1
limm t2, "IFBit", dataSize=8
or t1, t1, t2
wrflags t1, t0
};
def macroop CLI {
rflags t1
limm t2, "~IFBit", dataSize=8
and t1, t1, t2
wrflags t1, t0
};
'''
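# --- illustrative sketch (added by the editor, not part of the original microcode) ---
# Each macroop above reads the flag register, masks or sets a single bit, and
# writes it back.  The same bit arithmetic in plain Python; the bit positions
# below are illustrative stand-ins for the DFBit/CFBit constants used above.
DF_BIT_EXAMPLE = 1 << 10
CF_BIT_EXAMPLE = 1 << 0

def cld_example(flags): return flags & ~DF_BIT_EXAMPLE   # CLD: clear direction flag
def std_example(flags): return flags | DF_BIT_EXAMPLE    # STD: set direction flag
def cmc_example(flags): return flags ^ CF_BIT_EXAMPLE    # CMC: complement carry flag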
| bsd-3-clause |
ianmiell/OLD-shutitdist | inetutils/inetutils.py | 1 | 1466 | """ShutIt module. See http://shutit.tk
"""
from shutit_module import ShutItModule
class inetutils(ShutItModule):
def is_installed(self, shutit):
return shutit.file_exists('/root/shutit_build/module_record/' + self.module_id + '/built')
def build(self, shutit):
shutit.send('mkdir -p /tmp/build/inetutils')
shutit.send('cd /tmp/build/inetutils')
shutit.send('curl -L http://ftp.gnu.org/gnu/inetutils/inetutils-1.9.2.tar.gz | tar -zxf -')
shutit.send('cd inetutils*')
shutit.send('''echo '#define PATH_PROCNET_DEV "/proc/net/dev"' >> ifconfig/system/linux.h''')
shutit.send('./configure --prefix=/usr --localstatedir=/var --disable-logger --disable-whois --disable-servers')
shutit.send('make')
shutit.send('make install')
shutit.send('mv -v /usr/bin/{hostname,ping,ping6,traceroute} /bin')
shutit.send('mv -v /usr/bin/ifconfig /sbin')
shutit.send('./configure --prefix=/usr')
return True
#def get_config(self, shutit):
# shutit.get_config(self.module_id,'item','default')
# return True
#def check_ready(self, shutit):
# return True
#def start(self, shutit):
# return True
#def stop(self, shutit):
# return True
#def finalize(self, shutit):
# return True
#def remove(self, shutit):
# return True
#def test(self, shutit):
# return True
def module():
return inetutils(
'shutit.tk.sd.inetutils.inetutils', 158844782.0047,
description='',
maintainer='',
depends=['shutit.tk.sd.pkg_config.pkg_config']
)
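# --- illustrative sketch (added by the editor, not part of the original module) ---
# is_installed() above checks for a marker file that the build framework writes
# once a module has been built.  A plain-Python equivalent of that check, with
# the module id passed in explicitly for illustration:
import os.path

def already_built_example(module_id):
    return os.path.exists('/root/shutit_build/module_record/' + module_id + '/built')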
| gpl-2.0 |
JackKelly/neuralnilm_prototype | scripts/experiment029.py | 2 | 3262 | from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import lasagne
from gen_data_029 import gen_data, N_BATCH, LENGTH
theano.config.compute_test_value = 'raise'
# Number of units in the hidden (recurrent) layer
N_HIDDEN = 5
# SGD learning rate
LEARNING_RATE = 1e-1
# Number of iterations to train the net
N_ITERATIONS = 200
# Generate a "validation" sequence whose cost we will periodically compute
X_val, y_val = gen_data()
n_features = X_val.shape[-1]
n_output = y_val.shape[-1]
assert X_val.shape == (N_BATCH, LENGTH, n_features)
assert y_val.shape == (N_BATCH, LENGTH, n_output)
# Construct LSTM RNN: One LSTM layer and one dense output layer
l_in = lasagne.layers.InputLayer(shape=(N_BATCH, LENGTH, n_features))
# setup fwd and bck LSTM layer.
l_fwd = lasagne.layers.LSTMLayer(
l_in, N_HIDDEN, backwards=False, learn_init=True, peepholes=True)
l_bck = lasagne.layers.LSTMLayer(
l_in, N_HIDDEN, backwards=True, learn_init=True, peepholes=True)
# concatenate forward and backward LSTM layers
l_fwd_reshape = lasagne.layers.ReshapeLayer(l_fwd, (N_BATCH*LENGTH, N_HIDDEN))
l_bck_reshape = lasagne.layers.ReshapeLayer(l_bck, (N_BATCH*LENGTH, N_HIDDEN))
l_concat = lasagne.layers.ConcatLayer([l_fwd_reshape, l_bck_reshape], axis=1)
l_recurrent_out = lasagne.layers.DenseLayer(
l_concat, num_units=n_output, nonlinearity=None)
l_out = lasagne.layers.ReshapeLayer(
l_recurrent_out, (N_BATCH, LENGTH, n_output))
input = T.tensor3('input')
target_output = T.tensor3('target_output')
# add test values
input.tag.test_value = np.random.rand(
*X_val.shape).astype(theano.config.floatX)
target_output.tag.test_value = np.random.rand(
*y_val.shape).astype(theano.config.floatX)
# Cost = mean squared error
cost = T.mean((l_out.get_output(input) - target_output)**2)
# Use NAG for training
all_params = lasagne.layers.get_all_params(l_out)
updates = lasagne.updates.nesterov_momentum(cost, all_params, LEARNING_RATE)
# Theano functions for training, getting output, and computing cost
train = theano.function([input, target_output],
cost, updates=updates, on_unused_input='warn',
allow_input_downcast=True)
y_pred = theano.function(
[input], l_out.get_output(input), on_unused_input='warn',
allow_input_downcast=True)
compute_cost = theano.function(
[input, target_output], cost, on_unused_input='warn',
allow_input_downcast=True)
# Train the net
def run_training():
costs = np.zeros(N_ITERATIONS)
for n in range(N_ITERATIONS):
X, y = gen_data()
# you should use your own training data mask instead of mask_val
costs[n] = train(X, y)
if not n % 10:
cost_val = compute_cost(X_val, y_val)
print "Iteration {} validation cost = {}".format(n, cost_val)
plt.plot(costs)
plt.xlabel('Iteration')
plt.ylabel('Cost')
plt.show()
def plot_estimates():
X, y = gen_data()
y_predictions = y_pred(X)
ax = plt.gca()
ax.plot(y_predictions[0,:,0], label='estimate')
ax.plot(y[0,:,0], label='ground truth')
# ax.plot(X[0,:,0], label='aggregate')
ax.legend()
plt.show()
run_training()
plot_estimates()
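# --- illustrative sketch (added by the editor, not part of the original script) ---
# The ReshapeLayer/ConcatLayer wiring above flattens each LSTM output from
# (N_BATCH, LENGTH, N_HIDDEN) to (N_BATCH*LENGTH, N_HIDDEN) and joins the
# forward and backward passes along the feature axis.  The same shape
# bookkeeping with plain numpy arrays:
def _concat_shape_demo():
    fwd = np.zeros((N_BATCH, LENGTH, N_HIDDEN))
    bck = np.zeros((N_BATCH, LENGTH, N_HIDDEN))
    flat = np.concatenate([fwd.reshape(-1, N_HIDDEN),
                           bck.reshape(-1, N_HIDDEN)], axis=1)
    assert flat.shape == (N_BATCH * LENGTH, 2 * N_HIDDEN)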
| mit |
Unow/edx-platform | common/lib/capa/capa/tests/test_correctmap.py | 61 | 5833 | """
Tests to verify that CorrectMap behaves correctly
"""
import unittest
from capa.correctmap import CorrectMap
import datetime
class CorrectMapTest(unittest.TestCase):
"""
Tests to verify that CorrectMap behaves correctly
"""
def setUp(self):
self.cmap = CorrectMap()
def test_set_input_properties(self):
# Set the correctmap properties for two inputs
self.cmap.set(
answer_id='1_2_1',
correctness='correct',
npoints=5,
msg='Test message',
hint='Test hint',
hintmode='always',
queuestate={
'key': 'secretstring',
'time': '20130228100026'
}
)
self.cmap.set(
answer_id='2_2_1',
correctness='incorrect',
npoints=None,
msg=None,
hint=None,
hintmode=None,
queuestate=None
)
# Assert that each input has the expected properties
self.assertTrue(self.cmap.is_correct('1_2_1'))
self.assertFalse(self.cmap.is_correct('2_2_1'))
self.assertEqual(self.cmap.get_correctness('1_2_1'), 'correct')
self.assertEqual(self.cmap.get_correctness('2_2_1'), 'incorrect')
self.assertEqual(self.cmap.get_npoints('1_2_1'), 5)
self.assertEqual(self.cmap.get_npoints('2_2_1'), 0)
self.assertEqual(self.cmap.get_msg('1_2_1'), 'Test message')
self.assertEqual(self.cmap.get_msg('2_2_1'), None)
self.assertEqual(self.cmap.get_hint('1_2_1'), 'Test hint')
self.assertEqual(self.cmap.get_hint('2_2_1'), None)
self.assertEqual(self.cmap.get_hintmode('1_2_1'), 'always')
self.assertEqual(self.cmap.get_hintmode('2_2_1'), None)
self.assertTrue(self.cmap.is_queued('1_2_1'))
self.assertFalse(self.cmap.is_queued('2_2_1'))
self.assertEqual(self.cmap.get_queuetime_str('1_2_1'), '20130228100026')
self.assertEqual(self.cmap.get_queuetime_str('2_2_1'), None)
self.assertTrue(self.cmap.is_right_queuekey('1_2_1', 'secretstring'))
self.assertFalse(self.cmap.is_right_queuekey('1_2_1', 'invalidstr'))
self.assertFalse(self.cmap.is_right_queuekey('1_2_1', ''))
self.assertFalse(self.cmap.is_right_queuekey('1_2_1', None))
self.assertFalse(self.cmap.is_right_queuekey('2_2_1', 'secretstring'))
self.assertFalse(self.cmap.is_right_queuekey('2_2_1', 'invalidstr'))
self.assertFalse(self.cmap.is_right_queuekey('2_2_1', ''))
self.assertFalse(self.cmap.is_right_queuekey('2_2_1', None))
def test_get_npoints(self):
# Set the correctmap properties for 4 inputs
# 1) correct, 5 points
# 2) correct, None points
# 3) incorrect, 5 points
# 4) incorrect, None points
# 5) correct, 0 points
self.cmap.set(
answer_id='1_2_1',
correctness='correct',
npoints=5
)
self.cmap.set(
answer_id='2_2_1',
correctness='correct',
npoints=None
)
self.cmap.set(
answer_id='3_2_1',
correctness='incorrect',
npoints=5
)
self.cmap.set(
answer_id='4_2_1',
correctness='incorrect',
npoints=None
)
self.cmap.set(
answer_id='5_2_1',
correctness='correct',
npoints=0
)
# Assert that we get the expected points
# If points assigned --> npoints
# If no points assigned and correct --> 1 point
# If no points assigned and incorrect --> 0 points
self.assertEqual(self.cmap.get_npoints('1_2_1'), 5)
self.assertEqual(self.cmap.get_npoints('2_2_1'), 1)
self.assertEqual(self.cmap.get_npoints('3_2_1'), 5)
self.assertEqual(self.cmap.get_npoints('4_2_1'), 0)
self.assertEqual(self.cmap.get_npoints('5_2_1'), 0)
def test_set_overall_message(self):
# Default is an empty string string
self.assertEqual(self.cmap.get_overall_message(), "")
# Set a message that applies to the whole question
self.cmap.set_overall_message("Test message")
# Retrieve the message
self.assertEqual(self.cmap.get_overall_message(), "Test message")
# Setting the message to None --> empty string
self.cmap.set_overall_message(None)
self.assertEqual(self.cmap.get_overall_message(), "")
def test_update_from_correctmap(self):
# Initialize a CorrectMap with some properties
self.cmap.set(
answer_id='1_2_1',
correctness='correct',
npoints=5,
msg='Test message',
hint='Test hint',
hintmode='always',
queuestate={
'key': 'secretstring',
'time': '20130228100026'
}
)
self.cmap.set_overall_message("Test message")
# Create a second cmap, then update it to have the same properties
# as the first cmap
other_cmap = CorrectMap()
other_cmap.update(self.cmap)
# Assert that it has all the same properties
self.assertEqual(
other_cmap.get_overall_message(),
self.cmap.get_overall_message()
)
self.assertEqual(
other_cmap.get_dict(),
self.cmap.get_dict()
)
def test_update_from_invalid(self):
# Should get an exception if we try to update() a CorrectMap
# with a non-CorrectMap value
invalid_list = [None, "string", 5, datetime.datetime.today()]
for invalid in invalid_list:
with self.assertRaises(Exception):
self.cmap.update(invalid)
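# --- illustrative usage sketch (added by the editor, not part of the original tests) ---
# Based only on the API exercised above: set() records per-input results and the
# getters read them back, with an unassigned npoints defaulting to 1 point for a
# correct answer and 0 for an incorrect one.
def _correctmap_demo():
    cmap = CorrectMap()
    cmap.set(answer_id='1_2_1', correctness='correct', msg='Well done')
    assert cmap.is_correct('1_2_1')
    assert cmap.get_npoints('1_2_1') == 1
    return cmap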
| agpl-3.0 |
zmr/namsel | accuracy_test.py | 1 | 2139 | #encoding: utf-8
import cPickle as pickle
from classify import load_cls, label_chars
from cv2 import GaussianBlur
from feature_extraction import get_zernike_moments, get_hu_moments, \
extract_features, normalize_and_extract_features
from functools import partial
import glob
from multiprocessing.pool import Pool
import numpy as np
import os
from sklearn.externals import joblib
from sobel_features import sobel_features
from transitions import transition_features
from fast_utils import fnormalize, ftrim
cls = load_cls('logistic-cls')
# Load testing sets
print 'Loading test data'
tsets = pickle.load(open('datasets/testing/training_sets.pkl', 'rb'))
scaler = joblib.load('zernike_scaler-latest')
print 'importing classifier'
print cls.get_params()
print 'scoring ...'
keys = tsets.keys()
keys.sort()
all_samples = []
## Baseline accuracies for the data in tsets
baseline = [0.608, 0.5785123966942148, 0.4782608695652174, 0.7522123893805309,
0.6884057971014492, 0.5447154471544715, 0.9752066115702479,
0.9830508474576272]
def test_accuracy(t, clsf=None):
'''Get accuracy score for a testset t'''
if clsf:
cls = clsf
else:
global cls
y = tsets[t][:,0]
x = tsets[t][:,1:]
x3 = []
for j in x:
j = ftrim(j.reshape((32,16)).astype(np.uint8))
x3.append(normalize_and_extract_features(j))
pred = cls.predict(x3)
s = 0
for i, p in enumerate(pred):
if float(p) == y[i]:
s += 1.0
else:
pass
            print 'mismatch', label_chars[y[i]], '||', label_chars[p], t #, max(cls.predict_proba(x3[i])[0])
score = s / len(y)
return score
def test_all(clsf=None):
'''Run accuracy tests for all testsets'''
print 'starting tests. this will take a moment'
test_accuracy(keys[0], clsf)
test_all = partial(test_accuracy, clsf=clsf)
p = Pool()
all_samples = p.map(test_all, keys)
for t, s in zip(keys, all_samples):
print t, s
return np.mean(all_samples)
if __name__ == '__main__':
print test_all()
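# --- illustrative sketch (added by the editor, not part of the original script) ---
# test_accuracy() above counts exact label matches one by one; the same score can
# be computed in one step from arrays of true and predicted labels.
def fraction_correct_example(y_true, y_pred):
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.mean(y_true == y_pred))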
| mit |
cdrooom/odoo | addons/account_payment/wizard/account_payment_order.py | 8 | 5838 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
from openerp.tools.translate import _
class payment_order_create(osv.osv_memory):
"""
Create a payment object with lines corresponding to the account move line
to pay according to the date and the mode provided by the user.
Hypothesis:
        - Small number of non-reconciled move lines, payment modes and bank account types,
        - Big number of partners and bank accounts.
If a type is given, unsuitable account Entry lines are ignored.
"""
_name = 'payment.order.create'
_description = 'payment.order.create'
_columns = {
'duedate': fields.date('Due Date', required=True),
'entries': fields.many2many('account.move.line', 'line_pay_rel', 'pay_id', 'line_id', 'Entries')
}
_defaults = {
'duedate': lambda *a: time.strftime('%Y-%m-%d'),
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if not context: context = {}
res = super(payment_order_create, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
if context and 'line_ids' in context:
doc = etree.XML(res['arch'])
nodes = doc.xpath("//field[@name='entries']")
for node in nodes:
node.set('domain', '[("id", "in", '+ str(context['line_ids'])+')]')
res['arch'] = etree.tostring(doc)
return res
def create_payment(self, cr, uid, ids, context=None):
order_obj = self.pool.get('payment.order')
line_obj = self.pool.get('account.move.line')
payment_obj = self.pool.get('payment.line')
if context is None:
context = {}
data = self.browse(cr, uid, ids, context=context)[0]
line_ids = [entry.id for entry in data.entries]
if not line_ids:
return {'type': 'ir.actions.act_window_close'}
payment = order_obj.browse(cr, uid, context['active_id'], context=context)
t = None
line2bank = line_obj.line2bank(cr, uid, line_ids, t, context)
## Finally populate the current payment with new lines:
for line in line_obj.browse(cr, uid, line_ids, context=context):
if payment.date_prefered == "now":
#no payment date => immediate payment
date_to_pay = False
elif payment.date_prefered == 'due':
date_to_pay = line.date_maturity
elif payment.date_prefered == 'fixed':
date_to_pay = payment.date_scheduled
payment_obj.create(cr, uid,{
'move_line_id': line.id,
'amount_currency': line.amount_residual_currency,
'bank_id': line2bank.get(line.id),
'order_id': payment.id,
'partner_id': line.partner_id and line.partner_id.id or False,
'communication': line.ref or '/',
'state': line.invoice and line.invoice.reference_type != 'none' and 'structured' or 'normal',
'date': date_to_pay,
'currency': (line.invoice and line.invoice.currency_id.id) or line.journal_id.currency.id or line.journal_id.company_id.currency_id.id,
}, context=context)
return {'type': 'ir.actions.act_window_close'}
def search_entries(self, cr, uid, ids, context=None):
line_obj = self.pool.get('account.move.line')
mod_obj = self.pool.get('ir.model.data')
if context is None:
context = {}
data = self.browse(cr, uid, ids, context=context)[0]
search_due_date = data.duedate
# payment = self.pool.get('payment.order').browse(cr, uid, context['active_id'], context=context)
# Search for move line to pay:
domain = [('reconcile_id', '=', False), ('account_id.type', '=', 'payable'), ('credit', '>', 0), ('account_id.reconcile', '=', True)]
domain = domain + ['|', ('date_maturity', '<=', search_due_date), ('date_maturity', '=', False)]
line_ids = line_obj.search(cr, uid, domain, context=context)
context = dict(context, line_ids=line_ids)
model_data_ids = mod_obj.search(cr, uid,[('model', '=', 'ir.ui.view'), ('name', '=', 'view_create_payment_order_lines')], context=context)
resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {'name': _('Entry Lines'),
'context': context,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'payment.order.create',
'views': [(resource_id,'form')],
'type': 'ir.actions.act_window',
'target': 'new',
}
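# --- illustrative sketch (added by the editor, not part of the original wizard) ---
# create_payment() above chooses the payment date from the order's date_prefered
# setting.  The same decision isolated as a helper, with parameter names mirroring
# the fields used above:
def _scheduled_date_example(date_prefered, maturity_date, fixed_date):
    if date_prefered == 'now':
        return False            # immediate payment, no date stored
    elif date_prefered == 'due':
        return maturity_date    # follow the move line's due date
    else:
        return fixed_date       # 'fixed': use the date scheduled on the order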
| agpl-3.0 |
jseabold/scipy | scipy/sparse/csc.py | 58 | 6330 | """Compressed Sparse Column matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['csc_matrix', 'isspmatrix_csc']
import numpy as np
from scipy._lib.six import xrange
from ._sparsetools import csc_tocsr
from . import _sparsetools
from .sputils import upcast, isintlike, IndexMixin, get_index_dtype
from .compressed import _cs_matrix
class csc_matrix(_cs_matrix, IndexMixin):
"""
Compressed Sparse Column matrix
This can be instantiated in several ways:
csc_matrix(D)
with a dense matrix or rank-2 ndarray D
csc_matrix(S)
with another sparse matrix S (equivalent to S.tocsc())
csc_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
where ``data``, ``row_ind`` and ``col_ind`` satisfy the
relationship ``a[row_ind[k], col_ind[k]] = data[k]``.
csc_matrix((data, indices, indptr), [shape=(M, N)])
is the standard CSC representation where the row indices for
column i are stored in ``indices[indptr[i]:indptr[i+1]]``
and their corresponding values are stored in
``data[indptr[i]:indptr[i+1]]``. If the shape parameter is
not supplied, the matrix dimensions are inferred from
the index arrays.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
Data array of the matrix
indices
CSC format index array
indptr
CSC format index pointer array
has_sorted_indices
Whether indices are sorted
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the CSC format
- efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
- efficient column slicing
- fast matrix vector products (CSR, BSR may be faster)
Disadvantages of the CSC format
- slow row slicing operations (consider CSR)
- changes to the sparsity structure are expensive (consider LIL or DOK)
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> csc_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 2, 2, 0, 1, 2])
>>> col = np.array([0, 0, 1, 2, 2, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 4],
[0, 0, 5],
[2, 3, 6]])
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray()
array([[1, 0, 4],
[0, 0, 5],
[2, 3, 6]])
"""
def transpose(self, copy=False):
from .csr import csr_matrix
M,N = self.shape
return csr_matrix((self.data,self.indices,self.indptr),(N,M),copy=copy)
def __iter__(self):
csr = self.tocsr()
for r in xrange(self.shape[0]):
yield csr[r,:]
def tocsc(self, copy=False):
if copy:
return self.copy()
else:
return self
def tocsr(self):
M,N = self.shape
idx_dtype = get_index_dtype((self.indptr, self.indices),
maxval=max(self.nnz, N))
indptr = np.empty(M + 1, dtype=idx_dtype)
indices = np.empty(self.nnz, dtype=idx_dtype)
data = np.empty(self.nnz, dtype=upcast(self.dtype))
csc_tocsr(M, N,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
self.data,
indptr,
indices,
data)
from .csr import csr_matrix
A = csr_matrix((data, indices, indptr), shape=self.shape)
A.has_sorted_indices = True
return A
def __getitem__(self, key):
# Use CSR to implement fancy indexing.
row, col = self._unpack_index(key)
# Things that return submatrices. row or col is a int or slice.
if (isinstance(row, slice) or isinstance(col, slice) or
isintlike(row) or isintlike(col)):
return self.T[col, row].T
# Things that return a sequence of values.
else:
return self.T[col, row]
def nonzero(self):
# CSC can't use _cs_matrix's .nonzero method because it
# returns the indices sorted for self transposed.
# Get row and col indices, from _cs_matrix.tocoo
major_dim, minor_dim = self._swap(self.shape)
minor_indices = self.indices
major_indices = np.empty(len(minor_indices), dtype=self.indptr.dtype)
_sparsetools.expandptr(major_dim, self.indptr, major_indices)
row, col = self._swap((major_indices, minor_indices))
# Sort them to be in C-style order
ind = np.lexsort((col, row))
row = row[ind]
col = col[ind]
return row, col
nonzero.__doc__ = _cs_matrix.nonzero.__doc__
def getrow(self, i):
"""Returns a copy of row i of the matrix, as a (1 x n)
CSR matrix (row vector).
"""
# we convert to CSR to maintain compatibility with old impl.
# in spmatrix.getrow()
return self._get_submatrix(i, slice(None)).tocsr()
def getcol(self, i):
"""Returns a copy of column i of the matrix, as a (m x 1)
CSC matrix (column vector).
"""
return self._get_submatrix(slice(None), i)
# these functions are used by the parent class (_cs_matrix)
    # to remove redundancy between csc_matrix and csr_matrix
def _swap(self,x):
"""swap the members of x if this is a column-oriented matrix
"""
return (x[1],x[0])
def isspmatrix_csc(x):
return isinstance(x, csc_matrix)
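# --- illustrative sketch (added by the editor, not part of the original module) ---
# transpose() above reuses the CSC arrays directly as the CSR arrays of the
# transposed matrix, which is why it costs almost nothing.  A quick check of that
# identity through the public API:
def _csc_transpose_demo():
    A = csc_matrix(np.array([[1, 0, 4], [0, 0, 5], [2, 3, 6]]))
    assert (A.transpose().toarray() == A.toarray().T).all()
    assert (A.tocsr().toarray() == A.toarray()).all()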
| bsd-3-clause |
ktan2020/legacy-automation | win/Lib/test/test_os.py | 7 | 32181 | # As a test suite for the os module, this is woefully inadequate, but this
# does add tests for a few functions which have been determined to be more
# portable than they had been thought to be.
import os
import errno
import unittest
import warnings
import sys
import signal
import subprocess
import time
from test import test_support
import mmap
import uuid
warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, __name__)
warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, __name__)
# Tests creating TESTFN
class FileTests(unittest.TestCase):
def setUp(self):
if os.path.exists(test_support.TESTFN):
os.unlink(test_support.TESTFN)
tearDown = setUp
def test_access(self):
f = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR)
os.close(f)
self.assertTrue(os.access(test_support.TESTFN, os.W_OK))
def test_closerange(self):
first = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR)
# We must allocate two consecutive file descriptors, otherwise
# it will mess up other file descriptors (perhaps even the three
# standard ones).
second = os.dup(first)
try:
retries = 0
while second != first + 1:
os.close(first)
retries += 1
if retries > 10:
# XXX test skipped
self.skipTest("couldn't allocate two consecutive fds")
first, second = second, os.dup(second)
finally:
os.close(second)
# close a fd that is open, and one that isn't
os.closerange(first, first + 2)
self.assertRaises(OSError, os.write, first, "a")
@test_support.cpython_only
def test_rename(self):
path = unicode(test_support.TESTFN)
old = sys.getrefcount(path)
self.assertRaises(TypeError, os.rename, path, 0)
new = sys.getrefcount(path)
self.assertEqual(old, new)
class TemporaryFileTests(unittest.TestCase):
def setUp(self):
self.files = []
os.mkdir(test_support.TESTFN)
def tearDown(self):
for name in self.files:
os.unlink(name)
os.rmdir(test_support.TESTFN)
def check_tempfile(self, name):
# make sure it doesn't already exist:
self.assertFalse(os.path.exists(name),
"file already exists for temporary file")
# make sure we can create the file
open(name, "w")
self.files.append(name)
def test_tempnam(self):
if not hasattr(os, "tempnam"):
return
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tempnam", RuntimeWarning,
r"test_os$")
warnings.filterwarnings("ignore", "tempnam", DeprecationWarning)
self.check_tempfile(os.tempnam())
name = os.tempnam(test_support.TESTFN)
self.check_tempfile(name)
name = os.tempnam(test_support.TESTFN, "pfx")
self.assertTrue(os.path.basename(name)[:3] == "pfx")
self.check_tempfile(name)
def test_tmpfile(self):
if not hasattr(os, "tmpfile"):
return
# As with test_tmpnam() below, the Windows implementation of tmpfile()
# attempts to create a file in the root directory of the current drive.
# On Vista and Server 2008, this test will always fail for normal users
# as writing to the root directory requires elevated privileges. With
# XP and below, the semantics of tmpfile() are the same, but the user
# running the test is more likely to have administrative privileges on
# their account already. If that's the case, then os.tmpfile() should
# work. In order to make this test as useful as possible, rather than
# trying to detect Windows versions or whether or not the user has the
# right permissions, just try and create a file in the root directory
# and see if it raises a 'Permission denied' OSError. If it does, then
# test that a subsequent call to os.tmpfile() raises the same error. If
# it doesn't, assume we're on XP or below and the user running the test
# has administrative privileges, and proceed with the test as normal.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tmpfile", DeprecationWarning)
if sys.platform == 'win32':
name = '\\python_test_os_test_tmpfile.txt'
if os.path.exists(name):
os.remove(name)
try:
fp = open(name, 'w')
except IOError, first:
# open() failed, assert tmpfile() fails in the same way.
# Although open() raises an IOError and os.tmpfile() raises an
# OSError(), 'args' will be (13, 'Permission denied') in both
# cases.
try:
fp = os.tmpfile()
except OSError, second:
self.assertEqual(first.args, second.args)
else:
self.fail("expected os.tmpfile() to raise OSError")
return
else:
# open() worked, therefore, tmpfile() should work. Close our
# dummy file and proceed with the test as normal.
fp.close()
os.remove(name)
fp = os.tmpfile()
fp.write("foobar")
fp.seek(0,0)
s = fp.read()
fp.close()
self.assertTrue(s == "foobar")
def test_tmpnam(self):
if not hasattr(os, "tmpnam"):
return
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning,
r"test_os$")
warnings.filterwarnings("ignore", "tmpnam", DeprecationWarning)
name = os.tmpnam()
if sys.platform in ("win32",):
# The Windows tmpnam() seems useless. From the MS docs:
#
# The character string that tmpnam creates consists of
# the path prefix, defined by the entry P_tmpdir in the
# file STDIO.H, followed by a sequence consisting of the
# digit characters '0' through '9'; the numerical value
# of this string is in the range 1 - 65,535. Changing the
# definitions of L_tmpnam or P_tmpdir in STDIO.H does not
# change the operation of tmpnam.
#
# The really bizarre part is that, at least under MSVC6,
# P_tmpdir is "\\". That is, the path returned refers to
# the root of the current drive. That's a terrible place to
# put temp files, and, depending on privileges, the user
# may not even be able to open a file in the root directory.
self.assertFalse(os.path.exists(name),
"file already exists for temporary file")
else:
self.check_tempfile(name)
# Test attributes on return values from os.*stat* family.
class StatAttributeTests(unittest.TestCase):
def setUp(self):
os.mkdir(test_support.TESTFN)
self.fname = os.path.join(test_support.TESTFN, "f1")
f = open(self.fname, 'wb')
f.write("ABC")
f.close()
def tearDown(self):
os.unlink(self.fname)
os.rmdir(test_support.TESTFN)
def test_stat_attributes(self):
if not hasattr(os, "stat"):
return
import stat
result = os.stat(self.fname)
# Make sure direct access works
self.assertEqual(result[stat.ST_SIZE], 3)
self.assertEqual(result.st_size, 3)
# Make sure all the attributes are there
members = dir(result)
for name in dir(stat):
if name[:3] == 'ST_':
attr = name.lower()
if name.endswith("TIME"):
def trunc(x): return int(x)
else:
def trunc(x): return x
self.assertEqual(trunc(getattr(result, attr)),
result[getattr(stat, name)])
self.assertIn(attr, members)
try:
result[200]
self.fail("No exception thrown")
except IndexError:
pass
# Make sure that assignment fails
try:
result.st_mode = 1
self.fail("No exception thrown")
except (AttributeError, TypeError):
pass
try:
result.st_rdev = 1
self.fail("No exception thrown")
except (AttributeError, TypeError):
pass
try:
result.parrot = 1
self.fail("No exception thrown")
except AttributeError:
pass
# Use the stat_result constructor with a too-short tuple.
try:
result2 = os.stat_result((10,))
self.fail("No exception thrown")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_statvfs_attributes(self):
if not hasattr(os, "statvfs"):
return
try:
result = os.statvfs(self.fname)
except OSError, e:
# On AtheOS, glibc always returns ENOSYS
if e.errno == errno.ENOSYS:
return
# Make sure direct access works
self.assertEqual(result.f_bfree, result[3])
# Make sure all the attributes are there.
members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files',
'ffree', 'favail', 'flag', 'namemax')
for value, member in enumerate(members):
self.assertEqual(getattr(result, 'f_' + member), result[value])
# Make sure that assignment really fails
try:
result.f_bfree = 1
self.fail("No exception thrown")
except TypeError:
pass
try:
result.parrot = 1
self.fail("No exception thrown")
except AttributeError:
pass
# Use the constructor with a too-short tuple.
try:
result2 = os.statvfs_result((10,))
self.fail("No exception thrown")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_utime_dir(self):
delta = 1000000
st = os.stat(test_support.TESTFN)
# round to int, because some systems may support sub-second
# time stamps in stat, but not in utime.
os.utime(test_support.TESTFN, (st.st_atime, int(st.st_mtime-delta)))
st2 = os.stat(test_support.TESTFN)
self.assertEqual(st2.st_mtime, int(st.st_mtime-delta))
# Restrict test to Win32, since there is no guarantee other
# systems support centiseconds
if sys.platform == 'win32':
def get_file_system(path):
root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
import ctypes
kernel32 = ctypes.windll.kernel32
buf = ctypes.create_string_buffer("", 100)
if kernel32.GetVolumeInformationA(root, None, 0, None, None, None, buf, len(buf)):
return buf.value
if get_file_system(test_support.TESTFN) == "NTFS":
def test_1565150(self):
t1 = 1159195039.25
os.utime(self.fname, (t1, t1))
self.assertEqual(os.stat(self.fname).st_mtime, t1)
def test_large_time(self):
t1 = 5000000000 # some day in 2128
os.utime(self.fname, (t1, t1))
self.assertEqual(os.stat(self.fname).st_mtime, t1)
def test_1686475(self):
# Verify that an open file can be stat'ed
try:
os.stat(r"c:\pagefile.sys")
except WindowsError, e:
if e.errno == 2: # file does not exist; cannot run test
return
self.fail("Could not stat pagefile.sys")
from test import mapping_tests
class EnvironTests(mapping_tests.BasicTestMappingProtocol):
"""check that os.environ object conform to mapping protocol"""
type2test = None
def _reference(self):
return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"}
def _empty_mapping(self):
os.environ.clear()
return os.environ
def setUp(self):
self.__save = dict(os.environ)
os.environ.clear()
def tearDown(self):
os.environ.clear()
os.environ.update(self.__save)
# Bug 1110478
def test_update2(self):
if os.path.exists("/bin/sh"):
os.environ.update(HELLO="World")
with os.popen("/bin/sh -c 'echo $HELLO'") as popen:
value = popen.read().strip()
self.assertEqual(value, "World")
class WalkTests(unittest.TestCase):
"""Tests for os.walk()."""
def test_traversal(self):
import os
from os.path import join
# Build:
# TESTFN/
# TEST1/ a file kid and two directory kids
# tmp1
# SUB1/ a file kid and a directory kid
# tmp2
# SUB11/ no kids
# SUB2/ a file kid and a dirsymlink kid
# tmp3
# link/ a symlink to TESTFN.2
# TEST2/
# tmp4 a lone file
walk_path = join(test_support.TESTFN, "TEST1")
sub1_path = join(walk_path, "SUB1")
sub11_path = join(sub1_path, "SUB11")
sub2_path = join(walk_path, "SUB2")
tmp1_path = join(walk_path, "tmp1")
tmp2_path = join(sub1_path, "tmp2")
tmp3_path = join(sub2_path, "tmp3")
link_path = join(sub2_path, "link")
t2_path = join(test_support.TESTFN, "TEST2")
tmp4_path = join(test_support.TESTFN, "TEST2", "tmp4")
# Create stuff.
os.makedirs(sub11_path)
os.makedirs(sub2_path)
os.makedirs(t2_path)
for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path:
f = file(path, "w")
f.write("I'm " + path + " and proud of it. Blame test_os.\n")
f.close()
if hasattr(os, "symlink"):
os.symlink(os.path.abspath(t2_path), link_path)
sub2_tree = (sub2_path, ["link"], ["tmp3"])
else:
sub2_tree = (sub2_path, [], ["tmp3"])
# Walk top-down.
all = list(os.walk(walk_path))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: TESTFN, SUB1, SUB11, SUB2
# flipped: TESTFN, SUB2, SUB1, SUB11
flipped = all[0][1][0] != "SUB1"
all[0][1].sort()
self.assertEqual(all[0], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[1 + flipped], (sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 + flipped], (sub11_path, [], []))
self.assertEqual(all[3 - 2 * flipped], sub2_tree)
# Prune the search.
all = []
for root, dirs, files in os.walk(walk_path):
all.append((root, dirs, files))
# Don't descend into SUB1.
if 'SUB1' in dirs:
# Note that this also mutates the dirs we appended to all!
dirs.remove('SUB1')
self.assertEqual(len(all), 2)
self.assertEqual(all[0], (walk_path, ["SUB2"], ["tmp1"]))
self.assertEqual(all[1], sub2_tree)
# Walk bottom-up.
all = list(os.walk(walk_path, topdown=False))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: SUB11, SUB1, SUB2, TESTFN
# flipped: SUB2, SUB11, SUB1, TESTFN
flipped = all[3][1][0] != "SUB1"
all[3][1].sort()
self.assertEqual(all[3], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[flipped], (sub11_path, [], []))
self.assertEqual(all[flipped + 1], (sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 - 2 * flipped], sub2_tree)
if hasattr(os, "symlink"):
# Walk, following symlinks.
for root, dirs, files in os.walk(walk_path, followlinks=True):
if root == link_path:
self.assertEqual(dirs, [])
self.assertEqual(files, ["tmp4"])
break
else:
self.fail("Didn't follow symlink with followlinks=True")
def tearDown(self):
# Tear everything down. This is a decent use for bottom-up on
# Windows, which doesn't have a recursive delete command. The
# (not so) subtlety is that rmdir will fail unless the dir's
# kids are removed first, so bottom up is essential.
for root, dirs, files in os.walk(test_support.TESTFN, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
dirname = os.path.join(root, name)
if not os.path.islink(dirname):
os.rmdir(dirname)
else:
os.remove(dirname)
os.rmdir(test_support.TESTFN)
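# --- illustrative sketch (added by the editor, not part of the original tests) ---
# The "Prune the search" block in WalkTests relies on os.walk yielding a mutable
# dirs list: removing an entry in place stops the walk from descending into that
# directory.  A generic version of the same trick (skip_name is just an example):
def _walk_skipping(top, skip_name):
    for root, dirs, files in os.walk(top):
        if skip_name in dirs:
            dirs.remove(skip_name)  # mutate in place so os.walk() skips it
        yield root, dirs, files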
class MakedirTests (unittest.TestCase):
def setUp(self):
os.mkdir(test_support.TESTFN)
def test_makedir(self):
base = test_support.TESTFN
path = os.path.join(base, 'dir1', 'dir2', 'dir3')
os.makedirs(path) # Should work
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4')
os.makedirs(path)
# Try paths with a '.' in them
self.assertRaises(OSError, os.makedirs, os.curdir)
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir)
os.makedirs(path)
path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4',
'dir5', 'dir6')
os.makedirs(path)
def tearDown(self):
path = os.path.join(test_support.TESTFN, 'dir1', 'dir2', 'dir3',
'dir4', 'dir5', 'dir6')
# If the tests failed, the bottom-most directory ('../dir6')
# may not have been created, so we look for the outermost directory
# that exists.
while not os.path.exists(path) and path != test_support.TESTFN:
path = os.path.dirname(path)
os.removedirs(path)
class DevNullTests (unittest.TestCase):
def test_devnull(self):
f = file(os.devnull, 'w')
f.write('hello')
f.close()
f = file(os.devnull, 'r')
self.assertEqual(f.read(), '')
f.close()
class URandomTests (unittest.TestCase):
def test_urandom(self):
try:
self.assertEqual(len(os.urandom(1)), 1)
self.assertEqual(len(os.urandom(10)), 10)
self.assertEqual(len(os.urandom(100)), 100)
self.assertEqual(len(os.urandom(1000)), 1000)
# see http://bugs.python.org/issue3708
self.assertRaises(TypeError, os.urandom, 0.9)
self.assertRaises(TypeError, os.urandom, 1.1)
self.assertRaises(TypeError, os.urandom, 2.0)
except NotImplementedError:
pass
def test_execvpe_with_bad_arglist(self):
self.assertRaises(ValueError, os.execvpe, 'notepad', [], None)
class Win32ErrorTests(unittest.TestCase):
def test_rename(self):
self.assertRaises(WindowsError, os.rename, test_support.TESTFN, test_support.TESTFN+".bak")
def test_remove(self):
self.assertRaises(WindowsError, os.remove, test_support.TESTFN)
def test_chdir(self):
self.assertRaises(WindowsError, os.chdir, test_support.TESTFN)
def test_mkdir(self):
f = open(test_support.TESTFN, "w")
try:
self.assertRaises(WindowsError, os.mkdir, test_support.TESTFN)
finally:
f.close()
os.unlink(test_support.TESTFN)
def test_utime(self):
self.assertRaises(WindowsError, os.utime, test_support.TESTFN, None)
def test_chmod(self):
self.assertRaises(WindowsError, os.chmod, test_support.TESTFN, 0)
class TestInvalidFD(unittest.TestCase):
singles = ["fchdir", "fdopen", "dup", "fdatasync", "fstat",
"fstatvfs", "fsync", "tcgetpgrp", "ttyname"]
#singles.append("close")
    #We omit close because it doesn't raise an exception on some platforms
def get_single(f):
def helper(self):
if hasattr(os, f):
self.check(getattr(os, f))
return helper
for f in singles:
locals()["test_"+f] = get_single(f)
def check(self, f, *args):
try:
f(test_support.make_bad_fd(), *args)
except OSError as e:
self.assertEqual(e.errno, errno.EBADF)
else:
self.fail("%r didn't raise a OSError with a bad file descriptor"
% f)
def test_isatty(self):
if hasattr(os, "isatty"):
self.assertEqual(os.isatty(test_support.make_bad_fd()), False)
def test_closerange(self):
if hasattr(os, "closerange"):
fd = test_support.make_bad_fd()
# Make sure none of the descriptors we are about to close are
# currently valid (issue 6542).
for i in range(10):
try: os.fstat(fd+i)
except OSError:
pass
else:
break
if i < 2:
raise unittest.SkipTest(
"Unable to acquire a range of invalid file descriptors")
self.assertEqual(os.closerange(fd, fd + i-1), None)
def test_dup2(self):
if hasattr(os, "dup2"):
self.check(os.dup2, 20)
def test_fchmod(self):
if hasattr(os, "fchmod"):
self.check(os.fchmod, 0)
def test_fchown(self):
if hasattr(os, "fchown"):
self.check(os.fchown, -1, -1)
def test_fpathconf(self):
if hasattr(os, "fpathconf"):
self.check(os.fpathconf, "PC_NAME_MAX")
def test_ftruncate(self):
if hasattr(os, "ftruncate"):
self.check(os.ftruncate, 0)
def test_lseek(self):
if hasattr(os, "lseek"):
self.check(os.lseek, 0, 0)
def test_read(self):
if hasattr(os, "read"):
self.check(os.read, 1)
def test_tcsetpgrpt(self):
if hasattr(os, "tcsetpgrp"):
self.check(os.tcsetpgrp, 0)
def test_write(self):
if hasattr(os, "write"):
self.check(os.write, " ")
if sys.platform != 'win32':
class Win32ErrorTests(unittest.TestCase):
pass
class PosixUidGidTests(unittest.TestCase):
if hasattr(os, 'setuid'):
def test_setuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setuid, 0)
self.assertRaises(OverflowError, os.setuid, 1<<32)
if hasattr(os, 'setgid'):
def test_setgid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setgid, 0)
self.assertRaises(OverflowError, os.setgid, 1<<32)
if hasattr(os, 'seteuid'):
def test_seteuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.seteuid, 0)
self.assertRaises(OverflowError, os.seteuid, 1<<32)
if hasattr(os, 'setegid'):
def test_setegid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setegid, 0)
self.assertRaises(OverflowError, os.setegid, 1<<32)
if hasattr(os, 'setreuid'):
def test_setreuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setreuid, 0, 0)
self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)
def test_setreuid_neg1(self):
# Needs to accept -1. We run this in a subprocess to avoid
# altering the test runner's process state (issue8045).
subprocess.check_call([
sys.executable, '-c',
'import os,sys;os.setreuid(-1,-1);sys.exit(0)'])
if hasattr(os, 'setregid'):
def test_setregid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setregid, 0, 0)
self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
self.assertRaises(OverflowError, os.setregid, 0, 1<<32)
def test_setregid_neg1(self):
# Needs to accept -1. We run this in a subprocess to avoid
# altering the test runner's process state (issue8045).
subprocess.check_call([
sys.executable, '-c',
'import os,sys;os.setregid(-1,-1);sys.exit(0)'])
else:
class PosixUidGidTests(unittest.TestCase):
pass
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32KillTests(unittest.TestCase):
def _kill(self, sig):
# Start sys.executable as a subprocess and communicate from the
# subprocess to the parent that the interpreter is ready. When it
# becomes ready, send *sig* via os.kill to the subprocess and check
# that the return code is equal to *sig*.
import ctypes
from ctypes import wintypes
import msvcrt
# Since we can't access the contents of the process' stdout until the
# process has exited, use PeekNamedPipe to see what's inside stdout
# without waiting. This is done so we can tell that the interpreter
# is started and running at a point where it could handle a signal.
PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
PeekNamedPipe.restype = wintypes.BOOL
PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle
ctypes.POINTER(ctypes.c_char), # stdout buf
wintypes.DWORD, # Buffer size
ctypes.POINTER(wintypes.DWORD), # bytes read
ctypes.POINTER(wintypes.DWORD), # bytes avail
ctypes.POINTER(wintypes.DWORD)) # bytes left
msg = "running"
proc = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('{}');"
"sys.stdout.flush();"
"input()".format(msg)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
self.addCleanup(proc.stdout.close)
self.addCleanup(proc.stderr.close)
self.addCleanup(proc.stdin.close)
count, max = 0, 100
while count < max and proc.poll() is None:
# Create a string buffer to store the result of stdout from the pipe
buf = ctypes.create_string_buffer(len(msg))
# Obtain the text currently in proc.stdout
# Bytes read/avail/left are left as NULL and unused
rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()),
buf, ctypes.sizeof(buf), None, None, None)
self.assertNotEqual(rslt, 0, "PeekNamedPipe failed")
if buf.value:
self.assertEqual(msg, buf.value)
break
time.sleep(0.1)
count += 1
else:
self.fail("Did not receive communication from the subprocess")
os.kill(proc.pid, sig)
self.assertEqual(proc.wait(), sig)
def test_kill_sigterm(self):
# SIGTERM doesn't mean anything special, but make sure it works
self._kill(signal.SIGTERM)
def test_kill_int(self):
# os.kill on Windows can take an int which gets set as the exit code
self._kill(100)
def _kill_with_event(self, event, name):
tagname = "test_os_%s" % uuid.uuid1()
m = mmap.mmap(-1, 1, tagname)
m[0] = '0'
# Run a script which has console control handling enabled.
proc = subprocess.Popen([sys.executable,
os.path.join(os.path.dirname(__file__),
"win_console_handler.py"), tagname],
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
# Let the interpreter startup before we send signals. See #3137.
count, max = 0, 20
while count < max and proc.poll() is None:
if m[0] == '1':
break
time.sleep(0.5)
count += 1
else:
self.fail("Subprocess didn't finish initialization")
os.kill(proc.pid, event)
# proc.send_signal(event) could also be done here.
# Allow time for the signal to be passed and the process to exit.
time.sleep(0.5)
if not proc.poll():
# Forcefully kill the process if we weren't able to signal it.
os.kill(proc.pid, signal.SIGINT)
self.fail("subprocess did not stop on {}".format(name))
@unittest.skip("subprocesses aren't inheriting CTRL+C property")
def test_CTRL_C_EVENT(self):
from ctypes import wintypes
import ctypes
# Make a NULL value by creating a pointer with no argument.
NULL = ctypes.POINTER(ctypes.c_int)()
SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler
SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int),
wintypes.BOOL)
SetConsoleCtrlHandler.restype = wintypes.BOOL
# Calling this with NULL and FALSE causes the calling process to
# handle CTRL+C, rather than ignore it. This property is inherited
# by subprocesses.
SetConsoleCtrlHandler(NULL, 0)
self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT")
def test_CTRL_BREAK_EVENT(self):
self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT")
def test_main():
test_support.run_unittest(
FileTests,
TemporaryFileTests,
StatAttributeTests,
EnvironTests,
WalkTests,
MakedirTests,
DevNullTests,
URandomTests,
Win32ErrorTests,
TestInvalidFD,
PosixUidGidTests,
Win32KillTests
)
if __name__ == "__main__":
test_main()
| mit |
bruderstein/PythonScript | PythonLib/full/unittest/test/testmock/testmock.py | 1 | 71837 | import copy
import re
import sys
import tempfile
from test.support import ALWAYS_EQ
import unittest
from unittest.test.testmock.support import is_instance
from unittest import mock
from unittest.mock import (
call, DEFAULT, patch, sentinel,
MagicMock, Mock, NonCallableMock,
NonCallableMagicMock, AsyncMock, _Call, _CallList,
create_autospec
)
class Iter(object):
def __init__(self):
self.thing = iter(['this', 'is', 'an', 'iter'])
def __iter__(self):
return self
def next(self):
return next(self.thing)
__next__ = next
class Something(object):
def meth(self, a, b, c, d=None): pass
@classmethod
def cmeth(cls, a, b, c, d=None): pass
@staticmethod
def smeth(a, b, c, d=None): pass
def something(a): pass
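# --- illustrative sketch (added by the editor, not part of the original tests) ---
# The MockTest cases below exercise Mock's core behaviour; a compact standalone
# reminder of the same public unittest.mock API:
def _mock_basics_demo():
    m = Mock(return_value=3)
    assert m(1, two=2) == 3
    m.assert_called_with(1, two=2)
    m.side_effect = [10, 20]        # an iterable side_effect yields successive values
    assert [m(), m()] == [10, 20]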
class MockTest(unittest.TestCase):
def test_all(self):
# if __all__ is badly defined then import * will raise an error
# We have to exec it because you can't import * inside a method
# in Python 3
exec("from unittest.mock import *")
def test_constructor(self):
mock = Mock()
self.assertFalse(mock.called, "called not initialised correctly")
self.assertEqual(mock.call_count, 0,
"call_count not initialised correctly")
self.assertTrue(is_instance(mock.return_value, Mock),
"return_value not initialised correctly")
self.assertEqual(mock.call_args, None,
"call_args not initialised correctly")
self.assertEqual(mock.call_args_list, [],
"call_args_list not initialised correctly")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly")
# Can't use hasattr for this test as it always returns True on a mock
self.assertNotIn('_items', mock.__dict__,
"default mock should not have '_items' attribute")
self.assertIsNone(mock._mock_parent,
"parent not initialised correctly")
self.assertIsNone(mock._mock_methods,
"methods not initialised correctly")
self.assertEqual(mock._mock_children, {},
"children not initialised incorrectly")
def test_return_value_in_constructor(self):
mock = Mock(return_value=None)
self.assertIsNone(mock.return_value,
"return value in constructor not honoured")
def test_change_return_value_via_delegate(self):
def f(): pass
mock = create_autospec(f)
mock.mock.return_value = 1
self.assertEqual(mock(), 1)
def test_change_side_effect_via_delegate(self):
def f(): pass
mock = create_autospec(f)
mock.mock.side_effect = TypeError()
with self.assertRaises(TypeError):
mock()
def test_repr(self):
mock = Mock(name='foo')
self.assertIn('foo', repr(mock))
self.assertIn("'%s'" % id(mock), repr(mock))
mocks = [(Mock(), 'mock'), (Mock(name='bar'), 'bar')]
for mock, name in mocks:
self.assertIn('%s.bar' % name, repr(mock.bar))
self.assertIn('%s.foo()' % name, repr(mock.foo()))
self.assertIn('%s.foo().bing' % name, repr(mock.foo().bing))
self.assertIn('%s()' % name, repr(mock()))
self.assertIn('%s()()' % name, repr(mock()()))
self.assertIn('%s()().foo.bar.baz().bing' % name,
repr(mock()().foo.bar.baz().bing))
def test_repr_with_spec(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec=X())
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec_set=X)
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec_set=X())
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec=X, name='foo')
self.assertIn(" spec='X' ", repr(mock))
self.assertIn(" name='foo' ", repr(mock))
mock = Mock(name='foo')
self.assertNotIn("spec", repr(mock))
mock = Mock()
self.assertNotIn("spec", repr(mock))
mock = Mock(spec=['foo'])
self.assertNotIn("spec", repr(mock))
def test_side_effect(self):
mock = Mock()
def effect(*args, **kwargs):
raise SystemError('kablooie')
mock.side_effect = effect
self.assertRaises(SystemError, mock, 1, 2, fish=3)
mock.assert_called_with(1, 2, fish=3)
results = [1, 2, 3]
def effect():
return results.pop()
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"side effect not used correctly")
mock = Mock(side_effect=sentinel.SideEffect)
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side effect in constructor not used")
def side_effect():
return DEFAULT
mock = Mock(side_effect=side_effect, return_value=sentinel.RETURN)
self.assertEqual(mock(), sentinel.RETURN)
def test_autospec_side_effect(self):
        # Test for issue 17826
results = [1, 2, 3]
def effect():
return results.pop()
def f(): pass
mock = create_autospec(f)
mock.side_effect = [1, 2, 3]
self.assertEqual([mock(), mock(), mock()], [1, 2, 3],
"side effect not used correctly in create_autospec")
# Test where side effect is a callable
results = [1, 2, 3]
mock = create_autospec(f)
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"callable side effect not used correctly")
def test_autospec_side_effect_exception(self):
# Test for issue 23661
def f(): pass
mock = create_autospec(f)
mock.side_effect = ValueError('Bazinga!')
self.assertRaisesRegex(ValueError, 'Bazinga!', mock)
def test_reset_mock(self):
parent = Mock()
spec = ["something"]
mock = Mock(name="child", parent=parent, spec=spec)
mock(sentinel.Something, something=sentinel.SomethingElse)
something = mock.something
mock.something()
mock.side_effect = sentinel.SideEffect
return_value = mock.return_value
return_value()
mock.reset_mock()
self.assertEqual(mock._mock_name, "child",
"name incorrectly reset")
self.assertEqual(mock._mock_parent, parent,
"parent incorrectly reset")
self.assertEqual(mock._mock_methods, spec,
"methods incorrectly reset")
self.assertFalse(mock.called, "called not reset")
self.assertEqual(mock.call_count, 0, "call_count not reset")
self.assertEqual(mock.call_args, None, "call_args not reset")
self.assertEqual(mock.call_args_list, [], "call_args_list not reset")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly: %r != %r" %
(mock.method_calls, []))
self.assertEqual(mock.mock_calls, [])
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side_effect incorrectly reset")
self.assertEqual(mock.return_value, return_value,
"return_value incorrectly reset")
self.assertFalse(return_value.called, "return value mock not reset")
self.assertEqual(mock._mock_children, {'something': something},
"children reset incorrectly")
self.assertEqual(mock.something, something,
"children incorrectly cleared")
self.assertFalse(mock.something.called, "child not reset")
def test_reset_mock_recursion(self):
mock = Mock()
mock.return_value = mock
        # return_value pointing back at the mock itself used to cause
        # infinite recursion in reset_mock()
mock.reset_mock()
def test_reset_mock_on_mock_open_issue_18622(self):
a = mock.mock_open()
a.reset_mock()
def test_call(self):
mock = Mock()
self.assertTrue(is_instance(mock.return_value, Mock),
"Default return_value should be a Mock")
result = mock()
self.assertEqual(mock(), result,
"different result from consecutive calls")
mock.reset_mock()
ret_val = mock(sentinel.Arg)
self.assertTrue(mock.called, "called not set")
self.assertEqual(mock.call_count, 1, "call_count incorrect")
self.assertEqual(mock.call_args, ((sentinel.Arg,), {}),
"call_args not set")
self.assertEqual(mock.call_args.args, (sentinel.Arg,),
"call_args not set")
self.assertEqual(mock.call_args.kwargs, {},
"call_args not set")
self.assertEqual(mock.call_args_list, [((sentinel.Arg,), {})],
"call_args_list not initialised correctly")
mock.return_value = sentinel.ReturnValue
ret_val = mock(sentinel.Arg, key=sentinel.KeyArg)
self.assertEqual(ret_val, sentinel.ReturnValue,
"incorrect return value")
self.assertEqual(mock.call_count, 2, "call_count incorrect")
self.assertEqual(mock.call_args,
((sentinel.Arg,), {'key': sentinel.KeyArg}),
"call_args not set")
self.assertEqual(mock.call_args_list, [
((sentinel.Arg,), {}),
((sentinel.Arg,), {'key': sentinel.KeyArg})
],
"call_args_list not set")
def test_call_args_comparison(self):
mock = Mock()
mock()
mock(sentinel.Arg)
mock(kw=sentinel.Kwarg)
mock(sentinel.Arg, kw=sentinel.Kwarg)
self.assertEqual(mock.call_args_list, [
(),
((sentinel.Arg,),),
({"kw": sentinel.Kwarg},),
((sentinel.Arg,), {"kw": sentinel.Kwarg})
])
self.assertEqual(mock.call_args,
((sentinel.Arg,), {"kw": sentinel.Kwarg}))
self.assertEqual(mock.call_args.args, (sentinel.Arg,))
self.assertEqual(mock.call_args.kwargs, {"kw": sentinel.Kwarg})
# Comparing call_args to a long sequence should not raise
# an exception. See issue 24857.
self.assertFalse(mock.call_args == "a long sequence")
def test_calls_equal_with_any(self):
# Check that equality and non-equality is consistent even when
# comparing with mock.ANY
mm = mock.MagicMock()
self.assertTrue(mm == mm)
self.assertFalse(mm != mm)
self.assertFalse(mm == mock.MagicMock())
self.assertTrue(mm != mock.MagicMock())
self.assertTrue(mm == mock.ANY)
self.assertFalse(mm != mock.ANY)
self.assertTrue(mock.ANY == mm)
self.assertFalse(mock.ANY != mm)
self.assertTrue(mm == ALWAYS_EQ)
self.assertFalse(mm != ALWAYS_EQ)
call1 = mock.call(mock.MagicMock())
call2 = mock.call(mock.ANY)
self.assertTrue(call1 == call2)
self.assertFalse(call1 != call2)
self.assertTrue(call2 == call1)
self.assertFalse(call2 != call1)
self.assertTrue(call1 == ALWAYS_EQ)
self.assertFalse(call1 != ALWAYS_EQ)
self.assertFalse(call1 == 1)
self.assertTrue(call1 != 1)
def test_assert_called_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_with()
self.assertRaises(AssertionError, mock.assert_called_with, 1)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_with)
mock(1, 2, 3, a='fish', b='nothing')
mock.assert_called_with(1, 2, 3, a='fish', b='nothing')
def test_assert_called_with_any(self):
m = MagicMock()
m(MagicMock())
m.assert_called_with(mock.ANY)
def test_assert_called_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock.assert_called_with(1, 2, 3)
mock.assert_called_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_with,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_called_with(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_assert_called_with_method_spec(self):
def _check(mock):
mock(1, b=2, c=3)
mock.assert_called_with(1, 2, 3)
mock.assert_called_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_with,
1, b=3, c=2)
mock = Mock(spec=Something().meth)
_check(mock)
mock = Mock(spec=Something.cmeth)
_check(mock)
mock = Mock(spec=Something().cmeth)
_check(mock)
mock = Mock(spec=Something.smeth)
_check(mock)
mock = Mock(spec=Something().smeth)
_check(mock)
def test_assert_called_exception_message(self):
msg = "Expected '{0}' to have been called"
with self.assertRaisesRegex(AssertionError, msg.format('mock')):
Mock().assert_called()
with self.assertRaisesRegex(AssertionError, msg.format('test_name')):
Mock(name="test_name").assert_called()
def test_assert_called_once_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_once_with()
mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock('foo', 'bar', baz=2)
mock.assert_called_once_with('foo', 'bar', baz=2)
mock.reset_mock()
mock('foo', 'bar', baz=2)
self.assertRaises(
AssertionError,
lambda: mock.assert_called_once_with('bob', 'bar', baz=2)
)
def test_assert_called_once_with_call_list(self):
m = Mock()
m(1)
m(2)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1), call(2)]"),
lambda: m.assert_called_once_with(2))
def test_assert_called_once_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock.assert_called_once_with(1, 2, 3)
mock.assert_called_once_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_once_with,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_called_once_with(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
# Mock called more than once => always fails
mock(4, 5, 6)
self.assertRaises(AssertionError, mock.assert_called_once_with,
1, 2, 3)
self.assertRaises(AssertionError, mock.assert_called_once_with,
4, 5, 6)
def test_attribute_access_returns_mocks(self):
mock = Mock()
something = mock.something
self.assertTrue(is_instance(something, Mock), "attribute isn't a mock")
self.assertEqual(mock.something, something,
"different attributes returned for same name")
# Usage example
mock = Mock()
mock.something.return_value = 3
self.assertEqual(mock.something(), 3, "method returned wrong value")
self.assertTrue(mock.something.called,
"method didn't record being called")
def test_attributes_have_name_and_parent_set(self):
mock = Mock()
something = mock.something
self.assertEqual(something._mock_name, "something",
"attribute name not set correctly")
self.assertEqual(something._mock_parent, mock,
"attribute parent not set correctly")
def test_method_calls_recorded(self):
mock = Mock()
mock.something(3, fish=None)
mock.something_else.something(6, cake=sentinel.Cake)
self.assertEqual(mock.something_else.method_calls,
[("something", (6,), {'cake': sentinel.Cake})],
"method calls not recorded correctly")
self.assertEqual(mock.method_calls, [
("something", (3,), {'fish': None}),
("something_else.something", (6,), {'cake': sentinel.Cake})
],
"method calls not recorded correctly")
def test_method_calls_compare_easily(self):
mock = Mock()
mock.something()
self.assertEqual(mock.method_calls, [('something',)])
self.assertEqual(mock.method_calls, [('something', (), {})])
mock = Mock()
mock.something('different')
self.assertEqual(mock.method_calls, [('something', ('different',))])
self.assertEqual(mock.method_calls,
[('something', ('different',), {})])
mock = Mock()
mock.something(x=1)
self.assertEqual(mock.method_calls, [('something', {'x': 1})])
self.assertEqual(mock.method_calls, [('something', (), {'x': 1})])
mock = Mock()
mock.something('different', some='more')
self.assertEqual(mock.method_calls, [
('something', ('different',), {'some': 'more'})
])
def test_only_allowed_methods_exist(self):
for spec in ['something'], ('something',):
for arg in 'spec', 'spec_set':
mock = Mock(**{arg: spec})
# this should be allowed
mock.something
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute 'something_else'",
getattr, mock, 'something_else'
)
def test_from_spec(self):
class Something(object):
x = 3
__something__ = None
def y(self): pass
def test_attributes(mock):
# should work
mock.x
mock.y
mock.__something__
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute 'z'",
getattr, mock, 'z'
)
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute '__foobar__'",
getattr, mock, '__foobar__'
)
test_attributes(Mock(spec=Something))
test_attributes(Mock(spec=Something()))
def test_wraps_calls(self):
real = Mock()
mock = Mock(wraps=real)
self.assertEqual(mock(), real())
real.reset_mock()
mock(1, 2, fish=3)
real.assert_called_with(1, 2, fish=3)
def test_wraps_prevents_automatic_creation_of_mocks(self):
class Real(object):
pass
real = Real()
mock = Mock(wraps=real)
self.assertRaises(AttributeError, lambda: mock.new_attr())
def test_wraps_call_with_nondefault_return_value(self):
real = Mock()
mock = Mock(wraps=real)
mock.return_value = 3
self.assertEqual(mock(), 3)
self.assertFalse(real.called)
def test_wraps_attributes(self):
class Real(object):
attribute = Mock()
real = Real()
mock = Mock(wraps=real)
self.assertEqual(mock.attribute(), real.attribute())
self.assertRaises(AttributeError, lambda: mock.fish)
self.assertNotEqual(mock.attribute, real.attribute)
result = mock.attribute.frog(1, 2, fish=3)
Real.attribute.frog.assert_called_with(1, 2, fish=3)
self.assertEqual(result, Real.attribute.frog())
def test_customize_wrapped_object_with_side_effect_iterable_with_default(self):
class Real(object):
def method(self):
return sentinel.ORIGINAL_VALUE
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, DEFAULT]
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.ORIGINAL_VALUE)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_side_effect_iterable(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, sentinel.VALUE2]
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.VALUE2)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_side_effect_exception(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = RuntimeError
self.assertRaises(RuntimeError, mock.method)
def test_customize_wrapped_object_with_side_effect_function(self):
class Real(object):
def method(self): pass
def side_effect():
return sentinel.VALUE
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = side_effect
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.return_value = sentinel.VALUE
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value_and_side_effect(self):
# side_effect should always take precedence over return_value.
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, sentinel.VALUE2]
mock.method.return_value = sentinel.WRONG_VALUE
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.VALUE2)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_return_value_and_side_effect2(self):
# side_effect can return DEFAULT to default to return_value
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = lambda: DEFAULT
mock.method.return_value = sentinel.VALUE
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value_and_side_effect_default(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, DEFAULT]
mock.method.return_value = sentinel.RETURN
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.RETURN)
self.assertRaises(StopIteration, mock.method)
def test_magic_method_wraps_dict(self):
# bpo-25597: MagicMock with wrap doesn't call wrapped object's
# method for magic methods with default values.
data = {'foo': 'bar'}
wrapped_dict = MagicMock(wraps=data)
self.assertEqual(wrapped_dict.get('foo'), 'bar')
# Accessing key gives a MagicMock
self.assertIsInstance(wrapped_dict['foo'], MagicMock)
# __contains__ method has a default value of False
self.assertFalse('foo' in wrapped_dict)
# return_value is non-sentinel and takes precedence over wrapped value.
wrapped_dict.get.return_value = 'return_value'
self.assertEqual(wrapped_dict.get('foo'), 'return_value')
# return_value is sentinel and hence wrapped value is returned.
wrapped_dict.get.return_value = sentinel.DEFAULT
self.assertEqual(wrapped_dict.get('foo'), 'bar')
self.assertEqual(wrapped_dict.get('baz'), None)
self.assertIsInstance(wrapped_dict['baz'], MagicMock)
self.assertFalse('bar' in wrapped_dict)
data['baz'] = 'spam'
self.assertEqual(wrapped_dict.get('baz'), 'spam')
self.assertIsInstance(wrapped_dict['baz'], MagicMock)
self.assertFalse('bar' in wrapped_dict)
del data['baz']
self.assertEqual(wrapped_dict.get('baz'), None)
def test_magic_method_wraps_class(self):
class Foo:
def __getitem__(self, index):
return index
def __custom_method__(self):
return "foo"
klass = MagicMock(wraps=Foo)
obj = klass()
self.assertEqual(obj.__getitem__(2), 2)
self.assertEqual(obj[2], 2)
self.assertEqual(obj.__custom_method__(), "foo")
def test_exceptional_side_effect(self):
mock = Mock(side_effect=AttributeError)
self.assertRaises(AttributeError, mock)
mock = Mock(side_effect=AttributeError('foo'))
self.assertRaises(AttributeError, mock)
def test_baseexceptional_side_effect(self):
mock = Mock(side_effect=KeyboardInterrupt)
self.assertRaises(KeyboardInterrupt, mock)
mock = Mock(side_effect=KeyboardInterrupt('foo'))
self.assertRaises(KeyboardInterrupt, mock)
def test_assert_called_with_message(self):
mock = Mock()
self.assertRaisesRegex(AssertionError, 'not called',
mock.assert_called_with)
def test_assert_called_once_with_message(self):
mock = Mock(name='geoffrey')
self.assertRaisesRegex(AssertionError,
r"Expected 'geoffrey' to be called once\.",
mock.assert_called_once_with)
def test__name__(self):
mock = Mock()
self.assertRaises(AttributeError, lambda: mock.__name__)
mock.__name__ = 'foo'
self.assertEqual(mock.__name__, 'foo')
def test_spec_list_subclass(self):
class Sub(list):
pass
mock = Mock(spec=Sub(['foo']))
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock, 'foo')
def test_spec_class(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIsInstance(mock, X)
mock = Mock(spec=X())
self.assertIsInstance(mock, X)
self.assertIs(mock.__class__, X)
self.assertEqual(Mock().__class__.__name__, 'Mock')
mock = Mock(spec_set=X)
self.assertIsInstance(mock, X)
mock = Mock(spec_set=X())
self.assertIsInstance(mock, X)
def test_spec_class_no_object_base(self):
class X:
pass
mock = Mock(spec=X)
self.assertIsInstance(mock, X)
mock = Mock(spec=X())
self.assertIsInstance(mock, X)
self.assertIs(mock.__class__, X)
self.assertEqual(Mock().__class__.__name__, 'Mock')
mock = Mock(spec_set=X)
self.assertIsInstance(mock, X)
mock = Mock(spec_set=X())
self.assertIsInstance(mock, X)
def test_setting_attribute_with_spec_set(self):
class X(object):
y = 3
mock = Mock(spec=X)
mock.x = 'foo'
mock = Mock(spec_set=X)
def set_attr():
mock.x = 'foo'
mock.y = 'foo'
self.assertRaises(AttributeError, set_attr)
def test_copy(self):
current = sys.getrecursionlimit()
self.addCleanup(sys.setrecursionlimit, current)
# can't use sys.maxint as this doesn't exist in Python 3
sys.setrecursionlimit(int(10e8))
# this segfaults without the fix in place
copy.copy(Mock())
def test_subclass_with_properties(self):
class SubClass(Mock):
def _get(self):
return 3
def _set(self, value):
raise NameError('strange error')
some_attribute = property(_get, _set)
s = SubClass(spec_set=SubClass)
self.assertEqual(s.some_attribute, 3)
def test():
s.some_attribute = 3
self.assertRaises(NameError, test)
def test():
s.foo = 'bar'
self.assertRaises(AttributeError, test)
def test_setting_call(self):
mock = Mock()
def __call__(self, a):
self._increment_mock_call(a)
return self._mock_call(a)
type(mock).__call__ = __call__
mock('one')
mock.assert_called_with('one')
self.assertRaises(TypeError, mock, 'one', 'two')
def test_dir(self):
mock = Mock()
attrs = set(dir(mock))
type_attrs = set([m for m in dir(Mock) if not m.startswith('_')])
# all public attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
# creates these attributes
mock.a, mock.b
self.assertIn('a', dir(mock))
self.assertIn('b', dir(mock))
# instance attributes
mock.c = mock.d = None
self.assertIn('c', dir(mock))
self.assertIn('d', dir(mock))
# magic methods
mock.__iter__ = lambda s: iter([])
self.assertIn('__iter__', dir(mock))
def test_dir_from_spec(self):
mock = Mock(spec=unittest.TestCase)
testcase_attrs = set(dir(unittest.TestCase))
attrs = set(dir(mock))
# all attributes from the spec are included
self.assertEqual(set(), testcase_attrs - attrs)
# shadow a sys attribute
mock.version = 3
self.assertEqual(dir(mock).count('version'), 1)
def test_filter_dir(self):
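        # 'mock' here is the unittest.mock module imported at the top,
        # not a Mock instance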
patcher = patch.object(mock, 'FILTER_DIR', False)
patcher.start()
try:
attrs = set(dir(Mock()))
type_attrs = set(dir(Mock))
# ALL attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
finally:
patcher.stop()
def test_dir_does_not_include_deleted_attributes(self):
mock = Mock()
mock.child.return_value = 1
self.assertIn('child', dir(mock))
del mock.child
self.assertNotIn('child', dir(mock))
def test_configure_mock(self):
mock = Mock(foo='bar')
self.assertEqual(mock.foo, 'bar')
mock = MagicMock(foo='bar')
self.assertEqual(mock.foo, 'bar')
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
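        # dotted keys such as 'foo.bar.return_value' configure attributes
        # on child mocks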
mock = Mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
mock = Mock()
mock.configure_mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def assertRaisesWithMsg(self, exception, message, func, *args, **kwargs):
# needed because assertRaisesRegex doesn't work easily with newlines
with self.assertRaises(exception) as context:
func(*args, **kwargs)
msg = str(context.exception)
self.assertEqual(msg, message)
def test_assert_called_with_failure_message(self):
mock = NonCallableMock()
actual = 'not called.'
expected = "mock(1, '2', 3, bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
mock.assert_called_with, 1, '2', 3, bar='foo'
)
mock.foo(1, '2', 3, foo='foo')
asserters = [
mock.foo.assert_called_with, mock.foo.assert_called_once_with
]
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(1, '2', 3, bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, 1, '2', 3, bar='foo'
)
# just kwargs
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, bar='foo'
)
# just args
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(1, 2, 3)"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, 1, 2, 3
)
# empty
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo()"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual), meth
)
def test_mock_calls(self):
mock = MagicMock()
# need to do this because MagicMock.mock_calls used to just return
# a MagicMock which also returned a MagicMock when __eq__ was called
self.assertIs(mock.mock_calls == [], True)
mock = MagicMock()
mock()
expected = [('', (), {})]
self.assertEqual(mock.mock_calls, expected)
mock.foo()
expected.append(call.foo())
self.assertEqual(mock.mock_calls, expected)
# intermediate mock_calls work too
self.assertEqual(mock.foo.mock_calls, [('', (), {})])
mock = MagicMock()
mock().foo(1, 2, 3, a=4, b=5)
expected = [
('', (), {}), ('().foo', (1, 2, 3), dict(a=4, b=5))
]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock.return_value.foo.mock_calls,
[('', (1, 2, 3), dict(a=4, b=5))])
self.assertEqual(mock.return_value.mock_calls,
[('foo', (1, 2, 3), dict(a=4, b=5))])
mock = MagicMock()
mock().foo.bar().baz()
expected = [
('', (), {}), ('().foo.bar', (), {}),
('().foo.bar().baz', (), {})
]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock().mock_calls,
call.foo.bar().baz().call_list())
for kwargs in dict(), dict(name='bar'):
mock = MagicMock(**kwargs)
int(mock.foo)
expected = [('foo.__int__', (), {})]
self.assertEqual(mock.mock_calls, expected)
mock = MagicMock(**kwargs)
mock.a()()
expected = [('a', (), {}), ('a()', (), {})]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock.a().mock_calls, [call()])
mock = MagicMock(**kwargs)
mock(1)(2)(3)
self.assertEqual(mock.mock_calls, call(1)(2)(3).call_list())
self.assertEqual(mock().mock_calls, call(2)(3).call_list())
self.assertEqual(mock()().mock_calls, call(3).call_list())
mock = MagicMock(**kwargs)
mock(1)(2)(3).a.b.c(4)
self.assertEqual(mock.mock_calls,
call(1)(2)(3).a.b.c(4).call_list())
self.assertEqual(mock().mock_calls,
call(2)(3).a.b.c(4).call_list())
self.assertEqual(mock()().mock_calls,
call(3).a.b.c(4).call_list())
mock = MagicMock(**kwargs)
int(mock().foo.bar().baz())
last_call = ('().foo.bar().baz().__int__', (), {})
self.assertEqual(mock.mock_calls[-1], last_call)
self.assertEqual(mock().mock_calls,
call.foo.bar().baz().__int__().call_list())
self.assertEqual(mock().foo.bar().mock_calls,
call.baz().__int__().call_list())
self.assertEqual(mock().foo.bar().baz.mock_calls,
call().__int__().call_list())
def test_child_mock_call_equal(self):
m = Mock()
result = m()
result.wibble()
# parent looks like this:
self.assertEqual(m.mock_calls, [call(), call().wibble()])
# but child should look like this:
self.assertEqual(result.mock_calls, [call.wibble()])
def test_mock_call_not_equal_leaf(self):
m = Mock()
m.foo().something()
self.assertNotEqual(m.mock_calls[1], call.foo().different())
self.assertEqual(m.mock_calls[0], call.foo())
def test_mock_call_not_equal_non_leaf(self):
m = Mock()
m.foo().bar()
self.assertNotEqual(m.mock_calls[1], call.baz().bar())
self.assertNotEqual(m.mock_calls[0], call.baz())
def test_mock_call_not_equal_non_leaf_params_different(self):
m = Mock()
m.foo(x=1).bar()
# This isn't ideal, but there's no way to fix it without breaking backwards compatibility:
self.assertEqual(m.mock_calls[1], call.foo(x=2).bar())
def test_mock_call_not_equal_non_leaf_attr(self):
m = Mock()
m.foo.bar()
self.assertNotEqual(m.mock_calls[0], call.baz.bar())
def test_mock_call_not_equal_non_leaf_call_versus_attr(self):
m = Mock()
m.foo.bar()
self.assertNotEqual(m.mock_calls[0], call.foo().bar())
def test_mock_call_repr(self):
m = Mock()
m.foo().bar().baz.bob()
self.assertEqual(repr(m.mock_calls[0]), 'call.foo()')
self.assertEqual(repr(m.mock_calls[1]), 'call.foo().bar()')
self.assertEqual(repr(m.mock_calls[2]), 'call.foo().bar().baz.bob()')
def test_mock_call_repr_loop(self):
m = Mock()
m.foo = m
repr(m.foo())
self.assertRegex(repr(m.foo()), r"<Mock name='mock\(\)' id='\d+'>")
def test_mock_calls_contains(self):
m = Mock()
self.assertFalse([call()] in m.mock_calls)
def test_subclassing(self):
class Subclass(Mock):
pass
mock = Subclass()
self.assertIsInstance(mock.foo, Subclass)
self.assertIsInstance(mock(), Subclass)
class Subclass(Mock):
def _get_child_mock(self, **kwargs):
return Mock(**kwargs)
mock = Subclass()
self.assertNotIsInstance(mock.foo, Subclass)
self.assertNotIsInstance(mock(), Subclass)
def test_arg_lists(self):
mocks = [
Mock(),
MagicMock(),
NonCallableMock(),
NonCallableMagicMock()
]
def assert_attrs(mock):
names = 'call_args_list', 'method_calls', 'mock_calls'
for name in names:
attr = getattr(mock, name)
self.assertIsInstance(attr, _CallList)
self.assertIsInstance(attr, list)
self.assertEqual(attr, [])
for mock in mocks:
assert_attrs(mock)
if callable(mock):
mock()
mock(1, 2)
mock(a=3)
mock.reset_mock()
assert_attrs(mock)
mock.foo()
mock.foo.bar(1, a=3)
mock.foo(1).bar().baz(3)
mock.reset_mock()
assert_attrs(mock)
def test_call_args_two_tuple(self):
mock = Mock()
mock(1, a=3)
mock(2, b=4)
self.assertEqual(len(mock.call_args), 2)
self.assertEqual(mock.call_args.args, (2,))
self.assertEqual(mock.call_args.kwargs, dict(b=4))
expected_list = [((1,), dict(a=3)), ((2,), dict(b=4))]
for expected, call_args in zip(expected_list, mock.call_args_list):
self.assertEqual(len(call_args), 2)
self.assertEqual(expected[0], call_args[0])
self.assertEqual(expected[1], call_args[1])
def test_side_effect_iterator(self):
mock = Mock(side_effect=iter([1, 2, 3]))
self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
self.assertRaises(StopIteration, mock)
mock = MagicMock(side_effect=['a', 'b', 'c'])
self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
self.assertRaises(StopIteration, mock)
mock = Mock(side_effect='ghi')
self.assertEqual([mock(), mock(), mock()], ['g', 'h', 'i'])
self.assertRaises(StopIteration, mock)
class Foo(object):
pass
mock = MagicMock(side_effect=Foo)
self.assertIsInstance(mock(), Foo)
mock = Mock(side_effect=Iter())
self.assertEqual([mock(), mock(), mock(), mock()],
['this', 'is', 'an', 'iter'])
self.assertRaises(StopIteration, mock)
def test_side_effect_iterator_exceptions(self):
for Klass in Mock, MagicMock:
iterable = (ValueError, 3, KeyError, 6)
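            # exception classes/instances in the iterable are raised;
            # other values are returned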
m = Klass(side_effect=iterable)
self.assertRaises(ValueError, m)
self.assertEqual(m(), 3)
self.assertRaises(KeyError, m)
self.assertEqual(m(), 6)
def test_side_effect_setting_iterator(self):
mock = Mock()
mock.side_effect = iter([1, 2, 3])
self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
self.assertRaises(StopIteration, mock)
side_effect = mock.side_effect
self.assertIsInstance(side_effect, type(iter([])))
mock.side_effect = ['a', 'b', 'c']
self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
self.assertRaises(StopIteration, mock)
side_effect = mock.side_effect
self.assertIsInstance(side_effect, type(iter([])))
this_iter = Iter()
mock.side_effect = this_iter
self.assertEqual([mock(), mock(), mock(), mock()],
['this', 'is', 'an', 'iter'])
self.assertRaises(StopIteration, mock)
self.assertIs(mock.side_effect, this_iter)
def test_side_effect_iterator_default(self):
mock = Mock(return_value=2)
mock.side_effect = iter([1, DEFAULT])
self.assertEqual([mock(), mock()], [1, 2])
def test_assert_has_calls_any_order(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
mock(b=6)
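        # each entry below is an equivalent spelling of a recorded call:
        # call() objects, bare arg/kwarg tuples, and ('', args, kwargs) triples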
kalls = [
call(1, 2), ({'a': 3},),
((3, 4),), ((), {'a': 3}),
('', (1, 2)), ('', {'a': 3}),
('', (1, 2), {}), ('', (), {'a': 3})
]
for kall in kalls:
mock.assert_has_calls([kall], any_order=True)
for kall in call(1, '2'), call(b=3), call(), 3, None, 'foo':
self.assertRaises(
AssertionError, mock.assert_has_calls,
[kall], any_order=True
)
kall_lists = [
[call(1, 2), call(b=6)],
[call(3, 4), call(1, 2)],
[call(b=6), call(b=6)],
]
for kall_list in kall_lists:
mock.assert_has_calls(kall_list, any_order=True)
kall_lists = [
[call(b=6), call(b=6), call(b=6)],
[call(1, 2), call(1, 2)],
[call(3, 4), call(1, 2), call(5, 7)],
[call(b=6), call(3, 4), call(b=6), call(1, 2), call(b=6)],
]
for kall_list in kall_lists:
self.assertRaises(
AssertionError, mock.assert_has_calls,
kall_list, any_order=True
)
def test_assert_has_calls(self):
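        # kalls1/kalls2 mix call() objects with raw tuple forms;
        # assert_has_calls must accept either notation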
kalls1 = [
call(1, 2), ({'a': 3},),
((3, 4),), call(b=6),
('', (1,), {'b': 6}),
]
kalls2 = [call.foo(), call.bar(1)]
kalls2.extend(call.spam().baz(a=3).call_list())
kalls2.extend(call.bam(set(), foo={}).fish([1]).call_list())
mocks = []
for mock in Mock(), MagicMock():
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
mock(1, b=6)
mocks.append((mock, kalls1))
mock = Mock()
mock.foo()
mock.bar(1)
mock.spam().baz(a=3)
mock.bam(set(), foo={}).fish([1])
mocks.append((mock, kalls2))
for mock, kalls in mocks:
for i in range(len(kalls)):
for step in 1, 2, 3:
these = kalls[i:i+step]
mock.assert_has_calls(these)
if len(these) > 1:
self.assertRaises(
AssertionError,
mock.assert_has_calls,
list(reversed(these))
)
def test_assert_has_calls_nested_spec(self):
class Something:
def __init__(self): pass
def meth(self, a, b, c, d=None): pass
class Foo:
def __init__(self, a): pass
def meth1(self, a, b): pass
mock_class = create_autospec(Something)
for m in [mock_class, mock_class()]:
m.meth(1, 2, 3, d=1)
m.assert_has_calls([call.meth(1, 2, 3, d=1)])
m.assert_has_calls([call.meth(1, 2, 3, 1)])
mock_class.reset_mock()
for m in [mock_class, mock_class()]:
self.assertRaises(AssertionError, m.assert_has_calls, [call.Foo()])
m.Foo(1).meth1(1, 2)
m.assert_has_calls([call.Foo(1), call.Foo(1).meth1(1, 2)])
m.Foo.assert_has_calls([call(1), call().meth1(1, 2)])
mock_class.reset_mock()
invalid_calls = [call.meth(1),
call.non_existent(1),
call.Foo().non_existent(1),
call.Foo().meth(1, 2, 3, 4)]
for kall in invalid_calls:
self.assertRaises(AssertionError,
mock_class.assert_has_calls,
[kall]
)
def test_assert_has_calls_nested_without_spec(self):
m = MagicMock()
m().foo().bar().baz()
m.one().two().three()
calls = call.one().two().three().call_list()
m.assert_has_calls(calls)
def test_assert_has_calls_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock(4, 5, c=6, d=7)
mock(10, 11, c=12)
calls = [
('', (1, 2, 3), {}),
('', (4, 5, 6), {'d': 7}),
((10, 11, 12), {}),
]
mock.assert_has_calls(calls)
mock.assert_has_calls(calls, any_order=True)
mock.assert_has_calls(calls[1:])
mock.assert_has_calls(calls[1:], any_order=True)
mock.assert_has_calls(calls[:-1])
mock.assert_has_calls(calls[:-1], any_order=True)
# Reversed order
calls = list(reversed(calls))
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls)
mock.assert_has_calls(calls, any_order=True)
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls[1:])
mock.assert_has_calls(calls[1:], any_order=True)
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls[:-1])
mock.assert_has_calls(calls[:-1], any_order=True)
def test_assert_has_calls_not_matching_spec_error(self):
def f(x=None): pass
mock = Mock(spec=f)
mock(1)
with self.assertRaisesRegex(
AssertionError,
'^{}$'.format(
re.escape('Calls not found.\n'
'Expected: [call()]\n'
'Actual: [call(1)]'))) as cm:
mock.assert_has_calls([call()])
self.assertIsNone(cm.exception.__cause__)
with self.assertRaisesRegex(
AssertionError,
'^{}$'.format(
re.escape(
'Error processing expected calls.\n'
"Errors: [None, TypeError('too many positional arguments')]\n"
"Expected: [call(), call(1, 2)]\n"
'Actual: [call(1)]'))) as cm:
mock.assert_has_calls([call(), call(1, 2)])
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_assert_any_call(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(1, b=6)
mock.assert_any_call(1, 2)
mock.assert_any_call(a=3)
mock.assert_any_call(1, b=6)
self.assertRaises(
AssertionError,
mock.assert_any_call
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
1, 3
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
a=4
)
def test_assert_any_call_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock(4, 5, c=6, d=7)
mock.assert_any_call(1, 2, 3)
mock.assert_any_call(a=1, b=2, c=3)
mock.assert_any_call(4, 5, 6, 7)
mock.assert_any_call(a=4, b=5, c=6, d=7)
self.assertRaises(AssertionError, mock.assert_any_call,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_any_call(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_mock_calls_create_autospec(self):
def f(a, b): pass
obj = Iter()
obj.f = f
funcs = [
create_autospec(f),
create_autospec(obj).f
]
for func in funcs:
func(1, 2)
func(3, 4)
self.assertEqual(
func.mock_calls, [call(1, 2), call(3, 4)]
)
    # Issue 21222
def test_create_autospec_with_name(self):
m = mock.create_autospec(object(), name='sweet_func')
self.assertIn('sweet_func', repr(m))
    # Issue 23078
def test_create_autospec_classmethod_and_staticmethod(self):
class TestClass:
@classmethod
def class_method(cls): pass
@staticmethod
def static_method(): pass
for method in ('class_method', 'static_method'):
with self.subTest(method=method):
mock_method = mock.create_autospec(getattr(TestClass, method))
mock_method()
mock_method.assert_called_once_with()
self.assertRaises(TypeError, mock_method, 'extra_arg')
    # Issue 21238
def test_mock_unsafe(self):
m = Mock()
msg = "Attributes cannot start with 'assert' or 'assret'"
with self.assertRaisesRegex(AttributeError, msg):
m.assert_foo_call()
with self.assertRaisesRegex(AttributeError, msg):
m.assret_foo_call()
m = Mock(unsafe=True)
m.assert_foo_call()
m.assret_foo_call()
    # Issue 21262
def test_assert_not_called(self):
m = Mock()
m.hello.assert_not_called()
m.hello()
with self.assertRaises(AssertionError):
m.hello.assert_not_called()
def test_assert_not_called_message(self):
m = Mock()
m(1, 2)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1, 2)]"),
m.assert_not_called)
def test_assert_called(self):
m = Mock()
with self.assertRaises(AssertionError):
m.hello.assert_called()
m.hello()
m.hello.assert_called()
m.hello()
m.hello.assert_called()
def test_assert_called_once(self):
m = Mock()
with self.assertRaises(AssertionError):
m.hello.assert_called_once()
m.hello()
m.hello.assert_called_once()
m.hello()
with self.assertRaises(AssertionError):
m.hello.assert_called_once()
def test_assert_called_once_message(self):
m = Mock()
m(1, 2)
m(3)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1, 2), call(3)]"),
m.assert_called_once)
def test_assert_called_once_message_not_called(self):
m = Mock()
with self.assertRaises(AssertionError) as e:
m.assert_called_once()
self.assertNotIn("Calls:", str(e.exception))
    # Issue 37212: printout of keyword args now preserves the original order
def test_ordered_call_signature(self):
m = Mock()
m.hello(name='hello', daddy='hero')
text = "call(name='hello', daddy='hero')"
self.assertEqual(repr(m.hello.call_args), text)
    # Issue 21270: overrides tuple methods for mock.call objects
def test_override_tuple_methods(self):
c = call.count()
        i = call.index(132, 'hello')
m = Mock()
m.count()
m.index(132,"hello")
self.assertEqual(m.method_calls[0], c)
self.assertEqual(m.method_calls[1], i)
def test_reset_return_sideeffect(self):
        m = Mock(return_value=10, side_effect=[2, 3])
m.reset_mock(return_value=True, side_effect=True)
self.assertIsInstance(m.return_value, Mock)
self.assertEqual(m.side_effect, None)
def test_reset_return(self):
        m = Mock(return_value=10, side_effect=[2, 3])
m.reset_mock(return_value=True)
self.assertIsInstance(m.return_value, Mock)
self.assertNotEqual(m.side_effect, None)
def test_reset_sideeffect(self):
m = Mock(return_value=10, side_effect=[2, 3])
m.reset_mock(side_effect=True)
self.assertEqual(m.return_value, 10)
self.assertEqual(m.side_effect, None)
def test_reset_return_with_children(self):
m = MagicMock(f=MagicMock(return_value=1))
self.assertEqual(m.f(), 1)
m.reset_mock(return_value=True)
self.assertNotEqual(m.f(), 1)
def test_reset_return_with_children_side_effect(self):
m = MagicMock(f=MagicMock(side_effect=[2, 3]))
self.assertNotEqual(m.f.side_effect, None)
m.reset_mock(side_effect=True)
self.assertEqual(m.f.side_effect, None)
def test_mock_add_spec(self):
class _One(object):
one = 1
class _Two(object):
two = 2
class Anything(object):
one = two = three = 'four'
klasses = [
Mock, MagicMock, NonCallableMock, NonCallableMagicMock
]
for Klass in list(klasses):
klasses.append(lambda K=Klass: K(spec=Anything))
klasses.append(lambda K=Klass: K(spec_set=Anything))
for Klass in klasses:
for kwargs in dict(), dict(spec_set=True):
mock = Klass()
                # no error
mock.one, mock.two, mock.three
for One, Two in [(_One, _Two), (['one'], ['two'])]:
for kwargs in dict(), dict(spec_set=True):
mock.mock_add_spec(One, **kwargs)
mock.one
self.assertRaises(
AttributeError, getattr, mock, 'two'
)
self.assertRaises(
AttributeError, getattr, mock, 'three'
)
if 'spec_set' in kwargs:
self.assertRaises(
AttributeError, setattr, mock, 'three', None
)
mock.mock_add_spec(Two, **kwargs)
self.assertRaises(
AttributeError, getattr, mock, 'one'
)
mock.two
self.assertRaises(
AttributeError, getattr, mock, 'three'
)
if 'spec_set' in kwargs:
self.assertRaises(
AttributeError, setattr, mock, 'three', None
)
# note that creating a mock, setting an instance attribute, and
# *then* setting a spec doesn't work. Not the intended use case
def test_mock_add_spec_magic_methods(self):
for Klass in MagicMock, NonCallableMagicMock:
mock = Klass()
int(mock)
mock.mock_add_spec(object)
self.assertRaises(TypeError, int, mock)
mock = Klass()
mock['foo']
            mock.__int__.return_value = 4
mock.mock_add_spec(int)
self.assertEqual(int(mock), 4)
self.assertRaises(TypeError, lambda: mock['foo'])
def test_adding_child_mock(self):
for Klass in (NonCallableMock, Mock, MagicMock, NonCallableMagicMock,
AsyncMock):
mock = Klass()
mock.foo = Mock()
mock.foo()
self.assertEqual(mock.method_calls, [call.foo()])
self.assertEqual(mock.mock_calls, [call.foo()])
mock = Klass()
mock.bar = Mock(name='name')
mock.bar()
self.assertEqual(mock.method_calls, [])
self.assertEqual(mock.mock_calls, [])
# mock with an existing _new_parent but no name
mock = Klass()
mock.baz = MagicMock()()
mock.baz()
self.assertEqual(mock.method_calls, [])
self.assertEqual(mock.mock_calls, [])
def test_adding_return_value_mock(self):
for Klass in Mock, MagicMock:
mock = Klass()
mock.return_value = MagicMock()
mock()()
self.assertEqual(mock.mock_calls, [call(), call()()])
def test_manager_mock(self):
class Foo(object):
one = 'one'
two = 'two'
manager = Mock()
p1 = patch.object(Foo, 'one')
p2 = patch.object(Foo, 'two')
mock_one = p1.start()
self.addCleanup(p1.stop)
mock_two = p2.start()
self.addCleanup(p2.stop)
manager.attach_mock(mock_one, 'one')
manager.attach_mock(mock_two, 'two')
Foo.two()
Foo.one()
self.assertEqual(manager.mock_calls, [call.two(), call.one()])
def test_magic_methods_mock_calls(self):
for Klass in Mock, MagicMock:
m = Klass()
m.__int__ = Mock(return_value=3)
m.__float__ = MagicMock(return_value=3.0)
int(m)
float(m)
self.assertEqual(m.mock_calls, [call.__int__(), call.__float__()])
self.assertEqual(m.method_calls, [])
def test_mock_open_reuse_issue_21750(self):
mocked_open = mock.mock_open(read_data='data')
f1 = mocked_open('a-name')
f1_data = f1.read()
f2 = mocked_open('another-name')
f2_data = f2.read()
self.assertEqual(f1_data, f2_data)
def test_mock_open_dunder_iter_issue(self):
# Test dunder_iter method generates the expected result and
# consumes the iterator.
mocked_open = mock.mock_open(read_data='Remarkable\nNorwegian Blue')
f1 = mocked_open('a-name')
lines = [line for line in f1]
self.assertEqual(lines[0], 'Remarkable\n')
self.assertEqual(lines[1], 'Norwegian Blue')
self.assertEqual(list(f1), [])
def test_mock_open_using_next(self):
mocked_open = mock.mock_open(read_data='1st line\n2nd line\n3rd line')
f1 = mocked_open('a-name')
line1 = next(f1)
line2 = f1.__next__()
lines = [line for line in f1]
self.assertEqual(line1, '1st line\n')
self.assertEqual(line2, '2nd line\n')
self.assertEqual(lines[0], '3rd line')
self.assertEqual(list(f1), [])
with self.assertRaises(StopIteration):
next(f1)
def test_mock_open_next_with_readline_with_return_value(self):
mopen = mock.mock_open(read_data='foo\nbarn')
mopen.return_value.readline.return_value = 'abc'
self.assertEqual('abc', next(mopen()))
def test_mock_open_write(self):
# Test exception in file writing write()
mock_namedtemp = mock.mock_open(mock.MagicMock(name='JLV'))
with mock.patch('tempfile.NamedTemporaryFile', mock_namedtemp):
mock_filehandle = mock_namedtemp.return_value
mock_write = mock_filehandle.write
mock_write.side_effect = OSError('Test 2 Error')
def attempt():
tempfile.NamedTemporaryFile().write('asd')
self.assertRaises(OSError, attempt)
def test_mock_open_alter_readline(self):
mopen = mock.mock_open(read_data='foo\nbarn')
        mopen.return_value.readline.side_effect = lambda *args: 'abc'
first = mopen().readline()
second = mopen().readline()
self.assertEqual('abc', first)
self.assertEqual('abc', second)
def test_mock_open_after_eof(self):
# read, readline and readlines should work after end of file.
_open = mock.mock_open(read_data='foo')
h = _open('bar')
h.read()
self.assertEqual('', h.read())
self.assertEqual('', h.read())
self.assertEqual('', h.readline())
self.assertEqual('', h.readline())
self.assertEqual([], h.readlines())
self.assertEqual([], h.readlines())
def test_mock_parents(self):
for Klass in Mock, MagicMock:
m = Klass()
original_repr = repr(m)
m.return_value = m
self.assertIs(m(), m)
self.assertEqual(repr(m), original_repr)
m.reset_mock()
self.assertIs(m(), m)
self.assertEqual(repr(m), original_repr)
m = Klass()
m.b = m.a
self.assertIn("name='mock.a'", repr(m.b))
self.assertIn("name='mock.a'", repr(m.a))
m.reset_mock()
self.assertIn("name='mock.a'", repr(m.b))
self.assertIn("name='mock.a'", repr(m.a))
m = Klass()
original_repr = repr(m)
m.a = m()
m.a.return_value = m
self.assertEqual(repr(m), original_repr)
self.assertEqual(repr(m.a()), original_repr)
def test_attach_mock(self):
classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
for Klass in classes:
for Klass2 in classes:
m = Klass()
m2 = Klass2(name='foo')
m.attach_mock(m2, 'bar')
self.assertIs(m.bar, m2)
self.assertIn("name='mock.bar'", repr(m2))
m.bar.baz(1)
self.assertEqual(m.mock_calls, [call.bar.baz(1)])
self.assertEqual(m.method_calls, [call.bar.baz(1)])
def test_attach_mock_return_value(self):
classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
for Klass in Mock, MagicMock:
for Klass2 in classes:
m = Klass()
m2 = Klass2(name='foo')
m.attach_mock(m2, 'return_value')
self.assertIs(m(), m2)
self.assertIn("name='mock()'", repr(m2))
m2.foo()
self.assertEqual(m.mock_calls, call().foo().call_list())
def test_attach_mock_patch_autospec(self):
parent = Mock()
with mock.patch(f'{__name__}.something', autospec=True) as mock_func:
self.assertEqual(mock_func.mock._extract_mock_name(), 'something')
parent.attach_mock(mock_func, 'child')
parent.child(1)
something(2)
mock_func(3)
parent_calls = [call.child(1), call.child(2), call.child(3)]
child_calls = [call(1), call(2), call(3)]
self.assertEqual(parent.mock_calls, parent_calls)
self.assertEqual(parent.child.mock_calls, child_calls)
self.assertEqual(something.mock_calls, child_calls)
self.assertEqual(mock_func.mock_calls, child_calls)
self.assertIn('mock.child', repr(parent.child.mock))
self.assertEqual(mock_func.mock._extract_mock_name(), 'mock.child')
def test_attach_mock_patch_autospec_signature(self):
with mock.patch(f'{__name__}.Something.meth', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_meth')
obj = Something()
obj.meth(1, 2, 3, d=4)
manager.assert_has_calls([call.attach_meth(mock.ANY, 1, 2, 3, d=4)])
obj.meth.assert_has_calls([call(mock.ANY, 1, 2, 3, d=4)])
mocked.assert_has_calls([call(mock.ANY, 1, 2, 3, d=4)])
with mock.patch(f'{__name__}.something', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_func')
something(1)
manager.assert_has_calls([call.attach_func(1)])
something.assert_has_calls([call(1)])
mocked.assert_has_calls([call(1)])
with mock.patch(f'{__name__}.Something', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_obj')
obj = Something()
obj.meth(1, 2, 3, d=4)
manager.assert_has_calls([call.attach_obj(),
call.attach_obj().meth(1, 2, 3, d=4)])
obj.meth.assert_has_calls([call(1, 2, 3, d=4)])
mocked.assert_has_calls([call(), call().meth(1, 2, 3, d=4)])
def test_attribute_deletion(self):
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
self.assertTrue(hasattr(mock, 'm'))
del mock.m
self.assertFalse(hasattr(mock, 'm'))
del mock.f
self.assertFalse(hasattr(mock, 'f'))
self.assertRaises(AttributeError, getattr, mock, 'f')
def test_mock_does_not_raise_on_repeated_attribute_deletion(self):
# bpo-20239: Assigning and deleting twice an attribute raises.
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
mock.foo = 3
self.assertTrue(hasattr(mock, 'foo'))
self.assertEqual(mock.foo, 3)
del mock.foo
self.assertFalse(hasattr(mock, 'foo'))
mock.foo = 4
self.assertTrue(hasattr(mock, 'foo'))
self.assertEqual(mock.foo, 4)
del mock.foo
self.assertFalse(hasattr(mock, 'foo'))
def test_mock_raises_when_deleting_nonexistent_attribute(self):
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
del mock.foo
with self.assertRaises(AttributeError):
del mock.foo
def test_reset_mock_does_not_raise_on_attr_deletion(self):
# bpo-31177: reset_mock should not raise AttributeError when attributes
# were deleted in a mock instance
mock = Mock()
mock.child = True
del mock.child
mock.reset_mock()
self.assertFalse(hasattr(mock, 'child'))
def test_class_assignable(self):
for mock in Mock(), MagicMock():
self.assertNotIsInstance(mock, int)
mock.__class__ = int
self.assertIsInstance(mock, int)
mock.foo
def test_name_attribute_of_call(self):
# bpo-35357: _Call should not disclose any attributes whose names
# may clash with popular ones (such as ".name")
self.assertIsNotNone(call.name)
self.assertEqual(type(call.name), _Call)
self.assertEqual(type(call.name().name), _Call)
def test_parent_attribute_of_call(self):
# bpo-35357: _Call should not disclose any attributes whose names
# may clash with popular ones (such as ".parent")
self.assertIsNotNone(call.parent)
self.assertEqual(type(call.parent), _Call)
self.assertEqual(type(call.parent().parent), _Call)
def test_parent_propagation_with_create_autospec(self):
def foo(a, b): pass
mock = Mock()
mock.child = create_autospec(foo)
mock.child(1, 2)
self.assertRaises(TypeError, mock.child, 1)
self.assertEqual(mock.mock_calls, [call.child(1, 2)])
self.assertIn('mock.child', repr(mock.child.mock))
def test_parent_propagation_with_autospec_attach_mock(self):
def foo(a, b): pass
parent = Mock()
parent.attach_mock(create_autospec(foo, name='bar'), 'child')
parent.child(1, 2)
self.assertRaises(TypeError, parent.child, 1)
self.assertEqual(parent.child.mock_calls, [call.child(1, 2)])
self.assertIn('mock.child', repr(parent.child.mock))
def test_isinstance_under_settrace(self):
# bpo-36593 : __class__ is not set for a class that has __class__
# property defined when it's used with sys.settrace(trace) set.
# Delete the module to force reimport with tracing function set
# restore the old reference later since there are other tests that are
# dependent on unittest.mock.patch. In testpatch.PatchTest
# test_patch_dict_test_prefix and test_patch_test_prefix not restoring
# causes the objects patched to go out of sync
old_patch = unittest.mock.patch
# Directly using __setattr__ on unittest.mock causes current imported
# reference to be updated. Use a lambda so that during cleanup the
# re-imported new reference is updated.
self.addCleanup(lambda patch: setattr(unittest.mock, 'patch', patch),
old_patch)
with patch.dict('sys.modules'):
del sys.modules['unittest.mock']
# This trace will stop coverage being measured ;-)
def trace(frame, event, arg): # pragma: no cover
return trace
self.addCleanup(sys.settrace, sys.gettrace())
sys.settrace(trace)
from unittest.mock import (
Mock, MagicMock, NonCallableMock, NonCallableMagicMock
)
mocks = [
Mock, MagicMock, NonCallableMock, NonCallableMagicMock, AsyncMock
]
for mock in mocks:
obj = mock(spec=Something)
self.assertIsInstance(obj, Something)
def test_bool_not_called_when_passing_spec_arg(self):
class Something:
def __init__(self):
self.obj_with_bool_func = unittest.mock.MagicMock()
obj = Something()
with unittest.mock.patch.object(obj, 'obj_with_bool_func', autospec=True): pass
self.assertEqual(obj.obj_with_bool_func.__bool__.call_count, 0)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
h3biomed/ansible | test/units/modules/network/fortimanager/test_fmgr_ha.py | 38 | 7774 | # Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
import pytest
try:
from ansible.modules.network.fortimanager import fmgr_ha
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
def load_fixtures():
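    # fixture data lives in fixtures/<this module's name>.json;
    # an empty list is returned when the fixture file cannot be read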
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') + "/{filename}.json".format(
filename=os.path.splitext(os.path.basename(__file__))[0])
try:
with open(fixture_path, "r") as fixture_file:
fixture_data = json.load(fixture_file)
except IOError:
return []
return [fixture_data]
@pytest.fixture(autouse=True)
def module_mock(mocker):
connection_class_mock = mocker.patch('ansible.module_utils.basic.AnsibleModule')
return connection_class_mock
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortimanager.fmgr_ha.Connection')
return connection_class_mock
@pytest.fixture(scope="function", params=load_fixtures())
def fixture_data(request):
func_name = request.function.__name__.replace("test_", "")
return request.param.get(func_name, None)
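# module-level handler shared by every test below; the autouse fixtures above
# stand in for AnsibleModule and the FortiManager connection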
fmg_instance = FortiManagerHandler(connection_mock, module_mock)
def test_fmgr_set_ha_mode(fixture_data, mocker):
mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request",
side_effect=fixture_data)
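    # process_request is patched to replay the canned responses loaded
    # from the fixture file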
# Fixture sets used:###########################
##################################################
# fmgr_ha_peer_sn: None
# fmgr_ha_hb_threshold: 10
# fmgr_ha_cluster_pw: fortinet
# fmgr_ha_peer_ipv6: None
# fmgr_ha_peer_status: None
# fmgr_ha_file_quota: 2048
# fmgr_ha_cluster_id: 2
# fmgr_ha_peer_ipv4: None
# fmgr_ha_hb_interval: 15
# fmgr_ha_mode: master
# mode: set
##################################################
##################################################
# fmgr_ha_peer_sn: None
# fmgr_ha_hb_threshold: 3
# fmgr_ha_cluster_pw: fortinet
# fmgr_ha_hb_interval: 5
# fmgr_ha_cluster_id: 2
# fmgr_ha_file_quota: 4096
# fmgr_ha_peer_status: None
# fmgr_ha_peer_ipv4: None
# fmgr_ha_peer_ipv6: None
# fmgr_ha_mode: slave
# mode: set
##################################################
##################################################
# fmgr_ha_peer_sn: FMG-VMTM18001881
# fmgr_ha_hb_threshold: 3
# fmgr_ha_cluster_pw: fortinet
# fmgr_ha_peer_ipv6: None
# fmgr_ha_peer_status: enable
# fmgr_ha_file_quota: 4096
# fmgr_ha_cluster_id: 2
# fmgr_ha_peer_ipv4: 10.7.220.35
# fmgr_ha_hb_interval: 5
# fmgr_ha_mode: slave
# mode: set
##################################################
##################################################
# fmgr_ha_file_quota: 4096
# fmgr_ha_cluster_pw: None
# fmgr_ha_peer_sn: None
# fmgr_ha_hb_interval: 5
# fmgr_ha_cluster_id: 1
# fmgr_ha_mode: standalone
# fmgr_ha_peer_status: None
# fmgr_ha_hb_threshold: 3
# fmgr_ha_peer_ipv4: None
# fmgr_ha_peer_ipv6: None
# mode: set
##################################################
# Test using fixture 1 #
output = fmgr_ha.fmgr_set_ha_mode(fmg_instance, fixture_data[0]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
# Test using fixture 2 #
output = fmgr_ha.fmgr_set_ha_mode(fmg_instance, fixture_data[1]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
# Test using fixture 3 #
output = fmgr_ha.fmgr_set_ha_mode(fmg_instance, fixture_data[2]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
# Test using fixture 4 #
output = fmgr_ha.fmgr_set_ha_mode(fmg_instance, fixture_data[3]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
def test_fmgr_get_ha_peer_list(fixture_data, mocker):
mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request",
side_effect=fixture_data)
# Fixture sets used:###########################
##################################################
# fmgr_ha_peer_sn: FMG-VMTM18001882
# fmgr_ha_hb_threshold: 3
# fmgr_ha_cluster_pw: None
# fmgr_ha_peer_ipv6: None
# fmgr_ha_peer_status: enable
# fmgr_ha_file_quota: 4096
# fmgr_ha_cluster_id: 1
# fmgr_ha_peer_ipv4: 10.7.220.36
# fmgr_ha_hb_interval: 5
# fmgr_ha_mode: None
# mode: get
##################################################
##################################################
# fmgr_ha_peer_sn: FMG-VMTM18001881
# fmgr_ha_hb_threshold: 3
# fmgr_ha_cluster_pw: fortinet
# fmgr_ha_hb_interval: 5
# fmgr_ha_cluster_id: 2
# fmgr_ha_file_quota: 4096
# fmgr_ha_peer_status: enable
# fmgr_ha_peer_ipv4: 10.7.220.35
# fmgr_ha_peer_ipv6: None
# fmgr_ha_mode: slave
# mode: get
##################################################
# Test using fixture 1 #
output = fmgr_ha.fmgr_get_ha_peer_list(fmg_instance)
assert isinstance(output['raw_response'], list) is True
# Test using fixture 2 #
output = fmgr_ha.fmgr_get_ha_peer_list(fmg_instance)
assert isinstance(output['raw_response'], list) is True
def test_fmgr_set_ha_peer(fixture_data, mocker):
mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request",
side_effect=fixture_data)
# Fixture sets used:###########################
##################################################
# fmgr_ha_peer_sn: FMG-VMTM18001882
# next_peer_id: 2
# fmgr_ha_hb_threshold: 3
# fmgr_ha_cluster_pw: None
# fmgr_ha_peer_ipv6: None
# fmgr_ha_peer_status: enable
# fmgr_ha_file_quota: 4096
# fmgr_ha_cluster_id: 1
# peer_id: 1
# fmgr_ha_peer_ipv4: 10.7.220.36
# fmgr_ha_hb_interval: 5
# fmgr_ha_mode: None
# mode: set
##################################################
##################################################
# fmgr_ha_peer_sn: FMG-VMTM18001881
# next_peer_id: 1
# fmgr_ha_hb_threshold: 3
# fmgr_ha_cluster_pw: fortinet
# fmgr_ha_hb_interval: 5
# fmgr_ha_cluster_id: 2
# fmgr_ha_file_quota: 4096
# fmgr_ha_peer_status: enable
# peer_id: 1
# fmgr_ha_peer_ipv4: 10.7.220.35
# fmgr_ha_peer_ipv6: None
# fmgr_ha_mode: slave
# mode: set
##################################################
# Test using fixture 1 #
output = fmgr_ha.fmgr_set_ha_peer(fmg_instance, fixture_data[0]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
# Test using fixture 2 #
output = fmgr_ha.fmgr_set_ha_peer(fmg_instance, fixture_data[1]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
| gpl-3.0 |
GuilhermeGSousa/ardupilot | Tools/scripts/build_examples.py | 74 | 1075 | #!/usr/bin/env python
# useful script to test the build of all example code
# This helps when doing large merges
# Peter Barker, June 2016
# based on build_examples.sh, Andrew Tridgell, November 2012
import os
import sys
import optparse
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '../autotest/pysim'))
import util
class BuildExamples():
def __init__(self, targets=[], clean=False):
print("init")
self.targets = targets
self.clean = clean
def run(self):
for target in self.targets:
util.build_examples(target, clean=self.clean)
if __name__ == '__main__':
parser = optparse.OptionParser("build_examples.py")
parser.add_option("--target", type='string', default=['navio','px4-v2'], help='list of targets for which to build examples', action='append')
parser.add_option("--clean", action='store_true', default=False, help='clean build')
opts, args = parser.parse_args()
buildexamples = BuildExamples(targets=opts.target, clean=opts.clean)
buildexamples.run()
| gpl-3.0 |
TheTacoScott/GoAtThrottleUp | ServerRelay/cherrypy/lib/gctools.py | 40 | 7396 | import gc
import inspect
import os
import sys
import time
try:
import objgraph
except ImportError:
objgraph = None
import cherrypy
from cherrypy import _cprequest, _cpwsgi
from cherrypy.process.plugins import SimplePlugin
class ReferrerTree(object):
"""An object which gathers all referrers of an object to a given depth."""
peek_length = 40
def __init__(self, ignore=None, maxdepth=2, maxparents=10):
self.ignore = ignore or []
self.ignore.append(inspect.currentframe().f_back)
self.maxdepth = maxdepth
self.maxparents = maxparents
def ascend(self, obj, depth=1):
"""Return a nested list containing referrers of the given object."""
depth += 1
parents = []
# Gather all referrers in one step to minimize
# cascading references due to repr() logic.
refs = gc.get_referrers(obj)
self.ignore.append(refs)
if len(refs) > self.maxparents:
return [("[%s referrers]" % len(refs), [])]
try:
ascendcode = self.ascend.__code__
except AttributeError:
ascendcode = self.ascend.im_func.func_code
for parent in refs:
if inspect.isframe(parent) and parent.f_code is ascendcode:
continue
if parent in self.ignore:
continue
if depth <= self.maxdepth:
parents.append((parent, self.ascend(parent, depth)))
else:
parents.append((parent, []))
return parents
def peek(self, s):
"""Return s, restricted to a sane length."""
if len(s) > (self.peek_length + 3):
half = self.peek_length // 2
return s[:half] + '...' + s[-half:]
else:
return s
def _format(self, obj, descend=True):
"""Return a string representation of a single object."""
if inspect.isframe(obj):
filename, lineno, func, context, index = inspect.getframeinfo(obj)
return "<frame of function '%s'>" % func
if not descend:
return self.peek(repr(obj))
if isinstance(obj, dict):
return "{" + ", ".join(["%s: %s" % (self._format(k, descend=False),
self._format(v, descend=False))
for k, v in obj.items()]) + "}"
elif isinstance(obj, list):
return "[" + ", ".join([self._format(item, descend=False)
for item in obj]) + "]"
elif isinstance(obj, tuple):
return "(" + ", ".join([self._format(item, descend=False)
for item in obj]) + ")"
r = self.peek(repr(obj))
if isinstance(obj, (str, int, float)):
return r
return "%s: %s" % (type(obj), r)
def format(self, tree):
"""Return a list of string reprs from a nested list of referrers."""
output = []
def ascend(branch, depth=1):
for parent, grandparents in branch:
output.append((" " * depth) + self._format(parent))
if grandparents:
ascend(grandparents, depth + 1)
ascend(tree)
return output
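# Illustrative usage (an assumption, not part of the original module): when hunting a leak,
# build the tree for a suspect object and print the formatted referrer chains, e.g.
#   tree = ReferrerTree(maxdepth=2, maxparents=10)
#   print("\n".join(tree.format(tree.ascend(suspect_obj))))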
def get_instances(cls):
return [x for x in gc.get_objects() if isinstance(x, cls)]
class RequestCounter(SimplePlugin):
def start(self):
self.count = 0
def before_request(self):
self.count += 1
def after_request(self):
        self.count -= 1
request_counter = RequestCounter(cherrypy.engine)
request_counter.subscribe()
def get_context(obj):
if isinstance(obj, _cprequest.Request):
return "path=%s;stage=%s" % (obj.path_info, obj.stage)
elif isinstance(obj, _cprequest.Response):
return "status=%s" % obj.status
elif isinstance(obj, _cpwsgi.AppResponse):
return "PATH_INFO=%s" % obj.environ.get('PATH_INFO', '')
elif hasattr(obj, "tb_lineno"):
return "tb_lineno=%s" % obj.tb_lineno
return ""
class GCRoot(object):
"""A CherryPy page handler for testing reference leaks."""
classes = [(_cprequest.Request, 2, 2,
"Should be 1 in this request thread and 1 in the main thread."),
(_cprequest.Response, 2, 2,
"Should be 1 in this request thread and 1 in the main thread."),
(_cpwsgi.AppResponse, 1, 1,
"Should be 1 in this request thread only."),
]
def index(self):
return "Hello, world!"
index.exposed = True
def stats(self):
output = ["Statistics:"]
for trial in range(10):
if request_counter.count > 0:
break
time.sleep(0.5)
else:
output.append("\nNot all requests closed properly.")
# gc_collect isn't perfectly synchronous, because it may
# break reference cycles that then take time to fully
# finalize. Call it thrice and hope for the best.
gc.collect()
gc.collect()
unreachable = gc.collect()
if unreachable:
if objgraph is not None:
final = objgraph.by_type('Nondestructible')
if final:
objgraph.show_backrefs(final, filename='finalizers.png')
trash = {}
for x in gc.garbage:
trash[type(x)] = trash.get(type(x), 0) + 1
if trash:
output.insert(0, "\n%s unreachable objects:" % unreachable)
trash = [(v, k) for k, v in trash.items()]
trash.sort()
for pair in trash:
output.append(" " + repr(pair))
# Check declared classes to verify uncollected instances.
# These don't have to be part of a cycle; they can be
# any objects that have unanticipated referrers that keep
# them from being collected.
allobjs = {}
for cls, minobj, maxobj, msg in self.classes:
allobjs[cls] = get_instances(cls)
for cls, minobj, maxobj, msg in self.classes:
objs = allobjs[cls]
lenobj = len(objs)
if lenobj < minobj or lenobj > maxobj:
if minobj == maxobj:
output.append(
"\nExpected %s %r references, got %s." %
(minobj, cls, lenobj))
else:
output.append(
"\nExpected %s to %s %r references, got %s." %
(minobj, maxobj, cls, lenobj))
for obj in objs:
if objgraph is not None:
ig = [id(objs), id(inspect.currentframe())]
fname = "graph_%s_%s.png" % (cls.__name__, id(obj))
objgraph.show_backrefs(
obj, extra_ignore=ig, max_depth=4, too_many=20,
filename=fname, extra_info=get_context)
output.append("\nReferrers for %s (refcount=%s):" %
(repr(obj), sys.getrefcount(obj)))
t = ReferrerTree(ignore=[objs], maxdepth=3)
tree = t.ascend(obj)
output.extend(t.format(tree))
return "\n".join(output)
stats.exposed = True
| mit |
moijes12/oh-mainline | vendor/packages/kombu/kombu/transport/django/__init__.py | 20 | 2175 | """Kombu transport using the Django database as a message store."""
from __future__ import absolute_import
from anyjson import loads, dumps
from django.conf import settings
from django.core import exceptions as errors
from kombu.five import Empty
from kombu.transport import virtual
from kombu.utils.encoding import bytes_to_str
from .models import Queue
try:
from django.apps import AppConfig
except ImportError: # pragma: no cover
pass
else:
class KombuAppConfig(AppConfig):
name = 'kombu.transport.django'
label = name.replace('.', '_')
verbose_name = 'Message queue'
default_app_config = 'kombu.transport.django.KombuAppConfig'
VERSION = (1, 0, 0)
__version__ = '.'.join(map(str, VERSION))
POLLING_INTERVAL = getattr(settings, 'KOMBU_POLLING_INTERVAL',
getattr(settings, 'DJKOMBU_POLLING_INTERVAL', 5.0))
class Channel(virtual.Channel):
def _new_queue(self, queue, **kwargs):
Queue.objects.get_or_create(name=queue)
def _put(self, queue, message, **kwargs):
Queue.objects.publish(queue, dumps(message))
def basic_consume(self, queue, *args, **kwargs):
qinfo = self.state.bindings[queue]
exchange = qinfo[0]
if self.typeof(exchange).type == 'fanout':
return
super(Channel, self).basic_consume(queue, *args, **kwargs)
def _get(self, queue):
m = Queue.objects.fetch(queue)
if m:
return loads(bytes_to_str(m))
raise Empty()
def _size(self, queue):
return Queue.objects.size(queue)
def _purge(self, queue):
return Queue.objects.purge(queue)
def refresh_connection(self):
from django import db
db.close_connection()
class Transport(virtual.Transport):
Channel = Channel
default_port = 0
polling_interval = POLLING_INTERVAL
channel_errors = (
virtual.Transport.channel_errors + (
errors.ObjectDoesNotExist, errors.MultipleObjectsReturned)
)
driver_type = 'sql'
driver_name = 'django'
def driver_version(self):
import django
return '.'.join(map(str, django.VERSION))
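# Illustrative usage (assumed typical kombu setup, not part of this module): with
# 'kombu.transport.django' added to INSTALLED_APPS and its tables created, the transport
# is selected through a 'django://' broker URL, e.g.
#   from kombu import Connection
#   with Connection('django://') as conn:
#       conn.SimpleQueue('tasks').put({'hello': 'world'})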
| agpl-3.0 |
fangxingli/hue | apps/oozie/src/oozie/migrations/0023_auto__add_field_node_data__add_field_job_data.py | 37 | 26876 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Node.data'
db.add_column('oozie_node', 'data',
self.gf('django.db.models.fields.TextField')(default='{}', blank=True),
keep_default=False)
# Adding field 'Job.data'
db.add_column('oozie_job', 'data',
self.gf('django.db.models.fields.TextField')(default='{}', blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Node.data'
db.delete_column('oozie_node', 'data')
# Deleting field 'Job.data'
db.delete_column('oozie_job', 'data')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'desktop.document': {
'Meta': {'object_name': 'Document'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'extra': ('django.db.models.fields.TextField', [], {'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'default': "''"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'doc_owner'", 'to': "orm['auth.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['desktop.DocumentTag']", 'db_index': 'True', 'symmetrical': 'False'}),
'version': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
},
'desktop.documenttag': {
'Meta': {'object_name': 'DocumentTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'tag': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'oozie.bundle': {
'Meta': {'object_name': 'Bundle', '_ormbases': ['oozie.Job']},
'coordinators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['oozie.Coordinator']", 'through': "orm['oozie.BundledCoordinator']", 'symmetrical': 'False'}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'kick_off_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 17, 0, 0)'})
},
'oozie.bundledcoordinator': {
'Meta': {'object_name': 'BundledCoordinator'},
'bundle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Bundle']"}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''})
},
'oozie.coordinator': {
'Meta': {'object_name': 'Coordinator', '_ormbases': ['oozie.Job']},
'concurrency': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 20, 0, 0)'}),
'execution': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 17, 0, 0)'}),
'throttle': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']", 'null': 'True'})
},
'oozie.datainput': {
'Meta': {'object_name': 'DataInput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataoutput': {
'Meta': {'object_name': 'DataOutput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataset': {
'Meta': {'object_name': 'Dataset'},
'advanced_end_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128', 'blank': 'True'}),
'advanced_start_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128'}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'done_flag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_choice': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 17, 0, 0)'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'uri': ('django.db.models.fields.CharField', [], {'default': "'/data/${YEAR}${MONTH}${DAY}'", 'max_length': '1024'})
},
'oozie.decision': {
'Meta': {'object_name': 'Decision'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.decisionend': {
'Meta': {'object_name': 'DecisionEnd'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.distcp': {
'Meta': {'object_name': 'DistCp'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.email': {
'Meta': {'object_name': 'Email'},
'body': ('django.db.models.fields.TextField', [], {'default': "''"}),
'cc': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'subject': ('django.db.models.fields.TextField', [], {'default': "''"}),
'to': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.end': {
'Meta': {'object_name': 'End'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fork': {
'Meta': {'object_name': 'Fork'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fs': {
'Meta': {'object_name': 'Fs'},
'chmods': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'deletes': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'mkdirs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'moves': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'touchzs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'})
},
'oozie.generic': {
'Meta': {'object_name': 'Generic'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.history': {
'Meta': {'object_name': 'History'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Job']"}),
'oozie_job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'properties': ('django.db.models.fields.TextField', [], {}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'oozie.hive': {
'Meta': {'object_name': 'Hive'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.hive.defaults","value":"hive-site.xml"}]\''}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.java': {
'Meta': {'object_name': 'Java'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'args': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.job': {
'Meta': {'object_name': 'Job'},
'data': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'deployment_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
'schema_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'oozie.join': {
'Meta': {'object_name': 'Join'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.kill': {
'Meta': {'object_name': 'Kill'},
'message': ('django.db.models.fields.CharField', [], {'default': "'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]'", 'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.link': {
'Meta': {'object_name': 'Link'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_node'", 'to': "orm['oozie.Node']"}),
'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_node'", 'to': "orm['oozie.Node']"})
},
'oozie.mapreduce': {
'Meta': {'object_name': 'Mapreduce'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.node': {
'Meta': {'object_name': 'Node'},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': "orm['oozie.Link']", 'to': "orm['oozie.Node']"}),
'data': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'node_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.pig': {
'Meta': {'object_name': 'Pig'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.shell': {
'Meta': {'object_name': 'Shell'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.sqoop': {
'Meta': {'object_name': 'Sqoop'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'oozie.ssh': {
'Meta': {'object_name': 'Ssh'},
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'oozie.start': {
'Meta': {'object_name': 'Start'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.streaming': {
'Meta': {'object_name': 'Streaming'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'oozie.subworkflow': {
'Meta': {'object_name': 'SubWorkflow'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'propagate_configuration': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sub_workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.workflow': {
'Meta': {'object_name': 'Workflow', '_ormbases': ['oozie.Job']},
'end': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'end_workflow'", 'null': 'True', 'to': "orm['oozie.End']"}),
'is_single': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'start': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'start_workflow'", 'null': 'True', 'to': "orm['oozie.Start']"})
}
}
    complete_apps = ['oozie']
| apache-2.0 |
jbassen/edx-platform | lms/djangoapps/ccx/tests/test_models.py | 45 | 8622 | """
tests for the models
"""
from datetime import datetime, timedelta
from django.utils.timezone import UTC
from mock import patch
from nose.plugins.attrib import attr
from student.roles import CourseCcxCoachRole # pylint: disable=import-error
from student.tests.factories import ( # pylint: disable=import-error
AdminFactory,
)
from util.tests.test_date_utils import fake_ugettext
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import (
CourseFactory,
check_mongo_calls
)
from .factories import (
CcxFactory,
)
from ..overrides import override_field_for_ccx
@attr('shard_1')
class TestCCX(ModuleStoreTestCase):
"""Unit tests for the CustomCourseForEdX model
"""
def setUp(self):
"""common setup for all tests"""
super(TestCCX, self).setUp()
self.course = course = CourseFactory.create()
coach = AdminFactory.create()
role = CourseCcxCoachRole(course.id)
role.add_users(coach)
self.ccx = CcxFactory(course_id=course.id, coach=coach)
def set_ccx_override(self, field, value):
"""Create a field override for the test CCX on <field> with <value>"""
override_field_for_ccx(self.ccx, self.course, field, value)
def test_ccx_course_is_correct_course(self):
"""verify that the course property of a ccx returns the right course"""
expected = self.course
actual = self.ccx.course
self.assertEqual(expected, actual)
def test_ccx_course_caching(self):
"""verify that caching the propery works to limit queries"""
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.course # pylint: disable=pointless-statement
with check_mongo_calls(0):
self.ccx.course # pylint: disable=pointless-statement
def test_ccx_start_is_correct(self):
"""verify that the start datetime for a ccx is correctly retrieved
Note that after setting the start field override microseconds are
truncated, so we can't do a direct comparison between before and after.
        For this reason we test the difference between the two and make sure it
        is less than one second.
"""
expected = datetime.now(UTC())
self.set_ccx_override('start', expected)
actual = self.ccx.start # pylint: disable=no-member
diff = expected - actual
self.assertTrue(abs(diff.total_seconds()) < 1)
def test_ccx_start_caching(self):
"""verify that caching the start property works to limit queries"""
now = datetime.now(UTC())
self.set_ccx_override('start', now)
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.start # pylint: disable=pointless-statement, no-member
with check_mongo_calls(0):
self.ccx.start # pylint: disable=pointless-statement, no-member
def test_ccx_due_without_override(self):
"""verify that due returns None when the field has not been set"""
actual = self.ccx.due # pylint: disable=no-member
self.assertIsNone(actual)
def test_ccx_due_is_correct(self):
"""verify that the due datetime for a ccx is correctly retrieved"""
expected = datetime.now(UTC())
self.set_ccx_override('due', expected)
actual = self.ccx.due # pylint: disable=no-member
diff = expected - actual
self.assertTrue(abs(diff.total_seconds()) < 1)
def test_ccx_due_caching(self):
"""verify that caching the due property works to limit queries"""
expected = datetime.now(UTC())
self.set_ccx_override('due', expected)
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.due # pylint: disable=pointless-statement, no-member
with check_mongo_calls(0):
self.ccx.due # pylint: disable=pointless-statement, no-member
def test_ccx_has_started(self):
"""verify that a ccx marked as starting yesterday has started"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now - delta
self.set_ccx_override('start', then)
self.assertTrue(self.ccx.has_started()) # pylint: disable=no-member
def test_ccx_has_not_started(self):
"""verify that a ccx marked as starting tomorrow has not started"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now + delta
self.set_ccx_override('start', then)
self.assertFalse(self.ccx.has_started()) # pylint: disable=no-member
def test_ccx_has_ended(self):
"""verify that a ccx that has a due date in the past has ended"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now - delta
self.set_ccx_override('due', then)
self.assertTrue(self.ccx.has_ended()) # pylint: disable=no-member
def test_ccx_has_not_ended(self):
"""verify that a ccx that has a due date in the future has not eneded
"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now + delta
self.set_ccx_override('due', then)
self.assertFalse(self.ccx.has_ended()) # pylint: disable=no-member
def test_ccx_without_due_date_has_not_ended(self):
"""verify that a ccx without a due date has not ended"""
self.assertFalse(self.ccx.has_ended()) # pylint: disable=no-member
# ensure that the expected localized format will be found by the i18n
# service
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "%b %d, %Y",
}))
def test_start_datetime_short_date(self):
"""verify that the start date for a ccx formats properly by default"""
start = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
expected = "Jan 01, 2015"
self.set_ccx_override('start', start)
actual = self.ccx.start_datetime_text() # pylint: disable=no-member
self.assertEqual(expected, actual)
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
}))
def test_start_datetime_date_time_format(self):
"""verify that the DATE_TIME format also works as expected"""
start = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
expected = "Jan 01, 2015 at 12:00 UTC"
self.set_ccx_override('start', start)
actual = self.ccx.start_datetime_text('DATE_TIME') # pylint: disable=no-member
self.assertEqual(expected, actual)
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "%b %d, %Y",
}))
def test_end_datetime_short_date(self):
"""verify that the end date for a ccx formats properly by default"""
end = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
expected = "Jan 01, 2015"
self.set_ccx_override('due', end)
actual = self.ccx.end_datetime_text() # pylint: disable=no-member
self.assertEqual(expected, actual)
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
}))
def test_end_datetime_date_time_format(self):
"""verify that the DATE_TIME format also works as expected"""
end = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
expected = "Jan 01, 2015 at 12:00 UTC"
self.set_ccx_override('due', end)
actual = self.ccx.end_datetime_text('DATE_TIME') # pylint: disable=no-member
self.assertEqual(expected, actual)
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
}))
def test_end_datetime_no_due_date(self):
"""verify that without a due date, the end date is an empty string"""
expected = ''
actual = self.ccx.end_datetime_text() # pylint: disable=no-member
self.assertEqual(expected, actual)
actual = self.ccx.end_datetime_text('DATE_TIME') # pylint: disable=no-member
self.assertEqual(expected, actual)
| agpl-3.0 |
poulpito/Flexget | flexget/utils/log.py | 9 | 2557 | """Logging utilities"""
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import hashlib
from datetime import datetime, timedelta
from sqlalchemy import Column, Integer, String, DateTime, Index
from flexget.utils.database import with_session
from flexget import db_schema
from flexget import logger as f_logger
from flexget.utils.sqlalchemy_utils import table_schema
from flexget.event import event
log = logging.getLogger('util.log')
Base = db_schema.versioned_base('log_once', 0)
@db_schema.upgrade('log_once')
def upgrade(ver, session):
if ver is None:
log.info('Adding index to md5sum column of log_once table.')
table = table_schema('log_once', session)
Index('log_once_md5sum', table.c.md5sum, unique=True).create()
ver = 0
return ver
class LogMessage(Base):
"""Declarative"""
__tablename__ = 'log_once'
id = Column(Integer, primary_key=True)
md5sum = Column(String, unique=True)
    added = Column(DateTime, default=datetime.now)  # callable, so the timestamp is evaluated at insert time
def __init__(self, md5sum):
self.md5sum = md5sum
def __repr__(self):
return "<LogMessage('%s')>" % self.md5sum
@event('manager.db_cleanup')
def purge(manager, session):
"""Purge old messages from database"""
old = datetime.now() - timedelta(days=365)
result = session.query(LogMessage).filter(LogMessage.added < old).delete()
if result:
log.verbose('Purged %s entries from log_once table.' % result)
@with_session
def log_once(message, logger=logging.getLogger('log_once'), once_level=logging.INFO, suppressed_level=f_logger.VERBOSE,
session=None):
"""
    Log message only once using the given logger. Returns False if logging was suppressed.
When suppressed, `suppressed_level` level is still logged.
"""
# If there is no active manager, don't access the db
from flexget.manager import manager
if not manager:
log.warning('DB not initialized. log_once will not work properly.')
logger.log(once_level, message)
return
digest = hashlib.md5()
digest.update(message.encode('latin1', 'replace')) # ticket:250
md5sum = digest.hexdigest()
# abort if this has already been logged
if session.query(LogMessage).filter_by(md5sum=md5sum).first():
logger.log(suppressed_level, message)
return False
row = LogMessage(md5sum)
session.add(row)
logger.log(once_level, message)
return True
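# Illustrative usage (assumed): the first call for a given message is logged at `once_level`,
# repeats are only logged at `suppressed_level` and return False, e.g.
#   log_once('Site xyz unreachable, skipping', logger=log)   # logged at INFO, returns True
#   log_once('Site xyz unreachable, skipping', logger=log)   # logged at VERBOSE only, returns False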
| mit |
iglpdc/nipype | nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py | 12 | 1372 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from .....testing import assert_equal
from ..gtract import gtractResampleCodeImage
def test_gtractResampleCodeImage_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputCodeVolume=dict(argstr='--inputCodeVolume %s',
),
inputReferenceVolume=dict(argstr='--inputReferenceVolume %s',
),
inputTransform=dict(argstr='--inputTransform %s',
),
numberOfThreads=dict(argstr='--numberOfThreads %d',
),
outputVolume=dict(argstr='--outputVolume %s',
hash_files=False,
),
terminal_output=dict(nohash=True,
),
transformType=dict(argstr='--transformType %s',
),
)
inputs = gtractResampleCodeImage.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_gtractResampleCodeImage_outputs():
output_map = dict(outputVolume=dict(),
)
outputs = gtractResampleCodeImage.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
rosswhitfield/mantid | scripts/BilbyCustomFunctions_Reduction.py | 3 | 17128 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import csv
import math
from itertools import product
import sys
from mantid.simpleapi import MoveInstrumentComponent, CropWorkspace
# values for att_pos 2 and 4 shall not make sense; those attenuators have not been in use that time
attenuation_correction_pre_2016 = {1.0: 0.007655, 2.0: -1.0, 3.0: 1.0, 4.0: -1.0, 5.0: 0.005886}
attenuation_correction_post_2016 = {1.0: 1.0, 2.0: 0.00955, 3.0: 0.005886, 4.0: 0.00290, 5.0: 0.00062}
##############################################################################
def string_boolean(line):
""" Convert string to boolean; needed to read "true" and "false" from the csv Data reduction settings table """
if line == 'false':
bool_string = False
elif line == 'true':
bool_string = True
else:
print("Check value of {}".format(line))
print("It must be either True or False")
sys.exit()
return bool_string
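# For example, string_boolean('true') returns True and string_boolean('false') returns False;
# any other value prints a message and aborts the reduction.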
##############################################################################
def read_convert_to_float(array_strings):
""" Needed to convert binning parameters from the csv file into the float numbers """
array = [x.strip() for x in array_strings.split(',')]
array = [float(x) for x in array]
if (len(array) != 3):
print("Check input parameters; binning parameters shall be given in a format left_value, step, right_value.")
sys.exit()
return array
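# For example, read_convert_to_float("2.0, 0.05, 18.0") returns [2.0, 0.05, 18.0]; anything
# other than exactly three comma-separated numbers aborts the reduction.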
##############################################################################
def files_list_reduce(filename):
""" Creat array of input reduction settings """
parameters = []
with open(filename) as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
if row['index'] == '':
continue
if row['index'] == 'END':
break
parameters.append(row)
return parameters
##############################################################################
def evaluate_files_list(numbers):
""" Needed for FilesToReduce, see below """
expanded = []
for number in numbers.split(","):
if "-" in number:
start, end = number.split("-")
nrs = range(int(start), int(end) + 1)
expanded.extend(nrs)
else:
expanded.append(int(number))
return expanded
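# For example, evaluate_files_list("1-3,7") expands to [1, 2, 3, 7]; these indices are then
# matched against the 'index' column of the reduction settings table by files_to_reduce below.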
##############################################################################
def files_to_reduce(parameters, evaluate_files):
""" Create list of the files to reduce """
files_to_reduce = []
if len(evaluate_files) == 0:
files_to_reduce.extend(parameters)
else:
evaluate_files_l = evaluate_files_list(evaluate_files) # call function for retrieve the IDs list
for parameter in parameters:
if int(parameter['index']) in evaluate_files_l:
files_to_reduce.append(parameter)
return files_to_reduce
##############################################################################
def strip_NaNs(output_workspace, base_output_name):
""" Strip NaNs from the 1D OutputWorkspace """ # add isinf
data = output_workspace.readY(0)
start_index = next((index for index in range(len(data)) if not math.isnan(data[index])), None)
end_index = next((index for index in range(len(data)-1, -1, -1) if not math.isnan(data[index])), None)
q_values = output_workspace.readX(0)
start_q = q_values[start_index]
end_q = q_values[end_index]
CropWorkspace(InputWorkspace=output_workspace, XMin=start_q, XMax=end_q, OutputWorkspace=base_output_name)
return base_output_name
##############################################################################
def output_header(external_mode, used_wl_range, ws_sample, sample_thickness,
sample_transmission, empty_beam_transmission, blocked_beam, sample_mask, transmission_mask):
""" Creates header to be recorded into the output file """
header = []
wl_row = 'Velocity selector set wavelength: ' + str(format(float(ws_sample.run().getProperty("wavelength").value), '.3f')) + ' Angstrom'
header.append(wl_row)
if (external_mode):
choppers = 'Double choppers pair: ' + str(int(ws_sample.run().getProperty("master1_chopper_id").value)) + ' and ' \
+ str(int(ws_sample.run().getProperty("master2_chopper_id").value))
header.append(choppers)
frequency = 'Data defining pulse frequency (equal or slower than the Double pair frequency): ' \
+ str(format(1e6/float(ws_sample.run().getProperty("period").value), '.2f')) + ' Hz'
header.append(frequency)
wavelength_range = 'Wavelength range used for the data reduction: ' + str(format(float(used_wl_range[0]), '.2f')) + ' to ' \
+ str(format(float(used_wl_range[2]), '.2f')) + ' Angstrom'
header.append(wavelength_range)
resolution_value = float(used_wl_range[1])
if resolution_value < 0:
resolution = 'Resolution used for calculation of dQ: ' + str(format((-100 * resolution_value), '.2f')) + '%'
else:
resolution = 'Resolution taken as wavelength binning;' + '\n' + 'the value is set to ' + \
str(format(resolution_value, '.2f')) + '%' # on linear scale, hence the dQ calculation is meaningless'
header.append(resolution)
else:
resolution = "Nominal resolution: 10%"
header.append(resolution)
l1 = 'L1: ' + str(format(float(ws_sample.run().getProperty("L1").value), '.3f')) + ' m'
header.append(l1)
rear_l2_row = 'L2 to rear detector: ' + str(format(float(ws_sample.run().getProperty("L2_det_value").value), '.3f')) + ' m'
header.append(rear_l2_row)
curtain_ud_l2_row = 'L2 to horizontal curtains: ' \
+ str(format(float(ws_sample.run().getProperty("L2_curtainu_value").value), '.3f')) + ' m'
header.append(curtain_ud_l2_row)
curtain_lr_l2_row = 'L2 to vertical curtains: ' \
+ str(format(float(ws_sample.run().getProperty("L2_curtainr_value").value), '.3f')) + ' m'
header.append(curtain_lr_l2_row)
curtain_l_separation_row = 'Left curtain separation: ' \
+ str(format(float(ws_sample.run().getProperty("D_curtainl_value").value), '.3f')) + ' m'
header.append(curtain_l_separation_row)
curtain_r_separation_row = 'Right curtain separation: ' \
+ str(format(float(ws_sample.run().getProperty("D_curtainr_value").value), '.3f')) + ' m'
header.append(curtain_r_separation_row)
curtain_u_separation_row = 'Top curtain separation: ' \
+ str(format(float(ws_sample.run().getProperty("D_curtainu_value").value), '.3f')) + ' m'
header.append(curtain_u_separation_row)
curtain_d_separation_row = 'Bottom curtain separation: ' \
+ str(format(float(ws_sample.run().getProperty("D_curtaind_value").value), '.3f')) + ' m'
header.append(curtain_d_separation_row)
apertures = 'Source and sample apertures diameters: ' \
+ str(format(float(ws_sample.run().getProperty("source_aperture").value), '.1f')) + ' mm and ' \
+ str(format(float(ws_sample.run().getProperty("sample_aperture").value), '.1f')) + ' mm'
header.append(apertures)
sample_related_details = 'Sample thickness and transmission: ' \
+ format(float(sample_thickness), '.2f') + ' cm and ' + sample_transmission
header.append(sample_related_details)
corrections_related_details = 'Empty beam transmission and blocked beam scattering: ' \
+ empty_beam_transmission + ' and ' + blocked_beam
header.append(corrections_related_details)
    masks = 'Sample and transmission masks: ' + sample_mask + ' and ' + transmission_mask + '\n'
header.append(masks)
return header
##############################################################################
def get_pixel_size(): # reads current IDF and get pixelsize from there
""" To get pixel size for Bilby detectors from the Bilby_Definition.xml file """
from mantid.api import ExperimentInfo
import xml.etree.cElementTree as ET
currentIDF = ExperimentInfo.getInstrumentFilename("Bilby")
# print currentIDF
tree = ET.parse(currentIDF)
for node in tree.iter():
if node.tag == "{http://www.mantidproject.org/IDF/1.0}height":
name = node.attrib.get('val')
break
pixelsize = float(name)
return pixelsize
##############################################################################
def read_csv(filename):
""" Read cvs... """
parameters = []
with open(filename) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
parameters.append(row)
return parameters
##############################################################################
def attenuation_correction(att_pos, data_before_May_2016):
""" Bilby has four attenuators; before May 2016 there were only two.
    Values of the attenuators are hard-coded here and used for the I(Q) scaling in Q1D """
if (data_before_May_2016):
print("You stated data have been collected before May, 2016, i.e. using old attenuators. Please double check.")
if (att_pos == 2.0 or att_pos == 4.0):
print(
"Wrong attenuators value; Either data have been collected after May, 2016, or something is wrong with hdf file")
sys.exit()
scale = attenuation_correction_pre_2016[att_pos]
else:
scale = attenuation_correction_post_2016[att_pos]
return scale
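# For example, attenuation_correction(3.0, False) returns 0.005886 from the post-2016 table,
# while attenuation_correction(2.0, True) aborts because attenuator position 2 was not in use
# before May 2016.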
##############################################################################
def wavelengh_slices(wavelength_intervals, binning_wavelength_ini, wav_delta):
""" This function defined number of wavelenth slices and creates array of the binning parameters for each slice """
binning_wavelength = []
if not wavelength_intervals:
binning_wavelength.append(binning_wavelength_ini)
n = 1 # in this case, number of wavelength range intervals always will be 1
else: # reducing data on a several intervals of wavelengths
wav1 = float(binning_wavelength_ini[0])
wv_ini_step = float(binning_wavelength_ini[1])
wav2 = float(binning_wavelength_ini[2])
        # check if chosen wavelength interval is feasible
if (wav1 + wav_delta) > wav2:
raise ValueError("wav_delta is too large for the upper range of wavelength")
        if math.fmod((wav2 - wav1), wav_delta) == 0.0: # if remainder is 0
n = (wav2 - wav1)/wav_delta
        else: # if remainder is greater than 0, truncate the maximum wavelength in the range
n = math.floor((wav2 - wav1)/wav_delta)
max_wave_length = wav1 + n*wav_delta
print ('\n WARNING: because of your set-up, maximum wavelength to consider \
for partial reduction is only %4.2f \n' % max_wave_length)
# number of wavelength range intervals
n = int(n)
binning_wavelength_interm = []
binning_wavelength_interm_1 = wv_ini_step # binning step is always the same
for i in range(n):
binning_wavelength_interm_0 = wav1 + wav_delta * i # left range
binning_wavelength_interm_2 = binning_wavelength_interm_0 + wav_delta # right range
binning_wavelength_interm = [binning_wavelength_interm_0, binning_wavelength_interm_1, binning_wavelength_interm_2]
binning_wavelength.append(binning_wavelength_interm)
binning_wavelength.append(binning_wavelength_ini) # reduce data on the full range
n = n + 1 # to include full range
return binning_wavelength, n
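# Worked example (assumed inputs): with wavelength_intervals=True,
# binning_wavelength_ini = [2.0, 0.1, 8.0] and wav_delta = 2.0, the function returns n = 4 and
# the slices [2.0, 0.1, 4.0], [4.0, 0.1, 6.0], [6.0, 0.1, 8.0] plus the full range
# [2.0, 0.1, 8.0] as the last entry.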
##############################################################################
def correction_tubes_shift(ws_to_correct, path_to_shifts_file):
""" This function moves each tube and then rear panels as a whole as per numbers recorded in the path_to_shifts_file csv file.
The values in the file are obtained from fitting of a few data sets collected using different masks.
It is a very good idea do not change the file. """
shifts = []
shifts = read_csv(path_to_shifts_file)
    # shall be precisely seven lines; shifts for rear left, rear right, left, right, top, bottom curtains
# [calculated from 296_Cd_lines_setup1 file] + value for symmetrical shift for entire rear panels
pixelsize = get_pixel_size()
correct_element_one_stripe("BackDetectorLeft", pixelsize, shifts[0], ws_to_correct)
correct_element_one_stripe("BackDetectorRight", pixelsize, shifts[1], ws_to_correct)
correct_element_one_stripe("CurtainLeft", pixelsize, shifts[2], ws_to_correct)
correct_element_one_stripe("CurtainRight", pixelsize, shifts[3], ws_to_correct)
correct_element_one_stripe("CurtainTop", pixelsize, shifts[4], ws_to_correct)
correct_element_one_stripe("CurtainBottom", pixelsize, shifts[5], ws_to_correct)
move_rear_panels(shifts[6][0], pixelsize, ws_to_correct)
correction_based_on_experiment(ws_to_correct)
return
##############################################################################
def correct_element_one_stripe(panel, pixelsize, shift, ws):
""" Technical for CorrectionTubesShift.
Sutable for one Cd stripe correction and for the stripes on BorAl mask on left curtain """
eightpack = ['eight_pack1', 'eight_pack2', 'eight_pack3', 'eight_pack4', 'eight_pack5']
tube = ['tube1', 'tube2', 'tube3', 'tube4', 'tube5', 'tube6', 'tube7', 'tube8']
i = 0
for ei_pack, t_tube in product(eightpack, tube):
if (panel == "BackDetectorLeft" or panel == "CurtainLeft"):
direction = 1.0
MoveInstrumentComponent(ws, panel + '/' + ei_pack + '/' + t_tube, X=0, Y=-float(shift[i])*pixelsize*direction, Z=0)
if (panel == "BackDetectorRight" or panel == "CurtainRight"):
direction = -1.0
MoveInstrumentComponent(ws, panel + '/' + ei_pack + '/' + t_tube, X=0, Y=-float(shift[i])*pixelsize*direction, Z=0)
if (panel == "CurtainBottom"):
direction = 1.0
MoveInstrumentComponent(ws, panel + '/' + ei_pack + '/' + t_tube, X=-float(shift[i])*pixelsize*direction, Y=0, Z=0)
if (panel == "CurtainTop"):
direction = -1.0
MoveInstrumentComponent(ws, panel + '/' + ei_pack + '/' + t_tube, X=-float(shift[i])*pixelsize*direction, Y=0, Z=0)
i = i + 1
return ws
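# Note on the loop above (descriptive sketch, not verified against the Bilby
# instrument definition file): product(eightpack, tube) visits 5 x 8 = 40 tubes
# per panel, and each Mantid component path is built as
#   panel + '/' + ei_pack + '/' + t_tube   # e.g. "CurtainLeft/eight_pack3/tube5"
# shift[i] is the per-tube shift read from the csv file, scaled by pixelsize
# before being passed to MoveInstrumentComponent.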
##############################################################################
def move_rear_panels(shift, pixelsize, ws):
""" Technical for CorrectionTubesShift """
panel = "BackDetectorLeft"
direction = 1.0
MoveInstrumentComponent(ws, panel, X=0, Y=-float(shift)*pixelsize*direction, Z=0)
panel = "BackDetectorRight"
direction = -1.0
MoveInstrumentComponent(ws, panel, X=0, Y=-float(shift)*pixelsize*direction, Z=0)
return ws
##############################################################################
def correction_based_on_experiment(ws_to_correct):
""" The function to move curtains, based on fits/analysis of a massive set of AgBeh and liquid crystals data
collected on 6 Oct 2016. Previous Laser tracker data has not picked up these imperfections."""
MoveInstrumentComponent(ws_to_correct, 'CurtainLeft', X=-5.3/1000, Y=0, Z=13.0/1000)
MoveInstrumentComponent(ws_to_correct, 'CurtainRight', X=5.5/1000, Y=0, Z=17.0/1000)
MoveInstrumentComponent(ws_to_correct, 'CurtainTop', X=0, Y=-4.0/1000, Z=0)
MoveInstrumentComponent(ws_to_correct, 'CurtainBottom', X=0, Y=6.0/1000, Z=0)
MoveInstrumentComponent(ws_to_correct, 'BackDetectorRight', X=0, Y=-2.0/1000, Z=0)
MoveInstrumentComponent(ws_to_correct, 'BackDetectorLeft', X=0, Y=-2.0/1000, Z=0)
return
##############################################################################
""" Final detectors' alignement has been done using laser tracker in January, 2016.
To correct data collected before that, some extra shift hardcoded here, shall be applied """
def det_shift_before_2016 (ws_to_correct):
shift_curtainl = 0.74/1000
shift_curtainr = 6.92/1000
shift_curtainu = -7.50/1000
shift_curtaind = -1.59/1000
    MoveInstrumentComponent(ws_to_correct, 'CurtainLeft', X=shift_curtainl, Y=0, Z=0)
    MoveInstrumentComponent(ws_to_correct, 'CurtainRight', X=shift_curtainr, Y=0, Z=0)
    MoveInstrumentComponent(ws_to_correct, 'CurtainTop', X=0, Y=shift_curtainu, Z=0)
    MoveInstrumentComponent(ws_to_correct, 'CurtainBottom', X=0, Y=shift_curtaind, Z=0)
correction_based_on_experiment(ws_to_correct)
return ws_to_correct
| gpl-3.0 |
ccnmtl/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/model_formsets_regress/tests.py | 51 | 15361 | from django import forms
from django.forms.formsets import BaseFormSet, DELETION_FIELD_NAME
from django.forms.util import ErrorDict, ErrorList
from django.forms.models import modelform_factory, inlineformset_factory, modelformset_factory, BaseModelFormSet
from django.test import TestCase
from models import User, UserSite, Restaurant, Manager, Network, Host
class InlineFormsetTests(TestCase):
def test_formset_over_to_field(self):
"A formset over a ForeignKey with a to_field can be saved. Regression for #10243"
Form = modelform_factory(User)
FormSet = inlineformset_factory(User, UserSite)
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=User())
# Now create a new User and UserSite instance
data = {
'serial': u'1',
'username': u'apollo13',
'usersite_set-TOTAL_FORMS': u'1',
'usersite_set-INITIAL_FORMS': u'0',
'usersite_set-MAX_NUM_FORMS': u'0',
'usersite_set-0-data': u'10',
'usersite_set-0-user': u'apollo13'
}
user = User()
form = Form(data)
if form.is_valid():
user = form.save()
else:
self.fail('Errors found on form:%s' % form_set)
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values()
self.assertEqual(usersite[0]['data'], 10)
self.assertEqual(usersite[0]['user_id'], u'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now update the UserSite instance
data = {
'usersite_set-TOTAL_FORMS': u'1',
'usersite_set-INITIAL_FORMS': u'1',
'usersite_set-MAX_NUM_FORMS': u'0',
'usersite_set-0-id': unicode(usersite[0]['id']),
'usersite_set-0-data': u'11',
'usersite_set-0-user': u'apollo13'
}
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values()
self.assertEqual(usersite[0]['data'], 11)
self.assertEqual(usersite[0]['user_id'], u'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now add a new UserSite instance
data = {
'usersite_set-TOTAL_FORMS': u'2',
'usersite_set-INITIAL_FORMS': u'1',
'usersite_set-MAX_NUM_FORMS': u'0',
'usersite_set-0-id': unicode(usersite[0]['id']),
'usersite_set-0-data': u'11',
'usersite_set-0-user': u'apollo13',
'usersite_set-1-data': u'42',
'usersite_set-1-user': u'apollo13'
}
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values().order_by('data')
self.assertEqual(usersite[0]['data'], 11)
self.assertEqual(usersite[0]['user_id'], u'apollo13')
self.assertEqual(usersite[1]['data'], 42)
self.assertEqual(usersite[1]['user_id'], u'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
def test_formset_over_inherited_model(self):
"A formset over a ForeignKey with a to_field can be saved. Regression for #11120"
Form = modelform_factory(Restaurant)
FormSet = inlineformset_factory(Restaurant, Manager)
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=Restaurant())
# Now create a new Restaurant and Manager instance
data = {
'name': u"Guido's House of Pasta",
'manager_set-TOTAL_FORMS': u'1',
'manager_set-INITIAL_FORMS': u'0',
'manager_set-MAX_NUM_FORMS': u'0',
'manager_set-0-name': u'Guido Van Rossum'
}
        restaurant = Restaurant()
form = Form(data)
if form.is_valid():
restaurant = form.save()
else:
self.fail('Errors found on form:%s' % form_set)
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values()
self.assertEqual(manager[0]['name'], 'Guido Van Rossum')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now update the Manager instance
data = {
'manager_set-TOTAL_FORMS': u'1',
'manager_set-INITIAL_FORMS': u'1',
'manager_set-MAX_NUM_FORMS': u'0',
'manager_set-0-id': unicode(manager[0]['id']),
'manager_set-0-name': u'Terry Gilliam'
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values()
self.assertEqual(manager[0]['name'], 'Terry Gilliam')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now add a new Manager instance
data = {
'manager_set-TOTAL_FORMS': u'2',
'manager_set-INITIAL_FORMS': u'1',
'manager_set-MAX_NUM_FORMS': u'0',
'manager_set-0-id': unicode(manager[0]['id']),
'manager_set-0-name': u'Terry Gilliam',
'manager_set-1-name': u'John Cleese'
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values().order_by('name')
self.assertEqual(manager[0]['name'], 'John Cleese')
self.assertEqual(manager[1]['name'], 'Terry Gilliam')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
def test_formset_with_none_instance(self):
"A formset with instance=None can be created. Regression for #11872"
Form = modelform_factory(User)
FormSet = inlineformset_factory(User, UserSite)
# Instantiate the Form and FormSet to prove
# you can create a formset with an instance of None
form = Form(instance=None)
formset = FormSet(instance=None)
def test_empty_fields_on_modelformset(self):
"No fields passed to modelformset_factory should result in no fields on returned forms except for the id. See #14119."
UserFormSet = modelformset_factory(User, fields=())
formset = UserFormSet()
for form in formset.forms:
self.assertTrue('id' in form.fields)
self.assertEqual(len(form.fields), 1)
def test_save_as_new_with_new_inlines(self):
"""
Existing and new inlines are saved with save_as_new.
Regression for #14938.
"""
efnet = Network.objects.create(name="EFNet")
host1 = Host.objects.create(hostname="irc.he.net", network=efnet)
HostFormSet = inlineformset_factory(Network, Host)
# Add a new host, modify previous host, and save-as-new
data = {
'host_set-TOTAL_FORMS': u'2',
'host_set-INITIAL_FORMS': u'1',
'host_set-MAX_NUM_FORMS': u'0',
'host_set-0-id': unicode(host1.id),
'host_set-0-hostname': u'tranquility.hub.dal.net',
'host_set-1-hostname': u'matrix.de.eu.dal.net'
}
# To save a formset as new, it needs a new hub instance
dalnet = Network.objects.create(name="DALnet")
formset = HostFormSet(data, instance=dalnet, save_as_new=True)
self.assertTrue(formset.is_valid())
formset.save()
self.assertQuerysetEqual(
dalnet.host_set.order_by("hostname"),
["<Host: matrix.de.eu.dal.net>", "<Host: tranquility.hub.dal.net>"]
)
class FormsetTests(TestCase):
def test_error_class(self):
'''
Test the type of Formset and Form error attributes
'''
Formset = modelformset_factory(User)
data = {
'form-TOTAL_FORMS': u'2',
'form-INITIAL_FORMS': u'0',
'form-MAX_NUM_FORMS': u'0',
'form-0-id': '',
'form-0-username': u'apollo13',
'form-0-serial': u'1',
'form-1-id': '',
'form-1-username': u'apollo13',
'form-1-serial': u'2',
}
formset = Formset(data)
# check if the returned error classes are correct
# note: formset.errors returns a list as documented
self.assertTrue(isinstance(formset.errors, list))
self.assertTrue(isinstance(formset.non_form_errors(), ErrorList))
for form in formset.forms:
self.assertTrue(isinstance(form.errors, ErrorDict))
self.assertTrue(isinstance(form.non_field_errors(), ErrorList))
class CustomWidget(forms.CharField):
pass
class UserSiteForm(forms.ModelForm):
class Meta:
model = UserSite
widgets = {'data': CustomWidget}
class Callback(object):
def __init__(self):
self.log = []
def __call__(self, db_field, **kwargs):
self.log.append((db_field, kwargs))
return db_field.formfield(**kwargs)
class FormfieldCallbackTests(TestCase):
"""
Regression for #13095: Using base forms with widgets
defined in Meta should not raise errors.
"""
def test_inlineformset_factory_default(self):
Formset = inlineformset_factory(User, UserSite, form=UserSiteForm)
form = Formset().forms[0]
self.assertTrue(isinstance(form['data'].field.widget, CustomWidget))
def test_modelformset_factory_default(self):
Formset = modelformset_factory(UserSite, form=UserSiteForm)
form = Formset().forms[0]
self.assertTrue(isinstance(form['data'].field.widget, CustomWidget))
def assertCallbackCalled(self, callback):
id_field, user_field, data_field = UserSite._meta.fields
expected_log = [
(id_field, {}),
(user_field, {}),
(data_field, {'widget': CustomWidget}),
]
self.assertEqual(callback.log, expected_log)
def test_inlineformset_custom_callback(self):
callback = Callback()
inlineformset_factory(User, UserSite, form=UserSiteForm,
formfield_callback=callback)
self.assertCallbackCalled(callback)
def test_modelformset_custom_callback(self):
callback = Callback()
modelformset_factory(UserSite, form=UserSiteForm,
formfield_callback=callback)
self.assertCallbackCalled(callback)
class BaseCustomDeleteFormSet(BaseFormSet):
"""
    A formset mix-in that lets a form decide if it's to be deleted.
    Works for BaseFormSets. Also works for ModelFormSets with #14099 fixed.
    A form is deleted when its should_delete() method returns True; the formset's own delete field is suppressed.
"""
def add_fields(self, form, index):
super(BaseCustomDeleteFormSet, self).add_fields(form, index)
self.can_delete = True
if DELETION_FIELD_NAME in form.fields:
del form.fields[DELETION_FIELD_NAME]
def _should_delete_form(self, form):
return hasattr(form, 'should_delete') and form.should_delete()
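# Sketch of a form that opts in to the mix-in above (hypothetical model form,
# analogous to CustomDeleteUserForm defined further down): the formset then
# calls form.should_delete() instead of reading the DELETE checkbox.
#
#   class ExpiredItemForm(forms.ModelForm):
#       def should_delete(self):
#           return self.instance.is_expired()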
class FormfieldShouldDeleteFormTests(TestCase):
"""
Regression for #14099: BaseModelFormSet should use ModelFormSet method _should_delete_form
"""
class BaseCustomDeleteModelFormSet(BaseModelFormSet, BaseCustomDeleteFormSet):
""" Model FormSet with CustomDelete MixIn """
class CustomDeleteUserForm(forms.ModelForm):
""" A model form with a 'should_delete' method """
class Meta:
model = User
def should_delete(self):
""" delete form if odd PK """
return self.instance.id % 2 != 0
NormalFormset = modelformset_factory(User, form=CustomDeleteUserForm, can_delete=True)
DeleteFormset = modelformset_factory(User, form=CustomDeleteUserForm, formset=BaseCustomDeleteModelFormSet)
data = {
'form-TOTAL_FORMS': '4',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '4',
'form-0-username': 'John',
'form-0-serial': '1',
'form-1-username': 'Paul',
'form-1-serial': '2',
'form-2-username': 'George',
'form-2-serial': '3',
'form-3-username': 'Ringo',
'form-3-serial': '5',
}
delete_all_ids = {
'form-0-DELETE': '1',
'form-1-DELETE': '1',
'form-2-DELETE': '1',
'form-3-DELETE': '1',
}
def test_init_database(self):
""" Add test data to database via formset """
formset = self.NormalFormset(self.data)
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 4)
def test_no_delete(self):
""" Verify base formset doesn't modify database """
# reload database
self.test_init_database()
# pass standard data dict & see none updated
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update(dict(
('form-%d-id' % i, user.id)
for i,user in enumerate(User.objects.all())
))
formset = self.NormalFormset(data, queryset=User.objects.all())
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 4)
def test_all_delete(self):
""" Verify base formset honors DELETE field """
# reload database
self.test_init_database()
# create data dict with all fields marked for deletion
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update(dict(
('form-%d-id' % i, user.id)
for i,user in enumerate(User.objects.all())
))
data.update(self.delete_all_ids)
formset = self.NormalFormset(data, queryset=User.objects.all())
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 0)
def test_custom_delete(self):
""" Verify DeleteFormset ignores DELETE field and uses form method """
# reload database
self.test_init_database()
# Create formset with custom Delete function
# create data dict with all fields marked for deletion
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update(dict(
('form-%d-id' % i, user.id)
for i,user in enumerate(User.objects.all())
))
data.update(self.delete_all_ids)
formset = self.DeleteFormset(data, queryset=User.objects.all())
# verify two were deleted
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 2)
# verify no "odd" PKs left
odd_ids = [user.id for user in User.objects.all() if user.id % 2]
self.assertEqual(len(odd_ids), 0)
| gpl-3.0 |
BehavioralInsightsTeam/edx-platform | lms/djangoapps/courseware/features/problems.py | 23 | 6532 | '''
Steps for problem.feature lettuce tests
'''
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import step, world
from common import i_am_registered_for_the_course, visit_scenario_item
from problems_setup import PROBLEM_DICT, add_problem_to_course, answer_problem, problem_has_answer
def _view_problem(step, problem_type, problem_settings=None):
i_am_registered_for_the_course(step, 'model_course')
# Ensure that the course has this problem type
add_problem_to_course(world.scenario_dict['COURSE'].number, problem_type, problem_settings)
# Go to the one section in the factory-created course
# which should be loaded with the correct problem
visit_scenario_item('SECTION')
@step(u'I am viewing a "([^"]*)" problem with "([^"]*)" attempt')
def view_problem_with_attempts(step, problem_type, attempts):
_view_problem(step, problem_type, {'max_attempts': attempts})
@step(u'I am viewing a randomization "([^"]*)" "([^"]*)" problem with "([^"]*)" attempts with reset')
def view_problem_attempts_reset(step, randomization, problem_type, attempts):
_view_problem(step, problem_type, {'max_attempts': attempts,
'rerandomize': randomization,
'show_reset_button': True})
@step(u'I am viewing a "([^"]*)" that shows the answer "([^"]*)"')
def view_problem_with_show_answer(step, problem_type, answer):
_view_problem(step, problem_type, {'showanswer': answer})
@step(u'I am viewing a "([^"]*)" problem')
def view_problem(step, problem_type):
_view_problem(step, problem_type)
@step(u'I am viewing a randomization "([^"]*)" "([^"]*)" problem with reset button on')
def view_random_reset_problem(step, randomization, problem_type):
_view_problem(step, problem_type, {'rerandomize': randomization, 'show_reset_button': True})
@step(u'External graders respond "([^"]*)"')
def set_external_grader_response(step, correctness):
assert(correctness in ['correct', 'incorrect'])
response_dict = {
'correct': True if correctness == 'correct' else False,
'score': 1 if correctness == 'correct' else 0,
'msg': 'Your problem was graded {0}'.format(correctness)
}
# Set the fake xqueue server to always respond
# correct/incorrect when asked to grade a problem
world.xqueue.config['default'] = response_dict
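# For the "correct" case the stub registered above looks like
#   {'correct': True, 'score': 1, 'msg': 'Your problem was graded correct'}
# so any problem submitted while this step is active is graded the same way
# by the fake xqueue server.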
@step(u'I answer a "([^"]*)" problem "([^"]*)ly"')
def answer_problem_step(step, problem_type, correctness):
""" Mark a given problem type correct or incorrect, then submit it.
*problem_type* is a string representing the type of problem (e.g. 'drop down')
*correctness* is in ['correct', 'incorrect']
"""
# Change the answer on the page
input_problem_answer(step, problem_type, correctness)
# Submit the problem
submit_problem(step)
@step(u'I input an answer on a "([^"]*)" problem "([^"]*)ly"')
def input_problem_answer(_, problem_type, correctness):
"""
Have the browser input an answer (either correct or incorrect)
"""
assert correctness in ['correct', 'incorrect']
assert problem_type in PROBLEM_DICT
answer_problem(world.scenario_dict['COURSE'].number, problem_type, correctness)
@step(u'I submit a problem')
# pylint: disable=unused-argument
def submit_problem(step):
# first scroll down so the loading mathjax button does not
# cover up the Submit button
world.browser.execute_script("window.scrollTo(0,1024)")
world.css_click("button.submit")
# Wait for the problem to finish re-rendering
world.wait_for_ajax_complete()
@step(u'The "([^"]*)" problem displays a "([^"]*)" answer')
def assert_problem_has_answer(step, problem_type, answer_class):
'''
Assert that the problem is displaying a particular answer.
These correspond to the same correct/incorrect
answers we set in answer_problem()
We can also check that a problem has been left blank
by setting answer_class='blank'
'''
assert answer_class in ['correct', 'incorrect', 'blank']
assert problem_type in PROBLEM_DICT
problem_has_answer(world.scenario_dict['COURSE'].number, problem_type, answer_class)
@step(u'I reset the problem')
def reset_problem(_step):
world.css_click('button.reset')
# Wait for the problem to finish re-rendering
world.wait_for_ajax_complete()
@step(u'I press the button with the label "([^"]*)"$')
def press_the_button_with_label(_step, buttonname):
button_css = 'button span.show-label'
elem = world.css_find(button_css).first
world.css_has_text(button_css, elem)
world.css_click(button_css)
@step(u'The "([^"]*)" button does( not)? appear')
def action_button_present(_step, buttonname, doesnt_appear):
button_css = 'div.action button[data-value*="%s"]' % buttonname
if bool(doesnt_appear):
assert world.is_css_not_present(button_css)
else:
assert world.is_css_present(button_css)
@step(u'I should see a score of "([^"]*)"$')
def see_score(_step, score):
# The problem progress is changed by
# cms/static/xmodule_js/src/capa/display.js
# so give it some time to render on the page.
score_css = 'div.problem-progress'
expected_text = '{}'.format(score)
world.wait_for(lambda _: world.css_has_text(score_css, expected_text))
@step(u'[Mm]y "([^"]*)" answer is( NOT)? marked "([^"]*)"')
def assert_answer_mark(_step, problem_type, isnt_marked, correctness):
"""
Assert that the expected answer mark is visible
for a given problem type.
*problem_type* is a string identifying the type of problem (e.g. 'drop down')
*correctness* is in ['correct', 'incorrect', 'unanswered']
"""
# Determine which selector(s) to look for based on correctness
assert correctness in ['correct', 'incorrect', 'unanswered']
assert problem_type in PROBLEM_DICT
# At least one of the correct selectors should be present
for sel in PROBLEM_DICT[problem_type][correctness]:
if bool(isnt_marked):
world.wait_for(lambda _: world.is_css_not_present(sel)) # pylint: disable=cell-var-from-loop
has_expected = world.is_css_not_present(sel)
else:
world.css_find(sel) # css_find includes a wait_for pattern
has_expected = world.is_css_present(sel)
# As soon as we find the selector, break out of the loop
if has_expected:
break
# Expect that we found the expected selector
assert has_expected
| agpl-3.0 |
vladimir-smirnov-sociomantic/graphite-api | tests/test_attime.py | 11 | 1100 | import datetime
import time
from graphite_api.render.attime import parseATTime
from . import TestCase
class AtTestCase(TestCase):
def test_parse(self):
for value in [
str(int(time.time())),
'20140319',
'20130319+1y',
'20130319+1mon',
'20130319+1w',
'12:12_20130319',
'3:05am_20130319',
'3:05pm_20130319',
'noon20130319',
'midnight20130319',
'teatime20130319',
'yesterday',
'tomorrow',
'03/19/2014',
'03/19/1800',
'03/19/1950',
'feb 27',
'mar 5',
'mon',
'tue',
'wed',
'thu',
'fri',
'sat',
'sun',
'10:00',
]:
self.assertIsInstance(parseATTime(value), datetime.datetime)
for value in [
'20130319+1foo',
'mar',
'wat',
]:
with self.assertRaises(Exception):
parseATTime(value)
| apache-2.0 |
xfournet/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_xrange.py | 326 | 2699 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes xrange(...) into range(...)."""
# Local imports
from .. import fixer_base
from ..fixer_util import Name, Call, consuming_calls
from .. import patcomp
class FixXrange(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power<
(name='range'|name='xrange') trailer< '(' args=any ')' >
rest=any* >
"""
def start_tree(self, tree, filename):
super(FixXrange, self).start_tree(tree, filename)
self.transformed_xranges = set()
def finish_tree(self, tree, filename):
self.transformed_xranges = None
def transform(self, node, results):
name = results["name"]
if name.value == u"xrange":
return self.transform_xrange(node, results)
elif name.value == u"range":
return self.transform_range(node, results)
else:
raise ValueError(repr(name))
def transform_xrange(self, node, results):
name = results["name"]
name.replace(Name(u"range", prefix=name.prefix))
# This prevents the new range call from being wrapped in a list later.
self.transformed_xranges.add(id(node))
def transform_range(self, node, results):
if (id(node) not in self.transformed_xranges and
not self.in_special_context(node)):
range_call = Call(Name(u"range"), [results["args"].clone()])
# Encase the range call in list().
list_call = Call(Name(u"list"), [range_call],
prefix=node.prefix)
# Put things that were after the range() call after the list call.
for n in results["rest"]:
list_call.append_child(n)
return list_call
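    # Illustrative effect of the two transforms above (examples, not taken from
    # lib2to3's own docs):
    #   xrange(10)              ->  range(10)
    #   x = range(10)           ->  x = list(range(10))
    #   for i in range(10): ... ->  unchanged (special context, no list() wrapper)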
P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
p1 = patcomp.compile_pattern(P1)
P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
| comparison< any 'in' node=any any*>
"""
p2 = patcomp.compile_pattern(P2)
def in_special_context(self, node):
if node.parent is None:
return False
results = {}
if (node.parent.parent is not None and
self.p1.match(node.parent.parent, results) and
results["node"] is node):
# list(d.keys()) -> list(d.keys()), etc.
return results["func"].value in consuming_calls
# for ... in d.iterkeys() -> for ... in d.keys(), etc.
return self.p2.match(node.parent, results) and results["node"] is node
| apache-2.0 |
halberom/ansible | hacking/metadata-tool.py | 20 | 21240 | #!/usr/bin/env python
import ast
import csv
import os
import sys
from collections import defaultdict
from distutils.version import StrictVersion
from pprint import pformat, pprint
import yaml
from ansible.module_utils._text import to_text
from ansible.plugins import module_loader
# There are a few files that are not new-style modules. Have to blacklist them
NONMODULE_PY_FILES = frozenset(('async_wrapper.py',))
NONMODULE_MODULE_NAMES = frozenset(os.path.splitext(p)[0] for p in NONMODULE_PY_FILES)
# Default metadata
DEFAULT_METADATA = {'version': '1.0', 'status': ['preview'], 'supported_by': 'community'}
class ParseError(Exception):
"""Thrown when parsing a file fails"""
pass
class MissingModuleError(Exception):
"""Thrown when unable to find a plugin"""
pass
def usage():
print("""Usage:
 metadata-tool.py report [--version X]
 metadata-tool.py add [--version X] [--overwrite] CSVFILE
 metadata-tool.py add-default [--version X] [--overwrite]""")
sys.exit(1)
def parse_args(arg_string):
if len(arg_string) < 1:
usage()
action = arg_string[0]
version = None
if '--version' in arg_string:
version_location = arg_string.index('--version')
arg_string.pop(version_location)
version = arg_string.pop(version_location)
overwrite = False
if '--overwrite' in arg_string:
overwrite = True
arg_string.remove('--overwrite')
csvfile = None
if len(arg_string) == 2:
csvfile = arg_string[1]
elif len(arg_string) > 2:
usage()
return action, {'version': version, 'overwrite': overwrite, 'csvfile': csvfile}
def seek_end_of_dict(module_data, start_line, start_col, next_node_line, next_node_col):
"""Look for the end of a dict in a set of lines
We know the starting position of the dict and we know the start of the
next code node but in between there may be multiple newlines and comments.
There may also be multiple python statements on the same line (separated
by semicolons)
Examples::
ANSIBLE_METADATA = {[..]}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {[..]} # Optional comments with confusing junk => {}
# Optional comments {}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {
[..]
}
# Optional comments {}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {[..]} ; DOCUMENTATION = [..]
ANSIBLE_METADATA = {}EOF
"""
if next_node_line is None:
# The dict is the last statement in the file
snippet = module_data.splitlines()[start_line:]
next_node_col = 0
# Include the last line in the file
last_line_offset = 0
else:
# It's somewhere in the middle so we need to separate it from the rest
snippet = module_data.splitlines()[start_line:next_node_line]
# Do not include the last line because that's where the next node
# starts
last_line_offset = 1
if next_node_col == 0:
# This handles all variants where there are only comments and blank
# lines between the dict and the next code node
# Step backwards through all the lines in the snippet
for line_idx, line in tuple(reversed(tuple(enumerate(snippet))))[last_line_offset:]:
end_col = None
# Step backwards through all the characters in the line
for col_idx, char in reversed(tuple(enumerate(c for c in line))):
if char == '}' and end_col is None:
# Potentially found the end of the dict
end_col = col_idx
elif char == '#' and end_col is not None:
# The previous '}' was part of a comment. Keep trying
end_col = None
if end_col is not None:
# Found the end!
end_line = start_line + line_idx
break
else:
# Harder cases involving multiple statements on one line
# Good Ansible Module style doesn't do this so we're just going to
# treat this as an error for now:
            raise ParseError('Multiple statements per line confuse the module metadata parser.')
return end_line, end_col
def seek_end_of_string(module_data, start_line, start_col, next_node_line, next_node_col):
"""
This is much trickier than finding the end of a dict. A dict has only one
ending character, "}". Strings have four potential ending characters. We
have to parse the beginning of the string to determine what the ending
character will be.
Examples:
ANSIBLE_METADATA = '''[..]''' # Optional comment with confusing chars '''
# Optional comment with confusing chars '''
DOCUMENTATION = [..]
ANSIBLE_METADATA = '''
[..]
'''
DOCUMENTATIONS = [..]
ANSIBLE_METADATA = '''[..]''' ; DOCUMENTATION = [..]
SHORT_NAME = ANSIBLE_METADATA = '''[..]''' ; DOCUMENTATION = [..]
String marker variants:
* '[..]'
* "[..]"
* '''[..]'''
* \"\"\"[..]\"\"\"
Each of these come in u, r, and b variants:
* '[..]'
* u'[..]'
* b'[..]'
* r'[..]'
* ur'[..]'
* ru'[..]'
* br'[..]'
* b'[..]'
* rb'[..]'
"""
raise NotImplementedError('Finding end of string not yet implemented')
def extract_metadata(module_data):
"""Extract the metadata from a module
:arg module_data: Byte string containing a module's code
:returns: a tuple of metadata (a dict), line the metadata starts on,
column the metadata starts on, line the metadata ends on, column the
metadata ends on, and the names the metadata is assigned to. One of
the names the metadata is assigned to will be ANSIBLE_METADATA If no
metadata is found, the tuple will be (None, -1, -1, -1, -1, None)
"""
metadata = None
start_line = -1
start_col = -1
end_line = -1
end_col = -1
targets = None
mod_ast_tree = ast.parse(module_data)
for root_idx, child in enumerate(mod_ast_tree.body):
if isinstance(child, ast.Assign):
for target in child.targets:
if target.id == 'ANSIBLE_METADATA':
if isinstance(child.value, ast.Dict):
metadata = ast.literal_eval(child.value)
try:
# Determine where the next node starts
next_node = mod_ast_tree.body[root_idx+1]
next_lineno = next_node.lineno
next_col_offset = next_node.col_offset
except IndexError:
# Metadata is defined in the last node of the file
next_lineno = None
next_col_offset = None
# Determine where the current metadata ends
end_line, end_col = seek_end_of_dict(module_data,
child.lineno - 1, child.col_offset, next_lineno,
next_col_offset)
elif isinstance(child.value, ast.Str):
metadata = yaml.safe_load(child.value.s)
end_line = seek_end_of_string(module_data)
elif isinstance(child.value, ast.Bytes):
metadata = yaml.safe_load(to_text(child.value.s, errors='surrogate_or_strict'))
end_line = seek_end_of_string(module_data)
else:
# Example:
# ANSIBLE_METADATA = 'junk'
# ANSIBLE_METADATA = { [..the real metadata..] }
continue
# Do these after the if-else so we don't pollute them in
# case this was a false positive
start_line = child.lineno - 1
start_col = child.col_offset
targets = [t.id for t in child.targets]
break
if metadata is not None:
# Once we've found the metadata we're done
break
return metadata, start_line, start_col, end_line, end_col, targets
def find_documentation(module_data):
"""Find the DOCUMENTATION metadata for a module file"""
start_line = -1
mod_ast_tree = ast.parse(module_data)
for child in mod_ast_tree.body:
if isinstance(child, ast.Assign):
for target in child.targets:
if target.id == 'DOCUMENTATION':
start_line = child.lineno - 1
break
return start_line
def remove_metadata(module_data, start_line, start_col, end_line, end_col):
"""Remove a section of a module file"""
lines = module_data.split('\n')
new_lines = lines[:start_line]
if start_col != 0:
new_lines.append(lines[start_line][:start_col])
next_line = lines[end_line]
if len(next_line) - 1 != end_col:
new_lines.append(next_line[end_col:])
if len(lines) > end_line:
new_lines.extend(lines[end_line + 1:])
return '\n'.join(new_lines)
def insert_metadata(module_data, new_metadata, insertion_line, targets=('ANSIBLE_METADATA',)):
"""Insert a new set of metadata at a specified line"""
assignments = ' = '.join(targets)
pretty_metadata = pformat(new_metadata, width=1).split('\n')
new_lines = []
new_lines.append('{} = {}'.format(assignments, pretty_metadata[0]))
if len(pretty_metadata) > 1:
for line in pretty_metadata[1:]:
new_lines.append('{}{}'.format(' ' * (len(assignments) - 1 + len(' = {')), line))
old_lines = module_data.split('\n')
lines = old_lines[:insertion_line] + new_lines + [''] + old_lines[insertion_line:]
return '\n'.join(lines)
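# For illustration (the exact layout depends on pprint.pformat and the metadata
# dict, so treat this as a sketch): insert_metadata() renders something like
#
#   ANSIBLE_METADATA = {'status': ['preview'],
#                       'supported_by': 'community',
#                       'version': '1.0'}
#
# with continuation lines padded so they line up under the opening brace.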
def parse_assigned_metadata_initial(csvfile):
"""
Fields:
:0: Module name
:1: Core (x if so)
:2: Extras (x if so)
:3: Category
:4: Supported/SLA
:5: Committer
:6: Stable
:7: Deprecated
:8: Notes
:9: Team Notes
:10: Notes 2
:11: final supported_by field
"""
with open(csvfile, 'rb') as f:
for record in csv.reader(f):
module = record[0]
if record[12] == 'core':
supported_by = 'core'
elif record[12] == 'curated':
supported_by = 'committer'
elif record[12] == 'community':
supported_by = 'community'
else:
print('Module %s has no supported_by field. Using community' % record[0])
supported_by = 'community'
supported_by = DEFAULT_METADATA['supported_by']
status = []
if record[6]:
status.append('stableinterface')
if record[7]:
status.append('deprecated')
if not status:
status.extend(DEFAULT_METADATA['status'])
yield (module, {'version': DEFAULT_METADATA['version'], 'supported_by': supported_by, 'status': status})
def parse_assigned_metadata(csvfile):
"""
Fields:
:0: Module name
:1: supported_by string. One of the valid support fields
core, community, unmaintained, committer
:2: stableinterface
:3: preview
:4: deprecated
:5: removed
:6: tested
https://github.com/ansible/proposals/issues/30
"""
with open(csvfile, 'rb') as f:
for record in csv.reader(f):
module = record[0]
supported_by = record[1]
status = []
if record[2]:
status.append('stableinterface')
if record[4]:
status.append('deprecated')
if record[5]:
status.append('removed')
if record[6]:
status.append('tested')
if not status or record[3]:
status.append('preview')
yield (module, {'version': '1.0', 'supported_by': supported_by, 'status': status})
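# Example (assuming a csv layout matching the docstring above; the module name
# is made up): the row
#   module_foo,community,,x,,,
# yields ('module_foo', {'version': '1.0', 'supported_by': 'community',
#                        'status': ['preview']})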
def write_metadata(filename, new_metadata, version=None, overwrite=False):
with open(filename, 'rb') as f:
module_data = f.read()
try:
current_metadata, start_line, start_col, end_line, end_col, targets = extract_metadata(module_data)
except SyntaxError:
if filename.endswith('.py'):
raise
# Probably non-python modules. These should all have python
# documentation files where we can place the data
raise ParseError('Could not add metadata to {}'.format(filename))
if current_metadata is None:
# No curent metadata so we can just add it
start_line = find_documentation(module_data)
if start_line < 0:
if os.path.basename(filename) in NONMODULE_PY_FILES:
# These aren't new-style modules
return
raise Exception('Module file {} had no ANSIBLE_METADATA or DOCUMENTATION'.format(filename))
module_data = insert_metadata(module_data, new_metadata, start_line, targets=('ANSIBLE_METADATA',))
elif overwrite or (version is not None and ('version' not in current_metadata or StrictVersion(current_metadata['version']) < StrictVersion(version))):
# Current metadata that we do not want. Remove the current
# metadata and put the new version in its place
module_data = remove_metadata(module_data, start_line, start_col, end_line, end_col)
module_data = insert_metadata(module_data, new_metadata, start_line, targets=targets)
else:
# Current metadata and we don't want to overwrite it
return
# Save the new version of the module
with open(filename, 'wb') as f:
f.write(module_data)
def return_metadata(plugins):
metadata = {}
for name, filename in plugins:
# There may be several files for a module (if it is written in another
# language, for instance) but only one of them (the .py file) should
# contain the metadata.
if name not in metadata or metadata[name] is not None:
with open(filename, 'rb') as f:
module_data = f.read()
metadata[name] = extract_metadata(module_data)[0]
return metadata
def metadata_summary(plugins, version=None):
"""Compile information about the metadata status for a list of modules
:arg plugins: List of plugins to look for. Each entry in the list is
a tuple of (module name, full path to module)
:kwarg version: If given, make sure the modules have this version of
metadata or higher.
:returns: A tuple consisting of a list of modules with no metadata at the
required version and a list of files that have metadata at the
required version.
"""
no_metadata = {}
has_metadata = {}
supported_by = defaultdict(set)
status = defaultdict(set)
plugins = list(plugins)
all_mods_metadata = return_metadata(plugins)
for name, filename in plugins:
# Does the module have metadata?
if name not in no_metadata and name not in has_metadata:
metadata = all_mods_metadata[name]
if metadata is None:
no_metadata[name] = filename
elif version is not None and ('version' not in metadata or StrictVersion(metadata['version']) < StrictVersion(version)):
no_metadata[name] = filename
else:
has_metadata[name] = filename
# What categories does the plugin belong in?
if all_mods_metadata[name] is None:
# No metadata for this module. Use the default metadata
supported_by[DEFAULT_METADATA['supported_by']].add(filename)
status[DEFAULT_METADATA['status'][0]].add(filename)
else:
supported_by[all_mods_metadata[name]['supported_by']].add(filename)
for one_status in all_mods_metadata[name]['status']:
status[one_status].add(filename)
return list(no_metadata.values()), list(has_metadata.values()), supported_by, status
#
# Subcommands
#
def add_from_csv(csv_file, version=None, overwrite=False):
"""Implement the subcommand to add metadata from a csv file
"""
# Add metadata for everything from the CSV file
diagnostic_messages = []
for module_name, new_metadata in parse_assigned_metadata_initial(csv_file):
filename = module_loader.find_plugin(module_name, mod_type='.py')
if filename is None:
diagnostic_messages.append('Unable to find the module file for {}'.format(module_name))
continue
try:
write_metadata(filename, new_metadata, version, overwrite)
except ParseError as e:
diagnostic_messages.append(e.args[0])
continue
if diagnostic_messages:
pprint(diagnostic_messages)
return 0
def add_default(version=None, overwrite=False):
"""Implement the subcommand to add default metadata to modules
Add the default metadata to any plugin which lacks it.
:kwarg version: If given, the metadata must be at least this version.
Otherwise, treat the module as not having existing metadata.
:kwarg overwrite: If True, overwrite any existing metadata. Otherwise,
do not modify files which have metadata at an appropriate version
"""
# List of all plugins
plugins = module_loader.all(path_only=True)
plugins = ((os.path.splitext((os.path.basename(p)))[0], p) for p in plugins)
plugins = (p for p in plugins if p[0] not in NONMODULE_MODULE_NAMES)
# Iterate through each plugin
processed = set()
diagnostic_messages = []
for name, filename in (info for info in plugins if info[0] not in processed):
try:
write_metadata(filename, DEFAULT_METADATA, version, overwrite)
except ParseError as e:
diagnostic_messages.append(e.args[0])
continue
processed.add(name)
if diagnostic_messages:
pprint(diagnostic_messages)
return 0
def report(version=None):
"""Implement the report subcommand
Print out all the modules that have metadata and all the ones that do not.
:kwarg version: If given, the metadata must be at least this version.
Otherwise return it as not having metadata
"""
# List of all plugins
plugins = module_loader.all(path_only=True)
plugins = list(plugins)
plugins = ((os.path.splitext((os.path.basename(p)))[0], p) for p in plugins)
plugins = (p for p in plugins if p[0] != NONMODULE_MODULE_NAMES)
plugins = list(plugins)
no_metadata, has_metadata, support, status = metadata_summary(plugins, version=version)
print('== Has metadata ==')
pprint(sorted(has_metadata))
print('')
print('== Has no metadata ==')
pprint(sorted(no_metadata))
print('')
print('== Supported by core ==')
pprint(sorted(support['core']))
print('== Supported by committers ==')
pprint(sorted(support['committer']))
print('== Supported by community ==')
pprint(sorted(support['community']))
print('')
print('== Status: stableinterface ==')
pprint(sorted(status['stableinterface']))
print('== Status: preview ==')
pprint(sorted(status['preview']))
print('== Status: deprecated ==')
pprint(sorted(status['deprecated']))
print('== Status: removed ==')
pprint(sorted(status['removed']))
print('')
print('== Summary ==')
print('No Metadata: {0} Has Metadata: {1}'.format(len(no_metadata), len(has_metadata)))
print('Supported by core: {0} Supported by community: {1} Supported by committer: {2}'.format(len(support['core']), len(support['community']),
len(support['committer'])))
print('Status StableInterface: {0} Status Preview: {1} Status Deprecated: {2} Status Removed: {3}'.format(len(status['stableinterface']),
len(status['preview']),
len(status['deprecated']),
len(status['removed'])))
return 0
if __name__ == '__main__':
action, args = parse_args(sys.argv[1:])
### TODO: Implement upgrade metadata and upgrade metadata from csvfile
if action == 'report':
rc = report(version=args['version'])
elif action == 'add':
rc = add_from_csv(args['csvfile'], version=args['version'], overwrite=args['overwrite'])
elif action == 'add-default':
rc = add_default(version=args['version'], overwrite=args['overwrite'])
sys.exit(rc)
| gpl-3.0 |
PGer/incubator-hawq | tools/sbin/gpsegstop.py | 9 | 5806 | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
#
# Internal Use Function.
#
#
#
# THIS IMPORT MUST COME FIRST
#
# import mainUtils FIRST to get python version check
from gppylib.mainUtils import *
import os, sys, time, signal
from optparse import Option, OptionGroup, OptionParser, OptionValueError, SUPPRESS_USAGE
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib import gplog
from gppylib.commands import base
from gppylib.commands import unix
from gppylib.commands import gp
from gppylib.commands.gp import SEGMENT_TIMEOUT_DEFAULT
from gppylib.commands import pg
from gppylib.db import catalog
from gppylib.db import dbconn
from gppylib import pgconf
from gppylib.gpcoverage import GpCoverage
description = ("""
This utility is NOT SUPPORTED and is for internal use only.
It stops a set of one or more segment databases.
""")
logger = gplog.get_default_logger()
#-------------------------------------------------------------------------
class SegStopStatus:
def __init__(self,datadir,stopped,reason):
self.datadir=datadir
self.stopped=stopped
self.reason=reason
def __str__(self):
return "STATUS--DIR:%s--STOPPED:%s--REASON:%s" % (self.datadir,self.stopped,self.reason)
#-------------------------------------------------------------------------
class GpSegStop:
######
def __init__(self,dblist,mode,gpversion,timeout=SEGMENT_TIMEOUT_DEFAULT):
self.dblist=dblist
self.mode=mode
self.expected_gpversion=gpversion
self.timeout=timeout
self.gphome=os.path.abspath(os.pardir)
self.actual_gpversion=gp.GpVersion.local('local GP software version check',self.gphome)
if self.actual_gpversion != self.expected_gpversion:
raise Exception("Local Software Version does not match what is expected.\n"
"The local software version is: '%s'\n"
"But we were expecting it to be: '%s'\n"
"Please review and correct" % (self.actual_gpversion,self.expected_gpversion))
self.logger = logger
######
def run(self):
results = []
failures = []
self.logger.info("Issuing shutdown commands to local segments...")
for db in self.dblist:
datadir, port = db.split(':')[0:2]
cmd = gp.SegmentStop('segment shutdown', datadir, mode=self.mode, timeout=self.timeout)
cmd.run()
res = cmd.get_results()
if res.rc == 0:
# MPP-15208
#
cmd2 = gp.SegmentIsShutDown('check if shutdown', datadir)
cmd2.run()
if cmd2.is_shutdown():
status = SegStopStatus(datadir, True, "Shutdown Succeeded")
results.append(status)
continue
# MPP-16171
#
if self.mode == 'immediate':
status = SegStopStatus(datadir, True, "Shutdown Immediate")
results.append(status)
continue
# read pid and datadir from /tmp/.s.PGSQL.<port>.lock file
name = "failed segment '%s'" % db
(succeeded, mypid, file_datadir) = pg.ReadPostmasterTempFile.local(name,port).getResults()
if succeeded and file_datadir == datadir:
# now try to terminate the process, first trying with
# SIGTERM and working our way up to SIGABRT sleeping
# in between to give the process a moment to exit
#
unix.kill_sequence(mypid)
if not unix.check_pid(mypid):
lockfile = "/tmp/.s.PGSQL.%s" % port
if os.path.exists(lockfile):
self.logger.info("Clearing segment instance lock files")
os.remove(lockfile)
status = SegStopStatus(datadir,False,"Shutdown failed: rc: %d stdout: %s stderr: %s" % (res.rc,res.stdout,res.stderr))
failures.append(status)
results.append(status)
#Log the results!
status = '\nCOMMAND RESULTS\n'
for result in results:
status += str(result) + "\n"
self.logger.info(status)
return 1 if failures else 0
######
def cleanup(self):
pass
@staticmethod
def createParser():
parser = OptParser(option_class=OptChecker,
description=' '.join(description.split()),
version='%prog version $Revision: #12 $')
parser.setHelp([])
addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=False)
parser.add_option("-D","--db",dest="dblist", action="append", type="string")
parser.add_option("-V", "--gp-version", dest="gpversion",metavar="GP_VERSION",
help="expected software version")
parser.add_option("-m", "--mode", dest="mode",metavar="<MODE>",
help="how to shutdown. modes are smart,fast, or immediate")
parser.add_option("-t", "--timeout", dest="timeout", type="int", default=SEGMENT_TIMEOUT_DEFAULT,
help="seconds to wait")
return parser
@staticmethod
def createProgram(options, args):
return GpSegStop(options.dblist,options.mode,options.gpversion,options.timeout)
#-------------------------------------------------------------------------
if __name__ == '__main__':
mainOptions = { 'setNonuserOnToolLogger':True}
simple_main( GpSegStop.createParser, GpSegStop.createProgram, mainOptions)
| apache-2.0 |
CollabQ/CollabQ | vendor/django/db/models/sql/subqueries.py | 7 | 17710 | """
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import Date
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import Query
from django.db.models.sql.where import AND, Constraint
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery',
'AggregateQuery']
class DeleteQuery(Query):
"""
Delete queries are done through this class, since they are more constrained
than general queries.
"""
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.tables) == 1, \
"Can only delete from one table at a time."
result = ['DELETE FROM %s' % self.quote_name_unless_alias(self.tables[0])]
where, params = self.where.as_sql()
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
def do_query(self, table, where):
self.tables = [table]
self.where = where
self.execute_sql(None)
def delete_batch_related(self, pk_list):
"""
Set up and execute delete queries for all the objects related to the
primary key values in pk_list. To delete the objects themselves, use
the delete_batch() method.
More than one physical query may be executed if there are a
lot of values in pk_list.
"""
from django.contrib.contenttypes import generic
cls = self.model
for related in cls._meta.get_all_related_many_to_many_objects():
if not isinstance(related.field, generic.GenericRelation):
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
where = self.where_class()
where.add((Constraint(None,
related.field.m2m_reverse_name(), related.field),
'in',
pk_list[offset : offset+GET_ITERATOR_CHUNK_SIZE]),
AND)
self.do_query(related.field.m2m_db_table(), where)
for f in cls._meta.many_to_many:
w1 = self.where_class()
if isinstance(f, generic.GenericRelation):
from django.contrib.contenttypes.models import ContentType
field = f.rel.to._meta.get_field(f.content_type_field_name)
w1.add((Constraint(None, field.column, field), 'exact',
ContentType.objects.get_for_model(cls).id), AND)
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
where = self.where_class()
where.add((Constraint(None, f.m2m_column_name(), f), 'in',
pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]),
AND)
if w1:
where.add(w1, AND)
self.do_query(f.m2m_db_table(), where)
def delete_batch(self, pk_list):
"""
Set up and execute delete queries for all the objects in pk_list. This
should be called after delete_batch_related(), if necessary.
More than one physical query may be executed if there are a
lot of values in pk_list.
"""
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
where = self.where_class()
field = self.model._meta.pk
where.add((Constraint(None, field.column, field), 'in',
pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]), AND)
self.do_query(self.model._meta.db_table, where)
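# Illustrative shape of the SQL emitted per chunk by delete_batch() above
# (table/column names are hypothetical and quoting depends on the backend):
#   DELETE FROM "myapp_mymodel" WHERE "id" IN (%s, %s, %s)
# with the primary key values of that chunk passed as query parameters.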
class UpdateQuery(Query):
"""
Represents an "update" SQL query.
"""
def __init__(self, *args, **kwargs):
super(UpdateQuery, self).__init__(*args, **kwargs)
self._setup_query()
def _setup_query(self):
"""
Runs on initialization and after cloning. Any attributes that would
normally be set in __init__ should go in here, instead, so that they
are also set up after a clone() call.
"""
self.values = []
self.related_ids = None
if not hasattr(self, 'related_updates'):
self.related_updates = {}
def clone(self, klass=None, **kwargs):
return super(UpdateQuery, self).clone(klass,
related_updates=self.related_updates.copy(), **kwargs)
def execute_sql(self, result_type=None):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(UpdateQuery, self).execute_sql(result_type)
rows = cursor and cursor.rowcount or 0
is_empty = cursor is None
del cursor
for query in self.get_related_updates():
aux_rows = query.execute_sql(result_type)
if is_empty:
rows = aux_rows
is_empty = False
return rows
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.values:
return '', ()
table = self.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for name, val, placeholder in self.values:
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
result.append(', '.join(values))
where, params = self.where.as_sql()
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
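    # Illustrative output of as_sql() above (names and quoting are hypothetical):
    # updating two fields with a pk filter renders roughly
    #   UPDATE "myapp_mymodel" SET "name" = %s, "rank" = NULL WHERE "id" IN (%s, %s)
    # with the update parameters followed by the where parameters in the returned
    # tuple.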
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.select_related = False
self.clear_ordering(True)
super(UpdateQuery, self).pre_sql_setup()
count = self.count_active_tables()
if not self.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.clone(klass=Query)
query.bump_prefix()
query.extra = {}
query.select = []
query.add_fields([query.model._meta.pk.name])
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.where = self.where_class()
if self.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.execute_sql(MULTI):
idents.extend([r[0] for r in rows])
self.add_filter(('pk__in', idents))
self.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.add_filter(('pk__in', query))
for alias in self.tables[1:]:
self.alias_refcount[alias] = 0
def clear_related(self, related_field, pk_list):
"""
Set up and execute an update query that clears related entries for the
keys in pk_list.
This is used by the QuerySet.delete_objects() method.
"""
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
f = self.model._meta.pk
self.where.add((Constraint(None, f.column, f), 'in',
pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]),
AND)
self.values = [(related_field.column, None, '%s')]
self.execute_sql(None)
def add_update_values(self, values):
"""
Convert a dictionary of field name to value mappings into an update
query. This is the entry point for the public update() method on
querysets.
"""
values_seq = []
for name, val in values.iteritems():
field, model, direct, m2m = self.model._meta.get_field_by_name(name)
if not direct or m2m:
raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field)
values_seq.append((field, model, val))
return self.add_update_fields(values_seq)
def add_update_fields(self, values_seq):
"""
Turn a sequence of (field, model, value) triples into an update query.
Used by add_update_values() as well as the "fast" update path when
saving models.
"""
from django.db.models.base import Model
for field, model, val in values_seq:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self, allow_joins=False)
if model:
self.add_related_update(model, field.column, val, placeholder)
else:
self.values.append((field.column, val, placeholder))
def add_related_update(self, model, column, value, placeholder):
"""
Adds (name, value) to an update query for an ancestor model.
Updates are coalesced so that we only run one update query per ancestor.
"""
try:
self.related_updates[model].append((column, value, placeholder))
except KeyError:
self.related_updates[model] = [(column, value, placeholder)]
def get_related_updates(self):
"""
Returns a list of query objects: one for each update required to an
ancestor model. Each query will have the same filtering conditions as
the current query but will only update a single table.
"""
if not self.related_updates:
return []
result = []
for model, values in self.related_updates.iteritems():
query = UpdateQuery(model, self.connection)
query.values = values
if self.related_ids:
query.add_filter(('pk__in', self.related_ids))
result.append(query)
return result
class InsertQuery(Query):
def __init__(self, *args, **kwargs):
super(InsertQuery, self).__init__(*args, **kwargs)
self.columns = []
self.values = []
self.params = ()
self.return_id = False
def clone(self, klass=None, **kwargs):
extras = {'columns': self.columns[:], 'values': self.values[:],
'params': self.params, 'return_id': self.return_id}
extras.update(kwargs)
return super(InsertQuery, self).clone(klass, **extras)
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.model._meta
result = ['INSERT INTO %s' % qn(opts.db_table)]
result.append('(%s)' % ', '.join([qn(c) for c in self.columns]))
result.append('VALUES (%s)' % ', '.join(self.values))
params = self.params
if self.return_id and self.connection.features.can_return_id_from_insert:
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
r_fmt, r_params = self.connection.ops.return_insert_id()
result.append(r_fmt % col)
params = params + r_params
return ' '.join(result), params
def execute_sql(self, return_id=False):
self.return_id = return_id
cursor = super(InsertQuery, self).execute_sql(None)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.model._meta.db_table, self.model._meta.pk.column)
def insert_values(self, insert_values, raw_values=False):
"""
Set up the insert query from the 'insert_values' dictionary. The
dictionary gives the model field names and their target values.
If 'raw_values' is True, the values in the 'insert_values' dictionary
are inserted directly into the query, rather than passed as SQL
parameters. This provides a way to insert NULL and DEFAULT keywords
into the query, for example.
"""
placeholders, values = [], []
for field, val in insert_values:
if hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
placeholders.append(field.get_placeholder(val))
else:
placeholders.append('%s')
self.columns.append(field.column)
values.append(val)
if raw_values:
self.values.extend(values)
else:
self.params += tuple(values)
self.values.extend(placeholders)
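# --- Editor's illustrative sketch (not part of the original module) ---
# insert_values() above collects columns, placeholders and parameters, and
# as_sql() renders them into a single INSERT statement. Assuming a model with
# one CharField `name` (names are illustrative only), the flow is roughly:
#
#     query = InsertQuery(MyModel, connection)
#     query.insert_values([(MyModel._meta.get_field('name'), 'foo')])
#     sql, params = query.as_sql()
#     # sql    -> 'INSERT INTO `myapp_mymodel` (`name`) VALUES (%s)'
#     # params -> ('foo',)
# ----------------------------------------------------------------------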
class DateQuery(Query):
"""
A DateQuery is a normal query, except that it specifically selects a single
date field. This requires some special handling when converting the results
back to Python objects, so we put it in a separate class.
"""
def __getstate__(self):
"""
Special DateQuery-specific pickle handling.
"""
for elt in self.select:
if isinstance(elt, Date):
# Eliminate a method reference that can't be pickled. The
# __setstate__ method restores this.
elt.date_sql_func = None
return super(DateQuery, self).__getstate__()
def __setstate__(self, obj_dict):
super(DateQuery, self).__setstate__(obj_dict)
for elt in self.select:
if isinstance(elt, Date):
                elt.date_sql_func = self.connection.ops.date_trunc_sql
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.util import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_timestamp(str(date))
yield date
def add_date_select(self, field, lookup_type, order='ASC'):
"""
Converts the query into a date extraction query.
"""
result = self.setup_joins([field.name], self.get_meta(),
self.get_initial_alias(), False)
alias = result[3][-1]
select = Date((alias, field.column), lookup_type,
self.connection.ops.date_trunc_sql)
self.select = [select]
self.select_fields = [None]
self.select_related = False # See #7097.
self.extra = {}
self.distinct = True
self.order_by = order == 'ASC' and [1] or [-1]
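# --- Editor's illustrative sketch (not part of the original module) ---
# add_date_select() above turns an ordinary query into a date-extraction
# query: it selects a single truncated date column, enables DISTINCT and sets
# the ordering. This is the machinery behind QuerySet.dates(); for example
# (model and field names assumed purely for illustration):
#
#     query = DateQuery(Entry, connection)
#     query.add_date_select(Entry._meta.get_field('pub_date'), 'month', 'ASC')
#     months = list(query.results_iter())   # datetimes truncated to month
# ----------------------------------------------------------------------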
class AggregateQuery(Query):
"""
An AggregateQuery takes another query as a parameter to the FROM
clause and only selects the elements in the provided list.
"""
def add_subquery(self, query):
self.subquery, self.sub_params = query.as_sql(with_col_aliases=True)
def as_sql(self, quote_func=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
sql = ('SELECT %s FROM (%s) subquery' % (
', '.join([
aggregate.as_sql()
for aggregate in self.aggregate_select.values()
]),
self.subquery)
)
params = self.sub_params
return (sql, params)
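# --- Editor's illustrative sketch (not part of the original module) ---
# AggregateQuery wraps an inner query as a FROM-clause subquery, so the SQL
# produced by as_sql() has roughly this shape (column names are placeholders):
#
#     SELECT COUNT(`col`), MAX(`col`) FROM (<inner query SQL>) subquery
#
# with the inner query's parameters passed through unchanged as sub_params.
# ----------------------------------------------------------------------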
| apache-2.0 |
VinceZK/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/commit.py | 124 | 5399 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import sys
from webkitpy.common.checkout.scm import AuthenticationError, AmbiguousCommitError
from webkitpy.common.config import urls
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.user import User
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class Commit(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.non_interactive,
]
def _commit_warning(self, error):
        return ('There are %s local commits (and possibly changes in the working directory). '
'Everything will be committed as a single commit. '
'To avoid this prompt, set "git config webkit-patch.commit-should-always-squash true".' % (
error.num_local_commits))
def _check_test_expectations(self, changed_files):
test_expectations_files = [filename for filename in changed_files if filename.endswith('TestExpectations')]
if not test_expectations_files:
return
args = ["--diff-files"]
args.extend(test_expectations_files)
try:
self._tool.executive.run_and_throw_if_fail(self._tool.deprecated_port().check_webkit_style_command() + args, cwd=self._tool.scm().checkout_root)
except ScriptError, e:
if self._options.non_interactive:
raise
if not self._tool.user.confirm("Are you sure you want to continue?", default="n"):
self._exit(1)
def run(self, state):
self._commit_message = self._tool.checkout().commit_message_for_this_commit(self._options.git_commit).message()
if len(self._commit_message) < 10:
raise Exception("Attempted to commit with a commit message shorter than 10 characters. Either your patch is missing a ChangeLog or webkit-patch may have a bug.")
self._check_test_expectations(self._changed_files(state))
self._state = state
username = None
password = None
force_squash = self._options.non_interactive
num_tries = 0
while num_tries < 3:
num_tries += 1
try:
scm = self._tool.scm()
commit_text = scm.commit_with_message(self._commit_message, git_commit=self._options.git_commit, username=username, password=password, force_squash=force_squash, changed_files=self._changed_files(state))
svn_revision = scm.svn_revision_from_commit_text(commit_text)
_log.info("Committed r%s: <%s>" % (svn_revision, urls.view_revision_url(svn_revision)))
self._state["commit_text"] = commit_text
break;
except AmbiguousCommitError, e:
if self._tool.user.confirm(self._commit_warning(e)):
force_squash = True
else:
# This will correctly interrupt the rest of the commit process.
raise ScriptError(message="Did not commit")
except AuthenticationError, e:
if self._options.non_interactive:
raise ScriptError(message="Authentication required")
username = self._tool.user.prompt("%s login: " % e.server_host, repeat=5)
if not username:
raise ScriptError("You need to specify the username on %s to perform the commit as." % e.server_host)
if e.prompt_for_password:
password = self._tool.user.prompt_password("%s password for %s: " % (e.server_host, username), repeat=5)
if not password:
raise ScriptError("You need to specify the password for %s on %s to perform the commit." % (username, e.server_host))
| bsd-3-clause |
tensorflow/probability | tensorflow_probability/python/vi/optimization.py | 1 | 14806 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for fitting variational distributions."""
from __future__ import absolute_import
from __future__ import division
# [internal] enable type annotations
from __future__ import print_function
import functools
from tensorflow_probability.python import math as tfp_math
from tensorflow_probability.python.vi import csiszar_divergence
_trace_loss = lambda traceable_quantities: traceable_quantities.loss
# Silent fallback to score-function gradients leads to difficult-to-debug
# failures, so we force reparameterization gradients by default.
_reparameterized_elbo = functools.partial(
csiszar_divergence.monte_carlo_variational_loss,
discrepancy_fn=csiszar_divergence.kl_reverse,
use_reparameterization=True)
def fit_surrogate_posterior(target_log_prob_fn,
surrogate_posterior,
optimizer,
num_steps,
convergence_criterion=None,
trace_fn=_trace_loss,
variational_loss_fn=_reparameterized_elbo,
sample_size=1,
trainable_variables=None,
jit_compile=None,
seed=None,
name='fit_surrogate_posterior'):
"""Fit a surrogate posterior to a target (unnormalized) log density.
The default behavior constructs and minimizes the negative variational
evidence lower bound (ELBO), given by
```python
q_samples = surrogate_posterior.sample(num_draws)
elbo_loss = -tf.reduce_mean(
target_log_prob_fn(q_samples) - surrogate_posterior.log_prob(q_samples))
```
  This corresponds to minimizing the 'reverse' Kullback-Leibler divergence
(`KL[q||p]`) between the variational distribution and the unnormalized
`target_log_prob_fn`, and defines a lower bound on the marginal log
likelihood, `log p(x) >= -elbo_loss`. [1]
More generally, this function supports fitting variational distributions that
minimize any
[Csiszar f-divergence](https://en.wikipedia.org/wiki/F-divergence).
Args:
target_log_prob_fn: Python callable that takes a set of `Tensor` arguments
and returns a `Tensor` log-density. Given
`q_sample = surrogate_posterior.sample(sample_size)`, this
will be called as `target_log_prob_fn(*q_sample)` if `q_sample` is a list
or a tuple, `target_log_prob_fn(**q_sample)` if `q_sample` is a
dictionary, or `target_log_prob_fn(q_sample)` if `q_sample` is a `Tensor`.
It should support batched evaluation, i.e., should return a result of
shape `[sample_size]`.
surrogate_posterior: A `tfp.distributions.Distribution`
instance defining a variational posterior (could be a
`tfd.JointDistribution`). Crucially, the distribution's `log_prob` and
(if reparameterized) `sample` methods must directly invoke all ops
that generate gradients to the underlying variables. One way to ensure
this is to use `tfp.util.TransformedVariable` and/or
`tfp.util.DeferredTensor` to represent any parameters defined as
transformations of unconstrained variables, so that the transformations
execute at runtime instead of at distribution creation.
optimizer: Optimizer instance to use. This may be a TF1-style
`tf.train.Optimizer`, TF2-style `tf.optimizers.Optimizer`, or any Python
object that implements `optimizer.apply_gradients(grads_and_vars)`.
num_steps: Python `int` number of steps to run the optimizer.
convergence_criterion: Optional instance of
`tfp.optimizer.convergence_criteria.ConvergenceCriterion`
representing a criterion for detecting convergence. If `None`,
the optimization will run for `num_steps` steps, otherwise, it will run
for at *most* `num_steps` steps, as determined by the provided criterion.
Default value: `None`.
trace_fn: Python callable with signature `traced_values = trace_fn(
traceable_quantities)`, where the argument is an instance of
`tfp.math.MinimizeTraceableQuantities` and the returned `traced_values`
may be a `Tensor` or nested structure of `Tensor`s. The traced values are
stacked across steps and returned.
The default `trace_fn` simply returns the loss. In general, trace
functions may also examine the gradients, values of parameters,
the state propagated by the specified `convergence_criterion`, if any (if
no convergence criterion is specified, this will be `None`),
as well as any other quantities captured in the closure of `trace_fn`,
for example, statistics of a variational distribution.
Default value: `lambda traceable_quantities: traceable_quantities.loss`.
variational_loss_fn: Python `callable` with signature
`loss = variational_loss_fn(target_log_prob_fn, surrogate_posterior,
sample_size, seed)` defining a variational loss function. The default is
a Monte Carlo approximation to the standard evidence lower bound (ELBO),
equivalent to minimizing the 'reverse' `KL[q||p]` divergence between the
surrogate `q` and true posterior `p`. [1]
Default value: `functools.partial(
tfp.vi.monte_carlo_variational_loss,
discrepancy_fn=tfp.vi.kl_reverse,
use_reparameterization=True)`.
sample_size: Python `int` number of Monte Carlo samples to use
in estimating the variational divergence. Larger values may stabilize
the optimization, but at higher cost per step in time and memory.
Default value: `1`.
trainable_variables: Optional list of `tf.Variable` instances to optimize
with respect to. If `None`, defaults to the set of all variables accessed
during the computation of the variational bound, i.e., those defining
`surrogate_posterior` and the model `target_log_prob_fn`.
Default value: `None`
jit_compile: If True, compiles the loss function and gradient update using
XLA. XLA performs compiler optimizations, such as fusion, and attempts to
emit more efficient code. This may drastically improve the performance.
See the docs for `tf.function`. (In JAX, this will apply `jax.jit`).
Default value: `None`.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: Python `str` name prefixed to ops created by this function.
Default value: 'fit_surrogate_posterior'.
Returns:
results: `Tensor` or nested structure of `Tensor`s, according to the return
type of `trace_fn`. Each `Tensor` has an added leading dimension of size
`num_steps`, packing the trajectory of the result over the course of the
optimization.
#### Examples
**Normal-Normal model**. We'll first consider a simple model
`z ~ N(0, 1)`, `x ~ N(z, 1)`, where we suppose we are interested in the
posterior `p(z | x=5)`:
```python
import tensorflow_probability as tfp
  from tensorflow_probability import bijectors as tfb
  from tensorflow_probability import distributions as tfd
def log_prob(z, x):
return tfd.Normal(0., 1.).log_prob(z) + tfd.Normal(z, 1.).log_prob(x)
conditioned_log_prob = lambda z: log_prob(z, x=5.)
```
The posterior is itself normal by [conjugacy](
https://en.wikipedia.org/wiki/Conjugate_prior), and can be computed
analytically (it's `N(loc=5/2., scale=1/sqrt(2)`). But suppose we don't want
to bother doing the math: we can use variational inference instead!
```python
q_z = tfd.Normal(loc=tf.Variable(0., name='q_z_loc'),
scale=tfp.util.TransformedVariable(1., tfb.Softplus(),
name='q_z_scale'),
name='q_z')
losses = tfp.vi.fit_surrogate_posterior(
conditioned_log_prob,
    surrogate_posterior=q_z,
optimizer=tf.optimizers.Adam(learning_rate=0.1),
num_steps=100)
print(q_z.mean(), q_z.stddev()) # => approximately [2.5, 1/sqrt(2)]
```
Note that we ensure positive scale by using a softplus transformation of
the underlying variable, invoked via `TransformedVariable`. Deferring the
transformation causes it to be applied upon evaluation of the distribution's
methods, creating a gradient to the underlying variable. If we
had simply specified `scale=tf.nn.softplus(scale_var)` directly,
without the `TransformedVariable`, fitting would fail because calls to
`q.log_prob` and `q.sample` would never access the underlying variable. In
general, transformations of trainable parameters must be deferred to runtime,
using either `TransformedVariable` or `DeferredTensor` or by the callable
mechanisms available in joint distribution classes (demonstrated below).
**Custom loss function**. Suppose we prefer to fit the same model using
the forward KL divergence `KL[p||q]`. We can pass a custom loss function:
```python
import functools
forward_kl_loss = functools.partial(
tfp.vi.monte_carlo_variational_loss, discrepancy_fn=tfp.vi.kl_forward)
losses = tfp.vi.fit_surrogate_posterior(
conditioned_log_prob,
surrogate_posterior=q,
optimizer=tf.optimizers.Adam(learning_rate=0.1),
num_steps=100,
variational_loss_fn=forward_kl_loss)
```
Note that in practice this may have substantially higher-variance gradients
than the reverse KL.
**Inhomogeneous Poisson Process**. For a more interesting example, let's
consider a model with multiple latent variables as well as trainable
parameters in the model itself. Given observed counts `y` from spatial
locations `X`, consider an inhomogeneous Poisson process model
`log_rates = GaussianProcess(index_points=X); y = Poisson(exp(log_rates))`
in which the latent (log) rates are spatially correlated following a Gaussian
process. We'll fit a variational model to the latent rates while also
optimizing the GP kernel hyperparameters (largely for illustration; in
practice we might prefer to 'be Bayesian' about these parameters and include
them as latents in our model and variational posterior). First we define
the model, including trainable variables:
```python
# Toy 1D data.
index_points = np.array([-10., -7.2, -4., -0.1, 0.1, 4., 6.2, 9.]).reshape(
[-1, 1]).astype(np.float32)
observed_counts = np.array(
[100, 90, 60, 13, 18, 37, 55, 42]).astype(np.float32)
# Trainable GP hyperparameters.
kernel_log_amplitude = tf.Variable(0., name='kernel_log_amplitude')
kernel_log_lengthscale = tf.Variable(0., name='kernel_log_lengthscale')
observation_noise_log_scale = tf.Variable(
0., name='observation_noise_log_scale')
# Generative model.
Root = tfd.JointDistributionCoroutine.Root
def model_fn():
kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(
amplitude=tf.exp(kernel_log_amplitude),
length_scale=tf.exp(kernel_log_lengthscale))
latent_log_rates = yield Root(tfd.GaussianProcess(
kernel,
index_points=index_points,
observation_noise_variance=tf.exp(observation_noise_log_scale),
name='latent_log_rates'))
y = yield tfd.Independent(tfd.Poisson(log_rate=latent_log_rates, name='y'),
reinterpreted_batch_ndims=1)
model = tfd.JointDistributionCoroutine(model_fn)
```
Next we define a variational distribution. We incorporate the observations
directly into the variational model using the 'trick' of representing them
by a deterministic distribution (observe that the true posterior on an
observed value is in fact a point mass at the observed value).
```
logit_locs = tf.Variable(tf.zeros(observed_counts.shape), name='logit_locs')
logit_softplus_scales = tf.Variable(tf.ones(observed_counts.shape) * -4,
name='logit_softplus_scales')
def variational_model_fn():
latent_rates = yield Root(tfd.Independent(
tfd.Normal(loc=logit_locs, scale=tf.nn.softplus(logit_softplus_scales)),
reinterpreted_batch_ndims=1))
y = yield tfd.VectorDeterministic(observed_counts)
q = tfd.JointDistributionCoroutine(variational_model_fn)
```
Note that here we could apply transforms to variables without using
`DeferredTensor` because the `JointDistributionCoroutine` argument is a
function, i.e., executed "on demand." (The same is true when
distribution-making functions are supplied to `JointDistributionSequential`
  and `JointDistributionNamed`.) That is, as long as variables are transformed
*within* the callable, they will appear on the gradient tape when
`q.log_prob()` or `q.sample()` are invoked.
Finally, we fit the variational posterior and model variables jointly: by not
explicitly specifying `trainable_variables`, the optimization will
automatically include all variables accessed. We'll
use a custom `trace_fn` to see how the kernel amplitudes and a set of sampled
latent rates with fixed seed evolve during the course of the optimization:
```python
losses, log_amplitude_path, sample_path = tfp.vi.fit_surrogate_posterior(
target_log_prob_fn=lambda *args: model.log_prob(args),
surrogate_posterior=q,
optimizer=tf.optimizers.Adam(learning_rate=0.1),
sample_size=1,
num_steps=500,
trace_fn=lambda loss, grads, vars: (loss, kernel_log_amplitude,
q.sample(5, seed=42)[0]))
```
#### References
[1]: Bishop, Christopher M. Pattern Recognition and Machine Learning.
Springer, 2006.
"""
def complete_variational_loss_fn(seed=None):
return variational_loss_fn(
target_log_prob_fn,
surrogate_posterior,
sample_size=sample_size,
seed=seed)
return tfp_math.minimize(complete_variational_loss_fn,
num_steps=num_steps,
optimizer=optimizer,
convergence_criterion=convergence_criterion,
trace_fn=trace_fn,
trainable_variables=trainable_variables,
jit_compile=jit_compile,
seed=seed,
name=name)
| apache-2.0 |
aver803bath5/igene_bot | bot.py | 1 | 1802 | # -*- coding: utf-8 -*-
from telegram.ext import Updater
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,
ConversationHandler)
from bs4 import BeautifulSoup
from configparser import ConfigParser
from models import *
import logging
import re
import requests
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def start(bot, update):
bot.send_message(chat_id=update.message.chat_id, text="I'm a bot, please talk to me!")
def main():
cfg = ConfigParser()
cfg.read('config')
token = cfg.get('auth', 'token')
updater = Updater(token)
dp = updater.dispatcher
dp.add_handler(CommandHandler('start', start))
dp.add_handler(RegexHandler(u'(?i)google (?i)image .*', g_image))
dp.add_handler(RegexHandler('^(?i)google .*', google))
dp.add_handler(RegexHandler('^(?i)speak .*', tts))
dp.add_handler(RegexHandler(u'.*ๅฅณๆๅ.*', girlfriend))
dp.add_handler(RegexHandler(u'.*ๅฆนๅฆน.*', sister))
dp.add_handler(RegexHandler(u'.*ๆฒ้ข.*', no_money))
dp.add_handler(RegexHandler(u'.*ๆ่ฆบๅพ.*ไธ่ก.*', i_think_no))
dp.add_handler(RegexHandler(u'ๅคฉๆฐฃ .*', weather))
dp.add_handler(RegexHandler(u'.*ๆ่ฆบๅพ.*ๅฏไปฅ.*', i_think_ok))
dp.add_handler(RegexHandler(u'.*ๆฏๆๅฆ.*', wuyiulin))
dp.add_handler(RegexHandler(u'.*(?i)python.*', python_better))
dp.add_handler(RegexHandler(u'.*(?i)js.*', python_better))
dp.add_handler(RegexHandler(u'.*(?i)javascript.*', python_better))
dp.add_handler(RegexHandler('^(?i)image .*', images))
dp.add_handler(RegexHandler('.*', correct))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
| mit |
uxebu/tddbin-backend | src/tddbin/settings.py | 1 | 3937 | """
Django settings for tddbin project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7ee1f+x2ggnk8^ys2nzzvn*#k()c%xz=5&&o_l8n9#hdb4%0@#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tddbin.urls'
WSGI_APPLICATION = 'tddbin.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'tddbin-backend',
'USER': 'root',
'PASSWORD': '',
'HOST': '127.0.0.1',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
#
# tddbin specific stuff
#
ALLOWED_HOSTS = [
'localhost:8080',
'tddbin.com',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'tddbin-backend',
'USER': 'root',
'PASSWORD': '',
'HOST': '127.0.0.1',
}
}
INSTALLED_APPS += (
'core',
'django_extensions',
'werkzeug',
'rest_framework', # see http://www.django-rest-framework.org
'rest_framework_swagger', # An API documentation generator for Django REST Framework version see https://github.com/marcgibbons/django-rest-swagger
'rest_framework.authtoken', # so we can use token auth for the REST API see http://www.django-rest-framework.org/api-guide/authentication#tokenauthentication
'corsheaders',
)
MIDDLEWARE_CLASSES += (
# 'werkzeug.debug.DebuggedApplication',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
)
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
}
SWAGGER_SETTINGS = {
'exclude_namespaces': [], # List URL namespaces to ignore
'api_version': '0.1', # Specify your API's version
'api_path': '/', # Specify the path to your API not a root level
'enabled_methods': [ # Specify which methods to enable in Swagger UI
'get',
'post',
'put',
'patch',
'delete'
],
'api_key': '', # An API key
'is_authenticated': False, # Set to True to enforce user authentication,
'is_superuser': False, # Set to True to enforce admin only access
}
CORS_ORIGIN_WHITELIST = (
'localhost:8080', # for local dev
'tddbin.com'
)
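# --- Editor's illustrative note (not part of the original settings) ---
# With rest_framework.authtoken and TokenAuthentication enabled above, API
# clients are expected to send their key in the Authorization header, e.g.
# (the endpoint path below is assumed purely for illustration):
#
#     curl -H "Authorization: Token 9944b09199c62bcf9418ad846dd0e4bb" \
#          https://tddbin.com/api/...
# ----------------------------------------------------------------------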
| mit |
WoodNeck/tataru | cogs/sound.py | 1 | 7698 | import os
import asyncio
import logging
from discord import opus, ClientException
from discord.ext import commands
from discord.opus import OpusNotLoaded
from cogs.utils.music import Music
from cogs.utils.music_type import MusicType
from cogs.utils.music_player import MusicPlayer
OPUS_LIBS = ['libopus-0.x86.dll', 'libopus-0.x64.dll', 'libopus-0.dll', 'libopus.so.0', 'libopus.0.dylib']
class Sound:
instance = None
def __init__(self, bot):
Sound.instance = self
self.bot = bot
self.loop = bot.loop
self.lock = asyncio.Lock()
self.musicPlayers = dict()
self.SOUND_PATH = "./data/mutable"
async def joinVoice(self, ctx):
try:
voiceClient = self.bot.voice_client_in(ctx.message.server)
voiceChannel = ctx.message.author.voice.voice_channel
if voiceClient is None:
return await self.bot.join_voice_channel(voiceChannel)
else:
if voiceClient.channel != voiceChannel:
await voiceClient.move_to(voiceChannel)
return voiceClient
except asyncio.TimeoutError:
await self.bot.send_message(ctx.message.channel, "์์ฑ ์ฑ๋์ ์ ์ํ๋๋ฐ ๋๋ฌด ์ค๋ ๊ฑธ๋ ค์ ๋ชป๋ค์ด๊ฐ๊ฒ ์ด์ฉ")
except ClientException: # join_voice_channel์ด ๋์์ ํธ์ถ๋์ด ๋ค๋ฅธ ์ชฝ์ด ๋จผ์ ์ฒ๋ฆฌ๋ ๊ฒฝ์ฐ
return self.bot.voice_client_in(ctx.message.server)
except OpusNotLoaded:
await self.bot.send_message(ctx.message.channel, "Opus ๋ผ์ด๋ธ๋ฌ๋ฆฌ๊ฐ ๋ก๋๋์ง ์์์ด์ฉ")
logging.error("Opus Library Not Loaded")
except Exception as e:
await self.bot.send_message(ctx.message.channel, "๋ฌธ์ ๊ฐ ๋ฐ์ํ์ฌ ์์ฑ ์ฑ๋์ ์ ์ํ ์ ์์ด์ฉ")
logging.error(str(e))
return None
async def leaveVoice(self, server):
player = self.musicPlayers.get(server.id)
if player:
player.stop()
self.musicPlayers.pop(server.id)
voiceClient = self.bot.voice_client_in(server)
if voiceClient:
await voiceClient.disconnect()
@commands.command(pass_context=True)
async def ๋ค์ด์(self, ctx):
await self.joinVoice(ctx)
@commands.command(pass_context=True)
async def ๋๊ฐ(self, ctx):
await self.leaveVoice(ctx.message.server)
@commands.command(pass_context=True)
async def ์ฌ์ํด์ค(self, ctx, *args):
if len(args) == 0:
            await self.bot.say("재생할 사운드를 추가로 입력해주세용")
return
soundName = " ".join([arg for arg in args])
if soundName == "๋ชฉ๋ก":
await self.printSoundList(ctx.message)
else:
soundPath = "{}/{}/sound/{}.mp3".format(self.SOUND_PATH, ctx.message.server.id, soundName) # Only .mp3 file is allowed
if os.path.exists(soundPath):
await self.play(ctx, MusicType.LOCAL, soundPath, soundName)
else:
await self.bot.say("์๋ ์ฌ์ด๋์์ฉ")
async def play(self, ctx, dataType, fileDir, name, length=None):
await self.lock.acquire()
voiceClient = await self.joinVoice(ctx)
if voiceClient is not None:
await self.bot.send_typing(ctx.message.channel)
musicPlayer = self.musicPlayers.get(ctx.message.server.id)
if not musicPlayer:
musicPlayer = MusicPlayer(self, voiceClient, ctx.message.server, ctx.message.channel)
self.musicPlayers[ctx.message.server.id] = musicPlayer
song = Music(dataType, fileDir, name, ctx.message.author, length)
if musicPlayer.currentSong is not None:
await self.bot.say("{}์(๋ฅผ) ์ฌ์๋ชฉ๋ก์ ์ถ๊ฐํ์ด์ฉ".format(song.desc()))
musicPlayer.add(song)
await musicPlayer.play()
self.lock.release()
async def addList(self, ctx, dataType, videos):
await self.lock.acquire()
voiceClient = await self.joinVoice(ctx)
if voiceClient is not None:
await self.bot.send_typing(ctx.message.channel)
musicPlayer = self.musicPlayers.get(ctx.message.server.id)
if not musicPlayer:
musicPlayer = MusicPlayer(self, voiceClient, ctx.message.server, ctx.message.channel)
self.musicPlayers[ctx.message.server.id] = musicPlayer
for video in videos:
song = Music(dataType, video.videoUrl, video.videoTitle, ctx.message.author, video.videoTime)
musicPlayer.add(song)
await musicPlayer.play()
await self.bot.send_message(ctx.message.channel, "{}๊ฐ์ ์ฌ์๋ชฉ๋ก์ ์ถ๊ฐํ์ด์ฉ".format(len(videos)))
self.lock.release()
@commands.command(pass_context=True)
async def ์ ์ง(self, ctx):
musicPlayer = self.musicPlayers.get(ctx.message.server.id)
if musicPlayer:
musicPlayer.stop()
self.musicPlayers.pop(ctx.message.server.id)
@commands.command(pass_context=True)
async def ์คํต(self, ctx):
musicPlayer = self.musicPlayers.get(ctx.message.server.id)
if musicPlayer:
await musicPlayer.skip()
@commands.command(pass_context=True)
async def ์ทจ์(self, ctx, index):
musicPlayer = self.musicPlayers.get(ctx.message.server.id)
if not musicPlayer:
return
try:
index = int(index) - 1
except ValueError:
            await self.bot.say("재생목록의 몇번째인지 숫자를 입력해주세용")
return
await musicPlayer.skipIndex(ctx, index)
async def printSoundList(self, message):
soundPath = "{}/{}/sound".format(self.SOUND_PATH, message.server.id)
soundList = os.listdir(soundPath)
soundList = ["๐ถ" + sound.split(".")[0] for sound in soundList]
if soundList:
await self.bot.send_message(message.channel, "```{}```".format(" ".join(soundList)))
else:
            await self.bot.send_message(message.channel, "재생할 수 있는 음악이 하나도 없어용")
@commands.command(pass_context=True)
async def ์ฌ์๋ชฉ๋ก(self, ctx):
musicPlayer = self.musicPlayers.get(ctx.message.server.id)
if musicPlayer:
await musicPlayer.printSongList(ctx.message.channel)
@commands.command(pass_context=True)
async def ํ์ฌ๊ณก(self, ctx):
musicPlayer = self.musicPlayers.get(ctx.message.server.id)
if musicPlayer and musicPlayer.currentSong is not None:
await musicPlayer.displayCurrentStatus(ctx.message.channel)
else:
await self.bot.say("์ฌ์์ค์ธ ๊ณก์ด ์์ด์ฉ")
@commands.command(pass_context=True)
async def ๋ฃจํ(self, ctx):
musicPlayer = self.musicPlayers.get(ctx.message.server.id)
if musicPlayer:
musicPlayer.loop = not musicPlayer.loop
if musicPlayer.loop:
await self.bot.say("๋ฃจํ๋ฅผ ์ค์ ํ์ด์ฉ")
else:
await self.bot.say("๋ฃจํ๋ฅผ ํด์ ํ์ด์ฉ")
def load_opus_lib(opus_libs=OPUS_LIBS):
if opus.is_loaded():
return True
for opus_lib in opus_libs:
try:
opus.load_opus(opus_lib)
return
except OSError:
pass
raise RuntimeError("OPUS ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ฅผ ๋ก๋ํ๋๋ฐ ์คํจํ์ด์ฉ. ์ด๊ฒ๋ค์ ์๋ํด๋ดค์ด์ฉ: {}".format(", ".join(opus_libs)))
def setup(bot):
cog = Sound(bot)
if not __debug__:
load_opus_lib()
bot.add_cog(cog)
| mit |
gskachkov/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/newstringio.py | 132 | 1757 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""'with'-compliant StringIO implementation."""
import StringIO as OldStringIO
class StringIO(OldStringIO.StringIO):
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
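# --- Editor's illustrative example (not part of the original module) ---
# The subclass above only adds context-manager support, so it can be used in
# a 'with' statement just like a file object:
#
#     with StringIO("hello") as stream:
#         data = stream.read()   # -> "hello"
# ----------------------------------------------------------------------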
| bsd-3-clause |
ronaldhan/python-data | sqlite2mysql_test.py | 1 | 3652 | # -*- coding: utf-8 -*-
import sqlite3 as lite
import MySQLdb
import python_mysql as mydb
try:
dbPath="C:/Program Files (x86)/็ซ่ฝฆ้้ๅจV8/Data"
fileName="/5/SpiderResult.db3"
tableName='yhxxl'
conn=lite.connect(dbPath+fileName)
conn.row_factory=lite.Row
cur=conn.cursor()
#cur.execute('select * from Content')
#col_name_list=[tuple[0] for tuple in cur.description]
#print col_name_list
## for item in col_name_list:
## print item,
## print
## res=cur.fetchone()
## for item in res:
## print item,
## print
cur.execute('select * from sqlite_master where type="table" and name="Content"')
row=cur.fetchone()
#the 4th field is the create sql
#print row[4]
sql=row[4].replace('[','`').replace(']','`').replace('autoincrement','auto_increment').replace('Content',tableName)
sql=sql.replace('ๅทฒ้','yc').replace('ๅทฒๅ','yf').replace('ๅ็งฐ','name').replace('ๅฐๅ','address').replace('ๆ ็ญพ','tags').replace('ๆ็บง','grade').replace('็น่ฏๆฐ','comment')
#print sql
#connect to mysql
#check if the table exists
#if the table not exists result is []
#mysqlconn=mydb.Connection(host="localhost",user="root",passwd="ronald",db="ftpoi",port=3306)
#mycur=mysqlconn.cursor()
#exist=mycur.execute('show tables like %s','%'+tableName+'%')
#print exist
## if exist ==0:
## mycur.execute(sql)
## else:
## pass
## cur.execute('select * from Content')
## rows=cur.fetchall()
## for row in rows:
## sqlstr="insert into " + tableName + " values(%s,%s,%s,%s,%s,%s,%s,%s)"
## values=[row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8]]
## cur.execute(sqlstr,values)
mysqlconn=mydb.Connection(host="localhost",database="ftpoi",user="root",password="ronald")
exist=mysqlconn.query('show tables like %s','%'+tableName+'%')
if exist==[]:
#create new table
mysqlconn.execute(sql)
else:
pass
mysqlconn2=MySQLdb.Connection(host="localhost",user="root",passwd="ronald",db="ftpoi",port=3306,charset="utf8")
mycur=mysqlconn2.cursor()
#get all data in sqlite
#insert into mysql
cur.execute('select * from Content')
rows=cur.fetchall()
for row in rows:
## #sqlstr="insert into " + tableName + " ('ๅทฒ้','ๅทฒๅ','ๅ็งฐ','ๅฐๅ','ๆ ็ญพ','ๆ็บง','็น่ฏๆฐ','PageUrl') values (row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8])"
## #mysqlconn.insert(tableName,"ๅทฒ้"=row[1],"ๅทฒๅ"=row[2],"ๅ็งฐ"=row[3],"ๅฐๅ"=row[4],"ๆ ็ญพ"=row[5],"ๆ็บง"=row[6],"็น่ฏๆฐ"=row[7],"PageUrl"=row[8])
## #mysqlconn.commit()
sqlstr="insert into " + tableName + " ('ๅทฒ้','ๅทฒๅ','ๅ็งฐ','ๅฐๅ','ๆ ็ญพ','ๆ็บง','็น่ฏๆฐ','PageUrl') values(%s,%s,%s,%s,%s,%s,%s,%s)"
values=[row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8]]
mycur.execute(sqlstr,values)
## for items in rows:
## for item in items:
## print item,
## print
#print exist
cur.close()
conn.close()
mycur.close()
mysqlconn.close()
mysqlconn2.close()
except Exception, e:
raise
else:
pass
finally:
pass
| apache-2.0 |