code (string, 2 – 1.05M chars) | repo_name (string, 5 – 104 chars) | path (string, 4 – 251 chars) | language (1 class) | license (15 classes) | size (int32, 2 – 1.05M) |
---|---|---|---|---|---|
#!/usr/bin/env python
# ===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===============================================================================
__author__ = "Simon Oldfield"
import logging
from datacube.api import parse_date_min, parse_date_max, Satellite, DatasetType
from datacube.api.query import list_cells_as_list, list_tiles_as_list
from datacube.api.query import list_cells_vector_file_as_list
from datacube.api.query import MONTHS_BY_SEASON, Season
from datacube.api.query import LS7_SLC_OFF_EXCLUSION, LS7_SLC_OFF_ACQ_MIN
from datacube.api.query import LS8_PRE_WRS_2_EXCLUSION, LS8_PRE_WRS_2_ACQ_MAX
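# Background on the exclusion constants imported above (our summary, not from
# the original author): LS7_SLC_OFF_* is intended to exclude Landsat 7
# acquisitions affected by the 2003 Scan Line Corrector failure, and
# LS8_PRE_WRS_2_* excludes early Landsat 8 acquisitions captured before the
# satellite reached its final WRS-2 orbit.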
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
_log = logging.getLogger()
TEST_CELL_X = 120
TEST_CELL_Y = -25
TEST_YEAR = 2005
TEST_YEAR_STR = str(TEST_YEAR)
TEST_MONTHS = MONTHS_BY_SEASON[Season.SUMMER]
TEST_VECTOR_FILE = "Mainlands.shp"
TEST_VECTOR_LAYER = 0
TEST_VECTOR_FEATURE = 4
def test_list_cells_120_020_2005_ls578(config=None):
cells = list_cells_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR), acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=[DatasetType.ARG25],
config=config)
assert(cells and len(list(cells)) > 0)
for cell in cells:
_log.info("Found cell xy = %s", cell.xy)
assert(cell.x == TEST_CELL_X and cell.y == TEST_CELL_Y and cell.xy == (TEST_CELL_X, TEST_CELL_Y))
def test_list_cells_120_020_2005_ls578_no_ls7_slc(config=None):
cells = list_cells_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR),
acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=[DatasetType.ARG25],
exclude=[LS7_SLC_OFF_EXCLUSION],
config=config)
assert(cells and len(list(cells)) > 0)
for cell in cells:
_log.info("Found cell xy = %s", cell.xy)
assert(cell.x == TEST_CELL_X and cell.y == TEST_CELL_Y and cell.xy == (TEST_CELL_X, TEST_CELL_Y))
def test_list_cells_120_020_2005_ls578_no_ls8_pre_wrs_2(config=None):
cells = list_cells_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR), acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=[DatasetType.ARG25],
exclude=[LS8_PRE_WRS_2_EXCLUSION],
config=config)
assert(cells and len(list(cells)) > 0)
for cell in cells:
_log.info("Found cell xy = %s", cell.xy)
assert(cell.x == TEST_CELL_X and cell.y == TEST_CELL_Y and cell.xy == (TEST_CELL_X, TEST_CELL_Y))
def test_list_cells_120_020_2005_ls578_summer(config=None):
cells = list_cells_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR), acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=[DatasetType.ARG25],
months=TEST_MONTHS,
config=config)
assert(cells and len(list(cells)) > 0)
for cell in cells:
_log.info("Found cell xy = %s", cell.xy)
assert(cell.x == TEST_CELL_X and cell.y == TEST_CELL_Y and cell.xy == (TEST_CELL_X, TEST_CELL_Y))
def test_list_tiles_120_020_2005_ls578(config=None):
dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
tiles = list_tiles_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR), acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=dataset_types,
config=config)
assert(tiles and len(list(tiles)) > 0)
for tile in tiles:
_log.info("Found tile xy = %s", tile.xy)
        assert(tile.x == TEST_CELL_X and tile.y == TEST_CELL_Y and tile.xy == (TEST_CELL_X, TEST_CELL_Y)
               and tile.end_datetime_year == TEST_YEAR
               and all(ds in tile.datasets for ds in dataset_types))
def test_list_tiles_120_020_2005_ls578_no_ls7_slc(config=None):
dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
tiles = list_tiles_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR),
acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=dataset_types,
exclude=[LS7_SLC_OFF_EXCLUSION],
config=config)
assert(tiles and len(list(tiles)) > 0)
for tile in tiles:
_log.info("Found tile xy = %s", tile.xy)
dataset = tile.datasets[DatasetType.ARG25]
assert dataset
_log.info("Found ARG25 dataset [%s]", dataset.path)
        assert(tile.x == TEST_CELL_X and tile.y == TEST_CELL_Y and tile.xy == (TEST_CELL_X, TEST_CELL_Y)
               and tile.end_datetime_year == TEST_YEAR
               and all(ds in tile.datasets for ds in dataset_types)
               and (dataset.satellite != Satellite.LS7 or tile.end_datetime.date() <= LS7_SLC_OFF_ACQ_MIN))
def test_list_tiles_120_020_2005_ls578_no_ls8_pre_wrs_2(config=None):
dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
tiles = list_tiles_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR),
acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=dataset_types,
exclude=[LS8_PRE_WRS_2_EXCLUSION],
config=config)
assert(tiles and len(list(tiles)) > 0)
for tile in tiles:
_log.info("Found tile xy = %s", tile.xy)
dataset = tile.datasets[DatasetType.ARG25]
assert dataset
_log.info("Found ARG25 dataset [%s]", dataset.path)
        assert(tile.x == TEST_CELL_X and tile.y == TEST_CELL_Y and tile.xy == (TEST_CELL_X, TEST_CELL_Y)
               and tile.end_datetime_year == TEST_YEAR
               and all(ds in tile.datasets for ds in dataset_types)
               and (dataset.satellite != Satellite.LS8 or tile.end_datetime.date() >= LS8_PRE_WRS_2_ACQ_MAX))
def test_list_tiles_120_020_2005_ls578_summer(config=None):
dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
tiles = list_tiles_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR),
acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=dataset_types,
months=TEST_MONTHS,
config=config)
assert(tiles and len(list(tiles)) > 0)
for tile in tiles:
_log.info("Found tile xy = %s", tile.xy)
        assert(tile.x == TEST_CELL_X and tile.y == TEST_CELL_Y and tile.xy == (TEST_CELL_X, TEST_CELL_Y)
               and tile.end_datetime_year == TEST_YEAR
               and all(ds in tile.datasets for ds in dataset_types)
               and tile.end_datetime_month in [m.value for m in TEST_MONTHS])
# AOI
def test_list_cells_act_2005_ls578(config=None):
cells = list_cells_vector_file_as_list(vector_file=TEST_VECTOR_FILE,
vector_layer=TEST_VECTOR_LAYER,
vector_feature=TEST_VECTOR_FEATURE,
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
acq_min=parse_date_min(TEST_YEAR_STR),
acq_max=parse_date_max(TEST_YEAR_STR),
                                           dataset_types=[DatasetType.ARG25], config=config)
assert(cells and len(list(cells)) == 2)
for cell in cells:
_log.info("Found cell xy = %s", cell.xy)
assert((cell.x == 148 or cell.x == 149) and cell.y == -36)
# def test_list_tiles_act_2005_ls578(config=None):
#
# dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
#
# tiles = list_tiles_vector_file_as_list(vector_file="Mainlands.shp", vector_layer=0, vector_feature=4,
# acq_min=parse_date_min("2005"), acq_max=parse_date_max("2005"),
# satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
# dataset_types=dataset_types,
# config=config)
#
# assert(tiles and len(list(tiles)) > 0)
#
# for tile in tiles:
# _log.info("Found tile xy = %s", tile.xy)
# assert((tile.x == 148 or tile.x == 149) and tile.y == -36
# and tile.end_datetime_year == 2005
# and (ds in tile.datasets for ds in dataset_types)
# and tile.end_datetime_month in [m.value for m in MONTHS_BY_SEASON[Season.SUMMER]])
# def test_list_tiles_act_2005_ls578_summer(config=None):
#
# dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
#
# tiles = list_tiles_vector_file_as_list(vector_file="Mainlands.shp", vector_layer=0, vector_feature=4,
# acq_min=parse_date_min("2005"), acq_max=parse_date_max("2005"),
# satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
# dataset_types=dataset_types,
# months=MONTHS_BY_SEASON[Season.SUMMER],
# config=config)
#
# assert(tiles and len(list(tiles)) > 0)
#
# for tile in tiles:
# _log.info("Found tile xy = %s", tile.xy)
# assert((tile.x == 148 or tile.x == 149) and tile.y == -36
# and tile.end_datetime_year == 2005
# and (ds in tile.datasets for ds in dataset_types)
# and tile.end_datetime_month in [m.value for m in MONTHS_BY_SEASON[Season.SUMMER]])
| alex-ip/agdc | api/source/test/python/datacube/api/test_query.py | Python | bsd-3-clause | 12,587 |
# -*- coding: utf-8 -*-
import glob
import os
import polib
from django import VERSION as DJANGO_VERSION
from django.core.management.commands.makemessages import (
Command as OriginalMakeMessagesCommand)
from django.utils import translation
from django.utils.translation.trans_real import CONTEXT_SEPARATOR
class Command(OriginalMakeMessagesCommand):
    # In Django 1.7+, requires_model_validation is deprecated
# and the value of 'requires_system_checks' is used (which is defined in
# the original command). The attribute is completely removed in Django 1.9.
if DJANGO_VERSION < (1, 7):
requires_model_validation = False
can_import_settings = True
def handle_noargs(self, *args, **options):
from django.conf import settings
super(Command, self).handle_noargs(*args, **options)
locale = options.get('locale')
domain = options.get('domain')
verbosity = int(options.get('verbosity'))
process_all = options.get('all')
# now that we've built the regular po files, we mark any translations that are already translated elsewhere
# as obsolete. If there is already a translation in the local po file, we keep it.
localedir = os.path.abspath('locale')
locales = []
if locale is not None:
locales.append(locale)
elif process_all:
locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % localedir))
locales = [os.path.basename(l) for l in locale_dirs]
# monkeypatch settings to not include the project locale directory
localepaths = [os.path.normpath(path) for path in settings.LOCALE_PATHS]
# remove the locale we're currently writing to from the settings, so that we can check for existing translations
# NOT in this file
localepaths = [path for path in localepaths if not path == localedir]
settings.LOCALE_PATHS = list(localepaths)
missing = object()
for locale in locales:
translation.activate(locale)
catalog = translation.trans_real.catalog()
# catalog = trans.translation(locale)
# catalog = translation.trans_real.translation._fetch(locale)
# catalog._fallback = False
if verbosity > 0:
self.stdout.write("cleaning translations for language %s " % locale)
if locale in ['en', 'en-us']:
self.stdout.write(" (unreliable because %s is usually not translated) " % locale)
basedir = os.path.join(localedir, locale, 'LC_MESSAGES')
pofile = os.path.join(basedir, '%s.po' % domain)
mofile = os.path.join(basedir, '%s.mo' % domain)
po = polib.pofile(pofile)
obsolete_count = 0
for entry in po:
# if entry.msgid_plural and locale == 'de': import ipdb; ipdb.set_trace()
# if entry.msgid == 'one translation' and locale == 'de': import ipdb; ipdb.set_trace()
context = entry.msgctxt or None
if entry.msgid_plural:
if context:
msg = catalog._catalog.get((u"%s%s%s" % (context, CONTEXT_SEPARATOR, entry.msgid), True), missing)
else:
msg = catalog._catalog.get((entry.msgid, True), missing)
else:
if context:
msg = catalog._catalog.get(u"%s%s%s" % (context, CONTEXT_SEPARATOR, entry.msgid), missing)
else:
msg = catalog._catalog.get(entry.msgid, missing)
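                # Note (our reading of Python's gettext catalogs, not a comment
                # from the original author): singular entries are keyed by the
                # msgid string, optionally prefixed with "<context>\x04", while
                # plural entries are keyed by (msgid, plural_index); the True in
                # the lookups above compares equal to the index 1.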
                is_already_translated_elsewhere = msg is not missing
if not entry.msgstr and is_already_translated_elsewhere:
entry.obsolete = 1
obsolete_count += 1
if verbosity > 0:
self.stdout.write(".")
po.save(pofile)
# po.save_as_mofile(mofile) # should be done by regular compilemessages
self.stdout.write(u" marked %s obsolete translations\n" % obsolete_count)
| divio/django-commontranslations | django_commontranslations/management/commands/makemessages_unique.py | Python | bsd-3-clause | 4,136 |
from distutils.core import setup
import py2exe
setup(console=['server.py'])
| mbeloshitsky/syslog2eventlog | setup.py | Python | bsd-3-clause | 77 |
import logging; logger = logging.getLogger("morse." + __name__)
import socket
import select
import json
import morse.core.middleware
from functools import partial
from morse.core import services
class MorseSocketServ:
def __init__(self, port, component_name):
# List of socket clients
self._client_sockets = []
self._message_size = 1024
self._component_name = component_name
self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._server.bind((str(socket.INADDR_ANY), port))
self._server.listen(1)
logger.info("Socket Mw Server now listening on port " + str(port) + \
" for component " + str(component_name) + ".")
def __del__(self):
""" Terminate the ports used to accept requests """
if self._client_sockets:
logger.info("Closing client sockets...")
for s in self._client_sockets:
s.close()
if self._server:
logger.info("Shutting down connections to server...")
self._server.shutdown(socket.SHUT_RDWR)
logger.info("Closing socket server...")
self._server.close()
del self._server
def main_export(self, encode, component_instance):
sockets = self._client_sockets + [self._server]
try:
inputready, outputready, exceptready = select.select(sockets, sockets, [], 0)
except select.error:
pass
except socket.error:
pass
if self._server in inputready:
sock, addr = self._server.accept()
self._client_sockets.append(sock)
if outputready != []:
message = encode(component_instance)
for o in outputready:
try:
o.send(message)
except socket.error:
self.close_socket(o)
def main_read(self, decode, component_instance):
sockets = self._client_sockets + [self._server]
try:
inputready, outputready, exceptready = select.select(sockets, [], [], 0)
except select.error:
pass
except socket.error:
pass
for i in inputready:
if i == self._server:
sock, addr = self._server.accept()
if self._client_sockets != []:
logger.warning("More than one clients for an actuator!!")
self._client_sockets.append(sock)
else:
try:
msg = i.recv(self._message_size)
logger.debug("received msg %s" % msg)
if msg == b'':
self.close_socket(i)
else:
component_instance.local_data = decode(msg)
except socket.error as detail:
self.close_socket(i)
def close_socket(self, sock):
self._client_sockets.remove(sock)
try:
sock.close()
except socket.error as error_info:
logger.warning("Socket error catched while closing: " + str(error_info))
class MorseSocketClass(morse.core.middleware.MorseMiddlewareClass):
""" External communication using sockets. """
def __init__(self):
""" Initialize the socket connections """
# Call the constructor of the parent class
super(self.__class__,self).__init__()
# port -> MorseSocketServ
self._server_dict = {}
# component name (string) -> Port (int)
self._component_nameservice = {}
self._base_port = 60000
# Register two special services in the socket service manager:
# TODO To use a new special component instead of 'simulation',
# uncomment the line :-)
# bge.logic.morse_services.register_request_manager_mapping("streams", "SocketRequestManager")
services.do_service_registration(self.list_streams, 'simulation')
services.do_service_registration(self.get_stream_port, 'simulation')
services.do_service_registration(self.get_all_stream_ports, 'simulation')
def list_streams(self):
""" List all publish streams.
"""
return list(self._component_nameservice.keys())
def get_stream_port(self, name):
""" Get stream port for stream name.
"""
port = -1
try:
port = self._component_nameservice[name]
except KeyError:
pass
return port
def get_all_stream_ports(self):
""" Get stream ports for all streams.
"""
return self._component_nameservice
def register_component(self, component_name, component_instance, mw_data):
""" Open the port used to communicate by the specified component.
"""
# Create a socket server for this component
serv = MorseSocketServ(self._base_port, component_name)
self._server_dict[self._base_port] = serv
self._component_nameservice[component_name] = self._base_port
self._base_port = self._base_port + 1
# Extract the information for this middleware
# This will be tailored for each middleware according to its needs
function_name = mw_data[1]
fun = self._check_function_exists(function_name)
if fun != None:
# Choose what to do, depending on the function being used
# Data read functions
if function_name == "read_message":
component_instance.input_functions.append(partial(MorseSocketServ.main_read, serv, fun))
# Data write functions
elif function_name == "post_message":
component_instance.output_functions.append(partial(MorseSocketServ.main_export, serv, fun))
# If the function is external and has already been loaded before
else:
                # Pass the generated server along via mw_data
mw_data.append(serv)
self._add_method(mw_data, component_instance)
else:
            # Pass the generated server along via mw_data
mw_data.append(serv)
self._add_method(mw_data, component_instance)
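    # Illustrative mw_data binding (an assumption about how this is configured
    # elsewhere, e.g. in a component configuration file, not taken from this
    # module):
    #   ['morse.middleware.socket_mw', 'post_message']
    # mw_data[1] selects either the built-in 'post_message'/'read_message'
    # handlers below or an external function loaded via self._add_method().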
def post_message(self, component_instance):
return (json.dumps(component_instance.local_data) + '\n').encode()
def read_message(self, msg):
return json.loads(msg.decode('utf-8'))
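    # Wire-format sketch (inferred from post_message/read_message above): each
    # outgoing message is one JSON document followed by a newline, for example
    # b'{"x": 0.5, "y": 1.2, "z": 0.0}\n' for a hypothetical pose component.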
def print_open_sockets(self):
""" Display a list of all currently opened sockets."""
logger.info("Socket Mid: Currently opened sockets:")
        for port, serv in self._server_dict.items():
            logger.info("  - Port '{0}' = '{1}'".format(port, serv))
| Arkapravo/morse-0.6 | src/morse/middleware/socket_mw.py | Python | bsd-3-clause | 6,797 |
import unittest
from autosklearn.pipeline.components.classification.extra_trees import \
ExtraTreesClassifier
from autosklearn.pipeline.util import _test_classifier, \
_test_classifier_iterative_fit, _test_classifier_predict_proba
import numpy as np
import sklearn.metrics
import sklearn.ensemble
class ExtraTreesComponentTest(unittest.TestCase):
def test_default_configuration(self):
for i in range(10):
predictions, targets = \
_test_classifier(ExtraTreesClassifier)
self.assertAlmostEqual(0.95999999999999996,
sklearn.metrics.accuracy_score(targets, predictions))
def test_default_configuration_predict_proba(self):
for i in range(10):
predictions, targets = \
_test_classifier_predict_proba(ExtraTreesClassifier)
self.assertAlmostEqual(0.12052046298054782,
sklearn.metrics.log_loss(
targets, predictions))
def test_default_configuration_sparse(self):
for i in range(10):
predictions, targets = \
_test_classifier(ExtraTreesClassifier, sparse=True)
self.assertAlmostEqual(0.71999999999999997,
sklearn.metrics.accuracy_score(targets,
predictions))
def test_default_configuration_iterative_fit(self):
for i in range(10):
predictions, targets = \
_test_classifier_iterative_fit(ExtraTreesClassifier)
self.assertAlmostEqual(0.93999999999999995,
sklearn.metrics.accuracy_score(targets,
predictions))
def test_default_configuration_binary(self):
for i in range(10):
predictions, targets = \
_test_classifier(ExtraTreesClassifier, make_binary=True)
self.assertAlmostEqual(1,
sklearn.metrics.accuracy_score(targets,
predictions))
def test_default_configuration_multilabel(self):
for i in range(10):
predictions, targets = \
_test_classifier(ExtraTreesClassifier, make_multilabel=True)
self.assertAlmostEqual(0.97060428849902536,
sklearn.metrics.average_precision_score(
targets, predictions))
def test_default_configuration_predict_proba_multilabel(self):
for i in range(10):
predictions, targets = \
_test_classifier_predict_proba(ExtraTreesClassifier,
make_multilabel=True)
self.assertEqual(predictions.shape, ((50, 3)))
self.assertAlmostEqual(0.98976738180772728,
sklearn.metrics.average_precision_score(
targets, predictions))
def test_target_algorithm_multioutput_multiclass_support(self):
cls = sklearn.ensemble.ExtraTreesClassifier()
X = np.random.random((10, 10))
y = np.random.randint(0, 1, size=(10, 10))
# Running this without an exception is the purpose of this test!
cls.fit(X, y) | hmendozap/auto-sklearn | test/test_pipeline/components/classification/test_extra_trees.py | Python | bsd-3-clause | 3,415 |
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El, time, random
n0 = n1 = 50
numRowsB = 5
numRHS = 1
display = False
output = False
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
# NOTE: Increasing the magnitudes of the off-diagonal entries by an order of
# magnitude makes the condition number vastly higher.
def FD2D(N0,N1):
A = El.DistSparseMatrix()
height = N0*N1
width = N0*N1
A.Resize(height,width)
localHeight = A.LocalHeight()
A.Reserve(6*localHeight)
for sLoc in xrange(localHeight):
s = A.GlobalRow(sLoc)
x0 = s % N0
x1 = s / N0
A.QueueLocalUpdate( sLoc, s, 11 )
if x0 > 0:
A.QueueLocalUpdate( sLoc, s-1, -1 )
if x0+1 < N0:
A.QueueLocalUpdate( sLoc, s+1, 2 )
if x1 > 0:
A.QueueLocalUpdate( sLoc, s-N0, -3 )
if x1+1 < N1:
A.QueueLocalUpdate( sLoc, s+N0, 4 )
# The dense last column
A.QueueLocalUpdate( sLoc, width-1, -10/height );
A.ProcessQueues()
return A
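# Stencil sketch for an interior grid point s = x0 + N0*x1 (our summary of the
# QueueLocalUpdate calls above): 11 on the diagonal, -1 and 2 for the x0-1/x0+1
# neighbours, -3 and 4 for the x1-1/x1+1 neighbours, plus a dense last column
# filled with -10/height, which is what makes the matrix only "mostly" sparse.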
def Constraints(numRows,N0,N1):
B = El.DistSparseMatrix()
El.Zeros( B, numRows, N0*N1 )
localHeight = B.LocalHeight()
B.Reserve( localHeight*N0*N1 )
for sLoc in xrange(localHeight):
s = B.GlobalRow(sLoc)
for j in xrange(N0*N1):
B.QueueLocalUpdate( sLoc, j, random.uniform(0,1) )
B.ProcessQueues()
return B
A = FD2D(n0,n1)
B = Constraints(numRowsB,n0,n1)
if display:
El.Display( A, "A" )
El.Display( B, "B" )
if output:
El.Print( A, "A" )
El.Print( B, "B" )
C = El.DistMultiVec()
D = El.DistMultiVec()
El.Uniform( C, A.Height(), numRHS )
El.Uniform( D, B.Height(), numRHS )
if display:
El.Display( C, "C" )
El.Display( D, "D" )
if output:
El.Print( C, "C" )
El.Print( D, "D" )
CNorm = El.FrobeniusNorm( C )
DNorm = El.FrobeniusNorm( D )
baseAlpha = 1e-4
ctrl = El.LeastSquaresCtrl_d()
ctrl.alpha = baseAlpha
ctrl.progress = True
ctrl.equilibrate = True
ctrl.qsdCtrl.relTol = 1e-10
ctrl.qsdCtrl.relTolRefine = 1e-12
ctrl.qsdCtrl.progress = True
startLSE = time.clock()
X = El.LSE(A,B,C,D,ctrl)
endLSE = time.clock()
if worldRank == 0:
print "LSE time:", endLSE-startLSE, "seconds"
if display:
El.Display( X, "X" )
if output:
El.Print( X, "X" )
E = El.DistMultiVec()
El.Copy( C, E )
El.SparseMultiply( El.NORMAL, -1., A, X, 1., E )
residNorm = El.FrobeniusNorm( E )
if display:
El.Display( E, "C - A X" )
if output:
El.Print( E, "C - A X" )
if worldRank == 0:
print "|| C - A X ||_F / || C ||_F =", residNorm/CNorm
El.Copy( D, E )
El.SparseMultiply( El.NORMAL, -1., B, X, 1., E )
equalNorm = El.FrobeniusNorm( E )
if display:
El.Display( E, "D - B X" )
if output:
El.Print( E, "D - B X" )
if worldRank == 0:
print "|| D - B X ||_F / || D ||_F =", equalNorm/DNorm
# Now try solving a weighted least squares problem
# (as lambda -> infinity, the exact solution converges to that of LSE)
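# Sketch of what SolveWeighted sets up below (our reading of the code, not a
# statement from Elemental's documentation): for a penalty lambda it solves the
# stacked least-squares problem
#   minimize || [A; lambda*B] x - [c; lambda*d] ||_2
# whose solution approaches the equality-constrained LSE solution as
# lambda -> infinity.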
def SolveWeighted(A,B,C,D,lambd):
BScale = El.DistSparseMatrix()
El.Copy( B, BScale )
El.Scale( lambd, BScale )
DScale = El.DistMultiVec()
El.Copy( D, DScale )
El.Scale( lambd, DScale )
AEmb = El.VCat(A,BScale)
CEmb = El.VCat(C,DScale)
if output:
El.Print( AEmb, "AEmb" )
ctrl.alpha = baseAlpha
if worldRank == 0:
print "lambda=", lambd, ": ctrl.alpha=", ctrl.alpha
X=El.LeastSquares(AEmb,CEmb,ctrl)
El.Copy( C, E )
El.SparseMultiply( El.NORMAL, -1., A, X, 1., E )
residNorm = El.FrobeniusNorm( E )
if display:
El.Display( E, "C - A X" )
if output:
El.Print( E, "C - A X" )
if worldRank == 0:
print "lambda=", lambd, ": || C - A X ||_F / || C ||_F =", residNorm/CNorm
El.Copy( D, E )
El.SparseMultiply( El.NORMAL, -1., B, X, 1., E )
equalNorm = El.FrobeniusNorm( E )
if display:
El.Display( E, "D - B X" )
if output:
El.Print( E, "D - B X" )
if worldRank == 0:
print "lambda=", lambd, ": || D - B X ||_F / || D ||_F =", equalNorm/DNorm
SolveWeighted(A,B,C,D,1)
SolveWeighted(A,B,C,D,10)
SolveWeighted(A,B,C,D,100)
SolveWeighted(A,B,C,D,1000)
SolveWeighted(A,B,C,D,10000)
SolveWeighted(A,B,C,D,100000)
# Require the user to press Enter before exiting when running on a single process
El.Finalize()
if worldSize == 1:
raw_input('Press Enter to exit')
| justusc/Elemental | examples/interface/LSE.py | Python | bsd-3-clause | 4,375 |
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-Screenshot',
'Author': ['@obscuresec', '@harmj0y'],
'Description': ('Takes a screenshot of the current desktop and '
'returns the output as a .PNG.'),
'Background' : False,
'OutputExtension' : 'png',
'NeedsAdmin' : False,
'OpsecSafe' : True,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/mattifestation/PowerSploit/blob/master/Exfiltration/Get-TimedScreenshot.ps1'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Ratio' : {
'Description' : "JPEG Compression ratio: 1 to 100.",
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
script = """
function Get-Screenshot
{
param
(
[Parameter(Mandatory = $False)]
[string]
$Ratio
)
Add-Type -Assembly System.Windows.Forms;
$ScreenBounds = [Windows.Forms.SystemInformation]::VirtualScreen;
$ScreenshotObject = New-Object Drawing.Bitmap $ScreenBounds.Width, $ScreenBounds.Height;
$DrawingGraphics = [Drawing.Graphics]::FromImage($ScreenshotObject);
$DrawingGraphics.CopyFromScreen( $ScreenBounds.Location, [Drawing.Point]::Empty, $ScreenBounds.Size);
$DrawingGraphics.Dispose();
$ms = New-Object System.IO.MemoryStream;
if ($Ratio) {
try {
$iQual = [convert]::ToInt32($Ratio);
} catch {
$iQual=80;
}
if ($iQual -gt 100){
$iQual=100;
} elseif ($iQual -lt 1){
$iQual=1;
}
$encoderParams = New-Object System.Drawing.Imaging.EncoderParameters;
$encoderParams.Param[0] = New-Object Drawing.Imaging.EncoderParameter ([System.Drawing.Imaging.Encoder]::Quality, $iQual);
$jpegCodec = [Drawing.Imaging.ImageCodecInfo]::GetImageEncoders() | Where-Object { $_.FormatDescription -eq \"JPEG\" }
$ScreenshotObject.save($ms, $jpegCodec, $encoderParams);
} else {
$ScreenshotObject.save($ms, [Drawing.Imaging.ImageFormat]::Png);
}
$ScreenshotObject.Dispose();
[convert]::ToBase64String($ms.ToArray());
}
Get-Screenshot"""
if self.options['Ratio']['Value']:
if self.options['Ratio']['Value']!='0':
self.info['OutputExtension'] = 'jpg'
else:
self.options['Ratio']['Value'] = ''
self.info['OutputExtension'] = 'png'
else:
self.info['OutputExtension'] = 'png'
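        # Append any remaining options as PowerShell parameters; purely as an
        # illustration, a Ratio of '50' turns the trailing call into
        # "Get-Screenshot -Ratio 50" and the output extension above becomes .jpg.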
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
return script
| pierce403/EmpirePanel | lib/modules/collection/screenshot.py | Python | bsd-3-clause | 3,876 |
# -*- coding: utf-8 -*-
__docformat__="restructuredtext"
from rest import RestClient, Result, ResponseFormats
from datetime import datetime
class NeocortexRestClient(object):
BASE_URL = "http://api.meaningtool.com/0.2/neocortex"
__builder__ = None
class Builder(RestClient):
_functions = {}
_params = {}
_input = None
_format = None
_tree_key = None
def format(self, value):
self._format = value
return self
def input(self, text):
self._input = self._params["input"] = text
return self
def categories(self, tree_key=None, additionals=None):
params = dict(additionals or [])
if tree_key is not None:
params.update(dict(tree_key=tree_key))
self._functions["categories"] = params
return self
def keywords(self):
self._functions["keywords"] = True
return self
def entities(self):
self._functions["entities"] = True
return self
def language(self):
self._functions["language"] = True
return self
def meaningfy(self):
fs = []
for k,v in self._functions.items():
kk = k
if isinstance(v, dict):
if v.has_key("additionals"):
for a in v["additionals"]:
kk = "%s+%s" % (kk, a)
if v.has_key("tree_key") and v["tree_key"] is not None and kk == 'categories':
self._params["tree_key"] = v["tree_key"]
fs.append(kk)
fs = ";".join(fs)
url = "%s.%s" % (fs, self._format)
try:
res = self.post(url, self._params, response_format=self._format)
except Exception, e:
raise e
finally:
self._reset()
return res
def _reset(self):
self._functions = {}
self._params = {}
def __init__(self, api_key, base_url=None):
self.api_key = api_key
self.BASE_URL = base_url or self.BASE_URL
def get_builder(self):
if self.__builder__ is None:
self.__builder__ = NeocortexRestClient.Builder(self.BASE_URL, self.api_key).format(ResponseFormats.JSON)
self.__builder__._reset()
return self.__builder__
def categories(self, input, tree_key=None, additionals=None):
builder = self.get_builder()
return builder.format(ResponseFormats.JSON).input(input).categories(tree_key, additionals).meaningfy().payload["categories"]
def keywords(self, input):
builder = self.get_builder()
return builder.format(ResponseFormats.JSON).input(input).keywords().meaningfy().payload["keywords"]
def entities(self, input):
builder = self.get_builder()
return builder.format(ResponseFormats.JSON).input(input).entities().meaningfy().payload["entities"]
def language(self, input):
builder = self.get_builder()
return builder.format(ResponseFormats.JSON).input(input).language().meaningfy().payload["language"]
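# Minimal usage sketch (hypothetical API key, tree key and input text, not from
# the original module):
#   client = NeocortexRestClient("my-api-key")
#   print client.keywords("Some text to analyse")
#   print client.categories("Some text to analyse", tree_key="my-tree-key")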
| popego/neocortex-api-python | src/neocortex/client.py | Python | bsd-3-clause | 3,380 |
# -*- coding:utf-8 -*-
from django import test
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.core.management import call_command
from mock import patch
from nose.tools import eq_
class BcryptTests(test.TestCase):
def setUp(self):
super(BcryptTests, self).setUp()
User.objects.create_user('john', '[email protected]',
password='123456')
User.objects.create_user('jane', '[email protected]',
password='abc')
User.objects.create_user('jude', '[email protected]',
password=u'abcéäêëôøà')
def test_bcrypt_used(self):
"""Make sure bcrypt was used as the hash."""
eq_(User.objects.get(username='john').password[:7], 'bcrypt$')
eq_(User.objects.get(username='jane').password[:7], 'bcrypt$')
eq_(User.objects.get(username='jude').password[:7], 'bcrypt$')
def test_bcrypt_auth(self):
"""Try authenticating."""
assert authenticate(username='john', password='123456')
assert authenticate(username='jane', password='abc')
assert not authenticate(username='jane', password='123456')
assert authenticate(username='jude', password=u'abcéäêëôøà')
assert not authenticate(username='jude', password=u'çççbbbààà')
@patch.object(settings._wrapped, 'HMAC_KEYS', dict())
def test_nokey(self):
"""With no HMAC key, no dice."""
assert not authenticate(username='john', password='123456')
assert not authenticate(username='jane', password='abc')
assert not authenticate(username='jane', password='123456')
assert not authenticate(username='jude', password=u'abcéäêëôøà')
assert not authenticate(username='jude', password=u'çççbbbààà')
def test_password_from_django14(self):
"""Test that a password generated by django_sha2 with django 1.4 is
recognized and changed to a 1.3 version"""
# We can't easily call 1.4's hashers so we hardcode the passwords as
# returned with the specific salts and hmac_key in 1.4.
prefix = 'bcrypt2011_01_01$2a$12$'
suffix = '$2011-01-01'
raw_hashes = {
'john': '02CfJWdVwLK80jlRe/Xx1u8sTHAR0JUmKV9YB4BS.Os4LK6nsoLie',
'jane': '.ipDt6gRL3CPkVH7FEyR6.8YXeQFXAMyiX3mXpDh4YDBonrdofrcG',
'jude': '6Ol.vgIFxMQw0LBhCLtv7OkV.oyJjen2GVMoiNcLnbsljSfYUkQqe',
}
u = User.objects.get(username="john")
django14_style_password = "%s%s%s" % (prefix, raw_hashes['john'],
suffix)
u.password = django14_style_password
assert u.check_password('123456')
eq_(u.password[:7], 'bcrypt$')
u = User.objects.get(username="jane")
django14_style_password = "%s%s%s" % (prefix, raw_hashes['jane'],
suffix)
u.password = django14_style_password
assert u.check_password('abc')
eq_(u.password[:7], 'bcrypt$')
u = User.objects.get(username="jude")
django14_style_password = "%s%s%s" % (prefix, raw_hashes['jude'],
suffix)
u.password = django14_style_password
assert u.check_password(u'abcéäêëôøà')
eq_(u.password[:7], 'bcrypt$')
def test_hmac_autoupdate(self):
"""Auto-update HMAC key if hash in DB is outdated."""
# Get HMAC key IDs to compare
old_key_id = max(settings.HMAC_KEYS.keys())
new_key_id = '2020-01-01'
# Add a new HMAC key
new_keys = settings.HMAC_KEYS.copy()
new_keys[new_key_id] = 'a_new_key'
with patch.object(settings._wrapped, 'HMAC_KEYS', new_keys):
# Make sure the database has the old key ID.
john = User.objects.get(username='john')
eq_(john.password.rsplit('$', 1)[1], old_key_id)
# Log in.
assert authenticate(username='john', password='123456')
# Make sure the DB now has a new password hash.
john = User.objects.get(username='john')
eq_(john.password.rsplit('$', 1)[1], new_key_id)
def test_rehash(self):
"""Auto-upgrade to stronger hash if needed."""
# Set a sha256 hash for a user. This one is "123".
john = User.objects.get(username='john')
john.password = ('sha256$7a49025f024ad3dcacad$aaff1abe5377ffeab6ccc68'
'709d94c1950edf11f02d8acb83c75d8fcac1ebeb1')
john.save()
# The hash should be sha256 now.
john = User.objects.get(username='john')
eq_(john.password.split('$', 1)[0], 'sha256')
# Log in (should rehash transparently).
assert authenticate(username='john', password='123')
# Make sure the DB now has a bcrypt hash.
john = User.objects.get(username='john')
eq_(john.password.split('$', 1)[0], 'bcrypt')
# Log in again with the new hash.
assert authenticate(username='john', password='123')
def test_management_command(self):
"""Test password update flow via management command, from default
Django hashes, to hardened hashes, to bcrypt on log in."""
john = User.objects.get(username='john')
john.password = 'sha1$3356f$9fd40318e1de9ecd3ab3a5fe944ceaf6a2897eef'
john.save()
# The hash should be sha1 now.
john = User.objects.get(username='john')
eq_(john.password.split('$', 1)[0], 'sha1')
# Simulate calling management command
call_command('strengthen_user_passwords')
# The hash should be 'hh' now.
john = User.objects.get(username='john')
eq_(john.password.split('$', 1)[0], 'hh')
# Logging in will convert the hardened hash to bcrypt.
assert authenticate(username='john', password='123')
# Make sure the DB now has a bcrypt hash.
john = User.objects.get(username='john')
eq_(john.password.split('$', 1)[0], 'bcrypt')
# Log in again with the new hash.
assert authenticate(username='john', password='123')
| fwenzel/django-sha2 | test/django13/tests/test_bcrypt.py | Python | bsd-3-clause | 6,294 |
from getpass import getuser
from Job import Job
from datetime import datetime
import os.path
import time
import re
import Database
class Upload( object ):
@classmethod
def CreateUpload( cls, filename=None ):
if not filename:
filename = Upload.defaultNewFilename()
id = Database.executeScalar('elc_CreateUpload',getuser(),filename)
return Upload(id)
@classmethod
def list( cls ):
uploads=[]
for r in Database.execute('select upl_id, created_by, creation_time, filename, n_insert, n_delete from elc_GetUploadDetails(NULL)'):
created_date = r[2].strftime('%d-%b-%Y')
uploads.append(dict(
upl_id=r[0],
created_by=r[1],
creation_time=r[2],
created_date=created_date,
filename=r[3],
n_insert=r[4],
n_delete=r[5]
))
return uploads
def __init__( self, id ):
r = Database.executeRow('select created_by, creation_time, filename, n_insert, n_delete from elc_GetUploadDetails(%s)',id)
self._id = id
self._created_by = r[0]
self._creation_time = r[1]
self._filename = r[2]
self._n_insert = r[3]
self._n_delete = r[4]
@classmethod
def defaultNewFilename( cls, upload_date=None ):
if not isinstance(upload_date,datetime):
upload_date = datetime.now()
return 'sad_'+upload_date.strftime('%d%b%y')+'.sql'
def id( self ): return self._id
def created_by( self ): return self._created_by
def creation_time( self ): return self._creation_time
def filename( self ): return self._filename
def n_insert( self ): return self._n_insert
def n_delete( self ): return self._n_delete
def defaultFilename( self ):
return Upload.defaultNewFilename( self._creation_time )
def addJob( self, job ):
if type(job) == int:
job = Job(job)
job.addToUpload( self )
job.save()
def writeSql( self, filename ):
sqlfile = open(filename,'w')
basename = os.path.splitext(os.path.basename(filename))[0]
txtfilename = os.path.splitext(filename)[0] + '.txt'
if txtfilename == filename:
txtfilename = txtfilename + '.txt'
txtfile = open(txtfilename,'w')
# Header
sqlfile.write("-- Bulk update of crs_street_address\n")
sqlfile.write("-- Upload id: %d\n" % (self._id,))
sqlfile.write("-- Created by: %s\n" % (self._created_by,))
sqlfile.write("-- Created on: %s\n" %
(self._creation_time.strftime('%d %B %Y at %H:%M'),))
sqlfile.write("\n")
# Insertions
sqlfile.write("\n")
nins = 0
for r in Database.execute('SELECT housenumber, range_low, range_high, status, rcl_id, rna_id, wkt, sufi from elc_UploadNewAddresses(%s)',self._id):
m = re.search(r"(\d+)(\.?\d*)\s+(\-\d+\.?\d*)",r[6])
wkt = '1 POINT(%d%s %s)'%(int(m.group(1))-160,m.group(2),m.group(3))
range_high = r[2] if r[2] != None else 'null'
if r[3] == "DELE": status = "HIST"
else: status = "CURR"
if r[3] == 'NEWA': sufi = 'null'
else: sufi = r[7]
unofficial_flag = "N"
sqlfile.write('''
INSERT INTO crs_street_address_stage(house_number, range_low, range_high, status, unofficial_flag, rcl_id, rna_id, shape, sufi) VALUES
('%s',%s,%s,'%s','%s',%d,%d,'%s', %s);''' % (r[0],r[1], range_high,status,unofficial_flag,r[4],r[5],wkt, sufi))
nins += 1
sqlfile.write("\n")
sqlfile.write("\n")
sqlfile.write(" EXECUTE PROCEDURE cp_cel_AddressStageUpdate();\n")
sqlfile.write("\n")
sqlfile.close()
txtfile.write('''
FTP the attached "%s" file to the production database server (crsprd1).
As the user "crsprd" run the script as follows:
sqf %s
The expected output is:
Database selected.
(constant)
Bulk insert of street addresses: id %d
1 row(s) retrieved.
1 row(s) inserted. ... repeated %d times
(constant)
Bulk update completed: id %d
1 row(s) retrieved.
Database closed.
''' % (basename,basename,self._id,nins,self._id))
txtfile.close()
Database.execute('elc_SetUploadFilename',self._id,basename)
| SPlanzer/AIMS | ElectoralAddress/Upload.py | Python | bsd-3-clause | 4,556 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 0, transform = "Anscombe", sigma = 0.0, exog_count = 20, ar_order = 12); | antoinecarme/pyaf | tests/artificial/transf_Anscombe/trend_PolyTrend/cycle_0/ar_12/test_artificial_1024_Anscombe_PolyTrend_0_12_20.py | Python | bsd-3-clause | 265 |
import csv
import osgeo.ogr
from osgeo import ogr, osr
EPSG_LAT_LON = 4326
def read_tazs_from_csv(csv_zone_locs_fname):
taz_tuples = []
tfile = open(csv_zone_locs_fname, 'rb')
treader = csv.reader(tfile, delimiter=',', quotechar="'")
for ii, row in enumerate(treader):
if ii == 0: continue
else:
taz_tuple = (row[0], row[1], row[2])
taz_tuples.append(taz_tuple)
return taz_tuples
def read_tazs_from_shp(shp_zone_locs_fname):
taz_tuples = []
tazs_shp = osgeo.ogr.Open(shp_zone_locs_fname)
tazs_layer = tazs_shp.GetLayer(0)
src_srs = tazs_layer.GetSpatialRef()
target_srs = osr.SpatialReference()
target_srs.ImportFromEPSG(EPSG_LAT_LON)
transform_to_lat_lon = osr.CoordinateTransformation(src_srs,
target_srs)
for taz_feat in tazs_layer:
taz_id = taz_feat.GetField("N")
taz_geom = taz_feat.GetGeometryRef()
taz_geom.Transform(transform_to_lat_lon)
taz_lat = taz_geom.GetX()
taz_lon = taz_geom.GetY()
taz_tuples.append((taz_id, taz_lat, taz_lon))
taz_feat.Destroy()
tazs_shp.Destroy()
return taz_tuples
| PatSunter/pyOTPA | TAZs-OD-Matrix/taz_files.py | Python | bsd-3-clause | 1,176 |
from functools import wraps
from django.utils.translation import ugettext as _
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth.views import login
from django.contrib.auth import REDIRECT_FIELD_NAME
def staff_member_required(backoffice):
def decorate(view_func):
"""
Decorator for views that checks that the user is logged in and is a staff
member, displaying the login page if necessary.
"""
@wraps(view_func)
def _checklogin(request, *args, **kwargs):
if request.user.is_active and request.user.is_staff:
# The user is valid. Continue to the admin page.
return view_func(request, *args, **kwargs)
assert hasattr(request, 'session'), "Advanced Reports Backoffice requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."
defaults = {
'template_name': backoffice.login_template,
'authentication_form': AdminAuthenticationForm,
'extra_context': {
'backoffice': backoffice,
REDIRECT_FIELD_NAME: request.get_full_path(),
},
}
return login(request, **defaults)
return _checklogin
return decorate
| vikingco/django-advanced-reports | advanced_reports/backoffice/decorators.py | Python | bsd-3-clause | 1,384 |
import contextlib
import os
import shutil
import tempfile
import numpy
from PIL import Image
from kiva.fonttools import Font
from kiva.constants import MODERN
class DrawingTester(object):
""" Basic drawing tests for graphics contexts.
"""
def setUp(self):
self.directory = tempfile.mkdtemp()
self.filename = os.path.join(self.directory, 'rendered')
self.gc = self.create_graphics_context(300, 300)
self.gc.clear()
self.gc.set_stroke_color((1.0, 0.0, 0.0))
self.gc.set_fill_color((1.0, 0.0, 0.0))
self.gc.set_line_width(5)
def tearDown(self):
del self.gc
shutil.rmtree(self.directory)
def test_line(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.move_to(107, 204)
self.gc.line_to(107, 104)
self.gc.stroke_path()
def test_rectangle(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.move_to(107, 104)
self.gc.line_to(107, 184)
self.gc.line_to(187, 184)
self.gc.line_to(187, 104)
self.gc.line_to(107, 104)
self.gc.stroke_path()
def test_rect(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.rect(0, 0, 200, 200)
self.gc.stroke_path()
def test_circle(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
self.gc.stroke_path()
def test_quarter_circle(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, numpy.pi / 2)
self.gc.stroke_path()
def test_text(self):
with self.draw_and_check():
font = Font(family=MODERN)
font.size = 24
self.gc.set_font(font)
self.gc.set_text_position(23, 67)
self.gc.show_text("hello kiva")
def test_circle_fill(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
self.gc.fill_path()
def test_star_fill(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.move_to(100, 100)
self.gc.line_to(150, 200)
self.gc.line_to(200, 100)
self.gc.line_to(100, 150)
self.gc.line_to(200, 150)
self.gc.line_to(100, 100)
self.gc.fill_path()
def test_star_eof_fill(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.move_to(100, 100)
self.gc.line_to(150, 200)
self.gc.line_to(200, 100)
self.gc.line_to(100, 150)
self.gc.line_to(200, 150)
self.gc.line_to(100, 100)
self.gc.eof_fill_path()
def test_circle_clip(self):
with self.draw_and_check():
self.gc.clip_to_rect(150, 150, 100, 100)
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
self.gc.fill_path()
def test_text_clip(self):
with self.draw_and_check():
self.gc.clip_to_rect(23, 77, 100, 23)
font = Font(family=MODERN)
font.size = 24
self.gc.set_font(font)
self.gc.set_text_position(23, 67)
self.gc.show_text("hello kiva")
def test_star_clip(self):
with self.draw_and_check():
self.gc.begin_path()
self.gc.move_to(100, 100)
self.gc.line_to(150, 200)
self.gc.line_to(200, 100)
self.gc.line_to(100, 150)
self.gc.line_to(200, 150)
self.gc.line_to(100, 100)
self.gc.close_path()
self.gc.clip()
self.gc.begin_path()
self.gc.arc(150, 150, 100, 0.0, 2 * numpy.pi)
self.gc.fill_path()
#### Required methods ####################################################
@contextlib.contextmanager
def draw_and_check(self):
""" A context manager to check the result.
"""
raise NotImplementedError()
def create_graphics_context(self, width, length):
""" Create the desired graphics context
"""
raise NotImplementedError()
class DrawingImageTester(DrawingTester):
""" Basic drawing tests for graphics contexts of gui toolkits.
"""
@contextlib.contextmanager
def draw_and_check(self):
yield
filename = "{0}.png".format(self.filename)
self.gc.save(filename)
self.assertImageSavedWithContent(filename)
def assertImageSavedWithContent(self, filename):
""" Load the image and check that there is some content in it.
"""
image = numpy.array(Image.open(filename))
# default is expected to be a totally white image
self.assertEqual(image.shape[:2], (300, 300))
if image.shape[2] == 3:
check = numpy.sum(image == [255, 0, 0], axis=2) == 3
elif image.shape[2] == 4:
check = numpy.sum(image == [255, 0, 0, 255], axis=2) == 4
else:
self.fail(
'Pixel size is not 3 or 4, but {0}'.format(image.shape[2]))
if check.any():
return
self.fail('The image looks empty, no red pixels where drawn')
| tommy-u/enable | kiva/tests/drawing_tester.py | Python | bsd-3-clause | 5,452 |
from django.shortcuts import get_object_or_404, render_to_response
from django.http import Http404, HttpResponseRedirect
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.contenttypes.models import ContentType
from basic.messages.models import Message, TO_STATUS_READ, TO_STATUS_DELETED, FROM_STATUS_DELETED
from basic.messages.forms import MessageForm
from basic.tools.baseconv import base62
@login_required
def message_list(request, mailbox=None, template_name='messages/message_list.html'):
"""
Returns a list of user messages.
Template:: ``messages/message_list.html``
Context:
message_list
List of Message objects
mailbox
String representing the current 'mailbox'
"""
if mailbox == 'sent':
message_list = Message.objects.sent(request.user)
elif mailbox == 'inbox':
message_list = Message.objects.new(request.user)
elif mailbox == 'trash':
message_list = Message.objects.trash(request.user)
else:
message_list = Message.objects.archive(request.user)
return render_to_response(template_name, {
'message_list': message_list,
'mailbox': mailbox or 'archive'
}, context_instance=RequestContext(request))
@login_required
def message_create(request, content_type_id=None, object_id=None,
template_name='messages/message_form.html'):
"""
Handles a new message and displays a form.
Template:: ``messages/message_form.html``
Context:
form
MessageForm object
"""
next = request.GET.get('next', None)
if request.GET.get('to', None):
to_user = get_object_or_404(User, username=request.GET['to'])
else:
to_user = None
if content_type_id and object_id:
content_type = ContentType.objects.get(pk=base62.to_decimal(content_type_id))
Model = content_type.model_class()
try:
related_object = Model.objects.get(pk=base62.to_decimal(object_id))
except ObjectDoesNotExist:
raise Http404, "The object ID was invalid."
else:
related_object = None
form = MessageForm(request.POST or None, initial={'to_user': to_user})
if form.is_valid():
message = form.save(commit=False)
if related_object:
message.object = related_object
message.from_user = request.user
message = form.save()
return HttpResponseRedirect(next or reverse('messages:messages'))
return render_to_response(template_name, {
'form': form,
'to_user': to_user,
'related_object': related_object,
'next': next,
}, context_instance=RequestContext(request))
def message_reply(request, object_id, template_name='messages/message_form.html'):
"""
Handles a reply to a specific message.
"""
original_message = get_object_or_404(Message, pk=object_id)
next = request.GET.get('next', None)
initial = {
'to_user': original_message.from_user,
'subject': 'Re: %s' % original_message.subject
}
form = MessageForm(request.POST or None, initial=initial)
if form.is_valid():
message = form.save(commit=False)
message.object = original_message.object
message.from_user = request.user
message = form.save()
return HttpResponseRedirect(next or reverse('messages:messages'))
return render_to_response(template_name, {
'form': form,
'message': original_message,
'next': next,
}, context_instance=RequestContext(request))
@login_required
def message_remove(request, object_id, template_name='messages/message_remove_confirm.html'):
"""
Remove a message.
"""
message = get_object_or_404(Message, pk=object_id)
next = request.GET.get('next', None)
if request.method == 'POST':
if message.to_user == request.user:
message.to_status = TO_STATUS_DELETED
else:
message.from_status = FROM_STATUS_DELETED
message.save()
return HttpResponseRedirect(next or reverse('messages:messages'))
return render_to_response(template_name, {
'message': message,
'next': next,
}, context_instance=RequestContext(request))
@login_required
def message_detail(request, object_id, template_name='messages/message_detail.html'):
"""
Return a message.
"""
message = get_object_or_404(Message, pk=object_id)
content_type = ContentType.objects.get_for_model(message)
thread_list = Message.objects.filter(object_id=message.object.pk, content_type=content_type).order_by('id')
if message.to_user == request.user:
message.to_status = TO_STATUS_READ
message.save()
return render_to_response(template_name, {
'message': message,
'thread_list': thread_list
}, context_instance=RequestContext(request))
| sedden/django-basic-apps | basic/messages/views.py | Python | bsd-3-clause | 5,111 |
def extractNotoriousOnlineBlogspotCom(item):
'''
Parser for 'notorious-online.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractNotoriousOnlineBlogspotCom.py | Python | bsd-3-clause | 569 |
import cx_Logging
import os
import sys
import threading
if len(sys.argv) > 1:
numThreads = int(sys.argv[1])
else:
numThreads = 5
if len(sys.argv) > 2:
numIterations = int(sys.argv[2])
else:
numIterations = 1000
def Run(threadNum):
cx_Logging.Debug("Thread-%d: starting", threadNum)
iterationsLeft = numIterations
while iterationsLeft > 0:
numFiles = len(os.listdir("."))
cx_Logging.Debug("Thread-%d: counted %d files, %d iterations left",
threadNum, numFiles, iterationsLeft)
iterationsLeft -= 1
cx_Logging.StartLogging("test_threading.log", level = cx_Logging.DEBUG,
maxFiles = 10, maxFileSize = 5 * 1024 * 1024, prefix = "[%i] %t")
cx_Logging.Debug("Testing logging with %s threads.", numThreads)
threads = []
for i in range(numThreads):
thread = threading.Thread(target = Run, args = (i + 1,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
| marhar/cx_OracleTools | cx_Logging/test/test_threading.py | Python | bsd-3-clause | 968 |
"""
Prototype demo:
python holoviews/ipython/convert.py Conversion_Example.ipynb | python
"""
import ast
from nbconvert.preprocessors import Preprocessor
def comment_out_magics(source):
"""
Utility used to make sure AST parser does not choke on unrecognized
magics.
"""
filtered = []
for line in source.splitlines():
if line.strip().startswith('%'):
filtered.append('# ' + line)
else:
filtered.append(line)
return '\n'.join(filtered)
def wrap_cell_expression(source, template='{expr}'):
"""
If a cell ends in an expression that could be displaying a HoloViews
object (as determined using the AST), wrap it with a given prefix
and suffix string.
If the cell doesn't end in an expression, return the source unchanged.
"""
cell_output_types = (ast.IfExp, ast.BoolOp, ast.BinOp, ast.Call,
ast.Name, ast.Attribute)
try:
node = ast.parse(comment_out_magics(source))
except SyntaxError:
return source
filtered = source.splitlines()
if node.body != []:
last_expr = node.body[-1]
if not isinstance(last_expr, ast.Expr):
pass # Not an expression
elif isinstance(last_expr.value, cell_output_types):
# CAREFUL WITH UTF8!
expr_end_slice = filtered[last_expr.lineno-1][:last_expr.col_offset]
expr_start_slice = filtered[last_expr.lineno-1][last_expr.col_offset:]
start = '\n'.join(filtered[:last_expr.lineno-1]
+ ([expr_end_slice] if expr_end_slice else []))
ending = '\n'.join(([expr_start_slice] if expr_start_slice else [])
+ filtered[last_expr.lineno:])
# BUG!! Adds newline for 'foo'; <expr>
return start + '\n' + template.format(expr=ending)
return source
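# Illustrative behaviour of wrap_cell_expression (hypothetical cell source, not
# an example from the original author): with template='display({expr})', the
# source "curve = hv.Curve(data)\ncurve" becomes
# "curve = hv.Curve(data)\ndisplay(curve)", while a cell that ends in an
# assignment or other statement is returned unchanged.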
def filter_magic(source, magic, strip=True):
"""
Given the source of a cell, filter out the given magic and collect
the lines using the magic into a list.
If strip is True, the IPython syntax part of the magic (e.g %magic
or %%magic) is stripped from the returned lines.
"""
filtered, magic_lines=[],[]
for line in source.splitlines():
if line.strip().startswith(magic):
magic_lines.append(line)
else:
filtered.append(line)
if strip:
magic_lines = [el.replace(magic,'') for el in magic_lines]
return '\n'.join(filtered), magic_lines
def strip_magics(source):
"""
Given the source of a cell, filter out all cell and line magics.
"""
filtered=[]
for line in source.splitlines():
if not line.startswith('%') or line.startswith('%%'):
filtered.append(line)
return '\n'.join(filtered)
def replace_line_magic(source, magic, template='{line}'):
"""
Given a cell's source, replace line magics using a formatting
template, where {line} is the string that follows the magic.
"""
filtered = []
for line in source.splitlines():
if line.strip().startswith(magic):
substitution = template.format(line=line.replace(magic, ''))
filtered.append(substitution)
else:
filtered.append(line)
return '\n'.join(filtered)
class OptsMagicProcessor(Preprocessor):
"""
Preprocessor to convert notebooks to Python source to convert use of
opts magic to use the util.opts utility instead.
"""
def preprocess_cell(self, cell, resources, index):
if cell['cell_type'] == 'code':
source = replace_line_magic(cell['source'], '%opts',
template='hv.util.opts({line!r})')
source, opts_lines = filter_magic(source, '%%opts')
if opts_lines:
# Escape braces e.g normalization options as they pass through format
template = 'hv.util.opts({options!r}, {{expr}})'.format(
options=' '.join(opts_lines).replace('{','{{').replace('}','}}'))
source = wrap_cell_expression(source, template)
cell['source'] = source
return cell, resources
def __call__(self, nb, resources): return self.preprocess(nb,resources)
class OutputMagicProcessor(Preprocessor):
"""
    Preprocessor used when converting notebooks to Python source: it rewrites
    uses of the output magic into calls to the util.output utility.
"""
def preprocess_cell(self, cell, resources, index):
if cell['cell_type'] == 'code':
source = replace_line_magic(cell['source'], '%output',
template='hv.util.output({line!r})')
source, output_lines = filter_magic(source, '%%output')
if output_lines:
template = 'hv.util.output({options!r}, {{expr}})'.format(
options=output_lines[-1])
source = wrap_cell_expression(source, template)
cell['source'] = source
return cell, resources
def __call__(self, nb, resources): return self.preprocess(nb,resources)
class StripMagicsProcessor(Preprocessor):
"""
    Preprocessor used when converting notebooks to Python source: strips out all
    remaining magics. To be applied after the preprocessors that can handle
    holoviews magics appropriately.
"""
def preprocess_cell(self, cell, resources, index):
if cell['cell_type'] == 'code':
cell['source'] = strip_magics(cell['source'])
return cell, resources
def __call__(self, nb, resources): return self.preprocess(nb,resources)
class Substitute(Preprocessor):
"""
An nbconvert preprocessor that substitutes one set of HTML data
output for another, adding annotation to the output as required.
The constructor accepts the notebook format version and a
substitutions dictionary:
{source_html:(target_html, annotation)}
Where the annotation may be None (i.e. no annotation).
"""
annotation = '<center><b>%s</b></center>'
def __init__(self, version, substitutions, **kw):
self.nbversion = version
self.substitutions = substitutions
super(Preprocessor, self).__init__(**kw)
def __call__(self, nb, resources): # Temporary hack around 'enabled' flag
return self.preprocess(nb,resources)
def replace(self, src):
"Given some source html substitute and annotated as applicable"
for html in self.substitutions.keys():
if src == html:
annotation = self.annotation % self.substitutions[src][1]
return annotation + self.substitutions[src][0]
return src
def preprocess_cell(self, cell, resources, index):
v4 = (self.nbversion[0] == 4)
if cell['cell_type'] == 'code':
for outputs in cell['outputs']:
output_key = ('execute_result' if v4 else 'pyout')
if outputs['output_type'] == output_key:
# V1-3
if not v4 and 'html' in outputs:
outputs['html'] = self.replace(outputs['html'])
# V4
for data in outputs.get('data',[]):
if v4 and data == 'text/html':
substitution = self.replace(outputs['data']['text/html'])
outputs['data']['text/html'] = substitution
return cell, resources
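# --- Hypothetical usage sketch (editor's addition, not part of the original module) ---
# Exercises only the pure helper functions defined above; running this file still
# requires nbconvert for the Preprocessor import at the top.
if __name__ == '__main__':
    cell = "%matplotlib inline\nx = [1, 2, 3]\nx"
    # Drops the line magic, keeping the remaining source intact
    print(strip_magics(cell))
    # Wraps the trailing expression using the given template
    print(wrap_cell_expression(cell, template='display({expr})'))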
| basnijholt/holoviews | holoviews/ipython/preprocessors.py | Python | bsd-3-clause | 7,474 |
import os
from fabric.api import env, run, cd, sudo, settings
from fabric.contrib.files import upload_template
def get_env_variable(var_name):
""" Get the environment variable or return exception """
try:
return os.environ[var_name]
except KeyError:
error_msg = "Variable %s is not set in the environment" % var_name
raise Exception(error_msg)
env.user = "ubuntu"
#env.hosts = [get_env_variable('TESSERACT_HOST')]
#env.key_filename = [get_env_variable('TESSERACT_AWS_KEYFILE')]
env.repo_url = 'https://github.com/setaris/django-tesseract2.git'
env.root = "/home/ubuntu/webapps/djangotesseract2"
env.virtualenv = "/home/ubuntu/envs/djangotesseract2env"
env.project = "%s/djangotesseract2" % env.root
env.servicename = "djangotesseract2"
def deploy():
"Full deploy: push, buildout, and reload."
push()
update_dependencies()
syncdb()
update_services()
reload()
def push():
"Push out new code to the server."
with cd("%(project)s" % env):
run("git pull origin master")
def update_services():
upload_template('./nginx.conf',
'/etc/nginx/sites-enabled/default', use_sudo=True)
upload_template('./service.conf',
'/etc/init/djangotesseract2.conf', use_sudo=True)
with cd("/etc/nginx/sites-enabled"):
sudo('rm *.bak')
def update_dependencies():
run("%(virtualenv)s/bin/pip install -r %(root)s/requirements.txt" % env)
def createsuperuser():
with cd("%(project)s" % env):
run("%(virtualenv)s/bin/python manage.py createsuperuser --settings=settings.production" % env)
def syncdb():
with cd("%(project)s" % env):
run("%(virtualenv)s/bin/python manage.py syncdb --noinput --settings=settings.production" % env)
def collectstatic():
with cd("%(project)s" % env):
run("%(virtualenv)s/bin/python manage.py collectstatic --settings=settings.production" % env)
def reload():
with settings(warn_only=True):
sudo("sudo initctl stop djangotesseract2")
sudo("sudo initctl start djangotesseract2")
sudo('/etc/init.d/nginx reload')
def setup():
run("mkdir -p %(root)s" % env)
sudo("aptitude update")
sudo("aptitude -y install git-core python-dev python-setuptools "
"build-essential subversion mercurial nginx "
"libjpeg62 libjpeg62-dev zlib1g-dev libfreetype6 libfreetype6-dev "
"ghostscript imagemagick "
"tesseract-ocr libtesseract-dev")
sudo("easy_install virtualenv")
run("virtualenv %(virtualenv)s" % env)
run("%(virtualenv)s/bin/pip install -U pip" % env)
with cd("~/webapps/"):
run("git clone %(repo_url)s djangotesseract2" % env)
with cd("%(project)s" % env):
run('mkdir assets')
run('mkdir media')
run('mkdir static')
deploy()
| setaris/django-tesseract2 | deployment/fabfile.py | Python | bsd-3-clause | 2,815 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Includes the HRV stitcher data source class
"""
import pandas as pd
import numpy as np
from scipy.io import loadmat
from scipy.interpolate import UnivariateSpline
from data_source import DataSource
from schema import Schema, Or, Optional
# label_bin defaults to None so these statistics can also be called with only
# (samples, pos), as bin_data does below.
def _val(x, pos, label_bin=None):
    return np.mean(x)
def _std(x, pos, label_bin=None):
    return x.std(axis=0)
def _sem(x, pos, label_bin=None):
    return x.sem(axis=0)
def _var(x, pos, label_bin=None):
    return np.var(x)
class HRVStitcher(DataSource):
def __init__(self, config, schedule):
# Call the parent class init
super(HRVStitcher, self).__init__(config, schedule)
self.panels = {'bpm': {'VAL': _val,
'SEM': _sem},
'rr': {'VAL': _val,
'STD': _std},
'twave': {'VAL': _val,
'SEM': _sem}}
def load(self, file_paths):
"""Override for load method to include .mat compatibility."""
self.data['samples'] = pd.read_csv(file_paths['samples'],
comment="#",
delimiter="\t",
skipinitialspace=True,
header=False,
index_col=False,
names=['bpm', 'rr', 'twave'])
raw_mat = loadmat(file_paths['labels'])
events = raw_mat['events'][:, 0]
self.data['labels'] = pd.DataFrame({'flag': events},
index=np.arange(events.size))
def merge_data(self):
"""
Clean and merge the samples and labels data.
"""
# TODO(janmtl): return an error if the files have not been loaded yet.
# Clean the samples data frame and the labels data frame
self.data['samples'] = self._clean_samples(self.data['samples'])
self.data['labels'] = self._clean_labels(self.data['labels'])
self.label_config = self._label_config_to_df(self.config)
# Combine the labels data with the labels configuration
self.data['labels'] = self._merge_labels_and_config(
labels=self.data['labels'],
config=self.label_config)
def bin_data(self):
"""Makes a dict of dicts of pd.Panels at self.output."""
label_bins = self.create_label_bins(self.data['labels'])
major_axis = label_bins.index.values
minor_axis = label_bins.drop(['Start_Time', 'End_Time'], axis=1).columns
minor_axis = minor_axis.append(pd.Index(['stat']))
raw = self.data['samples']
output = {channel: pd.Panel(items=statistics.keys(),
major_axis=major_axis,
minor_axis=minor_axis)
for channel, statistics in self.panels.iteritems()}
for channel, statistics in self.panels.iteritems():
for stat_name, stat_fun in statistics.iteritems():
new_panel = label_bins.copy(deep=True)
new_panel.drop(['Start_Time', 'End_Time'], axis=1, inplace=True)
new_panel['stat'] = np.nan
cond_lbls = pd.Series(data=zip(label_bins.loc[:, 'Condition'],
label_bins.loc[:, 'Label'])
).unique()
for cond_lbl in cond_lbls:
sel = (label_bins.loc[:, 'Condition'] == cond_lbl[0]) \
& (label_bins.loc[:, 'Label'] == cond_lbl[1])
sel_bins = label_bins.loc[sel, :]
samples = pd.Series(name=channel)
pos = pd.Series(name='pos')
for _, label_bin in sel_bins.iterrows():
selector = (raw.index.values >= label_bin['Start_Time']) \
& (raw.index.values < label_bin['End_Time'])
samples = samples.append(raw.loc[selector, channel])
pos = pos.append(raw.loc[selector, 'pos'])
stat = stat_fun(samples, pos)
new_panel.loc[sel, 'stat'] = stat
output[channel][stat_name] = new_panel.sort('Bin_Order')
self.output = output
@staticmethod
def _label_config_to_df(config):
"""Convert the label configuration dictionary to a data frame."""
labels_list = []
for event_type, label_config in config.iteritems():
pattern = label_config['pattern']
if isinstance(pattern, dict):
for event_group, flag in label_config['pattern'].iteritems():
labels_list.append({
'Label': event_type,
'Condition': event_group,
'Duration': label_config['duration'],
'N_Bins': label_config['bins'],
'Left_Trim': label_config.get('left_trim', 0),
'Right_Trim': label_config.get('right_trim', 0),
'flag': flag})
elif isinstance(pattern, int):
labels_list.append({
'Label': event_type,
'Condition': np.nan,
'Duration': label_config['duration'],
'N_Bins': label_config['bins'],
'Left_Trim': label_config.get('left_trim', 0),
'Right_Trim': label_config.get('right_trim', 0),
'flag': pattern})
else:
raise Exception('Bad Biopac config flag {}'.format(pattern))
return pd.DataFrame(labels_list)
@staticmethod
def _clean_labels(labels):
"""
Turn the Biopac flag channel into a data frame of label flags and start
times.
"""
# TODO(janmtl): finish this docstring
flags = labels['flag'].values
low_offset = np.append(-255, flags)
high_offset = np.append(flags, flags[-1])
event_flags = flags[(low_offset-high_offset) != 0]
start_times = np.where((low_offset-high_offset) != 0)[0]
labels = pd.DataFrame({'flag': event_flags,
'Start_Time': start_times})
labels = labels[(labels['flag'] != 255)]
return labels
@staticmethod
def _clean_samples(samples):
"""
.
"""
scale = 0.55
samples.index = samples.index*100
for col_name, col in samples.iteritems():
x = col.index
y = col.values
spl = UnivariateSpline(x, y, k=5, s=scale*len(x))
samples[col_name] = spl(x)
samples['pos'] = True
return samples
@staticmethod
def _merge_labels_and_config(labels, config):
"""
Merge together the contents of the labels file with the label
configuration dictionary.
"""
labels = pd.merge(labels, config, on='flag')
labels.sort('Start_Time', inplace=True)
return labels
def create_label_bins(self, labels):
"""Replace the N_Bins column with Bin_Index and the Duration column
with End_Time. This procedure grows the number of rows in the labels
data frame."""
total_bins = labels['N_Bins'].sum()
label_bins = pd.DataFrame(columns=['Order', 'ID', 'Label',
'Condition', 'Bin_Order',
'Start_Time', 'End_Time',
'Bin_Index'],
index=np.arange(0, total_bins))
idx = 0
for _, label in labels.iterrows():
n_bins = label['N_Bins']
cuts = np.linspace(start=label['Start_Time'] + label['Left_Trim'],
stop=(label['Start_Time']
+ label['Duration']
- label['Right_Trim']),
num=n_bins+1)
label_info = np.tile(label.as_matrix(columns=['Label',
'Condition']),
(n_bins, 1))
# Order and ID
label_bins.iloc[idx:idx+n_bins, 0:2] = np.nan
# Label, Condition
label_bins.iloc[idx:idx+n_bins, 2:4] = label_info
# Bin_Order
label_bins.iloc[idx:idx+n_bins, 4] = idx+np.arange(0, n_bins, 1)
# Start_Time
label_bins.iloc[idx:idx+n_bins, 5] = cuts[0:n_bins]
# End_Time
label_bins.iloc[idx:idx+n_bins, 6] = cuts[1:n_bins+1]
# Bin_Index
label_bins.iloc[idx:idx+n_bins, 7] = np.arange(0, n_bins, 1)
idx = idx + n_bins
# Add the Order by iterating over Labels and Bin indices
for lc, group in label_bins.groupby(['Label', 'Bin_Index']):
selector = (label_bins['Label'] == lc[0]) & \
(label_bins['Bin_Index'] == lc[1])
label_bins.loc[selector, 'Order'] = \
np.arange(0, np.sum(selector), 1)
return label_bins
@staticmethod
def _validate_config(raw):
"""
Validates the label configuration dict passed to the Data Source.
Args:
raw (dict): must match the following schema
{event_type (str):
{
duration: (float or int),
bins: (int),
pattern: dictionary of flags keyed by group
}
}
"""
# TODO(janmtl): improve this docstring
schema = Schema({str: {'duration': Or(float, int),
'bins': int,
'pattern': Or(int, {str: int}),
Optional('left_trim'): Or(float, int),
Optional('right_trim'): Or(float, int)}})
return schema.validate(raw)
@staticmethod
def _validate_schedule(raw):
"""
Validates the schedule configuration dict passed to the Data Source.
Args:
raw (dict): must match the following schema
{file_type (str): pattern (str)}
"""
schema = Schema({str: str})
return schema.validate(raw)
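# --- Hypothetical configuration sketch (editor's addition, not part of the original module) ---
# Shows the expected shape of the config and schedule dicts validated above; the event
# names, flag values and file names are made up. Note that the module itself targets the
# legacy Python 2 / pandas APIs it uses elsewhere (iteritems, pd.Panel).
if __name__ == '__main__':
    example_config = {
        'baseline': {'duration': 60.0, 'bins': 6, 'pattern': 10},
        'task': {'duration': 120.0, 'bins': 12,
                 'pattern': {'easy': 20, 'hard': 30},
                 'left_trim': 5.0},
    }
    HRVStitcher._validate_config(example_config)
    example_schedule = {'samples': 'sub_01_hrv.txt', 'labels': 'sub_01_events.mat'}
    HRVStitcher._validate_schedule(example_schedule)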
| janmtl/pypsych | pypsych/data_sources/hrvstitcher.py | Python | bsd-3-clause | 10,502 |
import os
from django.template import Context
from django.template.engine import Engine
from django.test import SimpleTestCase
from .utils import ROOT, TEMPLATE_DIR
OTHER_DIR = os.path.join(ROOT, 'other_templates')
class RenderToStringTest(SimpleTestCase):
def setUp(self):
self.engine = Engine(dirs=[TEMPLATE_DIR])
def test_basic_context(self):
self.assertEqual(
self.engine.render_to_string('test_context.html', {'obj': 'test'}),
'obj:test\n',
)
class LoaderTests(SimpleTestCase):
def test_origin(self):
engine = Engine(dirs=[TEMPLATE_DIR], debug=True)
template = engine.get_template('index.html')
self.assertEqual(template.origin.template_name, 'index.html')
def test_loader_priority(self):
"""
        #21460 -- Check that the order of template loaders works.
"""
loaders = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders)
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
def test_cached_loader_priority(self):
"""
        Check that the order of template loaders works. Refs #21460.
"""
loaders = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
]
engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders)
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
| yephper/django | tests/template_tests/test_engine.py | Python | bsd-3-clause | 1,984 |
from django.db import models
from machina.models.fields import ExtendedImageField, MarkupTextField
RESIZED_IMAGE_WIDTH = 100
RESIZED_IMAGE_HEIGHT = 100
VALIDATED_IMAGE_MIN_WIDTH = 100
VALIDATED_IMAGE_MAX_WIDTH = 120
VALIDATED_IMAGE_MIN_HEIGHT = 100
VALIDATED_IMAGE_MAX_HEIGHT = 120
VALIDATED_IMAGE_MAX_SIZE = 12000
class DummyModel(models.Model):
"""
This model will be used for testing purposes only.
"""
content = MarkupTextField(null=True, blank=True)
resized_image = ExtendedImageField(
upload_to='machina/test_images', width=RESIZED_IMAGE_WIDTH, height=RESIZED_IMAGE_HEIGHT,
null=True, blank=True)
validated_image = ExtendedImageField(
upload_to='machina/test_images', min_width=VALIDATED_IMAGE_MIN_WIDTH,
max_width=VALIDATED_IMAGE_MAX_WIDTH, min_height=VALIDATED_IMAGE_MIN_HEIGHT,
max_height=VALIDATED_IMAGE_MAX_HEIGHT, max_upload_size=VALIDATED_IMAGE_MAX_SIZE, null=True,
blank=True)
class Meta:
app_label = 'tests'
| ellmetha/django-machina | tests/models.py | Python | bsd-3-clause | 1,013 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Fisher'] , ['Lag1Trend'] , ['Seasonal_Hour'] , ['LSTM'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_Lag1Trend_Seasonal_Hour_LSTM.py | Python | bsd-3-clause | 154 |
#!/usr/bin/env python3
"""Python binding of UART wrapper of LetMeCreate library."""
import ctypes
_LIB = ctypes.CDLL('libletmecreate_core.so')
# Baudrates
UART_BD_1200 = 1200
UART_BD_2400 = 2400
UART_BD_4800 = 4800
UART_BD_9600 = 9600
UART_BD_19200 = 19200
UART_BD_38400 = 38400
UART_BD_57600 = 57600
def init():
"""Initialise UART on all mikrobus.
UART buses are configured:
- baudrate: 9600
- one stop bit
- no parity bit
The current bus is set to MIKROBUS_1.
Note: An exception is thrown if an error occurs during initialisation.
"""
ret = _LIB.uart_init()
if ret < 0:
raise Exception("uart init failed")
def select_bus(mikrobus_index):
"""Selects the current UART bus.
mikrobus_index: must be 0 (MIKROBUS_1) or 1 (MIKROBUS_2)
    Note: If the mikrobus index is invalid, then this function does nothing.
"""
_LIB.uart_select_bus(mikrobus_index)
def get_current_bus():
"""Returns the current UART bus: 0 (MIKROBUS_1) or 1 (MIKROBUS_2)."""
return _LIB.uart_get_current_bus()
def set_baudrate(baudrate):
""""Set the baudrate of the current UART bus.
baudrate: Must be one of the predefined baudrates.
Note: An exception is thrown, if it fails to set the baudrate.
"""
ret = _LIB.uart_set_baudrate(baudrate)
if ret < 0:
raise Exception("uart set baudrate failed")
def get_baudrate():
"""Returns the baudrate of the current UART bus.
Note: An exception is thrown if it fails to retrieve the baudrate.
"""
baudrate = ctypes.c_uint32(0)
ret = _LIB.uart_get_baudrate(ctypes.byref(baudrate))
if ret < 0:
raise Exception("uart get baudrate failed")
return baudrate.value
def send(data):
"""Sends data using the current UART bus.
data: A list of bytes.
Note: An exception is thrown if an error occurs during the transmission.
"""
arr = (ctypes.c_uint8 * len(data))(*data)
ret = _LIB.uart_send(arr, len(data))
if ret < 0:
raise Exception("uart send failed")
def receive(length):
"""Returns a list of bytes.
    This function is blocking and will not return until length bytes have
been received.
length: Number of bytes to receive.
Note: An exception is thrown if it fails to receive data.
"""
arr = (ctypes.c_uint8 * length)()
ret = _LIB.uart_receive(arr, length)
if ret < 0:
raise Exception("uart receive failed")
return [arr[i] for i in range(length)]
def release():
"""Releases all UART bus.
Note: An exception is thrown if it fails to release all UART buses.
"""
ret = _LIB.uart_release()
if ret < 0:
raise Exception("uart release failed")
| francois-berder/PyLetMeCreate | letmecreate/core/uart.py | Python | bsd-3-clause | 2,730 |
""" A module to contain utility ISO-19115 metadata parsing helpers """
from collections import OrderedDict
from copy import deepcopy
from frozendict import frozendict as FrozenOrderedDict
from parserutils.collections import filter_empty, reduce_value, wrap_value
from parserutils.elements import get_element_name, get_element_text, get_elements_text
from parserutils.elements import get_elements, get_remote_element, insert_element, remove_element
from parserutils.elements import XPATH_DELIM
from gis_metadata.exceptions import InvalidContent
from gis_metadata.metadata_parser import MetadataParser
from gis_metadata.utils import DATE_TYPE, DATE_TYPE_SINGLE, DATE_TYPE_MULTIPLE
from gis_metadata.utils import DATE_TYPE_RANGE, DATE_TYPE_RANGE_BEGIN, DATE_TYPE_RANGE_END
from gis_metadata.utils import ATTRIBUTES
from gis_metadata.utils import CONTACTS
from gis_metadata.utils import BOUNDING_BOX
from gis_metadata.utils import DATES
from gis_metadata.utils import DIGITAL_FORMS
from gis_metadata.utils import KEYWORDS_PLACE, KEYWORDS_STRATUM, KEYWORDS_TEMPORAL, KEYWORDS_THEME
from gis_metadata.utils import LARGER_WORKS
from gis_metadata.utils import PROCESS_STEPS
from gis_metadata.utils import RASTER_DIMS, RASTER_INFO
from gis_metadata.utils import COMPLEX_DEFINITIONS, ParserProperty
from gis_metadata.utils import format_xpaths, get_default_for_complex, get_default_for_complex_sub
from gis_metadata.utils import parse_complex_list, parse_property, update_complex_list, update_property
ISO_ROOTS = ('MD_Metadata', 'MI_Metadata')
KEYWORD_PROPS = (KEYWORDS_PLACE, KEYWORDS_STRATUM, KEYWORDS_TEMPORAL, KEYWORDS_THEME)
KEYWORD_TYPES = FrozenOrderedDict({
KEYWORDS_PLACE: 'place',
KEYWORDS_STRATUM: 'stratum',
KEYWORDS_TEMPORAL: 'temporal',
KEYWORDS_THEME: 'theme'
})
# For appending digital form content to ISO distribution format specs
ISO_DIGITAL_FORMS_DELIM = '@------------------------------@'
# Define backup locations for attribute sub-properties and dimension type property
ISO_DEFINITIONS = dict({k: dict(v) for k, v in dict(COMPLEX_DEFINITIONS).items()})
ISO_DEFINITIONS[ATTRIBUTES].update({
'_definition_source': '{_definition_src}',
'__definition_source': '{__definition_src}',
'___definition_source': '{___definition_src}'
})
ISO_DEFINITIONS[RASTER_DIMS]['_type'] = '{_type}'
ISO_DEFINITIONS = FrozenOrderedDict({k: FrozenOrderedDict(v) for k, v in ISO_DEFINITIONS.items()})
ISO_TAG_ROOTS = OrderedDict((
# First process private dependency tags (order enforced by key sorting)
('_content_coverage', 'contentInfo/MD_CoverageDescription'),
('_dataqual', 'dataQualityInfo/DQ_DataQuality'),
('_dataqual_lineage', '{_dataqual}/lineage/LI_Lineage'),
('_dataqual_report', '{_dataqual}/report'),
('_distinfo', 'distributionInfo/MD_Distribution'),
('_distinfo_dist', '{_distinfo}/distributor/MD_Distributor'),
('_distinfo_proc', '{_distinfo_dist}/distributionOrderProcess/MD_StandardOrderProcess'),
('_distinfo_resp', '{_distinfo_dist}/distributorContact/CI_ResponsibleParty'),
('_distinfo_resp_contact', '{_distinfo_resp}/contactInfo/CI_Contact'),
('_distinfo_rsrc', '{_distinfo}/transferOptions/MD_DigitalTransferOptions/onLine/CI_OnlineResource'),
('_idinfo', 'identificationInfo/MD_DataIdentification'),
('_idinfo_aggregate', '{_idinfo}/aggregationInfo/MD_AggregateInformation'),
('_idinfo_aggregate_citation', '{_idinfo_aggregate}/aggregateDataSetName/CI_Citation'),
('_idinfo_aggregate_contact', '{_idinfo_aggregate_citation}/citedResponsibleParty/CI_ResponsibleParty'),
('_idinfo_citation', '{_idinfo}/citation/CI_Citation'),
('_idinfo_citresp', '{_idinfo_citation}/citedResponsibleParty/CI_ResponsibleParty'),
('_idinfo_extent', '{_idinfo}/extent/EX_Extent'),
('_idinfo_keywords', '{_idinfo}/descriptiveKeywords/MD_Keywords'),
('_idinfo_resp', '{_idinfo}/pointOfContact/CI_ResponsibleParty'),
('_idinfo_resp_contact', '{_idinfo_resp}/contactInfo/CI_Contact'),
('_srinfo_grid_rep', 'spatialRepresentationInfo/MD_GridSpatialRepresentation'),
('_srinfo_grid_dim', '{_srinfo_grid_rep}/axisDimensionProperties/MD_Dimension'),
# Supported in separate file ISO-19110: FC_FeatureCatalog
('_attr_root', 'FC_FeatureCatalogue'),
('_attr_base', 'featureType/FC_FeatureType/carrierOfCharacteristics/FC_FeatureAttribute'),
('_attr_def', '{_attr_base}/definitionReference/FC_DefinitionReference/definitionSource/FC_DefinitionSource'),
('_attr_src', '{_attr_def}/source/CI_Citation/citedResponsibleParty/CI_ResponsibleParty'),
# References to separate file ISO-19110 from: MD_Metadata
('_attr_citation', 'contentInfo/MD_FeatureCatalogueDescription/featureCatalogueCitation'),
('_attr_contact', '{_attr_citation}/CI_Citation/citedResponsibleParty/CI_ResponsibleParty/contactInfo/CI_Contact'),
('_attr_contact_url', '{_attr_contact}/onlineResource/CI_OnlineResource/linkage/URL')
))
# Two passes required because of self references within roots dict
ISO_TAG_ROOTS.update(format_xpaths(ISO_TAG_ROOTS, **ISO_TAG_ROOTS))
ISO_TAG_ROOTS.update(format_xpaths(ISO_TAG_ROOTS, **ISO_TAG_ROOTS))
ISO_TAG_ROOTS = FrozenOrderedDict(ISO_TAG_ROOTS)
ISO_TAG_FORMATS = {
# Property-specific xpath roots: the base from which each element repeats
'_attribute_accuracy_root': '{_dataqual_report}',
'_attributes_root': 'featureType/FC_FeatureType/carrierOfCharacteristics',
'_bounding_box_root': '{_idinfo_extent}/geographicElement',
'_contacts_root': '{_idinfo}/pointOfContact',
'_dataset_completeness_root': '{_dataqual_report}',
'_dates_root': '{_idinfo_extent}/temporalElement',
'_digital_forms_root': '{_distinfo}/distributionFormat',
'_dist_liability_root': '{_idinfo}/resourceConstraints',
'_transfer_options_root': '{_distinfo}/transferOptions/MD_DigitalTransferOptions/onLine',
'_keywords_root': '{_idinfo}/descriptiveKeywords',
'_larger_works_root': '{_idinfo_aggregate_citation}',
'_process_steps_root': '{_dataqual_lineage}/processStep',
'_raster_info_root': '{_srinfo_grid_rep}/axisDimensionProperties',
'_use_constraints_root': '{_idinfo}/resourceConstraints',
# Then process public dependent tags
'title': '{_idinfo_citation}/title/CharacterString',
'abstract': '{_idinfo}/abstract/CharacterString',
'purpose': '{_idinfo}/purpose/CharacterString',
'supplementary_info': '{_idinfo}/supplementalInformation/CharacterString',
'online_linkages': '{_idinfo_citresp}/contactInfo/CI_Contact/onlineResource/CI_OnlineResource/linkage/URL',
'originators': '{_idinfo_citresp}/organisationName/CharacterString',
'publish_date': '{_idinfo_citation}/date/CI_Date/date/Date',
'publish_date_type': '{_idinfo_citation}/date/CI_Date/dateType/CI_DateTypeCode',
'data_credits': '{_idinfo}/credit/CharacterString',
CONTACTS: '{_idinfo_resp}/{{ct_path}}',
'dist_contact_org': '{_distinfo_resp}/organisationName/CharacterString',
'dist_contact_person': '{_distinfo_resp}/individualName/CharacterString',
'dist_address_type': '{_distinfo_resp_contact}/address/@type',
'dist_address': '{_distinfo_resp_contact}/address/CI_Address/deliveryPoint/CharacterString',
'dist_city': '{_distinfo_resp_contact}/address/CI_Address/city/CharacterString',
'dist_state': '{_distinfo_resp_contact}/address/CI_Address/administrativeArea/CharacterString',
'dist_postal': '{_distinfo_resp_contact}/address/CI_Address/postalCode/CharacterString',
'dist_country': '{_distinfo_resp_contact}/address/CI_Address/country/CharacterString',
'_dist_country': '{_distinfo_resp_contact}/address/CI_Address/country/Country', # If not in CharacterString
'dist_phone': '{_distinfo_resp_contact}/phone/CI_Telephone/voice/CharacterString',
'dist_email': '{_distinfo_resp_contact}/address/CI_Address/electronicMailAddress/CharacterString',
'dist_liability': '{_idinfo}/resourceConstraints/MD_LegalConstraints/otherConstraints/CharacterString',
'processing_fees': '{_distinfo_proc}/fees/CharacterString',
'processing_instrs': '{_distinfo_proc}/orderingInstructions/CharacterString',
'resource_desc': '{_idinfo}/resourceSpecificUsage/MD_Usage/specificUsage/CharacterString',
'tech_prerequisites': '{_idinfo}/environmentDescription/CharacterString',
ATTRIBUTES: '{_attr_base}/{{ad_path}}',
'_attributes_file': '{_attr_citation}/@href',
'__attributes_file': '{_attr_contact_url}', # If not in above: "_attr_citation/@href"
'attribute_accuracy': '{_dataqual_report}/DQ_QuantitativeAttributeAccuracy/measureDescription/CharacterString',
BOUNDING_BOX: '{_idinfo_extent}/geographicElement/EX_GeographicBoundingBox/{{bbox_path}}',
'dataset_completeness': '{_dataqual_report}/DQ_CompletenessOmission/measureDescription/CharacterString',
DIGITAL_FORMS: '{_distinfo}/distributionFormat/MD_Format/{{df_path}}',
'_access_desc': '{_distinfo_rsrc}/description/CharacterString',
'_access_instrs': '{_distinfo_rsrc}/protocol/CharacterString',
'_network_resource': '{_distinfo_rsrc}/linkage/URL',
PROCESS_STEPS: '{_dataqual_lineage}/processStep/LI_ProcessStep/{{ps_path}}',
LARGER_WORKS: '{_idinfo_aggregate_citation}/{{lw_path}}',
'_lw_citation': '{_idinfo_aggregate_contact}/{{lw_path}}',
'_lw_collective': '{_idinfo_aggregate_citation}/collectiveTitle/CharacterString',
'_lw_contact': '{_idinfo_aggregate_contact}/contactInfo/CI_Contact/{{lw_path}}',
'_lw_linkage': '{_idinfo_aggregate_contact}/contactInfo/CI_Contact/onlineResource/CI_OnlineResource/{{lw_path}}',
RASTER_INFO: '{_srinfo_grid_dim}/{{ri_path}}',
'_ri_num_dims': '{_srinfo_grid_rep}/numberOfDimensions/Integer',
'other_citation_info': '{_idinfo_citation}/otherCitationDetails/CharacterString',
'use_constraints': '{_idinfo}/resourceConstraints/MD_Constraints/useLimitation/CharacterString',
DATES: '{_idinfo_extent}/temporalElement/EX_TemporalExtent/extent/{{type_path}}',
KEYWORDS_PLACE: '{_idinfo_keywords}/keyword/CharacterString',
KEYWORDS_STRATUM: '{_idinfo_keywords}/keyword/CharacterString',
KEYWORDS_TEMPORAL: '{_idinfo_keywords}/keyword/CharacterString',
KEYWORDS_THEME: '{_idinfo_keywords}/keyword/CharacterString'
}
# Apply XPATH root formats to the basic data map formats
ISO_TAG_FORMATS.update(ISO_TAG_ROOTS)
ISO_TAG_FORMATS.update(format_xpaths(ISO_TAG_FORMATS, **ISO_TAG_ROOTS))
ISO_TAG_FORMATS = FrozenOrderedDict(ISO_TAG_FORMATS)
ISO_TAG_PRIMITIVES = frozenset({
'Binary', 'Boolean', 'CharacterString',
'Date', 'DateTime', 'timePosition',
'Decimal', 'Integer', 'Real', 'RecordType',
'CI_DateTypeCode', 'MD_KeywordTypeCode', 'URL'
})
class IsoParser(MetadataParser):
""" A class to parse metadata files conforming to the ISO-19115 standard """
def _init_data_map(self):
""" OVERRIDDEN: Initialize required ISO-19115 data map with XPATHS and specialized functions """
if self._data_map is not None:
return # Initiation happens once
# Parse and validate the ISO metadata root
if self._xml_tree is None:
iso_root = ISO_ROOTS[0]
else:
iso_root = get_element_name(self._xml_tree)
if iso_root not in ISO_ROOTS:
raise InvalidContent('Invalid XML root for ISO-19115 standard: {root}', root=iso_root)
iso_data_map = {'_root': iso_root}
iso_data_map.update(ISO_TAG_ROOTS)
iso_data_map.update(ISO_TAG_FORMATS)
iso_data_structures = {}
# Capture and format complex XPATHs
ad_format = iso_data_map[ATTRIBUTES]
ft_source = iso_data_map['_attr_src'].replace('/carrierOfCharacteristics/FC_FeatureAttribute', '')
iso_data_structures[ATTRIBUTES] = format_xpaths(
ISO_DEFINITIONS[ATTRIBUTES],
label=ad_format.format(ad_path='memberName/LocalName'),
aliases=ad_format.format(ad_path='aliases/LocalName'), # Not in spec
definition=ad_format.format(ad_path='definition/CharacterString'),
# First try to populate attribute definition source from FC_FeatureAttribute
definition_src=iso_data_map['_attr_src'] + '/organisationName/CharacterString',
_definition_src=iso_data_map['_attr_src'] + '/individualName/CharacterString',
# Then assume feature type source is the same as attribute: populate from FC_FeatureType
__definition_src=ft_source + '/organisationName/CharacterString',
___definition_src=ft_source + '/individualName/CharacterString'
)
bb_format = iso_data_map[BOUNDING_BOX]
iso_data_structures[BOUNDING_BOX] = format_xpaths(
ISO_DEFINITIONS[BOUNDING_BOX],
east=bb_format.format(bbox_path='eastBoundLongitude/Decimal'),
south=bb_format.format(bbox_path='southBoundLatitude/Decimal'),
west=bb_format.format(bbox_path='westBoundLongitude/Decimal'),
north=bb_format.format(bbox_path='northBoundLatitude/Decimal')
)
ct_format = iso_data_map[CONTACTS]
iso_data_structures[CONTACTS] = format_xpaths(
ISO_DEFINITIONS[CONTACTS],
name=ct_format.format(ct_path='individualName/CharacterString'),
organization=ct_format.format(ct_path='organisationName/CharacterString'),
position=ct_format.format(ct_path='positionName/CharacterString'),
email=ct_format.format(
ct_path='contactInfo/CI_Contact/address/CI_Address/electronicMailAddress/CharacterString'
)
)
dt_format = iso_data_map[DATES]
iso_data_structures[DATES] = {
DATE_TYPE_MULTIPLE: dt_format.format(type_path='TimeInstant/timePosition'),
DATE_TYPE_RANGE_BEGIN: dt_format.format(type_path='TimePeriod/begin/TimeInstant/timePosition'),
DATE_TYPE_RANGE_END: dt_format.format(type_path='TimePeriod/end/TimeInstant/timePosition'),
DATE_TYPE_SINGLE: dt_format.format(type_path='TimeInstant/timePosition') # Same as multiple
}
iso_data_structures[DATES][DATE_TYPE_RANGE] = [
iso_data_structures[DATES][DATE_TYPE_RANGE_BEGIN],
iso_data_structures[DATES][DATE_TYPE_RANGE_END]
]
df_format = iso_data_map[DIGITAL_FORMS]
iso_data_structures[DIGITAL_FORMS] = format_xpaths(
ISO_DEFINITIONS[DIGITAL_FORMS],
name=df_format.format(df_path='name/CharacterString'),
content='', # Not supported in ISO-19115 (appending to spec)
decompression=df_format.format(df_path='fileDecompressionTechnique/CharacterString'),
version=df_format.format(df_path='version/CharacterString'),
specification=df_format.format(df_path='specification/CharacterString'),
access_desc=iso_data_map['_access_desc'],
access_instrs=iso_data_map['_access_instrs'],
network_resource=iso_data_map['_network_resource']
)
keywords_structure = {
'keyword_root': 'MD_Keywords/keyword',
'keyword_type': 'MD_Keywords/type/MD_KeywordTypeCode',
'keyword': 'MD_Keywords/keyword/CharacterString'
}
for keyword_prop in KEYWORD_PROPS:
iso_data_structures[keyword_prop] = deepcopy(keywords_structure)
lw_format = iso_data_map[LARGER_WORKS]
iso_data_structures[LARGER_WORKS] = format_xpaths(
ISO_DEFINITIONS[LARGER_WORKS],
title=lw_format.format(lw_path='title/CharacterString'),
edition=lw_format.format(lw_path='edition/CharacterString'),
origin=iso_data_map['_lw_citation'].format(lw_path='individualName/CharacterString'),
online_linkage=iso_data_map['_lw_linkage'].format(lw_path='linkage/URL'),
other_citation=lw_format.format(lw_path='otherCitationDetails/CharacterString'),
date=lw_format.format(lw_path='editionDate/Date'),
place=iso_data_map['_lw_contact'].format(lw_path='address/CI_Address/city/CharacterString'),
info=iso_data_map['_lw_citation'].format(lw_path='organisationName/CharacterString')
)
ps_format = iso_data_map[PROCESS_STEPS]
iso_data_structures[PROCESS_STEPS] = format_xpaths(
ISO_DEFINITIONS[PROCESS_STEPS],
description=ps_format.format(ps_path='description/CharacterString'),
date=ps_format.format(ps_path='dateTime/DateTime'),
sources=ps_format.format(
ps_path='source/LI_Source/sourceCitation/CI_Citation/alternateTitle/CharacterString'
)
)
ri_format = iso_data_map[RASTER_INFO]
iso_data_structures[RASTER_INFO] = format_xpaths(
ISO_DEFINITIONS[RASTER_DIMS],
type=ri_format.format(ri_path='dimensionName/MD_DimensionNameTypeCode'),
_type=ri_format.format(ri_path='dimensionName/MD_DimensionNameTypeCode/@codeListValue'),
size=ri_format.format(ri_path='dimensionSize/Integer'),
value=ri_format.format(ri_path='resolution/Measure'),
units=ri_format.format(ri_path='resolution/Measure/@uom')
)
# Assign XPATHS and gis_metadata.utils.ParserProperties to data map
for prop, xpath in dict(iso_data_map).items():
if prop == ATTRIBUTES:
iso_data_map[prop] = ParserProperty(self._parse_attribute_details, self._update_attribute_details)
elif prop in (CONTACTS, PROCESS_STEPS):
iso_data_map[prop] = ParserProperty(self._parse_complex_list, self._update_complex_list)
elif prop in (BOUNDING_BOX, LARGER_WORKS):
iso_data_map[prop] = ParserProperty(self._parse_complex, self._update_complex)
elif prop == DATES:
iso_data_map[prop] = ParserProperty(self._parse_dates, self._update_dates)
elif prop == DIGITAL_FORMS:
iso_data_map[prop] = ParserProperty(self._parse_digital_forms, self._update_digital_forms)
elif prop in KEYWORD_PROPS:
iso_data_map[prop] = ParserProperty(self._parse_keywords, self._update_keywords)
elif prop == RASTER_INFO:
iso_data_map[prop] = ParserProperty(self._parse_raster_info, self._update_raster_info)
else:
iso_data_map[prop] = xpath
self._data_map = iso_data_map
self._data_structures = iso_data_structures
def _parse_attribute_details(self, prop=ATTRIBUTES):
""" Concatenates a list of Attribute Details data structures parsed from a remote file """
parsed_attributes = self._parse_attribute_details_file(prop)
if parsed_attributes is None:
# If not in the (official) remote location, try the tree itself
parsed_attributes = self._parse_complex_list(prop)
for attribute in (a for a in parsed_attributes if not a['aliases']):
# Aliases are not in ISO standard: default to label
attribute['aliases'] = attribute['label']
return get_default_for_complex(prop, parsed_attributes)
def _parse_attribute_details_file(self, prop=ATTRIBUTES):
""" Concatenates a list of Attribute Details data structures parsed from a remote file """
# Parse content from remote file URL, which may be stored in one of two places:
# Starting at: contentInfo/MD_FeatureCatalogueDescription/featureCatalogueCitation
# ATTRIBUTE: href
# ELEMENT TEXT: CI_Citation/.../CI_Contact/onlineResource/CI_OnlineResource/linkage
self._attr_details_file_url = parse_property(
self._xml_tree, None, self._data_map, '_attributes_file'
)
if not self._attr_details_file_url:
self._attr_details_file_url = None
return None
try:
tree_to_parse = get_remote_element(self._attr_details_file_url)
except Exception:
self._attr_details_file_url = None
return None
xpath_map = self._data_structures[ATTRIBUTES]
xpath_root = self._get_xroot_for(prop)
return parse_complex_list(tree_to_parse, xpath_root, xpath_map, prop)
def _parse_digital_forms(self, prop=DIGITAL_FORMS):
""" Concatenates a list of Digital Form data structures parsed from the metadata """
xpath_map = self._data_structures[prop]
# Parse base digital form fields: 'name', 'content', 'decompression', 'version', 'specification'
xpath_root = self._data_map['_digital_forms_root']
digital_forms = parse_complex_list(self._xml_tree, xpath_root, xpath_map, prop)
# Parse digital form transfer option fields: 'access_desc', 'access_instrs', 'network_resource'
xpath_root = self._data_map['_transfer_options_root']
transfer_opts = parse_complex_list(self._xml_tree, xpath_root, xpath_map, prop)
# Split out digital form content that has been appended to specifications
content_delim = ISO_DIGITAL_FORMS_DELIM
for digital_form in digital_forms:
specs = reduce_value(digital_form['specification'])
specs = specs.splitlines() if isinstance(specs, str) else specs
specifications = wrap_value(s.strip() for s in specs)
digital_form['content'] = []
digital_form['specification'] = []
has_content = False
# For each specification, insert delim before appending content
for spec in specifications:
has_content = has_content or spec == content_delim
if not has_content:
digital_form['specification'].append(spec)
elif spec != content_delim:
digital_form['content'].append(spec)
# Reduce spec and content to single string values if possible
for form_prop in ('content', 'specification'):
digital_form[form_prop] = reduce_value(filter_empty(digital_form[form_prop], u''))
# Combine digital forms and transfer options into a single complex struct
df_len = len(digital_forms)
to_len = len(transfer_opts)
parsed_forms = []
for idx in range(0, max(df_len, to_len)):
digital_form = {}.fromkeys(ISO_DEFINITIONS[prop], u'')
if idx < df_len:
digital_form.update(i for i in digital_forms[idx].items() if i[1])
if idx < to_len:
digital_form.update(i for i in transfer_opts[idx].items() if i[1])
if any(digital_form.values()):
parsed_forms.append(digital_form)
return get_default_for_complex(prop, parsed_forms)
def _parse_keywords(self, prop):
""" Parse type-specific keywords from the metadata: Theme or Place """
keywords = []
if prop in KEYWORD_PROPS:
xpath_root = self._data_map['_keywords_root']
xpath_map = self._data_structures[prop]
xtype = xpath_map['keyword_type']
xpath = xpath_map['keyword']
ktype = KEYWORD_TYPES[prop]
for element in get_elements(self._xml_tree, xpath_root):
if get_element_text(element, xtype).lower() == ktype.lower():
keywords.extend(get_elements_text(element, xpath))
return keywords
def _parse_raster_info(self, prop=RASTER_INFO):
""" Collapses multiple dimensions into a single raster_info complex struct """
raster_info = {}.fromkeys(ISO_DEFINITIONS[prop], u'')
# Ensure conversion of lists to newlines is in place
raster_info['dimensions'] = get_default_for_complex_sub(
prop=prop,
subprop='dimensions',
value=parse_property(self._xml_tree, None, self._data_map, '_ri_num_dims'),
xpath=self._data_map['_ri_num_dims']
)
xpath_root = self._get_xroot_for(prop)
xpath_map = self._data_structures[prop]
for dimension in parse_complex_list(self._xml_tree, xpath_root, xpath_map, RASTER_DIMS):
dimension_type = dimension['type'].lower()
if dimension_type == 'vertical':
raster_info['vertical_count'] = dimension['size']
elif dimension_type == 'column':
raster_info['column_count'] = dimension['size']
raster_info['x_resolution'] = u' '.join(dimension[k] for k in ['value', 'units']).strip()
elif dimension_type == 'row':
raster_info['row_count'] = dimension['size']
raster_info['y_resolution'] = u' '.join(dimension[k] for k in ['value', 'units']).strip()
return raster_info if any(raster_info[k] for k in raster_info) else {}
def _update_attribute_details(self, **update_props):
""" Update operation for ISO Attribute Details metadata: write to "MD_Metadata/featureType" """
tree_to_update = update_props['tree_to_update']
xpath = self._data_map['_attr_citation']
# Cannot write to remote file: remove the featureCatalogueCitation element
self._attr_details_file_url = None
remove_element(tree_to_update, xpath, True)
return self._update_complex_list(**update_props)
def _update_dates(self, **update_props):
"""
Update operation for ISO Dates metadata
:see: gis_metadata.utils.COMPLEX_DEFINITIONS[DATES]
"""
tree_to_update = update_props['tree_to_update']
xpath_root = self._data_map['_dates_root']
if self.dates:
date_type = self.dates[DATE_TYPE]
# First remove all date info from common root
remove_element(tree_to_update, xpath_root)
if date_type == DATE_TYPE_MULTIPLE:
xpath_root += '/TimeInstant'
elif date_type == DATE_TYPE_RANGE:
xpath_root += '/TimePeriod'
return super(IsoParser, self)._update_dates(xpath_root, **update_props)
def _update_digital_forms(self, **update_props):
"""
Update operation for ISO Digital Forms metadata
:see: gis_metadata.utils.COMPLEX_DEFINITIONS[DIGITAL_FORMS]
"""
digital_forms = wrap_value(update_props['values'])
# Update all Digital Form properties: distributionFormat*
xpath_map = self._data_structures[update_props['prop']]
dist_format_props = ('name', 'decompression', 'version', 'specification')
dist_format_xroot = self._data_map['_digital_forms_root']
dist_format_xmap = {prop: xpath_map[prop] for prop in dist_format_props}
dist_formats = []
for digital_form in digital_forms:
dist_format = {prop: digital_form[prop] for prop in dist_format_props}
if digital_form.get('content'):
dist_spec = wrap_value(digital_form.get('specification'))
dist_spec.append(ISO_DIGITAL_FORMS_DELIM)
dist_spec.extend(wrap_value(digital_form['content']))
dist_format['specification'] = dist_spec
dist_formats.append(dist_format)
update_props['values'] = dist_formats
dist_formats = update_complex_list(
xpath_root=dist_format_xroot, xpath_map=dist_format_xmap, **update_props
)
# Update all Network Resources: transferOptions+
trans_option_props = ('access_desc', 'access_instrs', 'network_resource')
trans_option_xroot = self._data_map['_transfer_options_root']
trans_option_xmap = {prop: self._data_map['_' + prop] for prop in trans_option_props}
trans_options = []
for digital_form in digital_forms:
trans_options.append({prop: digital_form[prop] for prop in trans_option_props})
update_props['values'] = trans_options
trans_options = update_complex_list(
xpath_root=trans_option_xroot, xpath_map=trans_option_xmap, **update_props
)
return {
'distribution_formats': dist_formats,
'transfer_options': trans_options
}
def _update_keywords(self, **update_props):
""" Update operation for ISO type-specific Keywords metadata: Theme or Place """
tree_to_update = update_props['tree_to_update']
prop = update_props['prop']
values = update_props['values']
keywords = []
if prop in KEYWORD_PROPS:
xpath_root = self._data_map['_keywords_root']
xpath_map = self._data_structures[prop]
xtype = xpath_map['keyword_type']
xroot = xpath_map['keyword_root']
xpath = xpath_map['keyword']
ktype = KEYWORD_TYPES[prop]
# Remove descriptiveKeyword nodes according to type
for element in get_elements(tree_to_update, xpath_root):
if get_element_text(element, xtype).lower() == ktype.lower():
remove_element(tree_to_update, xpath_root)
element = insert_element(tree_to_update, 0, xpath_root)
insert_element(element, 0, xtype, ktype) # Add the type node
keywords.extend(update_property(element, xroot, xpath, prop, values))
return keywords
def _update_raster_info(self, **update_props):
""" Derives multiple dimensions from a single raster_info complex struct """
tree_to_update = update_props['tree_to_update']
prop = update_props['prop']
values = update_props.pop('values')
# Update number of dimensions at raster_info root (applies to all dimensions below)
xroot, xpath = None, self._data_map['_ri_num_dims']
raster_info = [update_property(tree_to_update, xroot, xpath, prop, values.get('dimensions', u''))]
# Derive vertical, longitude, and latitude dimensions from raster_info
xpath_root = self._get_xroot_for(prop)
xpath_map = self._data_structures[prop]
v_dimension = {}
if values.get('vertical_count'):
v_dimension = v_dimension.fromkeys(xpath_map, u'')
v_dimension['type'] = 'vertical'
v_dimension['size'] = values.get('vertical_count', u'')
x_dimension = {}
if values.get('column_count') or values.get('x_resolution'):
x_dimension = x_dimension.fromkeys(xpath_map, u'')
x_dimension['type'] = 'column'
x_dimension['size'] = values.get('column_count', u'')
x_dimension['value'] = values.get('x_resolution', u'')
y_dimension = {}
if values.get('row_count') or values.get('y_resolution'):
y_dimension = y_dimension.fromkeys(xpath_map, u'')
y_dimension['type'] = 'row'
y_dimension['size'] = values.get('row_count', u'')
y_dimension['value'] = values.get('y_resolution', u'')
# Update derived dimensions as complex list, and append affected elements for return
update_props['prop'] = RASTER_DIMS
update_props['values'] = [v_dimension, x_dimension, y_dimension]
raster_info += update_complex_list(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
return raster_info
def update(self, use_template=False, **metadata_defaults):
""" OVERRIDDEN: Prevents writing multiple CharacterStrings per XPATH property """
self.validate()
tree_to_update = self._xml_tree if not use_template else self._get_template(**metadata_defaults)
supported_props = self._metadata_props
# Iterate over keys, and extract non-primitive root for all XPATHs
# xroot = identificationInfo/MD_DataIdentification/abstract/
# xpath = identificationInfo/MD_DataIdentification/abstract/CharacterString
#
# This prevents multiple primitive tags from being inserted under an element
for prop, xpath in self._data_map.items():
if not prop.startswith('_') or prop.strip('_') in supported_props:
# Send only public or alternate properties
xroot = self._trim_xpath(xpath, prop)
values = getattr(self, prop, u'')
update_property(tree_to_update, xroot, xpath, prop, values, supported_props)
return tree_to_update
def _trim_xpath(self, xpath, prop):
""" Removes primitive type tags from an XPATH """
xroot = self._get_xroot_for(prop)
if xroot is None and isinstance(xpath, str):
xtags = xpath.split(XPATH_DELIM)
if xtags[-1] in ISO_TAG_PRIMITIVES:
xroot = XPATH_DELIM.join(xtags[:-1])
return xroot
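if __name__ == '__main__':
    # Hypothetical usage sketch (editor's addition, not part of the original module).
    # The file name is made up, and passing a file object to the constructor as well as
    # the attribute names below (title, bounding_box) assume the standard gis_metadata
    # parser interface in which data map keys become parser properties.
    with open('example_iso_metadata.xml') as metadata_file:
        parser = IsoParser(metadata_file)
    print(parser.title)           # simple properties map directly to XPATHs
    print(parser.bounding_box)    # complex properties come back as dicts/lists
    parser.title = 'Updated title'
    parser.update()               # writes the properties back into the element tree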
| consbio/gis-metadata-parser | gis_metadata/iso_metadata_parser.py | Python | bsd-3-clause | 32,703 |
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib import messages
from models import UserVote
from forms import UserVoteForm
def vote(request):
if request.method == "POST":
form = UserVoteForm(request.POST)
if form.is_valid():
vote = form.save(commit=False)
vote = UserVote.objects.vote(request.user, vote.vote)
messages.info(request, "Your mood is %s" % vote.get_vote_display())
else:
form = UserVoteForm()
return HttpResponseRedirect(reverse('dashboard'))
| django-stars/dash2011 | presence/apps/vote/views.py | Python | bsd-3-clause | 684 |
from django.forms import ModelForm
from .models import DistributionRequests
class DistributionRequestForm(ModelForm):
class Meta:
model = DistributionRequests
| pulilab/django-collectform | collectform/forms.py | Python | bsd-3-clause | 173 |
from django.contrib import admin
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from .models import RegistrationProfile
from opps.core.admin import apply_opps_rules
@apply_opps_rules('registration')
class RegistrationAdmin(admin.ModelAdmin):
actions = ['activate_users', 'resend_activation_email']
list_display = ('user', 'activation_key_expired', 'activation_key')
raw_id_fields = ['user']
search_fields = ('user__username', 'user__first_name', 'user__last_name', 'user__email')
def activate_users(self, request, queryset):
"""
        Activates the selected users, if they are not already
activated.
"""
for profile in queryset:
RegistrationProfile.objects.activate_user(profile.activation_key)
activate_users.short_description = _("Activate users")
def resend_activation_email(self, request, queryset):
"""
Re-sends activation emails for the selected users.
Note that this will *only* send activation emails for users
who are eligible to activate; emails will not be sent to users
whose activation keys have expired or who have already
activated.
"""
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
for profile in queryset:
if not profile.activation_key_expired():
profile.send_activation_email(site)
resend_activation_email.short_description = _("Re-send activation emails")
admin.site.register(RegistrationProfile, RegistrationAdmin)
| opps/opps-registration | opps/registration/admin.py | Python | bsd-3-clause | 1,708 |
import itertools
from whylog.config.investigation_plan import Clue
from whylog.constraints.exceptions import TooManyConstraintsToNegate
from whylog.front.utils import FrontInput
class Verifier(object):
UNMATCHED = Clue(None, None, None, None)
@classmethod
def _create_investigation_result(cls, clues_combination, constraints, linkage):
"""
        Based on a clues combination and constraints,
        returns an appropriate InvestigationResult object
        which collects information about lines
        (FrontInput objects) instead of Clues.
"""
return InvestigationResult(
[FrontInput.from_clue(clue) for clue in clues_combination], constraints, linkage
)
@classmethod
def _verify_constraint(cls, combination, effect, index, constraint, constraint_manager):
"""
        Checks if the specified clues (which represent parsers 1, 2, ... of some rule) and
        the effect (which represents parser 0 of this rule) satisfy one given constraint.
        Returns True if so, or False otherwise.
"""
constraint_verifier = constraint_manager.get_constraint_object(index, constraint)
groups = []
for group_info in constraint['clues_groups']:
parser_num, group_num = group_info
if parser_num == 0:
groups.append(effect.regex_parameters[group_num - 1])
else:
if combination[parser_num - 1] == Verifier.UNMATCHED:
return False
groups.append(combination[parser_num - 1].regex_parameters[group_num - 1])
return constraint_verifier.verify(groups, constraint['params'])
@classmethod
def _clues_combinations(cls, clues_tuples, collected_subset=[]):
"""
recursive generator that returns all permutations according to schema:
from first pair (list, number) of clues_tuples,
produces permutations with size 'number' from 'list's elements
and concatenates it with _clues_combinations invoked on the rest of clues_tuples.
example:
>>> xs = [([1, 2], 1), ('abc', 2)]
>>> for l in Verifier._clues_combinations(xs):
>>> print l
[1, 'a', 'b']
[1, 'a', 'c']
[1, 'b', 'a']
[1, 'b', 'c']
[1, 'c', 'a']
[1, 'c', 'b']
[2, 'a', 'b']
[2, 'a', 'c']
[2, 'b', 'a']
[2, 'b', 'c']
[2, 'c', 'a']
[2, 'c', 'b']
        It should always be called with an empty accumulator,
        that is collected_subset=[].
"""
if len(clues_tuples) != 0:
first_list, repetitions_number = clues_tuples[0]
for clues in itertools.permutations(first_list, repetitions_number):
for subset in cls._clues_combinations(
clues_tuples[1:], collected_subset + list(clues)
):
yield subset
else:
yield collected_subset
@classmethod
def _construct_proper_clues_lists(cls, original_clues_lists):
clues_lists = []
for clues, occurrences in original_clues_lists:
if clues:
clues_lists.append((clues, occurrences))
else:
clues_lists.append(([Verifier.UNMATCHED], occurrences))
return clues_lists
@classmethod
def _pack_results_for_constraint_or(cls, combination, constraints):
return cls._create_investigation_result(
(clue for clue in combination if not clue == Verifier.UNMATCHED), constraints,
InvestigationResult.OR
)
@classmethod
def constraints_and(cls, clues_lists, effect, constraints, constraint_manager):
"""
for each combination of clues (they are generated by _clues_combinations)
checks if for all given constraints their requirements are satisfied
and for each such combination produces InvestigationResult object.
returns list of all produced InvestigationResults
"""
clues_lists = cls._construct_proper_clues_lists(clues_lists)
causes = []
for combination in cls._clues_combinations(clues_lists):
if all(
cls._verify_constraint(combination, effect, idx, constraint, constraint_manager)
for idx, constraint in enumerate(constraints)
):
causes.append(
cls._create_investigation_result(
combination, constraints, InvestigationResult.AND
)
)
return causes
@classmethod
def constraints_or(cls, clues_lists, effect, constraints, constraint_manager):
"""
for each combination of clues (they are generated by _clues_combinations)
checks if for any of given constraints their requirements are satisfied
and for each such combination produces InvestigationResult object.
returns list of all produced InvestigationResults
"""
if not constraints:
# when there is lack of constraints, but there are existing clues combinations,
# each of them should be returned
return [
cls._pack_results_for_constraint_or(combination, constraints)
for combination in cls._clues_combinations(clues_lists)
]
causes = []
clues_lists = cls._construct_proper_clues_lists(clues_lists)
for combination in cls._clues_combinations(clues_lists):
verified_constraints = [
constraint
for idx, constraint in enumerate(constraints)
if cls._verify_constraint(combination, effect, idx, constraint, constraint_manager)
] # yapf: disable
if verified_constraints:
causes.append(
cls._pack_results_for_constraint_or(combination, verified_constraints)
)
return causes
@classmethod
def constraints_not(cls, clues_lists, effect, constraints, constraint_manager):
"""
        Provides investigation only if there is zero or one constraint,
        because the NOT linkage only makes sense in such cases.
"""
if len(constraints) > 1:
raise TooManyConstraintsToNegate()
if constraints:
if clues_lists:
return cls.single_constraint_not(
clues_lists, effect, constraints[0], constraint_manager
)
else:
if clues_lists:
# if all parsers found their matched logs, the NOT requirement isn't satisfied
return []
return [cls._create_investigation_result([], [], InvestigationResult.NOT)]
@classmethod
def single_constraint_not(cls, clues_lists, effect, constraint, constraint_manager):
"""
for each combination of clues (they are generated by _clues_combinations)
checks for given constraint if its requirements are not satisfied
and if they are not, it produces InvestigationResult object.
returns list of all produced InvestigationResults
"""
clues_lists = cls._construct_proper_clues_lists(clues_lists)
for combination in cls._clues_combinations(clues_lists):
if cls._verify_constraint(combination, effect, 0, constraint, constraint_manager):
# called with constraint index = 0, because this function assumes that there is one constraint
return []
return [cls._create_investigation_result([], [constraint], InvestigationResult.NOT)]
class InvestigationResult(object):
AND = "AND"
OR = "OR"
NOT = "NOT"
def __init__(self, lines, constraints, cons_linkage):
self.lines = lines
self.constraints = constraints
self.constraints_linkage = cons_linkage
def __repr__(self):
if self.constraints_linkage in [self.AND, self.OR]:
return "\n(\n result lines: %s;\n due to '%s' constraints: %s\n)" % (
self.lines, self.constraints_linkage, self.constraints
)
else:
return "\n(\n no result lines due to NOT;\n constraints: %s\n)" % (
self.constraints
)
def __eq__(self, other):
return all((
self.lines == other.lines,
self.constraints == other.constraints,
self.constraints_linkage == other.constraints_linkage
)) # yapf: disable
| andrzejgorski/whylog | whylog/constraints/verifier.py | Python | bsd-3-clause | 8,521 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
import sqlite3
from flask import Flask, render_template, g, current_app, request
from flask.ext.paginate import Pagination
app = Flask(__name__)
app.config.from_pyfile('app.cfg')
@app.before_request
def before_request():
g.conn = sqlite3.connect('test.db')
g.conn.row_factory = sqlite3.Row
g.cur = g.conn.cursor()
@app.teardown_request
def teardown(error):
if hasattr(g, 'conn'):
g.conn.close()
@app.route('/')
def index():
g.cur.execute('select count(*) from users')
total = g.cur.fetchone()[0]
page, per_page, offset = get_page_items()
sql = 'select name from users order by name limit {}, {}'\
.format(offset, per_page)
g.cur.execute(sql)
users = g.cur.fetchall()
pagination = get_pagination(page=page,
per_page=per_page,
total=total,
record_name='users',
)
return render_template('index.html', users=users,
page=page,
per_page=per_page,
pagination=pagination,
)
def get_css_framework():
return current_app.config.get('CSS_FRAMEWORK', 'bootstrap3')
def get_link_size():
return current_app.config.get('LINK_SIZE', 'sm')
def show_single_page_or_not():
return current_app.config.get('SHOW_SINGLE_PAGE', False)
def get_page_items():
page = int(request.args.get('page', 1))
per_page = request.args.get('per_page')
if not per_page:
per_page = current_app.config.get('PER_PAGE', 10)
else:
per_page = int(per_page)
offset = (page - 1) * per_page
return page, per_page, offset
def get_pagination(**kwargs):
kwargs.setdefault('record_name', 'records')
return Pagination(css_framework=get_css_framework(),
link_size=get_link_size(),
show_single_page=show_single_page_or_not(),
**kwargs
)
if __name__ == '__main__':
app.run(debug=True)
| wangjun/flask-paginate | example/app.py | Python | bsd-3-clause | 2,185 |
from serial_settings import SerialSettings
class AbstractStream(object):
def __init__(self, config, name):
"""
:type name: str
"""
self.config = config
self.name = name
def open(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
def read(self, num_bytes=1):
raise NotImplementedError
def write(self, data):
raise NotImplementedError
def reconfigure(self, config):
raise NotImplementedError
| ThomasGerstenberg/serial_monitor | stream/__init__.py | Python | bsd-3-clause | 551 |
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.special import factorial
from scipy.lib.six import xrange
__all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator",
"barycentric_interpolate", "PiecewisePolynomial",
"piecewise_polynomial_interpolate", "approximate_taylor_polynomial"]
def _isscalar(x):
"""Check whether x is if a scalar type, or 0-dim"""
return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
class _Interpolator1D(object):
"""
Common features in univariate interpolation
Deal with input data type and interpolation axis rolling. The
actual interpolator can assume the y-data is of shape (n, r) where
`n` is the number of x-points, and `r` the number of variables,
and use self.dtype as the y-data type.
Attributes
----------
_y_axis
Axis along which the interpolation goes in the original array
_y_extra_shape
Additional trailing shape of the input arrays, excluding
the interpolation axis.
dtype
Dtype of the y-data arrays. Can be set via set_dtype, which
forces it to be float or complex.
Methods
-------
__call__
_prepare_x
_finish_y
_reshape_yi
_set_yi
_set_dtype
_evaluate
"""
__slots__ = ('_y_axis', '_y_extra_shape', 'dtype')
def __init__(self, xi=None, yi=None, axis=None):
self._y_axis = axis
self._y_extra_shape = None
self.dtype = None
if yi is not None:
self._set_yi(yi, xi=xi, axis=axis)
def __call__(self, x):
"""
Evaluate the interpolant
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate(x)
return self._finish_y(y, x_shape)
def _evaluate(self, x):
"""
Actually evaluate the value of the interpolator.
"""
raise NotImplementedError()
def _prepare_x(self, x):
"""Reshape input x array to 1-D"""
x = np.asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
# Cast integers etc to floats
x = x.astype(float)
x_shape = x.shape
return x.ravel(), x_shape
def _finish_y(self, y, x_shape):
"""Reshape interpolated y back to n-d array similar to initial y"""
y = y.reshape(x_shape + self._y_extra_shape)
if self._y_axis != 0 and x_shape != ():
nx = len(x_shape)
ny = len(self._y_extra_shape)
s = (list(range(nx, nx + self._y_axis))
+ list(range(nx)) + list(range(nx+self._y_axis, nx+ny)))
y = y.transpose(s)
return y
def _reshape_yi(self, yi, check=False):
yi = np.rollaxis(np.asarray(yi), self._y_axis)
if check and yi.shape[1:] != self._y_extra_shape:
ok_shape = "%r + (N,) + %r" % (self._y_extra_shape[-self._y_axis:],
self._y_extra_shape[:-self._y_axis])
raise ValueError("Data must be of shape %s" % ok_shape)
return yi.reshape((yi.shape[0], -1))
def _set_yi(self, yi, xi=None, axis=None):
if axis is None:
axis = self._y_axis
if axis is None:
raise ValueError("no interpolation axis specified")
yi = np.asarray(yi)
shape = yi.shape
if shape == ():
shape = (1,)
if xi is not None and shape[axis] != len(xi):
raise ValueError("x and y arrays must be equal in length along "
"interpolation axis.")
self._y_axis = (axis % yi.ndim)
self._y_extra_shape = yi.shape[:self._y_axis]+yi.shape[self._y_axis+1:]
self.dtype = None
self._set_dtype(yi.dtype)
def _set_dtype(self, dtype, union=False):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.dtype, np.complexfloating):
self.dtype = np.complex_
else:
if not union or self.dtype != np.complex_:
self.dtype = np.float_
class _Interpolator1DWithDerivatives(_Interpolator1D):
def derivatives(self, x, der=None):
"""
Evaluate many derivatives of the polynomial at the point x
Produce an array of all derivative values at the point x.
Parameters
----------
x : array-like
Point or points at which to evaluate the derivatives
der : None or integer
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points). This number includes the function value as 0th
derivative.
Returns
-------
d : ndarray
Array with derivatives; d[j] contains the j-th derivative.
Shape of d[j] is determined by replacing the interpolation
axis in the original array with the shape of x.
Examples
--------
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
array([1.0,2.0,3.0])
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
array([[1.0,1.0],
[2.0,2.0],
[3.0,3.0]])
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate_derivatives(x, der)
y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape)
if self._y_axis != 0 and x_shape != ():
nx = len(x_shape)
ny = len(self._y_extra_shape)
s = ([0] + list(range(nx+1, nx + self._y_axis+1))
+ list(range(1,nx+1)) +
list(range(nx+1+self._y_axis, nx+ny+1)))
y = y.transpose(s)
return y
def derivative(self, x, der=1):
"""
Evaluate one derivative of the polynomial at the point x
Parameters
----------
x : array-like
Point or points at which to evaluate the derivatives
der : integer, optional
Which derivative to extract. This number includes the
function value as 0th derivative.
Returns
-------
d : ndarray
Derivative interpolated at the x-points. Shape of d is
determined by replacing the interpolation axis in the
original array with the shape of x.
Notes
-----
This is computed by evaluating all derivatives up to the desired
one (using self.derivatives()) and then discarding the rest.
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate_derivatives(x, der+1)
return self._finish_y(y[der], x_shape)
class KroghInterpolator(_Interpolator1DWithDerivatives):
"""
Interpolating polynomial for a set of points.
The polynomial passes through all the pairs (xi,yi). One may
additionally specify a number of derivatives at each point xi;
this is done by repeating the value xi and specifying the
derivatives as successive yi values.
Allows evaluation of the polynomial and all its derivatives.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial, although they can be obtained
by evaluating all the derivatives.
Parameters
----------
xi : array-like, length N
Known x-coordinates. Must be sorted in increasing order.
yi : array-like
Known y-coordinates. When an xi occurs two or more times in
a row, the corresponding yi's represent derivative values.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Notes
-----
Be aware that the algorithms implemented here are not necessarily
the most numerically stable known. Moreover, even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon. In general, even with well-chosen
x values, degrees higher than about thirty cause problems with
numerical instability in this code.
Based on [1]_.
References
----------
.. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
and Numerical Differentiation", 1970.
Examples
--------
To produce a polynomial that is zero at 0 and 1 and has
derivative 2 at 0, call
>>> KroghInterpolator([0,0,1],[0,2,0])
This constructs the quadratic 2*X**2-2*X. The derivative condition
is indicated by the repeated zero in the xi array; the corresponding
yi values are 0, the function value, and 2, the derivative value.
For another example, given xi, yi, and a derivative ypi for each
point, appropriate arrays can be constructed as:
>>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
>>> KroghInterpolator(xi_k, yi_k)
To produce a vector-valued polynomial, supply a higher-dimensional
array for yi:
>>> KroghInterpolator([0,1],[[2,3],[4,5]])
This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.
"""
def __init__(self, xi, yi, axis=0):
_Interpolator1DWithDerivatives.__init__(self, xi, yi, axis)
self.xi = np.asarray(xi)
self.yi = self._reshape_yi(yi)
self.n, self.r = self.yi.shape
c = np.zeros((self.n+1, self.r), dtype=self.dtype)
c[0] = self.yi[0]
Vk = np.zeros((self.n, self.r), dtype=self.dtype)
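        # Build the Newton-form coefficients c; s counts consecutive repeated abscissae,
        # so repeated xi (derivative/Hermite data) enter through yi[k]/s!.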
for k in xrange(1,self.n):
s = 0
while s <= k and xi[k-s] == xi[k]:
s += 1
s -= 1
Vk[0] = self.yi[k]/float(factorial(s))
for i in xrange(k-s):
if xi[i] == xi[k]:
raise ValueError("Elements if `xi` can't be equal.")
if s == 0:
Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
else:
Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
c[k] = Vk[k-s]
self.c = c
def _evaluate(self, x):
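        # Evaluate the Newton form: pi accumulates the running product
        # (x - xi[0])*...*(x - xi[k-1]) term by term.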
pi = 1
p = np.zeros((len(x), self.r), dtype=self.dtype)
p += self.c[0,np.newaxis,:]
for k in range(1, self.n):
w = x - self.xi[k-1]
pi = w*pi
p += pi[:,np.newaxis] * self.c[k]
return p
def _evaluate_derivatives(self, x, der=None):
n = self.n
r = self.r
if der is None:
der = self.n
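        # The first loop below evaluates the polynomial while caching node differences w
        # and running products pi; the second loop accumulates derivative values in cn,
        # scaling order k by k!.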
pi = np.zeros((n, len(x)))
w = np.zeros((n, len(x)))
pi[0] = 1
p = np.zeros((len(x), self.r))
p += self.c[0,np.newaxis,:]
for k in xrange(1,n):
w[k-1] = x - self.xi[k-1]
pi[k] = w[k-1]*pi[k-1]
p += pi[k,:,np.newaxis]*self.c[k]
cn = np.zeros((max(der,n+1), len(x), r), dtype=self.dtype)
cn[:n+1,:,:] += self.c[:n+1,np.newaxis,:]
cn[0] = p
for k in xrange(1,n):
for i in xrange(1,n-k+1):
pi[i] = w[k+i-1]*pi[i-1]+pi[i]
cn[k] = cn[k]+pi[i,:,np.newaxis]*cn[k+i]
cn[k] *= factorial(k)
cn[n,:,:] = 0
return cn[:der]
def krogh_interpolate(xi,yi,x,der=0,axis=0):
"""
Convenience function for polynomial interpolation.
See `KroghInterpolator` for more details.
Parameters
----------
xi : array_like
Known x-coordinates.
yi : array_like
Known y-coordinates, of shape ``(xi.size, R)``. Interpreted as
vectors of length R, or scalars if R=1.
x : array_like
Point or points at which to evaluate the derivatives.
der : int or list
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Returns
-------
d : ndarray
If the interpolator's values are R-dimensional then the
returned array will be the number of derivatives by N by R.
If `x` is a scalar, the middle dimension will be dropped; if
the `yi` are scalars then the last dimension will be dropped.
See Also
--------
KroghInterpolator
Notes
-----
Construction of the interpolating polynomial is a relatively expensive
process. If you want to evaluate it repeatedly consider using the class
KroghInterpolator (which is what this function uses).
"""
P = KroghInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(x,der=der)
else:
return P.derivatives(x,der=np.amax(der)+1)[der]
def approximate_taylor_polynomial(f,x,degree,scale,order=None):
"""
Estimate the Taylor polynomial of f at x by polynomial fitting.
Parameters
----------
f : callable
The function whose Taylor polynomial is sought. Should accept
a vector of `x` values.
x : scalar
The point at which the polynomial is to be evaluated.
degree : int
The degree of the Taylor polynomial
scale : scalar
The width of the interval to use to evaluate the Taylor polynomial.
Function values spread over a range this wide are used to fit the
polynomial. Must be chosen carefully.
order : int or None, optional
The order of the polynomial to be used in the fitting; `f` will be
evaluated ``order+1`` times. If None, use `degree`.
Returns
-------
p : poly1d instance
The Taylor polynomial (translated to the origin, so that
for example p(0)=f(x)).
Notes
-----
The appropriate choice of "scale" is a trade-off; too large and the
function differs from its Taylor polynomial too much to get a good
answer, too small and round-off errors overwhelm the higher-order terms.
The algorithm used becomes numerically unstable around order 30 even
under ideal circumstances.
Choosing order somewhat larger than degree may improve the higher-order
terms.
"""
if order is None:
order = degree
n = order+1
# Choose n points that cluster near the endpoints of the interval in
# a way that avoids the Runge phenomenon. Ensure, by including the
# endpoint or not as appropriate, that one point always falls at x
# exactly.
xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n % 1)) + x
P = KroghInterpolator(xs, f(xs))
d = P.derivatives(x,der=degree+1)
return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
class BarycentricInterpolator(_Interpolator1D):
"""The interpolating polynomial for a set of points
Constructs a polynomial that passes through a given set of points.
Allows evaluation of the polynomial, efficient changing of the y
values to be interpolated, and updating by adding more x values.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial.
The values yi need to be provided before the function is
evaluated, but none of the preprocessing depends on them, so rapid
updates are possible.
Parameters
----------
xi : array-like
1-d array of x coordinates of the points the polynomial
should pass through
yi : array-like
The y coordinates of the points the polynomial should pass through.
If None, the y values will be supplied later via the `set_y` method.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Notes
-----
This class uses a "barycentric interpolation" method that treats
the problem as a special case of rational function interpolation.
This algorithm is quite stable, numerically, but even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon.
Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
"""
def __init__(self, xi, yi=None, axis=0):
_Interpolator1D.__init__(self, xi, yi, axis)
self.xi = np.asarray(xi)
self.set_yi(yi)
self.n = len(self.xi)
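        # Build the barycentric weights w_j = 1 / prod_{k != j} (x_k - x_j) incrementally,
        # folding in one node at a time.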
self.wi = np.zeros(self.n)
self.wi[0] = 1
for j in xrange(1,self.n):
self.wi[:j] *= (self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi **= -1
def set_yi(self, yi, axis=None):
"""
Update the y values to be interpolated
The barycentric interpolation algorithm requires the calculation
of weights, but these depend only on the xi. The yi can be changed
at any time.
Parameters
----------
yi : array_like
The y coordinates of the points the polynomial should pass through.
If None, the y values will be supplied later.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
"""
if yi is None:
self.yi = None
return
self._set_yi(yi, xi=self.xi, axis=axis)
self.yi = self._reshape_yi(yi)
self.n, self.r = self.yi.shape
def add_xi(self, xi, yi=None):
"""
Add more x values to the set to be interpolated
The barycentric interpolation algorithm allows easy updating by
adding more points for the polynomial to pass through.
Parameters
----------
xi : array_like
The x coordinates of the points that the polynomial should pass
through.
yi : array_like, optional
The y coordinates of the points the polynomial should pass through.
Should have shape ``(xi.size, R)``; if R > 1 then the polynomial is
vector-valued.
If `yi` is not given, the y values will be supplied later. `yi` should
be given if and only if the interpolator has y values specified.
"""
if yi is not None:
if self.yi is None:
raise ValueError("No previous yi value to update!")
yi = self._reshape_yi(yi, check=True)
self.yi = np.vstack((self.yi,yi))
else:
if self.yi is not None:
raise ValueError("No update to yi provided!")
old_n = self.n
self.xi = np.concatenate((self.xi,xi))
self.n = len(self.xi)
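        # Un-invert the existing weights back to plain products so the new nodes can be
        # folded in; they are re-inverted below.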
self.wi **= -1
old_wi = self.wi
self.wi = np.zeros(self.n)
self.wi[:old_n] = old_wi
for j in xrange(old_n,self.n):
self.wi[:j] *= (self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi **= -1
def __call__(self, x):
"""Evaluate the interpolating polynomial at the points x
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Currently the code computes an outer product between x and the
weights, that is, it constructs an intermediate array of size
N by len(x), where N is the degree of the polynomial.
"""
return _Interpolator1D.__call__(self, x)
def _evaluate(self, x):
if x.size == 0:
p = np.zeros((0, self.r), dtype=self.dtype)
else:
c = x[...,np.newaxis]-self.xi
z = c == 0
c[z] = 1
c = self.wi/c
p = np.dot(c,self.yi)/np.sum(c,axis=-1)[...,np.newaxis]
# Now fix where x==some xi
r = np.nonzero(z)
if len(r) == 1: # evaluation at a scalar
if len(r[0]) > 0: # equals one of the points
p = self.yi[r[0][0]]
else:
p[r[:-1]] = self.yi[r[-1]]
return p
def barycentric_interpolate(xi, yi, x, axis=0):
"""
Convenience function for polynomial interpolation.
Constructs a polynomial that passes through a given set of points,
then evaluates the polynomial. For reasons of numerical stability,
this function does not compute the coefficients of the polynomial.
This function uses a "barycentric interpolation" method that treats
the problem as a special case of rational function interpolation.
This algorithm is quite stable, numerically, but even in a world of
exact computation, unless the `x` coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon.
Parameters
----------
xi : array_like
1-d array of x coordinates of the points the polynomial should
pass through
yi : array_like
The y coordinates of the points the polynomial should pass through.
x : scalar or array_like
Points to evaluate the interpolator at.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Returns
-------
y : scalar or array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
See Also
--------
BarycentricInterpolator
Notes
-----
Construction of the interpolation weights is a relatively slow process.
If you want to call this many times with the same xi (but possibly
varying yi or x) you should use the class `BarycentricInterpolator`.
This is what this function uses internally.
"""
return BarycentricInterpolator(xi, yi, axis=axis)(x)
class PiecewisePolynomial(_Interpolator1DWithDerivatives):
"""Piecewise polynomial curve specified by points and derivatives
This class represents a curve that is a piecewise polynomial. It
passes through a list of points and has specified derivatives at
each point. The degree of the polynomial may vary from segment to
segment, as may the number of derivatives available. The degree
should not exceed about thirty.
Appending points to the end of the curve is efficient.
Parameters
----------
xi : array-like
a sorted 1-d array of x-coordinates
yi : array-like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i] (for axis=0)
orders : list of integers, or integer
a list of polynomial orders, or a single universal order
direction : {None, 1, -1}
indicates whether the xi are increasing or decreasing
+1 indicates increasing
-1 indicates decreasing
None indicates that it should be deduced from the first two xi
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Notes
-----
If orders is None, or orders[i] is None, then the degree of the
polynomial segment is exactly the degree required to match all i
available derivatives at both endpoints. If orders[i] is not None,
then some derivatives will be ignored. The code will try to use an
equal number of derivatives from each end; if the total number of
derivatives needed is odd, it will prefer the rightmost endpoint. If
not enough derivatives are available, an exception is raised.
"""
def __init__(self, xi, yi, orders=None, direction=None, axis=0):
_Interpolator1DWithDerivatives.__init__(self, axis=axis)
warnings.warn('PiecewisePolynomial is deprecated in scipy 0.14. '
'Use BPoly.from_derivatives instead.',
category=DeprecationWarning)
if axis != 0:
try:
yi = np.asarray(yi)
except ValueError:
raise ValueError("If yi is a list, then axis must be 0")
preslice = ((slice(None,None,None),) * (axis % yi.ndim))
slice0 = preslice + (0,)
slice1 = preslice + (slice(1, None, None),)
else:
slice0 = 0
slice1 = slice(1, None, None)
yi0 = np.asarray(yi[slice0])
self._set_yi(yi0)
self.xi = [xi[0]]
self.yi = [self._reshape_yi(yi0)]
self.n = 1
self.r = np.prod(self._y_extra_shape, dtype=np.int64)
self.direction = direction
self.orders = []
self.polynomials = []
self.extend(xi[1:],yi[slice1],orders)
def _make_polynomial(self,x1,y1,x2,y2,order,direction):
"""Construct the interpolating polynomial object
Deduces the number of derivatives to match at each end
from order and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
"""
n = order+1
n1 = min(n//2,len(y1))
n2 = min(n-n1,len(y2))
n1 = min(n-n2,len(y1))
if n1+n2 != n:
raise ValueError("Point %g has %d derivatives, point %g has %d derivatives, but order %d requested" % (x1, len(y1), x2, len(y2), order))
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with length y1 or y2.")
xi = np.zeros(n)
yi = np.zeros((n, self.r), dtype=self.dtype)
xi[:n1] = x1
yi[:n1] = y1[:n1].reshape((n1, self.r))
xi[n1:] = x2
yi[n1:] = y2[:n2].reshape((n2, self.r))
return KroghInterpolator(xi,yi,axis=0)
def append(self, xi, yi, order=None):
"""
Append a single point with derivatives to the PiecewisePolynomial
Parameters
----------
xi : float
Input
yi : array_like
`yi` is the list of derivatives known at `xi`
order : integer or None
a polynomial order, or instructions to use the highest
possible order
"""
yi = self._reshape_yi(yi, check=True)
self._set_dtype(yi.dtype, union=True)
if self.direction is None:
self.direction = np.sign(xi-self.xi[-1])
elif (xi-self.xi[-1])*self.direction < 0:
raise ValueError("x coordinates must be in the %d direction: %s" % (self.direction, self.xi))
self.xi.append(xi)
self.yi.append(yi)
if order is None:
n1 = len(self.yi[-2])
n2 = len(self.yi[-1])
n = n1+n2
order = n-1
self.orders.append(order)
self.polynomials.append(self._make_polynomial(
self.xi[-2], self.yi[-2],
self.xi[-1], self.yi[-1],
order, self.direction))
self.n += 1
def extend(self, xi, yi, orders=None):
"""
Extend the PiecewisePolynomial by a list of points
Parameters
----------
xi : array_like
A sorted list of x-coordinates.
yi : list of lists of length N1
``yi[i]`` (if ``axis == 0``) is the list of derivatives known
at ``xi[i]``.
orders : int or list of ints
A list of polynomial orders, or a single universal order.
direction : {None, 1, -1}
Indicates whether the `xi` are increasing or decreasing.
+1 indicates increasing
-1 indicates decreasing
None indicates that it should be deduced from the first two `xi`.
"""
if self._y_axis == 0:
# allow yi to be a ragged list
for i in xrange(len(xi)):
if orders is None or _isscalar(orders):
self.append(xi[i],yi[i],orders)
else:
self.append(xi[i],yi[i],orders[i])
else:
preslice = (slice(None,None,None),) * self._y_axis
for i in xrange(len(xi)):
if orders is None or _isscalar(orders):
self.append(xi[i],yi[preslice + (i,)],orders)
else:
self.append(xi[i],yi[preslice + (i,)],orders[i])
def _evaluate(self, x):
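        # searchsorted locates the segment containing each evaluation point; clipping
        # keeps out-of-range points on the first/last segment.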
if _isscalar(x):
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
y = self.polynomials[pos](x)
else:
m = len(x)
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
y = np.zeros((m, self.r), dtype=self.dtype)
if y.size > 0:
for i in xrange(self.n-1):
c = pos == i
y[c] = self.polynomials[i](x[c])
return y
def _evaluate_derivatives(self, x, der=None):
if der is None and self.polynomials:
der = self.polynomials[0].n
if _isscalar(x):
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
y = self.polynomials[pos].derivatives(x,der=der)
else:
m = len(x)
pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)
y = np.zeros((der,m,self.r), dtype=self.dtype)
if y.size > 0:
for i in xrange(self.n-1):
c = pos == i
y[:,c] = self.polynomials[i].derivatives(x[c],der=der)
return y
def piecewise_polynomial_interpolate(xi,yi,x,orders=None,der=0,axis=0):
"""
Convenience function for piecewise polynomial interpolation.
Parameters
----------
xi : array_like
A sorted list of x-coordinates.
yi : list of lists
``yi[i]`` is the list of derivatives known at ``xi[i]``.
x : scalar or array_like
        Coordinates at which to evaluate the polynomial.
orders : int or list of ints, optional
A list of polynomial orders, or a single universal order.
der : int or list
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the `yi` array corresponding to the x-coordinate values.
Returns
-------
y : ndarray
Interpolated values or derivatives. If multiple derivatives
were requested, these are given along the first axis.
See Also
--------
PiecewisePolynomial
Notes
-----
If `orders` is None, or ``orders[i]`` is None, then the degree of the
polynomial segment is exactly the degree required to match all i
available derivatives at both endpoints. If ``orders[i]`` is not None,
then some derivatives will be ignored. The code will try to use an
equal number of derivatives from each end; if the total number of
derivatives needed is odd, it will prefer the rightmost endpoint. If
not enough derivatives are available, an exception is raised.
Construction of these piecewise polynomials can be an expensive process;
if you repeatedly evaluate the same polynomial, consider using the class
PiecewisePolynomial (which is what this function does).
"""
P = PiecewisePolynomial(xi, yi, orders, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(x,der=der)
else:
return P.derivatives(x,der=np.amax(der)+1)[der]
| maciejkula/scipy | scipy/interpolate/polyint.py | Python | bsd-3-clause | 32,302 |
import urllib
def command_oraakkeli(bot, user, channel, args):
"""Asks a question from the oracle (http://www.lintukoto.net/viihde/oraakkeli/)"""
if not args: return
args = urllib.quote_plus(args)
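    # getUrl is not imported here; the bot framework is expected to provide it
    # in the module namespace at runtime.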
answer = getUrl("http://www.lintukoto.net/viihde/oraakkeli/index.php?kysymys=%s&html=0" % args).getContent()
answer = unicode(answer)
answer = answer.encode("utf-8")
return bot.say(channel, "Oraakkeli vastaa: %s" % answer)
| nigeljonez/newpyfibot | modules/module_oraakkeli.py | Python | bsd-3-clause | 459 |
import datetime
from django.db import models, IntegrityError
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse as django_reverse
from django.utils.http import urlquote
from django.conf import settings
from tower import ugettext as _, ugettext_lazy as _lazy
from mptt.models import MPTTModel
from spark.urlresolvers import reverse, absolute_url
from spark.helpers import urlparams
from spark.models import City
from sharing import utils as sharing_utils
from sharing.messages import TWITTER_BADGE_MSG, FACEBOOK_BADGE_MSG
from challenges.models import Challenge
from challenges import utils
class Profile(models.Model):
user = models.OneToOneField(User, primary_key=True)
# Game progress
level = models.PositiveIntegerField(default=1)
challenges = models.ManyToManyField(Challenge, through='CompletedChallenge')
new_challenges = models.BooleanField(default=False)
# Boost 1/2
boost1_completed = models.BooleanField(default=False)
latitude = models.FloatField(blank=True, null=True)
longitude = models.FloatField(blank=True, null=True)
major_city = models.ForeignKey(City, blank=True, null=True)
city_name = models.CharField(max_length=255, blank=True, null=True)
country_code = models.CharField(max_length=2, blank=True, null=True)
us_state = models.CharField(max_length=2, blank=True, null=True)
# Boost 2/2
boost2_completed = models.BooleanField(default=False)
no_parent = models.BooleanField(default=True)
parent_username = models.CharField(max_length=30, blank=True, null=True)
date_boost2_localtime = models.DateTimeField(blank=True, null=True)
# Flags
login_desktop = models.BooleanField(default=False)
login_mobile = models.BooleanField(default=False)
is_non_android = models.BooleanField(default=False)
# Personal stats
longest_chain = models.PositiveIntegerField(default=0)
def __unicode__(self):
return unicode(self.user)
def get_absolute_url(self):
return reverse('desktop.user', args=[self.user.username])
@property
def generic_sharing_url(self):
url = urlparams(django_reverse('desktop.user', args=[self.user.username]))
return absolute_url(url)
def _social_sharing_url(self, service):
# django_reverse used instead of reverse because we don't want a locale preprended to sharing links.
url = urlparams(django_reverse('desktop.user', args=[self.user.username]),
f=service)
return absolute_url(url)
@property
def twitter_sharing_url(self):
return self._social_sharing_url('t')
@property
def facebook_sharing_url(self):
return self._social_sharing_url('fb')
@property
def poster_sharing_url(self):
return self._social_sharing_url('p')
@property
def badges(self):
"""Returns a list of dicts used for badge list rendering.
They represent all badges earned by the user in the Spark game.
"""
badges = []
completed_challenges = CompletedChallenge.objects.filter(profile=self,
date_badge_earned__isnull=False)
for cc in completed_challenges:
badge_id = utils.get_challenge_id(cc.challenge.level, cc.challenge.number)
badge_description = cc.challenge.badge_description
badges.append({
'id': badge_id,
'name': cc.challenge.badge_name,
'description': badge_description,
'date_earned': cc.date_badge_earned,
'new': cc.new_badge,
'twitter_msg': urlquote(unicode(TWITTER_BADGE_MSG % {'badge_name':cc.challenge.badge_name, 'short_url':''})),
'facebook_msg': urlquote(unicode(FACEBOOK_BADGE_MSG % {'badge_name':cc.challenge.badge_name})),
'facebook_img': absolute_url(settings.MEDIA_URL+'img/badges/fb/'+badge_id.replace('_','-')+'.png'),
'facebook_desc': urlquote(badge_description)
})
return badges
def has_badge(self, badge_id):
"""Returns whether this user has earned the given badge."""
if badge_id:
return CompletedChallenge.objects.filter(profile=self, challenge__pk=badge_id,
date_badge_earned__isnull=False).count() == 1
else:
return False
@property
def total_badges_earned(self):
"""Returns the total number of badges earned by the user.
Doesn't include hidden unlocked badges from an upper level.
"""
return CompletedChallenge.objects.filter(profile=self, date_badge_earned__isnull=False).count()
def get_home_location(self, locale):
"""Returns a string containing the location determined by Google Location Services
when Boost your Spark 1/2 was completed by the user.
"""
from geo.countries import countries
if self.country_code and locale in countries:
country = countries[locale][self.country_code.lower()]
return '%s, %s' % (self.city_name, country)
else:
return ''
@property
def spark_started_with(self):
if self.parent_username is not None:
return self.parent_username
return ''
@property
def most_recent_share(self):
"""Most recent share stat displayed on desktop dashboard/user pages."""
from stats.models import SharingHistory
share = SharingHistory.objects.filter(parent=self)[:1]
if share:
return share[0].date_shared
else:
return None
@property
def shares_over_time(self):
"""Aggregate data of Spark shares since the start of the campaign.
Used by the 'shares over time' diagram in the user dashboard.
"""
from stats.models import SharingHistory
return SharingHistory.get_shares_over_time(self)
@property
def sparked_countries(self):
"""List of countries this user has shared their Spark with."""
from .utils import user_node
countries = set()
node = user_node(self.user)
for child in node.get_children():
cc = child.user.profile.country_code
if cc:
countries.add(cc.lower())
return list(countries)
@property
def total_shares(self):
"""Total shares stat displayed on desktop dashboard/user pages."""
from stats.models import SharingHistory
return SharingHistory.objects.filter(parent=self).count()
@property
def challenge_info(self):
"""Returns a list of dicts containing level/challenge completion information.
Used to render both desktop and mobile collapsing challenge lists.
"""
return utils.get_profile_levels(self)
@property
def new_challenge_count(self):
"""Returns the number of newly available challenges in the user's current level."""
if self.new_challenges:
challenge_count = utils.CHALLENGE_COUNT_PER_LVL[self.level-1]
completed_challenge_count = len(CompletedChallenge.objects.filter(profile=self,
challenge__level=self.level))
return challenge_count - completed_challenge_count
else:
return 0
@property
def new_badge_count(self):
"""Returns the number of recently earned badges."""
return len([b for b in self.badges if b['new']])
@property
def qr_code_download(self):
"""Returns the URL of a QR code which, when scanned, points to: https://[domain]/download?f=qr&user=[username]
"""
url = absolute_url(urlparams(django_reverse('sharing.download'), user=self.user.username))
return sharing_utils.url2qr(url)
@property
def continent_code(self):
from geo.continents import countries_continents
code = ''
if self.country_code:
code = countries_continents[self.country_code]
return code
@property
def total_countries_sparked(self):
"""Returns the total number of countries where the user's children are located."""
return len(self.sparked_countries)
@property
def total_continents_sparked(self):
"""Returns the total number of continents where the user's children are located."""
from geo.continents import countries_continents
from .utils import user_node
continents = set()
node = user_node(self.user)
for child in node.get_children():
cc = child.user.profile.country_code
if cc:
continents.add(countries_continents[cc])
return len(continents)
@property
def children_profiles(self):
"""Returns a list of profiles of the user's children in the user tree."""
from .utils import user_node
return [child.user.profile for child in user_node(self.user).get_children()]
def clear_new_badges(self):
"""Clears notifications of recently earned badges."""
CompletedChallenge.objects.filter(profile=self, new_badge=True).update(new_badge=False)
def clear_new_challenges(self):
"""Clears notifications of new available challenges."""
self.new_challenges = False
self.save()
def complete_challenges(self, challenges):
"""Helper method to easily save the completion of given challenges for this user."""
from stats.models import GlobalStats
error = False
if challenges:
for challenge in challenges:
try:
# If the completed challenge is from an upper level and not an easter egg, we keep the badge hidden.
# This is done by setting the date_badge_earned to NULL.
date = None if self.level < challenge.level and not challenge.easter_egg else datetime.datetime.now()
CompletedChallenge.objects.create(profile=self, challenge=challenge, date_badge_earned=date,
# Don't set new_badge to True if the badge is hidden.
new_badge=date is not None)
GlobalStats.increment_total_badges()
except IntegrityError:
# Challenge was already completed by another concurrent 'update_completed_challenges' task.
# In this case, fail silently.
pass
def trigger_multisparker_badge(self):
from challenges.tasks import update_completed_challenges
if self.login_desktop and self.login_mobile:
update_completed_challenges(self.user.id)
def update_ancestors_longest_chain(self):
"""Updates 'longest chain' stat of all ancestors of this user when relevant.
Used after Boost step 2 confirmation so that all users involved have their longest chain stat updated.
"""
from .utils import user_node
ancestors = user_node(self.user).get_ancestors()
chain_length = len(ancestors)
for profile in (ancestor.user.profile for ancestor in ancestors):
if profile.longest_chain < chain_length:
profile.longest_chain = chain_length
profile.save()
chain_length -= 1
def add_city_shares_for_children(self):
"""Creates city shares in the CitySharingHistory for the global visualization.
This is useful when a user already has children when he completes boost 1 (geolocation).
As soon as it's completed, city shares are created for all geolocated children.
"""
from stats.models import CitySharingHistory
for child in self.children_profiles:
if child.boost1_completed:
CitySharingHistory.add_share_from_profiles(self, child)
# Retrieves or creates a Profile automatically whenever the profile property is accessed
User.profile = property(lambda u: Profile.objects.get_or_create(user=u)[0])
class CompletedChallenge(models.Model):
"""Mapping table for challenge completion and badge awarding."""
challenge = models.ForeignKey(Challenge)
profile = models.ForeignKey(Profile, db_index=True)
date_completed = models.DateTimeField(auto_now_add=True)
date_badge_earned = models.DateTimeField(blank=True, null=True)
new_badge = models.BooleanField(default=False)
class Meta:
unique_together = ('challenge', 'profile')
def __unicode__(self):
return "%s <-> %s" % (self.profile, self.challenge)
class UserNode(MPTTModel):
"""
Represents a user in the Spark sharing hierarchy.
This model is mainly used for storing chains of shares as user trees.
"""
user = models.OneToOneField(User, related_name='node', db_index=True)
parent = models.ForeignKey('self', default=None, blank=True, null=True,
related_name='children')
class Meta:
db_table='users_tree'
class MPTTMeta:
pass
def __unicode__(self):
return unicode(self.user)
| mozilla/spark | apps/users/models.py | Python | bsd-3-clause | 13,725 |
# -*- coding: utf-8 -*-
default_app_config = 'allink_apps.locations.apps.AllinkLocationsConfig'
__version__ = '0.0.1'
| allink/allink-apps | locations/__init__.py | Python | bsd-3-clause | 118 |
import subprocess
import os
import sys
import commands
import numpy as np
import pyroms
import pyroms_toolbox
from remap_bio_woa import remap_bio_woa
from remap_bio_glodap import remap_bio_glodap
data_dir_woa = '/archive/u1/uaf/kate/COBALT/'
data_dir_glodap = '/archive/u1/uaf/kate/COBALT/'
dst_dir='./'
src_grd = pyroms_toolbox.BGrid_GFDL.get_nc_BGrid_GFDL('/archive/u1/uaf/kate/COBALT/GFDL_CM2.1_grid.nc', name='ESM2M_NWGOA3')
dst_grd = pyroms.grid.get_ROMS_grid('NWGOA3')
# define all tracer stuff
list_tracer = ['alk', 'cadet_arag', 'cadet_calc', 'dic', 'fed', 'fedet', 'fedi', 'felg', 'fesm', 'ldon', 'ldop', 'lith', 'lithdet', 'nbact', 'ndet', 'ndi', 'nlg', 'nsm', 'nh4', 'no3', 'o2', 'pdet', 'po4', 'srdon', 'srdop', 'sldon', 'sldop', 'sidet', 'silg', 'sio4', 'nsmz', 'nmdz', 'nlgz']
tracer_longname = ['Alkalinity', 'Detrital CaCO3', 'Detrital CaCO3', 'Dissolved Inorganic Carbon', 'Dissolved Iron', 'Detrital Iron', 'Diazotroph Iron', 'Large Phytoplankton Iron', 'Small Phytoplankton Iron', 'labile DON', 'labile DOP', 'Lithogenic Aluminosilicate', 'lithdet', 'bacterial', 'ndet', 'Diazotroph Nitrogen', 'Large Phytoplankton Nitrogen', 'Small Phytoplankton Nitrogen', 'Ammonia', 'Nitrate', 'Oxygen', 'Detrital Phosphorus', 'Phosphate', 'Semi-Refractory DON', 'Semi-Refractory DOP', 'Semilabile DON', 'Semilabile DOP', 'Detrital Silicon', 'Large Phytoplankton Silicon', 'Silicate', 'Small Zooplankton Nitrogen', 'Medium-sized zooplankton Nitrogen', 'large Zooplankton Nitrogen']
tracer_units = ['mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'g/kg', 'g/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg']
#------- WOA13 ---------------------------------
id_tracer_update_woa = [19,20,22,29]
list_tracer_update_woa = []
tracer_longname_update_woa = []
tracer_units_update_woa = []
for idtra in id_tracer_update_woa:
print list_tracer[idtra]
for idtra in id_tracer_update_woa:
# add to tracer update
list_tracer_update_woa.append(list_tracer[idtra])
tracer_longname_update_woa.append(tracer_longname[idtra])
tracer_units_update_woa.append(tracer_units[idtra])
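# Remap the monthly WOA13 climatology for each selected tracer and append the result
# to the corresponding monthly CLIM file with ncks.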
for mm in np.arange(12):
clim_file = dst_dir + dst_grd.name + '_clim_bio_GFDL+WOA+GLODAP_m' + str(mm+1).zfill(2) + '.nc'
print '\nBuild CLIM file for month', mm
for ktr in np.arange(len(list_tracer_update_woa)):
ctra = list_tracer_update_woa[ktr]
if ctra == 'sio4':
ctra = 'si'
mydict = {'tracer':list_tracer_update_woa[ktr],'longname':tracer_longname_update_woa[ktr],'units':tracer_units_update_woa[ktr],'file':data_dir_woa + ctra + '_WOA13-CM2.1_monthly.nc', \
'frame':mm}
remap_bio_woa(mydict, src_grd, dst_grd, dst_dir=dst_dir)
out_file = dst_dir + dst_grd.name + '_clim_bio_' + list_tracer_update_woa[ktr] + '.nc'
command = ('ncks', '-a', '-A', out_file, clim_file)
subprocess.check_call(command)
os.remove(out_file)
#--------- GLODAP -------------------------------
id_tracer_update_glodap = [0,3]
list_tracer_update_glodap = []
tracer_longname_update_glodap = []
tracer_units_update_glodap = []
for idtra in id_tracer_update_glodap:
print list_tracer[idtra]
for idtra in id_tracer_update_glodap:
# add to tracer update
list_tracer_update_glodap.append(list_tracer[idtra])
tracer_longname_update_glodap.append(tracer_longname[idtra])
tracer_units_update_glodap.append(tracer_units[idtra])
for mm in np.arange(12):
clim_file = dst_dir + dst_grd.name + '_clim_bio_GFDL+WOA+GLODAP_m' + str(mm+1).zfill(2) + '.nc'
print '\nBuild CLIM file for month', mm
for ktr in np.arange(len(list_tracer_update_glodap)):
ctra = list_tracer_update_glodap[ktr]
mydict = {'tracer':list_tracer_update_glodap[ktr],'longname':tracer_longname_update_glodap[ktr],'units':tracer_units_update_glodap[ktr],'file':data_dir_glodap + ctra + '_GLODAP-ESM2M_annual.nc', \
'frame':mm}
remap_bio_glodap(mydict, src_grd, dst_grd, dst_dir=dst_dir)
out_file = dst_dir + dst_grd.name + '_clim_bio_' + list_tracer_update_glodap[ktr] + '.nc'
command = ('ncks', '-a', '-A', out_file, clim_file)
subprocess.check_call(command)
os.remove(out_file)
| kshedstrom/pyroms | examples/cobalt-preproc/Clim_bio/make_clim_file_bio_addons.py | Python | bsd-3-clause | 4,460 |
# # # # # # # # # # # # # #
# CAPTAINHOOK IDENTIFIER #
# # # # # # # # # # # # # #
from .utils import bash, filter_python_files
DEFAULT = 'off'
CHECK_NAME = 'frosted'
NO_FROSTED_MSG = (
"frosted is required for the frosted plugin.\n"
"`pip install frosted` or turn it off in your tox.ini file.")
REQUIRED_FILES = ['tox.ini']
def run(files, temp_folder):
"Check frosted errors in the code base."
try:
import frosted # NOQA
except ImportError:
return NO_FROSTED_MSG
py_files = filter_python_files(files)
cmd = 'frosted {0}'.format(' '.join(py_files))
return bash(cmd).value()
| Friz-zy/captainhook | captainhook/checkers/frosted_checker.py | Python | bsd-3-clause | 629 |
# -*- coding: utf-8 -*-
from unittest import TestCase
import logging
log = logging.getLogger(__name__)
from formencode import validators, Invalid
from nose.tools import *
import formencode
from coregae.widget.field import *
class TestBasefield(TestCase):
def test_basefield(self):
"""
Test for functions of BaseFields
"""
c = {'id':'AB1234', 'size':20}
bf = TextField(name = 'foo', args = c)
body = bf.render_body()
assert_true( 'name = "foo"' in body)
assert_true( 'id = "AB1234"' in body)
body = bf.render_body(value = 'VALUESTRING')
assert_true("VALUESTRING" in body)
bf.title = 'THETITLE'
assert_true("THETITLE" in bf.get_title())
def test_validate(self):
"""
Test for MediaHandler, validation
"""
v = validators
c = {'id':'AB1234', 'size':20}
bf = TextField(name = 'foo', args = c, validator = v.Int())
assert_equal(bf.validate('1')[0], 1)
assert_equal(bf.validate('A')[0], None)
# test for multiple validators
bf = TextField(name = 'foo', args = c,
validator = ( v.Int(), v.OneOf([1, 2, 3]) ))
assert_equal(bf.validate('1')[0], 1)
assert_equal(bf.validate(2)[0], 2)
assert_equal(bf.validate('A')[0], None)
assert_equal(bf.validate('4')[0], None)
assert_equal(bf.validate(10)[0], None)
bf = TextField(name = 'foo', args = c,
validator = ( v.Int(), v.OneOf([1, 2, 3]), ),
required = True)
r = bf.validate('')
assert_equal(r[0], None)
def test_fields(self):
"""
Test for functions of subclass of BaseField
"""
c = {'id':'AB1234', 'size':20}
tf = TextField(name = 'foo', args = c)
body = tf.render_body()
assert_true( 'name = "foo"' in body)
assert_true( 'id = "AB1234"' in body)
hf = HiddenField(name = 'foo', args = c, default = 'defoo')
body = hf.render_body(value = 'VALUESTRING')
assert_true("VALUESTRING" in body)
body = hf.render_body()
assert_true("defoo" in body)
rf = RadioField(name = 'foo', args = c,
values = (('vfoo', 'v1'), ('vbar', 'v2')))
body = rf.render_body()
for v in ('vfoo', 'vbar'):
assert_true(">%s<" % v in body)
for v in ('v1', 'v2'):
assert_true("value = '%s'" % v in body)
assert_true("checked" not in body)
body = rf.render_body(value = 'v2')
assert_true("checked" in body)
cg = CheckboxGroup(name = 'foo', args = c,
values = (('vfoo', 'v1'), ('vbar', 1)))
body = cg.render_body()
for v in ('vfoo', 'vbar'):
assert_true(">%s<" % v in body)
for v in ('v1', '1'):
assert_true('value = "%s"' % v in body)
for v in ('v1', '1'):
assert_true('name = "foo_%s"' % v in body)
assert_true("checked" not in body)
body = cg.render_body(value = 'v1')
assert_true("checked" in body)
body = cg.render_body(value = [1])
assert_true("checked" in body)
v = validators
cg2 = CheckboxGroup(name = 'foo', args = c,
values = (('vfoo', 'v1'), ('vbar', 'v2')),
validator = v.Int())
t = cg2.validate({'foo_v1':'1', 'foo_v2':'a', 'foo_g3':'b'})
assert_equal(t[0][1], None)
assert_true(isinstance(t[0][2], Invalid))
t = cg2.validate({'foo_v1':'1', 'foo_v2':'2', 'foo_g3':'b'})
assert_equal(sorted(t[0][1]), [1, 2])
assert_equal(t[0][2], None)
sf = SelectField(name = 'foo', args = c,
values = (('vfoo', 'v1'), ('vbar', 'v2')))
body = sf.render_body()
for v in ('vfoo', 'vbar'):
assert_true("> %s </option>" % v in body)
for v in ('v1', 'v2'):
assert_true('value = "%s"' % v in body)
assert_true("selected" not in body)
body = sf.render_body(value = 'v2')
assert_true("selected" in body)
cf = CheckboxField(name = 'foo', args = c)
body = cf.render_body()
assert_true('name = "foo"' in body)
cf = CheckboxField(name = 'foo')
body = cf.render_body(value = True)
assert_true("checked" in body)
tf = TextArea(name = 'foo', args = c)
body = tf.render_body()
assert_true('name = "foo"' in body)
body = tf.render_body(value = 'this is body<body>')
assert_true(">this is body<body><" in body)
rt = RichText(name = 'foo', args = c)
assert_equal(len(rt.get_objects()), 1)
assert_equal(len(rt.get_object_tag()), 1)
ff = FileField(name = 'foo')
body = ff.render_body()
assert_true('type = "file"' in body)
assert_false('disabled' in body)
body = ff.render_body('bar')
assert_true(ff.REPLACE_PREFIX+'foo' in body)
imgf = ImageField(name = 'foo')
body = imgf.render_body(value = 'path/to/image')
assert_true("path/to/image" in body)
tf = TextField(name = 'foo', args = c, default = 'bar')
body = tf.render_body()
assert_true( 'value = "bar"' in body)
| Letractively/aha-gae | aha/widget/tests/test_fields.py | Python | bsd-3-clause | 5,382 |
import optparse
from os import curdir
from os.path import abspath
import sys
from autoscalebot.tasks import start_autoscaler
from autoscalebot import version
def main(args=sys.argv[1:]):
CLI_ROOT = abspath(curdir)
sys.path.insert(0, CLI_ROOT)
parser = optparse.OptionParser(
usage="%prog or type %prog -h (--help) for help",
version=version
)
parser.add_option("--settings",
dest="settings",
default=None,
type="string",
help='settings to use when autoscaling')
options, args = parser.parse_args()
if options.settings:
settings = __import__(options.settings)
start_autoscaler(settings=settings)
| wieden-kennedy/autoscalebot | autoscalebot/cli.py | Python | bsd-3-clause | 747 |
AuthorizedException = (
BufferError,
ArithmeticError,
AssertionError,
AttributeError,
EnvironmentError,
EOFError,
LookupError,
MemoryError,
ReferenceError,
RuntimeError,
SystemError,
TypeError,
ValueError
)
| ainafp/nilearn | nilearn/_utils/exceptions.py | Python | bsd-3-clause | 311 |
# Hierarchical Agglomerative Cluster Analysis
#
# Copyright (C) 2013 Folgert Karsdorp
# Author: Folgert Karsdorp <[email protected]>
# URL: <https://github.com/fbkarsdorp/HAC-python>
# For licence information, see LICENCE.TXT
import numpy
from numpy import dot, sqrt
def binarize_vector(u):
return u > 0
def cosine_distance(u, v, binary=False):
"""Return the cosine distance between two vectors."""
if binary:
return cosine_distance_binary(u, v)
return 1.0 - dot(u, v) / (sqrt(dot(u, u)) * sqrt(dot(v, v)))
def cosine_distance_binary(u, v):
u = binarize_vector(u)
v = binarize_vector(v)
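    # For binary vectors this is the Ochiai coefficient (the cosine similarity),
    # i.e. a similarity rather than a distance.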
return (1.0 * (u * v).sum()) / numpy.sqrt((u.sum() * v.sum()))
def euclidean_distance(u, v):
"""Return the euclidean distance between two vectors."""
diff = u - v
return sqrt(dot(diff, diff))
def cityblock_distance(u, v):
"""Return the Manhattan/City Block distance between two vectors."""
return abs(u-v).sum()
def canberra_distance(u, v):
"""Return the canberra distance between two vectors."""
return numpy.sum(abs(u-v) / abs(u+v))
def correlation(u, v):
"""Return the correlation distance between two vectors."""
u_var = u - u.mean()
v_var = v - v.mean()
return 1.0 - dot(u_var, v_var) / (sqrt(dot(u_var, u_var)) *
sqrt(dot(v_var, v_var)))
def dice(u, v):
"""Return the dice coefficient between two vectors."""
u = u > 0
v = v > 0
return (2.0 * (u * v).sum()) / (u.sum() + v.sum())
def jaccard_distance(u, v):
"""return jaccard distance"""
u = numpy.asarray(u)
v = numpy.asarray(v)
return (numpy.double(numpy.bitwise_and((u != v),
numpy.bitwise_or(u != 0, v != 0)).sum())
/ numpy.double(numpy.bitwise_or(u != 0, v != 0).sum()))
def jaccard(u, v):
"""Return the Jaccard coefficient between two vectors."""
u = u > 0
v = v > 0
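    # With boolean arrays, u * v marks the intersection and u + v the union,
    # so this is the Jaccard similarity coefficient.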
return (1.0 * (u * v).sum()) / (u + v).sum()
| mikekestemont/PyStyl | pystyl/clustering/distance.py | Python | bsd-3-clause | 1,971 |
# -*- coding: utf-8 -*-
import json
from unittest import mock
from oauthlib.common import Request
from oauthlib.oauth2.rfc6749 import errors
from oauthlib.oauth2.rfc6749.grant_types import (
ResourceOwnerPasswordCredentialsGrant,
)
from oauthlib.oauth2.rfc6749.tokens import BearerToken
from tests.unittest import TestCase
class ResourceOwnerPasswordCredentialsGrantTest(TestCase):
def setUp(self):
mock_client = mock.MagicMock()
mock_client.user.return_value = 'mocked user'
self.request = Request('http://a.b/path')
self.request.grant_type = 'password'
self.request.username = 'john'
self.request.password = 'doe'
self.request.client = mock_client
self.request.scopes = ('mocked', 'scopes')
self.mock_validator = mock.MagicMock()
self.auth = ResourceOwnerPasswordCredentialsGrant(
request_validator=self.mock_validator)
def set_client(self, request, *args, **kwargs):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def test_create_token_response(self):
bearer = BearerToken(self.mock_validator)
headers, body, status_code = self.auth.create_token_response(
self.request, bearer)
token = json.loads(body)
self.assertEqual(self.mock_validator.save_token.call_count, 1)
self.assertIn('access_token', token)
self.assertIn('token_type', token)
self.assertIn('expires_in', token)
self.assertIn('refresh_token', token)
# ensure client_authentication_required() is properly called
self.mock_validator.client_authentication_required.assert_called_once_with(self.request)
# fail client authentication
self.mock_validator.reset_mock()
self.mock_validator.validate_user.return_value = True
self.mock_validator.authenticate_client.return_value = False
status_code = self.auth.create_token_response(self.request, bearer)[2]
self.assertEqual(status_code, 401)
self.assertEqual(self.mock_validator.save_token.call_count, 0)
# mock client_authentication_required() returning False then fail
self.mock_validator.reset_mock()
self.mock_validator.client_authentication_required.return_value = False
self.mock_validator.authenticate_client_id.return_value = False
status_code = self.auth.create_token_response(self.request, bearer)[2]
self.assertEqual(status_code, 401)
self.assertEqual(self.mock_validator.save_token.call_count, 0)
def test_create_token_response_without_refresh_token(self):
# self.auth.refresh_token = False so we don't generate a refresh token
self.auth = ResourceOwnerPasswordCredentialsGrant(
request_validator=self.mock_validator, refresh_token=False)
bearer = BearerToken(self.mock_validator)
headers, body, status_code = self.auth.create_token_response(
self.request, bearer)
token = json.loads(body)
self.assertEqual(self.mock_validator.save_token.call_count, 1)
self.assertIn('access_token', token)
self.assertIn('token_type', token)
self.assertIn('expires_in', token)
# ensure no refresh token is generated
self.assertNotIn('refresh_token', token)
# ensure client_authentication_required() is properly called
self.mock_validator.client_authentication_required.assert_called_once_with(self.request)
# fail client authentication
self.mock_validator.reset_mock()
self.mock_validator.validate_user.return_value = True
self.mock_validator.authenticate_client.return_value = False
status_code = self.auth.create_token_response(self.request, bearer)[2]
self.assertEqual(status_code, 401)
self.assertEqual(self.mock_validator.save_token.call_count, 0)
# mock client_authentication_required() returning False then fail
self.mock_validator.reset_mock()
self.mock_validator.client_authentication_required.return_value = False
self.mock_validator.authenticate_client_id.return_value = False
status_code = self.auth.create_token_response(self.request, bearer)[2]
self.assertEqual(status_code, 401)
self.assertEqual(self.mock_validator.save_token.call_count, 0)
def test_custom_auth_validators_unsupported(self):
authval1, authval2 = mock.Mock(), mock.Mock()
expected = ('ResourceOwnerPasswordCredentialsGrant does not '
'support authorization validators. Use token '
'validators instead.')
with self.assertRaises(ValueError) as caught:
ResourceOwnerPasswordCredentialsGrant(self.mock_validator,
pre_auth=[authval1])
self.assertEqual(caught.exception.args[0], expected)
with self.assertRaises(ValueError) as caught:
ResourceOwnerPasswordCredentialsGrant(self.mock_validator,
post_auth=[authval2])
self.assertEqual(caught.exception.args[0], expected)
with self.assertRaises(AttributeError):
self.auth.custom_validators.pre_auth.append(authval1)
with self.assertRaises(AttributeError):
self.auth.custom_validators.pre_auth.append(authval2)
def test_custom_token_validators(self):
tknval1, tknval2 = mock.Mock(), mock.Mock()
self.auth.custom_validators.pre_token.append(tknval1)
self.auth.custom_validators.post_token.append(tknval2)
bearer = BearerToken(self.mock_validator)
self.auth.create_token_response(self.request, bearer)
self.assertTrue(tknval1.called)
self.assertTrue(tknval2.called)
def test_error_response(self):
pass
def test_scopes(self):
pass
def test_invalid_request_missing_params(self):
del self.request.grant_type
self.assertRaises(errors.InvalidRequestError, self.auth.validate_token_request,
self.request)
def test_invalid_request_duplicates(self):
request = mock.MagicMock(wraps=self.request)
request.duplicate_params = ['scope']
self.assertRaises(errors.InvalidRequestError, self.auth.validate_token_request,
request)
def test_invalid_grant_type(self):
self.request.grant_type = 'foo'
self.assertRaises(errors.UnsupportedGrantTypeError,
self.auth.validate_token_request, self.request)
def test_invalid_user(self):
self.mock_validator.validate_user.return_value = False
self.assertRaises(errors.InvalidGrantError, self.auth.validate_token_request,
self.request)
def test_client_id_missing(self):
del self.request.client.client_id
self.assertRaises(NotImplementedError, self.auth.validate_token_request,
self.request)
def test_valid_token_request(self):
self.mock_validator.validate_grant_type.return_value = True
self.auth.validate_token_request(self.request)
| idan/oauthlib | tests/oauth2/rfc6749/grant_types/test_resource_owner_password.py | Python | bsd-3-clause | 7,233 |
import typing
import anyio
from starlette.requests import Request
from starlette.responses import Response, StreamingResponse
from starlette.types import ASGIApp, Receive, Scope, Send
RequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]]
DispatchFunction = typing.Callable[
[Request, RequestResponseEndpoint], typing.Awaitable[Response]
]
class BaseHTTPMiddleware:
    def __init__(
        self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None
    ) -> None:
self.app = app
self.dispatch_func = self.dispatch if dispatch is None else dispatch
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if scope["type"] != "http":
await self.app(scope, receive, send)
return
async def call_next(request: Request) -> Response:
app_exc: typing.Optional[Exception] = None
send_stream, recv_stream = anyio.create_memory_object_stream()
async def coro() -> None:
nonlocal app_exc
async with send_stream:
try:
await self.app(scope, request.receive, send_stream.send)
except Exception as exc:
app_exc = exc
task_group.start_soon(coro)
try:
message = await recv_stream.receive()
except anyio.EndOfStream:
if app_exc is not None:
raise app_exc
raise RuntimeError("No response returned.")
assert message["type"] == "http.response.start"
async def body_stream() -> typing.AsyncGenerator[bytes, None]:
async with recv_stream:
async for message in recv_stream:
assert message["type"] == "http.response.body"
yield message.get("body", b"")
if app_exc is not None:
raise app_exc
response = StreamingResponse(
status_code=message["status"], content=body_stream()
)
response.raw_headers = message["headers"]
return response
async with anyio.create_task_group() as task_group:
request = Request(scope, receive=receive)
response = await self.dispatch_func(request, call_next)
await response(scope, receive, send)
task_group.cancel_scope.cancel()
async def dispatch(
self, request: Request, call_next: RequestResponseEndpoint
) -> Response:
raise NotImplementedError() # pragma: no cover
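# Minimal usage sketch (illustrative addition, not part of Starlette itself):
# HTTP middleware is normally written by subclassing BaseHTTPMiddleware and
# overriding dispatch(); the class and header name below are arbitrary examples.
class ExampleCustomHeaderMiddleware(BaseHTTPMiddleware):
    async def dispatch(
        self, request: Request, call_next: RequestResponseEndpoint
    ) -> Response:
        response = await call_next(request)
        response.headers["X-Example"] = "middleware"
        return response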
| encode/starlette | starlette/middleware/base.py | Python | bsd-3-clause | 2,630 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUCTranslationMap(NURESTObject):
""" Represents a CTranslationMap in the VSD
Notes:
1:1 mapping of customer private IPs in customer domain to customer alias (public) IPs in provider domain and N:1 mapping to customer alias SPAT IP in the provider domain.
"""
__rest_name__ = "ctranslationmap"
__resource_name__ = "ctranslationmaps"
## Constants
CONST_MAPPING_TYPE_PAT = "PAT"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_MAPPING_TYPE_NAT = "NAT"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a CTranslationMap instance
Notes:
                You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> ctranslationmap = NUCTranslationMap(id=u'xxxx-xxx-xxx-xxx', name=u'CTranslationMap')
>>> ctranslationmap = NUCTranslationMap(data=my_dict)
"""
super(NUCTranslationMap, self).__init__()
# Read/Write Attributes
self._mapping_type = None
self._last_updated_by = None
self._entity_scope = None
self._associated_domain_id = None
self._customer_alias_ip = None
self._customer_ip = None
self._external_id = None
self.expose_attribute(local_name="mapping_type", remote_name="mappingType", attribute_type=str, is_required=True, is_unique=False, choices=[u'NAT', u'PAT'])
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="associated_domain_id", remote_name="associatedDomainID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="customer_alias_ip", remote_name="customerAliasIP", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="customer_ip", remote_name="customerIP", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def mapping_type(self):
""" Get mapping_type value.
Notes:
NAT for 1:1 mapping or PAT for *:1 mappings.
This attribute is named `mappingType` in VSD API.
"""
return self._mapping_type
@mapping_type.setter
def mapping_type(self, value):
""" Set mapping_type value.
Notes:
NAT for 1:1 mapping or PAT for *:1 mappings.
This attribute is named `mappingType` in VSD API.
"""
self._mapping_type = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def associated_domain_id(self):
""" Get associated_domain_id value.
Notes:
Domain associated to this address mapping.
This attribute is named `associatedDomainID` in VSD API.
"""
return self._associated_domain_id
@associated_domain_id.setter
def associated_domain_id(self, value):
""" Set associated_domain_id value.
Notes:
Domain associated to this address mapping.
This attribute is named `associatedDomainID` in VSD API.
"""
self._associated_domain_id = value
@property
def customer_alias_ip(self):
""" Get customer_alias_ip value.
Notes:
Customer public IP in the provider domain.
This attribute is named `customerAliasIP` in VSD API.
"""
return self._customer_alias_ip
@customer_alias_ip.setter
def customer_alias_ip(self, value):
""" Set customer_alias_ip value.
Notes:
Customer public IP in the provider domain.
This attribute is named `customerAliasIP` in VSD API.
"""
self._customer_alias_ip = value
@property
def customer_ip(self):
""" Get customer_ip value.
Notes:
Customer private IP in the customer domain.
This attribute is named `customerIP` in VSD API.
"""
return self._customer_ip
@customer_ip.setter
def customer_ip(self, value):
""" Set customer_ip value.
Notes:
Customer private IP in the customer domain.
This attribute is named `customerIP` in VSD API.
"""
self._customer_ip = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| nuagenetworks/vspk-python | vspk/v5_0/nuctranslationmap.py | Python | bsd-3-clause | 9,274 |
"""Common settings and globals."""
from __future__ import unicode_literals
from os.path import abspath, basename, dirname, join, normpath
from sys import path
import markdown
# PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(SITE_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
# END PATH CONFIGURATION
# DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG CONFIGURATION
# MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# END MANAGER CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
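# A concrete development configuration would typically be supplied by an
# environment-specific settings module; for illustration only (the engine and
# file name below are assumptions, not part of this project):
#
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': normpath(join(DJANGO_ROOT, 'default.db')),
#     }
# }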
# END DATABASE CONFIGURATION
# GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Sao_Paulo'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# See: https://docs.djangoproject.com/en/1.6/ref/settings/#locale-paths
LOCALE_PATHS = (
join(SITE_ROOT, 'locale'),
)
# END GENERAL CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# END STATIC FILE CONFIGURATION
# SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = r"%wg@#2myqx)@0p3b%(h1deucjrka2+%kqb*ze^37m0+_f-wxr)"
# END SECRET CONFIGURATION
# SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# END SITE CONFIGURATION
# FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
# END FIXTURE CONFIGURATION
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
# END TEMPLATE CONFIGURATION
# MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'kb.middleware.KnowledgeMiddleware',
)
# END MIDDLEWARE CONFIGURATION
# URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.settings.urls' % SITE_NAME
# END URL CONFIGURATION
# APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'taggit',
'haystack',
'crispy_forms',
'djangosecure',
'kb',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'example',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# END APP CONFIGURATION
# LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# END LOGGING CONFIGURATION
# AUTHENTICATION CONFIGURATION
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-LOGIN_REDIRECT_URL
LOGIN_REDIRECT_URL = '/'
# END AUTHENTICATION CONFIGURATION
# THIRD PARTY CONFIGURATION
# http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# See: http://south.readthedocs.org/en/latest/installation.html#configuring-your-django-installation
INSTALLED_APPS += (
# Database migration helpers:
'south',
)
SOUTH_MIGRATION_MODULES = {
'taggit': 'taggit.south_migrations',
}
KNOWLEDGE_SETTINGS = {
'DEFAULT_SEARCH_FORM_CLASS': 'example.forms.ExampleSearchForm',
}
MARKUP_FIELD_TYPES = (
('markdown', markdown.markdown),
)
# END THIRD PARTY CONFIGURATION
| eliostvs/django-kb-example | example/settings/settings/base.py | Python | bsd-3-clause | 7,689 |
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from json import loads
from collections import defaultdict
from qiita_core.util import execute_as_transaction
from qiita_core.qiita_settings import r_client
from qiita_db.util import generate_analyses_list_per_study
from qiita_db.metadata_template.sample_template import SampleTemplate
from qiita_db.exceptions import QiitaDBUnknownIDError
from qiita_db.exceptions import QiitaDBColumnError
from qiita_db.processing_job import ProcessingJob
from qiita_pet.handlers.api_proxy.util import check_access
SAMPLE_TEMPLATE_KEY_FORMAT = 'sample_template_%s'
def _check_sample_template_exists(samp_id):
"""Make sure a sample template exists in the system
Parameters
----------
samp_id : int or str castable to int
SampleTemplate id to check
Returns
-------
dict
{'status': status,
'message': msg}
"""
if not SampleTemplate.exists(int(samp_id)):
return {'status': 'error',
'message': 'Sample template %d does not exist' % int(samp_id)
}
return {'status': 'success',
'message': ''}
def sample_template_get_req(samp_id, user_id):
"""Gets the json of the full sample template
Parameters
----------
samp_id : int or int castable string
SampleTemplate id to get info for
user_id : str
User requesting the sample template info
Returns
-------
dict of objects
{'status': status,
'message': msg,
'template': dict of {str: {str: object, ...}, ...}
        template is a dictionary where the keys are the metadata samples
and the values are a dictionary of column and value.
Format {sample: {column: value, ...}, ...}
"""
exists = _check_sample_template_exists(int(samp_id))
if exists['status'] != 'success':
return exists
access_error = check_access(int(samp_id), user_id)
if access_error:
return access_error
template = SampleTemplate(int(samp_id))
access_error = check_access(template.study_id, user_id)
if access_error:
return access_error
df = template.to_dataframe()
return {'status': 'success',
'message': '',
'template': df.to_dict(orient='index')}
def sample_template_samples_get_req(samp_id, user_id):
"""Returns list of samples in the sample template
Parameters
----------
samp_id : int or str typecastable to int
SampleTemplate id to get info for
user_id : str
User requesting the sample template info
Returns
-------
dict
Returns summary information in the form
{'status': str,
'message': str,
'samples': list of str}
samples is list of samples in the template
"""
exists = _check_sample_template_exists(int(samp_id))
if exists['status'] != 'success':
return exists
access_error = check_access(samp_id, user_id)
if access_error:
return access_error
return {'status': 'success',
'message': '',
'samples': sorted(x for x in SampleTemplate(int(samp_id)))
}
def sample_template_meta_cats_get_req(samp_id, user_id):
"""Returns list of metadata categories in the sample template
Parameters
----------
samp_id : int or str typecastable to int
SampleTemplate id to get info for
user_id : str
User requesting the sample template info
Returns
-------
dict
Returns information in the form
{'status': str,
'message': str,
'categories': list of str}
        categories is a list of the metadata categories in the template
"""
exists = _check_sample_template_exists(int(samp_id))
if exists['status'] != 'success':
return exists
access_error = check_access(samp_id, user_id)
if access_error:
return access_error
return {'status': 'success',
'message': '',
'categories': sorted(SampleTemplate(int(samp_id)).categories)
}
def sample_template_category_get_req(category, samp_id, user_id):
"""Returns dict of values for each sample in the given category
Parameters
----------
category : str
Metadata category to get values for
samp_id : int or str typecastable to int
SampleTemplate id to get info for
user_id : str
User requesting the sample template info
Returns
-------
dict
Returns information in the form
{'status': str,
'message': str,
'values': dict of {str: object}}
"""
exists = _check_sample_template_exists(int(samp_id))
if exists['status'] != 'success':
return exists
access_error = check_access(samp_id, user_id)
if access_error:
return access_error
st = SampleTemplate(int(samp_id))
try:
values = st.get_category(category)
except QiitaDBColumnError:
return {'status': 'error',
'message': 'Category %s does not exist in sample template' %
category}
return {'status': 'success',
'message': '',
'values': values}
def analyses_associated_with_study(study_id, user_id):
"""Returns all available analyses in study_id
Parameters
----------
study_id : int or str typecastable to int
Study id to get info for
user_id : str
User requesting the sample template info
Returns
-------
dict
Returns information in the form
{'status': str,
'message': str,
'values': list of [qiita_db.analysis.Analysis,
prep_ids for this study]}
"""
access_error = check_access(study_id, user_id)
if access_error:
return access_error
values = generate_analyses_list_per_study(study_id)
return {'status': 'success',
'message': '',
'values': values}
def get_sample_template_processing_status(st_id):
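    """Get the processing status of a sample template from redis.
    Parameters
    ----------
    st_id : int
        The sample template id
    Returns
    -------
    (bool, str, str)
        Whether the sample template is still being processed, the alert type
        and the alert message
    """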
# Initialize variables here
processing = False
alert_type = ''
alert_msg = ''
job_info = r_client.get(SAMPLE_TEMPLATE_KEY_FORMAT % st_id)
if job_info:
job_info = defaultdict(lambda: '', loads(job_info))
job_id = job_info['job_id']
job = ProcessingJob(job_id)
job_status = job.status
processing = job_status not in ('success', 'error')
if processing:
alert_type = 'info'
alert_msg = 'This sample template is currently being processed'
elif job_status == 'error':
alert_type = 'danger'
alert_msg = job.log.msg.replace('\n', '</br>')
else:
alert_type = job_info['alert_type']
alert_msg = job_info['alert_msg'].replace('\n', '</br>')
return processing, alert_type, alert_msg
@execute_as_transaction
def sample_template_filepaths_get_req(study_id, user_id):
"""Returns all the filepaths attached to the sample template
Parameters
----------
study_id : int
The current study object id
user_id : str
The current user object id
Returns
-------
dict
Filepaths in the form
{'status': status,
'message': msg,
'filepaths': filepaths}
status can be success, warning, or error depending on result
message has the warnings or errors
filepaths is a list of tuple of int and str
All files in the sample template, as [(id, URL), ...]
"""
exists = _check_sample_template_exists(int(study_id))
if exists['status'] != 'success':
return exists
access_error = check_access(study_id, user_id)
if access_error:
return access_error
try:
template = SampleTemplate(int(study_id))
except QiitaDBUnknownIDError as e:
return {'status': 'error',
'message': str(e)}
return {'status': 'success',
'message': '',
'filepaths': template.get_filepaths()
}
| antgonza/qiita | qiita_pet/handlers/api_proxy/sample_template.py | Python | bsd-3-clause | 8,355 |
"""
Model for testing arithmetic expressions.
"""
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
@python_2_unicode_compatible
class Number(models.Model):
integer = models.BigIntegerField(db_column='the_integer')
float = models.FloatField(null=True, db_column='the_float')
def __str__(self):
return '%i, %.3f' % (self.integer, self.float)
class Experiment(models.Model):
name = models.CharField(max_length=24)
assigned = models.DateField()
completed = models.DateField()
start = models.DateTimeField()
end = models.DateTimeField()
class Meta:
ordering = ('name',)
def duration(self):
return self.end - self.start
| ericholscher/django | tests/expressions_regress/models.py | Python | bsd-3-clause | 766 |
from django.contrib import admin
from achievs.models import Achievement
# from achievs.models import Gold
# from achievs.models import Silver
# from achievs.models import Bronze
# from achievs.models import Platinum
from achievs.models import Level
# class PlatinumInline(admin.StackedInline):
# model=Platinum
# class GoldInline(admin.StackedInline):
# model=Gold
# class SilverInline(admin.StackedInline):
# model=Silver
# class BronzeInline(admin.StackedInline):
# model=Bronze
class LevelInline(admin.StackedInline):
model=Level
class AchievementAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['name']}),
('Date information', {'fields': ['pub_date']}),
]
#inlines=[GoldInline, SilverInline, BronzeInline, PlatinumInline]
inlines=[LevelInline]
list_display = ('name', 'pub_date')
list_filter=['pub_date']
search_fields=['name']
date_hierarchy='pub_date'
# admin.site.register(Gold)
# admin.site.register(Silver)
# admin.site.register(Bronze)
# admin.site.register(Platinum)
admin.site.register(Level)
admin.site.register(Achievement, AchievementAdmin) | eawerbaneth/Scoreboard | achievs/admin.py | Python | bsd-3-clause | 1,116 |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
import pandas.core.common as com
class TestSample:
@pytest.fixture(params=[Series, DataFrame])
def obj(self, request):
klass = request.param
if klass is Series:
arr = np.random.randn(10)
else:
arr = np.random.randn(10, 10)
return klass(arr, dtype=None)
@pytest.mark.parametrize("test", list(range(10)))
def test_sample(self, test, obj):
# Fixes issue: 2419
# Check behavior of random_state argument
# Check for stability when receives seed or random state -- run 10
# times.
seed = np.random.randint(0, 100)
tm.assert_equal(
obj.sample(n=4, random_state=seed), obj.sample(n=4, random_state=seed)
)
tm.assert_equal(
obj.sample(frac=0.7, random_state=seed),
obj.sample(frac=0.7, random_state=seed),
)
tm.assert_equal(
obj.sample(n=4, random_state=np.random.RandomState(test)),
obj.sample(n=4, random_state=np.random.RandomState(test)),
)
tm.assert_equal(
obj.sample(frac=0.7, random_state=np.random.RandomState(test)),
obj.sample(frac=0.7, random_state=np.random.RandomState(test)),
)
tm.assert_equal(
obj.sample(frac=2, replace=True, random_state=np.random.RandomState(test)),
obj.sample(frac=2, replace=True, random_state=np.random.RandomState(test)),
)
os1, os2 = [], []
for _ in range(2):
np.random.seed(test)
os1.append(obj.sample(n=4))
os2.append(obj.sample(frac=0.7))
tm.assert_equal(*os1)
tm.assert_equal(*os2)
def test_sample_lengths(self, obj):
# Check lengths are right
        assert len(obj.sample(n=4)) == 4
        assert len(obj.sample(frac=0.34)) == 3
        assert len(obj.sample(frac=0.36)) == 4
def test_sample_invalid_random_state(self, obj):
# Check for error when random_state argument invalid.
msg = (
"random_state must be an integer, array-like, a BitGenerator, Generator, "
"a numpy RandomState, or None"
)
with pytest.raises(ValueError, match=msg):
obj.sample(random_state="a_string")
def test_sample_wont_accept_n_and_frac(self, obj):
# Giving both frac and N throws error
msg = "Please enter a value for `frac` OR `n`, not both"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, frac=0.3)
def test_sample_requires_positive_n_frac(self, obj):
with pytest.raises(
ValueError,
match="A negative number of rows requested. Please provide `n` >= 0",
):
obj.sample(n=-3)
with pytest.raises(
ValueError,
match="A negative number of rows requested. Please provide `frac` >= 0",
):
obj.sample(frac=-0.3)
def test_sample_requires_integer_n(self, obj):
# Make sure float values of `n` give error
with pytest.raises(ValueError, match="Only integers accepted as `n` values"):
obj.sample(n=3.2)
def test_sample_invalid_weight_lengths(self, obj):
# Weight length must be right
msg = "Weights and axis to be sampled must be of same length"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=[0, 1])
with pytest.raises(ValueError, match=msg):
bad_weights = [0.5] * 11
obj.sample(n=3, weights=bad_weights)
with pytest.raises(ValueError, match="Fewer non-zero entries in p than size"):
bad_weight_series = Series([0, 0, 0.2])
obj.sample(n=4, weights=bad_weight_series)
def test_sample_negative_weights(self, obj):
# Check won't accept negative weights
bad_weights = [-0.1] * 10
msg = "weight vector many not include negative values"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=bad_weights)
def test_sample_inf_weights(self, obj):
# Check inf and -inf throw errors:
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
msg = "weight vector may not include `inf` values"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=weights_with_inf)
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=weights_with_ninf)
def test_sample_zero_weights(self, obj):
# All zeros raises errors
zero_weights = [0] * 10
with pytest.raises(ValueError, match="Invalid weights: weights sum to zero"):
obj.sample(n=3, weights=zero_weights)
def test_sample_missing_weights(self, obj):
# All missing weights
nan_weights = [np.nan] * 10
with pytest.raises(ValueError, match="Invalid weights: weights sum to zero"):
obj.sample(n=3, weights=nan_weights)
def test_sample_none_weights(self, obj):
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
tm.assert_equal(
obj.sample(n=1, axis=0, weights=weights_with_None), obj.iloc[5:6]
)
@pytest.mark.parametrize(
"func_str,arg",
[
("np.array", [2, 3, 1, 0]),
("np.random.MT19937", 3),
("np.random.PCG64", 11),
],
)
def test_sample_random_state(self, func_str, arg, frame_or_series):
# GH#32503
obj = DataFrame({"col1": range(10, 20), "col2": range(20, 30)})
if frame_or_series is Series:
obj = obj["col1"]
result = obj.sample(n=3, random_state=eval(func_str)(arg))
expected = obj.sample(n=3, random_state=com.random_state(eval(func_str)(arg)))
tm.assert_equal(result, expected)
def test_sample_generator(self, frame_or_series):
# GH#38100
obj = frame_or_series(np.arange(100))
rng = np.random.default_rng()
# Consecutive calls should advance the seed
result1 = obj.sample(n=50, random_state=rng)
result2 = obj.sample(n=50, random_state=rng)
assert not (result1.index.values == result2.index.values).all()
# Matching generator initialization must give same result
# Consecutive calls should advance the seed
result1 = obj.sample(n=50, random_state=np.random.default_rng(11))
result2 = obj.sample(n=50, random_state=np.random.default_rng(11))
tm.assert_equal(result1, result2)
def test_sample_upsampling_without_replacement(self, frame_or_series):
# GH#27451
obj = DataFrame({"A": list("abc")})
if frame_or_series is Series:
obj = obj["A"]
msg = (
"Replace has to be set to `True` when "
"upsampling the population `frac` > 1."
)
with pytest.raises(ValueError, match=msg):
obj.sample(frac=2, replace=False)
class TestSampleDataFrame:
# Tests which are relevant only for DataFrame, so these are
# as fully parametrized as they can get.
def test_sample(self):
# GH#2419
# additional specific object based tests
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = DataFrame(
{
"col1": range(10, 20),
"col2": range(20, 30),
"colString": ["a"] * 10,
"easyweights": easy_weight_list,
}
)
sample1 = df.sample(n=1, weights="easyweights")
tm.assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series or
# DataFrame with axis = 1.
ser = Series(range(10))
msg = "Strings cannot be passed as weights when sampling from a Series."
with pytest.raises(ValueError, match=msg):
ser.sample(n=3, weights="weight_column")
msg = (
"Strings can only be passed to weights when sampling from rows on a "
"DataFrame"
)
with pytest.raises(ValueError, match=msg):
df.sample(n=1, weights="weight_column", axis=1)
# Check weighting key error
with pytest.raises(
KeyError, match="'String passed to weights not a valid column'"
):
df.sample(n=3, weights="not_a_real_column_name")
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = DataFrame({"col1": range(10), "col2": ["a"] * 10})
second_column_weight = [0, 1]
tm.assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[["col2"]]
)
# Different axis arg types
tm.assert_frame_equal(
df.sample(n=1, axis="columns", weights=second_column_weight), df[["col2"]]
)
weight = [0] * 10
weight[5] = 0.5
tm.assert_frame_equal(df.sample(n=1, axis="rows", weights=weight), df.iloc[5:6])
tm.assert_frame_equal(
df.sample(n=1, axis="index", weights=weight), df.iloc[5:6]
)
# Check out of range axis values
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.sample(n=1, axis=2)
msg = "No axis named not_a_name for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.sample(n=1, axis="not_a_name")
ser = Series(range(10))
with pytest.raises(ValueError, match="No axis named 1 for object type Series"):
ser.sample(n=1, axis=1)
# Test weight length compared to correct axis
msg = "Weights and axis to be sampled must be of same length"
with pytest.raises(ValueError, match=msg):
df.sample(n=1, axis=1, weights=[0.5] * 10)
def test_sample_axis1(self):
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = DataFrame(
{"col1": range(10, 20), "col2": range(20, 30), "colString": ["a"] * 10}
)
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
tm.assert_frame_equal(sample1, df[["colString"]])
# Test default axes
tm.assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0, random_state=42)
)
def test_sample_aligns_weights_with_frame(self):
# Test that function aligns weights with frame
df = DataFrame({"col1": [5, 6, 7], "col2": ["a", "b", "c"]}, index=[9, 5, 3])
ser = Series([1, 0, 0], index=[3, 5, 9])
tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=ser))
# Weights have index values to be dropped because not in
# sampled DataFrame
ser2 = Series([0.001, 0, 10000], index=[3, 5, 10])
tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=ser2))
# Weights have empty values to be filed with zeros
ser3 = Series([0.01, 0], index=[3, 5])
tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=ser3))
# No overlap in weight and sampled DataFrame indices
ser4 = Series([1, 0], index=[1, 2])
with pytest.raises(ValueError, match="Invalid weights: weights sum to zero"):
df.sample(1, weights=ser4)
def test_sample_is_copy(self):
# GH#27357, GH#30784: ensure the result of sample is an actual copy and
# doesn't track the parent dataframe / doesn't give SettingWithCopy warnings
df = DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
df2 = df.sample(3)
with tm.assert_produces_warning(None):
df2["d"] = 1
def test_sample_ignore_index(self):
# GH 38581
df = DataFrame(
{"col1": range(10, 20), "col2": range(20, 30), "colString": ["a"] * 10}
)
result = df.sample(3, ignore_index=True)
expected_index = Index([0, 1, 2])
tm.assert_index_equal(result.index, expected_index)
| gfyoung/pandas | pandas/tests/frame/methods/test_sample.py | Python | bsd-3-clause | 12,586 |
from functools import reduce
class ScopedString (object):
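    """A stack of name components rendered as a dot-separated path, e.g. "a.b.c"."""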
def __init__ (self):
self._stack = []
def push (self, frame):
self._stack.append (frame)
def pop (self):
frame = self._stack.pop()
return frame
def __str__ (self):
return '.'.join (self._stack)
class ScopedList (object):
def __init__ (self, stack=None):
if stack:
self._stack = stack
else:
self._stack = []
self.push()
def push (self):
self._stack.append ([])
def pop (self):
if (len (self._stack) <= 1):
raise IndexError ("Attempt to pop global scope")
self._stack.pop()
def append (self, val):
self._stack[-1].append (val)
def _normalize (self):
return reduce (lambda x, y: x + y, self._stack, [])
def __str__ (self):
return str (self._normalize())
def __repr__ (self):
return "ScopedDict(" + repr(self._stack) + ")"
def __iter__ (self):
return self._normalize().__iter__()
class ScopedDict (object):
def __init__ (self, stack=None):
if stack:
self._stack = stack
else:
self._stack = []
self.push ()
def push (self):
self._stack.insert (0, {})
def pop (self):
if (len (self._stack) <= 1):
raise IndexError ("Attempt to pop global scope")
temp = self._stack[0]
del (self._stack[0])
return temp
def _normalize (self):
normal = {}
for frame in self._stack:
for key, value in frame.items():
if key not in normal:
normal[key] = value
return normal
def __getitem__ (self, key):
for frame in self._stack:
if key in frame:
return frame[key]
raise KeyError (key)
def __setitem__ (self, key, value):
self._stack[0][key] = value
def __contains__ (self, key):
for frame in self._stack:
if key in frame:
return True
return False
def __str__ (self):
return str (self._normalize())
def __repr__ (self):
return "ScopedDict(" + repr(self._stack) + ")"
def __iter__ (self):
return self._normalize().__iter__()
def items (self):
return self._normalize().items()
def keys (self):
return self._normalize().keys()
def values (self):
return self._normalize().values()
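# Minimal usage sketch (illustrative addition): an inner scope shadows an
# outer binding until it is popped.
if __name__ == "__main__":
    env = ScopedDict()
    env["x"] = 1              # binding in the outermost (global) scope
    env.push()
    env["x"] = 2              # shadows the outer binding
    assert env["x"] == 2
    env.pop()
    assert env["x"] == 1      # outer binding is visible again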
| doffm/dbuf | src/dbuf/util.py | Python | bsd-3-clause | 3,108 |
import os
from sublime import active_window
from sublime import find_resources
from sublime import load_settings
from sublime import save_settings
import sublime_plugin
def _load_preferences():
return load_settings('Preferences.sublime-settings')
def _save_preferences():
return save_settings('Preferences.sublime-settings')
class ClearWindowCommand(sublime_plugin.WindowCommand):
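    """Hide window chrome (sidebar, minimap, menu, status bar), resize groups
    equally, and clear indent guides, line numbers, whitespace markers, and
    rulers from the user preferences."""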
def run(self):
if self.window.is_sidebar_visible():
self.window.set_sidebar_visible(False)
if self.window.is_minimap_visible():
self.window.set_minimap_visible(False)
if self.window.is_menu_visible():
self.window.set_menu_visible(False)
if self.window.is_status_bar_visible():
self.window.set_status_bar_visible(False)
self.window.run_command('resize_groups_almost_equally')
preferences = _load_preferences()
preferences.set('indent_guide_options', [])
preferences.set('line_numbers', False)
preferences.set('draw_white_space', 'selection')
preferences.set('rulers', [])
_save_preferences()
self.window.run_command('sort_user_settings')
class EnableColorSchemeCommand(sublime_plugin.ApplicationCommand):
def run(self):
self.color_schemes = []
for color_scheme in find_resources('*.tmTheme'):
ignore = False
for exclude in ['(SL)', 'Color Highlighter', 'tests']:
if exclude in color_scheme:
ignore = True
if not ignore:
self.color_schemes.append(color_scheme)
if len(self.color_schemes) > 1:
color_scheme = _load_preferences().get('color_scheme')
if color_scheme not in self.color_schemes:
self.color_schemes.insert(0, color_scheme)
self.window = active_window()
self.window.show_quick_panel(
self.color_schemes,
self.on_done,
0,
self.color_schemes.index(color_scheme),
self.on_select
)
def on_select(self, index):
if index == -1:
return
color_scheme = self.color_schemes[index]
for group in range(0, self.window.num_groups()):
active_view_in_group = self.window.active_view_in_group(group)
if active_view_in_group:
active_view_in_group.settings().set('color_scheme', color_scheme)
def on_done(self, index):
if index == -1:
for view in self.window.views():
view.settings().erase('color_scheme')
return
color_scheme = self.color_schemes[index]
preferences = _load_preferences()
preferences.set('color_scheme', color_scheme)
_save_preferences()
for view in self.window.views():
view.settings().erase('color_scheme')
class EnableThemeCommand(sublime_plugin.ApplicationCommand):
def run(self):
self.themes = []
for theme in find_resources('*.sublime-theme'):
ignore = False
for exclude in ['Addon', 'tests']:
if exclude in theme:
ignore = True
if not ignore:
self.themes.append(os.path.basename(theme))
if len(self.themes) > 1:
active_window().show_quick_panel(self.themes, self.on_done)
def on_done(self, index):
if index == -1:
return
theme = self.themes[index]
preferences = _load_preferences()
preferences.set('theme', theme)
_save_preferences()
class OverlayOpenFileCommand(sublime_plugin.WindowCommand):
"""Open File; Inspired by Vim CtrlP (https://kien.github.io/ctrlp.vim)."""
def run(self, tab=None, split=None, vsplit=None):
"""
Open file from overlay.
:param tab:
Open the selected file in a new tab
:param split:
Open the selected file in a horizontal split
:param vsplit:
Open the selected file in a vertical split
Defaults to opening in a new tab.
"""
transient_view = self.window.transient_view_in_group(self.window.active_group())
if not transient_view:
return
fname = transient_view.file_name()
if not fname:
return
if vsplit:
self.open_file_in_vertical_split(fname)
elif split:
self.open_file_in_horizontal_split(fname)
elif tab:
self.open_file_in_tab(fname)
else:
self.open_file_in_tab(fname)
self.window.run_command('hide_overlay')
def is_enabled(self):
view = self.window.active_view()
if view:
return bool(view.settings().get('polyfill.experimental_features'))
return False
def open_file_in_vertical_split(self, fname):
self.window.open_file(fname)
self.window.run_command('create_pane_with_file', {'direction': 'right'})
def open_file_in_horizontal_split(self, fname):
self.window.open_file(fname)
self.window.run_command('create_pane_with_file', {'direction': 'down'})
def open_file_in_tab(self, fname):
self.window.open_file(fname)
class PolyfillSetLayoutCommand(sublime_plugin.WindowCommand):
def run(self, cols, rows, cells):
num_groups_before = self.window.num_groups()
active_group_before = self.window.active_group()
self.window.run_command('set_layout', {
'cols': cols,
'rows': rows,
'cells': cells
})
if num_groups_before == self.window.num_groups():
# Fix issue where group focus moves when it probably shouldn't.
# When the layout is not changed then the focus shouldn't change
# either. Previously, if the active view before the layout change
# is transient ST would move the cursor focus to a group with a
# non-transient view. This can be disorienting and interrupt flow
# because where the cursor focus has moved to is not always clear.
self.window.focus_group(active_group_before)
return
if len(self.window.views_in_group(active_group_before)) < 2:
# Only move the active view before layout change to the new group
# if it doesn't leave the previous group without any views.
return
view = self.window.active_view_in_group(active_group_before)
self.window.set_view_index(view, self.window.active_group(), 0)
class ResetWindowCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.run_command('reset_font_size')
view = self.window.active_view()
font_size = view.settings().get('font_size_default') if view else None
if font_size:
preferences = _load_preferences()
preferences.set('font_size', font_size)
_save_preferences()
if not self.window.is_sidebar_visible():
self.window.set_sidebar_visible(True)
if not self.window.is_minimap_visible():
self.window.set_minimap_visible(True)
if not self.window.is_menu_visible():
self.window.set_menu_visible(True)
if not self.window.is_status_bar_visible():
self.window.set_status_bar_visible(True)
self.window.run_command('resize_groups_almost_equally')
class ResizeGroupsAlmostEquallyCommand(sublime_plugin.WindowCommand):
"""
Resize groups equally.
Make all groups (almost) equally high and wide, but use 'winheight' and
'winwidth' for the current window. Windows with 'winfixheight' set keep
their height and windows with 'winfixwidth' set keep their width.
@xxx winheight option
@xxx winwidth option
@xxx winfixheight option
@xxx winfixwidth option
"""
def run(self):
layout = self.window.layout()
col_count = len(layout['cols'])
row_count = len(layout['rows'])
def equalise(count):
size = round(1.0 / (count - 1), 2)
vals = [0.0]
for i in range(1, count - 1):
vals.append(round(size * i, 2))
vals.append(1.0)
return vals
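        # e.g. equalise(4) -> [0.0, 0.33, 0.66, 1.0] for a three-column layout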
if col_count > 2:
layout['cols'] = equalise(col_count)
if row_count > 2:
layout['rows'] = equalise(row_count)
if col_count > 2 or row_count > 2:
self.window.set_layout(layout)
| gerardroche/sublime-polyfill | ui.py | Python | bsd-3-clause | 8,584 |
from datetime import timedelta
import unittest
import mock
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.core import mail
from django.core.paginator import Paginator
from django.db.models.signals import pre_delete, post_delete
from django.utils import timezone
from wagtail.tests.testapp.models import (
SimplePage, EventPage, EventPageCarouselItem,
StandardIndex, StandardChild,
BusinessIndex, BusinessChild, BusinessSubIndex,
TaggedPage, Advert, AdvertPlacement)
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore.models import Page, PageRevision
from wagtail.wagtailcore.signals import page_published, page_unpublished
from wagtail.wagtailusers.models import UserProfile
def submittable_timestamp(timestamp):
"""
Helper function to translate a possibly-timezone-aware datetime into the format used in the
go_live_at / expire_at form fields - "YYYY-MM-DD hh:mm", with no timezone indicator.
This will be interpreted as being in the server's timezone (settings.TIME_ZONE), so we
need to pass it through timezone.localtime to ensure that the client and server are in
agreement about what the timestamp means.
"""
return str(timezone.localtime(timestamp)).split('.')[0]
class TestPageExplorer(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Add child page
self.child_page = SimplePage(
title="Hello world!",
slug="hello-world",
)
self.root_page.add_child(instance=self.child_page)
# Login
self.login()
def test_explore(self):
response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
self.assertEqual(self.root_page, response.context['parent_page'])
self.assertTrue(response.context['pages'].paginator.object_list.filter(id=self.child_page.id).exists())
def test_explore_root(self):
response = self.client.get(reverse('wagtailadmin_explore_root'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
self.assertEqual(Page.objects.get(id=1), response.context['parent_page'])
self.assertTrue(response.context['pages'].paginator.object_list.filter(id=self.root_page.id).exists())
def test_ordering(self):
response = self.client.get(reverse('wagtailadmin_explore_root'), {'ordering': 'content_type'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
self.assertEqual(response.context['ordering'], 'content_type')
def test_invalid_ordering(self):
response = self.client.get(reverse('wagtailadmin_explore_root'), {'ordering': 'invalid_order'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
self.assertEqual(response.context['ordering'], '-latest_revision_created_at')
def test_reordering(self):
response = self.client.get(reverse('wagtailadmin_explore_root'), {'ordering': 'ord'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
self.assertEqual(response.context['ordering'], 'ord')
# Pages must not be paginated
self.assertNotIsInstance(response.context['pages'], Paginator)
def make_pages(self):
for i in range(150):
self.root_page.add_child(instance=SimplePage(
title="Page " + str(i),
slug="page-" + str(i),
))
def test_pagination(self):
self.make_pages()
response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 2})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
# Check that we got the correct page
self.assertEqual(response.context['pages'].number, 2)
def test_pagination_invalid(self):
self.make_pages()
response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 'Hello World!'})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
# Check that we got page one
self.assertEqual(response.context['pages'].number, 1)
def test_pagination_out_of_range(self):
self.make_pages()
response = self.client.get(reverse('wagtailadmin_explore', args=(self.root_page.id, )), {'p': 99999})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/index.html')
# Check that we got the last page
self.assertEqual(response.context['pages'].number, response.context['pages'].paginator.num_pages)
class TestPageCreation(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Login
self.user = self.login()
def test_add_subpage(self):
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(self.root_page.id, )))
self.assertEqual(response.status_code, 200)
def test_add_subpage_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get add subpage page
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(self.root_page.id, )))
        # Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_add_subpage_nonexistantparent(self):
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(100000, )))
self.assertEqual(response.status_code, 404)
def test_create_simplepage(self):
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<a href="#content" class="active">Content</a>')
self.assertContains(response, '<a href="#promote" class="">Promote</a>')
def test_create_page_without_promote_tab(self):
"""
Test that the Promote tab is not rendered for page classes that define it as empty
"""
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'standardindex', self.root_page.id)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<a href="#content" class="active">Content</a>')
self.assertNotContains(response, '<a href="#promote" class="">Promote</a>')
def test_create_page_with_custom_tabs(self):
"""
Test that custom edit handlers are rendered
"""
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'standardchild', self.root_page.id)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<a href="#content" class="active">Content</a>')
self.assertContains(response, '<a href="#promote" class="">Promote</a>')
self.assertContains(response, '<a href="#dinosaurs" class="">Dinosaurs</a>')
def test_create_simplepage_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get page
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id, )))
        # Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_create_simplepage_post(self):
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Find the page and check it
page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
# Should be redirected to edit page
self.assertRedirects(response, reverse('wagtailadmin_pages_edit', args=(page.id, )))
self.assertEqual(page.title, post_data['title'])
self.assertIsInstance(page, SimplePage)
self.assertFalse(page.live)
self.assertFalse(page.first_published_at)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_create_simplepage_scheduled(self):
go_live_at = timezone.now() + timedelta(days=1)
expire_at = timezone.now() + timedelta(days=2)
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'go_live_at': submittable_timestamp(go_live_at),
'expire_at': submittable_timestamp(expire_at),
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Should be redirected to explorer page
self.assertEqual(response.status_code, 302)
# Find the page and check the scheduled times
page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
self.assertEqual(page.go_live_at.date(), go_live_at.date())
self.assertEqual(page.expire_at.date(), expire_at.date())
self.assertEqual(page.expired, False)
self.assertTrue(page.status_string, "draft")
# No revisions with approved_go_live_at
self.assertFalse(PageRevision.objects.filter(page=page).exclude(approved_go_live_at__isnull=True).exists())
def test_create_simplepage_scheduled_go_live_before_expiry(self):
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'go_live_at': submittable_timestamp(timezone.now() + timedelta(days=2)),
'expire_at': submittable_timestamp(timezone.now() + timedelta(days=1)),
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'go_live_at', "Go live date/time must be before expiry date/time")
self.assertFormError(response, 'form', 'expire_at', "Go live date/time must be before expiry date/time")
def test_create_simplepage_scheduled_expire_in_the_past(self):
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'expire_at': submittable_timestamp(timezone.now() + timedelta(days=-1)),
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'expire_at', "Expiry date/time must be in the future")
def test_create_simplepage_post_publish(self):
# Connect a mock signal handler to page_published signal
mock_handler = mock.MagicMock()
page_published.connect(mock_handler)
# Post
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Find the page and check it
page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
# Should be redirected to explorer
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
self.assertEqual(page.title, post_data['title'])
self.assertIsInstance(page, SimplePage)
self.assertTrue(page.live)
self.assertTrue(page.first_published_at)
# Check that the page_published signal was fired
self.assertEqual(mock_handler.call_count, 1)
mock_call = mock_handler.mock_calls[0][2]
self.assertEqual(mock_call['sender'], page.specific_class)
self.assertEqual(mock_call['instance'], page)
self.assertIsInstance(mock_call['instance'], page.specific_class)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_create_simplepage_post_publish_scheduled(self):
go_live_at = timezone.now() + timedelta(days=1)
expire_at = timezone.now() + timedelta(days=2)
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
'go_live_at': submittable_timestamp(go_live_at),
'expire_at': submittable_timestamp(expire_at),
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Should be redirected to explorer page
self.assertEqual(response.status_code, 302)
# Find the page and check it
page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
self.assertEqual(page.go_live_at.date(), go_live_at.date())
self.assertEqual(page.expire_at.date(), expire_at.date())
self.assertEqual(page.expired, False)
# A revision with approved_go_live_at should exist now
self.assertTrue(PageRevision.objects.filter(page=page).exclude(approved_go_live_at__isnull=True).exists())
# But Page won't be live
self.assertFalse(page.live)
self.assertFalse(page.first_published_at)
self.assertEqual(page.status_string, "scheduled")
def test_create_simplepage_post_submit(self):
# Create a moderator user for testing email
moderator = get_user_model().objects.create_superuser('moderator', '[email protected]', 'password')
# Submit
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'action-submit': "Submit",
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Find the page and check it
page = Page.objects.get(path__startswith=self.root_page.path, slug='hello-world').specific
# Should be redirected to explorer
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
self.assertEqual(page.title, post_data['title'])
self.assertIsInstance(page, SimplePage)
self.assertFalse(page.live)
self.assertFalse(page.first_published_at)
# The latest revision for the page should now be in moderation
self.assertTrue(page.get_latest_revision().submitted_for_moderation)
# Check that the moderator got an email
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['[email protected]'])
self.assertEqual(mail.outbox[0].subject, 'The page "New page!" has been submitted for moderation')
def test_create_simplepage_post_existing_slug(self):
# This tests the existing slug checking on page save
# Create a page
self.child_page = SimplePage()
self.child_page.title = "Hello world!"
self.child_page.slug = "hello-world"
self.root_page.add_child(instance=self.child_page)
# Attempt to create a new one with the same slug
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Should not be redirected (as the save should fail)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'slug', "This slug is already in use")
def test_create_nonexistentparent(self):
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', 100000)))
self.assertEqual(response.status_code, 404)
def test_create_nonpagetype(self):
response = self.client.get(reverse('wagtailadmin_pages_create', args=('wagtailimages', 'image', self.root_page.id)))
self.assertEqual(response.status_code, 404)
def test_preview_on_create(self):
post_data = {
'title': "New page!",
'content': "Some content",
'slug': 'hello-world',
'action-submit': "Submit",
}
response = self.client.post(reverse('wagtailadmin_pages_preview_on_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Check the response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'tests/simple_page.html')
self.assertContains(response, "New page!")
# Check that the treebeard attributes were set correctly on the page object
self.assertEqual(response.context['self'].depth, self.root_page.depth + 1)
self.assertTrue(response.context['self'].path.startswith(self.root_page.path))
self.assertEqual(response.context['self'].get_parent(), self.root_page)
def test_whitespace_titles(self):
post_data = {
'title': " ", # Single space on purpose
'content': "Some content",
'slug': 'hello-world',
'action-submit': "Submit",
'seo_title': '\t',
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.root_page.id)), post_data)
# Check that a form error was raised
self.assertFormError(response, 'form', 'title', "Value cannot be entirely whitespace characters")
self.assertFormError(response, 'form', 'seo_title', "Value cannot be entirely whitespace characters")
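# Tests for the page edit view: permission checks, plain saves, locked pages,
# scheduled publishing, publish/submit actions, duplicate slugs and preview-on-edit.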
class TestPageEdit(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Add child page
child_page = SimplePage(
title="Hello world!",
slug="hello-world",
)
self.root_page.add_child(instance=child_page)
child_page.save_revision().publish()
self.child_page = SimplePage.objects.get(id=child_page.id)
# Add event page (to test edit handlers)
self.event_page = EventPage()
self.event_page.title = "Event page"
self.event_page.slug = "event-page"
self.root_page.add_child(instance=self.event_page)
# Login
self.user = self.login()
def test_page_edit(self):
# Tests that the edit page loads
response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )))
self.assertEqual(response.status_code, 200)
def test_page_edit_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get edit page
response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )))
# Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_page_edit_post(self):
# Tests simple editing
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should be redirected to edit page
self.assertRedirects(response, reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )))
# The page should have "has_unpublished_changes" flag set
child_page_new = SimplePage.objects.get(id=self.child_page.id)
self.assertTrue(child_page_new.has_unpublished_changes)
def test_page_edit_post_when_locked(self):
# Tests that trying to edit a locked page results in an error
# Lock the page
self.child_page.locked = True
self.child_page.save()
# Post
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Shouldn't be redirected
self.assertContains(response, "The page could not be saved as it is locked")
# The page shouldn't have "has_unpublished_changes" flag set
child_page_new = SimplePage.objects.get(id=self.child_page.id)
self.assertFalse(child_page_new.has_unpublished_changes)
def test_edit_post_scheduled(self):
# put go_live_at and expire_at several days away from the current date, to avoid
# false matches in content_json__contains tests
go_live_at = timezone.now() + timedelta(days=10)
expire_at = timezone.now() + timedelta(days=20)
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'go_live_at': submittable_timestamp(go_live_at),
'expire_at': submittable_timestamp(expire_at),
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should be redirected to explorer page
self.assertEqual(response.status_code, 302)
child_page_new = SimplePage.objects.get(id=self.child_page.id)
# The page will still be live
self.assertTrue(child_page_new.live)
# A revision with approved_go_live_at should not exist
self.assertFalse(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())
# But a revision with go_live_at and expire_at in their content json *should* exist
self.assertTrue(PageRevision.objects.filter(page=child_page_new, content_json__contains=str(go_live_at.date())).exists())
self.assertTrue(PageRevision.objects.filter(page=child_page_new, content_json__contains=str(expire_at.date())).exists())
def test_edit_scheduled_go_live_before_expiry(self):
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'go_live_at': submittable_timestamp(timezone.now() + timedelta(days=2)),
'expire_at': submittable_timestamp(timezone.now() + timedelta(days=1)),
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'go_live_at', "Go live date/time must be before expiry date/time")
self.assertFormError(response, 'form', 'expire_at', "Go live date/time must be before expiry date/time")
def test_edit_scheduled_expire_in_the_past(self):
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'expire_at': submittable_timestamp(timezone.now() + timedelta(days=-1)),
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'expire_at', "Expiry date/time must be in the future")
def test_page_edit_post_publish(self):
# Connect a mock signal handler to page_published signal
mock_handler = mock.MagicMock()
page_published.connect(mock_handler)
# Set has_unpublished_changes=True on the existing record to confirm that the publish action
# is resetting it (and not just leaving it alone)
self.child_page.has_unpublished_changes = True
self.child_page.save()
# Save current value of first_published_at so we can check that it doesn't change
first_published_at = SimplePage.objects.get(id=self.child_page.id).first_published_at
# Tests publish from edit page
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should be redirected to explorer
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page was edited
child_page_new = SimplePage.objects.get(id=self.child_page.id)
self.assertEqual(child_page_new.title, post_data['title'])
# Check that the page_published signal was fired
self.assertEqual(mock_handler.call_count, 1)
mock_call = mock_handler.mock_calls[0][2]
self.assertEqual(mock_call['sender'], child_page_new.specific_class)
self.assertEqual(mock_call['instance'], child_page_new)
self.assertIsInstance(mock_call['instance'], child_page_new.specific_class)
# The page shouldn't have "has_unpublished_changes" flag set
self.assertFalse(child_page_new.has_unpublished_changes)
# first_published_at should not change as it was already set
self.assertEqual(first_published_at, child_page_new.first_published_at)
def test_edit_post_publish_scheduled(self):
go_live_at = timezone.now() + timedelta(days=1)
expire_at = timezone.now() + timedelta(days=2)
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
'go_live_at': submittable_timestamp(go_live_at),
'expire_at': submittable_timestamp(expire_at),
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should be redirected to explorer page
self.assertEqual(response.status_code, 302)
child_page_new = SimplePage.objects.get(id=self.child_page.id)
# The page should not be live anymore
self.assertFalse(child_page_new.live)
# Instead a revision with approved_go_live_at should now exist
self.assertTrue(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())
# The page SHOULD have the "has_unpublished_changes" flag set, because the changes are not visible as a live page yet
self.assertTrue(child_page_new.has_unpublished_changes, "A page scheduled for future publishing should have has_unpublished_changes=True")
def test_edit_post_publish_now_an_already_scheduled(self):
# First let's publish a page with a go_live_at in the future
go_live_at = timezone.now() + timedelta(days=1)
expire_at = timezone.now() + timedelta(days=2)
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
'go_live_at': submittable_timestamp(go_live_at),
'expire_at': submittable_timestamp(expire_at),
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should be redirected to explorer page
self.assertEqual(response.status_code, 302)
child_page_new = SimplePage.objects.get(id=self.child_page.id)
# The page should not be live anymore
self.assertFalse(child_page_new.live)
# Instead a revision with approved_go_live_at should now exist
self.assertTrue(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())
# Now, let's edit it and publish it right now
go_live_at = timezone.now()
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'action-publish': "Publish",
'go_live_at': "",
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should be redirected to explorer page
self.assertEqual(response.status_code, 302)
child_page_new = SimplePage.objects.get(id=self.child_page.id)
# The page should be live now
self.assertTrue(child_page_new.live)
# And a revision with approved_go_live_at should not exist
self.assertFalse(PageRevision.objects.filter(page=child_page_new).exclude(approved_go_live_at__isnull=True).exists())
def test_page_edit_post_submit(self):
# Create a moderator user for testing email
moderator = get_user_model().objects.create_superuser('moderator', '[email protected]', 'password')
# Tests submitting from edit page
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'action-submit': "Submit",
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should be redirected to explorer
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# The page should have "has_unpublished_changes" flag set
child_page_new = SimplePage.objects.get(id=self.child_page.id)
self.assertTrue(child_page_new.has_unpublished_changes)
# The latest revision for the page should now be in moderation
self.assertTrue(child_page_new.get_latest_revision().submitted_for_moderation)
# Check that the moderator got an email
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['[email protected]'])
self.assertEqual(mail.outbox[0].subject, 'The page "Hello world!" has been submitted for moderation') # Note: should this be "I've been edited!"?
def test_page_edit_post_existing_slug(self):
# This tests the existing slug checking on page edit
# Create a page
self.child_page = SimplePage()
self.child_page.title = "Hello world 2"
self.child_page.slug = "hello-world2"
self.root_page.add_child(instance=self.child_page)
# Attempt to change the slug to one that's already in use
post_data = {
'title': "Hello world 2",
'slug': 'hello-world',
'action-submit': "Submit",
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), post_data)
# Should not be redirected (as the save should fail)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'slug', "This slug is already in use")
def test_preview_on_edit(self):
post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'action-submit': "Submit",
}
response = self.client.post(reverse('wagtailadmin_pages_preview_on_edit', args=(self.child_page.id, )), post_data)
# Check the response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'tests/simple_page.html')
self.assertContains(response, "I've been edited!")
class TestPageEditReordering(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Add event page
self.event_page = EventPage()
self.event_page.title = "Event page"
self.event_page.slug = "event-page"
self.event_page.carousel_items = [
EventPageCarouselItem(caption='1234567', sort_order=1),
EventPageCarouselItem(caption='7654321', sort_order=2),
EventPageCarouselItem(caption='abcdefg', sort_order=3),
]
self.root_page.add_child(instance=self.event_page)
# Login
self.user = self.login()
def check_order(self, response, expected_order):
inline_panel = response.context['edit_handler'].children[0].children[9]
order = [child.form.instance.caption for child in inline_panel.children]
self.assertEqual(order, expected_order)
def test_order(self):
response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )))
self.assertEqual(response.status_code, 200)
self.check_order(response, ['1234567', '7654321', 'abcdefg'])
def test_reorder(self):
post_data = {
'title': "Event page",
'slug': 'event-page',
'date_from': '01/01/2014',
'cost': '$10',
'audience': 'public',
'location': 'somewhere',
'related_links-INITIAL_FORMS': 0,
'related_links-MAX_NUM_FORMS': 1000,
'related_links-TOTAL_FORMS': 0,
'speakers-INITIAL_FORMS': 0,
'speakers-MAX_NUM_FORMS': 1000,
'speakers-TOTAL_FORMS': 0,
'carousel_items-INITIAL_FORMS': 3,
'carousel_items-MAX_NUM_FORMS': 1000,
'carousel_items-TOTAL_FORMS': 3,
'carousel_items-0-id': self.event_page.carousel_items.all()[0].id,
'carousel_items-0-caption': self.event_page.carousel_items.all()[0].caption,
'carousel_items-0-ORDER': 2,
'carousel_items-1-id': self.event_page.carousel_items.all()[1].id,
'carousel_items-1-caption': self.event_page.carousel_items.all()[1].caption,
'carousel_items-1-ORDER': 3,
'carousel_items-2-id': self.event_page.carousel_items.all()[2].id,
'carousel_items-2-caption': self.event_page.carousel_items.all()[2].caption,
'carousel_items-2-ORDER': 1,
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )), post_data)
# Should be redirected back to same page
self.assertRedirects(response, reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )))
# Check order
response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )))
self.assertEqual(response.status_code, 200)
self.check_order(response, ['abcdefg', '1234567', '7654321'])
def test_reorder_with_validation_error(self):
post_data = {
'title': "", # Validation error
'slug': 'event-page',
'date_from': '01/01/2014',
'cost': '$10',
'audience': 'public',
'location': 'somewhere',
'related_links-INITIAL_FORMS': 0,
'related_links-MAX_NUM_FORMS': 1000,
'related_links-TOTAL_FORMS': 0,
'speakers-INITIAL_FORMS': 0,
'speakers-MAX_NUM_FORMS': 1000,
'speakers-TOTAL_FORMS': 0,
'carousel_items-INITIAL_FORMS': 3,
'carousel_items-MAX_NUM_FORMS': 1000,
'carousel_items-TOTAL_FORMS': 3,
'carousel_items-0-id': self.event_page.carousel_items.all()[0].id,
'carousel_items-0-caption': self.event_page.carousel_items.all()[0].caption,
'carousel_items-0-ORDER': 2,
'carousel_items-1-id': self.event_page.carousel_items.all()[1].id,
'carousel_items-1-caption': self.event_page.carousel_items.all()[1].caption,
'carousel_items-1-ORDER': 3,
'carousel_items-2-id': self.event_page.carousel_items.all()[2].id,
'carousel_items-2-caption': self.event_page.carousel_items.all()[2].caption,
'carousel_items-2-ORDER': 1,
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.event_page.id, )), post_data)
self.assertEqual(response.status_code, 200)
self.check_order(response, ['abcdefg', '1234567', '7654321'])
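# Tests for the page delete view: GET does not delete, permission checks,
# the page_unpublished signal on deletion, and deletion of pages with subpages.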
class TestPageDelete(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Add child page
self.child_page = SimplePage()
self.child_page.title = "Hello world!"
self.child_page.slug = "hello-world"
self.root_page.add_child(instance=self.child_page)
# Add a page with child pages of its own
self.child_index = StandardIndex(title="Hello index", slug='hello-index')
self.root_page.add_child(instance=self.child_index)
self.grandchild_page = StandardChild(title="Hello Kitty", slug='hello-kitty')
self.child_index.add_child(instance=self.grandchild_page)
# Login
self.user = self.login()
def test_page_delete(self):
response = self.client.get(reverse('wagtailadmin_pages_delete', args=(self.child_page.id, )))
self.assertEqual(response.status_code, 200)
# deletion should not actually happen on GET
self.assertTrue(SimplePage.objects.filter(id=self.child_page.id).exists())
def test_page_delete_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get delete page
response = self.client.get(reverse('wagtailadmin_pages_delete', args=(self.child_page.id, )))
# Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
# Check that the deletion has not happened
self.assertTrue(SimplePage.objects.filter(id=self.child_page.id).exists())
def test_page_delete_post(self):
# Connect a mock signal handler to page_unpublished signal
mock_handler = mock.MagicMock()
page_unpublished.connect(mock_handler)
# Post
response = self.client.post(reverse('wagtailadmin_pages_delete', args=(self.child_page.id, )))
# Should be redirected to explorer page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
# Check that the page is gone
self.assertEqual(Page.objects.filter(path__startswith=self.root_page.path, slug='hello-world').count(), 0)
# Check that the page_unpublished signal was fired
self.assertEqual(mock_handler.call_count, 1)
mock_call = mock_handler.mock_calls[0][2]
self.assertEqual(mock_call['sender'], self.child_page.specific_class)
self.assertEqual(mock_call['instance'], self.child_page)
self.assertIsInstance(mock_call['instance'], self.child_page.specific_class)
def test_page_delete_notlive_post(self):
# Same as above, but this makes sure the page_unpublished signal is not fired
# if the page is not live when it is deleted
# Unpublish the page
self.child_page.live = False
self.child_page.save()
# Connect a mock signal handler to page_unpublished signal
mock_handler = mock.MagicMock()
page_unpublished.connect(mock_handler)
# Post
response = self.client.post(reverse('wagtailadmin_pages_delete', args=(self.child_page.id, )))
# Should be redirected to explorer page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
# Check that the page is gone
self.assertEqual(Page.objects.filter(path__startswith=self.root_page.path, slug='hello-world').count(), 0)
# Check that the page_unpublished signal was not fired
self.assertEqual(mock_handler.call_count, 0)
def test_subpage_deletion(self):
# Connect mock signal handlers to page_unpublished, pre_delete and post_delete signals
unpublish_signals_received = []
def page_unpublished_handler(sender, instance, **kwargs):
unpublish_signals_received.append((sender, instance.id))
page_unpublished.connect(page_unpublished_handler)
pre_delete_signals_received = []
def pre_delete_handler(sender, instance, **kwargs):
pre_delete_signals_received.append((sender, instance.id))
pre_delete.connect(pre_delete_handler)
post_delete_signals_received = []
def post_delete_handler(sender, instance, **kwargs):
post_delete_signals_received.append((sender, instance.id))
post_delete.connect(post_delete_handler)
# Post
response = self.client.post(reverse('wagtailadmin_pages_delete', args=(self.child_index.id, )))
# Should be redirected to explorer page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
# Check that the page is gone
self.assertFalse(StandardIndex.objects.filter(id=self.child_index.id).exists())
self.assertFalse(Page.objects.filter(id=self.child_index.id).exists())
# Check that the subpage is also gone
self.assertFalse(StandardChild.objects.filter(id=self.grandchild_page.id).exists())
self.assertFalse(Page.objects.filter(id=self.grandchild_page.id).exists())
# Check that the signals were fired for both pages
self.assertIn((StandardIndex, self.child_index.id), unpublish_signals_received)
self.assertIn((StandardChild, self.grandchild_page.id), unpublish_signals_received)
self.assertIn((StandardIndex, self.child_index.id), pre_delete_signals_received)
self.assertIn((StandardChild, self.grandchild_page.id), pre_delete_signals_received)
self.assertIn((StandardIndex, self.child_index.id), post_delete_signals_received)
self.assertIn((StandardChild, self.grandchild_page.id), post_delete_signals_received)
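# Tests for the admin page search view: template used, query handling, AJAX
# responses, pagination edge cases and whether the root page can be returned.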
class TestPageSearch(TestCase, WagtailTestUtils):
def setUp(self):
# Login
self.login()
def get(self, params=None, **extra):
return self.client.get(reverse('wagtailadmin_pages_search'), params or {}, **extra)
def test_view(self):
response = self.get()
self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')
self.assertEqual(response.status_code, 200)
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')
self.assertEqual(response.context['query_string'], "Hello")
def test_ajax(self):
response = self.get({'q': "Hello"}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertTemplateNotUsed(response, 'wagtailadmin/pages/search.html')
self.assertTemplateUsed(response, 'wagtailadmin/pages/search_results.html')
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'q': "Hello", 'p': page})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/search.html')
def test_root_can_appear_in_search_results(self):
response = self.get({'q': "roo"})
self.assertEqual(response.status_code, 200)
# 'pages' list in the response should contain root
results = response.context['pages']
self.assertTrue(any([r.slug == 'root' for r in results]))
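# Tests for the page move views: the move chooser, permission checks,
# the move confirmation step and set_page_position.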
class TestPageMove(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Create two sections
self.section_a = SimplePage()
self.section_a.title = "Section A"
self.section_a.slug = "section-a"
self.root_page.add_child(instance=self.section_a)
self.section_b = SimplePage()
self.section_b.title = "Section B"
self.section_b.slug = "section-b"
self.root_page.add_child(instance=self.section_b)
# Add test page into section A
self.test_page = SimplePage()
self.test_page.title = "Hello world!"
self.test_page.slug = "hello-world"
self.section_a.add_child(instance=self.test_page)
# Login
self.user = self.login()
def test_page_move(self):
response = self.client.get(reverse('wagtailadmin_pages_move', args=(self.test_page.id, )))
self.assertEqual(response.status_code, 200)
def test_page_move_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get move page
response = self.client.get(reverse('wagtailadmin_pages_move', args=(self.test_page.id, )))
# Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_page_move_confirm(self):
response = self.client.get(reverse('wagtailadmin_pages_move_confirm', args=(self.test_page.id, self.section_b.id)))
self.assertEqual(response.status_code, 200)
def test_page_set_page_position(self):
response = self.client.get(reverse('wagtailadmin_pages_set_page_position', args=(self.test_page.id, )))
self.assertEqual(response.status_code, 200)
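# Tests for the page copy view: form fields, permission checks, copying with
# and without subpages, publishing copies, slug validation and the behaviour
# for users without publish permission.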
class TestPageCopy(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Create a page
self.test_page = self.root_page.add_child(instance=SimplePage(
title="Hello world!",
slug='hello-world',
live=True,
has_unpublished_changes=False,
))
# Create a couple of child pages
self.test_child_page = self.test_page.add_child(instance=SimplePage(
title="Child page",
slug='child-page',
live=True,
has_unpublished_changes=True,
))
self.test_unpublished_child_page = self.test_page.add_child(instance=SimplePage(
title="Unpublished Child page",
slug='unpublished-child-page',
live=False,
has_unpublished_changes=True,
))
# Login
self.user = self.login()
def test_page_copy(self):
response = self.client.get(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/copy.html')
# Make sure all fields are in the form
self.assertContains(response, "New title")
self.assertContains(response, "New slug")
self.assertContains(response, "New parent page")
self.assertContains(response, "Copy subpages")
self.assertContains(response, "Publish copies")
def test_page_copy_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get copy page
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world',
'new_parent_page': str(self.test_page.id),
'copy_subpages': False,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_page_copy_post(self):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': False,
'publish_copies': False,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Get copy
page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()
# Check that the copy exists
self.assertNotEqual(page_copy, None)
# Check that the copy is not live
self.assertFalse(page_copy.live)
self.assertTrue(page_copy.has_unpublished_changes)
# Check that the owner of the page is set correctly
self.assertEqual(page_copy.owner, self.user)
# Check that the children were not copied
self.assertEqual(page_copy.get_children().count(), 0)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_page_copy_post_copy_subpages(self):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': True,
'publish_copies': False,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Get copy
page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()
# Check that the copy exists
self.assertNotEqual(page_copy, None)
# Check that the copy is not live
self.assertFalse(page_copy.live)
self.assertTrue(page_copy.has_unpublished_changes)
# Check that the owner of the page is set correctly
self.assertEqual(page_copy.owner, self.user)
# Check that the children were copied
self.assertEqual(page_copy.get_children().count(), 2)
# Check the child pages
# Neither of them should be live
child_copy = page_copy.get_children().filter(slug='child-page').first()
self.assertNotEqual(child_copy, None)
self.assertFalse(child_copy.live)
self.assertTrue(child_copy.has_unpublished_changes)
unpublished_child_copy = page_copy.get_children().filter(slug='unpublished-child-page').first()
self.assertNotEqual(unpublished_child_copy, None)
self.assertFalse(unpublished_child_copy.live)
self.assertTrue(unpublished_child_copy.has_unpublished_changes)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_page_copy_post_copy_subpages_publish_copies(self):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': True,
'publish_copies': True,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Get copy
page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()
# Check that the copy exists
self.assertNotEqual(page_copy, None)
# Check that the copy is live
self.assertTrue(page_copy.live)
self.assertFalse(page_copy.has_unpublished_changes)
# Check that the owner of the page is set correctly
self.assertEqual(page_copy.owner, self.user)
# Check that the children were copied
self.assertEqual(page_copy.get_children().count(), 2)
# Check the child pages
# The child_copy should be live but the unpublished_child_copy shouldn't
child_copy = page_copy.get_children().filter(slug='child-page').first()
self.assertNotEqual(child_copy, None)
self.assertTrue(child_copy.live)
self.assertTrue(child_copy.has_unpublished_changes)
unpublished_child_copy = page_copy.get_children().filter(slug='unpublished-child-page').first()
self.assertNotEqual(unpublished_child_copy, None)
self.assertFalse(unpublished_child_copy.live)
self.assertTrue(unpublished_child_copy.has_unpublished_changes)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_page_copy_post_new_parent(self):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.test_child_page.id),
'copy_subpages': False,
'publish_copies': False,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the new parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.test_child_page.id, )))
# Check that the page was copied to the correct place
self.assertEqual(Page.objects.filter(slug='hello-world-2').first().get_parent(), self.test_child_page)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_page_copy_post_existing_slug_within_same_parent_page(self):
# This tests the existing slug checking on page copy when not changing the parent page
# Attempt to copy the page but forget to change the slug
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world',
'new_parent_page': str(self.root_page.id),
'copy_subpages': False,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Should not be redirected (as the save should fail)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'new_slug', "This slug is already in use within the context of its parent page \"Welcome to your new Wagtail site!\"")
def test_page_copy_post_existing_slug_to_another_parent_page(self):
# This tests the existing slug checking on page copy when changing the parent page
# Attempt to copy the page and change the parent page
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world',
'new_parent_page': str(self.test_child_page.id),
'copy_subpages': False,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.test_child_page.id, )))
def test_page_copy_post_invalid_slug(self):
# Attempt to copy the page but set an invalid slug string
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello world!',
'new_parent_page': str(self.root_page.id),
'copy_subpages': False,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Should not be redirected (as the save should fail)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(response, 'form', 'new_slug', "Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.")
def test_page_copy_no_publish_permission(self):
# Turn user into an editor who can add pages but not publish them
self.user.is_superuser = False
self.user.groups.add(
Group.objects.get(name="Editors"),
)
self.user.save()
# Get copy page
response = self.client.get(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )))
# The user should have access to the copy page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/copy.html')
# Make sure the "publish copies" field is hidden
self.assertNotContains(response, "Publish copies")
def test_page_copy_no_publish_permission_post_copy_subpages_publish_copies(self):
# This tests that unprivileged users cannot publish copied pages even if they hack their browser
# Turn user into an editor who can add pages but not publish them
self.user.is_superuser = False
self.user.groups.add(
Group.objects.get(name="Editors"),
)
self.user.save()
# Post
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': True,
'publish_copies': True,
}
response = self.client.post(reverse('wagtailadmin_pages_copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Get copy
page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()
# Check that the copy exists
self.assertNotEqual(page_copy, None)
# Check that the copy is not live
self.assertFalse(page_copy.live)
# Check that the owner of the page is set correctly
self.assertEqual(page_copy.owner, self.user)
# Check that the children were copied
self.assertEqual(page_copy.get_children().count(), 2)
# Check the child pages
# Neither of them should be live
child_copy = page_copy.get_children().filter(slug='child-page').first()
self.assertNotEqual(child_copy, None)
self.assertFalse(child_copy.live)
unpublished_child_copy = page_copy.get_children().filter(slug='unpublished-child-page').first()
self.assertNotEqual(unpublished_child_copy, None)
self.assertFalse(unpublished_child_copy.live)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
class TestPageUnpublish(TestCase, WagtailTestUtils):
def setUp(self):
self.user = self.login()
# Create a page to unpublish
self.root_page = Page.objects.get(id=2)
self.page = SimplePage(
title="Hello world!",
slug='hello-world',
live=True,
)
self.root_page.add_child(instance=self.page)
def test_unpublish_view(self):
"""
This tests that the unpublish view responds with an unpublish confirm page
"""
# Get unpublish page
response = self.client.get(reverse('wagtailadmin_pages_unpublish', args=(self.page.id, )))
# Check that the user received an unpublish confirm page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/confirm_unpublish.html')
def test_unpublish_view_invalid_page_id(self):
"""
This tests that the unpublish view returns an error if the page id is invalid
"""
# Get unpublish page
response = self.client.get(reverse('wagtailadmin_pages_unpublish', args=(12345, )))
# Check that the user received a 404 response
self.assertEqual(response.status_code, 404)
def test_unpublish_view_bad_permissions(self):
"""
This tests that the unpublish view doesn't allow users without unpublish permissions
"""
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get unpublish page
response = self.client.get(reverse('wagtailadmin_pages_unpublish', args=(self.page.id, )))
# Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_unpublish_view_post(self):
"""
This posts to the unpublish view and checks that the page was unpublished
"""
# Connect a mock signal handler to page_unpublished signal
mock_handler = mock.MagicMock()
page_unpublished.connect(mock_handler)
# Post to the unpublish page
response = self.client.post(reverse('wagtailadmin_pages_unpublish', args=(self.page.id, )))
# Should be redirected to explorer page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page was unpublished
self.assertFalse(SimplePage.objects.get(id=self.page.id).live)
# Check that the page_unpublished signal was fired
self.assertEqual(mock_handler.call_count, 1)
mock_call = mock_handler.mock_calls[0][2]
self.assertEqual(mock_call['sender'], self.page.specific_class)
self.assertEqual(mock_call['instance'], self.page)
self.assertIsInstance(mock_call['instance'], self.page.specific_class)
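# Tests for approving and rejecting revisions submitted for moderation,
# including the case where a later revision exists, invalid revision ids,
# permission checks and preview-for-moderation.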
class TestApproveRejectModeration(TestCase, WagtailTestUtils):
def setUp(self):
self.submitter = get_user_model().objects.create_superuser(
username='submitter',
email='[email protected]',
password='password',
)
self.user = self.login()
# Create a page and submit it for moderation
root_page = Page.objects.get(id=2)
self.page = SimplePage(
title="Hello world!",
slug='hello-world',
live=False,
has_unpublished_changes=True,
)
root_page.add_child(instance=self.page)
self.page.save_revision(user=self.submitter, submitted_for_moderation=True)
self.revision = self.page.get_latest_revision()
def test_approve_moderation_view(self):
"""
This posts to the approve moderation view and checks that the page was approved
"""
# Connect a mock signal handler to page_published signal
mock_handler = mock.MagicMock()
page_published.connect(mock_handler)
# Post
response = self.client.post(reverse('wagtailadmin_pages_approve_moderation', args=(self.revision.id, )))
# Check that the user was redirected to the dashboard
self.assertRedirects(response, reverse('wagtailadmin_home'))
page = Page.objects.get(id=self.page.id)
# Page must be live
self.assertTrue(page.live, "Approving moderation failed to set live=True")
# Page should now have no unpublished changes
self.assertFalse(page.has_unpublished_changes, "Approving moderation failed to set has_unpublished_changes=False")
# Check that the page_published signal was fired
self.assertEqual(mock_handler.call_count, 1)
mock_call = mock_handler.mock_calls[0][2]
self.assertEqual(mock_call['sender'], self.page.specific_class)
self.assertEqual(mock_call['instance'], self.page)
self.assertIsInstance(mock_call['instance'], self.page.specific_class)
def test_approve_moderation_when_later_revision_exists(self):
self.page.title = "Goodbye world!"
self.page.save_revision(user=self.submitter, submitted_for_moderation=False)
response = self.client.post(reverse('wagtailadmin_pages_approve_moderation', args=(self.revision.id, )))
# Check that the user was redirected to the dashboard
self.assertRedirects(response, reverse('wagtailadmin_home'))
page = Page.objects.get(id=self.page.id)
# Page must be live
self.assertTrue(page.live, "Approving moderation failed to set live=True")
# Page content should be the submitted version, not the published one
self.assertEqual(page.title, "Hello world!")
# Page should still have unpublished changes
self.assertTrue(page.has_unpublished_changes, "has_unpublished_changes incorrectly cleared on approve_moderation when a later revision exists")
def test_approve_moderation_view_bad_revision_id(self):
"""
This tests that the approve moderation view handles invalid revision ids correctly
"""
# Post
response = self.client.post(reverse('wagtailadmin_pages_approve_moderation', args=(12345, )))
# Check that the user received a 404 response
self.assertEqual(response.status_code, 404)
def test_approve_moderation_view_bad_permissions(self):
"""
This tests that the approve moderation view doesn't allow users without moderation permissions
"""
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Post
response = self.client.post(reverse('wagtailadmin_pages_approve_moderation', args=(self.revision.id, )))
# Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_reject_moderation_view(self):
"""
This posts to the reject moderation view and checks that the page was rejected
"""
# Post
response = self.client.post(reverse('wagtailadmin_pages_reject_moderation', args=(self.revision.id, )))
# Check that the user was redirected to the dashboard
self.assertRedirects(response, reverse('wagtailadmin_home'))
# Page must not be live
self.assertFalse(Page.objects.get(id=self.page.id).live)
# Revision must no longer be submitted for moderation
self.assertFalse(PageRevision.objects.get(id=self.revision.id).submitted_for_moderation)
def test_reject_moderation_view_bad_revision_id(self):
"""
This tests that the reject moderation view handles invalid revision ids correctly
"""
# Post
response = self.client.post(reverse('wagtailadmin_pages_reject_moderation', args=(12345, )))
# Check that the user received a 404 response
self.assertEqual(response.status_code, 404)
def test_reject_moderation_view_bad_permissions(self):
"""
This tests that the reject moderation view doesn't allow users without moderation permissions
"""
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Post
response = self.client.post(reverse('wagtailadmin_pages_reject_moderation', args=(self.revision.id, )))
# Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
def test_preview_for_moderation(self):
response = self.client.get(reverse('wagtailadmin_pages_preview_for_moderation', args=(self.revision.id, )))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'tests/simple_page.html')
self.assertContains(response, "Hello world!")
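# Tests the "pages of this type" (content type use) listing view.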
class TestContentTypeUse(TestCase, WagtailTestUtils):
fixtures = ['test.json']
def setUp(self):
self.user = self.login()
def test_content_type_use(self):
# Get use of event page
response = self.client.get(reverse('wagtailadmin_pages_type_use', args=('tests', 'eventpage')))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/content_type_use.html')
self.assertContains(response, "Christmas")
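# Tests that allowed subpage/parent page type rules are respected by the
# explorer "add child page" link, the add_subpage chooser and the create view.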
class TestSubpageBusinessRules(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Add standard page (allows subpages of any type)
self.standard_index = StandardIndex()
self.standard_index.title = "Standard Index"
self.standard_index.slug = "standard-index"
self.root_page.add_child(instance=self.standard_index)
# Add business page (allows BusinessChild and BusinessSubIndex as subpages)
self.business_index = BusinessIndex()
self.business_index.title = "Business Index"
self.business_index.slug = "business-index"
self.root_page.add_child(instance=self.business_index)
# Add business child (allows no subpages)
self.business_child = BusinessChild()
self.business_child.title = "Business Child"
self.business_child.slug = "business-child"
self.business_index.add_child(instance=self.business_child)
# Add business subindex (allows only BusinessChild as subpages)
self.business_subindex = BusinessSubIndex()
self.business_subindex.title = "Business Subindex"
self.business_subindex.slug = "business-subindex"
self.business_index.add_child(instance=self.business_subindex)
# Login
self.login()
def test_standard_subpage(self):
add_subpage_url = reverse('wagtailadmin_pages_add_subpage', args=(self.standard_index.id, ))
# explorer should contain a link to 'add child page'
response = self.client.get(reverse('wagtailadmin_explore', args=(self.standard_index.id, )))
self.assertEqual(response.status_code, 200)
self.assertContains(response, add_subpage_url)
# add_subpage should give us choices of StandardChild, and BusinessIndex.
# BusinessSubIndex and BusinessChild are not allowed
response = self.client.get(add_subpage_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, StandardChild.get_verbose_name())
self.assertContains(response, BusinessIndex.get_verbose_name())
self.assertNotContains(response, BusinessSubIndex.get_verbose_name())
self.assertNotContains(response, BusinessChild.get_verbose_name())
def test_business_subpage(self):
add_subpage_url = reverse('wagtailadmin_pages_add_subpage', args=(self.business_index.id, ))
# explorer should contain a link to 'add child page'
response = self.client.get(reverse('wagtailadmin_explore', args=(self.business_index.id, )))
self.assertEqual(response.status_code, 200)
self.assertContains(response, add_subpage_url)
# add_subpage should give us a cut-down set of page types to choose
response = self.client.get(add_subpage_url)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, StandardIndex.get_verbose_name())
self.assertNotContains(response, StandardChild.get_verbose_name())
self.assertContains(response, BusinessSubIndex.get_verbose_name())
self.assertContains(response, BusinessChild.get_verbose_name())
def test_business_child_subpage(self):
add_subpage_url = reverse('wagtailadmin_pages_add_subpage', args=(self.business_child.id, ))
# explorer should not contain a link to 'add child page', as this page doesn't accept subpages
response = self.client.get(reverse('wagtailadmin_explore', args=(self.business_child.id, )))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, add_subpage_url)
# this also means that fetching add_subpage is blocked at the permission-check level
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(self.business_child.id, )))
self.assertEqual(response.status_code, 403)
def test_cannot_add_invalid_subpage_type(self):
# cannot add StandardChild as a child of BusinessIndex, as StandardChild is not present in subpage_types
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'standardchild', self.business_index.id)))
self.assertEqual(response.status_code, 403)
# likewise for BusinessChild which has an empty subpage_types list
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'standardchild', self.business_child.id)))
self.assertEqual(response.status_code, 403)
# cannot add BusinessChild to StandardIndex, as BusinessChild restricts its parent page types
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'businesschild', self.standard_index.id)))
self.assertEqual(response.status_code, 403)
# but we can add a BusinessChild to BusinessIndex
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'businesschild', self.business_index.id)))
self.assertEqual(response.status_code, 200)
def test_not_prompted_for_page_type_when_only_one_choice(self):
response = self.client.get(reverse('wagtailadmin_pages_add_subpage', args=(self.business_subindex.id, )))
# BusinessChild is the only valid subpage type of BusinessSubIndex, so redirect straight there
self.assertRedirects(response, reverse('wagtailadmin_pages_create', args=('tests', 'businesschild', self.business_subindex.id)))
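# Tests that moderation notification emails (submitted, approved, rejected)
# are sent and that per-user UserProfile notification preferences are honoured.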
class TestNotificationPreferences(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Login
self.user = self.login()
# Create two moderator users for testing 'submitted' email
User = get_user_model()
self.moderator = User.objects.create_superuser('moderator', '[email protected]', 'password')
self.moderator2 = User.objects.create_superuser('moderator2', '[email protected]', 'password')
# Create a submitter for testing 'rejected' and 'approved' emails
self.submitter = User.objects.create_user('submitter', '[email protected]', 'password')
# User profiles for moderator2 and the submitter
self.moderator2_profile = UserProfile.get_for_user(self.moderator2)
self.submitter_profile = UserProfile.get_for_user(self.submitter)
# Create a page and submit it for moderation
self.child_page = SimplePage(
title="Hello world!",
slug='hello-world',
live=False,
)
self.root_page.add_child(instance=self.child_page)
# POST data to edit the page
self.post_data = {
'title': "I've been edited!",
'content': "Some content",
'slug': 'hello-world',
'action-submit': "Submit",
}
def submit(self):
return self.client.post(reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )), self.post_data)
def silent_submit(self):
"""
Sets up the child_page as needing moderation, without making a request
"""
self.child_page.save_revision(user=self.submitter, submitted_for_moderation=True)
self.revision = self.child_page.get_latest_revision()
def approve(self):
return self.client.post(reverse('wagtailadmin_pages_approve_moderation', args=(self.revision.id, )))
def reject(self):
return self.client.post(reverse('wagtailadmin_pages_reject_moderation', args=(self.revision.id, )))
def test_vanilla_profile(self):
# Check that the vanilla profile has rejected notifications on
self.assertEqual(self.submitter_profile.rejected_notifications, True)
# Check that the vanilla profile has approved notifications on
self.assertEqual(self.submitter_profile.approved_notifications, True)
def test_submit_notifications_sent(self):
# Submit
self.submit()
# Check that both the moderators got an email, and no others
self.assertEqual(len(mail.outbox), 1)
self.assertIn(self.moderator.email, mail.outbox[0].to)
self.assertIn(self.moderator2.email, mail.outbox[0].to)
self.assertEqual(len(mail.outbox[0].to), 2)
def test_submit_notification_preferences_respected(self):
# moderator2 doesn't want emails
self.moderator2_profile.submitted_notifications = False
self.moderator2_profile.save()
# Submit
self.submit()
# Check that only one moderator got an email
self.assertEqual(len(mail.outbox), 1)
self.assertEqual([self.moderator.email], mail.outbox[0].to)
def test_approved_notifications(self):
# Set up the page version
self.silent_submit()
# Approve
self.approve()
# Submitter must receive an approved email
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['[email protected]'])
self.assertEqual(mail.outbox[0].subject, 'The page "Hello world!" has been approved')
def test_approved_notifications_preferences_respected(self):
# Submitter doesn't want 'approved' emails
self.submitter_profile.approved_notifications = False
self.submitter_profile.save()
# Set up the page version
self.silent_submit()
# Approve
self.approve()
# No email to send
self.assertEqual(len(mail.outbox), 0)
def test_rejected_notifications(self):
# Set up the page version
self.silent_submit()
# Reject
self.reject()
# Submitter must receive a rejected email
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['[email protected]'])
self.assertEqual(mail.outbox[0].subject, 'The page "Hello world!" has been rejected')
def test_rejected_notification_preferences_respected(self):
# Submitter doesn't want 'rejected' emails
self.submitter_profile.rejected_notifications = False
self.submitter_profile.save()
# Set up the page version
self.silent_submit()
# Reject
self.reject()
# No email to send
self.assertEqual(len(mail.outbox), 0)
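# Tests for the page lock and unlock views: POST-only access, redirects via
# the 'next' parameter, invalid page ids and permission checks.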
class TestLocking(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Login
self.user = self.login()
# Create a page and submit it for moderation
self.child_page = SimplePage(
title="Hello world!",
slug='hello-world',
live=False,
)
self.root_page.add_child(instance=self.child_page)
def test_lock_post(self):
response = self.client.post(reverse('wagtailadmin_pages_lock', args=(self.child_page.id, )))
# Check response
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page is locked
self.assertTrue(Page.objects.get(id=self.child_page.id).locked)
def test_lock_get(self):
response = self.client.get(reverse('wagtailadmin_pages_lock', args=(self.child_page.id, )))
# Check response
self.assertEqual(response.status_code, 405)
# Check that the page is still unlocked
self.assertFalse(Page.objects.get(id=self.child_page.id).locked)
def test_lock_post_already_locked(self):
# Lock the page
self.child_page.locked = True
self.child_page.save()
response = self.client.post(reverse('wagtailadmin_pages_lock', args=(self.child_page.id, )))
# Check response
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page is still locked
self.assertTrue(Page.objects.get(id=self.child_page.id).locked)
def test_lock_post_with_good_redirect(self):
response = self.client.post(reverse('wagtailadmin_pages_lock', args=(self.child_page.id, )), {
'next': reverse('wagtailadmin_pages_edit', args=(self.child_page.id, ))
})
# Check response
self.assertRedirects(response, reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )))
# Check that the page is locked
self.assertTrue(Page.objects.get(id=self.child_page.id).locked)
    def test_lock_post_with_bad_redirect(self):
response = self.client.post(reverse('wagtailadmin_pages_lock', args=(self.child_page.id, )), {
'next': 'http://www.google.co.uk'
})
# Check response
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page is locked
        self.assertTrue(Page.objects.get(id=self.child_page.id).locked)
def test_lock_post_bad_page(self):
response = self.client.post(reverse('wagtailadmin_pages_lock', args=(9999, )))
# Check response
self.assertEqual(response.status_code, 404)
# Check that the page is still unlocked
self.assertFalse(Page.objects.get(id=self.child_page.id).locked)
def test_lock_post_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
response = self.client.post(reverse('wagtailadmin_pages_lock', args=(self.child_page.id, )))
# Check response
self.assertEqual(response.status_code, 403)
# Check that the page is still unlocked
self.assertFalse(Page.objects.get(id=self.child_page.id).locked)
def test_unlock_post(self):
# Lock the page
self.child_page.locked = True
self.child_page.save()
response = self.client.post(reverse('wagtailadmin_pages_unlock', args=(self.child_page.id, )))
# Check response
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page is unlocked
self.assertFalse(Page.objects.get(id=self.child_page.id).locked)
def test_unlock_get(self):
# Lock the page
self.child_page.locked = True
self.child_page.save()
response = self.client.get(reverse('wagtailadmin_pages_unlock', args=(self.child_page.id, )))
# Check response
self.assertEqual(response.status_code, 405)
# Check that the page is still locked
self.assertTrue(Page.objects.get(id=self.child_page.id).locked)
def test_unlock_post_already_unlocked(self):
response = self.client.post(reverse('wagtailadmin_pages_unlock', args=(self.child_page.id, )))
# Check response
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page is still unlocked
self.assertFalse(Page.objects.get(id=self.child_page.id).locked)
def test_unlock_post_with_good_redirect(self):
# Lock the page
self.child_page.locked = True
self.child_page.save()
response = self.client.post(reverse('wagtailadmin_pages_unlock', args=(self.child_page.id, )), {
'next': reverse('wagtailadmin_pages_edit', args=(self.child_page.id, ))
})
# Check response
self.assertRedirects(response, reverse('wagtailadmin_pages_edit', args=(self.child_page.id, )))
# Check that the page is unlocked
self.assertFalse(Page.objects.get(id=self.child_page.id).locked)
def test_unlock_post_with_bad_redirect(self):
# Lock the page
self.child_page.locked = True
self.child_page.save()
response = self.client.post(reverse('wagtailadmin_pages_unlock', args=(self.child_page.id, )), {
'next': 'http://www.google.co.uk'
})
# Check response
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Check that the page is unlocked
self.assertFalse(Page.objects.get(id=self.child_page.id).locked)
def test_unlock_post_bad_page(self):
# Lock the page
self.child_page.locked = True
self.child_page.save()
response = self.client.post(reverse('wagtailadmin_pages_unlock', args=(9999, )))
# Check response
self.assertEqual(response.status_code, 404)
# Check that the page is still locked
self.assertTrue(Page.objects.get(id=self.child_page.id).locked)
def test_unlock_post_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Lock the page
self.child_page.locked = True
self.child_page.save()
response = self.client.post(reverse('wagtailadmin_pages_unlock', args=(self.child_page.id, )))
# Check response
self.assertEqual(response.status_code, 403)
# Check that the page is still locked
self.assertTrue(Page.objects.get(id=self.child_page.id).locked)
class TestIssue197(TestCase, WagtailTestUtils):
def test_issue_197(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Create a tagged page with no tags
self.tagged_page = self.root_page.add_child(instance=TaggedPage(
title="Tagged page",
slug='tagged-page',
live=False,
))
# Login
self.user = self.login()
# Add some tags and publish using edit view
post_data = {
'title': "Tagged page",
            'slug': 'tagged-page',
'tags': "hello, world",
'action-publish': "Publish",
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.tagged_page.id, )), post_data)
# Should be redirected to explorer
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
        # Check that both tags are in the page's tag set
page = TaggedPage.objects.get(id=self.tagged_page.id)
self.assertIn('hello', page.tags.slugs())
self.assertIn('world', page.tags.slugs())
class TestChildRelationsOnSuperclass(TestCase, WagtailTestUtils):
# In our test models we define AdvertPlacement as a child relation on the Page model.
# Here we check that this behaves correctly when exposed on the edit form of a Page
# subclass (StandardIndex here).
fixtures = ['test.json']
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
self.test_advert = Advert.objects.get(id=1)
# Add child page
self.index_page = StandardIndex(
title="My lovely index",
slug="my-lovely-index",
advert_placements=[AdvertPlacement(advert=self.test_advert)]
)
self.root_page.add_child(instance=self.index_page)
# Login
self.login()
def test_get_create_form(self):
response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'standardindex', self.root_page.id)))
self.assertEqual(response.status_code, 200)
# Response should include an advert_placements formset labelled Adverts
self.assertContains(response, "Adverts")
self.assertContains(response, "id_advert_placements-TOTAL_FORMS")
def test_post_create_form(self):
post_data = {
'title': "New index!",
'slug': 'new-index',
'advert_placements-TOTAL_FORMS': '1',
'advert_placements-INITIAL_FORMS': '0',
'advert_placements-MAX_NUM_FORMS': '1000',
'advert_placements-0-advert': '1',
'advert_placements-0-colour': 'yellow',
'advert_placements-0-id': '',
}
response = self.client.post(reverse('wagtailadmin_pages_create', args=('tests', 'standardindex', self.root_page.id)), post_data)
# Find the page and check it
page = Page.objects.get(path__startswith=self.root_page.path, slug='new-index').specific
# Should be redirected to edit page
self.assertRedirects(response, reverse('wagtailadmin_pages_edit', args=(page.id, )))
self.assertEqual(page.advert_placements.count(), 1)
self.assertEqual(page.advert_placements.first().advert.text, 'test_advert')
def test_get_edit_form(self):
response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.index_page.id, )))
self.assertEqual(response.status_code, 200)
# Response should include an advert_placements formset labelled Adverts
self.assertContains(response, "Adverts")
self.assertContains(response, "id_advert_placements-TOTAL_FORMS")
# the formset should be populated with an existing form
self.assertContains(response, "id_advert_placements-0-advert")
self.assertContains(response, '<option value="1" selected="selected">test_advert</option>')
def test_post_edit_form(self):
post_data = {
'title': "My lovely index",
'slug': 'my-lovely-index',
'advert_placements-TOTAL_FORMS': '2',
'advert_placements-INITIAL_FORMS': '1',
'advert_placements-MAX_NUM_FORMS': '1000',
'advert_placements-0-advert': '1',
'advert_placements-0-colour': 'yellow',
'advert_placements-0-id': self.index_page.advert_placements.first().id,
'advert_placements-1-advert': '1',
'advert_placements-1-colour': 'purple',
'advert_placements-1-id': '',
'action-publish': "Publish",
}
response = self.client.post(reverse('wagtailadmin_pages_edit', args=(self.index_page.id, )), post_data)
# Should be redirected to explorer
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Find the page and check it
page = Page.objects.get(id=self.index_page.id).specific
self.assertEqual(page.advert_placements.count(), 2)
self.assertEqual(page.advert_placements.all()[0].advert.text, 'test_advert')
self.assertEqual(page.advert_placements.all()[1].advert.text, 'test_advert')
| jorge-marques/wagtail | wagtail/wagtailadmin/tests/test_pages_views.py | Python | bsd-3-clause | 93,571 |
def get_file_extension(filename):
return filename.split(".")[-1]
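# For example, get_file_extension("styles.min.css") returns "css"; a filename
# without a dot (e.g. "Makefile") is returned unchanged, since split(".") then
# yields a single-element list.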
| finiteloopsoftware/django-compressor | compress/utils.py | Python | bsd-3-clause | 69 |
import pytest
import numpy as np
from numpy.testing import assert_allclose
from .....extern.six.moves import range
from ..mle import design_matrix, periodic_fit
@pytest.fixture
def t():
rand = np.random.RandomState(42)
return 10 * rand.rand(10)
@pytest.mark.parametrize('freq', [1.0, 2])
@pytest.mark.parametrize('dy', [None, 2.0])
@pytest.mark.parametrize('bias', [True, False])
def test_design_matrix(t, freq, dy, bias):
X = design_matrix(t, freq, dy, bias=bias)
assert X.shape == (t.shape[0], 2 + bool(bias))
if bias:
assert_allclose(X[:, 0], 1. / (dy or 1.0))
assert_allclose(X[:, -2], np.sin(2 * np.pi * freq * t) / (dy or 1.0))
assert_allclose(X[:, -1], np.cos(2 * np.pi * freq * t) / (dy or 1.0))
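# With bias=True (and the default single harmonic) the columns of the returned
# design matrix are [1, sin(2*pi*freq*t), cos(2*pi*freq*t)], each divided by dy
# (or by 1 when dy is None), which is exactly what the assertions above check.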
@pytest.mark.parametrize('nterms', range(4))
def test_multiterm_design_matrix(t, nterms):
dy = 2.0
freq = 1.5
X = design_matrix(t, freq, dy=dy, bias=True, nterms=nterms)
assert X.shape == (t.shape[0], 1 + 2 * nterms)
assert_allclose(X[:, 0], 1. / dy)
for i in range(1, nterms + 1):
assert_allclose(X[:, 2 * i - 1], np.sin(2 * np.pi * i * freq * t) / dy)
assert_allclose(X[:, 2 * i], np.cos(2 * np.pi * i * freq * t) / dy)
@pytest.mark.parametrize('nterms', range(1, 4))
@pytest.mark.parametrize('freq', [1, 2])
@pytest.mark.parametrize('fit_mean', [True, False])
def test_exact_mle_fit(nterms, freq, fit_mean):
rand = np.random.RandomState(42)
t = 10 * rand.rand(30)
theta = -1 + rand.rand(2 * nterms + 1)
y = np.zeros(t.shape)
if fit_mean:
y = theta[0] * np.ones(t.shape)
for i in range(1, nterms + 1):
y += theta[2 * i - 1] * np.sin(2 * np.pi * i * freq * t)
y += theta[2 * i] * np.cos(2 * np.pi * i * freq * t)
y_fit = periodic_fit(t, y, dy=1, frequency=freq, t_fit=t, nterms=nterms,
center_data=False, fit_mean=fit_mean)
assert_allclose(y, y_fit)
| kelle/astropy | astropy/stats/lombscargle/implementations/tests/test_mle.py | Python | bsd-3-clause | 1,921 |
"""The WaveBlocks Project
IOM plugin providing functions for handling various
overlap matrices of linear combinations of general
wavepackets.
@author: R. Bourquin
@copyright: Copyright (C) 2013 R. Bourquin
@license: Modified BSD License
"""
import numpy as np
def add_overlaplcwp(self, parameters, timeslots=None, matrixsize=None, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Add storage for various overlap matrices. We can store one matrix type
per key.
    ========= ============================================================
    Key name  Matrix
    ========= ============================================================
    ``ov``    :math:`\langle\Upsilon | \Upsilon\rangle`
    ``ovkin`` :math:`\langle\Upsilon | T | \Upsilon\rangle`
    ``ovpot`` :math:`\langle\Upsilon | V(\underline{x}) | \Upsilon\rangle`
    ========= ============================================================
Note that 'strange' errors occur if we later try to load or save
matrices for a key we did not initialise with this function.
:param parameters: A :py:class:`ParameterProvider` instance. It can
be empty and is not used at the moment.
:param timeslots: The number of time slots we need. Can be set to ``None``
to get automatically growing datasets.
:param matrixsize: The (maximal) size of each of the overlap matrices. If specified
this remains fixed for all timeslots. Can be set to ``None`` (default)
to get automatically growing datasets.
:type matrixsize: Pair of integers or ``None``.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
"""
valid_keys = ("ov", "ovkin", "ovpot")
# Create the dataset with appropriate parameters
grp_ov = self._srf[self._prefixb + str(blockid)].create_group("overlaplcwp")
if timeslots is None:
T = 0
Ts = None
csTs = 128
else:
T = timeslots
Ts = timeslots
csTs = min(128, Ts)
if matrixsize is None:
Jr = 0
Jc = 0
Jrs = None
Jcs = None
csJrs = 128
csJcs = 128
else:
Jr, Jc = matrixsize
Jrs, Jcs = matrixsize
csJrs = min(128, Jrs)
csJcs = min(128, Jcs)
for k in key:
if k not in valid_keys:
raise ValueError("Unknown key value " + str(k))
name = k[2:]
daset_tg = grp_ov.create_dataset("timegrid" + name, (T,), dtype=np.integer, chunks=True, maxshape=(Ts,), fillvalue=-1)
grp_ov.create_dataset("shape" + name, (T, 2), dtype=np.integer, chunks=(csTs, 2), maxshape=(Ts, 2))
grp_ov.create_dataset("overlap" + name, (T, Jr, Jc), dtype=np.complexfloating, chunks=(1, csJrs, csJcs), maxshape=(Ts, Jrs, Jcs))
daset_tg.attrs["pointer"] = 0
def delete_overlaplcwp(self, blockid=0):
r"""Remove the stored overlap matrices.
:param blockid: The ID of the data block to operate on.
"""
try:
del self._srf[self._prefixb + str(blockid) + "/overlaplcwp"]
except KeyError:
pass
def has_overlaplcwp(self, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Ask if the specified data block has the desired data tensor.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
"""
r = True
r &= ("overlaplcwp" in self._srf[self._prefixb + str(blockid)].keys())
if r and "ov" in key:
r &= ("overlap" in self._srf[self._prefixb + str(blockid)]["overlaplcwp"].keys())
if r and "ovpot" in key:
r &= ("overlappot" in self._srf[self._prefixb + str(blockid)]["overlaplcwp"].keys())
if r and "ovkin" in key:
r &= ("overlapkin" in self._srf[self._prefixb + str(blockid)]["overlaplcwp"].keys())
return r
def save_overlaplcwp(self, data, timestep=None, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Save overlap matrices of linear combinations of general wavepackets.
In principle this function also supports non-square matrices.
:param data: The data matrices to save.
:type data: A list of :py:class:`ndarray` entries.
:param timestep: The timestep at which we save the data.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
"""
for item, datum in zip(key, data):
if item == "ov":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegrid"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shape"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlap"
elif item == "ovkin":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridkin"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapekin"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlapkin"
elif item == "ovpot":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridpot"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapepot"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlappot"
else:
raise ValueError("Unknown key value {}".format(item))
timeslot = self._srf[pathtg].attrs["pointer"]
# Write the data
self.must_resize(pathd, timeslot)
        datum = np.atleast_2d(np.squeeze(datum))
        rows, cols = datum.shape
        self.must_resize(pathd, rows - 1, axis=1)
        self.must_resize(pathd, cols - 1, axis=2)
        self._srf[pathd][timeslot, :rows, :cols] = datum
self.must_resize(pathsh, timeslot)
self._srf[pathsh][timeslot, :] = np.array([rows, cols])
# Write the timestep to which the stored values belong into the timegrid
self.must_resize(pathtg, timeslot)
self._srf[pathtg][timeslot] = timestep
# Update the pointer
self._srf[pathtg].attrs["pointer"] += 1
def load_overlaplcwp_timegrid(self, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Load the timegrid corresponding to the overlap matrices specified.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to load. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
:return: A list of :py:class:`ndarray` each having one column.
"""
tg = []
for item in key:
if item == "ov":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegrid"
tg.append(self._srf[pathtg][:])
elif item == "ovkin":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridkin"
tg.append(self._srf[pathtg][:])
elif item == "ovpot":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridpot"
tg.append(self._srf[pathtg][:])
else:
raise ValueError("Unknown key value {}".format(item))
if len(tg) == 1:
print(tg)
return tg[0]
else:
return tuple(tg)
def load_overlaplcwp_shape(self, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Load the shape of the overlap matrices specified.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
:return: A list of :py:class:`ndarray` each having two columns.
"""
tg = []
for item in key:
if item == "ov":
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shape"
tg.append(self._srf[pathsh][:])
elif item == "ovkin":
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapekin"
tg.append(self._srf[pathsh][:])
elif item == "ovpot":
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapepot"
tg.append(self._srf[pathsh][:])
else:
raise ValueError("Unknown key value {}".format(item))
if len(tg) == 1:
print(tg)
return tg[0]
else:
return tuple(tg)
def load_overlaplcwp(self, timestep=None, blockid=0, key=("ov", "ovkin", "ovpot")):
r"""Load overlap matrices of linear combinations of general wavepackets.
:param timestep: Load only the data of this timestep.
:param split: Split the data array into one array for each component.
:param blockid: The ID of the data block to operate on.
:param key: Specify which overlap matrices to save. All are independent.
:type key: Tuple of valid identifier strings that are ``ov``, ``ovkin`` and ``ovpot``.
Default is ``("ov", "ovkin", "ovpot")``.
:return: A list of :py:class:`ndarray` items. Their shapes depend on the
exact value of the above arguments.
"""
result = []
for item in key:
if item == "ov":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegrid"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shape"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlap"
elif item == "ovkin":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridkin"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapekin"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlapkin"
elif item == "ovpot":
pathtg = "/" + self._prefixb + str(blockid) + "/overlaplcwp/timegridpot"
pathsh = "/" + self._prefixb + str(blockid) + "/overlaplcwp/shapepot"
pathd = "/" + self._prefixb + str(blockid) + "/overlaplcwp/overlappot"
else:
raise ValueError("Unknown key value {}".format(item))
if timestep is not None:
index = self.find_timestep_index(pathtg, timestep)
shape = self._srf[pathsh][index, :]
datum = self._srf[pathd][index, :shape[0], :shape[1]]
else:
datum = self._srf[pathd][:, :, :]
result.append(datum)
if len(result) == 1:
return result[0]
else:
return tuple(result)
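# A minimal usage sketch (assumes an IOManager instance ``iom`` to which this
# plugin's functions have been attached, and a complex overlap matrix ``M``;
# both names are illustrative):
#
#   iom.add_overlaplcwp(parameters, timeslots=None, matrixsize=None, blockid=0, key=("ov",))
#   iom.save_overlaplcwp([M], timestep=0, blockid=0, key=("ov",))
#   M_loaded = iom.load_overlaplcwp(timestep=0, blockid=0, key=("ov",))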
| WaveBlocks/WaveBlocksND | WaveBlocksND/IOM_plugin_overlaplcwp.py | Python | bsd-3-clause | 10,811 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2011, Monash e-Research Centre
# (Monash University, Australia)
# Copyright (c) 2010-2011, VeRSI Consortium
# (Victorian eResearch Strategic Initiative, Australia)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the VeRSI, the VeRSI Consortium members, nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
__init__.py
.. moduleauthor:: Russell Sim <[email protected]>
"""
import logging
from django.conf import settings
from django.utils.importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.db.models.signals import post_save
from django.core.exceptions import MiddlewareNotUsed
logger = logging.getLogger(__name__)
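# The middleware below expects ``settings.POST_SAVE_FILTERS`` to be a list of
# tuples holding a dotted filter class path plus optional positional and keyword
# arguments. A sketch of the expected format (the filter path is purely
# illustrative, not a real module):
#
#   POST_SAVE_FILTERS = [
#       ("myapp.filters.MyFilter", ["arg1"], {"option": "value"}),
#   ]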
class FilterInitMiddleware(object):
def __init__(self):
from tardis.tardis_portal.models import Dataset_File
for f in settings.POST_SAVE_FILTERS:
cls = f[0]
args = []
kw = {}
if len(f) == 2:
args = f[1]
if len(f) == 3:
kw = f[2]
hook = self._safe_import(cls, args, kw)
            # XXX seems to require a strong ref else it won't fire,
# could be because some hooks are classes not functions.
post_save.connect(hook, sender=Dataset_File, weak=False)
logger.debug('Initialised postsave hook %s' % post_save.receivers)
# disable middleware
raise MiddlewareNotUsed()
def _safe_import(self, path, args, kw):
try:
dot = path.rindex('.')
except ValueError:
raise ImproperlyConfigured('%s isn\'t a filter module' % path)
filter_module, filter_classname = path[:dot], path[dot + 1:]
try:
mod = import_module(filter_module)
except ImportError, e:
raise ImproperlyConfigured('Error importing filter %s: "%s"' %
(filter_module, e))
try:
filter_class = getattr(mod, filter_classname)
except AttributeError:
raise ImproperlyConfigured('Filter module "%s" does not define a "%s" class' %
(filter_module, filter_classname))
filter_instance = filter_class(*args, **kw)
        return filter_instance
 | aaryani/CoreTardis | tardis/apps/microtardis/filters/__init__.py | Python | bsd-3-clause | 3,708
# proxy module
from __future__ import absolute_import
from envisage.ui.single_project.action.configure_action import *
| enthought/etsproxy | enthought/envisage/ui/single_project/action/configure_action.py | Python | bsd-3-clause | 119 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-19 19:08
from __future__ import unicode_literals
import channels.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("channels", "0001_add_tokens"),
]
operations = [
migrations.CreateModel(
name="Subscription",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("updated_on", models.DateTimeField(auto_now=True)),
("post_id", channels.models.Base36IntegerField()),
("comment_id", channels.models.Base36IntegerField(null=True)),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.AlterUniqueTogether(
name="subscription",
unique_together=set([("user", "post_id", "comment_id")]),
),
migrations.AlterIndexTogether(
name="subscription", index_together=set([("post_id", "comment_id")])
),
]
| mitodl/open-discussions | channels/migrations/0002_add_subscription.py | Python | bsd-3-clause | 1,675 |
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code != None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s uninstall org.xwalk.%s" % (
ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0]))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def instPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s install %s" % (ADB_CMD,
PARAMETERS.device, os.path.join(root, file))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
os.system("chmod 777 %s/stablonglast2d/*.sh" % SCRIPT_DIR)
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.device:
(return_code, output) = doCMD("adb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
if not PARAMETERS.device:
print "No device found"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| yugang/crosswalk-test-suite | stability/wrt-stablonglast2d-android-tests/inst.apk.py | Python | bsd-3-clause | 3,243 |
# -*- coding: utf-8 -*-
"""
Author
------
Bo Zhang
Email
-----
[email protected]
Created on
----------
- Sat Sep 03 12:00:00 2016
Modifications
-------------
- Sat Sep 03 12:00:00 2016
Aims
----
- normalization
Notes
-----
This is migrated from **SLAM** package
"""
from __future__ import division
import numpy as np
from joblib import Parallel, delayed
from .extern.interpolate import SmoothSpline
def normalize_spectrum(wave, flux, norm_range, dwave,
p=(1E-6, 1E-6), q=0.5, ivar=None, eps=1e-10,
rsv_frac=1.):
""" A double smooth normalization of a spectrum
Converted from Chao Liu's normSpectrum.m
Updated by Bo Zhang
Parameters
----------
wave: ndarray (n_pix, )
wavelegnth array
flux: ndarray (n_pix, )
flux array
norm_range: tuple
a tuple consisting (wave_start, wave_stop)
dwave: float
binning width
p: tuple of 2 ps
smoothing parameter between 0 and 1:
0 -> LS-straight line
1 -> cubic spline interpolant
    q: float in range of (0, 1)
        percentile expressed as a fraction, between 0 and 1
ivar: ndarray (n_pix, ) | None
ivar array, default is None
eps: float
the ivar threshold
rsv_frac: float
        the fraction of pixels reserved in terms of std. default is 1.
Returns
-------
flux_norm: ndarray
normalized flux
flux_cont: ndarray
continuum flux
Example
-------
>>> flux_norm, flux_cont = normalize_spectrum(
>>> wave, flux, (4000., 8000.), 100., p=(1E-8, 1E-7), q=0.5,
>>> rsv_frac=2.0)
"""
if ivar is not None:
# ivar is set
ivar = np.where(np.logical_or(wave < norm_range[0],
wave > norm_range[1]), 0, ivar)
ivar = np.where(ivar <= eps, eps, ivar)
# mask = ivar <= eps
var = 1. / ivar
else:
# default config is even weight
var = np.ones_like(flux)
# wave = wave[~mask]
# flux = flux[~mask]
# check q region
assert 0. < q < 1.
# n_iter = len(p)
n_bin = np.int(np.fix(np.diff(norm_range) / dwave) + 1)
wave1 = norm_range[0]
# SMOOTH 1
# print(wave.shape, flux.shape, var.shape)
if ivar is not None:
ind_good_init = 1. * (ivar > 0.) * (flux > 0.)
else:
ind_good_init = 1. * (flux > 0.)
ind_good_init = ind_good_init.astype(np.bool)
# print("@Cham: sum(ind_good_init)", np.sum(ind_good_init))
flux_smoothed1 = SmoothSpline(wave[ind_good_init], flux[ind_good_init],
p=p[0], var=var[ind_good_init])(wave)
dflux = flux - flux_smoothed1
# collecting continuum pixels --> ITERATION 1
ind_good = np.zeros(wave.shape, dtype=np.bool)
for i_bin in range(n_bin):
ind_bin = np.logical_and(wave > wave1 + (i_bin - 0.5) * dwave,
wave <= wave1 + (i_bin + 0.5) * dwave)
if np.sum(ind_bin > 0):
# median & sigma
bin_median = np.median(dflux[ind_bin])
bin_std = np.median(np.abs(dflux - bin_median))
# within 1 sigma with q-percentile
ind_good_ = ind_bin * (
np.abs(dflux - np.nanpercentile(dflux[ind_bin], q * 100.)) < (
rsv_frac * bin_std))
ind_good = np.logical_or(ind_good, ind_good_)
ind_good = np.logical_and(ind_good, ind_good_init)
# assert there is continuum pixels
try:
assert np.sum(ind_good) > 0
except AssertionError:
Warning("@Keenan.normalize_spectrum(): unable to find continuum! ")
ind_good = np.ones(wave.shape, dtype=np.bool)
# SMOOTH 2
# continuum flux
flux_smoothed2 = SmoothSpline(
wave[ind_good], flux[ind_good], p=p[1], var=var[ind_good])(wave)
# normalized flux
flux_norm = flux / flux_smoothed2
return flux_norm, flux_smoothed2
def normalize_spectra_block(wave, flux_block, norm_range, dwave,
p=(1E-6, 1E-6), q=0.5, ivar_block=None, eps=1e-10,
rsv_frac=3., n_jobs=1, verbose=10):
""" normalize multiple spectra using the same configuration
This is specially designed for TheKeenan
Parameters
----------
wave: ndarray (n_pix, )
        wavelength array
flux_block: ndarray (n_obs, n_pix)
flux array
norm_range: tuple
a tuple consisting (wave_start, wave_stop)
dwave: float
binning width
p: tuple of 2 ps
smoothing parameter between 0 and 1:
0 -> LS-straight line
1 -> cubic spline interpolant
    q: float in range of (0, 1)
        percentile expressed as a fraction, between 0 and 1
ivar_block: ndarray (n_pix, ) | None
ivar array, default is None
eps: float
the ivar threshold
rsv_frac: float
the fraction of pixels reserved in terms of std. default is 3.
n_jobs: int
number of processes launched by joblib
verbose: int / bool
verbose level
    Returns
    -------
    flux_norm_block: ndarray (n_obs, n_pix)
        normalized flux for each spectrum
    flux_cont_block: ndarray (n_obs, n_pix)
        continuum flux for each spectrum
    """
if ivar_block is None:
ivar_block = np.ones_like(flux_block)
if flux_block.ndim == 1:
flux_block.reshape(1, -1)
n_spec = flux_block.shape[0]
results = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(normalize_spectrum)(
wave, flux_block[i], norm_range, dwave, p=p, q=q,
ivar=ivar_block[i], eps=eps, rsv_frac=rsv_frac)
for i in range(n_spec))
# unpack results
flux_norm_block = []
flux_cont_block = []
for result in results:
flux_norm_block.append(result[0])
flux_cont_block.append(result[1])
return np.array(flux_norm_block), np.array(flux_cont_block)
def get_stable_pixels(pixel_disp, wave_arm=100, frac=0.20):
"""
Parameters
----------
pixel_disp: np.ndarray
dispersion array
wave_arm: int
the arm length in terms of pixels
frac: float
the reserved fraction, between 0.00 and 1.00
Returns
-------
ind_stable
"""
ind_stable = np.zeros_like(pixel_disp, dtype=np.bool)
for i in range(len(ind_stable)):
edge_l = np.max([i - wave_arm, 0])
edge_r = np.min([i + wave_arm, len(pixel_disp)])
if pixel_disp[i] <= \
np.percentile(pixel_disp[edge_l:edge_r], frac * 100.):
ind_stable[i] = True
return ind_stable
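# For example, to keep roughly the 20% least-dispersed pixels, judged within a
# +/-100 pixel window around each position (``pixel_disp`` is assumed to be a
# per-pixel dispersion array of the same length as the wavelength grid ``wave``;
# both names are illustrative):
#
#   stable_mask = get_stable_pixels(pixel_disp, wave_arm=100, frac=0.20)
#   wave_stable = wave[stable_mask]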
# TODO: this is a generalized version
def normalize_spectra(wave_flux_tuple_list, norm_range, dwave,
p=(1E-6, 1E-6), q=50, n_jobs=1, verbose=False):
""" normalize multiple spectra using the same configuration
Parameters
----------
wave_flux_tuple_list: list[n_obs]
a list of (wave, flux) tuple
norm_range: tuple
a tuple consisting (wave_start, wave_stop)
dwave: float
binning width
p: tuple of 2 ps
smoothing parameter between 0 and 1:
0 -> LS-straight line
1 -> cubic spline interpolant
    q: float in range of [0, 100]
        percentile, between 0 and 100
n_jobs: int
number of processes launched by joblib
verbose: int / bool
verbose level
Returns
-------
flux_norm: ndarray
normalized flux
"""
pass
def test_normaliza_spectra_block():
import os
os.chdir('/pool/projects/TheKeenan/data/TheCannonData')
from TheCannon import apogee
import matplotlib.pyplot as plt
tr_ID, wl, tr_flux, tr_ivar = apogee.load_spectra("example_DR10/Data")
tr_label = apogee.load_labels("example_DR10/reference_labels.csv")
test_ID = tr_ID
test_flux = tr_flux
test_ivar = tr_ivar
r = normalize_spectra_block(wl, tr_flux, (15200., 16900.), 30., q=0.9,
rsv_frac=0.5,
p=(1E-10, 1E-10), ivar_block=tr_ivar,
n_jobs=10, verbose=10)
flux_norm, flux_cont = r
flux_norm = np.array(flux_norm)
flux_cont = np.array(flux_cont)
flux_ivar = tr_ivar * flux_cont ** 2
fig = plt.figure()
ax = fig.add_subplot(111)
for i in range(10, 20):
ofst = i * 0.5
ax.plot(wl, tr_flux[i] + ofst, 'b')
ax.plot(wl, flux_cont[i] + ofst, 'r')
fig.tight_layout()
fig.savefig(
'/pool/projects/TheKeenan/data/TheCannonData/test_norm_spec_1.pdf')
if __name__ == '__main__':
test_normaliza_spectra_block()
| hypergravity/hrs | twodspec/normalization.py | Python | bsd-3-clause | 8,493 |
#####################################################################
#
# metro.py
#
# Copyright (c) 2016, Eran Egozy
#
# Released under the MIT License (http://opensource.org/licenses/MIT)
#
#####################################################################
from clock import kTicksPerQuarter, quantize_tick_up
class Metronome(object):
"""Plays a steady click every beat.
"""
def __init__(self, sched, synth, channel = 0, patch=(128, 0), pitch = 60):
super(Metronome, self).__init__()
self.sched = sched
self.synth = synth
self.channel = channel
self.patch = patch
self.pitch = pitch
self.beat_len = kTicksPerQuarter
# run-time variables
self.on_cmd = None
self.off_cmd = None
self.playing = False
def start(self):
if self.playing:
return
self.playing = True
# set up the correct sound (program change)
self.synth.program(self.channel, self.patch[0], self.patch[1])
# find the tick of the next beat, and make it "beat aligned"
now = self.sched.get_tick()
next_beat = quantize_tick_up(now, self.beat_len)
# now, post the _noteon function (and remember this command)
self.on_cmd = self.sched.post_at_tick(next_beat, self._noteon)
def stop(self):
if not self.playing:
return
self.playing = False
# in case there is a note on hanging, turn it off immediately
if self.off_cmd:
self.off_cmd.execute()
# cancel anything pending in the future.
self.sched.remove(self.on_cmd)
self.sched.remove(self.off_cmd)
# reset these so we don't have a reference to old commands.
self.on_cmd = None
self.off_cmd = None
def toggle(self):
if self.playing:
self.stop()
else:
self.start()
def _noteon(self, tick, ignore):
# play the note right now:
self.synth.noteon(self.channel, self.pitch, 100)
# post the note off for half a beat later:
self.off_cmd = self.sched.post_at_tick(tick + self.beat_len/2, self._noteoff, self.pitch)
# schedule the next noteon for one beat later
next_beat = tick + self.beat_len
self.on_cmd = self.sched.post_at_tick(next_beat, self._noteon)
def _noteoff(self, tick, pitch):
# just turn off the currently sounding note.
self.synth.noteoff(self.channel, pitch)
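# A minimal usage sketch (assumes ``sched`` and ``synth`` objects providing the
# scheduler and synth interfaces used above, i.e. get_tick/post_at_tick/remove
# and program/noteon/noteoff; the names are illustrative):
#
#   metro = Metronome(sched, synth, channel=0, patch=(128, 0), pitch=60)
#   metro.start()     # posts a note-on at the next beat-aligned tick
#   ...
#   metro.toggle()    # stop (or restart) the click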
| rusch95/calypso | src/common/metro.py | Python | bsd-3-clause | 2,521 |
from django.core import mail
from django.test import TestCase
from users.default_roles import DefaultGroups
from users.models import Invitation, Membership, OCUser
from communities.tests.common import create_sample_community
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
class InvitationTest(TestCase):
def setUp(self):
(self.community, self.members, self.chairmens) = create_sample_community()
def tearDown(self):
mail.outbox = []
def test_send_invitation(self):
i = Invitation.objects.create(community=self.community,
created_by=self.members[0],
email="[email protected]")
i.send(self.members[0])
self.assertEqual(len(mail.outbox), 1)
self.assertIn(self.community.name, mail.outbox[0].subject)
self.assertIn(i.get_absolute_url(), mail.outbox[0].body)
class InvitationViewTest(TestCase):
def setUp(self):
(self.community, self.members, self.chairmen) = create_sample_community()
def tearDown(self):
mail.outbox = []
def post_invite(self, data=None):
if not data:
data = {"email": "[email protected]",
"default_group_name": DefaultGroups.MEMBER,
"message": "the message"}
return self.client.post(reverse("members"
, kwargs={"community_id": self.community.id}),
data)
def login_chairmen(self):
self.client.login(username=self.chairmen[0].email, password="password")
def test_view(self):
self.login_chairmen()
response = self.post_invite({"email": "[email protected]",
"default_group_name": DefaultGroups.MEMBER,
"message": "the message"})
self.assertEqual(Invitation.objects.all().count(), 1)
invitation = Invitation.objects.all()[0]
self.assertEqual(invitation.community, self.community)
self.assertEqual(invitation.created_by, self.chairmen[0])
self.assertEqual(invitation.message, "the message")
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(response.status_code, 200)
#the response is an ajax response the show the user as added
#to the list of members
self.assertIn("delete-invitation", response.content)
self.assertIn("[email protected]", response.content)
def test_no_invite_permission(self):
self.client.login(username=self.members[6].email, password="password")
response = self.post_invite()
self.assertEqual(response.status_code, 403)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(Invitation.objects.all().count(), 0)
def test_bad_email(self):
self.login_chairmen()
response = self.post_invite({"email": "not a real email",
"default_group_name": DefaultGroups.MEMBER,
"message": "the message"})
self.assertEqual(response.status_code, 400)
self.assertEqual(_("Form error. Please supply a valid email."), response.content)
def test_invitee_already_invited(self):
Invitation.objects.create(community=self.community,
created_by=self.chairmen[0],
email="[email protected]")
self.login_chairmen()
response = self.post_invite()
self.assertEqual(response.status_code, 400)
self.assertEqual(_("This user is already invited to this community."), response.content)
def test_invitee_already_a_member(self):
u = OCUser.objects.create_user("[email protected]",
"sample user", password="password")
Membership.objects.create(user=u, community=self.community, default_group_name=DefaultGroups.MEMBER)
self.login_chairmen()
response = self.post_invite()
self.assertEqual(response.status_code, 400)
self.assertEqual(_("This user already a member of this community."), response.content) | hasadna/OpenCommunity | src/users/tests/invitation_test.py | Python | bsd-3-clause | 4,187 |
"""
gatefilter.py
Driver function that creates an ARTView display and initiates Gatefilter.
"""
import os
import sys
from ..core import Variable, QtGui, QtCore
from ..components import RadarDisplay, Menu
from ._common import _add_all_advanced_tools, _parse_dir, _parse_field
from ..plugins import GateFilter
def run(DirIn=None, filename=None, field=None):
"""
artview execution for filtering gates radar display
"""
DirIn = _parse_dir(DirIn)
app = QtGui.QApplication(sys.argv)
# start Menu and initiate Vradar
menu = Menu(DirIn, filename, mode=("Radar",), name="Menu")
Vradar = menu.Vradar
field = _parse_field(Vradar.value, field)
# start Displays
Vtilt = Variable(0)
plot1 = RadarDisplay(Vradar, Variable(field), Vtilt, name="Display",
parent=menu)
filt = GateFilter(Vradar=Vradar, Vgatefilter=plot1.Vgatefilter,
name="GateFilter", parent=None)
# plot1._gatefilter_toggle_on()
menu.addLayoutWidget(filt)
# add graphical starts
_add_all_advanced_tools(menu)
menu.setGeometry(0, 0, 500, 300)
# start program
app.exec_()
| jjhelmus/artview | artview/scripts/gatefilter.py | Python | bsd-3-clause | 1,159 |
import logging
def same_ocr_mrz(mrz_data, zones):
last_name_is_valid = mrz_data["last_name"][:25] == zones["last_name"]["value"][:25]
logging.debug(
"MRZ last name: {}; OCR last name: {}; matching {}".format(
mrz_data["last_name"], zones["last_name"]["value"], last_name_is_valid
)
)
first_names_ocr = zones["first_name"]["value"].split()
first_names_ocr_joined = "".join(first_names_ocr)
first_names_mrz = mrz_data["first_name"].split()
first_names_mrz_joined = "".join(first_names_mrz)
length_checked = min([
len(first_names_mrz_joined),
12
])
first_name_is_valid = first_names_mrz_joined[:length_checked] == first_names_ocr_joined[:length_checked]
logging.debug(
"MRZ first names: {}; OCR first names: {}; matching {}".format(
first_names_mrz, first_names_ocr, first_name_is_valid
)
)
return last_name_is_valid and first_name_is_valid
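# A minimal usage sketch (field values are illustrative): ``mrz_data`` carries
# plain strings while each OCR zone wraps its text in a dict under "value".
# With matching names, as below, the call returns True.
#
#   mrz_data = {"last_name": "DUPONT", "first_name": "MARIE CLAIRE"}
#   zones = {
#       "last_name": {"value": "DUPONT"},
#       "first_name": {"value": "MARIE CLAIRE"},
#   }
#   same_ocr_mrz(mrz_data, zones)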
| LouisTrezzini/projet-mairie | api/franceocr/check_mrz_ocr.py | Python | bsd-3-clause | 968 |
#!/usr/bin/env python3
import predict
import sklearn.metrics
import argparse, sys
import os
import numpy as np
import glob
import re
import matplotlib.pyplot as plt
def calc_auc(predictions):
y_true =[]
y_score=[]
for line in predictions:
values= line.split(" ")
y_true.append(float(values[1]))
y_score.append(float(values[0]))
auc = sklearn.metrics.roc_auc_score(y_true,y_score)
return auc
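# Each prediction line is expected to start with "<score> <label> ...", i.e. the
# predicted score first and the true 0/1 label second; any further fields are
# ignored. An illustrative line:
#
#   "0.87 1 ligand_name"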
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='bootstrap(sampling with replacement) test')
parser.add_argument('-m','--model',type=str,required=True,help="Model template. Must use TESTFILE with unshuffled, unbalanced input")
parser.add_argument('-w','--weights',type=str,required=True,help="Model weights (.caffemodel)")
parser.add_argument('-i','--input',type=str,required=True,help="Input .types file to predict")
parser.add_argument('-g','--gpu',type=int,help='Specify GPU to run on',default=-1)
parser.add_argument('-o','--output',type=str,default='',help='Output file name,default= predict_[model]_[input]')
parser.add_argument('--iterations',type=int,default=1000,help="number of times to bootstrap")
parser.add_argument('-k','--keep',action='store_true',default=False,help="Don't delete prototxt files")
parser.add_argument('-n', '--number',action='store_true',default=False,help="if true uses caffemodel/input as is. if false uses all folds")
parser.add_argument('--max_score',action='store_true',default=False,help="take max score per ligand as its score")
parser.add_argument('--notcalc_predictions', type=str, default='',help='file of predictions')
args = parser.parse_args()
if args.output == '':
output = 'bootstrap_%s_%s'%(args.model, args.input)
else:
output = args.output
outname=output
predictions=[]
if args.notcalc_predictions=='':
cm = args.weights
ts = args.input
if not args.number:
foldnum = re.search('.[0-9]_iter',cm).group()
cm=cm.replace(foldnum, '.[0-9]_iter')
foldnum = re.search('[0-9].types',ts).group()
ts=ts.replace(foldnum, '[NUMBER].types')
for caffemodel in glob.glob(cm):
testset = ts
if not args.number:
num = re.search('.[0-9]_iter',caffemodel).group()
num=re.search(r'\d+', num).group()
testset = ts.replace('[NUMBER]',num)
args.input = testset
args.weights = caffemodel
predictions.extend(predict.predict_lines(args))
elif args.notcalc_predictions != '':
for line in open(args.notcalc_predictions).readlines():
predictions.append(line)
all_aucs=[]
for _ in range(args.iterations):
sample = np.random.choice(predictions,len(predictions), replace=True)
all_aucs.append(calc_auc(sample))
mean=np.mean(all_aucs)
std_dev = np.std(all_aucs)
txt = 'mean: %.2f standard deviation: %.2f'%(mean,std_dev)
print(txt)
output = open(output, 'w')
output.writelines('%.2f\n' %auc for auc in all_aucs)
output.write(txt)
output.close()
plt.figure()
plt.boxplot(all_aucs,0,'rs',0)
plt.title('%s AUCs'%args.output, fontsize=22)
plt.xlabel('AUC(%s)'%txt, fontsize=18)
plt.savefig('%s_plot.pdf'%outname,bbox_inches='tight')
| gnina/scripts | bootstrap.py | Python | bsd-3-clause | 3,080 |
import re
from crispy_forms import layout
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.http import HttpResponseRedirect, Http404
from django.utils.translation import gettext_lazy
from cradmin_legacy.crispylayouts import PrimarySubmit
from cradmin_legacy.viewhelpers import formbase
from devilry.devilry_account.models import PermissionGroup
class AbstractTypeInUsersView(formbase.FormView):
users_blob_split_pattern = re.compile(r'[,;\s]+')
create_button_label = gettext_lazy('Save')
template_name = 'devilry_admin/common/abstract-type-in-users.django.html'
def dispatch(self, request, *args, **kwargs):
requestuser_devilryrole = request.cradmin_instance.get_devilryrole_for_requestuser()
if requestuser_devilryrole != PermissionGroup.GROUPTYPE_DEPARTMENTADMIN:
raise Http404()
return super(AbstractTypeInUsersView, self).dispatch(request=request, *args, **kwargs)
def get_backlink_url(self):
raise NotImplementedError()
def get_backlink_label(self):
raise NotImplementedError()
@classmethod
def split_users_blob(cls, users_blob):
"""
Split the given string of users by ``,`` and whitespace.
Returns a set.
"""
users_blob_split = cls.users_blob_split_pattern.split(users_blob)
if len(users_blob_split) == 0:
return []
if users_blob_split[0] == '':
del users_blob_split[0]
if len(users_blob_split) > 0 and users_blob_split[-1] == '':
del users_blob_split[-1]
return set(users_blob_split)
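    # For example, split_users_blob("[email protected], [email protected]\[email protected]")
    # returns {'[email protected]', '[email protected]', '[email protected]'}.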
def __get_users_blob_help_text(self):
if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
return gettext_lazy('Type or paste in email addresses separated by comma (","), space or one user on each line.')
else:
return gettext_lazy('Type or paste in usernames separated by comma (","), space or one user on each line.')
def __get_users_blob_placeholder(self):
if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
return gettext_lazy('[email protected]\[email protected]')
else:
return gettext_lazy('jane\njohn')
def get_form_class(self):
users_blob_help_text = self.__get_users_blob_help_text()
class UserImportForm(forms.Form):
users_blob = forms.CharField(
widget=forms.Textarea,
required=True,
help_text=users_blob_help_text
)
def __validate_users_blob_emails(self, emails):
invalid_emails = []
for email in emails:
try:
validate_email(email)
except ValidationError:
invalid_emails.append(email)
if invalid_emails:
self.add_error(
'users_blob',
gettext_lazy('Invalid email addresses: %(emails)s') % {
'emails': ', '.join(sorted(invalid_emails))
}
)
def __validate_users_blob_usernames(self, usernames):
valid_username_pattern = re.compile(
getattr(settings, 'DEVILRY_VALID_USERNAME_PATTERN', r'^[a-z0-9]+$'))
invalid_usernames = []
for username in usernames:
if not valid_username_pattern.match(username):
invalid_usernames.append(username)
if invalid_usernames:
self.add_error(
'users_blob',
gettext_lazy('Invalid usernames: %(usernames)s') % {
'usernames': ', '.join(sorted(invalid_usernames))
}
)
def clean(self):
cleaned_data = super(UserImportForm, self).clean()
users_blob = cleaned_data.get('users_blob', None)
if users_blob:
users = AbstractTypeInUsersView.split_users_blob(users_blob)
if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
self.__validate_users_blob_emails(emails=users)
else:
self.__validate_users_blob_usernames(usernames=users)
self.cleaned_users_set = users
return UserImportForm
def get_field_layout(self):
return [
layout.Div(
layout.Field('users_blob', placeholder=self.__get_users_blob_placeholder()),
css_class='cradmin-globalfields cradmin-legacy-formfield-label-sr-only')
]
def get_buttons(self):
return [
PrimarySubmit('save', self.create_button_label),
]
def get_success_url(self):
return self.request.cradmin_app.reverse_appindexurl()
def import_users_from_emails(self, emails):
raise NotImplementedError()
def import_users_from_usernames(self, usernames):
raise NotImplementedError()
def form_valid(self, form):
if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
self.import_users_from_emails(emails=form.cleaned_users_set)
else:
self.import_users_from_usernames(usernames=form.cleaned_users_set)
return HttpResponseRedirect(str(self.get_success_url()))
def get_context_data(self, **kwargs):
context = super(AbstractTypeInUsersView, self).get_context_data(**kwargs)
context['backlink_url'] = self.get_backlink_url()
context['backlink_label'] = self.get_backlink_label()
context['uses_email_auth_backend'] = settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND
return context
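# A minimal subclass sketch (class name, label and import behaviour are
# illustrative, not part of the actual codebase): a concrete view only needs to
# provide the backlink and the user-import hooks.
#
#   class InviteUsersView(AbstractTypeInUsersView):
#       def get_backlink_url(self):
#           return self.request.cradmin_app.reverse_appindexurl()
#
#       def get_backlink_label(self):
#           return gettext_lazy('Back to overview')
#
#       def import_users_from_emails(self, emails):
#           ...  # look up or create a user for each email address
#
#       def import_users_from_usernames(self, usernames):
#           ...  # look up or create a user for each username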
| devilry/devilry-django | devilry/devilry_admin/views/common/bulkimport_users_common.py | Python | bsd-3-clause | 5,904 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "unyt-"
cfg.versionfile_source = "unyt/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
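# For example, with parentdir_prefix "unyt-", an sdist unpacked into a directory
# named "unyt-1.2.3" yields {"version": "1.2.3", ...} (the version string here
# is illustrative).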
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
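# Note (added for clarity): for a repository whose latest reachable tag is, say,
# "unyt-1.0.3", two commits later and with local edits, the "git describe"
# invocation above prints something like "unyt-1.0.3-2-gabc1234-dirty", which
# git_pieces_from_vcs() splits into closest-tag, distance, short hash and dirty.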
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
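# Illustrative sketch (added; not part of versioneer). The "pieces" dict below
# mirrors the keys produced by git_pieces_from_vcs(); the values are made up.
def _example_render_pep440():
    pieces = {"closest-tag": "1.2.3", "distance": 4, "short": "abc1234",
              "dirty": True, "error": None, "long": "abc1234" * 6}
    # Four commits past the 1.2.3 tag with local modifications renders as
    # "1.2.3+4.gabc1234.dirty".
    return render_pep440(pieces)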
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| yt-project/unyt | unyt/_version.py | Python | bsd-3-clause | 18,446 |
"""An example of a python script which can be executed by the task queue
"""
import sys
def execute():
"""Simply write the python executable
"""
sys.stdout.write(sys.executable)
if __name__ == '__main__':
execute()
| quantmind/pulsar-queue | tests/example/executable.py | Python | bsd-3-clause | 235 |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db import models
from django.template.context import Context
from django.template.loader import get_template_from_string
from django.utils import timezone
import swapper
class BaseLogEntry(models.Model):
"""
A base class that implements the interface necessary to log an event.
The render method returns a representation of this event.
"""
event = models.ForeignKey(swapper.get_model_name('loggit', 'LogEvent'),
                              related_name='%(class)s')
created_ts = models.DateTimeField(default=timezone.now)
def render(self, **kwargs):
"""
        A method to render this entry. It can be overridden to provide custom
        behavior, for example to return a different rendering depending on who is
        viewing the entry or when it is being viewed.
This default rendering just returns the event's rendering.
"""
return self.event.render(self, **kwargs)
class Meta:
abstract = True
class ActorMixin(models.Model):
actor = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,
related_name="(classname)%s")
class Meta:
abstract = True
class LogEntry(BaseLogEntry, ActorMixin):
class Meta(BaseLogEntry.Meta):
swappable = swapper.swappable_setting('loggit', 'LogEntry')
class BaseLogEvent(models.Model):
"""
The base class that defines some event happening.
"""
name = models.CharField(max_length=255)
class Meta:
abstract = True
def __unicode__(self):
return self.name
def get_context(self, **kwargs):
return Context(kwargs)
def render(self, entry, **kwargs):
"""
Render method for a log event. This base method just returns the name.
"""
return unicode(self.name)
class TemplateLogEvent(BaseLogEvent):
"""
A subclass of BaseLogEvent that renders a template using django's
templating engine. The current entry is added to the context that is passed
to the template.
"""
template = models.TextField()
class Meta:
abstract = True
def render(self, entry, **kwargs):
kwargs['entry'] = entry
context = self.get_context(**kwargs)
return get_template_from_string(self.template).render(context)
class LogEvent(TemplateLogEvent):
class Meta(TemplateLogEvent.Meta):
swappable = swapper.swappable_setting('loggit', 'LogEvent')
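# Illustrative sketch (added; not part of django-loggit). It shows how the default
# swappable models might be used together; the event name and template are made up
# and a configured database is assumed.
def _example_log_usage():
    event = LogEvent.objects.create(
        name="user-login",
        template="{{ entry.event.name }} at {{ entry.created_ts }}")
    entry = LogEntry.objects.create(event=event)
    return entry.render()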
# Optional models using django-genericm2m that allow attaching of objects to
# a log event
try:
from collections import defaultdict
from genericm2m.models import RelatedObjectsDescriptor
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
class RelatedObject(models.Model):
log_entry = models.ForeignKey(swapper.get_model_name('loggit', 'LogEntry'),
related_name='log_entries')
# ACTUAL RELATED OBJECT:
content_type = models.ForeignKey(ContentType, related_name="related_%(class)s")
object_id = models.IntegerField(db_index=True)
object = GenericForeignKey(fk_field="object_id")
label = models.CharField(max_length=255)
class M2MLogEntryMixin(models.Model):
related = RelatedObjectsDescriptor(RelatedObject, 'log_entry', 'object')
class Meta:
abstract = True
class M2MLogEventMixin(object):
def get_context(self, **kwargs):
entry = kwargs.pop('entry')
models_context = defaultdict(list)
for relation in entry.related.order_by('label'):
models_context[relation.label].append(relation.object)
return super(M2MLogEventMixin, self).get_context(**models_context)
except ImportError:
pass
| jpulec/django-loggit | loggit/models.py | Python | bsd-3-clause | 3,864 |
# Copyright The IETF Trust 2008, All Rights Reserved
from django.conf.urls.defaults import patterns, include
from ietf.wginfo import views, edit, milestones
from django.views.generic.simple import redirect_to
urlpatterns = patterns('',
(r'^$', views.wg_dir),
(r'^summary.txt', redirect_to, { 'url':'/wg/1wg-summary.txt' }),
(r'^summary-by-area.txt', redirect_to, { 'url':'/wg/1wg-summary.txt' }),
(r'^summary-by-acronym.txt', redirect_to, { 'url':'/wg/1wg-summary-by-acronym.txt' }),
(r'^1wg-summary.txt', views.wg_summary_area),
(r'^1wg-summary-by-acronym.txt', views.wg_summary_acronym),
(r'^1wg-charters.txt', views.wg_charters),
(r'^1wg-charters-by-acronym.txt', views.wg_charters_by_acronym),
(r'^chartering/$', views.chartering_wgs),
(r'^bofs/$', views.bofs),
(r'^chartering/create/$', edit.edit, {'action': "charter"}, "wg_create"),
(r'^bofs/create/$', edit.edit, {'action': "create"}, "bof_create"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/documents/txt/$', views.wg_documents_txt),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/$', views.wg_documents_html, None, "wg_docs"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/charter/$', views.wg_charter, None, 'wg_charter'),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/init-charter/', edit.submit_initial_charter, None, "wg_init_charter"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/history/$', views.history),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/edit/$', edit.edit, {'action': "edit"}, "wg_edit"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/conclude/$', edit.conclude, None, "wg_conclude"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/milestones/$', milestones.edit_milestones, {'milestone_set': "current"}, "wg_edit_milestones"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/milestones/charter/$', milestones.edit_milestones, {'milestone_set': "charter"}, "wg_edit_charter_milestones"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/milestones/charter/reset/$', milestones.reset_charter_milestones, None, "wg_reset_charter_milestones"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/ajax/searchdocs/$', milestones.ajax_search_docs, None, "wg_ajax_search_docs"),
(r'^(?P<acronym>[^/]+)/management/', include('ietf.wgchairs.urls')),
)
| mcr/ietfdb | ietf/wginfo/urls.py | Python | bsd-3-clause | 2,177 |
# -*-coding:Utf-8 -*
# Copyright (c) 2012 NOEL-BARON Léo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'recettes' et ses sous-commandes.
Dans ce fichier se trouve la commande même.
"""
from primaires.interpreteur.commande.commande import Commande
from .editer import PrmEditer
from .lister import PrmLister
from .supprimer import PrmSupprimer
class CmdRecettes(Commande):
"""Commande 'recettes'.
"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "recettes", "recipes")
self.groupe = "administrateur"
self.aide_courte = "manipulation des recettes"
self.aide_longue = \
""
def ajouter_parametres(self):
"""Ajout des paramètres"""
self.ajouter_parametre(PrmEditer())
self.ajouter_parametre(PrmLister())
self.ajouter_parametre(PrmSupprimer())
| stormi/tsunami | src/secondaires/cuisine/commandes/recettes/__init__.py | Python | bsd-3-clause | 2,392 |
# Helper for the mirror on GAE
# GAE GETs an action gae_file, giving GAE host and a secret
# PyPI GETs /mkupload/secret, learning path and upload session
# PyPI POSTs to upload session
import urllib2, httplib, threading, os, binascii, urlparse
POST="""\
--%(boundary)s
Content-Disposition: form-data; name="secret"
%(secret)s
--%(boundary)s
Content-Disposition: form-data; name="path"
%(path)s
--%(boundary)s
Content-Disposition: form-data; name="file"; filename="%(path)s"
Content-Type: application/octet-stream
%(data)s
--%(boundary)s
"""
POST = "\r\n".join(POST.splitlines())+"\r\n"
def doit(host, secret, srcdir):
x = urllib2.urlopen('http://%s/mkupload/%s' % (host, secret))
if x.code != 200:
return
path,url = x.read().splitlines()
host, session = urlparse.urlsplit(url)[1:3]
try:
data = open(srcdir+"/"+path).read()
except IOError, e:
return
boundary = ""
while boundary in data:
boundary = binascii.hexlify(os.urandom(10))
body = POST % locals()
if ':' in host:
host, port = host.split(':')
else:
port = 80
c = httplib.HTTPConnection(host, port)
c.request('POST', session,
headers = {'Content-type':'multipart/form-data; boundary='+boundary,
'Content-length':str(len(body)),
'Host':host},
body=body)
resp = c.getresponse()
data = resp.read()
# result code should be redirect
c.close()
def transfer(host, secret, srcdir):
secret = secret.encode('ascii')
t = threading.Thread(target=doit, args=(host, secret, srcdir))
t.start()
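# Illustrative usage (added; not part of the mirror helper): the web application
# would call transfer() once the GAE mirror announces itself; the host, secret and
# package directory below are made up.
#
#   transfer('mirror.example.appspot.com', 'shared-secret', '/var/pypi/packages')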
| ericholscher/pypi | gae.py | Python | bsd-3-clause | 1,646 |
from __future__ import unicode_literals
import time
from django.conf import settings
from django.test import TestCase
from django.test.client import FakePayload, Client
from django.utils.encoding import force_text
from tastefulpy.serializers import Serializer
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class TestApiClient(object):
def __init__(self, serializer=None):
"""
Sets up a fresh ``TestApiClient`` instance.
If you are employing a custom serializer, you can pass the class to the
``serializer=`` kwarg.
"""
self.client = Client()
self.serializer = serializer
if not self.serializer:
self.serializer = Serializer()
def get_content_type(self, short_format):
"""
Given a short name (such as ``json`` or ``xml``), returns the full content-type
for it (``application/json`` or ``application/xml`` in this case).
"""
return self.serializer.content_types.get(short_format, 'json')
def get(self, uri, format='json', data=None, authentication=None, **kwargs):
"""
Performs a simulated ``GET`` request to the provided URI.
Optionally accepts a ``data`` kwarg, which in the case of ``GET``, lets you
send along ``GET`` parameters. This is useful when testing filtering or other
things that read off the ``GET`` params. Example::
from tastefulpy.test import TestApiClient
client = TestApiClient()
response = client.get('/api/v1/entry/1/', data={'format': 'json', 'title__startswith': 'a', 'limit': 20, 'offset': 60})
Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
with the correct authentication data already setup.
All other ``**kwargs`` passed in get passed through to the Django
``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
for details.
"""
content_type = self.get_content_type(format)
kwargs['HTTP_ACCEPT'] = content_type
# GET & DELETE are the only times we don't serialize the data.
if data is not None:
kwargs['data'] = data
if authentication is not None:
kwargs['HTTP_AUTHORIZATION'] = authentication
return self.client.get(uri, **kwargs)
def post(self, uri, format='json', data=None, authentication=None, **kwargs):
"""
Performs a simulated ``POST`` request to the provided URI.
Optionally accepts a ``data`` kwarg. **Unlike** ``GET``, in ``POST`` the
``data`` gets serialized & sent as the body instead of becoming part of the URI.
Example::
from tastefulpy.test import TestApiClient
client = TestApiClient()
response = client.post('/api/v1/entry/', data={
'created': '2012-05-01T20:02:36',
'slug': 'another-post',
'title': 'Another Post',
'user': '/api/v1/user/1/',
})
Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
with the correct authentication data already setup.
All other ``**kwargs`` passed in get passed through to the Django
``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
for details.
"""
content_type = self.get_content_type(format)
kwargs['content_type'] = content_type
if data is not None:
kwargs['data'] = self.serializer.serialize(data, format=content_type)
if authentication is not None:
kwargs['HTTP_AUTHORIZATION'] = authentication
return self.client.post(uri, **kwargs)
def put(self, uri, format='json', data=None, authentication=None, **kwargs):
"""
Performs a simulated ``PUT`` request to the provided URI.
Optionally accepts a ``data`` kwarg. **Unlike** ``GET``, in ``PUT`` the
``data`` gets serialized & sent as the body instead of becoming part of the URI.
Example::
from tastefulpy.test import TestApiClient
client = TestApiClient()
response = client.put('/api/v1/entry/1/', data={
'created': '2012-05-01T20:02:36',
'slug': 'another-post',
'title': 'Another Post',
'user': '/api/v1/user/1/',
})
Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
with the correct authentication data already setup.
All other ``**kwargs`` passed in get passed through to the Django
``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
for details.
"""
content_type = self.get_content_type(format)
kwargs['content_type'] = content_type
if data is not None:
kwargs['data'] = self.serializer.serialize(data, format=content_type)
if authentication is not None:
kwargs['HTTP_AUTHORIZATION'] = authentication
return self.client.put(uri, **kwargs)
def patch(self, uri, format='json', data=None, authentication=None, **kwargs):
"""
Performs a simulated ``PATCH`` request to the provided URI.
Optionally accepts a ``data`` kwarg. **Unlike** ``GET``, in ``PATCH`` the
``data`` gets serialized & sent as the body instead of becoming part of the URI.
Example::
from tastefulpy.test import TestApiClient
client = TestApiClient()
response = client.patch('/api/v1/entry/1/', data={
'created': '2012-05-01T20:02:36',
'slug': 'another-post',
'title': 'Another Post',
'user': '/api/v1/user/1/',
})
Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
with the correct authentication data already setup.
All other ``**kwargs`` passed in get passed through to the Django
``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
for details.
"""
content_type = self.get_content_type(format)
kwargs['content_type'] = content_type
if data is not None:
kwargs['data'] = self.serializer.serialize(data, format=content_type)
if authentication is not None:
kwargs['HTTP_AUTHORIZATION'] = authentication
# This hurts because Django doesn't support PATCH natively.
parsed = urlparse(uri)
r = {
'CONTENT_LENGTH': len(kwargs['data']),
'CONTENT_TYPE': content_type,
'PATH_INFO': self.client._get_path(parsed),
'QUERY_STRING': parsed[4],
'REQUEST_METHOD': 'PATCH',
'wsgi.input': FakePayload(kwargs['data']),
}
r.update(kwargs)
return self.client.request(**r)
def delete(self, uri, format='json', data=None, authentication=None, **kwargs):
"""
Performs a simulated ``DELETE`` request to the provided URI.
Optionally accepts a ``data`` kwarg, which in the case of ``DELETE``, lets you
send along ``DELETE`` parameters. This is useful when testing filtering or other
things that read off the ``DELETE`` params. Example::
from tastefulpy.test import TestApiClient
client = TestApiClient()
response = client.delete('/api/v1/entry/1/', data={'format': 'json'})
Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
with the correct authentication data already setup.
All other ``**kwargs`` passed in get passed through to the Django
``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
for details.
"""
content_type = self.get_content_type(format)
kwargs['content_type'] = content_type
# GET & DELETE are the only times we don't serialize the data.
if data is not None:
kwargs['data'] = data
if authentication is not None:
kwargs['HTTP_AUTHORIZATION'] = authentication
return self.client.delete(uri, **kwargs)
class ResourceTestCase(TestCase):
"""
A useful base class for the start of testing Tastefulpy APIs.
"""
def setUp(self):
super(ResourceTestCase, self).setUp()
self.serializer = Serializer()
self.api_client = TestApiClient()
def get_credentials(self):
"""
A convenience method for the user as a way to shorten up the
often repetitious calls to create the same authentication.
Raises ``NotImplementedError`` by default.
Usage::
class MyResourceTestCase(ResourceTestCase):
def get_credentials(self):
return self.create_basic('daniel', 'pass')
# Then the usual tests...
"""
raise NotImplementedError("You must return the class for your Resource to test.")
def create_basic(self, username, password):
"""
Creates & returns the HTTP ``Authorization`` header for use with BASIC
Auth.
"""
import base64
return 'Basic %s' % base64.b64encode(':'.join([username, password]).encode('utf-8')).decode('utf-8')
def create_apikey(self, username, api_key):
"""
Creates & returns the HTTP ``Authorization`` header for use with
``ApiKeyAuthentication``.
"""
return 'ApiKey %s:%s' % (username, api_key)
def create_digest(self, username, api_key, method, uri):
"""
Creates & returns the HTTP ``Authorization`` header for use with Digest
Auth.
"""
from tastefulpy.authentication import hmac, sha1, uuid, python_digest
new_uuid = uuid.uuid4()
opaque = hmac.new(str(new_uuid).encode('utf-8'), digestmod=sha1).hexdigest().decode('utf-8')
return python_digest.build_authorization_request(
username,
method.upper(),
uri,
1, # nonce_count
digest_challenge=python_digest.build_digest_challenge(time.time(), getattr(settings, 'SECRET_KEY', ''), 'django-tastefulpy', opaque, False),
password=api_key
)
def create_oauth(self, user):
"""
Creates & returns the HTTP ``Authorization`` header for use with Oauth.
"""
from oauth_provider.models import Consumer, Token, Resource
# Necessary setup for ``oauth_provider``.
resource, _ = Resource.objects.get_or_create(url='test', defaults={
'name': 'Test Resource'
})
consumer, _ = Consumer.objects.get_or_create(key='123', defaults={
'name': 'Test',
'description': 'Testing...'
})
token, _ = Token.objects.get_or_create(key='foo', token_type=Token.ACCESS, defaults={
'consumer': consumer,
'resource': resource,
'secret': '',
'user': user,
})
# Then generate the header.
oauth_data = {
'oauth_consumer_key': '123',
'oauth_nonce': 'abc',
'oauth_signature': '&',
'oauth_signature_method': 'PLAINTEXT',
'oauth_timestamp': str(int(time.time())),
'oauth_token': 'foo',
}
return 'OAuth %s' % ','.join([key+'='+value for key, value in oauth_data.items()])
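    # Illustrative sketch (added; not part of tastefulpy): a minimal subclass using
    # the credential helpers above. The endpoint, username and password are made up.
    #
    #   class EntryResourceTest(ResourceTestCase):
    #       def test_get_list(self):
    #           resp = self.api_client.get(
    #               '/api/v1/entry/', format='json',
    #               authentication=self.create_basic('daniel', 'pass'))
    #           self.assertValidJSONResponse(resp)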
def assertHttpOK(self, resp):
"""
Ensures the response is returning a HTTP 200.
"""
return self.assertEqual(resp.status_code, 200)
def assertHttpCreated(self, resp):
"""
Ensures the response is returning a HTTP 201.
"""
return self.assertEqual(resp.status_code, 201)
def assertHttpAccepted(self, resp):
"""
Ensures the response is returning either a HTTP 202 or a HTTP 204.
"""
return self.assertIn(resp.status_code, [202, 204])
def assertHttpMultipleChoices(self, resp):
"""
Ensures the response is returning a HTTP 300.
"""
return self.assertEqual(resp.status_code, 300)
def assertHttpSeeOther(self, resp):
"""
Ensures the response is returning a HTTP 303.
"""
return self.assertEqual(resp.status_code, 303)
def assertHttpNotModified(self, resp):
"""
Ensures the response is returning a HTTP 304.
"""
return self.assertEqual(resp.status_code, 304)
def assertHttpBadRequest(self, resp):
"""
Ensures the response is returning a HTTP 400.
"""
return self.assertEqual(resp.status_code, 400)
def assertHttpUnauthorized(self, resp):
"""
Ensures the response is returning a HTTP 401.
"""
return self.assertEqual(resp.status_code, 401)
def assertHttpForbidden(self, resp):
"""
Ensures the response is returning a HTTP 403.
"""
return self.assertEqual(resp.status_code, 403)
def assertHttpNotFound(self, resp):
"""
Ensures the response is returning a HTTP 404.
"""
return self.assertEqual(resp.status_code, 404)
def assertHttpMethodNotAllowed(self, resp):
"""
Ensures the response is returning a HTTP 405.
"""
return self.assertEqual(resp.status_code, 405)
def assertHttpConflict(self, resp):
"""
Ensures the response is returning a HTTP 409.
"""
return self.assertEqual(resp.status_code, 409)
def assertHttpGone(self, resp):
"""
Ensures the response is returning a HTTP 410.
"""
return self.assertEqual(resp.status_code, 410)
def assertHttpUnprocessableEntity(self, resp):
"""
Ensures the response is returning a HTTP 422.
"""
return self.assertEqual(resp.status_code, 422)
def assertHttpTooManyRequests(self, resp):
"""
Ensures the response is returning a HTTP 429.
"""
return self.assertEqual(resp.status_code, 429)
def assertHttpApplicationError(self, resp):
"""
Ensures the response is returning a HTTP 500.
"""
return self.assertEqual(resp.status_code, 500)
def assertHttpNotImplemented(self, resp):
"""
Ensures the response is returning a HTTP 501.
"""
return self.assertEqual(resp.status_code, 501)
def assertValidJSON(self, data):
"""
Given the provided ``data`` as a string, ensures that it is valid JSON &
can be loaded properly.
"""
# Just try the load. If it throws an exception, the test case will fail.
self.serializer.from_json(data)
def assertValidXML(self, data):
"""
Given the provided ``data`` as a string, ensures that it is valid XML &
can be loaded properly.
"""
# Just try the load. If it throws an exception, the test case will fail.
self.serializer.from_xml(data)
def assertValidYAML(self, data):
"""
Given the provided ``data`` as a string, ensures that it is valid YAML &
can be loaded properly.
"""
# Just try the load. If it throws an exception, the test case will fail.
self.serializer.from_yaml(data)
def assertValidPlist(self, data):
"""
Given the provided ``data`` as a string, ensures that it is valid
binary plist & can be loaded properly.
"""
# Just try the load. If it throws an exception, the test case will fail.
self.serializer.from_plist(data)
def assertValidJSONResponse(self, resp):
"""
Given a ``HttpResponse`` coming back from using the ``client``, assert that
you get back:
* An HTTP 200
* The correct content-type (``application/json``)
* The content is valid JSON
"""
self.assertHttpOK(resp)
self.assertTrue(resp['Content-Type'].startswith('application/json'))
self.assertValidJSON(force_text(resp.content))
def assertValidXMLResponse(self, resp):
"""
Given a ``HttpResponse`` coming back from using the ``client``, assert that
you get back:
* An HTTP 200
* The correct content-type (``application/xml``)
* The content is valid XML
"""
self.assertHttpOK(resp)
self.assertTrue(resp['Content-Type'].startswith('application/xml'))
self.assertValidXML(force_text(resp.content))
def assertValidYAMLResponse(self, resp):
"""
Given a ``HttpResponse`` coming back from using the ``client``, assert that
you get back:
* An HTTP 200
* The correct content-type (``text/yaml``)
* The content is valid YAML
"""
self.assertHttpOK(resp)
self.assertTrue(resp['Content-Type'].startswith('text/yaml'))
self.assertValidYAML(force_text(resp.content))
def assertValidPlistResponse(self, resp):
"""
Given a ``HttpResponse`` coming back from using the ``client``, assert that
you get back:
* An HTTP 200
* The correct content-type (``application/x-plist``)
* The content is valid binary plist data
"""
self.assertHttpOK(resp)
self.assertTrue(resp['Content-Type'].startswith('application/x-plist'))
self.assertValidPlist(force_text(resp.content))
def deserialize(self, resp):
"""
Given a ``HttpResponse`` coming back from using the ``client``, this method
checks the ``Content-Type`` header & attempts to deserialize the data based on
that.
It returns a Python datastructure (typically a ``dict``) of the serialized data.
"""
return self.serializer.deserialize(resp.content, format=resp['Content-Type'])
def serialize(self, data, format='application/json'):
"""
Given a Python datastructure (typically a ``dict``) & a desired content-type,
this method will return a serialized string of that data.
"""
return self.serializer.serialize(data, format=format)
def assertKeys(self, data, expected):
"""
This method ensures that the keys of the ``data`` match up to the keys of
``expected``.
It covers the (extremely) common case where you want to make sure the keys of
a response match up to what is expected. This is typically less fragile than
testing the full structure, which can be prone to data changes.
"""
self.assertEqual(sorted(data.keys()), sorted(expected))
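    # Illustrative usage (added; not part of tastefulpy): the structure check is
    # typically applied to a deserialized response; the field names are made up.
    #
    #   data = self.deserialize(resp)
    #   self.assertKeys(data, ['created', 'slug', 'title', 'user'])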
| mjschultz/django-tastefulpy | tastefulpy/test.py | Python | bsd-3-clause | 18,927 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0212
import fcntl
import inspect
import logging
import os
import psutil
import textwrap
from devil import base_error
from devil import devil_env
from devil.android import device_errors
from devil.android.constants import file_system
from devil.android.sdk import adb_wrapper
from devil.android.valgrind_tools import base_tool
from devil.utils import cmd_helper
logger = logging.getLogger(__name__)
# If passed as the device port, this will tell the forwarder to allocate
# a dynamic port on the device. The actual port can then be retrieved with
# Forwarder.DevicePortForHostPort.
DYNAMIC_DEVICE_PORT = 0
def _GetProcessStartTime(pid):
p = psutil.Process(pid)
if inspect.ismethod(p.create_time):
return p.create_time()
else: # Process.create_time is a property in old versions of psutil.
return p.create_time
def _DumpHostLog():
# The host forwarder daemon logs to /tmp/host_forwarder_log, so print the end
# of that.
try:
with open('/tmp/host_forwarder_log') as host_forwarder_log:
logger.info('Last 50 lines of the host forwarder daemon log:')
for line in host_forwarder_log.read().splitlines()[-50:]:
logger.info(' %s', line)
except Exception: # pylint: disable=broad-except
# Grabbing the host forwarder log is best-effort. Ignore all errors.
logger.warning('Failed to get the contents of host_forwarder_log.')
def _LogMapFailureDiagnostics(device):
_DumpHostLog()
# The device forwarder daemon logs to the logcat, so print the end of that.
try:
logger.info('Last 50 lines of logcat:')
for logcat_line in device.adb.Logcat(dump=True)[-50:]:
logger.info(' %s', logcat_line)
except (device_errors.CommandFailedError,
device_errors.DeviceUnreachableError):
# Grabbing the device forwarder log is also best-effort. Ignore all errors.
logger.warning('Failed to get the contents of the logcat.')
# Log alive device forwarders.
try:
ps_out = device.RunShellCommand(['ps'], check_return=True)
logger.info('Currently running device_forwarders:')
for line in ps_out:
if 'device_forwarder' in line:
logger.info(' %s', line)
except (device_errors.CommandFailedError,
device_errors.DeviceUnreachableError):
logger.warning('Failed to list currently running device_forwarder '
'instances.')
class _FileLock(object):
"""With statement-aware implementation of a file lock.
File locks are needed for cross-process synchronization when the
multiprocessing Python module is used.
"""
def __init__(self, path):
self._fd = -1
self._path = path
def __enter__(self):
self._fd = os.open(self._path, os.O_RDONLY | os.O_CREAT)
if self._fd < 0:
raise Exception('Could not open file %s for reading' % self._path)
fcntl.flock(self._fd, fcntl.LOCK_EX)
def __exit__(self, _exception_type, _exception_value, traceback):
fcntl.flock(self._fd, fcntl.LOCK_UN)
os.close(self._fd)
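# Illustrative usage (added; not part of devil): _FileLock is meant to be used as a
# context manager guarding the cross-process critical sections below, e.g.
#
#   with _FileLock(Forwarder._LOCK_PATH):
#       ...  # manipulate the shared host_forwarder state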
class HostForwarderError(base_error.BaseError):
"""Exception for failures involving host_forwarder."""
def __init__(self, message):
super(HostForwarderError, self).__init__(message)
class Forwarder(object):
"""Thread-safe class to manage port forwards from the device to the host."""
_DEVICE_FORWARDER_FOLDER = (file_system.TEST_EXECUTABLE_DIR + '/forwarder/')
_DEVICE_FORWARDER_PATH = (
file_system.TEST_EXECUTABLE_DIR + '/forwarder/device_forwarder')
_LOCK_PATH = '/tmp/chrome.forwarder.lock'
# Defined in host_forwarder_main.cc
_HOST_FORWARDER_LOG = '/tmp/host_forwarder_log'
_TIMEOUT = 60 # seconds
_instance = None
@staticmethod
def Map(port_pairs, device, tool=None):
"""Runs the forwarder.
Args:
port_pairs: A list of tuples (device_port, host_port) to forward. Note
that you can specify 0 as a device_port, in which case a
port will by dynamically assigned on the device. You can
get the number of the assigned port using the
DevicePortForHostPort method.
device: A DeviceUtils instance.
tool: Tool class to use to get wrapper, if necessary, for executing the
forwarder (see valgrind_tools.py).
Raises:
Exception on failure to forward the port.
"""
if not tool:
tool = base_tool.BaseTool()
with _FileLock(Forwarder._LOCK_PATH):
instance = Forwarder._GetInstanceLocked(tool)
instance._InitDeviceLocked(device, tool)
device_serial = str(device)
map_arg_lists = [[
'--adb=' + adb_wrapper.AdbWrapper.GetAdbPath(),
'--serial-id=' + device_serial, '--map',
str(device_port),
str(host_port)
] for device_port, host_port in port_pairs]
logger.info('Forwarding using commands: %s', map_arg_lists)
for map_arg_list in map_arg_lists:
try:
map_cmd = [instance._host_forwarder_path] + map_arg_list
(exit_code, output) = cmd_helper.GetCmdStatusAndOutputWithTimeout(
map_cmd, Forwarder._TIMEOUT)
except cmd_helper.TimeoutError as e:
raise HostForwarderError(
'`%s` timed out:\n%s' % (' '.join(map_cmd), e.output))
except OSError as e:
if e.errno == 2:
raise HostForwarderError('Unable to start host forwarder. '
'Make sure you have built host_forwarder.')
else:
raise
if exit_code != 0:
try:
instance._KillDeviceLocked(device, tool)
except (device_errors.CommandFailedError,
                  device_errors.DeviceUnreachableError) as e:
# We don't want the failure to kill the device forwarder to
# supersede the original failure to map.
logger.warning(
'Failed to kill the device forwarder after map failure: %s',
str(e))
_LogMapFailureDiagnostics(device)
formatted_output = ('\n'.join(output)
if isinstance(output, list) else output)
raise HostForwarderError(
'`%s` exited with %d:\n%s' % (' '.join(map_cmd), exit_code,
formatted_output))
tokens = output.split(':')
if len(tokens) != 2:
raise HostForwarderError('Unexpected host forwarder output "%s", '
'expected "device_port:host_port"' % output)
device_port = int(tokens[0])
host_port = int(tokens[1])
serial_with_port = (device_serial, device_port)
instance._device_to_host_port_map[serial_with_port] = host_port
instance._host_to_device_port_map[host_port] = serial_with_port
logger.info('Forwarding device port: %d to host port: %d.', device_port,
host_port)
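  # Illustrative usage (added; not part of devil). The port numbers are made up:
  #
  #   Forwarder.Map([(DYNAMIC_DEVICE_PORT, 8000)], device)
  #   device_port = Forwarder.DevicePortForHostPort(8000)
  #   Forwarder.UnmapDevicePort(device_port, device)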
@staticmethod
def UnmapDevicePort(device_port, device):
"""Unmaps a previously forwarded device port.
Args:
device: A DeviceUtils instance.
device_port: A previously forwarded port (through Map()).
"""
with _FileLock(Forwarder._LOCK_PATH):
Forwarder._UnmapDevicePortLocked(device_port, device)
@staticmethod
def UnmapAllDevicePorts(device):
"""Unmaps all the previously forwarded ports for the provided device.
Args:
device: A DeviceUtils instance.
"""
with _FileLock(Forwarder._LOCK_PATH):
instance = Forwarder._GetInstanceLocked(None)
unmap_all_cmd = [
instance._host_forwarder_path,
'--adb=%s' % adb_wrapper.AdbWrapper.GetAdbPath(),
'--serial-id=%s' % device.serial, '--unmap-all'
]
try:
exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
unmap_all_cmd, Forwarder._TIMEOUT)
except cmd_helper.TimeoutError as e:
raise HostForwarderError(
'`%s` timed out:\n%s' % (' '.join(unmap_all_cmd), e.output))
if exit_code != 0:
error_msg = [
'`%s` exited with %d' % (' '.join(unmap_all_cmd), exit_code)
]
if isinstance(output, list):
error_msg += output
else:
error_msg += [output]
raise HostForwarderError('\n'.join(error_msg))
# Clean out any entries from the device & host map.
device_map = instance._device_to_host_port_map
host_map = instance._host_to_device_port_map
for device_serial_and_port, host_port in device_map.items():
device_serial = device_serial_and_port[0]
if device_serial == device.serial:
del device_map[device_serial_and_port]
del host_map[host_port]
# Kill the device forwarder.
tool = base_tool.BaseTool()
instance._KillDeviceLocked(device, tool)
@staticmethod
def DevicePortForHostPort(host_port):
"""Returns the device port that corresponds to a given host port."""
with _FileLock(Forwarder._LOCK_PATH):
serial_and_port = Forwarder._GetInstanceLocked(
None)._host_to_device_port_map.get(host_port)
return serial_and_port[1] if serial_and_port else None
@staticmethod
def RemoveHostLog():
if os.path.exists(Forwarder._HOST_FORWARDER_LOG):
os.unlink(Forwarder._HOST_FORWARDER_LOG)
@staticmethod
def GetHostLog():
if not os.path.exists(Forwarder._HOST_FORWARDER_LOG):
return ''
with file(Forwarder._HOST_FORWARDER_LOG, 'r') as f:
return f.read()
@staticmethod
def _GetInstanceLocked(tool):
"""Returns the singleton instance.
Note that the global lock must be acquired before calling this method.
Args:
tool: Tool class to use to get wrapper, if necessary, for executing the
forwarder (see valgrind_tools.py).
"""
if not Forwarder._instance:
Forwarder._instance = Forwarder(tool)
return Forwarder._instance
def __init__(self, tool):
"""Constructs a new instance of Forwarder.
Note that Forwarder is a singleton therefore this constructor should be
called only once.
Args:
tool: Tool class to use to get wrapper, if necessary, for executing the
forwarder (see valgrind_tools.py).
"""
assert not Forwarder._instance
self._tool = tool
self._initialized_devices = set()
self._device_to_host_port_map = dict()
self._host_to_device_port_map = dict()
self._host_forwarder_path = devil_env.config.FetchPath('forwarder_host')
assert os.path.exists(self._host_forwarder_path), 'Please build forwarder2'
self._InitHostLocked()
@staticmethod
def _UnmapDevicePortLocked(device_port, device):
"""Internal method used by UnmapDevicePort().
Note that the global lock must be acquired before calling this method.
"""
instance = Forwarder._GetInstanceLocked(None)
serial = str(device)
serial_with_port = (serial, device_port)
if serial_with_port not in instance._device_to_host_port_map:
logger.error('Trying to unmap non-forwarded port %d', device_port)
return
host_port = instance._device_to_host_port_map[serial_with_port]
del instance._device_to_host_port_map[serial_with_port]
del instance._host_to_device_port_map[host_port]
unmap_cmd = [
instance._host_forwarder_path,
'--adb=%s' % adb_wrapper.AdbWrapper.GetAdbPath(),
'--serial-id=%s' % serial, '--unmap',
str(device_port)
]
try:
(exit_code, output) = cmd_helper.GetCmdStatusAndOutputWithTimeout(
unmap_cmd, Forwarder._TIMEOUT)
except cmd_helper.TimeoutError as e:
raise HostForwarderError(
'`%s` timed out:\n%s' % (' '.join(unmap_cmd), e.output))
if exit_code != 0:
logger.error('`%s` exited with %d:\n%s', ' '.join(unmap_cmd), exit_code,
'\n'.join(output) if isinstance(output, list) else output)
@staticmethod
def _GetPidForLock():
"""Returns the PID used for host_forwarder initialization.
The PID of the "sharder" is used to handle multiprocessing. The "sharder"
is the initial process that forks that is the parent process.
"""
return os.getpgrp()
def _InitHostLocked(self):
"""Initializes the host forwarder daemon.
Note that the global lock must be acquired before calling this method. This
method kills any existing host_forwarder process that could be stale.
"""
# See if the host_forwarder daemon was already initialized by a concurrent
# process or thread (in case multi-process sharding is not used).
    # TODO(crbug.com/762005): Consider using a different implementation; relying
    # on matching the string representation of the process start time seems
# fragile.
pid_for_lock = Forwarder._GetPidForLock()
fd = os.open(Forwarder._LOCK_PATH, os.O_RDWR | os.O_CREAT)
with os.fdopen(fd, 'r+') as pid_file:
pid_with_start_time = pid_file.readline()
if pid_with_start_time:
(pid, process_start_time) = pid_with_start_time.split(':')
if pid == str(pid_for_lock):
if process_start_time == str(_GetProcessStartTime(pid_for_lock)):
return
self._KillHostLocked()
pid_file.seek(0)
pid_file.write(
'%s:%s' % (pid_for_lock, str(_GetProcessStartTime(pid_for_lock))))
pid_file.truncate()
def _InitDeviceLocked(self, device, tool):
"""Initializes the device_forwarder daemon for a specific device (once).
Note that the global lock must be acquired before calling this method. This
method kills any existing device_forwarder daemon on the device that could
be stale, pushes the latest version of the daemon (to the device) and starts
it.
Args:
device: A DeviceUtils instance.
tool: Tool class to use to get wrapper, if necessary, for executing the
forwarder (see valgrind_tools.py).
"""
device_serial = str(device)
if device_serial in self._initialized_devices:
return
try:
self._KillDeviceLocked(device, tool)
except device_errors.CommandFailedError:
logger.warning('Failed to kill device forwarder. Rebooting.')
device.Reboot()
forwarder_device_path_on_host = devil_env.config.FetchPath(
'forwarder_device', device=device)
forwarder_device_path_on_device = (
Forwarder._DEVICE_FORWARDER_FOLDER
if os.path.isdir(forwarder_device_path_on_host) else
Forwarder._DEVICE_FORWARDER_PATH)
device.PushChangedFiles([(forwarder_device_path_on_host,
forwarder_device_path_on_device)])
cmd = [Forwarder._DEVICE_FORWARDER_PATH]
wrapper = tool.GetUtilWrapper()
if wrapper:
cmd.insert(0, wrapper)
device.RunShellCommand(
cmd,
env={'LD_LIBRARY_PATH': Forwarder._DEVICE_FORWARDER_FOLDER},
check_return=True)
self._initialized_devices.add(device_serial)
@staticmethod
def KillHost():
"""Kills the forwarder process running on the host."""
with _FileLock(Forwarder._LOCK_PATH):
Forwarder._GetInstanceLocked(None)._KillHostLocked()
def _KillHostLocked(self):
"""Kills the forwarder process running on the host.
Note that the global lock must be acquired before calling this method.
"""
logger.info('Killing host_forwarder.')
try:
kill_cmd = [self._host_forwarder_path, '--kill-server']
(exit_code, output) = cmd_helper.GetCmdStatusAndOutputWithTimeout(
kill_cmd, Forwarder._TIMEOUT)
if exit_code != 0:
logger.warning('Forwarder unable to shut down:\n%s', output)
kill_cmd = ['pkill', '-9', 'host_forwarder']
(exit_code, output) = cmd_helper.GetCmdStatusAndOutputWithTimeout(
kill_cmd, Forwarder._TIMEOUT)
if exit_code in (0, 1):
# pkill exits with a 0 if it was able to signal at least one process.
          # pkill exits with a 1 if it wasn't able to signal a process because
# no matching process existed. We're ok with either.
return
_, ps_output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
['ps', 'aux'], Forwarder._TIMEOUT)
host_forwarder_lines = [line for line in ps_output.splitlines()
if 'host_forwarder' in line]
if host_forwarder_lines:
logger.error('Remaining host_forwarder processes:\n %s',
'\n '.join(host_forwarder_lines))
else:
logger.error('No remaining host_forwarder processes?')
_DumpHostLog()
error_msg = textwrap.dedent("""\
`{kill_cmd}` failed to kill host_forwarder.
exit_code: {exit_code}
output:
{output}
""")
raise HostForwarderError(
error_msg.format(
kill_cmd=' '.join(kill_cmd), exit_code=str(exit_code),
output='\n'.join(' %s' % l for l in output.splitlines())))
except cmd_helper.TimeoutError as e:
raise HostForwarderError(
'`%s` timed out:\n%s' % (' '.join(kill_cmd), e.output))
@staticmethod
def KillDevice(device, tool=None):
"""Kills the forwarder process running on the device.
Args:
device: Instance of DeviceUtils for talking to the device.
tool: Wrapper tool (e.g. valgrind) that can be used to execute the device
forwarder (see valgrind_tools.py).
"""
with _FileLock(Forwarder._LOCK_PATH):
Forwarder._GetInstanceLocked(None)._KillDeviceLocked(
device, tool or base_tool.BaseTool())
def _KillDeviceLocked(self, device, tool):
"""Kills the forwarder process running on the device.
Note that the global lock must be acquired before calling this method.
Args:
device: Instance of DeviceUtils for talking to the device.
tool: Wrapper tool (e.g. valgrind) that can be used to execute the device
forwarder (see valgrind_tools.py).
"""
logger.info('Killing device_forwarder.')
self._initialized_devices.discard(device.serial)
if not device.FileExists(Forwarder._DEVICE_FORWARDER_PATH):
return
cmd = [Forwarder._DEVICE_FORWARDER_PATH, '--kill-server']
wrapper = tool.GetUtilWrapper()
if wrapper:
cmd.insert(0, wrapper)
device.RunShellCommand(
cmd,
env={'LD_LIBRARY_PATH': Forwarder._DEVICE_FORWARDER_FOLDER},
check_return=True)
| endlessm/chromium-browser | third_party/catapult/devil/devil/android/forwarder.py | Python | bsd-3-clause | 18,608 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl/git cl, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(1, 'default_100_percent'), (2, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
assert data[:8] == '\x89PNG\r\n\x1A\n' and data[12:16] == 'IHDR'
return struct.unpack('>ii', data[16:24])
# TODO(flackr): This should allow some flexibility for non-integer scale
# factors such as allowing any size between the floor and ceiling of
# base * scale.
def ExpectedSize(base_width, base_height, scale):
return round(base_width * scale), round(base_height * scale)
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
base_width, base_height = ImageSize(base_image)
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
exp_width, exp_height = ExpectedSize(base_width, base_height,
self.paths[i][0])
width, height = ImageSize(image_path)
if width != exp_width or height != exp_height:
results.append(self.output_api.PresubmitError(
'Image %s is %dx%d, expected to be %dx%d' % (
self.input_api.os_path.join(repository_path, image_path),
width, height, exp_width, exp_height)))
return results
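# Illustrative usage (added; not part of the original helper): a PRESUBMIT.py in a
# resource directory might run this check roughly as follows; the scale-factor
# folders are taken from the class docstring above.
#
#   def CheckChangeOnUpload(input_api, output_api):
#     return ResourceScaleFactors(
#         input_api, output_api,
#         [(1, 'default_100_percent'), (2, 'default_200_percent')]).RunChecks()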
| keishi/chromium | ui/resources/resource_check/resource_scale_factors.py | Python | bsd-3-clause | 3,857 |
#!/usr/bin/env python
"""
lit - LLVM Integrated Tester.
See lit.pod for more information.
"""
from __future__ import absolute_import
import math, os, platform, random, re, sys, time
import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery
class TestingProgressDisplay(object):
def __init__(self, opts, numTests, progressBar=None):
self.opts = opts
self.numTests = numTests
self.current = None
self.progressBar = progressBar
self.completed = 0
def finish(self):
if self.progressBar:
self.progressBar.clear()
elif self.opts.quiet:
pass
elif self.opts.succinct:
sys.stdout.write('\n')
def update(self, test):
self.completed += 1
if self.opts.incremental:
update_incremental_cache(test)
if self.progressBar:
self.progressBar.update(float(self.completed)/self.numTests,
test.getFullName())
shouldShow = test.result.code.isFailure or \
self.opts.showAllOutput or \
(not self.opts.quiet and not self.opts.succinct)
if not shouldShow:
return
if self.progressBar:
self.progressBar.clear()
# Show the test result line.
test_name = test.getFullName()
print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
self.completed, self.numTests))
# Show the test failure output, if requested.
if (test.result.code.isFailure and self.opts.showOutput) or \
self.opts.showAllOutput:
if test.result.code.isFailure:
print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
'*'*20))
print(test.result.output)
print("*" * 20)
# Report test metrics, if present.
if test.result.metrics:
print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
'*'*10))
items = sorted(test.result.metrics.items())
for metric_name, value in items:
print('%s: %s ' % (metric_name, value.format()))
print("*" * 10)
# Ensure the output is flushed.
sys.stdout.flush()
def write_test_results(run, lit_config, testing_time, output_path):
try:
import json
except ImportError:
lit_config.fatal('test output unsupported with Python 2.5')
# Construct the data we will write.
data = {}
# Encode the current lit version as a schema version.
data['__version__'] = lit.__versioninfo__
data['elapsed'] = testing_time
# FIXME: Record some information on the lit configuration used?
# FIXME: Record information from the individual test suites?
# Encode the tests.
data['tests'] = tests_data = []
for test in run.tests:
test_data = {
'name' : test.getFullName(),
'code' : test.result.code.name,
'output' : test.result.output,
'elapsed' : test.result.elapsed }
# Add test metrics, if present.
if test.result.metrics:
test_data['metrics'] = metrics_data = {}
for key, value in test.result.metrics.items():
metrics_data[key] = value.todata()
tests_data.append(test_data)
# Write the output.
f = open(output_path, 'w')
try:
json.dump(data, f, indent=2, sort_keys=True)
f.write('\n')
finally:
f.close()
def update_incremental_cache(test):
if not test.result.code.isFailure:
return
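    # Touch the failing test's source file so its mtime becomes the newest;
    # sort_by_incremental_cache will then schedule it near the front of the
    # next incremental run.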
fname = test.getFilePath()
os.utime(fname, None)
def sort_by_incremental_cache(run):
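    # Sort by descending mtime: the most recently modified files (including
    # those just touched because they failed) run first, while files whose
    # mtime cannot be read fall back to index 0 and end up last.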
def sortIndex(test):
fname = test.getFilePath()
try:
return -os.path.getmtime(fname)
except:
return 0
run.tests.sort(key = lambda t: sortIndex(t))
def main(builtinParameters = {}):
# Use processes by default on Unix platforms.
isWindows = platform.system() == 'Windows'
useProcessesIsDefault = not isWindows
global options
from optparse import OptionParser, OptionGroup
parser = OptionParser("usage: %prog [options] {file-or-path}")
parser.add_option("", "--version", dest="show_version",
help="Show version and exit",
action="store_true", default=False)
parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
help="Number of testing threads",
type=int, action="store", default=None)
parser.add_option("", "--config-prefix", dest="configPrefix",
metavar="NAME", help="Prefix for 'lit' config files",
action="store", default=None)
parser.add_option("-D", "--param", dest="userParameters",
metavar="NAME=VAL",
help="Add 'NAME' = 'VAL' to the user defined parameters",
type=str, action="append", default=[])
group = OptionGroup(parser, "Output Format")
# FIXME: I find these names very confusing, although I like the
# functionality.
group.add_option("-q", "--quiet", dest="quiet",
help="Suppress no error output",
action="store_true", default=False)
group.add_option("-s", "--succinct", dest="succinct",
help="Reduce amount of output",
action="store_true", default=False)
group.add_option("-v", "--verbose", dest="showOutput",
help="Show test output for failures",
action="store_true", default=False)
group.add_option("-a", "--show-all", dest="showAllOutput",
help="Display all commandlines and output",
action="store_true", default=False)
group.add_option("-o", "--output", dest="output_path",
help="Write test results to the provided path",
action="store", type=str, metavar="PATH")
group.add_option("", "--no-progress-bar", dest="useProgressBar",
help="Do not use curses based progress bar",
action="store_false", default=True)
group.add_option("", "--show-unsupported", dest="show_unsupported",
help="Show unsupported tests",
action="store_true", default=False)
group.add_option("", "--show-xfail", dest="show_xfail",
help="Show tests that were expected to fail",
action="store_true", default=False)
parser.add_option_group(group)
group = OptionGroup(parser, "Test Execution")
group.add_option("", "--path", dest="path",
help="Additional paths to add to testing environment",
action="append", type=str, default=[])
group.add_option("", "--vg", dest="useValgrind",
help="Run tests under valgrind",
action="store_true", default=False)
group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
help="Check for memory leaks under valgrind",
action="store_true", default=False)
group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
help="Specify an extra argument for valgrind",
type=str, action="append", default=[])
group.add_option("", "--time-tests", dest="timeTests",
help="Track elapsed wall time for each test",
action="store_true", default=False)
group.add_option("", "--no-execute", dest="noExecute",
help="Don't execute any tests (assume PASS)",
action="store_true", default=False)
group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
help=("Write XUnit-compatible XML test reports to the"
" specified file"), default=None)
group.add_option("", "--timeout", dest="maxIndividualTestTime",
help="Maximum time to spend running a single test (in seconds)."
"0 means no time limit. [Default: 0]",
type=int, default=None)
parser.add_option_group(group)
group = OptionGroup(parser, "Test Selection")
group.add_option("", "--max-tests", dest="maxTests", metavar="N",
help="Maximum number of tests to run",
action="store", type=int, default=None)
group.add_option("", "--max-time", dest="maxTime", metavar="N",
help="Maximum time to spend testing (in seconds)",
action="store", type=float, default=None)
group.add_option("", "--shuffle", dest="shuffle",
help="Run tests in random order",
action="store_true", default=False)
group.add_option("-i", "--incremental", dest="incremental",
help="Run modified and failing tests first (updates "
"mtimes)",
action="store_true", default=False)
group.add_option("", "--filter", dest="filter", metavar="REGEX",
help=("Only run tests with paths matching the given "
"regular expression"),
action="store", default=None)
parser.add_option_group(group)
group = OptionGroup(parser, "Debug and Experimental Options")
group.add_option("", "--debug", dest="debug",
help="Enable debugging (for 'lit' development)",
action="store_true", default=False)
group.add_option("", "--show-suites", dest="showSuites",
help="Show discovered test suites",
action="store_true", default=False)
group.add_option("", "--show-tests", dest="showTests",
help="Show all discovered tests",
action="store_true", default=False)
group.add_option("", "--use-processes", dest="useProcesses",
help="Run tests in parallel with processes (not threads)",
action="store_true", default=useProcessesIsDefault)
group.add_option("", "--use-threads", dest="useProcesses",
help="Run tests in parallel with threads (not processes)",
action="store_false", default=useProcessesIsDefault)
parser.add_option_group(group)
(opts, args) = parser.parse_args()
if opts.show_version:
print("lit %s" % (lit.__version__,))
return
if not args:
parser.error('No inputs specified')
if opts.numThreads is None:
# Python <2.5 has a race condition causing lit to always fail with numThreads>1
# http://bugs.python.org/issue1731717
# I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
# threads by default there.
if sys.hexversion >= 0x2050200:
opts.numThreads = lit.util.detectCPUs()
else:
opts.numThreads = 1
inputs = args
# Create the user defined parameters.
userParams = dict(builtinParameters)
for entry in opts.userParameters:
if '=' not in entry:
name,opc = entry,''
else:
name,opc = entry.split('=', 1)
userParams[name] = opc
    # Decide what the requested maximum individual test time should be
if opts.maxIndividualTestTime != None:
maxIndividualTestTime = opts.maxIndividualTestTime
else:
# Default is zero
maxIndividualTestTime = 0
# Create the global config object.
litConfig = lit.LitConfig.LitConfig(
progname = os.path.basename(sys.argv[0]),
path = opts.path,
quiet = opts.quiet,
useValgrind = opts.useValgrind,
valgrindLeakCheck = opts.valgrindLeakCheck,
valgrindArgs = opts.valgrindArgs,
noExecute = opts.noExecute,
debug = opts.debug,
isWindows = isWindows,
params = userParams,
config_prefix = opts.configPrefix,
maxIndividualTestTime = maxIndividualTestTime)
# Perform test discovery.
run = lit.run.Run(litConfig,
lit.discovery.find_tests_for_inputs(litConfig, inputs))
# After test discovery the configuration might have changed
# the maxIndividualTestTime. If we explicitly set this on the
# command line then override what was set in the test configuration
if opts.maxIndividualTestTime != None:
if opts.maxIndividualTestTime != litConfig.maxIndividualTestTime:
litConfig.note(('The test suite configuration requested an individual'
' test timeout of {0} seconds but a timeout of {1} seconds was'
' requested on the command line. Forcing timeout to be {1}'
' seconds')
.format(litConfig.maxIndividualTestTime,
opts.maxIndividualTestTime))
litConfig.maxIndividualTestTime = opts.maxIndividualTestTime
if opts.showSuites or opts.showTests:
# Aggregate the tests by suite.
suitesAndTests = {}
for result_test in run.tests:
if result_test.suite not in suitesAndTests:
suitesAndTests[result_test.suite] = []
suitesAndTests[result_test.suite].append(result_test)
suitesAndTests = list(suitesAndTests.items())
suitesAndTests.sort(key = lambda item: item[0].name)
# Show the suites, if requested.
if opts.showSuites:
print('-- Test Suites --')
for ts,ts_tests in suitesAndTests:
print(' %s - %d tests' %(ts.name, len(ts_tests)))
print(' Source Root: %s' % ts.source_root)
print(' Exec Root : %s' % ts.exec_root)
if ts.config.available_features:
print(' Available Features : %s' % ' '.join(
sorted(ts.config.available_features)))
# Show the tests, if requested.
if opts.showTests:
print('-- Available Tests --')
for ts,ts_tests in suitesAndTests:
ts_tests.sort(key = lambda test: test.path_in_suite)
for test in ts_tests:
print(' %s' % (test.getFullName(),))
# Exit.
sys.exit(0)
# Select and order the tests.
numTotalTests = len(run.tests)
# First, select based on the filter expression if given.
if opts.filter:
try:
rex = re.compile(opts.filter)
except:
parser.error("invalid regular expression for --filter: %r" % (
opts.filter))
run.tests = [result_test for result_test in run.tests
if rex.search(result_test.getFullName())]
# Then select the order.
if opts.shuffle:
random.shuffle(run.tests)
elif opts.incremental:
sort_by_incremental_cache(run)
else:
run.tests.sort(key = lambda t: (not t.isEarlyTest(), t.getFullName()))
# Finally limit the number of tests, if desired.
if opts.maxTests is not None:
run.tests = run.tests[:opts.maxTests]
# Don't create more threads than tests.
opts.numThreads = min(len(run.tests), opts.numThreads)
# Because some tests use threads internally, and at least on Linux each
# of these threads counts toward the current process limit, try to
# raise the (soft) process limit so that tests don't fail due to
# resource exhaustion.
try:
cpus = lit.util.detectCPUs()
desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor
# Import the resource module here inside this try block because it
# will likely fail on Windows.
import resource
max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
desired_limit = min(desired_limit, max_procs_hard)
if max_procs_soft < desired_limit:
resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard))
litConfig.note('raised the process limit from %d to %d' % \
(max_procs_soft, desired_limit))
except:
pass
extra = ''
if len(run.tests) != numTotalTests:
extra = ' of %d' % numTotalTests
header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
opts.numThreads)
progressBar = None
if not opts.quiet:
if opts.succinct and opts.useProgressBar:
try:
tc = lit.ProgressBar.TerminalController()
progressBar = lit.ProgressBar.ProgressBar(tc, header)
except ValueError:
print(header)
progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
else:
print(header)
startTime = time.time()
display = TestingProgressDisplay(opts, len(run.tests), progressBar)
try:
run.execute_tests(display, opts.numThreads, opts.maxTime,
opts.useProcesses)
except KeyboardInterrupt:
sys.exit(2)
display.finish()
testing_time = time.time() - startTime
if not opts.quiet:
print('Testing Time: %.2fs' % (testing_time,))
# Write out the test data, if requested.
if opts.output_path is not None:
write_test_results(run, litConfig, testing_time, opts.output_path)
# List test results organized by kind.
hasFailures = False
byCode = {}
for test in run.tests:
if test.result.code not in byCode:
byCode[test.result.code] = []
byCode[test.result.code].append(test)
if test.result.code.isFailure:
hasFailures = True
# Print each test in any of the failing groups.
for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
('Failing Tests', lit.Test.FAIL),
('Unresolved Tests', lit.Test.UNRESOLVED),
('Unsupported Tests', lit.Test.UNSUPPORTED),
('Expected Failing Tests', lit.Test.XFAIL),
('Timed Out Tests', lit.Test.TIMEOUT)):
if (lit.Test.XFAIL == code and not opts.show_xfail) or \
(lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
continue
elts = byCode.get(code)
if not elts:
continue
print('*'*20)
print('%s (%d):' % (title, len(elts)))
for test in elts:
print(' %s' % test.getFullName())
sys.stdout.write('\n')
if opts.timeTests and run.tests:
# Order by time.
test_times = [(test.getFullName(), test.result.elapsed)
for test in run.tests]
lit.util.printHistogram(test_times, title='Tests')
for name,code in (('Expected Passes ', lit.Test.PASS),
('Passes With Retry ', lit.Test.FLAKYPASS),
('Expected Failures ', lit.Test.XFAIL),
('Unsupported Tests ', lit.Test.UNSUPPORTED),
('Unresolved Tests ', lit.Test.UNRESOLVED),
('Unexpected Passes ', lit.Test.XPASS),
('Unexpected Failures', lit.Test.FAIL),
('Individual Timeouts', lit.Test.TIMEOUT)):
if opts.quiet and not code.isFailure:
continue
N = len(byCode.get(code,[]))
if N:
print(' %s: %d' % (name,N))
if opts.xunit_output_file:
# Collect the tests, indexed by test suite
by_suite = {}
for result_test in run.tests:
suite = result_test.suite.config.name
if suite not in by_suite:
by_suite[suite] = {
'passes' : 0,
'failures' : 0,
'tests' : [] }
by_suite[suite]['tests'].append(result_test)
if result_test.result.code.isFailure:
by_suite[suite]['failures'] += 1
else:
by_suite[suite]['passes'] += 1
xunit_output_file = open(opts.xunit_output_file, "w")
xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
xunit_output_file.write("<testsuites>\n")
for suite_name, suite in by_suite.items():
safe_suite_name = suite_name.replace(".", "-")
xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
xunit_output_file.write(" tests='" + str(suite['passes'] +
suite['failures']) + "'")
xunit_output_file.write(" failures='" + str(suite['failures']) +
"'>\n")
for result_test in suite['tests']:
xunit_output_file.write(result_test.getJUnitXML() + "\n")
xunit_output_file.write("</testsuite>\n")
xunit_output_file.write("</testsuites>")
xunit_output_file.close()
# If we encountered any additional errors, exit abnormally.
if litConfig.numErrors:
sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
sys.exit(2)
# Warn about warnings.
if litConfig.numWarnings:
sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)
if hasFailures:
sys.exit(1)
sys.exit(0)
if __name__=='__main__':
main()
| JianpingZeng/xcc | xcc/java/utils/lit/lit/main.py | Python | bsd-3-clause | 21,621 |
from django.template.defaultfilters import rjust
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class RjustTests(SimpleTestCase):
@setup({'rjust01': '{% autoescape off %}.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.{% endautoescape %}'})
def test_rjust01(self):
output = self.engine.render_to_string('rjust01', {"a": "a&b", "b": mark_safe("a&b")})
        self.assertEqual(output, ".  a&b. .  a&b.")
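    # rjust02 below runs with autoescaping on, so the plain value is escaped
    # to 'a&amp;b' while the mark_safe value is rendered unescaped.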
@setup({'rjust02': '.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.'})
def test_rjust02(self):
output = self.engine.render_to_string('rjust02', {"a": "a&b", "b": mark_safe("a&b")})
        self.assertEqual(output, ".  a&amp;b. .  a&b.")
class FunctionTests(SimpleTestCase):
def test_rjust(self):
        self.assertEqual(rjust('test', 10), '      test')
def test_less_than_string_length(self):
self.assertEqual(rjust('test', 3), 'test')
def test_non_string_input(self):
self.assertEqual(rjust(123, 4), ' 123')
| yephper/django | tests/template_tests/filter_tests/test_rjust.py | Python | bsd-3-clause | 1,060 |
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("LogisticRegression" , "BinaryClass_10" , "db2")
| antoinecarme/sklearn2sql_heroku | tests/classification/BinaryClass_10/ws_BinaryClass_10_LogisticRegression_db2_code_gen.py | Python | bsd-3-clause | 145 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
basic.py:
Basic table read / write functionality for simple character
delimited files with various options for column header definition.
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
from __future__ import absolute_import, division, print_function
import re
from . import core
from ...extern.six.moves import zip
class BasicHeader(core.BaseHeader):
"""
Basic table Header Reader
Set a few defaults for common ascii table formats
(start at line 0, comments begin with ``#`` and possibly white space)
"""
start_line = 0
comment = r'\s*#'
write_comment = '# '
class BasicData(core.BaseData):
"""
Basic table Data Reader
Set a few defaults for common ascii table formats
(start at line 1, comments begin with ``#`` and possibly white space)
"""
start_line = 1
comment = r'\s*#'
write_comment = '# '
class Basic(core.BaseReader):
r"""
Read a character-delimited table with a single header line at the top
followed by data lines to the end of the table. Lines beginning with # as
the first non-whitespace character are comments. This reader is highly
configurable.
::
rdr = ascii.get_reader(Reader=ascii.Basic)
rdr.header.splitter.delimiter = ' '
rdr.data.splitter.delimiter = ' '
rdr.header.start_line = 0
rdr.data.start_line = 1
rdr.data.end_line = None
rdr.header.comment = r'\s*#'
rdr.data.comment = r'\s*#'
Example table::
# Column definition is the first uncommented line
# Default delimiter is the space character.
apples oranges pears
# Data starts after the header column definition, blank lines ignored
1 2 3
4 5 6
"""
_format_name = 'basic'
_description = 'Basic table with custom delimiters'
header_class = BasicHeader
data_class = BasicData
class NoHeaderHeader(BasicHeader):
"""
Reader for table header without a header
Set the start of header line number to `None`, which tells the basic
reader there is no header line.
"""
start_line = None
class NoHeaderData(BasicData):
"""
Reader for table data without a header
Data starts at first uncommented line since there is no header line.
"""
start_line = 0
class NoHeader(Basic):
"""
Read a table with no header line. Columns are autonamed using
header.auto_format which defaults to "col%d". Otherwise this reader
    is the same as the :class:`Basic` class from which it is derived.  Example::
# Table data
1 2 "hello there"
3 4 world
"""
_format_name = 'no_header'
_description = 'Basic table with no headers'
header_class = NoHeaderHeader
data_class = NoHeaderData
class CommentedHeaderHeader(BasicHeader):
"""
Header class for which the column definition line starts with the
comment character. See the :class:`CommentedHeader` class for an example.
"""
def process_lines(self, lines):
"""
Return only lines that start with the comment regexp. For these
lines strip out the matching characters.
"""
re_comment = re.compile(self.comment)
for line in lines:
match = re_comment.match(line)
if match:
yield line[match.end():]
def write(self, lines):
lines.append(self.write_comment + self.splitter.join(self.colnames))
class CommentedHeader(Basic):
"""
Read a file where the column names are given in a line that begins with
the header comment character. ``header_start`` can be used to specify the
line index of column names, and it can be a negative index (for example -1
for the last commented line). The default delimiter is the <space>
character.::
# col1 col2 col3
# Comment line
1 2 3
4 5 6
"""
_format_name = 'commented_header'
_description = 'Column names in a commented line'
header_class = CommentedHeaderHeader
data_class = NoHeaderData
def read(self, table):
"""
Read input data (file-like object, filename, list of strings, or
single string) into a Table and return the result.
"""
out = super(CommentedHeader, self).read(table)
# Strip off first comment since this is the header line for
# commented_header format.
if 'comments' in out.meta:
out.meta['comments'] = out.meta['comments'][1:]
if not out.meta['comments']:
del out.meta['comments']
return out
def write_header(self, lines, meta):
"""
Write comment lines after, rather than before, the header.
"""
self.header.write(lines)
self.header.write_comments(lines, meta)
class TabHeaderSplitter(core.DefaultSplitter):
"""Split lines on tab and do not remove whitespace"""
delimiter = '\t'
process_line = None
class TabDataSplitter(TabHeaderSplitter):
"""
Don't strip data value whitespace since that is significant in TSV tables
"""
process_val = None
skipinitialspace = False
class TabHeader(BasicHeader):
"""
Reader for header of tables with tab separated header
"""
splitter_class = TabHeaderSplitter
class TabData(BasicData):
"""
Reader for data of tables with tab separated data
"""
splitter_class = TabDataSplitter
class Tab(Basic):
"""
Read a tab-separated file. Unlike the :class:`Basic` reader, whitespace is
not stripped from the beginning and end of either lines or individual column
values.
Example::
col1 <tab> col2 <tab> col3
# Comment line
1 <tab> 2 <tab> 5
"""
_format_name = 'tab'
_description = 'Basic table with tab-separated values'
header_class = TabHeader
data_class = TabData
class CsvSplitter(core.DefaultSplitter):
"""
Split on comma for CSV (comma-separated-value) tables
"""
delimiter = ','
class CsvHeader(BasicHeader):
"""
Header that uses the :class:`astropy.io.ascii.basic.CsvSplitter`
"""
splitter_class = CsvSplitter
comment = None
write_comment = None
class CsvData(BasicData):
"""
Data that uses the :class:`astropy.io.ascii.basic.CsvSplitter`
"""
splitter_class = CsvSplitter
fill_values = [(core.masked, '')]
comment = None
write_comment = None
class Csv(Basic):
"""
Read a CSV (comma-separated-values) file.
Example::
num,ra,dec,radius,mag
1,32.23222,10.1211,0.8,18.1
2,38.12321,-88.1321,2.2,17.0
Plain csv (comma separated value) files typically contain as many entries
as there are columns on each line. In contrast, common spreadsheet editors
stop writing if all remaining cells on a line are empty, which can lead to
lines where the rightmost entries are missing. This Reader can deal with
such files.
Masked values (indicated by an empty '' field value when reading) are
written out in the same way with an empty ('') field. This is different
from the typical default for `astropy.io.ascii` in which missing values are
indicated by ``--``.
Example::
num,ra,dec,radius,mag
1,32.23222,10.1211
2,38.12321,-88.1321,2.2,17.0
"""
_format_name = 'csv'
_io_registry_can_write = True
_description = 'Comma-separated-values'
header_class = CsvHeader
data_class = CsvData
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust row if it is too short.
If a data row is shorter than the header, add empty values to make it the
right length.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table.
"""
if len(str_vals) < ncols:
str_vals.extend((ncols - len(str_vals)) * [''])
return str_vals
class RdbHeader(TabHeader):
"""
Header for RDB tables
"""
col_type_map = {'n': core.NumType,
's': core.StrType}
def get_type_map_key(self, col):
return col.raw_type[-1]
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
This is a specialized get_cols for the RDB type:
Line 0: RDB col names
Line 1: RDB col definitions
Line 2+: RDB data rows
Parameters
----------
lines : list
List of table lines
Returns
-------
None
"""
header_lines = self.process_lines(lines) # this is a generator
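        # zip against range(2) pulls at most the first two header rows from the
        # generator: the column names line, then the column type codes line.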
header_vals_list = [hl for _, hl in zip(range(2), self.splitter(header_lines))]
if len(header_vals_list) != 2:
raise ValueError('RDB header requires 2 lines')
self.names, raw_types = header_vals_list
if len(self.names) != len(raw_types):
raise ValueError('RDB header mismatch between number of column names and column types')
if any(not re.match(r'\d*(N|S)$', x, re.IGNORECASE) for x in raw_types):
raise ValueError('RDB types definitions do not all match [num](N|S): {}'.format(raw_types))
self._set_cols_from_names()
for col, raw_type in zip(self.cols, raw_types):
col.raw_type = raw_type
col.type = self.get_col_type(col)
def write(self, lines):
lines.append(self.splitter.join(self.colnames))
rdb_types = []
for col in self.cols:
# Check if dtype.kind is string or unicode. See help(np.core.numerictypes)
rdb_type = 'S' if col.info.dtype.kind in ('S', 'U') else 'N'
rdb_types.append(rdb_type)
lines.append(self.splitter.join(rdb_types))
class RdbData(TabData):
"""
Data reader for RDB data. Starts reading at line 2.
"""
start_line = 2
class Rdb(Tab):
"""
Read a tab-separated file with an extra line after the column definition
line. The RDB format meets this definition. Example::
col1 <tab> col2 <tab> col3
N <tab> S <tab> N
1 <tab> 2 <tab> 5
In this reader the second line is just ignored.
"""
_format_name = 'rdb'
_io_registry_format_aliases = ['rdb']
_io_registry_suffix = '.rdb'
_description = 'Tab-separated with a type definition header line'
header_class = RdbHeader
data_class = RdbData
| AustereCuriosity/astropy | astropy/io/ascii/basic.py | Python | bsd-3-clause | 10,920 |
# Copyright (C) 2011 Bheesham Persaud
# The license is available in LICENSE
from __future__ import division
import re
import urllib  # used below for urlencode / urlopen when querying the link checker
from includes.functions import *
class fileserve_com:
def init( self ):
self.url_pattern = re.compile( r'(http://www\.fileserve\.com/file/([A-Za-z0-9]+))', re.I )
self.result_pattern = re.compile( r'<td>(http://www\.fileserve\.com/file/([A-Za-z0-9]+))</td><td>--</td>', re.I )
self.url = 'www.fileserve.com'
return None
def parse( self, text ):
matches = self.url_pattern.findall( text )
result_urls = []
if len( matches ) > 0:
for url in matches:
result_urls.append( url[0] )
return result_urls
else:
return None
def check( self, files_list ):
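		# POST the URLs to fileserve's link checker in chunks of 50; rows whose
		# status cell is "--" are treated as dead, collected and removed from
		# the live list before returning.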
dead = []
files_split = split_list( files_list, 50 )
for files in files_split:
files_joined = "\r\n".join( files )
params = urllib.urlencode({ 'urls': files_joined, 'submit': ' Check Urls ' })
browser = urllib.urlopen( "http://fileserve.com/link-checker.php", params )
res_urls = browser.read()
res_urls = res_urls.replace( "\r", '' )
res_urls = res_urls.replace( "\n", '' )
res_urls = res_urls.replace( " ", '' )
res_urls = self.result_pattern.findall( res_urls )
for res_url in res_urls:
dead.append( res_url[0] )
files_list.remove( res_url[0] )
		return [ self.url, files_list, dead ]
| bheesham/PyLinkChecker | hosts/fileserve_com.py | Python | bsd-3-clause | 1,325 |
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.encoding import smart_unicode, iri_to_uri
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from upy.contrib.rosetta.conf import settings as rosetta_settings
from upy.contrib.rosetta.polib import pofile
from upy.contrib.rosetta.poutil import find_pos, pagination_range
from upy.contrib.rosetta.signals import entry_changed, post_save
from upy.contrib.rosetta.storage import get_storage
import re
from upy.contrib import rosetta
import datetime
import unicodedata
import hashlib
import os
def home(request):
"""
Displays a list of messages to be translated
"""
def fix_nls(in_, out_):
"""Fixes submitted translations by filtering carriage returns and pairing
        newlines at the beginning and end of the translated string with the original
"""
if 0 == len(in_) or 0 == len(out_):
return out_
if "\r" in out_ and "\r" not in in_:
out_ = out_.replace("\r", '')
if "\n" == in_[0] and "\n" != out_[0]:
out_ = "\n" + out_
elif "\n" != in_[0] and "\n" == out_[0]:
out_ = out_.lstrip()
if "\n" == in_[-1] and "\n" != out_[-1]:
out_ = out_ + "\n"
elif "\n" != in_[-1] and "\n" == out_[-1]:
out_ = out_.rstrip()
return out_
storage = get_storage(request)
version = rosetta.get_version(True)
if storage.has('rosetta_i18n_fn'):
rosetta_i18n_fn = storage.get('rosetta_i18n_fn')
rosetta_i18n_app = get_app_name(rosetta_i18n_fn)
rosetta_i18n_lang_code = storage.get('rosetta_i18n_lang_code')
rosetta_i18n_lang_bidi = rosetta_i18n_lang_code.split('-')[0] in settings.LANGUAGES_BIDI
rosetta_i18n_write = storage.get('rosetta_i18n_write', True)
if rosetta_i18n_write:
rosetta_i18n_pofile = pofile(rosetta_i18n_fn, wrapwidth=rosetta_settings.POFILE_WRAP_WIDTH)
for entry in rosetta_i18n_pofile:
entry.md5hash = hashlib.md5(
entry.msgid.encode("utf8") +
entry.msgstr.encode("utf8") +
(entry.msgctxt and entry.msgctxt.encode("utf8") or "")
).hexdigest()
else:
rosetta_i18n_pofile = storage.get('rosetta_i18n_pofile')
if 'filter' in request.GET:
if request.GET.get('filter') in ('untranslated', 'translated', 'fuzzy', 'all'):
filter_ = request.GET.get('filter')
storage.set('rosetta_i18n_filter', filter_)
return HttpResponseRedirect(reverse('rosetta-home'))
rosetta_i18n_filter = storage.get('rosetta_i18n_filter', 'all')
if '_next' in request.POST:
rx = re.compile(r'^m_([0-9a-f]+)')
rx_plural = re.compile(r'^m_([0-9a-f]+)_([0-9]+)')
file_change = False
for key, value in request.POST.items():
md5hash = None
plural_id = None
if rx_plural.match(key):
md5hash = str(rx_plural.match(key).groups()[0])
# polib parses .po files into unicode strings, but
# doesn't bother to convert plural indexes to int,
# so we need unicode here.
plural_id = unicode(rx_plural.match(key).groups()[1])
elif rx.match(key):
md5hash = str(rx.match(key).groups()[0])
if md5hash is not None:
entry = rosetta_i18n_pofile.find(md5hash, 'md5hash')
# If someone did a makemessage, some entries might
# have been removed, so we need to check.
if entry:
old_msgstr = entry.msgstr
if plural_id is not None:
#plural_string = fix_nls(entry.msgstr_plural[plural_id], value)
plural_string = fix_nls(entry.msgid_plural, value)
entry.msgstr_plural[plural_id] = plural_string
else:
entry.msgstr = fix_nls(entry.msgid, value)
is_fuzzy = bool(request.POST.get('f_%s' % md5hash, False))
old_fuzzy = 'fuzzy' in entry.flags
if old_fuzzy and not is_fuzzy:
entry.flags.remove('fuzzy')
elif not old_fuzzy and is_fuzzy:
entry.flags.append('fuzzy')
file_change = True
if old_msgstr != value or old_fuzzy != is_fuzzy:
entry_changed.send(sender=entry,
user=request.user,
old_msgstr=old_msgstr,
old_fuzzy=old_fuzzy,
pofile=rosetta_i18n_fn,
language_code=rosetta_i18n_lang_code,
)
else:
storage.set('rosetta_last_save_error', True)
if file_change and rosetta_i18n_write:
try:
# Provide defaults in case authorization is not required.
request.user.first_name = getattr(request.user, 'first_name', 'Anonymous')
request.user.last_name = getattr(request.user, 'last_name', 'User')
request.user.email = getattr(request.user, 'email', '[email protected]')
rosetta_i18n_pofile.metadata['Last-Translator'] = unicodedata.normalize('NFKD', u"%s %s <%s>" % (request.user.first_name, request.user.last_name, request.user.email)).encode('ascii', 'ignore')
rosetta_i18n_pofile.metadata['X-Translated-Using'] = u"django-rosetta %s" % rosetta.get_version(False)
rosetta_i18n_pofile.metadata['PO-Revision-Date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M%z')
except UnicodeDecodeError:
pass
try:
rosetta_i18n_pofile.save()
po_filepath, ext = os.path.splitext(rosetta_i18n_fn)
save_as_mo_filepath = po_filepath + '.mo'
rosetta_i18n_pofile.save_as_mofile(save_as_mo_filepath)
post_save.send(sender=None, language_code=rosetta_i18n_lang_code, request=request)
# Try auto-reloading via the WSGI daemon mode reload mechanism
if rosetta_settings.WSGI_AUTO_RELOAD and \
'mod_wsgi.process_group' in request.environ and \
request.environ.get('mod_wsgi.process_group', None) and \
'SCRIPT_FILENAME' in request.environ and \
int(request.environ.get('mod_wsgi.script_reloading', '0')):
try:
os.utime(request.environ.get('SCRIPT_FILENAME'), None)
except OSError:
pass
# Try auto-reloading via uwsgi daemon reload mechanism
if rosetta_settings.UWSGI_AUTO_RELOAD:
try:
import uwsgi
# pretty easy right?
uwsgi.reload()
except:
# we may not be running under uwsgi :P
pass
except:
storage.set('rosetta_i18n_write', False)
storage.set('rosetta_i18n_pofile', rosetta_i18n_pofile)
# Retain query arguments
query_arg = '?_next=1'
if 'query' in request.GET or 'query' in request.POST:
query_arg += '&query=%s' % request.REQUEST.get('query')
if 'page' in request.GET:
query_arg += '&page=%d&_next=1' % int(request.GET.get('page'))
return HttpResponseRedirect(reverse('rosetta-home') + iri_to_uri(query_arg))
rosetta_i18n_lang_name = _(storage.get('rosetta_i18n_lang_name'))
rosetta_i18n_lang_code = storage.get('rosetta_i18n_lang_code')
if 'query' in request.REQUEST and request.REQUEST.get('query', '').strip():
query = request.REQUEST.get('query').strip()
rx = re.compile(re.escape(query), re.IGNORECASE)
paginator = Paginator([e for e in rosetta_i18n_pofile if not e.obsolete and rx.search(smart_unicode(e.msgstr) + smart_unicode(e.msgid) + u''.join([o[0] for o in e.occurrences]))], rosetta_settings.MESSAGES_PER_PAGE)
else:
if rosetta_i18n_filter == 'untranslated':
paginator = Paginator(rosetta_i18n_pofile.untranslated_entries(), rosetta_settings.MESSAGES_PER_PAGE)
elif rosetta_i18n_filter == 'translated':
paginator = Paginator(rosetta_i18n_pofile.translated_entries(), rosetta_settings.MESSAGES_PER_PAGE)
elif rosetta_i18n_filter == 'fuzzy':
paginator = Paginator([e for e in rosetta_i18n_pofile.fuzzy_entries() if not e.obsolete], rosetta_settings.MESSAGES_PER_PAGE)
else:
paginator = Paginator([e for e in rosetta_i18n_pofile if not e.obsolete], rosetta_settings.MESSAGES_PER_PAGE)
if 'page' in request.GET and int(request.GET.get('page')) <= paginator.num_pages and int(request.GET.get('page')) > 0:
page = int(request.GET.get('page'))
else:
page = 1
if '_next' in request.GET or '_next' in request.POST:
page += 1
if page > paginator.num_pages:
page = 1
query_arg = '?page=%d' % page
return HttpResponseRedirect(reverse('rosetta-home') + iri_to_uri(query_arg))
rosetta_messages = paginator.page(page).object_list
if rosetta_settings.MAIN_LANGUAGE and rosetta_settings.MAIN_LANGUAGE != rosetta_i18n_lang_code:
main_language = None
for language in settings.LANGUAGES:
if language[0] == rosetta_settings.MAIN_LANGUAGE:
main_language = _(language[1])
break
fl = ("/%s/" % rosetta_settings.MAIN_LANGUAGE).join(rosetta_i18n_fn.split("/%s/" % rosetta_i18n_lang_code))
po = pofile(fl)
main_messages = []
for message in rosetta_messages:
message.main_lang = po.find(message.msgid).msgstr
needs_pagination = paginator.num_pages > 1
if needs_pagination:
if paginator.num_pages >= 10:
page_range = pagination_range(1, paginator.num_pages, page)
else:
page_range = range(1, 1 + paginator.num_pages)
try:
ADMIN_MEDIA_PREFIX = settings.ADMIN_MEDIA_PREFIX
ADMIN_IMAGE_DIR = ADMIN_MEDIA_PREFIX + 'img/admin/'
except AttributeError:
ADMIN_MEDIA_PREFIX = settings.STATIC_URL + 'admin/'
ADMIN_IMAGE_DIR = ADMIN_MEDIA_PREFIX + 'img/'
ENABLE_TRANSLATION_SUGGESTIONS = rosetta_settings.BING_APP_ID and rosetta_settings.ENABLE_TRANSLATION_SUGGESTIONS
BING_APP_ID = rosetta_settings.BING_APP_ID
MESSAGES_SOURCE_LANGUAGE_NAME = rosetta_settings.MESSAGES_SOURCE_LANGUAGE_NAME
MESSAGES_SOURCE_LANGUAGE_CODE = rosetta_settings.MESSAGES_SOURCE_LANGUAGE_CODE
if storage.has('rosetta_last_save_error'):
storage.delete('rosetta_last_save_error')
rosetta_last_save_error = True
return render_to_response('pofile.html', locals(), context_instance=RequestContext(request))
else:
return list_languages(request, do_session_warn=True)
home = never_cache(home)
home = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(home)
def download_file(request):
import zipfile
from StringIO import StringIO
storage = get_storage(request)
# original filename
rosetta_i18n_fn = storage.get('rosetta_i18n_fn', None)
# in-session modified catalog
rosetta_i18n_pofile = storage.get('rosetta_i18n_pofile', None)
# language code
rosetta_i18n_lang_code = storage.get('rosetta_i18n_lang_code', None)
if not rosetta_i18n_lang_code or not rosetta_i18n_pofile or not rosetta_i18n_fn:
return HttpResponseRedirect(reverse('rosetta-home'))
try:
if len(rosetta_i18n_fn.split('/')) >= 5:
offered_fn = '_'.join(rosetta_i18n_fn.split('/')[-5:])
else:
offered_fn = rosetta_i18n_fn.split('/')[-1]
po_fn = str(rosetta_i18n_fn.split('/')[-1])
mo_fn = str(po_fn.replace('.po', '.mo')) # not so smart, huh
zipdata = StringIO()
zipf = zipfile.ZipFile(zipdata, mode="w")
zipf.writestr(po_fn, unicode(rosetta_i18n_pofile).encode("utf8"))
zipf.writestr(mo_fn, rosetta_i18n_pofile.to_binary())
zipf.close()
zipdata.seek(0)
response = HttpResponse(zipdata.read())
response['Content-Disposition'] = 'attachment; filename=%s.%s.zip' % (offered_fn, rosetta_i18n_lang_code)
response['Content-Type'] = 'application/x-zip'
return response
except Exception:
return HttpResponseRedirect(reverse('rosetta-home'))
download_file = never_cache(download_file)
download_file = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(download_file)
def list_languages(request, do_session_warn=False):
"""
Lists the languages for the current project, the gettext catalog files
that can be translated and their translation progress
"""
storage = get_storage(request)
languages = []
if 'filter' in request.GET:
if request.GET.get('filter') in ('project', 'third-party', 'django', 'all'):
filter_ = request.GET.get('filter')
storage.set('rosetta_i18n_catalog_filter', filter_)
return HttpResponseRedirect(reverse('rosetta-pick-file'))
rosetta_i18n_catalog_filter = storage.get('rosetta_i18n_catalog_filter', 'project')
third_party_apps = rosetta_i18n_catalog_filter in ('all', 'third-party')
django_apps = rosetta_i18n_catalog_filter in ('all', 'django')
project_apps = rosetta_i18n_catalog_filter in ('all', 'project')
has_pos = False
for language in settings.LANGUAGES:
pos = find_pos(language[0], project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps)
has_pos = has_pos or len(pos)
languages.append(
(language[0],
_(language[1]),
[(get_app_name(l), os.path.realpath(l), pofile(l)) for l in pos],
)
)
try:
ADMIN_MEDIA_PREFIX = settings.ADMIN_MEDIA_PREFIX
except AttributeError:
ADMIN_MEDIA_PREFIX = settings.STATIC_URL + 'admin/'
version = rosetta.get_version(True)
do_session_warn = do_session_warn and 'SessionRosettaStorage' in rosetta_settings.STORAGE_CLASS and 'signed_cookies' in settings.SESSION_ENGINE
return render_to_response('rosetta_languages.html', locals(), context_instance=RequestContext(request))
list_languages = never_cache(list_languages)
list_languages = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(list_languages)
def get_app_name(path):
app = path.split("/locale")[0].split("/")[-1]
return app
def lang_sel(request, langid, idx):
"""
Selects a file to be translated
"""
storage = get_storage(request)
if langid not in [l[0] for l in settings.LANGUAGES]:
raise Http404
else:
rosetta_i18n_catalog_filter = storage.get('rosetta_i18n_catalog_filter', 'project')
third_party_apps = rosetta_i18n_catalog_filter in ('all', 'third-party')
django_apps = rosetta_i18n_catalog_filter in ('all', 'django')
project_apps = rosetta_i18n_catalog_filter in ('all', 'project')
file_ = find_pos(langid, project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps)[int(idx)]
storage.set('rosetta_i18n_lang_code', langid)
storage.set('rosetta_i18n_lang_name', unicode([l[1] for l in settings.LANGUAGES if l[0] == langid][0]))
storage.set('rosetta_i18n_fn', file_)
po = pofile(file_)
for entry in po:
entry.md5hash = hashlib.md5(
entry.msgid.encode("utf8") +
entry.msgstr.encode("utf8") +
(entry.msgctxt and entry.msgctxt.encode("utf8") or "")
).hexdigest()
storage.set('rosetta_i18n_pofile', po)
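        # Touching the .po file is a cheap writability check; if it fails, the
        # session falls back to read-only mode and edits live only in the
        # session storage.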
try:
os.utime(file_, None)
storage.set('rosetta_i18n_write', True)
except OSError:
storage.set('rosetta_i18n_write', False)
return HttpResponseRedirect(reverse('rosetta-home'))
lang_sel = never_cache(lang_sel)
lang_sel = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(lang_sel)
def can_translate(user):
if not getattr(settings, 'ROSETTA_REQUIRES_AUTH', True):
return True
if not user.is_authenticated():
return False
elif user.is_superuser:
return True
elif getattr(settings, 'ALLOW_STAFF_TO_ROSETTA') and user.is_staff:
return True
else:
try:
from django.contrib.auth.models import Group
translators = Group.objects.get(name='translators')
return translators in user.groups.all()
except Group.DoesNotExist:
return False
| 20tab/upy | upy/contrib/rosetta/views.py | Python | bsd-3-clause | 18,100 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier définit un objet 'importeur', chargé de contrôler le mécanisme
d'importation, initialisation, configuration, déroulement et arrêt
des modules primaires et secondaires.
On parcourt les sous-dossiers définis dans les variables :
- REP_PRIMAIRES : répertoire des modules primaires
- REP_SECONDAIRES : répertoire des modules secondaires
Il est possible de changer ces variables mais dans ce cas, une réorganisation
du projet s'impose.
Dans chaque module, on s'occupera de charger l'objet le représentant.
Par exemple, le module anaconf se définit comme suit :
* un package anaconf contenu dans REP_PRIMAIRES
* un fichier __init__.py
* une classe Anaconf
On créée un objet chargé de représenter le module. C'est cet objet qui
possède les méthodes génériques chargées d'initialiser, configurer, lancer
et arrêter un module. Les autres fichiers du module sont une boîte noir
inconnu pour l'importeur.
"""
import os
import sys
from abstraits.module import *
REP_PRIMAIRES = "primaires"
REP_SECONDAIRES = "secondaires"
class Importeur:
"""Classe chargée de créer un objet Importeur. Il contient sous la forme
d'attributs les modules primaires et secondaires chargés. Les modules
primaires et secondaires ne sont pas distingués.
On ne doit créer qu'un seul objet Importeur.
"""
nb_importeurs = 0
def __init__(self):
"""Constructeur de l'importeur. Il vérifie surtout
qu'un seul est créé.
Il prend en paramètre le parser de commande qu'il doit transmettre
à chaque module.
"""
Importeur.nb_importeurs += 1
if Importeur.nb_importeurs > 1:
raise RuntimeError("{0} importeurs ont été créés".format( \
Importeur.nb_importeurs))
def __str__(self):
"""Retourne sous ue forme un peu plus lisible les modules importés."""
ret = []
for nom_module in self.__dict__.keys():
ret.append("{0}: {1}".format(nom_module, getattr(self, \
nom_module)))
ret.sort()
return "\n".join(ret)
def tout_charger(self):
"""Méthode appelée pour charger les modules primaires et secondaires.
Par défaut, on importe tout mais on ne créée rien.
"""
        # Start by walking through the primary modules
for nom_package in os.listdir(os.getcwd() + "/" + REP_PRIMAIRES):
if not nom_package.startswith("__"):
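                # __import__("primaires.x") returns the top-level "primaires"
                # package, so the submodule and then its capitalised class are
                # fetched with getattr before being stored on the importer.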
package = __import__(REP_PRIMAIRES + "." + nom_package)
module = getattr(getattr(package, nom_package), \
nom_package.capitalize())
setattr(self, nom_package, module)
        # Do the same with the secondary modules
for nom_package in os.listdir(os.getcwd() + "/" + REP_SECONDAIRES):
if not nom_package.startswith("__"):
package = __import__(REP_SECONDAIRES + "." + nom_package)
module = getattr(getattr(package, nom_package), \
nom_package.capitalize())
setattr(self, nom_package, module)
def tout_instancier(self, parser_cmd):
"""Cette méthode permet d'instancier les modules chargés auparavant.
On se base sur le type du module (classe ou objet)
pour le créer ou non.
En effet, cette méthode doit pouvoir être appelée quand certains
modules sont instanciés, et d'autres non.
NOTE IMPORTANTE: on passe au constructeur de chaque module
self, c'est-à-dire l'importeur. Les modules en ont en effet
besoin pour interragir entre eux.
"""
for nom_module, module in self.__dict__.items():
            if type(module) is type: # it still needs to be instantiated
setattr(self, nom_module, module(self, parser_cmd))
def tout_configurer(self):
"""Méthode permettant de configurer tous les modules qui en ont besoin.
Les modules qui doivent être configuré sont ceux instanciés.
Attention: les modules non encore instanciés sont à l'état de classe.
Tous les modules doivent donc être instanciés au minimum avant
que cette méthode ne soit appelée. Autrement dit, la méthode
tout_instancier doit être appelée auparavant.
"""
for module in self.__dict__.values():
if module.statut == INSTANCIE:
module.config()
def tout_initialiser(self):
"""Méthode permettant d'initialiser tous les modules qui en ont besoin.
Les modules à initialiser sont ceux configuré.
"""
for module in self.__dict__.values():
if module.statut == CONFIGURE:
module.init()
def tout_detruire(self):
"""Méthode permettant de détruire tous les modules qui en ont besoin.
Les modules à détruire sont ceux initialisés.
"""
for module in self.__dict__.values():
if module.statut == INITIALISE:
module.detruire()
def boucle(self):
"""Méthode appelée à chaque tour de boucle synchro.
Elle doit faire appel à la méthode boucle de chaque module primaire
ou secondaire.
"""
for module in self.__dict__.values():
module.boucle()
def module_est_charge(self, nom):
"""Retourne True si le module est déjà chargé, False sinon.
On n'a pas besoin du type du module, les modules primaires
et secondaires étant stockés de la même façon.
Attention: un module peut être chargé sans être instancié,
configuré ou initialisé.
"""
return nom in self.__dict__.keys()
def charger_module(self, parser_cmd, m_type, nom):
"""Méthode permettant de charger un module en fonction de son type et
de son nom.
Si le module est déjà chargé, on ne fait rien.
Note: à la différence de tout_charger, cette méthode créée directement
l'objet gérant le module.
"""
if m_type == "primaire":
rep = REP_PRIMAIRES
elif m_type == "secondaire":
rep = REP_SECONDAIRES
else:
raise ValueError("le type {0} n'est ni primaire ni secondaire" \
                    .format(m_type))
if self.module_est_charge(nom):
print("Le module {0} est déjà chargé.".format(nom))
else:
package = __import__(rep + "." + nom)
module = getattr(getattr(package, nom), \
nom.capitalize())
setattr(self, nom, module(self, parser_cmd))
def decharger_module(self, m_type, nom):
"""Méthode permettant de décharger un module.
Elle se charge :
- d'appeler la méthode detruire du module
- de supprimer le module des modules dans sys.modules
- de supprimer l'instance du module dans self
"""
if m_type == "primaire":
rep = REP_PRIMAIRES
elif m_type == "secondaire":
rep = REP_SECONDAIRES
else:
raise ValueError("le type {0} n'est ni primaire ni secondaire" \
                    .format(m_type))
nom_complet = rep + "." + nom
for cle in list(sys.modules.keys()):
if cle.startswith(nom_complet + "."):
del sys.modules[cle]
if self.module_est_charge(nom):
            getattr(self, nom).detruire()
delattr(self, nom)
else:
print("{0} n'est pas dans les attributs de l'importeur".format(nom))
def recharger_module(self, parser_cmd, m_type, nom):
"""Cette méthode permet de recharger un module. Elle passe par :
- decharger_module
- charger_module
"""
        self.decharger_module(m_type, nom)
        self.charger_module(parser_cmd, m_type, nom)
def config_module(self, nom):
"""Méthode chargée de configurer ou reconfigurer un module."""
if self.module_est_charge(nom):
getattr(self, nom).config()
else:
print("{0} n'existe pas ou n'est pas chargé.".format(nom))
def init_module(self, nom):
"""Méthode chargée d'initialiser un module."""
if self.module_est_charge(nom) and getattr(self, nom).statut == \
CONFIGURE:
getattr(self, nom).init()
else:
print("{0} n'existe pas ou n'est pas configuré.".format(nom))
| TheProjecter/kassie | src/bases/importeur.py | Python | bsd-3-clause | 10,203 |
input_name = '../examples/diffusion/poisson.py'
output_name = 'test_poisson.vtk'
from testsBasic import TestInput
class Test( TestInput ):
pass
| olivierverdier/sfepy | tests/test_input_poisson.py | Python | bsd-3-clause | 149 |
"""
XStatic resource package
See package 'XStatic' for documentation and basic tools.
"""
# official name, upper/lowercase allowed, no spaces
DISPLAY_NAME = 'Angular-lrdragndrop'
# name used for PyPi
PACKAGE_NAME = 'XStatic-%s' % DISPLAY_NAME
NAME = __name__.split('.')[-1] # package name (e.g. 'foo' or 'foo_bar')
                               # please use an all-lowercase valid python
# package name
VERSION = '1.0.2' # version of the packaged files, please use the upstream
# version number
BUILD = '4' # our package build number, so we can release new builds
# with fixes for xstatic stuff.
PACKAGE_VERSION = VERSION + '.' + BUILD # version used for PyPi
DESCRIPTION = "%s %s (XStatic packaging standard)" % (DISPLAY_NAME, VERSION)
PLATFORMS = 'any'
CLASSIFIERS = []
KEYWORDS = 'drag-n-drop angular table lrdragndrop xstatic'
# XStatic-* package maintainer:
MAINTAINER = 'Thai Tran'
MAINTAINER_EMAIL = '[email protected]'
# this refers to the project homepage of the stuff we packaged:
HOMEPAGE = 'https://github.com/lorenzofox3/lrDragNDrop'
# this refers to all files:
LICENSE = 'MIT'
from os.path import join, dirname
BASE_DIR = join(dirname(__file__), 'data')
# linux package maintainers just can point to their file locations like this:
#BASE_DIR = '/usr/share/javascript/' + NAME
# location of the Javascript file that's the entry point for this package, if
# one exists, relative to BASE_DIR
MAIN='lrdragndrop.js'
LOCATIONS = {
# CDN locations (if no public CDN exists, use an empty dict)
# if value is a string, it is a base location, just append relative
# path/filename. if value is a dict, do another lookup using the
# relative path/filename you want.
# your relative path/filenames should usually be without version
# information, because either the base dir/url is exactly for this
# version or the mapping will care for accessing this version.
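    # A purely hypothetical illustration of the shape such an entry could take
    # (this package ships no CDN mapping):
    # ('examplecdn', 'https'): 'https://cdn.example.com/lrdragndrop/1.0.2/',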
}
| stackforge/xstatic-angular-lrdragndrop | xstatic/pkg/angular_lrdragndrop/__init__.py | Python | mit | 1,969 |
# coding: utf-8
from pathlib import Path
import pandas as pd
import lightgbm as lgb
if lgb.compat.MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You need to install matplotlib and restart your session for plot_example.py.')
print('Loading data...')
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / 'regression'
df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t')
df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_test = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
'num_leaves': 5,
'metric': ('l1', 'l2'),
'verbose': 0
}
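# 'l1' and 'l2' are LightGBM's aliases for mean absolute error and mean
# squared error; both metrics are evaluated on each validation set and
# recorded for the plots below.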
evals_result = {} # to record eval results for plotting
print('Starting training...')
# train
gbm = lgb.train(
params,
lgb_train,
num_boost_round=100,
valid_sets=[lgb_train, lgb_test],
feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],
categorical_feature=[21],
callbacks=[
lgb.log_evaluation(10),
lgb.record_evaluation(evals_result)
]
)
print('Plotting metrics recorded during training...')
ax = lgb.plot_metric(evals_result, metric='l1')
plt.show()
print('Plotting feature importances...')
ax = lgb.plot_importance(gbm, max_num_features=10)
plt.show()
print('Plotting split value histogram...')
ax = lgb.plot_split_value_histogram(gbm, feature='f26', bins='auto')
plt.show()
print('Plotting 54th tree...') # one tree uses a categorical feature to split
ax = lgb.plot_tree(gbm, tree_index=53, figsize=(15, 15), show_info=['split_gain'])
plt.show()
print('Plotting 54th tree with graphviz...')
graph = lgb.create_tree_digraph(gbm, tree_index=53, name='Tree54')
graph.render(view=True)
| henry0312/LightGBM | examples/python-guide/plot_example.py | Python | mit | 2,004 |
from .sql import *
__all__ = ['DBAdapter', 'get_db_adapter', 'async_atomic', 'async_atomic_func', 'get_db_settings']
| technomaniac/trelliopg | trelliopg/__init__.py | Python | mit | 118 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Thierry Sallé (@seuf)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'
}
DOCUMENTATION = '''
---
module: grafana_dashboard
author:
- Thierry Sallé (@seuf)
version_added: "2.5"
short_description: Manage Grafana dashboards
description:
- Create, update, delete, export Grafana dashboards via API.
options:
url:
description:
- The Grafana URL.
required: true
aliases: [ grafana_url ]
version_added: 2.7
url_username:
description:
- The Grafana API user.
default: admin
aliases: [ grafana_user ]
version_added: 2.7
url_password:
description:
- The Grafana API password.
default: admin
aliases: [ grafana_password ]
version_added: 2.7
grafana_api_key:
description:
- The Grafana API key.
- If set, I(grafana_user) and I(grafana_password) will be ignored.
org_id:
description:
- The Grafana Organisation ID where the dashboard will be imported / exported.
 - Not used when I(grafana_api_key) is set, because the grafana_api_key only belongs to one organisation.
default: 1
state:
description:
- State of the dashboard.
required: true
choices: [ absent, export, present ]
default: present
slug:
description:
- Deprecated since Grafana 5. Use grafana dashboard uid instead.
- slug of the dashboard. It's the friendly url name of the dashboard.
- When C(state) is C(present), this parameter can override the slug in the meta section of the json file.
- If you want to import a json dashboard exported directly from the interface (not from the api),
you have to specify the slug parameter because there is no meta section in the exported json.
uid:
version_added: 2.7
description:
 - uid of the dashboard to export when C(state) is C(export) or C(absent).
path:
description:
- The path to the json file containing the Grafana dashboard to import or export.
overwrite:
description:
- Override existing dashboard when state is present.
type: bool
default: 'no'
message:
description:
- Set a commit message for the version history.
- Only used when C(state) is C(present).
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
- This should only be used on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client authentication.
- This file can also include the key as well, and if the key is included, client_key is not required
version_added: 2.7
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL client
- authentication. If client_cert contains both the certificate and key, this option is not required
version_added: 2.7
use_proxy:
description:
- Boolean of whether or not to use proxy.
default: 'yes'
type: bool
version_added: 2.7
'''
EXAMPLES = '''
- hosts: localhost
connection: local
tasks:
- name: Import Grafana dashboard foo
grafana_dashboard:
grafana_url: http://grafana.company.com
grafana_api_key: "{{ grafana_api_key }}"
state: present
message: Updated by ansible
overwrite: yes
path: /path/to/dashboards/foo.json
- name: Export dashboard
grafana_dashboard:
grafana_url: http://grafana.company.com
grafana_user: "admin"
grafana_password: "{{ grafana_password }}"
org_id: 1
state: export
uid: "000000653"
path: "/path/to/dashboards/000000653.json"
'''
RETURN = '''
---
uid:
description: uid or slug of the created / deleted / exported dashboard.
returned: success
type: string
sample: 000000063
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url, url_argument_spec
from ansible.module_utils._text import to_native
__metaclass__ = type
class GrafanaAPIException(Exception):
pass
class GrafanaMalformedJson(Exception):
pass
class GrafanaExportException(Exception):
pass
class GrafanaDeleteException(Exception):
pass
def grafana_switch_organisation(module, grafana_url, org_id, headers):
r, info = fetch_url(module, '%s/api/user/using/%s' % (grafana_url, org_id), headers=headers, method='POST')
if info['status'] != 200:
raise GrafanaAPIException('Unable to switch to organization %s : %s' % (org_id, info))
def grafana_headers(module, data):
headers = {'content-type': 'application/json; charset=utf8'}
if 'grafana_api_key' in data and data['grafana_api_key']:
headers['Authorization'] = "Bearer %s" % data['grafana_api_key']
else:
module.params['force_basic_auth'] = True
grafana_switch_organisation(module, data['grafana_url'], data['org_id'], headers)
return headers
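# For reference, a sketch of the two header shapes grafana_headers() builds:
# with an API key the call yields
#   {'content-type': 'application/json; charset=utf8', 'Authorization': 'Bearer <key>'}
# and without one it keeps only the content-type header, sets force_basic_auth
# so fetch_url() sends HTTP basic auth, and switches to the requested org_id.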
def get_grafana_version(module, grafana_url, headers):
grafana_version = None
r, info = fetch_url(module, '%s/api/frontend/settings' % grafana_url, headers=headers, method='GET')
if info['status'] == 200:
try:
settings = json.loads(r.read())
grafana_version = str.split(settings['buildInfo']['version'], '.')[0]
except Exception as e:
raise GrafanaAPIException(e)
else:
raise GrafanaAPIException('Unable to get grafana version : %s' % info)
return int(grafana_version)
def grafana_dashboard_exists(module, grafana_url, uid, headers):
dashboard_exists = False
dashboard = {}
grafana_version = get_grafana_version(module, grafana_url, headers)
if grafana_version >= 5:
r, info = fetch_url(module, '%s/api/dashboards/uid/%s' % (grafana_url, uid), headers=headers, method='GET')
else:
r, info = fetch_url(module, '%s/api/dashboards/db/%s' % (grafana_url, uid), headers=headers, method='GET')
if info['status'] == 200:
dashboard_exists = True
try:
dashboard = json.loads(r.read())
except Exception as e:
raise GrafanaAPIException(e)
elif info['status'] == 404:
dashboard_exists = False
else:
raise GrafanaAPIException('Unable to get dashboard %s : %s' % (uid, info))
return dashboard_exists, dashboard
def grafana_create_dashboard(module, data):
# define data payload for grafana API
try:
with open(data['path'], 'r') as json_file:
payload = json.load(json_file)
except Exception as e:
raise GrafanaAPIException("Can't load json file %s" % to_native(e))
# define http header
headers = grafana_headers(module, data)
grafana_version = get_grafana_version(module, data['grafana_url'], headers)
if grafana_version < 5:
if data.get('slug'):
uid = data['slug']
elif 'meta' in payload and 'slug' in payload['meta']:
uid = payload['meta']['slug']
else:
raise GrafanaMalformedJson('No slug found in json. Needed with grafana < 5')
else:
if data.get('uid'):
uid = data['uid']
elif 'uid' in payload['dashboard']:
uid = payload['dashboard']['uid']
else:
uid = None
# test if dashboard already exists
dashboard_exists, dashboard = grafana_dashboard_exists(module, data['grafana_url'], uid, headers=headers)
result = {}
if dashboard_exists is True:
if dashboard == payload:
# unchanged
result['uid'] = uid
result['msg'] = "Dashboard %s unchanged." % uid
result['changed'] = False
else:
# update
if 'overwrite' in data and data['overwrite']:
payload['overwrite'] = True
if 'message' in data and data['message']:
payload['message'] = data['message']
r, info = fetch_url(module, '%s/api/dashboards/db' % data['grafana_url'], data=json.dumps(payload), headers=headers, method='POST')
if info['status'] == 200:
if grafana_version >= 5:
try:
dashboard = json.loads(r.read())
uid = dashboard['uid']
except Exception as e:
raise GrafanaAPIException(e)
result['uid'] = uid
result['msg'] = "Dashboard %s updated" % uid
result['changed'] = True
else:
body = json.loads(info['body'])
raise GrafanaAPIException('Unable to update the dashboard %s : %s' % (uid, body['message']))
else:
# create
if 'dashboard' not in payload:
payload = {'dashboard': payload}
r, info = fetch_url(module, '%s/api/dashboards/db' % data['grafana_url'], data=json.dumps(payload), headers=headers, method='POST')
if info['status'] == 200:
result['msg'] = "Dashboard %s created" % uid
result['changed'] = True
if grafana_version >= 5:
try:
dashboard = json.loads(r.read())
uid = dashboard['uid']
except Exception as e:
raise GrafanaAPIException(e)
result['uid'] = uid
else:
raise GrafanaAPIException('Unable to create the new dashboard %s : %s - %s.' % (uid, info['status'], info))
return result
def grafana_delete_dashboard(module, data):
# define http headers
headers = grafana_headers(module, data)
grafana_version = get_grafana_version(module, data['grafana_url'], headers)
if grafana_version < 5:
if data.get('slug'):
uid = data['slug']
else:
raise GrafanaMalformedJson('No slug parameter. Needed with grafana < 5')
else:
if data.get('uid'):
uid = data['uid']
else:
            raise GrafanaDeleteException('No uid specified')
# test if dashboard already exists
dashboard_exists, dashboard = grafana_dashboard_exists(module, data['grafana_url'], uid, headers=headers)
result = {}
if dashboard_exists is True:
# delete
if grafana_version < 5:
r, info = fetch_url(module, '%s/api/dashboards/db/%s' % (data['grafana_url'], uid), headers=headers, method='DELETE')
else:
r, info = fetch_url(module, '%s/api/dashboards/uid/%s' % (data['grafana_url'], uid), headers=headers, method='DELETE')
if info['status'] == 200:
result['msg'] = "Dashboard %s deleted" % uid
result['changed'] = True
result['uid'] = uid
else:
            raise GrafanaAPIException('Unable to delete the dashboard %s : %s' % (uid, info))
else:
# dashboard does not exist, do nothing
result = {'msg': "Dashboard %s does not exist." % uid,
'changed': False,
'uid': uid}
return result
def grafana_export_dashboard(module, data):
# define http headers
headers = grafana_headers(module, data)
grafana_version = get_grafana_version(module, data['grafana_url'], headers)
if grafana_version < 5:
if data.get('slug'):
uid = data['slug']
else:
raise GrafanaMalformedJson('No slug parameter. Needed with grafana < 5')
else:
if data.get('uid'):
uid = data['uid']
else:
raise GrafanaExportException('No uid specified')
# test if dashboard already exists
dashboard_exists, dashboard = grafana_dashboard_exists(module, data['grafana_url'], uid, headers=headers)
if dashboard_exists is True:
try:
with open(data['path'], 'w') as f:
f.write(json.dumps(dashboard))
except Exception as e:
raise GrafanaExportException("Can't write json file : %s" % to_native(e))
result = {'msg': "Dashboard %s exported to %s" % (uid, data['path']),
'uid': uid,
'changed': True}
else:
result = {'msg': "Dashboard %s does not exist." % uid,
'uid': uid,
'changed': False}
return result
def main():
# use the predefined argument spec for url
argument_spec = url_argument_spec()
# remove unnecessary arguments
del argument_spec['force']
del argument_spec['force_basic_auth']
del argument_spec['http_agent']
argument_spec.update(
state=dict(choices=['present', 'absent', 'export'], default='present'),
url=dict(aliases=['grafana_url'], required=True),
url_username=dict(aliases=['grafana_user'], default='admin'),
url_password=dict(aliases=['grafana_password'], default='admin', no_log=True),
grafana_api_key=dict(type='str', no_log=True),
org_id=dict(default=1, type='int'),
uid=dict(type='str'),
slug=dict(type='str'),
path=dict(type='str'),
overwrite=dict(type='bool', default=False),
message=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False,
required_together=[['url_username', 'url_password', 'org_id']],
mutually_exclusive=[['grafana_user', 'grafana_api_key'], ['uid', 'slug']],
)
try:
if module.params['state'] == 'present':
result = grafana_create_dashboard(module, module.params)
elif module.params['state'] == 'absent':
result = grafana_delete_dashboard(module, module.params)
else:
result = grafana_export_dashboard(module, module.params)
except GrafanaAPIException as e:
module.fail_json(
failed=True,
msg="error : %s" % to_native(e)
)
return
except GrafanaMalformedJson as e:
module.fail_json(
failed=True,
msg="error : json file does not contain a meta section with a slug parameter, or you did'nt specify the slug parameter"
)
return
except GrafanaDeleteException as e:
module.fail_json(
failed=True,
msg="error : Can't delete dashboard : %s" % to_native(e)
)
return
except GrafanaExportException as e:
module.fail_json(
failed=True,
msg="error : Can't export dashboard : %s" % to_native(e)
)
return
module.exit_json(
failed=False,
**result
)
return
if __name__ == '__main__':
main()
| sgerhart/ansible | lib/ansible/modules/monitoring/grafana_dashboard.py | Python | mit | 14,915 |
################ Lispy: Scheme Interpreter in Python
## (c) Peter Norvig, 2010-14; See http://norvig.com/lispy.html
################ Types
from __future__ import division
Symbol = str # A Lisp Symbol is implemented as a Python str
List = list # A Lisp List is implemented as a Python list
Number = (int, float) # A Lisp Number is implemented as a Python int or float
################ Parsing: parse, tokenize, and read_from_tokens
def parse(program):
"Read a Scheme expression from a string."
return read_from_tokens(tokenize(program))
def tokenize(s):
"Convert a string into a list of tokens."
return s.replace('(',' ( ').replace(')',' ) ').split()
def read_from_tokens(tokens):
"Read an expression from a sequence of tokens."
if len(tokens) == 0:
raise SyntaxError('unexpected EOF while reading')
token = tokens.pop(0)
if '(' == token:
L = []
while tokens[0] != ')':
L.append(read_from_tokens(tokens))
tokens.pop(0) # pop off ')'
return L
elif ')' == token:
raise SyntaxError('unexpected )')
else:
return atom(token)
def atom(token):
"Numbers become numbers; every other token is a symbol."
try: return int(token)
except ValueError:
try: return float(token)
except ValueError:
return Symbol(token)
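# A quick sketch of the parsing pipeline defined above (doctest-style; the
# values shown are what these functions return for this input):
# >>> tokenize("(+ 1 (* 2 3))")
# ['(', '+', '1', '(', '*', '2', '3', ')', ')']
# >>> parse("(+ 1 (* 2 3))")
# ['+', 1, ['*', 2, 3]]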
################ Environments
def standard_env():
"An environment with some Scheme standard procedures."
import math, operator as op
env = Env()
env.update(vars(math)) # sin, cos, sqrt, pi, ...
env.update({
        '+':op.add, '-':op.sub, '*':op.mul, '/':op.truediv,
'>':op.gt, '<':op.lt, '>=':op.ge, '<=':op.le, '=':op.eq,
'abs': abs,
'append': op.add,
        'apply': lambda proc, args: proc(*args),  # apply is not a builtin in Python 3
'begin': lambda *x: x[-1],
'car': lambda x: x[0],
'cdr': lambda x: x[1:],
'cons': lambda x,y: [x] + y,
'eq?': op.is_,
'equal?': op.eq,
'length': len,
'list': lambda *x: list(x),
'list?': lambda x: isinstance(x,list),
'map': map,
'max': max,
'min': min,
'not': op.not_,
'null?': lambda x: x == [],
'number?': lambda x: isinstance(x, Number),
'procedure?': callable,
'round': round,
'symbol?': lambda x: isinstance(x, Symbol),
})
return env
class Env(dict):
"An environment: a dict of {'var':val} pairs, with an outer Env."
def __init__(self, parms=(), args=(), outer=None):
self.update(zip(parms, args))
self.outer = outer
def find(self, var):
"Find the innermost Env where var appears."
return self if (var in self) else self.outer.find(var)
global_env = standard_env()
################ Interaction: A REPL
def repl(prompt='lis.py> '):
"A prompt-read-eval-print loop."
while True:
        val = eval(parse(input(prompt)))
if val is not None:
print(lispstr(val))
def lispstr(exp):
"Convert a Python object back into a Lisp-readable string."
if isinstance(exp, list):
return '(' + ' '.join(map(lispstr, exp)) + ')'
else:
return str(exp)
################ Procedures
class Procedure(object):
"A user-defined Scheme procedure."
def __init__(self, parms, body, env):
self.parms, self.body, self.env = parms, body, env
def __call__(self, *args):
return eval(self.body, Env(self.parms, args, self.env))
################ eval
def eval(x, env=global_env):
"Evaluate an expression in an environment."
if isinstance(x, Symbol): # variable reference
return env.find(x)[x]
elif not isinstance(x, List): # constant literal
return x
elif x[0] == 'quote': # (quote exp)
(_, exp) = x
return exp
elif x[0] == 'if': # (if test conseq alt)
(_, test, conseq, alt) = x
exp = (conseq if eval(test, env) else alt)
return eval(exp, env)
elif x[0] == 'define': # (define var exp)
(_, var, exp) = x
env[var] = eval(exp, env)
elif x[0] == 'set!': # (set! var exp)
(_, var, exp) = x
env.find(var)[var] = eval(exp, env)
elif x[0] == 'lambda': # (lambda (var...) body)
(_, parms, body) = x
return Procedure(parms, body, env)
else: # (proc arg...)
proc = eval(x[0], env)
args = [eval(exp, env) for exp in x[1:]]
return proc(*args)
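# A minimal evaluation sketch using the machinery above (doctest-style):
# >>> eval(parse("(define circle-area (lambda (r) (* pi (* r r))))"))
# >>> eval(parse("(circle-area 10)"))
# 314.1592653589793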
| obask/lispify | src/python3/lis.py | Python | mit | 4,615 |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node disconnect and ban behavior"""
import time
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes_bi,
wait_until,
)
class DisconnectBanTest(GuldenTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
self.log.info("Test setban and listbanned RPCs")
self.log.info("setban: successfully ban single IP address")
assert_equal(len(self.nodes[1].getpeerinfo()), 2) # node1 should have 2 connections to node0 at this point
self.nodes[1].setban(subnet="127.0.0.1", command="add")
wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
assert_equal(len(self.nodes[1].getpeerinfo()), 0) # all nodes must be disconnected at this point
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("clearbanned: successfully clear ban list")
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].setban("127.0.0.0/24", "add")
self.log.info("setban: fail to ban an already banned subnet")
assert_equal(len(self.nodes[1].listbanned()), 1)
assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
self.log.info("setban: fail to ban an invalid subnet")
assert_raises_rpc_error(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
assert_equal(len(self.nodes[1].listbanned()), 1) # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
self.log.info("setban remove: fail to unban a non-banned subnet")
assert_raises_rpc_error(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("setban remove: successfully unban subnet")
self.nodes[1].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.log.info("setban: test persistence across node restart")
self.nodes[1].setban("127.0.0.0/32", "add")
self.nodes[1].setban("127.0.0.0/24", "add")
# Set the mocktime so we can control when bans expire
old_time = int(time.time())
self.nodes[1].setmocktime(old_time)
self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1 seconds
self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) # ban for 1000 seconds
listBeforeShutdown = self.nodes[1].listbanned()
assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
# Move time forward by 3 seconds so the third ban has expired
self.nodes[1].setmocktime(old_time + 3)
assert_equal(len(self.nodes[1].listbanned()), 3)
self.stop_node(1)
self.start_node(1)
listAfterShutdown = self.nodes[1].listbanned()
assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
assert_equal("/19" in listAfterShutdown[2]['address'], True)
# Clear ban lists
self.nodes[1].clearbanned()
connect_nodes_bi(self.nodes, 0, 1)
self.log.info("Test disconnectnode RPCs")
self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
node1 = self.nodes[0].getpeerinfo()[0]['addr']
assert_raises_rpc_error(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, node_id=node1)
self.log.info("disconnectnode: fail to disconnect when calling with junk address")
assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street")
self.log.info("disconnectnode: successfully disconnect node by address")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
self.nodes[0].disconnectnode(address=address1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully reconnect node")
connect_nodes_bi(self.nodes, 0, 1) # reconnect the node
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully disconnect node by node id")
id1 = self.nodes[0].getpeerinfo()[0]['id']
self.nodes[0].disconnectnode(node_id=id1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]
if __name__ == '__main__':
DisconnectBanTest().main()
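# For reference only: rough command-line equivalents of the RPCs exercised in
# this test, assuming a running node and the project's CLI binary (the binary
# name below is a placeholder, not taken from this file):
#   <coin>-cli setban "127.0.0.0/24" add
#   <coin>-cli listbanned
#   <coin>-cli clearbanned
#   <coin>-cli disconnectnode "<ip:port>"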
| nlgcoin/guldencoin-official | test/functional/p2p_disconnect_ban.py | Python | mit | 5,333 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/collectible/collectible_parts/shared_sculpture_structure_04.iff"
result.attribute_template_id = -1
result.stfName("collectible_loot_items_n","sculpture_structure_04")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | obi-two/Rebelion | data/scripts/templates/object/tangible/loot/collectible/collectible_parts/shared_sculpture_structure_04.py | Python | mit | 508 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cstr
from frappe.model.document import Document
class LDAPSettings(Document):
def validate(self):
if not self.flags.ignore_mandatory:
self.validate_ldap_credentails()
def validate_ldap_credentails(self):
try:
import ldap
conn = ldap.initialize(self.ldap_server_url)
try:
if self.ssl_tls_mode == 'StartTLS':
conn.set_option(ldap.OPT_X_TLS_DEMAND, True)
if self.require_trusted_certificate == 'Yes':
conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)
conn.start_tls_s()
except:
frappe.throw(_("StartTLS is not supported"))
conn.simple_bind_s(self.base_dn, self.get_password(raise_exception=False))
except ImportError:
msg = """
<div>
{{_("Seems ldap is not installed on system.<br>Guidelines to install ldap dependancies and python package")}},
<a href="https://discuss.erpnext.com/t/frappe-v-7-1-beta-ldap-dependancies/15841" target="_blank">{{_("Click here")}}</a>,
</div>
"""
frappe.throw(msg, title=_("LDAP Not Installed"))
except ldap.LDAPError:
conn.unbind_s()
frappe.throw(_("Incorrect UserId or Password"))
def get_ldap_settings():
try:
settings = frappe.get_doc("LDAP Settings")
settings.update({
"method": "frappe.integrations.doctype.ldap_settings.ldap_settings.login"
})
return settings
except Exception:
# this will return blank settings
return frappe._dict()
@frappe.whitelist(allow_guest=True)
def login():
#### LDAP LOGIN LOGIC #####
args = frappe.form_dict
user = authenticate_ldap_user(frappe.as_unicode(args.usr), frappe.as_unicode(args.pwd))
frappe.local.login_manager.user = user.name
frappe.local.login_manager.post_login()
# because of a GET request!
frappe.db.commit()
def authenticate_ldap_user(user=None, password=None):
dn = None
params = {}
settings = get_ldap_settings()
try:
import ldap
except:
msg = """
<div>
{{_("Seems ldap is not installed on system.")}}<br>
<a href"https://discuss.erpnext.com/t/frappe-v-7-1-beta-ldap-dependancies/15841">{{_("Click here")}}</a>,
{{_("Guidelines to install ldap dependancies and python")}}
</div>
"""
frappe.throw(msg, title=_("LDAP Not Installed"))
conn = ldap.initialize(settings.ldap_server_url)
try:
try:
# set TLS settings for secure connection
if settings.ssl_tls_mode == 'StartTLS':
conn.set_option(ldap.OPT_X_TLS_DEMAND, True)
if settings.require_trusted_certificate == 'Yes':
conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)
conn.start_tls_s()
except:
frappe.throw(_("StartTLS is not supported"))
# simple_bind_s is synchronous binding to server, it takes two param DN and password
conn.simple_bind_s(settings.base_dn, settings.get_password(raise_exception=False))
		# search for the user entry under the configured organizational unit
		# available scope options control how deep the search goes:
		# LDAP_SCOPE_BASE, LDAP_SCOPE_ONELEVEL, LDAP_SCOPE_SUBTREE
result = conn.search_s(settings.organizational_unit, ldap.SCOPE_SUBTREE,
settings.ldap_search_string.format(user))
for dn, r in result:
dn = cstr(dn)
params["email"] = cstr(r[settings.ldap_email_field][0])
params["username"] = cstr(r[settings.ldap_username_field][0])
params["first_name"] = cstr(r[settings.ldap_first_name_field][0])
if dn:
conn.simple_bind_s(dn, frappe.as_unicode(password))
return create_user(params)
else:
frappe.throw(_("Not a valid LDAP user"))
except ldap.LDAPError:
conn.unbind_s()
frappe.throw(_("Incorrect UserId or Password"))
def create_user(params):
if frappe.db.exists("User", params["email"]):
return frappe.get_doc("User", params["email"])
else:
params.update({
"doctype": "User",
"send_welcome_email": 0,
"language": "",
"user_type": "System User",
"roles": [{
"role": _("Blogger")
}]
})
user = frappe.get_doc(params).insert(ignore_permissions=True)
frappe.db.commit()
return user
| ESS-LLP/frappe | frappe/integrations/doctype/ldap_settings/ldap_settings.py | Python | mit | 4,156 |
#!/usr/bin/env python
#coding: utf-8
## Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param num, a list of integers
# @return a tree node
def sortedArrayToBST(self, num):
if not num: return None
ln = len(num)
if ln == 1: return TreeNode(num[0])
        m = ln // 2
root = TreeNode(num[m])
root.left = self.sortedArrayToBST(num[:m])
root.right = self.sortedArrayToBST(num[m+1:])
return root
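# Usage sketch (doctest-style) for the conversion above:
# >>> root = Solution().sortedArrayToBST([1, 2, 3, 4, 5])
# >>> (root.val, root.left.val, root.right.val)
# (3, 2, 5)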
| wh-acmer/minixalpha-acm | LeetCode/Python/convert_sorted_array_to_binary_search_tree.py | Python | mit | 583 |
from PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal
from PyQt5.Qt import QSystemTrayIcon, QIcon
class TrayIcon(QSystemTrayIcon):
ActivationReason = ['Unknown', 'Context', 'DoubleClick', 'Trigger', 'MiddleClick']
onactivate = pyqtSignal(int, str)
onmessageclick = pyqtSignal()
def __init__(self, parent, toolTip = '', icon = ''):
super(TrayIcon, self).__init__(parent)
self.setObjectName('trayIcon')
self.setIcon(icon)
self.setToolTip(toolTip)
self.activated.connect(self.activateHandler)
self.messageClicked.connect(self.onmessageclick)
# Slots
    # Set the tooltip
@pyqtSlot(str)
def setToolTip(self, toolTip):
super(TrayIcon, self).setToolTip(toolTip)
    # Set the icon
@pyqtSlot(str)
def setIcon(self, icon):
if icon:
icon = QIcon(icon)
else:
icon = self.parent().windowIcon()
super(TrayIcon, self).setIcon(QIcon(icon))
    # Set the context (right-click) menu
@pyqtSlot(QObject)
def setContextMenu(self, menu):
super(TrayIcon, self).setContextMenu(menu)
    # Return whether the tray icon is visible
@pyqtSlot(result = bool)
def isVisible(self):
return super(TrayIcon, self).isVisible()
    # Return whether balloon messages are supported
@pyqtSlot(result = bool)
def supportsMessages(self):
return super(TrayIcon, self).supportsMessages()
    # Return whether a system tray icon is available
@pyqtSlot(result = bool)
def isSystemTrayAvailable(self):
return super(TrayIcon, self).isSystemTrayAvailable()
    # Show a tray message
    # showMessage
    # Set visibility
    # setVisible
    # Show
    # show
    # Hide
    # hide
    # Signals
def activateHandler(self, reason):
self.onactivate.emit(reason, TrayIcon.ActivationReason[reason])
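# Usage sketch (assumes a PyQt5 application and an icon file on disk; the file
# name and strings below are placeholders):
#
#   import sys
#   from PyQt5.QtWidgets import QApplication, QMainWindow
#   app = QApplication(sys.argv)
#   window = QMainWindow()
#   tray = TrayIcon(window, toolTip='My app', icon='app.png')
#   tray.onactivate.connect(lambda reason, name: print(name))
#   tray.show()
#   sys.exit(app.exec_())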
| xiruibing/hae | src/trayicon.py | Python | mit | 1,695 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/crafted/reactor/shared_base_reactor_subcomponent_mk5.iff"
result.attribute_template_id = 8
result.stfName("space_crafting_n","base_reactor_subcomponent_mk5")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | obi-two/Rebelion | data/scripts/templates/object/tangible/ship/crafted/reactor/shared_base_reactor_subcomponent_mk5.py | Python | mit | 499 |
# -*- coding: utf-8 -*-
from collections import defaultdict
import pprint
import re
_re_num = re.compile(r'\s(?P<num>\d+)\s+(?P<name>(RPL|ERR)_\w+)\s*(?P<_>.*)')
_re_mask = re.compile(r'^\s{24,25}(?P<_>("(<|:).*|\S.*"$))')
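# The patterns above target entries in rfc1459.txt that look roughly like this
# (abridged; the exact indentation in the RFC text may differ):
#
#        401     ERR_NOSUCHNICK
#                        "<nickname> :No such nick/channel"
#
# _re_num picks up the numeric code and reply name from the first line;
# _re_mask collects the quoted reply template from the indented lines below it.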
def main():
print('Parsing rfc file...')
item = None
items = []
out = open('irc3/_rfc.py', 'w')
with open('irc3/rfc1459.txt') as fd:
for line in fd:
line = line.replace('<host> * <host>', '<host> * <host1>')
line = line.replace('<# visible>', '<visible>')
line = line.replace('<H|G>[*][@|+]', '<modes>')
line = line.replace('<nick!user|*!*>@<host|server>', '<mask>')
match = _re_num.search(line)
if match is not None:
if item:
items.append((int(item['num']), item))
item = defaultdict(list)
match = match.groupdict()
if '_' in match:
match.pop('_')
item.update(match)
match = _re_mask.search(line)
if match is not None:
item['mask'].append(match.groupdict()['_'])
_re_sub = re.compile('(?P<m><[^>]+>)')
out.write('''
class retcode(int):
name = None
re = None
'''.lstrip())
valids = set()
for i, item in sorted(items):
mask = item['mask']
if mask:
num = item['num']
valids.add(i)
out.write('\n')
out.write('%(name)s = retcode(%(num)s)\n' % item)
out.write('%(name)s.name = "%(name)s"\n' % item)
mask = [s.strip('"\\ ') for s in mask]
omask = ' '.join(mask)
params = []
def repl(v):
v = v.lower()
v = v.replace('nickname', 'nick')
v = v.replace('nicks', 'nicknames')
for c in '!@*':
v = v.replace(c, '')
for c in '| ':
v = v.replace(c, '_')
v = v.strip(' _')
if v.endswith('_name'):
v = v[:-5]
if v == 'client_ip_address_in_dot_form':
v = 'clientip'
if v == 'integer':
for k in 'xyz':
if k not in params:
v = k
break
if v == 'command':
v = 'cmd'
if v == 'real':
v = 'realname'
if v == 'name' and 'nick' not in params:
v = 'nick'
if v == 'user':
if 'nick' not in params and num not in ('352',):
v = 'nick'
else:
v = 'username'
return v
def tsub(m):
v = m.groupdict()['m'].strip('<>')
v = repl(v)
params.append(v)
return '{%s}' % v
if item['num'] == '303':
omask = ':<nicknames>'
elif item['num'] == '311':
omask = omask.replace('*', '<m>')
elif item['num'] == '319':
omask = ':<channels>'
elif item['num'] == '353':
omask = '<m> <channel> :<nicknames>'
tpl = _re_sub.sub(tsub, omask)
for v in ((' %d ', '{days}'),
('%d:%02d:%02d', '{hours}'),
(':%-8s %-9s %-8s', '{x} {y} {z}')):
tpl = tpl.replace(*v)
tpl_ = [':{c.srv} ' + item['num'] + ' {c.nick} ']
if len(tpl) > 60:
tpl_.extend([':' + s for s in tpl.split(':', 1)])
else:
tpl_.append(tpl)
tpl = '\n '.join([repr(v) for v in tpl_])
params = []
def msub(m):
v = m.groupdict()['m'].strip('<>')
v = repl(v)
params.append(v)
return '(?P<%s>\S+)' % v
mask = _re_sub.sub(msub, omask)
if '???? ' in mask:
mask = mask.replace('???? ', r'\S+ ')
if ' * ' in mask:
mask = mask.replace(' * ', r' . ')
if ':' in mask:
mask = mask.split(':', 1)[0]
mask += ':(?P<data>.*)'
mask = '(?P<srv>\S+) ' + str(i) + ' (?P<me>\S+) "\n "' + mask
mask = mask.replace(
' (?P<server>\S+)',
' "\n "(?P<server>\S+)')
mask = mask.replace(
' (?P<sent_messages>\S+)',
' "\n "(?P<sent_messages>\S+)')
item['mask'] = mask
params = [p for p in params if '<%s>' % p in mask]
if '<data>' in mask and 'data' not in params:
params.append('data')
out.write('%(name)s.re = (\n "^:%(mask)s")\n' % item)
params = pprint.pformat(
['srv', 'me'] + params, width=60, indent=4)
if len(params) > 60:
params = params.replace('[', '[\n ')
out.write('%(name)s.tpl = (\n' % dict(item))
out.write(' %s)\n' % tpl)
out.write('%(name)s.params = %(p)s\n' % dict(item, p=params))
out.write('\n')
out.write('RETCODES = {\n')
for i, item in sorted(items):
if i in valids:
out.write(' %(num)s: %(name)s,\n' % item)
out.write('}\n')
if __name__ == '__main__':
main()
| gawel/irc3 | irc3/_parse_rfc.py | Python | mit | 5,523 |
from sha3 import sha3_256
from ethereum.utils import big_endian_to_int
def sha3(seed):
return sha3_256(bytes(seed)).digest()
# colors
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def DEBUG(*args, **kargs):
print(FAIL + repr(args) + repr(kargs) + ENDC)
colors = ['\033[9%dm' % i for i in range(0, 7)]
colors += ['\033[4%dm' % i for i in range(1, 8)]
num_colors = len(colors)
def cstr(num, txt):
if isinstance(num, bytes):
num = big_endian_to_int(num)
return '%s%s%s' % (colors[num % len(colors)], txt, ENDC)
def cprint(num, txt):
print cstr(num, txt)
def phx(x):
return x.encode('hex')[:8]
if __name__ == '__main__':
for i in range(len(colors)):
cprint(i, 'test')
| HydraChain/hydrachain | hydrachain/consensus/utils.py | Python | mit | 750 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_shaupaut.iff"
result.attribute_template_id = 9
result.stfName("monster_name","shaupaut")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | obi-two/Rebelion | data/scripts/templates/object/mobile/shared_shaupaut.py | Python | mit | 430 |
"""Main view for geo locator application"""
from django.shortcuts import render
def index(request):
if request.location:
location = request.location
else:
location = None
return render(request, "homepage.html", {'location': location})
| mindcube/mindcube-django-cookiecutter | {{cookiecutter.repo_name}}/project/apps/geo_locator/views.py | Python | mit | 265 |
#
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:twisted
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class ServiceException(TException):
"""
Attributes:
   - message: Readable message describing the error condition.
- stacktrace: Program stack trace
- inputs: Optional mapping
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
(2, TType.STRING, 'stacktrace', None, None, ), # 2
(3, TType.MAP, 'inputs', (TType.STRING,None,TType.STRING,None), None, ), # 3
)
def __init__(self, message=None, stacktrace=None, inputs=None,):
self.message = message
self.stacktrace = stacktrace
self.inputs = inputs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.stacktrace = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.inputs = {}
(_ktype1, _vtype2, _size0 ) = iprot.readMapBegin()
for _i4 in xrange(_size0):
_key5 = iprot.readString();
_val6 = iprot.readString();
self.inputs[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ServiceException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
if self.stacktrace is not None:
oprot.writeFieldBegin('stacktrace', TType.STRING, 2)
oprot.writeString(self.stacktrace)
oprot.writeFieldEnd()
if self.inputs is not None:
oprot.writeFieldBegin('inputs', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.inputs))
for kiter7,viter8 in self.inputs.items():
oprot.writeString(kiter7)
oprot.writeString(viter8)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocol.TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.message)
value = (value * 31) ^ hash(self.stacktrace)
value = (value * 31) ^ hash(self.inputs)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AuthorizationException(TException):
"""
Attributes:
   - message: Readable message describing the error condition.
- stacktrace: Program stack trace
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
(2, TType.STRING, 'stacktrace', None, None, ), # 2
)
def __init__(self, message=None, stacktrace=None,):
self.message = message
self.stacktrace = stacktrace
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.stacktrace = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AuthorizationException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
if self.stacktrace is not None:
oprot.writeFieldBegin('stacktrace', TType.STRING, 2)
oprot.writeString(self.stacktrace)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocol.TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.message)
value = (value * 31) ^ hash(self.stacktrace)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AuthenticationException(TException):
"""
Attributes:
   - message: Readable message describing the error condition.
- stacktrace: Program stack trace
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
(2, TType.STRING, 'stacktrace', None, None, ), # 2
)
def __init__(self, message=None, stacktrace=None,):
self.message = message
self.stacktrace = stacktrace
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.stacktrace = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AuthenticationException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
if self.stacktrace is not None:
oprot.writeFieldBegin('stacktrace', TType.STRING, 2)
oprot.writeString(self.stacktrace)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocol.TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.message)
value = (value * 31) ^ hash(self.stacktrace)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ObjectReferenceException(TException):
"""
Attributes:
   - message: Readable message describing the error condition.
- stacktrace: Program stack trace
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
(2, TType.STRING, 'stacktrace', None, None, ), # 2
)
def __init__(self, message=None, stacktrace=None,):
self.message = message
self.stacktrace = stacktrace
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.stacktrace = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ObjectReferenceException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
if self.stacktrace is not None:
oprot.writeFieldBegin('stacktrace', TType.STRING, 2)
oprot.writeString(self.stacktrace)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocol.TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.message)
value = (value * 31) ^ hash(self.stacktrace)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AttributeException(TException):
"""
Attributes:
   - message: Readable message describing the error condition.
- stacktrace: Program stack trace
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
(2, TType.STRING, 'stacktrace', None, None, ), # 2
)
def __init__(self, message=None, stacktrace=None,):
self.message = message
self.stacktrace = stacktrace
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.stacktrace = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AttributeException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
if self.stacktrace is not None:
oprot.writeFieldBegin('stacktrace', TType.STRING, 2)
oprot.writeString(self.stacktrace)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocol.TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.message)
value = (value * 31) ^ hash(self.stacktrace)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TypeException(TException):
"""
Attributes:
   - message: Readable message describing the error condition.
- stacktrace: Program stack trace
- valid_types: List of types that would have been acceptable.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'message', None, None, ), # 1
(2, TType.STRING, 'stacktrace', None, None, ), # 2
(3, TType.LIST, 'valid_types', (TType.STRING,None), None, ), # 3
)
def __init__(self, message=None, stacktrace=None, valid_types=None,):
self.message = message
self.stacktrace = stacktrace
self.valid_types = valid_types
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.stacktrace = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.valid_types = []
(_etype12, _size9) = iprot.readListBegin()
for _i13 in xrange(_size9):
_elem14 = iprot.readString();
self.valid_types.append(_elem14)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TypeException')
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 1)
oprot.writeString(self.message)
oprot.writeFieldEnd()
if self.stacktrace is not None:
oprot.writeFieldBegin('stacktrace', TType.STRING, 2)
oprot.writeString(self.stacktrace)
oprot.writeFieldEnd()
if self.valid_types is not None:
oprot.writeFieldBegin('valid_types', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.valid_types))
for iter15 in self.valid_types:
oprot.writeString(iter15)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.message is None:
raise TProtocol.TProtocolException(message='Required field message is unset!')
return
def __str__(self):
return repr(self)
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.message)
value = (value * 31) ^ hash(self.stacktrace)
value = (value * 31) ^ hash(self.valid_types)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Region:
"""
Attributes:
- contig_id: The identifier for the contig to which this region corresponds.
- strand: Either a "+" or a "-", for the strand on which the region is located.
- start: Starting position for this region.
- length: Distance from the start position that bounds the end of the region.
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'contig_id', None, None, ), # 1
(2, TType.STRING, 'strand', None, None, ), # 2
(3, TType.I64, 'start', None, None, ), # 3
(4, TType.I64, 'length', None, None, ), # 4
)
def __init__(self, contig_id=None, strand=None, start=None, length=None,):
self.contig_id = contig_id
self.strand = strand
self.start = start
self.length = length
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.contig_id = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.strand = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.start = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.length = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Region')
if self.contig_id is not None:
oprot.writeFieldBegin('contig_id', TType.STRING, 1)
oprot.writeString(self.contig_id)
oprot.writeFieldEnd()
if self.strand is not None:
oprot.writeFieldBegin('strand', TType.STRING, 2)
oprot.writeString(self.strand)
oprot.writeFieldEnd()
if self.start is not None:
oprot.writeFieldBegin('start', TType.I64, 3)
oprot.writeI64(self.start)
oprot.writeFieldEnd()
if self.length is not None:
oprot.writeFieldBegin('length', TType.I64, 4)
oprot.writeI64(self.length)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.contig_id)
value = (value * 31) ^ hash(self.strand)
value = (value * 31) ^ hash(self.start)
value = (value * 31) ^ hash(self.length)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
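# Illustrative only (not generated code): a Region value covering 1000 bases
# starting at position 5000 on the "+" strand of a hypothetical contig.
#   region = Region(contig_id="contig_1", strand="+", start=5000, length=1000)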
class Feature_id_filters:
"""
Filters passed to :meth:`get_feature_ids`
Attributes:
- type_list: List of Feature type strings.
- region_list: List of region specs.
For example::
[{"contig_id": str, "strand": "+"|"-",
"start": int, "length": int},...]
The Feature sequence begin and end are calculated as follows:
     - [start, start + length) for "+" strand
- (start - length, start] for "-" strand
- function_list: List of function strings.
- alias_list: List of alias strings.
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'type_list', (TType.STRING,None), [
], ), # 1
(2, TType.LIST, 'region_list', (TType.STRUCT,(Region, Region.thrift_spec)), [
], ), # 2
(3, TType.LIST, 'function_list', (TType.STRING,None), [
], ), # 3
(4, TType.LIST, 'alias_list', (TType.STRING,None), [
], ), # 4
)
def __init__(self, type_list=thrift_spec[1][4], region_list=thrift_spec[2][4], function_list=thrift_spec[3][4], alias_list=thrift_spec[4][4],):
if type_list is self.thrift_spec[1][4]:
type_list = [
]
self.type_list = type_list
if region_list is self.thrift_spec[2][4]:
region_list = [
]
self.region_list = region_list
if function_list is self.thrift_spec[3][4]:
function_list = [
]
self.function_list = function_list
if alias_list is self.thrift_spec[4][4]:
alias_list = [
]
self.alias_list = alias_list
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.type_list = []
(_etype19, _size16) = iprot.readListBegin()
for _i20 in xrange(_size16):
_elem21 = iprot.readString();
self.type_list.append(_elem21)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.region_list = []
(_etype25, _size22) = iprot.readListBegin()
for _i26 in xrange(_size22):
_elem27 = Region()
_elem27.read(iprot)
self.region_list.append(_elem27)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.function_list = []
(_etype31, _size28) = iprot.readListBegin()
for _i32 in xrange(_size28):
_elem33 = iprot.readString();
self.function_list.append(_elem33)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.alias_list = []
(_etype37, _size34) = iprot.readListBegin()
for _i38 in xrange(_size34):
_elem39 = iprot.readString();
self.alias_list.append(_elem39)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Feature_id_filters')
if self.type_list is not None:
oprot.writeFieldBegin('type_list', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.type_list))
for iter40 in self.type_list:
oprot.writeString(iter40)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.region_list is not None:
oprot.writeFieldBegin('region_list', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.region_list))
for iter41 in self.region_list:
iter41.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.function_list is not None:
oprot.writeFieldBegin('function_list', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.function_list))
for iter42 in self.function_list:
oprot.writeString(iter42)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.alias_list is not None:
oprot.writeFieldBegin('alias_list', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.alias_list))
for iter43 in self.alias_list:
oprot.writeString(iter43)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.type_list)
value = (value * 31) ^ hash(self.region_list)
value = (value * 31) ^ hash(self.function_list)
value = (value * 31) ^ hash(self.alias_list)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
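# Illustrative only (not generated code): filters selecting gene Features that
# overlap a hypothetical region; empty lists mean "do not filter on that
# dimension".
#   filters = Feature_id_filters(
#       type_list=["gene"],
#       region_list=[Region(contig_id="contig_1", strand="+", start=5000, length=1000)],
#       function_list=[],
#       alias_list=[])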
class Feature_id_mapping:
"""
Attributes:
- by_type: Mapping of Feature type string to a list of Feature IDs
- by_region: Mapping of contig ID, strand "+" or "-", and range "start--end" to
a list of Feature IDs. For example::
{'contig1': {'+': {'123--456': ['feature1', 'feature2'] }}}
- by_function: Mapping of function string to a list of Feature IDs
- by_alias: Mapping of alias string to a list of Feature IDs
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'by_type', (TType.STRING,None,TType.LIST,(TType.STRING,None)), {
}, ), # 1
(2, TType.MAP, 'by_region', (TType.STRING,None,TType.MAP,(TType.STRING,None,TType.MAP,(TType.STRING,None,TType.LIST,(TType.STRING,None)))), {
}, ), # 2
(3, TType.MAP, 'by_function', (TType.STRING,None,TType.LIST,(TType.STRING,None)), {
}, ), # 3
(4, TType.MAP, 'by_alias', (TType.STRING,None,TType.LIST,(TType.STRING,None)), {
}, ), # 4
)
def __init__(self, by_type=thrift_spec[1][4], by_region=thrift_spec[2][4], by_function=thrift_spec[3][4], by_alias=thrift_spec[4][4],):
if by_type is self.thrift_spec[1][4]:
by_type = {
}
self.by_type = by_type
if by_region is self.thrift_spec[2][4]:
by_region = {
}
self.by_region = by_region
if by_function is self.thrift_spec[3][4]:
by_function = {
}
self.by_function = by_function
if by_alias is self.thrift_spec[4][4]:
by_alias = {
}
self.by_alias = by_alias
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.by_type = {}
(_ktype45, _vtype46, _size44 ) = iprot.readMapBegin()
for _i48 in xrange(_size44):
_key49 = iprot.readString();
_val50 = []
(_etype54, _size51) = iprot.readListBegin()
for _i55 in xrange(_size51):
_elem56 = iprot.readString();
_val50.append(_elem56)
iprot.readListEnd()
self.by_type[_key49] = _val50
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.by_region = {}
(_ktype58, _vtype59, _size57 ) = iprot.readMapBegin()
for _i61 in xrange(_size57):
_key62 = iprot.readString();
_val63 = {}
(_ktype65, _vtype66, _size64 ) = iprot.readMapBegin()
for _i68 in xrange(_size64):
_key69 = iprot.readString();
_val70 = {}
(_ktype72, _vtype73, _size71 ) = iprot.readMapBegin()
for _i75 in xrange(_size71):
_key76 = iprot.readString();
_val77 = []
(_etype81, _size78) = iprot.readListBegin()
for _i82 in xrange(_size78):
_elem83 = iprot.readString();
_val77.append(_elem83)
iprot.readListEnd()
_val70[_key76] = _val77
iprot.readMapEnd()
_val63[_key69] = _val70
iprot.readMapEnd()
self.by_region[_key62] = _val63
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.MAP:
self.by_function = {}
(_ktype85, _vtype86, _size84 ) = iprot.readMapBegin()
for _i88 in xrange(_size84):
_key89 = iprot.readString();
_val90 = []
(_etype94, _size91) = iprot.readListBegin()
for _i95 in xrange(_size91):
_elem96 = iprot.readString();
_val90.append(_elem96)
iprot.readListEnd()
self.by_function[_key89] = _val90
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.by_alias = {}
(_ktype98, _vtype99, _size97 ) = iprot.readMapBegin()
for _i101 in xrange(_size97):
_key102 = iprot.readString();
_val103 = []
(_etype107, _size104) = iprot.readListBegin()
for _i108 in xrange(_size104):
_elem109 = iprot.readString();
_val103.append(_elem109)
iprot.readListEnd()
self.by_alias[_key102] = _val103
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Feature_id_mapping')
if self.by_type is not None:
oprot.writeFieldBegin('by_type', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.by_type))
for kiter110,viter111 in self.by_type.items():
oprot.writeString(kiter110)
oprot.writeListBegin(TType.STRING, len(viter111))
for iter112 in viter111:
oprot.writeString(iter112)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.by_region is not None:
oprot.writeFieldBegin('by_region', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.by_region))
for kiter113,viter114 in self.by_region.items():
oprot.writeString(kiter113)
oprot.writeMapBegin(TType.STRING, TType.MAP, len(viter114))
for kiter115,viter116 in viter114.items():
oprot.writeString(kiter115)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(viter116))
for kiter117,viter118 in viter116.items():
oprot.writeString(kiter117)
oprot.writeListBegin(TType.STRING, len(viter118))
for iter119 in viter118:
oprot.writeString(iter119)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeMapEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.by_function is not None:
oprot.writeFieldBegin('by_function', TType.MAP, 3)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.by_function))
for kiter120,viter121 in self.by_function.items():
oprot.writeString(kiter120)
oprot.writeListBegin(TType.STRING, len(viter121))
for iter122 in viter121:
oprot.writeString(iter122)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.by_alias is not None:
oprot.writeFieldBegin('by_alias', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.by_alias))
for kiter123,viter124 in self.by_alias.items():
oprot.writeString(kiter123)
oprot.writeListBegin(TType.STRING, len(viter124))
for iter125 in viter124:
oprot.writeString(iter125)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.by_type)
value = (value * 31) ^ hash(self.by_region)
value = (value * 31) ^ hash(self.by_function)
value = (value * 31) ^ hash(self.by_alias)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
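# Illustrative usage sketch: the __init__ above treats the spec-level default
# dict as a sentinel, swapping in a fresh empty dict so separate instances
# never share mutable state.  The identifier below is a made-up example.
if __name__ == '__main__':
  empty_mapping = Feature_id_mapping()
  typed_mapping = Feature_id_mapping(by_type={'gene': ['kb|g.0.gene.1']})
  assert empty_mapping.by_type == {}
  assert empty_mapping.by_type is not Feature_id_mapping.thrift_spec[1][4]
  assert typed_mapping.by_type['gene'] == ['kb|g.0.gene.1']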
class Feature_data:
"""
Attributes:
- feature_id: Identifier for this feature
- feature_type: The Feature type e.g., "mRNA", "CDS", "gene", ...
- feature_function: The functional annotation description
- feature_aliases: Dictionary of Alias string to List of source string identifiers
- feature_dna_sequence_length: Integer representing the length of the DNA sequence for convenience
- feature_dna_sequence: String containing the DNA sequence of the Feature
- feature_md5: String containing the MD5 of the sequence, calculated from the uppercase string
- feature_locations: List of dictionaries::
{ "contig_id": str,
"start": int,
"strand": str,
"length": int }
List of Feature regions, where the Feature bounds are
calculated as follows:
- For "+" strand, [start, start + length)
- For "-" strand, (start - length, start]
- feature_publications: List of any known publications related to this Feature
- feature_quality_warnings: List of strings indicating known data quality issues.
Note: not used for Genome type, but is used for
GenomeAnnotation
- feature_quality_score: Quality value with unknown algorithm for Genomes,
not calculated yet for GenomeAnnotations.
- feature_notes: Notes recorded about this Feature
- feature_inference: Inference information
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'feature_id', None, None, ), # 1
(2, TType.STRING, 'feature_type', None, None, ), # 2
(3, TType.STRING, 'feature_function', None, None, ), # 3
(4, TType.MAP, 'feature_aliases', (TType.STRING,None,TType.LIST,(TType.STRING,None)), None, ), # 4
(5, TType.I64, 'feature_dna_sequence_length', None, None, ), # 5
(6, TType.STRING, 'feature_dna_sequence', None, None, ), # 6
(7, TType.STRING, 'feature_md5', None, None, ), # 7
(8, TType.LIST, 'feature_locations', (TType.STRUCT,(Region, Region.thrift_spec)), None, ), # 8
(9, TType.LIST, 'feature_publications', (TType.STRING,None), None, ), # 9
(10, TType.LIST, 'feature_quality_warnings', (TType.STRING,None), None, ), # 10
(11, TType.LIST, 'feature_quality_score', (TType.STRING,None), None, ), # 11
(12, TType.STRING, 'feature_notes', None, None, ), # 12
(13, TType.STRING, 'feature_inference', None, None, ), # 13
)
def __init__(self, feature_id=None, feature_type=None, feature_function=None, feature_aliases=None, feature_dna_sequence_length=None, feature_dna_sequence=None, feature_md5=None, feature_locations=None, feature_publications=None, feature_quality_warnings=None, feature_quality_score=None, feature_notes=None, feature_inference=None,):
self.feature_id = feature_id
self.feature_type = feature_type
self.feature_function = feature_function
self.feature_aliases = feature_aliases
self.feature_dna_sequence_length = feature_dna_sequence_length
self.feature_dna_sequence = feature_dna_sequence
self.feature_md5 = feature_md5
self.feature_locations = feature_locations
self.feature_publications = feature_publications
self.feature_quality_warnings = feature_quality_warnings
self.feature_quality_score = feature_quality_score
self.feature_notes = feature_notes
self.feature_inference = feature_inference
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.feature_id = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.feature_type = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.feature_function = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.feature_aliases = {}
(_ktype127, _vtype128, _size126 ) = iprot.readMapBegin()
for _i130 in xrange(_size126):
_key131 = iprot.readString();
_val132 = []
(_etype136, _size133) = iprot.readListBegin()
for _i137 in xrange(_size133):
_elem138 = iprot.readString();
_val132.append(_elem138)
iprot.readListEnd()
self.feature_aliases[_key131] = _val132
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.feature_dna_sequence_length = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.feature_dna_sequence = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.feature_md5 = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.LIST:
self.feature_locations = []
(_etype142, _size139) = iprot.readListBegin()
for _i143 in xrange(_size139):
_elem144 = Region()
_elem144.read(iprot)
self.feature_locations.append(_elem144)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.LIST:
self.feature_publications = []
(_etype148, _size145) = iprot.readListBegin()
for _i149 in xrange(_size145):
_elem150 = iprot.readString();
self.feature_publications.append(_elem150)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.LIST:
self.feature_quality_warnings = []
(_etype154, _size151) = iprot.readListBegin()
for _i155 in xrange(_size151):
_elem156 = iprot.readString();
self.feature_quality_warnings.append(_elem156)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.LIST:
self.feature_quality_score = []
(_etype160, _size157) = iprot.readListBegin()
for _i161 in xrange(_size157):
_elem162 = iprot.readString();
self.feature_quality_score.append(_elem162)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.STRING:
self.feature_notes = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 13:
if ftype == TType.STRING:
self.feature_inference = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Feature_data')
if self.feature_id is not None:
oprot.writeFieldBegin('feature_id', TType.STRING, 1)
oprot.writeString(self.feature_id)
oprot.writeFieldEnd()
if self.feature_type is not None:
oprot.writeFieldBegin('feature_type', TType.STRING, 2)
oprot.writeString(self.feature_type)
oprot.writeFieldEnd()
if self.feature_function is not None:
oprot.writeFieldBegin('feature_function', TType.STRING, 3)
oprot.writeString(self.feature_function)
oprot.writeFieldEnd()
if self.feature_aliases is not None:
oprot.writeFieldBegin('feature_aliases', TType.MAP, 4)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.feature_aliases))
for kiter163,viter164 in self.feature_aliases.items():
oprot.writeString(kiter163)
oprot.writeListBegin(TType.STRING, len(viter164))
for iter165 in viter164:
oprot.writeString(iter165)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.feature_dna_sequence_length is not None:
oprot.writeFieldBegin('feature_dna_sequence_length', TType.I64, 5)
oprot.writeI64(self.feature_dna_sequence_length)
oprot.writeFieldEnd()
if self.feature_dna_sequence is not None:
oprot.writeFieldBegin('feature_dna_sequence', TType.STRING, 6)
oprot.writeString(self.feature_dna_sequence)
oprot.writeFieldEnd()
if self.feature_md5 is not None:
oprot.writeFieldBegin('feature_md5', TType.STRING, 7)
oprot.writeString(self.feature_md5)
oprot.writeFieldEnd()
if self.feature_locations is not None:
oprot.writeFieldBegin('feature_locations', TType.LIST, 8)
oprot.writeListBegin(TType.STRUCT, len(self.feature_locations))
for iter166 in self.feature_locations:
iter166.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.feature_publications is not None:
oprot.writeFieldBegin('feature_publications', TType.LIST, 9)
oprot.writeListBegin(TType.STRING, len(self.feature_publications))
for iter167 in self.feature_publications:
oprot.writeString(iter167)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.feature_quality_warnings is not None:
oprot.writeFieldBegin('feature_quality_warnings', TType.LIST, 10)
oprot.writeListBegin(TType.STRING, len(self.feature_quality_warnings))
for iter168 in self.feature_quality_warnings:
oprot.writeString(iter168)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.feature_quality_score is not None:
oprot.writeFieldBegin('feature_quality_score', TType.LIST, 11)
oprot.writeListBegin(TType.STRING, len(self.feature_quality_score))
for iter169 in self.feature_quality_score:
oprot.writeString(iter169)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.feature_notes is not None:
oprot.writeFieldBegin('feature_notes', TType.STRING, 12)
oprot.writeString(self.feature_notes)
oprot.writeFieldEnd()
if self.feature_inference is not None:
oprot.writeFieldBegin('feature_inference', TType.STRING, 13)
oprot.writeString(self.feature_inference)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.feature_id)
value = (value * 31) ^ hash(self.feature_type)
value = (value * 31) ^ hash(self.feature_function)
value = (value * 31) ^ hash(self.feature_aliases)
value = (value * 31) ^ hash(self.feature_dna_sequence_length)
value = (value * 31) ^ hash(self.feature_dna_sequence)
value = (value * 31) ^ hash(self.feature_md5)
value = (value * 31) ^ hash(self.feature_locations)
value = (value * 31) ^ hash(self.feature_publications)
value = (value * 31) ^ hash(self.feature_quality_warnings)
value = (value * 31) ^ hash(self.feature_quality_score)
value = (value * 31) ^ hash(self.feature_notes)
value = (value * 31) ^ hash(self.feature_inference)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
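# Illustrative round-trip sketch: populate a Feature_data struct and push it
# through write()/read() with the plain (non-accelerated) binary protocol.
# All values are made up, and the Region keyword arguments are assumed from
# the feature_locations keys listed in the docstring (contig_id, start,
# strand, length); in this Python binding the locations are Region structs
# rather than plain dictionaries.
if __name__ == '__main__':
  from thrift.transport import TTransport
  from thrift.protocol import TBinaryProtocol

  feature = Feature_data(
    feature_id='kb|g.0.mRNA.42',
    feature_type='mRNA',
    feature_aliases={'locus_tag': ['b0042']},
    feature_dna_sequence='ATGC',
    feature_dna_sequence_length=4,
    feature_locations=[Region(contig_id='contig_1', start=100,
                              strand='+', length=4)],
  )

  # Serialise into an in-memory buffer, then restore into a new instance.
  buf = TTransport.TMemoryBuffer()
  feature.write(TBinaryProtocol.TBinaryProtocol(buf))
  restored = Feature_data()
  restored.read(TBinaryProtocol.TBinaryProtocol(
    TTransport.TMemoryBuffer(buf.getvalue())))
  assert restored == feature

  # Per the docstring, '+' strand bounds are the half-open interval
  # [start, start + length).
  loc = restored.feature_locations[0]
  assert (loc.start, loc.start + loc.length) == (100, 104)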
class Protein_data:
"""
Attributes:
- protein_id: Protein identifier, which is feature ID plus ".protein"
- protein_amino_acid_sequence: Amino acid sequence for this protein
- protein_function: Function of protein
- protein_aliases: List of aliases for the protein
- protein_md5: MD5 hash of the protein translation (uppercase)
- protein_domain_locations
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'protein_id', None, None, ), # 1
(2, TType.STRING, 'protein_amino_acid_sequence', None, None, ), # 2
(3, TType.STRING, 'protein_function', None, None, ), # 3
(4, TType.LIST, 'protein_aliases', (TType.STRING,None), None, ), # 4
(5, TType.STRING, 'protein_md5', None, None, ), # 5
(6, TType.LIST, 'protein_domain_locations', (TType.STRING,None), None, ), # 6
)
def __init__(self, protein_id=None, protein_amino_acid_sequence=None, protein_function=None, protein_aliases=None, protein_md5=None, protein_domain_locations=None,):
self.protein_id = protein_id
self.protein_amino_acid_sequence = protein_amino_acid_sequence
self.protein_function = protein_function
self.protein_aliases = protein_aliases
self.protein_md5 = protein_md5
self.protein_domain_locations = protein_domain_locations
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.protein_id = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.protein_amino_acid_sequence = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.protein_function = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.protein_aliases = []
(_etype173, _size170) = iprot.readListBegin()
for _i174 in xrange(_size170):
_elem175 = iprot.readString();
self.protein_aliases.append(_elem175)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.protein_md5 = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.LIST:
self.protein_domain_locations = []
(_etype179, _size176) = iprot.readListBegin()
for _i180 in xrange(_size176):
_elem181 = iprot.readString();
self.protein_domain_locations.append(_elem181)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Protein_data')
if self.protein_id is not None:
oprot.writeFieldBegin('protein_id', TType.STRING, 1)
oprot.writeString(self.protein_id)
oprot.writeFieldEnd()
if self.protein_amino_acid_sequence is not None:
oprot.writeFieldBegin('protein_amino_acid_sequence', TType.STRING, 2)
oprot.writeString(self.protein_amino_acid_sequence)
oprot.writeFieldEnd()
if self.protein_function is not None:
oprot.writeFieldBegin('protein_function', TType.STRING, 3)
oprot.writeString(self.protein_function)
oprot.writeFieldEnd()
if self.protein_aliases is not None:
oprot.writeFieldBegin('protein_aliases', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.protein_aliases))
for iter182 in self.protein_aliases:
oprot.writeString(iter182)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.protein_md5 is not None:
oprot.writeFieldBegin('protein_md5', TType.STRING, 5)
oprot.writeString(self.protein_md5)
oprot.writeFieldEnd()
if self.protein_domain_locations is not None:
oprot.writeFieldBegin('protein_domain_locations', TType.LIST, 6)
oprot.writeListBegin(TType.STRING, len(self.protein_domain_locations))
for iter183 in self.protein_domain_locations:
oprot.writeString(iter183)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.protein_id)
value = (value * 31) ^ hash(self.protein_amino_acid_sequence)
value = (value * 31) ^ hash(self.protein_function)
value = (value * 31) ^ hash(self.protein_aliases)
value = (value * 31) ^ hash(self.protein_md5)
value = (value * 31) ^ hash(self.protein_domain_locations)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
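# Illustrative sketch: per the docstring, a protein identifier is its parent
# feature identifier plus ".protein".  Values below are made up.
if __name__ == '__main__':
  cds_id = 'kb|g.0.CDS.7'
  protein = Protein_data(
    protein_id=cds_id + '.protein',
    protein_amino_acid_sequence='MKT',
    protein_function='hypothetical protein',
    protein_aliases=['b0007'],
  )
  assert protein.protein_id == 'kb|g.0.CDS.7.protein'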
class Exon_data:
"""
Attributes:
- exon_location: Location of the exon in the contig.
- exon_dna_sequence: DNA Sequence string.
- exon_ordinal: The position of the exon, ordered 5' to 3'.
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'exon_location', (Region, Region.thrift_spec), None, ), # 1
(2, TType.STRING, 'exon_dna_sequence', None, None, ), # 2
(3, TType.I64, 'exon_ordinal', None, None, ), # 3
)
def __init__(self, exon_location=None, exon_dna_sequence=None, exon_ordinal=None,):
self.exon_location = exon_location
self.exon_dna_sequence = exon_dna_sequence
self.exon_ordinal = exon_ordinal
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.exon_location = Region()
self.exon_location.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.exon_dna_sequence = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.exon_ordinal = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Exon_data')
if self.exon_location is not None:
oprot.writeFieldBegin('exon_location', TType.STRUCT, 1)
self.exon_location.write(oprot)
oprot.writeFieldEnd()
if self.exon_dna_sequence is not None:
oprot.writeFieldBegin('exon_dna_sequence', TType.STRING, 2)
oprot.writeString(self.exon_dna_sequence)
oprot.writeFieldEnd()
if self.exon_ordinal is not None:
oprot.writeFieldBegin('exon_ordinal', TType.I64, 3)
oprot.writeI64(self.exon_ordinal)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.exon_location)
value = (value * 31) ^ hash(self.exon_dna_sequence)
value = (value * 31) ^ hash(self.exon_ordinal)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
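# Illustrative sketch: exon_ordinal orders exons 5' to 3', so sorting on it
# recovers transcript order; concatenating the ordered exon sequences yields
# the spliced sequence.  Sequences below are made up.
if __name__ == '__main__':
  exons = [Exon_data(exon_dna_sequence='GGC', exon_ordinal=2),
           Exon_data(exon_dna_sequence='ATG', exon_ordinal=1)]
  ordered = sorted(exons, key=lambda e: e.exon_ordinal)
  assert ''.join(e.exon_dna_sequence for e in ordered) == 'ATGGGC'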
class UTR_data:
"""
Attributes:
- utr_locations: Locations of this UTR
- utr_dna_sequence: DNA sequence string for this UTR
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'utr_locations', (TType.STRUCT,(Region, Region.thrift_spec)), None, ), # 1
(2, TType.STRING, 'utr_dna_sequence', None, None, ), # 2
)
def __init__(self, utr_locations=None, utr_dna_sequence=None,):
self.utr_locations = utr_locations
self.utr_dna_sequence = utr_dna_sequence
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.utr_locations = []
(_etype187, _size184) = iprot.readListBegin()
for _i188 in xrange(_size184):
_elem189 = Region()
_elem189.read(iprot)
self.utr_locations.append(_elem189)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.utr_dna_sequence = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('UTR_data')
if self.utr_locations is not None:
oprot.writeFieldBegin('utr_locations', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.utr_locations))
for iter190 in self.utr_locations:
iter190.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.utr_dna_sequence is not None:
oprot.writeFieldBegin('utr_dna_sequence', TType.STRING, 2)
oprot.writeString(self.utr_dna_sequence)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.utr_locations)
value = (value * 31) ^ hash(self.utr_dna_sequence)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| mlhenderson/data_api | lib/doekbase/data_api/annotation/genome_annotation/service/ttypes.py | Python | mit | 58,338 |
from flask_login import LoginManager, UserMixin, login_user, logout_user, current_user, login_required
from werkzeug.security import generate_password_hash, check_password_hash
import ctf
class User(UserMixin, ctf.db.Model):
__tablename__ = 'users'
id = ctf.db.Column(ctf.db.Integer, primary_key=True)
username = ctf.db.Column(ctf.db.String(80), unique=True)
email = ctf.db.Column(ctf.db.String(80))
password_hash = ctf.db.Column(ctf.db.String(120))
school = ctf.db.Column(ctf.db.String(120))
score = ctf.db.Column(ctf.db.String(20))
solved = ctf.db.Column(ctf.db.String(400))
lastSubmit = ctf.db.Column(ctf.db.DateTime)
confirmed = ctf.db.Column(ctf.db.Boolean, nullable=False, default=False)
#timestamp=datetime.datetime.utcnow()
#def __init__(self, **kwargs):
# super(User, self).__init__(**kwargs)
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def __repr__(self):
return '<User %r>' % self.username
class Challenges(ctf.db.Model):
__tablename__ = 'challenges'
id = ctf.db.Column(ctf.db.Integer, primary_key=True)
name = ctf.db.Column(ctf.db.String(80), unique=True)
category = ctf.db.Column(ctf.db.String(80))
info = ctf.db.Column(ctf.db.String(800))
score = ctf.db.Column(ctf.db.String(20))
flag = ctf.db.Column(ctf.db.String(40))
def __repr__(self):
return '<Challenges %r>' % self.name
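# Illustrative sketch (hypothetical usage, not an existing helper): the
# write-only ``password`` property stores only a salted hash, and a submitted
# flag can be checked against Challenges.flag with a constant-time comparison.
# Assumes the models can be instantiated outside a request context.
if __name__ == '__main__':
    import hmac

    user = User(username='alice', email='alice@example.com', school='Example U')
    user.password = 's3cret'            # hashed by generate_password_hash
    assert user.verify_password('s3cret')
    assert not user.verify_password('wrong')

    def is_correct_flag(challenge, submitted):
        # hmac.compare_digest avoids leaking flag prefixes through timing.
        return hmac.compare_digest(challenge.flag, submitted)

    demo = Challenges(name='warmup', category='web', info='Find the flag.',
                      score='50', flag='CTF{example_flag}')
    assert is_correct_flag(demo, 'CTF{example_flag}')
    assert not is_correct_flag(demo, 'CTF{nope}')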
| abdesslem/CTF | models.py | Python | mit | 1,692 |