repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (string, 991 distinct values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (string, 15 distinct values)
---|---|---|---|---|---|
abstract-open-solutions/l10n-italy | account_fiscal_year_closing/__init__.py | 16 | 1132 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <[email protected]>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Fiscal Year Closing
"""
import fyc
import wizard
| agpl-3.0 |
stormsson/procedural_city_generation_wrapper | vendor/josauder/procedural_city_generation/roadmap/growth_rules/minor_road.py | 2 | 1679 | from __future__ import division
import numpy as np
import random
from procedural_city_generation.roadmap.Vertex import Vertex
from procedural_city_generation.additional_stuff.rotate import rotate
from procedural_city_generation.additional_stuff.Singleton import Singleton
singleton=Singleton("roadmap")
def minor_road(vertex, b):
# Collect the numeric parameters from the Variables object
pForward=singleton.minor_roadpForward
pTurn=singleton.minor_roadpTurn
lMin=singleton.minor_roadlMin
lMax=singleton.minor_roadlMax
suggested_vertices=[]
# Compute the direction vector of the last edge leading into this vertex
previous_vector=np.array(vertex.coords-vertex.neighbours[-1].coords)
previous_vector=previous_vector/np.linalg.norm(previous_vector)
n=np.array([-previous_vector[1], previous_vector[0]])
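# n is previous_vector rotated 90 degrees (a unit normal); it gives the
# left/right turn direction below.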
# Straight ahead
v=random.uniform(lMin, lMax)*previous_vector
random_number=random.randint(0, 100)
if random_number<pForward*b:
k=Vertex(vertex.coords+v)
#k.neighbours.append(vertex)
k.minor_road=True
suggested_vertices.append(k)
# Right (the original sampled a length into an unused v and stepped by the
# unscaled normal; scaling the turn by the sampled length appears intended)
v=random.uniform(lMin, lMax)*n
random_number=random.randint(0, 100)
if random_number<pTurn*b:
k=Vertex(vertex.coords+v)
#k.neighbours.append(vertex)
k.minor_road=True
suggested_vertices.append(k)
# Left
v=random.uniform(lMin, lMax)*n
random_number=random.randint(0, 100)
if random_number<pTurn*b:
k=Vertex(vertex.coords-v)
#k.neighbours.append(vertex)
k.minor_road=True
suggested_vertices.append(k)
return suggested_vertices
| mpl-2.0 |
rajadhva/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_handshake.py | 452 | 7134 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handshake._base module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.common import ExtensionParameter
from mod_pywebsocket.common import ExtensionParsingException
from mod_pywebsocket.common import format_extensions
from mod_pywebsocket.common import parse_extensions
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import validate_subprotocol
class ValidateSubprotocolTest(unittest.TestCase):
"""A unittest for validate_subprotocol method."""
def test_validate_subprotocol(self):
# Should succeed.
validate_subprotocol('sample')
validate_subprotocol('Sample')
validate_subprotocol('sample\x7eprotocol')
# Should fail.
self.assertRaises(HandshakeException,
validate_subprotocol,
'')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x09protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x19protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x20protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x7fprotocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
# "Japan" in Japanese
u'\u65e5\u672c')
_TEST_TOKEN_EXTENSION_DATA = [
('foo', [('foo', [])]),
('foo; bar', [('foo', [('bar', None)])]),
('foo; bar=baz', [('foo', [('bar', 'baz')])]),
('foo; bar=baz; car=cdr', [('foo', [('bar', 'baz'), ('car', 'cdr')])]),
('foo; bar=baz, car; cdr',
[('foo', [('bar', 'baz')]), ('car', [('cdr', None)])]),
('a, b, c, d',
[('a', []), ('b', []), ('c', []), ('d', [])]),
]
_TEST_QUOTED_EXTENSION_DATA = [
('foo; bar=""', [('foo', [('bar', '')])]),
('foo; bar=" baz "', [('foo', [('bar', ' baz ')])]),
('foo; bar=",baz;"', [('foo', [('bar', ',baz;')])]),
('foo; bar="\\\r\\\nbaz"', [('foo', [('bar', '\r\nbaz')])]),
('foo; bar="\\"baz"', [('foo', [('bar', '"baz')])]),
('foo; bar="\xbbbaz"', [('foo', [('bar', '\xbbbaz')])]),
]
_TEST_REDUNDANT_TOKEN_EXTENSION_DATA = [
('foo \t ', [('foo', [])]),
('foo; \r\n bar', [('foo', [('bar', None)])]),
('foo; bar=\r\n \r\n baz', [('foo', [('bar', 'baz')])]),
('foo ;bar = baz ', [('foo', [('bar', 'baz')])]),
('foo,bar,,baz', [('foo', []), ('bar', []), ('baz', [])]),
]
_TEST_REDUNDANT_QUOTED_EXTENSION_DATA = [
('foo; bar="\r\n \r\n baz"', [('foo', [('bar', ' baz')])]),
]
class ExtensionsParserTest(unittest.TestCase):
def _verify_extension_list(self, expected_list, actual_list):
"""Verifies that ExtensionParameter objects in actual_list have the
same members as extension definitions in expected_list. Extension
definition used in this test is a pair of an extension name and a
parameter dictionary.
"""
self.assertEqual(len(expected_list), len(actual_list))
for expected, actual in zip(expected_list, actual_list):
(name, parameters) = expected
self.assertEqual(name, actual._name)
self.assertEqual(parameters, actual._parameters)
def test_parse(self):
for formatted_string, definition in _TEST_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_quoted_data(self):
for formatted_string, definition in _TEST_QUOTED_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_redundant_data(self):
for (formatted_string,
definition) in _TEST_REDUNDANT_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_redundant_quoted_data(self):
for (formatted_string,
definition) in _TEST_REDUNDANT_QUOTED_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_bad_data(self):
_TEST_BAD_EXTENSION_DATA = [
('foo; ; '),
('foo; a a'),
('foo foo'),
(',,,'),
('foo; bar='),
('foo; bar="hoge'),
('foo; bar="a\r"'),
('foo; bar="\\\xff"'),
('foo; bar=\ra'),
]
for formatted_string in _TEST_BAD_EXTENSION_DATA:
self.assertRaises(
ExtensionParsingException, parse_extensions, formatted_string)
class FormatExtensionsTest(unittest.TestCase):
def test_format_extensions(self):
for formatted_string, definitions in _TEST_TOKEN_EXTENSION_DATA:
extensions = []
for definition in definitions:
(name, parameters) = definition
extension = ExtensionParameter(name)
extension._parameters = parameters
extensions.append(extension)
self.assertEqual(
formatted_string, format_extensions(extensions))
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
paulproteus/django | django/contrib/localflavor/za/forms.py | 109 | 1977 | """
South Africa-specific Form helpers
"""
from __future__ import unicode_literals
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField
from django.utils.checksums import luhn
from django.utils.translation import gettext as _
import re
from datetime import date
id_re = re.compile(r'^(?P<yy>\d\d)(?P<mm>\d\d)(?P<dd>\d\d)(?P<mid>\d{4})(?P<end>\d{3})')
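# Layout of a 13-digit South African ID number: YYMMDD birth date, a 4-digit
# sequence number, then a 3-digit tail (citizenship digit, a legacy digit,
# and the Luhn check digit).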
class ZAIDField(CharField):
"""A form field for South African ID numbers -- the checksum is validated
using the Luhn checksum, and a simplistic (read: not entirely accurate)
check is used for the birthdate
"""
default_error_messages = {
'invalid': _('Enter a valid South African ID number'),
}
def clean(self, value):
super(ZAIDField, self).clean(value)
if value in EMPTY_VALUES:
return ''
# strip spaces and dashes
value = value.strip().replace(' ', '').replace('-', '')
match = re.match(id_re, value)
if not match:
raise ValidationError(self.error_messages['invalid'])
g = match.groupdict()
try:
# The year 2000 is conveniently a leap year.
# This algorithm will break in xx00 years which aren't leap years
# There is no way to guess the century of a ZA ID number
d = date(int(g['yy']) + 2000, int(g['mm']), int(g['dd']))
except ValueError:
raise ValidationError(self.error_messages['invalid'])
if not luhn(value):
raise ValidationError(self.error_messages['invalid'])
return value
class ZAPostCodeField(RegexField):
default_error_messages = {
'invalid': _('Enter a valid South African postal code'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(ZAPostCodeField, self).__init__(r'^\d{4}$',
max_length, min_length, *args, **kwargs)
| bsd-3-clause |
coreentin/android_kernel_nvidia_s8515 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
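# Note: with two ints this is integer (floor) division on Python 2.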
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
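# Note: the running "avg" above is a pairwise average of the previous
# average and the new value, not a true mean over all samples.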
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
phiLangley/openPHD | [CODE]/processing/modes/PythonMode/examples/Topics/Fractals and L-Systems/PenroseSnowflake/l_system.py | 1 | 1430 | class LSystem(object):
def __init__(self):
self.steps = 0
self.axiom = "F"
self.rule = "F+F-F"
self.startLength = 90.0
self.theta = radians(120.0)
self.reset()
def reset(self):
self.production = self.axiom
self.drawLength = self.startLength
self.generations = 0
def getAge(self):
return self.generations
def render(self):
translate(width / 2, height / 2)
self.steps += 5
if self.steps > len(self.production):
self.steps = len(self.production)
for i in range(self.steps):
step = self.production[i]
if step == 'F':
rect(0, 0, -self.drawLength, -self.drawLength)
noFill()
translate(0, -self.drawLength)
elif step == '+':
rotate(self.theta)
elif step == '-':
rotate(-self.theta)
elif step == '[':
pushMatrix()
elif step == ']':
popMatrix()
def simulate(self, gen):
while self.getAge() < gen:
self.production = self.iterate(self.production, self.rule)
def iterate(self, prod_, rule_):
self.drawLength = self.drawLength * 0.6
self.generations += 1
newProduction = prod_
newProduction = newProduction.replace("F", rule_)
return newProduction
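# A minimal usage sketch (assumed; Processing Python mode supplies rect,
# rotate, translate, pushMatrix, popMatrix and radians):
# lsys = LSystem()
# lsys.simulate(4)   # expand the axiom "F" four generations via the rule
# lsys.render()      # draw the current production turtle-style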
| gpl-2.0 |
wso2/product-private-paas | components/org.wso2.ppaas.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/databridge/thrift/thrift/transport/TTwisted.py | 19 | 6528 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from cStringIO import StringIO
from zope.interface import implements, Interface, Attribute
from twisted.internet.protocol import Protocol, ServerFactory, ClientFactory, \
connectionDone
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import log
from twisted.web import server, resource, http
import TTransport
class TMessageSenderTransport(TTransport.TTransportBase):
def __init__(self):
self.__wbuf = StringIO()
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
msg = self.__wbuf.getvalue()
self.__wbuf = StringIO()
self.sendMessage(msg)
def sendMessage(self, message):
raise NotImplementedError
class TCallbackTransport(TMessageSenderTransport):
def __init__(self, func):
TMessageSenderTransport.__init__(self)
self.func = func
def sendMessage(self, message):
self.func(message)
class ThriftClientProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 2 ** 31 - 1
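# Int32StringReceiver frames are length-prefixed with a signed 32-bit
# integer, hence the 2**31 - 1 cap.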
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self._client_class = client_class
self._iprot_factory = iprot_factory
if oprot_factory is None:
self._oprot_factory = iprot_factory
else:
self._oprot_factory = oprot_factory
self.recv_map = {}
self.started = defer.Deferred()
def dispatch(self, msg):
self.sendString(msg)
def connectionMade(self):
tmo = TCallbackTransport(self.dispatch)
self.client = self._client_class(tmo, self._oprot_factory)
self.started.callback(self.client)
def connectionLost(self, reason=connectionDone):
for k, v in self.client._reqs.iteritems():
tex = TTransport.TTransportException(
type=TTransport.TTransportException.END_OF_FILE,
message='Connection closed')
v.errback(tex)
def stringReceived(self, frame):
tr = TTransport.TMemoryBuffer(frame)
iprot = self._iprot_factory.getProtocol(tr)
(fname, mtype, rseqid) = iprot.readMessageBegin()
try:
method = self.recv_map[fname]
except KeyError:
method = getattr(self.client, 'recv_' + fname)
self.recv_map[fname] = method
method(iprot, mtype, rseqid)
class ThriftServerProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 2 ** 31 - 1
def dispatch(self, msg):
self.sendString(msg)
def processError(self, error):
self.transport.loseConnection()
def processOk(self, _, tmo):
msg = tmo.getvalue()
if len(msg) > 0:
self.dispatch(msg)
def stringReceived(self, frame):
tmi = TTransport.TMemoryBuffer(frame)
tmo = TTransport.TMemoryBuffer()
iprot = self.factory.iprot_factory.getProtocol(tmi)
oprot = self.factory.oprot_factory.getProtocol(tmo)
d = self.factory.processor.process(iprot, oprot)
d.addCallbacks(self.processOk, self.processError,
callbackArgs=(tmo,))
class IThriftServerFactory(Interface):
processor = Attribute("Thrift processor")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class IThriftClientFactory(Interface):
client_class = Attribute("Thrift client class")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class ThriftServerFactory(ServerFactory):
implements(IThriftServerFactory)
protocol = ThriftServerProtocol
def __init__(self, processor, iprot_factory, oprot_factory=None):
self.processor = processor
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
class ThriftClientFactory(ClientFactory):
implements(IThriftClientFactory)
protocol = ThriftClientProtocol
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self.client_class = client_class
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
def buildProtocol(self, addr):
p = self.protocol(self.client_class, self.iprot_factory,
self.oprot_factory)
p.factory = self
return p
class ThriftResource(resource.Resource):
allowedMethods = ('POST',)
def __init__(self, processor, inputProtocolFactory,
outputProtocolFactory=None):
resource.Resource.__init__(self)
self.inputProtocolFactory = inputProtocolFactory
if outputProtocolFactory is None:
self.outputProtocolFactory = inputProtocolFactory
else:
self.outputProtocolFactory = outputProtocolFactory
self.processor = processor
def getChild(self, path, request):
return self
def _cbProcess(self, _, request, tmo):
msg = tmo.getvalue()
request.setResponseCode(http.OK)
request.setHeader("content-type", "application/x-thrift")
request.write(msg)
request.finish()
def render_POST(self, request):
request.content.seek(0, 0)
data = request.content.read()
tmi = TTransport.TMemoryBuffer(data)
tmo = TTransport.TMemoryBuffer()
iprot = self.inputProtocolFactory.getProtocol(tmi)
oprot = self.outputProtocolFactory.getProtocol(tmo)
d = self.processor.process(iprot, oprot)
d.addCallback(self._cbProcess, request, tmo)
return server.NOT_DONE_YET
| apache-2.0 |
Alecto3-D/testable-greeter | bb-master/sandbox/lib/python3.5/site-packages/buildbot/data/masters.py | 11 | 7229 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from twisted.internet import reactor
from twisted.python import log
from buildbot.data import base
from buildbot.data import resultspec
from buildbot.data import types
from buildbot.process.results import RETRY
from buildbot.util import epoch2datetime
# time, in minutes, after which a master that hasn't checked in will be
# marked as inactive
EXPIRE_MINUTES = 10
def _db2data(master):
return dict(masterid=master['id'],
name=master['name'],
active=master['active'],
last_active=master['last_active'])
class MasterEndpoint(base.Endpoint):
isCollection = False
pathPatterns = """
/masters/n:masterid
/builders/n:builderid/masters/n:masterid
"""
@defer.inlineCallbacks
def get(self, resultSpec, kwargs):
# if a builder is given, only return the master if it's associated with
# this builder
if 'builderid' in kwargs:
builder = yield self.master.db.builders.getBuilder(
builderid=kwargs['builderid'])
if not builder or kwargs['masterid'] not in builder['masterids']:
defer.returnValue(None)
return
m = yield self.master.db.masters.getMaster(kwargs['masterid'])
defer.returnValue(_db2data(m) if m else None)
class MastersEndpoint(base.Endpoint):
isCollection = True
pathPatterns = """
/masters
/builders/n:builderid/masters
"""
rootLinkName = 'masters'
@defer.inlineCallbacks
def get(self, resultSpec, kwargs):
masterlist = yield self.master.db.masters.getMasters()
if 'builderid' in kwargs:
builder = yield self.master.db.builders.getBuilder(
builderid=kwargs['builderid'])
if builder:
masterids = set(builder['masterids'])
masterlist = [m for m in masterlist if m['id'] in masterids]
else:
masterlist = []
defer.returnValue([_db2data(m) for m in masterlist])
class Master(base.ResourceType):
name = "master"
plural = "masters"
endpoints = [MasterEndpoint, MastersEndpoint]
eventPathPatterns = """
/masters/:masterid
"""
class EntityType(types.Entity):
masterid = types.Integer()
name = types.String()
active = types.Boolean()
last_active = types.DateTime()
entityType = EntityType(name)
@base.updateMethod
@defer.inlineCallbacks
def masterActive(self, name, masterid, _reactor=reactor):
activated = yield self.master.db.masters.setMasterState(
masterid=masterid, active=True, _reactor=_reactor)
if activated:
self.produceEvent(
dict(masterid=masterid, name=name, active=True),
'started')
@base.updateMethod
@defer.inlineCallbacks
def expireMasters(self, forceHouseKeeping=False, _reactor=reactor):
too_old = epoch2datetime(_reactor.seconds() - 60 * EXPIRE_MINUTES)
masters = yield self.master.db.masters.getMasters()
for m in masters:
if m['last_active'] is not None and m['last_active'] >= too_old:
continue
# mark the master inactive, and send a message on its behalf
deactivated = yield self.master.db.masters.setMasterState(
masterid=m['id'], active=False, _reactor=_reactor)
if deactivated:
yield self._masterDeactivated(m['id'], m['name'])
elif forceHouseKeeping:
yield self._masterDeactivatedHousekeeping(m['id'], m['name'])
@base.updateMethod
@defer.inlineCallbacks
def masterStopped(self, name, masterid):
deactivated = yield self.master.db.masters.setMasterState(
masterid=masterid, active=False)
if deactivated:
yield self._masterDeactivated(masterid, name)
@defer.inlineCallbacks
def _masterDeactivatedHousekeeping(self, masterid, name):
log.msg("doing housekeeping for master %d %s" % (masterid, name))
# common code for deactivating a master
yield self.master.data.rtypes.worker._masterDeactivated(
masterid=masterid)
yield self.master.data.rtypes.builder._masterDeactivated(
masterid=masterid)
yield self.master.data.rtypes.scheduler._masterDeactivated(
masterid=masterid)
yield self.master.data.rtypes.changesource._masterDeactivated(
masterid=masterid)
# for each build running on that instance..
builds = yield self.master.data.get(('builds',),
filters=[resultspec.Filter('masterid', 'eq', [masterid]),
resultspec.Filter('complete', 'eq', [False])])
for build in builds:
# stop any running steps..
steps = yield self.master.data.get(
('builds', build['buildid'], 'steps'),
filters=[resultspec.Filter('results', 'eq', [None])])
for step in steps:
# finish remaining logs for those steps..
logs = yield self.master.data.get(
('steps', step['stepid'], 'logs'),
filters=[resultspec.Filter('complete', 'eq',
[False])])
for _log in logs:
yield self.master.data.updates.finishLog(
logid=_log['logid'])
yield self.master.data.updates.finishStep(
stepid=step['stepid'], results=RETRY, hidden=False)
# then stop the build itself
yield self.master.data.updates.finishBuild(
buildid=build['buildid'], results=RETRY)
# unclaim all of the build requests owned by the deactivated instance
buildrequests = yield self.master.db.buildrequests.getBuildRequests(
complete=False, claimed=masterid)
yield self.master.db.buildrequests.unclaimBuildRequests(
brids=[br['buildrequestid'] for br in buildrequests])
@defer.inlineCallbacks
def _masterDeactivated(self, masterid, name):
yield self._masterDeactivatedHousekeeping(masterid, name)
self.produceEvent(
dict(masterid=masterid, name=name, active=False),
'stopped')
| mit |
hackebrot/click | examples/termui/termui.py | 32 | 4332 | # coding: utf-8
import click
import math
import time
import random
try:
range_type = xrange
except NameError:
range_type = range
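# xrange exists only on Python 2; Python 3's built-in range is already lazy.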
@click.group()
def cli():
"""This script showcases different terminal UI helpers in Click."""
pass
@cli.command()
def colordemo():
"""Demonstrates ANSI color support."""
for color in 'red', 'green', 'blue':
click.echo(click.style('I am colored %s' % color, fg=color))
click.echo(click.style('I am background colored %s' % color, bg=color))
@cli.command()
def pager():
"""Demonstrates using the pager."""
lines = []
for x in range_type(200):
lines.append('%s. Hello World!' % click.style(str(x), fg='green'))
click.echo_via_pager('\n'.join(lines))
@cli.command()
@click.option('--count', default=8000, type=click.IntRange(1, 100000),
help='The number of items to process.')
def progress(count):
"""Demonstrates the progress bar."""
items = range_type(count)
def process_slowly(item):
time.sleep(0.002 * random.random())
def filter(items):
for item in items:
if random.random() > 0.3:
yield item
with click.progressbar(items, label='Processing accounts',
fill_char=click.style('#', fg='green')) as bar:
for item in bar:
process_slowly(item)
def show_item(item):
if item is not None:
return 'Item #%d' % item
with click.progressbar(filter(items), label='Committing transaction',
fill_char=click.style('#', fg='yellow'),
item_show_func=show_item) as bar:
for item in bar:
process_slowly(item)
with click.progressbar(length=count, label='Counting',
bar_template='%(label)s %(bar)s | %(info)s',
fill_char=click.style(u'█', fg='cyan'),
empty_char=' ') as bar:
for item in bar:
process_slowly(item)
with click.progressbar(length=count, width=0, show_percent=False,
show_eta=False,
fill_char=click.style('#', fg='magenta')) as bar:
for item in bar:
process_slowly(item)
# 'Non-linear progress bar'
steps = [math.exp(x * 1. / 20) - 1 for x in range(20)]
count = int(sum(steps))
with click.progressbar(length=count, show_percent=False,
label='Slowing progress bar',
fill_char=click.style(u'█', fg='green')) as bar:
for item in steps:
time.sleep(item)
bar.update(item)
@cli.command()
@click.argument('url')
def open(url):
"""Opens a file or URL In the default application."""
click.launch(url)
@cli.command()
@click.argument('url')
def locate(url):
"""Opens a file or URL In the default application."""
click.launch(url, locate=True)
@cli.command()
def edit():
"""Opens an editor with some text in it."""
MARKER = '# Everything below is ignored\n'
message = click.edit('\n\n' + MARKER)
if message is not None:
msg = message.split(MARKER, 1)[0].rstrip('\n')
if not msg:
click.echo('Empty message!')
else:
click.echo('Message:\n' + msg)
else:
click.echo('You did not enter anything!')
@cli.command()
def clear():
"""Clears the entire screen."""
click.clear()
@cli.command()
def pause():
"""Waits for the user to press a button."""
click.pause()
@cli.command()
def menu():
"""Shows a simple menu."""
menu = 'main'
while 1:
if menu == 'main':
click.echo('Main menu:')
click.echo(' d: debug menu')
click.echo(' q: quit')
char = click.getchar()
if char == 'd':
menu = 'debug'
elif char == 'q':
menu = 'quit'
else:
click.echo('Invalid input')
elif menu == 'debug':
click.echo('Debug menu')
click.echo(' b: back')
char = click.getchar()
if char == 'b':
menu = 'main'
else:
click.echo('Invalid input')
elif menu == 'quit':
return
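# Assumed local invocation for experimentation (the packaged example is
# normally wired up via a setup.py entry point):
# if __name__ == '__main__':
#     cli()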
| bsd-3-clause |
coders-circle/Notifica | web/routine/views.py | 1 | 4456 | from django.shortcuts import render, redirect
from django.views.generic import View, TemplateView
from django.db.models import Q
import json
from classroom.models import *
from classroom.utils import *
from routine.models import *
from routine.utils import *
days = ["Sunday", "Monday", "Tuesday",
"Wednesday", "Thursday", "Friday", "Saturday"]
days_short = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]
def getGroups(groups):
gs = []
for g in groups:
gs.append(Group.objects.get(pk=g["id"]))
return gs
class RoutineView(View):
def get(self, request):
if not isValidUser(request.user):
return redirect("home")
r = getRoutine(request.user)
context = {"days_short": days_short, "days": days, "routine": r}
context["current_page"] = "Routine"
context["edit_routine"] = "false"
request.user.profile = UserProfile.objects.get(user=request.user).profile
students = getStudents(request.user)
student = None if len(students)==0 else students[0]
context["student"] = student
return render(request, 'routine/routine.html', context)
class RoutineAdminView(View):
def get(self, request):
if not isValidUser(request.user):
return redirect("home")
students = getStudents(request.user)
student = None if len(students)==0 else students[0]
if not student:
return redirect("routine:routine")
if student.user not in student.group.p_class.admins.all():
return redirect("routine:routine")
r = getRoutineForAdmin(request.user)
context = {"days_short": days_short, "days": days, "routine": r}
groups = Group.objects.filter(p_class__pk=student.group.p_class.pk)
context["groups"] = groups
context["student"] = student
context["edit_routine"] = "true"
request.user.profile = UserProfile.objects.get(user=request.user).profile
return render(request, 'routine/routine-admin.html', context)
def post(self, request):
if not isValidUser(request.user):
return redirect("home")
self.tempSids = {}
self.tempTids = {}
# TODO: Support multiple students
students = getStudents(request.user)
student = None if len(students)==0 else students[0]
if student:
Period.objects.all().delete()
routine = json.loads(request.POST.get('routine'))
r = Routine.objects.get(p_class__pk=student.group.p_class.pk)
for d, day in enumerate(routine):
for period in day:
p = Period()
p.routine = r
p.period_type = period["period_type"]
p.subject = self.getSubject(period["subject"])
p.start_time = period["start_time"]
p.end_time = period["end_time"]
p.day = d
p.remarks = period["remarks"]
p.save()
p.teachers.add(*self.getTeachers(period["teachers"]))
if "groups" in period:
p.groups.add(*getGroups(period["groups"]))
r.save()
return self.get(request)
def getSubject(self, subject):
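# Negative ids mark subjects created client-side in the editor; they are
# materialized once and cached in tempSids so repeated references map to
# the same newly created Subject row.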
sid = int(subject["id"])
if sid < 0:
if sid in self.tempSids:
return Subject.objects.get(pk=self.tempSids[sid])
newSubject = Subject()
newSubject.name = subject["name"]
newSubject.save()
self.tempSids[sid] = newSubject.pk
return newSubject
return Subject.objects.get(pk=sid)
def getTeachers(self, teachers):
ts = []
for t in teachers:
tid = int(t["id"])
if tid < 0:
if tid in self.tempTids:
ts.append(Teacher.objects.get(pk=self.tempTids[tid]))
else:
teacher = Teacher()
teacher.username = t["name"]
teacher.save()
self.tempTids[tid] = teacher.pk
ts.append(teacher)
else:
ts.append(Teacher.objects.get(pk=tid))
return ts
def getStudents(self, students):
ss = []
for s in students:
sid = int(s["id"])
ss.append(Student.objects.get(pk=sid))
return ss
| gpl-2.0 |
jenalgit/django | tests/migrations/test_base.py | 292 | 4620 | import os
import shutil
import tempfile
from contextlib import contextmanager
from importlib import import_module
from django.apps import apps
from django.db import connection
from django.db.migrations.recorder import MigrationRecorder
from django.test import TransactionTestCase
from django.test.utils import extend_sys_path
from django.utils.module_loading import module_dir
class MigrationTestBase(TransactionTestCase):
"""
Contains an extended set of asserts for testing migrations and schema operations.
"""
available_apps = ["migrations"]
def tearDown(self):
# Reset applied-migrations state.
recorder = MigrationRecorder(connection)
recorder.migration_qs.filter(app='migrations').delete()
def get_table_description(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_table_description(cursor, table)
def assertTableExists(self, table):
with connection.cursor() as cursor:
self.assertIn(table, connection.introspection.table_names(cursor))
def assertTableNotExists(self, table):
with connection.cursor() as cursor:
self.assertNotIn(table, connection.introspection.table_names(cursor))
def assertColumnExists(self, table, column):
self.assertIn(column, [c.name for c in self.get_table_description(table)])
def assertColumnNotExists(self, table, column):
self.assertNotIn(column, [c.name for c in self.get_table_description(table)])
def assertColumnNull(self, table, column):
self.assertEqual([c.null_ok for c in self.get_table_description(table) if c.name == column][0], True)
def assertColumnNotNull(self, table, column):
self.assertEqual([c.null_ok for c in self.get_table_description(table) if c.name == column][0], False)
def assertIndexExists(self, table, columns, value=True):
with connection.cursor() as cursor:
self.assertEqual(
value,
any(
c["index"]
for c in connection.introspection.get_constraints(cursor, table).values()
if c['columns'] == list(columns)
),
)
def assertIndexNotExists(self, table, columns):
return self.assertIndexExists(table, columns, False)
def assertFKExists(self, table, columns, to, value=True):
with connection.cursor() as cursor:
self.assertEqual(
value,
any(
c["foreign_key"] == to
for c in connection.introspection.get_constraints(cursor, table).values()
if c['columns'] == list(columns)
),
)
def assertFKNotExists(self, table, columns, to, value=True):
return self.assertFKExists(table, columns, to, False)
@contextmanager
def temporary_migration_module(self, app_label='migrations', module=None):
"""
Allows testing management commands in a temporary migrations module.
Wrap all invocations to makemigrations and squashmigrations with this
context manager in order to avoid creating migration files in your
source tree inadvertently.
Takes the application label that will be passed to makemigrations or
squashmigrations and the Python path to a migrations module.
The migrations module is used as a template for creating the temporary
migrations module. If it isn't provided, the application's migrations
module is used, if it exists.
Returns the filesystem path to the temporary migrations module.
"""
temp_dir = tempfile.mkdtemp()
try:
target_dir = tempfile.mkdtemp(dir=temp_dir)
with open(os.path.join(target_dir, '__init__.py'), 'w'):
pass
target_migrations_dir = os.path.join(target_dir, 'migrations')
if module is None:
module = apps.get_app_config(app_label).name + '.migrations'
try:
source_migrations_dir = module_dir(import_module(module))
except (ImportError, ValueError):
pass
else:
shutil.copytree(source_migrations_dir, target_migrations_dir)
with extend_sys_path(temp_dir):
new_module = os.path.basename(target_dir) + '.migrations'
with self.settings(MIGRATION_MODULES={app_label: new_module}):
yield target_migrations_dir
finally:
shutil.rmtree(temp_dir)
| bsd-3-clause |
SimplyAutomationized/python-snap7 | snap7/snap7types.py | 1 | 5544 | """
Python equivalent for snap7 specific types.
"""
import ctypes
from snap7.common import ADict
S7Object = ctypes.c_void_p
buffer_size = 65536
buffer_type = ctypes.c_ubyte * buffer_size
time_t = ctypes.c_uint64 # TODO: check if this is valid for all platforms
word = ctypes.c_uint16
longword = ctypes.c_uint32
# // PARAMS LIST
LocalPort = 1
RemotePort = 2
PingTimeout = 3
SendTimeout = 4
RecvTimeout = 5
WorkInterval = 6
SrcRef = 7
DstRef = 8
SrcTSap = 9
PDURequest = 10
MaxClients = 11
BSendTimeout = 12
BRecvTimeout = 13
RecoveryTime = 14
KeepAliveTime = 15
param_types = ADict({
LocalPort: ctypes.c_uint16,
RemotePort: ctypes.c_uint16,
PingTimeout: ctypes.c_int32,
SendTimeout: ctypes.c_int32,
RecvTimeout: ctypes.c_int32,
WorkInterval: ctypes.c_int32,
SrcRef: ctypes.c_uint16,
DstRef: ctypes.c_uint16,
SrcTSap: ctypes.c_uint16,
PDURequest: ctypes.c_int32,
MaxClients: ctypes.c_int32,
BSendTimeout: ctypes.c_int32,
BRecvTimeout: ctypes.c_int32,
RecoveryTime: ctypes.c_uint32,
KeepAliveTime: ctypes.c_uint32,
})
# mask types
mkEvent = 0
mkLog = 1
# Area ID
S7AreaPE = 0x81
S7AreaPA = 0x82
S7AreaMK = 0x83
S7AreaDB = 0x84
S7AreaCT = 0x1C
S7AreaTM = 0x1D
areas = ADict({
'PE': 0x81,
'PA': 0x82,
'MK': 0x83,
'DB': 0x84,
'CT': 0x1C,
'TM': 0x1D,
})
# Word Length
S7WLBit = 0x01
S7WLByte = 0x02
S7WLWord = 0x04
S7WLDWord = 0x06
S7WLReal = 0x08
S7WLCounter = 0x1C
S7WLTimer = 0x1D
# Server Area ID (use with Register/unregister - Lock/unlock Area)
# NOTE: these are not the same for the client!!
srvAreaPE = 0
srvAreaPA = 1
srvAreaMK = 2
srvAreaCT = 3
srvAreaTM = 4
srvAreaDB = 5
server_areas = ADict({
'PE': 0,
'PA': 1,
'MK': 2,
'CT': 3,
'TM': 4,
'DB': 5,
})
wordlen_to_ctypes = ADict({
S7WLBit: ctypes.c_int16,
S7WLByte: ctypes.c_int8,
S7WLWord: ctypes.c_int16,
S7WLDWord: ctypes.c_int32,
S7WLReal: ctypes.c_int32,
S7WLCounter: ctypes.c_int16,
S7WLTimer: ctypes.c_int16,
})
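# Maps S7 word-length codes to the ctypes element type used when sizing
# data buffers for reads and writes.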
block_types = ADict({
'OB': ctypes.c_int(0x38),
'DB': ctypes.c_int(0x41),
'SDB': ctypes.c_int(0x42),
'FC': ctypes.c_int(0x43),
'SFC': ctypes.c_int(0x44),
'FB': ctypes.c_int(0x45),
'SFB': ctypes.c_int(0x46),
})
server_statuses = {
0: 'SrvStopped',
1: 'SrvRunning',
2: 'SrvError',
}
cpu_statuses = {
0: 'S7CpuStatusUnknown',
4: 'S7CpuStatusStop',
8: 'S7CpuStatusRun',
}
class SrvEvent(ctypes.Structure):
_fields_ = [
('EvtTime', time_t),
('EvtSender', ctypes.c_int),
('EvtCode', longword),
('EvtRetCode', word),
('EvtParam1', word),
('EvtParam2', word),
('EvtParam3', word),
('EvtParam4', word),
]
def __str__(self):
return "<event time: %s sender: %s code: %s retcode: %s param1: " \
"%s param2:%s param3: %s param4: " \
"%s>" % (self.EvtTime, self.EvtSender, self.EvtCode,
self.EvtRetCode, self.EvtParam1, self.EvtParam2,
self.EvtParam3, self.EvtParam4)
class BlocksList(ctypes.Structure):
_fields_ = [
('OBCount', ctypes.c_int32),
('FBCount', ctypes.c_int32),
('FCCount', ctypes.c_int32),
('SFBCount', ctypes.c_int32),
('SFCCount', ctypes.c_int32),
('DBCount', ctypes.c_int32),
('SDBCount', ctypes.c_int32),
]
def __str__(self):
return "<block list count OB: %s FB: %s FC: %s SFB: %x SFC: %s DB: %s" \
" SDB: %s>" % (self.OBCount, self.FBCount, self.FCCount,
self.SFBCount, self.SFCCount, self.DBCount,
self.SDBCount)
class TS7BlockInfo(ctypes.Structure):
_fields_ = [
('BlkType', ctypes.c_int32),
('BlkNumber', ctypes.c_int32),
('BlkLang', ctypes.c_int32),
('BlkFlags', ctypes.c_int32),
('MC7Size', ctypes.c_int32),
('LoadSize', ctypes.c_int32),
('LocalData', ctypes.c_int32),
('SBBLength', ctypes.c_int32),
('CheckSum', ctypes.c_int32),
('Version', ctypes.c_int32),
('CodeDate', ctypes.c_char * 11),
('IntfDate', ctypes.c_char * 11),
('Author', ctypes.c_char * 9),
('Family', ctypes.c_char * 9),
('Header', ctypes.c_char * 9),
]
def __str__(self):
return """\
Block type: %s
Block number: %s
Block language: %s
Block flags: %s
MC7Size: %s
Load memory size: %s
Local data: %s
SBB Length: %s
Checksum: %s
Version: %s
Code date: %s
Interface date: %s
Author: %s
Family: %s
Header: %s""" % (self.BlkType,
self.BlkNumber,
self.BlkLang,
self.BlkFlags,
self.MC7Size,
self.LoadSize,
self.LocalData,
self.SBBLength,
self.CheckSum,
self.Version,
self.CodeDate,
self.IntfDate,
self.Author,
self.Family,
self.Header)
class S7DataItem(ctypes.Structure):
_pack_ = 1
_fields_ = [
('Area', ctypes.c_int32),
('WordLen', ctypes.c_int32),
('Result', ctypes.c_int32),
('DBNumber', ctypes.c_int32),
('Start', ctypes.c_int32),
('Amount', ctypes.c_int32),
('pData', ctypes.POINTER(ctypes.c_uint8))
]
| mit |
hasteur/hasteurbot_task_3 | families/wikivoyage_family.py | 3 | 3953 | # -*- coding: utf-8 -*-
__version__ = '$Id$'
# The new wikivoyage family that is hosted at wikimedia
import family
class Family(family.WikimediaFamily):
def __init__(self):
super(Family, self).__init__()
self.name = 'wikivoyage'
self.languages_by_size = [
'en', 'de', 'pt', 'nl', 'fr', 'it', 'pl', 'ru', 'sv', 'es', 'ro',
'uk', 'he', 'el',
]
self.langs = dict([(lang, '%s.wikivoyage.org' % lang)
for lang in self.languages_by_size])
# Override defaults
self.namespaces[2]['fr'] = [u'Utilisateur']
self.namespaces[3]['fr'] = [u'Discussion utilisateur', u'Discussion Utilisateur']
self.namespaces[2]['pl'] = [u'Użytkownik', u'Użytkowniczka']
self.namespaces[3]['pl'] = [u'Dyskusja użytkownika', u'Dyskusja użytkowniczki']
self.namespaces[2]['pt'] = [u'Utilizador', u'Usuário', u'Utilizadora']
self.namespaces[3]['pt'] = [u'Utilizador Discussão', u'Usuário Discussão', u'Utilizadora Discussão']
self.namespaces[9]['ro'] = [u'Discuție MediaWiki', u'Discuţie MediaWiki']
self.namespaces[12]['uk'] = [u'Довідка', u'Д']
self.namespaces[14]['uk'] = [u'Категорія', u'К']
# Most namespaces are inherited from family.Family.
# Translation used on all wikis for the different namespaces.
# (Please sort languages alphabetically)
# You only need to enter translations that differ from _default.
self.namespaces[4] = {
'de': u'Wikivoyage',
'el': u'Βικιταξίδια',
'en': u'Wikivoyage',
'es': u'Wikiviajes',
'fr': u'Wikivoyage',
'he': u'ויקימסע',
'it': u'Wikivoyage',
'nl': u'Wikivoyage',
'pl': u'Wikipodróże',
'pt': u'Wikivoyage',
'ro': u'Wikivoyage',
'ru': u'Wikivoyage',
'sv': u'Wikivoyage',
'uk': u'Вікімандри',
}
self.namespaces[5] = {
'de': u'Wikivoyage Diskussion',
'el': u'Συζήτηση Βικιταξίδια',
'en': u'Wikivoyage talk',
'es': u'Wikiviajes discusión',
'fr': u'Discussion Wikivoyage',
'he': u'שיחת ויקימסע',
'it': u'Discussioni Wikivoyage',
'nl': u'Overleg Wikivoyage',
'pl': u'Dyskusja Wikipodróży',
'pt': u'Wikivoyage Discussão',
'ro': u'Discuție Wikivoyage',
'ru': u'Обсуждение Wikivoyage',
'sv': u'Wikivoyagediskussion',
'uk': u'Обговорення Вікімандри',
}
self.namespaces[100] = {
'de': u'Portal',
'it': u'Portale',
'uk': u'Портал',
}
self.namespaces[101] = {
'de': u'Portal Diskussion',
'it': u'Discussioni portale',
'uk': u'Обговорення порталу',
}
self.namespaces[102] = {
'de': u'Wahl',
}
self.namespaces[103] = {
'de': u'Wahl Diskussion',
}
self.namespaces[104] = {
'de': u'Thema',
'it': u'Tematica',
}
self.namespaces[105] = {
'de': u'Thema Diskussion',
'it': u'Discussioni tematica',
}
self.namespaces[106] = {
'de': u'Nachrichten',
}
self.namespaces[107] = {
'de': u'Nachrichten Diskussion',
}
self.namespaces[108] = {
'he': u'ספר',
}
self.namespaces[109] = {
'he': u'שיחת ספר',
}
# Global bot allowed languages on http://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
self.cross_allowed = ['es', 'ru', ]
| gpl-2.0 |
sidrakesh93/grpc | src/python/interop/interop/methods.py | 8 | 14237 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Implementations of interoperability test methods."""
import enum
import json
import os
import threading
from oauth2client import client as oauth2client_client
from grpc.framework.alpha import utilities
from interop import empty_pb2
from interop import messages_pb2
_TIMEOUT = 7
def _empty_call(request, unused_context):
return empty_pb2.Empty()
_CLIENT_EMPTY_CALL = utilities.unary_unary_invocation_description(
empty_pb2.Empty.SerializeToString, empty_pb2.Empty.FromString)
_SERVER_EMPTY_CALL = utilities.unary_unary_service_description(
_empty_call, empty_pb2.Empty.FromString,
empty_pb2.Empty.SerializeToString)
def _unary_call(request, unused_context):
return messages_pb2.SimpleResponse(
payload=messages_pb2.Payload(
type=messages_pb2.COMPRESSABLE,
body=b'\x00' * request.response_size))
_CLIENT_UNARY_CALL = utilities.unary_unary_invocation_description(
messages_pb2.SimpleRequest.SerializeToString,
messages_pb2.SimpleResponse.FromString)
_SERVER_UNARY_CALL = utilities.unary_unary_service_description(
_unary_call, messages_pb2.SimpleRequest.FromString,
messages_pb2.SimpleResponse.SerializeToString)
def _streaming_output_call(request, unused_context):
for response_parameters in request.response_parameters:
yield messages_pb2.StreamingOutputCallResponse(
payload=messages_pb2.Payload(
type=request.response_type,
body=b'\x00' * response_parameters.size))
_CLIENT_STREAMING_OUTPUT_CALL = utilities.unary_stream_invocation_description(
messages_pb2.StreamingOutputCallRequest.SerializeToString,
messages_pb2.StreamingOutputCallResponse.FromString)
_SERVER_STREAMING_OUTPUT_CALL = utilities.unary_stream_service_description(
_streaming_output_call,
messages_pb2.StreamingOutputCallRequest.FromString,
messages_pb2.StreamingOutputCallResponse.SerializeToString)
def _streaming_input_call(request_iterator, unused_context):
aggregate_size = 0
for request in request_iterator:
if request.payload and request.payload.body:
aggregate_size += len(request.payload.body)
return messages_pb2.StreamingInputCallResponse(
aggregated_payload_size=aggregate_size)
_CLIENT_STREAMING_INPUT_CALL = utilities.stream_unary_invocation_description(
messages_pb2.StreamingInputCallRequest.SerializeToString,
messages_pb2.StreamingInputCallResponse.FromString)
_SERVER_STREAMING_INPUT_CALL = utilities.stream_unary_service_description(
_streaming_input_call,
messages_pb2.StreamingInputCallRequest.FromString,
messages_pb2.StreamingInputCallResponse.SerializeToString)
def _full_duplex_call(request_iterator, unused_context):
for request in request_iterator:
yield messages_pb2.StreamingOutputCallResponse(
payload=messages_pb2.Payload(
type=request.payload.type,
body=b'\x00' * request.response_parameters[0].size))
_CLIENT_FULL_DUPLEX_CALL = utilities.stream_stream_invocation_description(
messages_pb2.StreamingOutputCallRequest.SerializeToString,
messages_pb2.StreamingOutputCallResponse.FromString)
_SERVER_FULL_DUPLEX_CALL = utilities.stream_stream_service_description(
_full_duplex_call,
messages_pb2.StreamingOutputCallRequest.FromString,
messages_pb2.StreamingOutputCallResponse.SerializeToString)
# NOTE(nathaniel): Apparently this is the same as the full-duplex call?
_CLIENT_HALF_DUPLEX_CALL = utilities.stream_stream_invocation_description(
messages_pb2.StreamingOutputCallRequest.SerializeToString,
messages_pb2.StreamingOutputCallResponse.FromString)
_SERVER_HALF_DUPLEX_CALL = utilities.stream_stream_service_description(
_full_duplex_call,
messages_pb2.StreamingOutputCallRequest.FromString,
messages_pb2.StreamingOutputCallResponse.SerializeToString)
SERVICE_NAME = 'grpc.testing.TestService'
_EMPTY_CALL_METHOD_NAME = 'EmptyCall'
_UNARY_CALL_METHOD_NAME = 'UnaryCall'
_STREAMING_OUTPUT_CALL_METHOD_NAME = 'StreamingOutputCall'
_STREAMING_INPUT_CALL_METHOD_NAME = 'StreamingInputCall'
_FULL_DUPLEX_CALL_METHOD_NAME = 'FullDuplexCall'
_HALF_DUPLEX_CALL_METHOD_NAME = 'HalfDuplexCall'
CLIENT_METHODS = {
_EMPTY_CALL_METHOD_NAME: _CLIENT_EMPTY_CALL,
_UNARY_CALL_METHOD_NAME: _CLIENT_UNARY_CALL,
_STREAMING_OUTPUT_CALL_METHOD_NAME: _CLIENT_STREAMING_OUTPUT_CALL,
_STREAMING_INPUT_CALL_METHOD_NAME: _CLIENT_STREAMING_INPUT_CALL,
_FULL_DUPLEX_CALL_METHOD_NAME: _CLIENT_FULL_DUPLEX_CALL,
_HALF_DUPLEX_CALL_METHOD_NAME: _CLIENT_HALF_DUPLEX_CALL,
}
SERVER_METHODS = {
_EMPTY_CALL_METHOD_NAME: _SERVER_EMPTY_CALL,
_UNARY_CALL_METHOD_NAME: _SERVER_UNARY_CALL,
_STREAMING_OUTPUT_CALL_METHOD_NAME: _SERVER_STREAMING_OUTPUT_CALL,
_STREAMING_INPUT_CALL_METHOD_NAME: _SERVER_STREAMING_INPUT_CALL,
_FULL_DUPLEX_CALL_METHOD_NAME: _SERVER_FULL_DUPLEX_CALL,
_HALF_DUPLEX_CALL_METHOD_NAME: _SERVER_HALF_DUPLEX_CALL,
}
def _large_unary_common_behavior(stub, fill_username, fill_oauth_scope):
with stub:
request = messages_pb2.SimpleRequest(
response_type=messages_pb2.COMPRESSABLE, response_size=314159,
payload=messages_pb2.Payload(body=b'\x00' * 271828),
fill_username=fill_username, fill_oauth_scope=fill_oauth_scope)
response_future = stub.UnaryCall.async(request, _TIMEOUT)
response = response_future.result()
if response.payload.type is not messages_pb2.COMPRESSABLE:
raise ValueError(
'response payload type is "%s"!' % type(response.payload.type))
if len(response.payload.body) != 314159:
raise ValueError(
'response body of incorrect size %d!' % len(response.payload.body))
return response
def _empty_unary(stub):
with stub:
response = stub.EmptyCall(empty_pb2.Empty(), _TIMEOUT)
if not isinstance(response, empty_pb2.Empty):
raise TypeError(
'response is of type "%s", not empty_pb2.Empty!', type(response))
def _large_unary(stub):
_large_unary_common_behavior(stub, False, False)
def _client_streaming(stub):
with stub:
payload_body_sizes = (27182, 8, 1828, 45904)
payloads = (
messages_pb2.Payload(body=b'\x00' * size)
for size in payload_body_sizes)
requests = (
messages_pb2.StreamingInputCallRequest(payload=payload)
for payload in payloads)
response = stub.StreamingInputCall(requests, _TIMEOUT)
if response.aggregated_payload_size != 74922:
raise ValueError(
'incorrect size %d!' % response.aggregated_payload_size)
def _server_streaming(stub):
sizes = (31415, 9, 2653, 58979)
with stub:
request = messages_pb2.StreamingOutputCallRequest(
response_type=messages_pb2.COMPRESSABLE,
response_parameters=(
messages_pb2.ResponseParameters(size=sizes[0]),
messages_pb2.ResponseParameters(size=sizes[1]),
messages_pb2.ResponseParameters(size=sizes[2]),
messages_pb2.ResponseParameters(size=sizes[3]),
))
response_iterator = stub.StreamingOutputCall(request, _TIMEOUT)
for index, response in enumerate(response_iterator):
if response.payload.type != messages_pb2.COMPRESSABLE:
raise ValueError(
'response body of invalid type %s!' % response.payload.type)
if len(response.payload.body) != sizes[index]:
raise ValueError(
'response body of invalid size %d!' % len(response.payload.body))
def _cancel_after_begin(stub):
with stub:
sizes = (27182, 8, 1828, 45904)
payloads = [messages_pb2.Payload(body=b'\x00' * size) for size in sizes]
requests = [messages_pb2.StreamingInputCallRequest(payload=payload)
for payload in payloads]
responses = stub.StreamingInputCall.async(requests, _TIMEOUT)
responses.cancel()
if not responses.cancelled():
raise ValueError('expected call to be cancelled')
class _Pipe(object):
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._open = True
def __iter__(self):
return self
def next(self):
with self._condition:
while not self._values and self._open:
self._condition.wait()
if self._values:
return self._values.pop(0)
else:
raise StopIteration()
def add(self, value):
with self._condition:
self._values.append(value)
self._condition.notify()
def close(self):
with self._condition:
self._open = False
self._condition.notify()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
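# _Pipe above is a tiny blocking queue that doubles as the request iterator
# for streaming calls: the test adds requests with add() while the stub
# consumes them via next(), which blocks until a value arrives or close().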
def _ping_pong(stub):
request_response_sizes = (31415, 9, 2653, 58979)
request_payload_sizes = (27182, 8, 1828, 45904)
with stub, _Pipe() as pipe:
response_iterator = stub.FullDuplexCall(pipe, _TIMEOUT)
print 'Starting ping-pong with response iterator %s' % response_iterator
for response_size, payload_size in zip(
request_response_sizes, request_payload_sizes):
request = messages_pb2.StreamingOutputCallRequest(
response_type=messages_pb2.COMPRESSABLE,
response_parameters=(messages_pb2.ResponseParameters(
size=response_size),),
payload=messages_pb2.Payload(body=b'\x00' * payload_size))
pipe.add(request)
response = next(response_iterator)
if response.payload.type != messages_pb2.COMPRESSABLE:
raise ValueError(
'response body of invalid type %s!' % response.payload.type)
if len(response.payload.body) != response_size:
raise ValueError(
'response body of invalid size %d!' % len(response.payload.body))
def _cancel_after_first_response(stub):
request_response_sizes = (31415, 9, 2653, 58979)
request_payload_sizes = (27182, 8, 1828, 45904)
with stub, _Pipe() as pipe:
response_iterator = stub.FullDuplexCall(pipe, _TIMEOUT)
response_size = request_response_sizes[0]
payload_size = request_payload_sizes[0]
request = messages_pb2.StreamingOutputCallRequest(
response_type=messages_pb2.COMPRESSABLE,
response_parameters=(messages_pb2.ResponseParameters(
size=response_size),),
payload=messages_pb2.Payload(body=b'\x00' * payload_size))
pipe.add(request)
response = next(response_iterator)
# We test the contents of `response` in the Ping Pong test - don't check
# them here.
response_iterator.cancel()
try:
next(response_iterator)
except Exception:
pass
else:
raise ValueError('expected call to be cancelled')
def _compute_engine_creds(stub, args):
response = _large_unary_common_behavior(stub, True, True)
if args.default_service_account != response.username:
raise ValueError(
'expected username %s, got %s' % (args.default_service_account,
response.username))
def _service_account_creds(stub, args):
json_key_filename = os.environ[
oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS]
wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
response = _large_unary_common_behavior(stub, True, True)
if wanted_email != response.username:
raise ValueError(
'expected username %s, got %s' % (wanted_email, response.username))
if args.oauth_scope.find(response.oauth_scope) == -1:
raise ValueError(
'expected to find oauth scope "%s" in received "%s"' %
(response.oauth_scope, args.oauth_scope))
@enum.unique
class TestCase(enum.Enum):
EMPTY_UNARY = 'empty_unary'
LARGE_UNARY = 'large_unary'
SERVER_STREAMING = 'server_streaming'
CLIENT_STREAMING = 'client_streaming'
PING_PONG = 'ping_pong'
CANCEL_AFTER_BEGIN = 'cancel_after_begin'
CANCEL_AFTER_FIRST_RESPONSE = 'cancel_after_first_response'
COMPUTE_ENGINE_CREDS = 'compute_engine_creds'
SERVICE_ACCOUNT_CREDS = 'service_account_creds'
def test_interoperability(self, stub, args):
if self is TestCase.EMPTY_UNARY:
_empty_unary(stub)
elif self is TestCase.LARGE_UNARY:
_large_unary(stub)
elif self is TestCase.SERVER_STREAMING:
_server_streaming(stub)
elif self is TestCase.CLIENT_STREAMING:
_client_streaming(stub)
elif self is TestCase.PING_PONG:
_ping_pong(stub)
elif self is TestCase.CANCEL_AFTER_BEGIN:
_cancel_after_begin(stub)
elif self is TestCase.CANCEL_AFTER_FIRST_RESPONSE:
_cancel_after_first_response(stub)
elif self is TestCase.COMPUTE_ENGINE_CREDS:
_compute_engine_creds(stub, args)
elif self is TestCase.SERVICE_ACCOUNT_CREDS:
_service_account_creds(stub, args)
else:
raise NotImplementedError('Test case "%s" not implemented!' % self.name)
| bsd-3-clause |
dbaxa/django | django/utils/inspect.py | 323 | 4195 | from __future__ import absolute_import
import inspect
from django.utils import six
def getargspec(func):
if six.PY2:
return inspect.getargspec(func)
sig = inspect.signature(func)
args = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
varkw = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
varkw = varkw[0] if varkw else None
defaults = [
p.default for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty
] or None
return args, varargs, varkw, defaults
def get_func_args(func):
if six.PY2:
argspec = inspect.getargspec(func)
return argspec.args[1:] # ignore 'self'
sig = inspect.signature(func)
return [
arg_name for arg_name, param in sig.parameters.items()
if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
def get_func_full_args(func):
"""
Return a list of (argument name, default value) tuples. If the argument
does not have a default value, omit it in the tuple. Arguments such as
*args and **kwargs are also included.
"""
if six.PY2:
argspec = inspect.getargspec(func)
args = argspec.args[1:] # ignore 'self'
defaults = argspec.defaults or []
# Split args into two lists depending on whether they have default value
no_default = args[:len(args) - len(defaults)]
with_default = args[len(args) - len(defaults):]
# Join the two lists and combine it with default values
args = [(arg,) for arg in no_default] + zip(with_default, defaults)
# Add possible *args and **kwargs and prepend them with '*' or '**'
varargs = [('*' + argspec.varargs,)] if argspec.varargs else []
kwargs = [('**' + argspec.keywords,)] if argspec.keywords else []
return args + varargs + kwargs
sig = inspect.signature(func)
args = []
for arg_name, param in sig.parameters.items():
name = arg_name
# Ignore 'self'
if name == 'self':
continue
if param.kind == inspect.Parameter.VAR_POSITIONAL:
name = '*' + name
elif param.kind == inspect.Parameter.VAR_KEYWORD:
name = '**' + name
if param.default != inspect.Parameter.empty:
args.append((name, param.default))
else:
args.append((name,))
return args
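# Illustrative usage (an editorial sketch, not part of Django; the function
# below is hypothetical). Both code paths above yield one tuple per argument,
# pairing a name with its default only when one exists:
#
#     def example(self, a, b=2, *args, **kwargs): pass
#
#     get_func_full_args(example)
#     # -> [('a',), ('b', 2), ('*args',), ('**kwargs',)]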
def func_accepts_kwargs(func):
if six.PY2:
# Not all callables are inspectable with getargspec, so we'll
# try a couple different ways but in the end fall back on assuming
# it is -- we don't want to prevent registration of valid but weird
# callables.
try:
argspec = inspect.getargspec(func)
except TypeError:
try:
argspec = inspect.getargspec(func.__call__)
except (TypeError, AttributeError):
argspec = None
return not argspec or argspec[2] is not None
return any(
p for p in inspect.signature(func).parameters.values()
if p.kind == p.VAR_KEYWORD
)
def func_accepts_var_args(func):
"""
Return True if function 'func' accepts positional arguments *args.
"""
if six.PY2:
return inspect.getargspec(func)[1] is not None
return any(
p for p in inspect.signature(func).parameters.values()
if p.kind == p.VAR_POSITIONAL
)
def func_has_no_args(func):
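    # A single remaining positional argument is assumed to be `self`, so this
    # reports whether a method accepts no arguments beyond the instance.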
args = inspect.getargspec(func)[0] if six.PY2 else [
p for p in inspect.signature(func).parameters.values()
if p.kind == p.POSITIONAL_OR_KEYWORD
]
return len(args) == 1
def func_supports_parameter(func, parameter):
if six.PY3:
return parameter in inspect.signature(func).parameters
else:
args, varargs, varkw, defaults = inspect.getargspec(func)
return parameter in args
| bsd-3-clause |
leeseulstack/openstack | neutron/tests/unit/agent/l2population_rpc_base.py | 8 | 6003 | # Copyright (C) 2014 VA Linux Systems Japan K.K.
# Copyright (C) 2014 Fumihiko Kakuma <kakuma at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from neutron.agent import l2population_rpc
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
from neutron.plugins.openvswitch.agent import ovs_neutron_agent
from neutron.tests import base
class FakeNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin):
def fdb_add(self, context, fdb_entries):
pass
def fdb_remove(self, context, fdb_entries):
pass
def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
pass
def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
pass
def setup_tunnel_port(self, br, remote_ip, network_type):
pass
def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type):
pass
def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
ip_address):
pass
class TestL2populationRpcCallBackTunnelMixinBase(base.BaseTestCase):
def setUp(self):
super(TestL2populationRpcCallBackTunnelMixinBase, self).setUp()
self.fakeagent = FakeNeutronAgent()
self.fakebr = mock.Mock()
Port = collections.namedtuple('Port', 'ip, ofport')
LVM = collections.namedtuple(
'LVM', 'net, vlan, phys, segid, mac, ip, vif, port')
self.local_ip = '127.0.0.1'
self.type_gre = 'gre'
self.ports = [Port(ip='10.1.0.1', ofport='ofport1'),
Port(ip='10.1.0.2', ofport='ofport2'),
Port(ip='10.1.0.3', ofport='ofport3')]
self.ofports = {
self.type_gre: {
self.ports[0].ip: self.ports[0].ofport,
self.ports[1].ip: self.ports[1].ofport,
self.ports[2].ip: self.ports[2].ofport,
}
}
self.lvms = [LVM(net='net1', vlan=1, phys='phys1', segid='tun1',
mac='mac1', ip='1.1.1.1', vif='vifid1',
port='port1'),
LVM(net='net2', vlan=2, phys='phys2', segid='tun2',
mac='mac2', ip='2.2.2.2', vif='vifid2',
port='port2'),
LVM(net='net3', vlan=3, phys='phys3', segid='tun3',
mac='mac3', ip='3.3.3.3', vif='vifid3',
port='port3')]
self.agent_ports = {
self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)],
self.ports[1].ip: [(self.lvms[1].mac, self.lvms[1].ip)],
self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)],
}
self.fdb_entries1 = {
self.lvms[0].net: {
'network_type': self.type_gre,
'segment_id': self.lvms[0].segid,
'ports': {
self.local_ip: [],
self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)]},
},
self.lvms[1].net: {
'network_type': self.type_gre,
'segment_id': self.lvms[1].segid,
'ports': {
self.local_ip: [],
self.ports[1].ip: [(self.lvms[1].mac, self.lvms[1].ip)]},
},
self.lvms[2].net: {
'network_type': self.type_gre,
'segment_id': self.lvms[2].segid,
'ports': {
self.local_ip: [],
self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)]},
},
}
self.lvm1 = ovs_neutron_agent.LocalVLANMapping(
self.lvms[0].vlan, self.type_gre, self.lvms[0].phys,
self.lvms[0].segid, {self.lvms[0].vif: self.lvms[0].port})
self.lvm2 = ovs_neutron_agent.LocalVLANMapping(
self.lvms[1].vlan, self.type_gre, self.lvms[1].phys,
self.lvms[1].segid, {self.lvms[1].vif: self.lvms[1].port})
self.lvm3 = ovs_neutron_agent.LocalVLANMapping(
self.lvms[2].vlan, self.type_gre, self.lvms[2].phys,
self.lvms[2].segid, {self.lvms[2].vif: self.lvms[2].port})
self.local_vlan_map1 = {
self.lvms[0].net: self.lvm1,
self.lvms[1].net: self.lvm2,
self.lvms[2].net: self.lvm3,
}
self.upd_fdb_entry1_val = {
self.lvms[0].net: {
self.ports[0].ip: {
'before': [l2pop_rpc.PortInfo(self.lvms[0].mac,
self.lvms[0].ip)],
'after': [l2pop_rpc.PortInfo(self.lvms[1].mac,
self.lvms[1].ip)],
},
self.ports[1].ip: {
'before': [l2pop_rpc.PortInfo(self.lvms[0].mac,
self.lvms[0].ip)],
'after': [l2pop_rpc.PortInfo(self.lvms[1].mac,
self.lvms[1].ip)],
},
},
self.lvms[1].net: {
self.ports[2].ip: {
'before': [l2pop_rpc.PortInfo(self.lvms[0].mac,
self.lvms[0].ip)],
'after': [l2pop_rpc.PortInfo(self.lvms[2].mac,
self.lvms[2].ip)],
},
},
}
self.upd_fdb_entry1 = {'chg_ip': self.upd_fdb_entry1_val}
| apache-2.0 |
DavidAndreev/indico | migrations/versions/201511121701_134a1c372738_add_currency_to_registration.py | 2 | 1725 | """Add currency to registration
Revision ID: 134a1c372738
Revises: 98f411f40bb
Create Date: 2015-11-12 17:01:41.117908
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '134a1c372738'
down_revision = '98f411f40bb'
def upgrade():
op.add_column('forms', sa.Column('currency', sa.String(), nullable=True), schema='event_registration')
op.add_column('registrations', sa.Column('currency', sa.String(), nullable=True), schema='event_registration')
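    # Note: the Postgres ``#>> '{}'`` operator in the statement below extracts
    # the JSON value at the root path as text, unwrapping the json-encoded
    # currency setting into a plain string.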
op.execute("""
UPDATE event_registration.forms f SET currency = coalesce(
(SELECT value FROM events.settings WHERE module = 'payment' AND name = 'currency' AND event_id = f.event_id),
(SELECT value FROM indico.settings WHERE module = 'payment' AND name = 'currency')
) #>> '{}'
""")
op.execute("DELETE FROM events.settings WHERE module = 'payment' AND name = 'currency'")
op.execute("""
UPDATE event_registration.registrations r SET currency = (
SELECT currency FROM event_registration.forms WHERE id = r.registration_form_id
)
""")
op.alter_column('forms', 'currency', nullable=False, schema='event_registration')
op.alter_column('registrations', 'currency', nullable=False, schema='event_registration')
def downgrade():
op.execute("""
INSERT INTO events.settings (event_id, module, name, value)
SELECT DISTINCT ON (event_id) event_id, 'payment', 'currency', to_json(currency)
FROM event_registration.forms ORDER BY event_id
""")
op.drop_column('registrations', 'currency', schema='event_registration')
op.drop_column('forms', 'currency', schema='event_registration')
| gpl-3.0 |
stephanie-wang/ray | python/ray/gcs_utils.py | 1 | 2203 | from ray.core.generated.gcs_pb2 import (
ActorCheckpointIdData,
ActorTableData,
GcsNodeInfo,
JobTableData,
ErrorTableData,
ErrorType,
GcsEntry,
HeartbeatBatchTableData,
HeartbeatTableData,
ObjectTableData,
ProfileTableData,
TablePrefix,
TablePubsub,
TaskTableData,
ResourceTableData,
)
__all__ = [
"ActorCheckpointIdData",
"ActorTableData",
"GcsNodeInfo",
"JobTableData",
"ErrorTableData",
"ErrorType",
"GcsEntry",
"HeartbeatBatchTableData",
"HeartbeatTableData",
"ObjectTableData",
"ProfileTableData",
"TablePrefix",
"TablePubsub",
"TaskTableData",
"ResourceTableData",
"construct_error_message",
]
FUNCTION_PREFIX = "RemoteFunction:"
LOG_FILE_CHANNEL = "RAY_LOG_CHANNEL"
REPORTER_CHANNEL = "RAY_REPORTER"
# xray heartbeats
XRAY_HEARTBEAT_CHANNEL = str(
TablePubsub.Value("HEARTBEAT_PUBSUB")).encode("ascii")
XRAY_HEARTBEAT_BATCH_CHANNEL = str(
TablePubsub.Value("HEARTBEAT_BATCH_PUBSUB")).encode("ascii")
# xray job updates
XRAY_JOB_CHANNEL = str(TablePubsub.Value("JOB_PUBSUB")).encode("ascii")
# These prefixes must be kept up-to-date with the TablePrefix enum in
# gcs.proto.
# TODO(rkn): We should use scoped enums, in which case we should be able to
# just access the flatbuffer generated values.
TablePrefix_RAYLET_TASK_string = "RAYLET_TASK"
TablePrefix_OBJECT_string = "OBJECT"
TablePrefix_ERROR_INFO_string = "ERROR_INFO"
TablePrefix_PROFILE_string = "PROFILE"
TablePrefix_JOB_string = "JOB"
TablePrefix_ACTOR_string = "ACTOR"
def construct_error_message(job_id, error_type, message, timestamp):
"""Construct a serialized ErrorTableData object.
Args:
job_id: The ID of the job that the error should go to. If this is
nil, then the error will go to all drivers.
error_type: The type of the error.
message: The error message.
timestamp: The time of the error.
Returns:
The serialized object.
"""
data = ErrorTableData()
data.job_id = job_id.binary()
data.type = error_type
data.error_message = message
data.timestamp = timestamp
return data.SerializeToString()
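# Illustrative usage (an editorial sketch; `job_id` is assumed to be a Ray
# JobID-like object exposing .binary(), and the enum value and channel names
# are hypothetical):
#
#     data = construct_error_message(
#         job_id, ErrorType.Value("TASK_PUSH_ERROR"),
#         "worker died unexpectedly", time.time())
#     redis_client.publish(error_channel, data)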
| apache-2.0 |
alrusdi/lettuce | tests/integration/lib/Django-1.3/django/contrib/gis/geos/tests/test_io.py | 321 | 4159 | import binascii, ctypes, unittest
from django.contrib.gis.geos import GEOSGeometry, WKTReader, WKTWriter, WKBReader, WKBWriter, geos_version_info
class GEOSIOTest(unittest.TestCase):
def test01_wktreader(self):
# Creating a WKTReader instance
wkt_r = WKTReader()
wkt = 'POINT (5 23)'
# read() should return a GEOSGeometry
ref = GEOSGeometry(wkt)
g1 = wkt_r.read(wkt)
g2 = wkt_r.read(unicode(wkt))
for geom in (g1, g2):
self.assertEqual(ref, geom)
# Should only accept basestring objects.
self.assertRaises(TypeError, wkt_r.read, 1)
self.assertRaises(TypeError, wkt_r.read, buffer('foo'))
def test02_wktwriter(self):
# Creating a WKTWriter instance, testing its ptr property.
wkt_w = WKTWriter()
self.assertRaises(TypeError, wkt_w._set_ptr, WKTReader.ptr_type())
ref = GEOSGeometry('POINT (5 23)')
ref_wkt = 'POINT (5.0000000000000000 23.0000000000000000)'
self.assertEqual(ref_wkt, wkt_w.write(ref))
def test03_wkbreader(self):
# Creating a WKBReader instance
wkb_r = WKBReader()
hex = '000000000140140000000000004037000000000000'
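        # Big-endian WKB for POINT (5 23): byte-order flag 00, geometry type
        # 00000001 (Point), then the doubles 0x4014... (5.0) and
        # 0x4037... (23.0).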
wkb = buffer(binascii.a2b_hex(hex))
ref = GEOSGeometry(hex)
# read() should return a GEOSGeometry on either a hex string or
# a WKB buffer.
g1 = wkb_r.read(wkb)
g2 = wkb_r.read(hex)
for geom in (g1, g2):
self.assertEqual(ref, geom)
bad_input = (1, 5.23, None, False)
for bad_wkb in bad_input:
self.assertRaises(TypeError, wkb_r.read, bad_wkb)
def test04_wkbwriter(self):
wkb_w = WKBWriter()
# Representations of 'POINT (5 23)' in hex -- one normal and
# the other with the byte order changed.
g = GEOSGeometry('POINT (5 23)')
hex1 = '010100000000000000000014400000000000003740'
wkb1 = buffer(binascii.a2b_hex(hex1))
hex2 = '000000000140140000000000004037000000000000'
wkb2 = buffer(binascii.a2b_hex(hex2))
self.assertEqual(hex1, wkb_w.write_hex(g))
self.assertEqual(wkb1, wkb_w.write(g))
# Ensuring bad byteorders are not accepted.
for bad_byteorder in (-1, 2, 523, 'foo', None):
# Equivalent of `wkb_w.byteorder = bad_byteorder`
self.assertRaises(ValueError, wkb_w._set_byteorder, bad_byteorder)
# Setting the byteorder to 0 (for Big Endian)
wkb_w.byteorder = 0
self.assertEqual(hex2, wkb_w.write_hex(g))
self.assertEqual(wkb2, wkb_w.write(g))
# Back to Little Endian
wkb_w.byteorder = 1
# Now, trying out the 3D and SRID flags.
g = GEOSGeometry('POINT (5 23 17)')
g.srid = 4326
hex3d = '0101000080000000000000144000000000000037400000000000003140'
wkb3d = buffer(binascii.a2b_hex(hex3d))
hex3d_srid = '01010000A0E6100000000000000000144000000000000037400000000000003140'
wkb3d_srid = buffer(binascii.a2b_hex(hex3d_srid))
# Ensuring bad output dimensions are not accepted
for bad_outdim in (-1, 0, 1, 4, 423, 'foo', None):
# Equivalent of `wkb_w.outdim = bad_outdim`
self.assertRaises(ValueError, wkb_w._set_outdim, bad_outdim)
# These tests will fail on 3.0.0 because of a bug that was fixed in 3.1:
# http://trac.osgeo.org/geos/ticket/216
if not geos_version_info()['version'].startswith('3.0.'):
# Now setting the output dimensions to be 3
wkb_w.outdim = 3
self.assertEqual(hex3d, wkb_w.write_hex(g))
self.assertEqual(wkb3d, wkb_w.write(g))
        # Telling the WKBWriter to include the srid in the representation.
wkb_w.srid = True
self.assertEqual(hex3d_srid, wkb_w.write_hex(g))
self.assertEqual(wkb3d_srid, wkb_w.write(g))
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GEOSIOTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| gpl-3.0 |
beni55/dipy | dipy/reconst/tests/test_shore_metrics.py | 6 | 3418 | import numpy as np
from dipy.data import get_gtab_taiwan_dsi
from numpy.testing import (assert_almost_equal,
assert_equal,
run_module_suite)
from dipy.reconst.shore import ShoreModel, shore_matrix, shore_indices, shore_order
from dipy.sims.voxel import (
MultiTensor, all_tensor_evecs, multi_tensor_odf, single_tensor_odf,
multi_tensor_rtop, multi_tensor_msd, multi_tensor_pdf)
from dipy.data import get_sphere
from scipy.special import genlaguerre
def test_shore_metrics():
gtab = get_gtab_taiwan_dsi()
mevals = np.array(([0.0015, 0.0003, 0.0003],
[0.0015, 0.0003, 0.0003]))
angl = [(0, 0), (60, 0)]
S, sticks = MultiTensor(gtab, mevals, S0=100.0, angles=angl,
fractions=[50, 50], snr=None)
# test shore_indices
n = 7
l = 6
m = -4
radial_order, c = shore_order(n, l, m)
n2, l2, m2 = shore_indices(radial_order, c)
assert_equal(n, n2)
assert_equal(l, l2)
assert_equal(m, m2)
radial_order = 6
c = 41
n, l, m = shore_indices(radial_order, c)
radial_order2, c2 = shore_order(n, l, m)
assert_equal(radial_order, radial_order2)
assert_equal(c, c2)
    # Since we are testing without noise, we can use a higher order and
    # lower lambdas than the defaults.
radial_order = 8
zeta = 700
lambdaN = 1e-12
lambdaL = 1e-12
asm = ShoreModel(gtab, radial_order=radial_order,
zeta=zeta, lambdaN=lambdaN, lambdaL=lambdaL)
asmfit = asm.fit(S)
c_shore = asmfit.shore_coeff
cmat = shore_matrix(radial_order, zeta, gtab)
S_reconst = np.dot(cmat, c_shore)
# test the signal reconstruction
S = S / S[0]
nmse_signal = np.sqrt(np.sum((S - S_reconst) ** 2)) / (S.sum())
assert_almost_equal(nmse_signal, 0.0, 4)
# test if the analytical integral of the pdf is equal to one
integral = 0
    for n in range(int(radial_order / 2) + 1):
        integral += c_shore[n] * (np.pi ** (-1.5) * zeta ** (-1.5) * genlaguerre(n, 0.5)(0)) ** 0.5
assert_almost_equal(integral, 1.0, 10)
# test if the integral of the pdf calculated on a discrete grid is equal to one
pdf_discrete = asmfit.pdf_grid(17, 40e-3)
integral = pdf_discrete.sum()
assert_almost_equal(integral, 1.0, 1)
# compare the shore pdf with the ground truth multi_tensor pdf
sphere = get_sphere('symmetric724')
v = sphere.vertices
radius = 10e-3
pdf_shore = asmfit.pdf(v * radius)
pdf_mt = multi_tensor_pdf(v * radius, mevals=mevals,
angles=angl, fractions= [50, 50])
nmse_pdf = np.sqrt(np.sum((pdf_mt - pdf_shore) ** 2)) / (pdf_mt.sum())
assert_almost_equal(nmse_pdf, 0.0, 2)
# compare the shore rtop with the ground truth multi_tensor rtop
rtop_shore_signal = asmfit.rtop_signal()
rtop_shore_pdf = asmfit.rtop_pdf()
assert_almost_equal(rtop_shore_signal, rtop_shore_pdf, 9)
rtop_mt = multi_tensor_rtop([.5, .5], mevals=mevals)
    assert_equal(rtop_mt / rtop_shore_signal < 1.10 and rtop_mt / rtop_shore_signal > 0.95, True)
# compare the shore msd with the ground truth multi_tensor msd
msd_mt = multi_tensor_msd([.5, .5], mevals=mevals)
msd_shore = asmfit.msd()
assert_equal(msd_mt / msd_shore < 1.05 and msd_mt / msd_shore > 0.95, True)
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
punchagan/zulip | zerver/migrations/0044_reaction.py | 6 | 1273 | import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zerver", "0043_realm_filter_validators"),
]
operations = [
migrations.CreateModel(
name="Reaction",
fields=[
(
"id",
models.AutoField(
verbose_name="ID", serialize=False, auto_created=True, primary_key=True
),
),
(
"user_profile",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
(
"message",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.Message"
),
),
("emoji_name", models.TextField()),
],
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name="reaction",
unique_together={("user_profile", "message", "emoji_name")},
),
]
| apache-2.0 |
jmansar/empathy | release.py | 2 | 8128 | #!/usr/bin/env python
import os
import re
import urllib
import csv
import datetime
import time
from string import Template
from optparse import OptionParser
last_tag_pattern = 'EMPATHY_2_29*'
upload_server = 'master.gnome.org'
template = '''\
$name $version is now available for download from:
$download
$md5sums
What is it?
===========
$about
You can visit the project web site:
$website
What's New?
===========
$news
$footer'''
class Bug:
number = ''
author = ''
class Project:
def __init__(self):
f = open('config.h', 'r')
s = f.read()
f.close()
key = {}
key['package'] = '#define PACKAGE_NAME "'
key['version'] = '#define PACKAGE_VERSION "'
key['bugreport'] = '#define PACKAGE_BUGREPORT "'
for line in s.splitlines(1):
if line.startswith(key['package']):
p1 = len(key['package'])
p2 = line.rfind('"')
self.package_name = line[p1:p2]
elif line.startswith(key['version']):
p1 = len(key['version'])
p2 = line.rfind('"')
self.package_version = line[p1:p2]
elif line.startswith(key['bugreport']):
p2 = line.rfind('"')
p1 = line.rfind('=') + 1
self.package_module = line[p1:p2]
first = self.package_version.find('.')
second = self.package_version.find('.', first + 1)
if first == -1 or second == -1 or first == second:
version_dir = self.package_version
else:
version_dir = self.package_version[:second]
self.package_dl_url = 'http://download.gnome.org/sources/%s/%s/' % (self.package_name.lower(),
version_dir)
        tags_str = self.exec_cmd('git tag -l %s' % (last_tag_pattern))
tags = tags_str.splitlines()
        self.last_tag = tags[-1]
def exec_cmd(self,cmd):
return os.popen(cmd).read()
def get_news(self):
f = open ('NEWS', 'r')
s = f.read()
f.close()
start = s.find ('NEW in '+ self.package_version)
if start != -1:
start = s.find ('\n', start) + 1
start = s.find ('\n', start) + 1
end = s.find ('NEW in', start) - 1
return s[start:end].strip()
def get_md5sums(self):
md5sums = ''
cmd = 'md5sum %s-%s.tar.gz' % (self.package_name.lower(), self.package_version)
md5sums += self.exec_cmd(cmd)
cmd = 'md5sum %s-%s.tar.bz2' % (self.package_name.lower(), self.package_version)
md5sums += self.exec_cmd(cmd).strip()
return md5sums
def get_bugzilla_info(self):
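        # Scrape the Bugzilla product page: the description lives in the first
        # <p><i>...</i></p> block, and the project URL in the href of the link
        # nearest the word "homepage".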
query = 'http://bugzilla.gnome.org/browse.cgi?product=%s' % (self.package_module)
f = urllib.urlopen(query)
s = f.read()
f.close()
s1 = '<p><i>'
i = s.find(s1)
start = i + len(s1)
s2 = '</i></p>'
end = s.find(s2, i + 1)
description = s[start:end]
s1 = "homepage"
i = s.find(s1)
s1 = "href"
i = s.rfind(s1, 0, i)
start = i + 6
s2 = '">'
end = s.find(s2, start)
project_url = s[start:end]
return (description, project_url)
def get_release_notes(self):
name = self.package_name
version = self.package_version
download = self.package_dl_url
md5sums = self.get_md5sums()
(about, website) = self.get_bugzilla_info()
news = self.get_news()
footer = '%s\n%s team' % (datetime.date.today().strftime('%d %B %Y'),\
self.package_name)
t = Template(template)
return t.substitute(locals())
def get_translations(self, cmd, format):
translations = ''
files_str = self.exec_cmd(cmd)
files = files_str.splitlines()
for line in files:
f = line[line.rfind(' '):]
lang = f[f.rfind('/')+1:f.rfind('.')]
commit_str = self.exec_cmd("git log %s.. %s" % (self.last_tag, f))
if commit_str == '':
continue
authors = ''
for line in commit_str.splitlines():
if line.startswith('Author:'):
p1 = line.find(' ')
p2 = line.find('<')
author = line[p1:p2].strip()
if authors.find(author) != -1:
continue
if authors != '':
authors += ", "
authors += author
translations += format % (lang, authors)
return translations
def get_bug_author(self, bug_number):
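        # Walk the log since the last tag, keep the 20 lines preceding each
        # mention of the bug, reverse them with tac, and take the nearest
        # Author: line, i.e. the author of the commit that fixed the bug.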
cmd = 'git log %s.. | grep -B 20 -E "(bug %s|#%s)"' \
' | tac | grep ^Author: | head -1' \
% (self.last_tag, bug_number, bug_number)
line = self.exec_cmd (cmd)
p1 = line.find(" ")
p2 = line.find("<")
return line[p1:p2].strip()
def get_bugs(self):
commit_str = self.exec_cmd('git show %s' % (self.last_tag))
for line in commit_str.splitlines():
if line.startswith('Date:'):
time_str = line[5:line.rfind('+')].strip()
t = time.strptime(time_str)
last_tag_date = time.strftime('%Y-%m-%d', t)
break
query = 'http://bugzilla.gnome.org/buglist.cgi?' \
'ctype=csv&product=empathy&' \
'bug_status=RESOLVED,CLOSED,VERIFIED&resolution=FIXED&' \
'chfieldfrom=%s&chfieldto=Now' % (last_tag_date)
f = urllib.urlopen(query)
s = f.read()
f.close()
col_bug_id = -1
col_description = -1
reader = csv.reader(s.splitlines(1))
header = reader.next()
i = 0
for col in header:
if col == 'bug_id':
col_bug_id = i
if col == 'short_short_desc':
col_description = i
i = i + 1
bugs = ''
for row in reader:
bug_number = row[col_bug_id]
description = row[col_description]
author = self.get_bug_author(bug_number)
bugs += ' - Fixed #%s, %s' % (bug_number, description)
if author != '':
bugs += ' (%s)' % (author)
bugs += '\n'
return bugs
def generate_news(self):
translations = self.get_translations("ls -l po/*.po", \
" - Updated %s Translation (%s)\n")
help_translations = self.get_translations("ls -l help/*/*.po", \
" - Updated %s Documentation translation (%s)\n")
bugs = self.get_bugs()
news = 'NEW in '+ self.package_version + '\n==============\n'
if bugs != '':
news += 'Bugs fixed:\n' + bugs + '\n'
if translations != '':
news += 'Translations:\n' + translations + '\n'
if help_translations != '':
news += 'Documentation translations:\n' + \
help_translations + '\n'
return news
def write_news(self):
news = self.generate_news()
f = open ('/tmp/NEWS', 'w')
s = f.write(news)
f.close()
self.exec_cmd('cat NEWS >> /tmp/NEWS')
self.exec_cmd('mv /tmp/NEWS .')
def make_tag(self):
new_tag = self.package_name.upper() + '_' +\
self.package_version.replace('.', '_')
self.exec_cmd('git tag -m "Tagged for release %s." %s' % ( self.package_version, new_tag))
def _get_username(self):
username = os.environ.get('GNOME_ACCOUNT_NAME')
if username is not None:
return username
return os.getlogin()
def upload_tarball(self):
username = self._get_username()
tarball = '%s-%s.tar.gz' % (self.package_name.lower(), self.package_version)
cmd = 'scp %s %s@%s:' % (tarball, username, upload_server)
self.exec_cmd(cmd)
cmd = 'ssh %s@%s install-module -u %s' % (username, upload_server, tarball)
self.exec_cmd(cmd)
def send_email(self):
notes = self.get_release_notes()
cmd = 'xdg-email ' \
' --cc [email protected]' \
' --subject "ANNOUNCE: Empathy %s"' \
' --body "%s"' \
' [email protected]' % (self.package_version,
notes.replace('"', '\\"'))
self.exec_cmd(cmd)
def release(self):
self.make_tag()
self.upload_tarball()
self.send_email()
if __name__ == '__main__':
p = Project()
parser = OptionParser()
parser.add_option("-n", "--print-news", action="store_true",\
dest="print_news", help="Generate and print news")
parser.add_option("-p", "--print-notes", action="store_true",\
dest="print_notes", help="Generate and print the release notes")
parser.add_option("-w", "--write-news", action="store_true",\
dest="write_news", help="Generate and write news into the NEWS file")
parser.add_option("-r", "--release", action="store_true",\
dest="release", help="Release the tarball")
parser.add_option("-e", "--email", action="store_true",\
dest="email", help="Send the release announce email")
(options, args) = parser.parse_args ()
if (options.print_news):
print p.generate_news ()
if (options.print_notes):
print p.get_release_notes ()
if (options.write_news):
p.write_news ()
if (options.release):
p.release ()
if (options.email):
p.send_email ()
| gpl-2.0 |
splunk/splunk-webframework | contrib/django/django/contrib/admin/helpers.py | 100 | 13631 | from __future__ import unicode_literals
from django import forms
from django.contrib.admin.util import (flatten_fieldsets, lookup_field,
display_for_field, label_for_field, help_text_for_field)
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields.related import ManyToManyRel
from django.forms.util import flatatt
from django.template.defaultfilters import capfirst
from django.utils.encoding import force_text, smart_text
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
ACTION_CHECKBOX_NAME = '_selected_action'
class ActionForm(forms.Form):
action = forms.ChoiceField(label=_('Action:'))
select_across = forms.BooleanField(label='', required=False, initial=0,
widget=forms.HiddenInput({'class': 'select-across'}))
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
class AdminForm(object):
def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None):
self.form, self.fieldsets = form, normalize_fieldsets(fieldsets)
self.prepopulated_fields = [{
'field': form[field_name],
'dependencies': [form[f] for f in dependencies]
} for field_name, dependencies in prepopulated_fields.items()]
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for name, options in self.fieldsets:
yield Fieldset(self.form, name,
readonly_fields=self.readonly_fields,
model_admin=self.model_admin,
**options
)
def first_field(self):
try:
fieldset_name, fieldset_options = self.fieldsets[0]
field_name = fieldset_options['fields'][0]
if not isinstance(field_name, six.string_types):
field_name = field_name[0]
return self.form[field_name]
except (KeyError, IndexError):
pass
try:
return next(iter(self.form))
except StopIteration:
return None
def _media(self):
media = self.form.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class Fieldset(object):
def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(),
description=None, model_admin=None):
self.form = form
self.name, self.fields = name, fields
self.classes = ' '.join(classes)
self.description = description
self.model_admin = model_admin
self.readonly_fields = readonly_fields
def _media(self):
if 'collapse' in self.classes:
extra = '' if settings.DEBUG else '.min'
js = ['jquery%s.js' % extra,
'jquery.init.js',
'collapse%s.js' % extra]
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
return forms.Media()
media = property(_media)
def __iter__(self):
for field in self.fields:
yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)
class Fieldline(object):
def __init__(self, form, field, readonly_fields=None, model_admin=None):
self.form = form # A django.forms.Form instance
if not hasattr(field, "__iter__") or isinstance(field, six.text_type):
self.fields = [field]
else:
self.fields = field
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for i, field in enumerate(self.fields):
if field in self.readonly_fields:
yield AdminReadonlyField(self.form, field, is_first=(i == 0),
model_admin=self.model_admin)
else:
yield AdminField(self.form, field, is_first=(i == 0))
def errors(self):
return mark_safe('\n'.join([self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields]).strip('\n'))
class AdminField(object):
def __init__(self, form, field, is_first):
self.field = form[field] # A django.forms.BoundField instance
self.is_first = is_first # Whether this field is first on the line
self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)
def label_tag(self):
classes = []
contents = conditional_escape(force_text(self.field.label))
if self.is_checkbox:
classes.append('vCheckboxLabel')
else:
contents += ':'
if self.field.field.required:
classes.append('required')
if not self.is_first:
classes.append('inline')
attrs = classes and {'class': ' '.join(classes)} or {}
return self.field.label_tag(contents=mark_safe(contents), attrs=attrs)
def errors(self):
return mark_safe(self.field.errors.as_ul())
class AdminReadonlyField(object):
def __init__(self, form, field, is_first, model_admin=None):
label = label_for_field(field, form._meta.model, model_admin)
# Make self.field look a little bit like a field. This means that
# {{ field.name }} must be a useful class name to identify the field.
# For convenience, store other field-related data here too.
if callable(field):
class_name = field.__name__ != '<lambda>' and field.__name__ or ''
else:
class_name = field
self.field = {
'name': class_name,
'label': label,
'field': field,
'help_text': help_text_for_field(class_name, form._meta.model)
}
self.form = form
self.model_admin = model_admin
self.is_first = is_first
self.is_checkbox = False
self.is_readonly = True
def label_tag(self):
attrs = {}
if not self.is_first:
attrs["class"] = "inline"
label = self.field['label']
return format_html('<label{0}>{1}:</label>',
flatatt(attrs),
capfirst(force_text(label)))
def contents(self):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin
try:
f, attr, value = lookup_field(field, obj, model_admin)
except (AttributeError, ValueError, ObjectDoesNotExist):
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
boolean = getattr(attr, "boolean", False)
if boolean:
result_repr = _boolean_icon(value)
else:
result_repr = smart_text(value)
if getattr(attr, "allow_tags", False):
result_repr = mark_safe(result_repr)
else:
if isinstance(f.rel, ManyToManyRel) and value is not None:
result_repr = ", ".join(map(six.text_type, value.all()))
else:
result_repr = display_for_field(value, f)
return conditional_escape(result_repr)
class InlineAdminFormSet(object):
"""
A wrapper around an inline formset for use in the admin system.
"""
def __init__(self, inline, formset, fieldsets, prepopulated_fields=None,
readonly_fields=None, model_admin=None):
self.opts = inline
self.formset = formset
self.fieldsets = fieldsets
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
if prepopulated_fields is None:
prepopulated_fields = {}
self.prepopulated_fields = prepopulated_fields
def __iter__(self):
for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()):
yield InlineAdminForm(self.formset, form, self.fieldsets,
self.prepopulated_fields, original, self.readonly_fields,
model_admin=self.opts)
for form in self.formset.extra_forms:
yield InlineAdminForm(self.formset, form, self.fieldsets,
self.prepopulated_fields, None, self.readonly_fields,
model_admin=self.opts)
yield InlineAdminForm(self.formset, self.formset.empty_form,
self.fieldsets, self.prepopulated_fields, None,
self.readonly_fields, model_admin=self.opts)
def fields(self):
fk = getattr(self.formset, "fk", None)
for i, field in enumerate(flatten_fieldsets(self.fieldsets)):
if fk and fk.name == field:
continue
if field in self.readonly_fields:
yield {
'label': label_for_field(field, self.opts.model, self.opts),
'widget': {
'is_hidden': False
},
'required': False
}
else:
yield self.formset.form.base_fields[field]
def _media(self):
media = self.opts.media + self.formset.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class InlineAdminForm(AdminForm):
"""
A wrapper around an inline form for use in the admin system.
"""
def __init__(self, formset, form, fieldsets, prepopulated_fields, original,
readonly_fields=None, model_admin=None):
self.formset = formset
self.model_admin = model_admin
self.original = original
if original is not None:
self.original_content_type_id = ContentType.objects.get_for_model(original).pk
self.show_url = original and hasattr(original, 'get_absolute_url')
super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields,
readonly_fields, model_admin)
def __iter__(self):
for name, options in self.fieldsets:
yield InlineFieldset(self.formset, self.form, name,
self.readonly_fields, model_admin=self.model_admin, **options)
def has_auto_field(self):
if self.form._meta.model._meta.has_auto_field:
return True
# Also search any parents for an auto field.
for parent in self.form._meta.model._meta.get_parent_list():
if parent._meta.has_auto_field:
return True
return False
def field_count(self):
# tabular.html uses this function for colspan value.
num_of_fields = 0
if self.has_auto_field():
num_of_fields += 1
num_of_fields += len(self.fieldsets[0][1]["fields"])
if self.formset.can_order:
num_of_fields += 1
if self.formset.can_delete:
num_of_fields += 1
return num_of_fields
def pk_field(self):
return AdminField(self.form, self.formset._pk_field.name, False)
def fk_field(self):
fk = getattr(self.formset, "fk", None)
if fk:
return AdminField(self.form, fk.name, False)
else:
return ""
def deletion_field(self):
from django.forms.formsets import DELETION_FIELD_NAME
return AdminField(self.form, DELETION_FIELD_NAME, False)
def ordering_field(self):
from django.forms.formsets import ORDERING_FIELD_NAME
return AdminField(self.form, ORDERING_FIELD_NAME, False)
class InlineFieldset(Fieldset):
def __init__(self, formset, *args, **kwargs):
self.formset = formset
super(InlineFieldset, self).__init__(*args, **kwargs)
def __iter__(self):
fk = getattr(self.formset, "fk", None)
for field in self.fields:
if fk and fk.name == field:
continue
yield Fieldline(self.form, field, self.readonly_fields,
model_admin=self.model_admin)
class AdminErrorList(forms.util.ErrorList):
"""
Stores all errors for the form/formsets in an add/change stage view.
"""
def __init__(self, form, inline_formsets):
if form.is_bound:
self.extend(list(six.itervalues(form.errors)))
for inline_formset in inline_formsets:
self.extend(inline_formset.non_form_errors())
for errors_in_inline_form in inline_formset.errors:
self.extend(list(six.itervalues(errors_in_inline_form)))
def normalize_fieldsets(fieldsets):
"""
Make sure the keys in fieldset dictionaries are strings. Returns the
normalized data.
"""
result = []
for name, options in fieldsets:
result.append((name, normalize_dictionary(options)))
return result
def normalize_dictionary(data_dict):
"""
Converts all the keys in "data_dict" to strings. The keys must be
convertible using str().
"""
    # Copy the items so keys can be deleted during iteration (items() is a
    # live view on Python 3).
    for key, value in list(data_dict.items()):
if not isinstance(key, str):
del data_dict[key]
data_dict[str(key)] = value
return data_dict
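# Illustrative example (an editorial sketch): on Python 2, a unicode key is
# re-keyed as a plain byte string:
#
#     normalize_dictionary({u'fields': ('name',)})
#     # -> {'fields': ('name',)}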
| apache-2.0 |
moylop260/odoo-dev | addons/account/wizard/account_move_bank_reconcile.py | 385 | 2684 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_move_bank_reconcile(osv.osv_memory):
"""
Bank Reconciliation
"""
_name = "account.move.bank.reconcile"
_description = "Move bank reconcile"
_columns = {
'journal_id': fields.many2one('account.journal', 'Journal', required=True),
}
def action_open_window(self, cr, uid, ids, context=None):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: account move bank reconcile’s ID or list of IDs
        @return: action dictionary opening the account move lines of the given journal_id.
"""
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
cr.execute('select default_credit_account_id \
from account_journal where id=%s', (data['journal_id'],))
account_id = cr.fetchone()[0]
if not account_id:
raise osv.except_osv(_('Error!'), _('You have to define \
the bank account\nin the journal definition for reconciliation.'))
return {
'domain': "[('journal_id','=',%d), ('account_id','=',%d), ('state','<>','draft')]" % (data['journal_id'], account_id),
'name': _('Standard Encoding'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.move.line',
'view_id': False,
'context': "{'journal_id': %d}" % (data['journal_id'],),
'type': 'ir.actions.act_window'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fentas/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/checkout_unittest.py | 115 | 12236 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import os
import shutil
import tempfile
import unittest2 as unittest
from .checkout import Checkout
from .changelog import ChangeLogEntry
from .scm import CommitMessage, SCMDetector
from .scm.scm_mock import MockSCM
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.filesystem import FileSystem # FIXME: This should not be needed.
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
_changelog1entry1 = u"""2010-03-25 Tor Arne Vestb\u00f8 <[email protected]>
Unreviewed build fix to un-break webkit-patch land.
Move commit_message_for_this_commit from scm to checkout
https://bugs.webkit.org/show_bug.cgi?id=36629
* Scripts/webkitpy/common/checkout/api.py: import scm.CommitMessage
"""
_changelog1entry2 = u"""2010-03-25 Adam Barth <[email protected]>
Reviewed by Eric Seidel.
Move commit_message_for_this_commit from scm to checkout
https://bugs.webkit.org/show_bug.cgi?id=36629
* Scripts/webkitpy/common/checkout/api.py:
"""
_changelog1 = u"\n".join([_changelog1entry1, _changelog1entry2])
_changelog2 = u"""2010-03-25 Tor Arne Vestb\u00f8 <[email protected]>
Unreviewed build fix to un-break webkit-patch land.
Second part of this complicated change by me, Tor Arne Vestb\u00f8!
* Path/To/Complicated/File: Added.
2010-03-25 Adam Barth <[email protected]>
Reviewed by Eric Seidel.
Filler change.
"""
class CommitMessageForThisCommitTest(unittest.TestCase):
expected_commit_message = u"""Unreviewed build fix to un-break webkit-patch land.
Tools:
Move commit_message_for_this_commit from scm to checkout
https://bugs.webkit.org/show_bug.cgi?id=36629
* Scripts/webkitpy/common/checkout/api.py: import scm.CommitMessage
LayoutTests:
Second part of this complicated change by me, Tor Arne Vestb\u00f8!
* Path/To/Complicated/File: Added.
"""
def setUp(self):
# FIXME: This should not need to touch the filesystem, however
# ChangeLog is difficult to mock at current.
self.filesystem = FileSystem()
self.temp_dir = str(self.filesystem.mkdtemp(suffix="changelogs"))
self.old_cwd = self.filesystem.getcwd()
self.filesystem.chdir(self.temp_dir)
self.webkit_base = WebKitFinder(self.filesystem).webkit_base()
# Trick commit-log-editor into thinking we're in a Subversion working copy so it won't
# complain about not being able to figure out what SCM is in use.
        # FIXME: VCSTools.pm is no longer so easily fooled. It logs a warning
        # because "svn info" doesn't treat a bare .svn directory as being part
        # of an svn checkout.
self.filesystem.maybe_make_directory(".svn")
self.changelogs = map(self.filesystem.abspath, (self.filesystem.join("Tools", "ChangeLog"), self.filesystem.join("LayoutTests", "ChangeLog")))
for path, contents in zip(self.changelogs, (_changelog1, _changelog2)):
self.filesystem.maybe_make_directory(self.filesystem.dirname(path))
self.filesystem.write_text_file(path, contents)
def tearDown(self):
self.filesystem.rmtree(self.temp_dir)
self.filesystem.chdir(self.old_cwd)
def test_commit_message_for_this_commit(self):
executive = Executive()
def mock_run(*args, **kwargs):
# Note that we use a real Executive here, not a MockExecutive, so we can test that we're
# invoking commit-log-editor correctly.
env = os.environ.copy()
env['CHANGE_LOG_EMAIL_ADDRESS'] = '[email protected]'
kwargs['env'] = env
return executive.run_command(*args, **kwargs)
detector = SCMDetector(self.filesystem, executive)
real_scm = detector.detect_scm_system(self.webkit_base)
mock_scm = MockSCM()
mock_scm.run = mock_run
mock_scm.script_path = real_scm.script_path
checkout = Checkout(mock_scm)
checkout.modified_changelogs = lambda git_commit, changed_files=None: self.changelogs
commit_message = checkout.commit_message_for_this_commit(git_commit=None, return_stderr=True)
# Throw away the first line - a warning about unknown VCS root.
commit_message.message_lines = commit_message.message_lines[1:]
self.assertMultiLineEqual(commit_message.message(), self.expected_commit_message)
class CheckoutTest(unittest.TestCase):
def _make_checkout(self):
return Checkout(scm=MockSCM(), filesystem=MockFileSystem(), executive=MockExecutive())
def test_latest_entry_for_changelog_at_revision(self):
def mock_contents_at_revision(changelog_path, revision):
self.assertEqual(changelog_path, "foo")
self.assertEqual(revision, "bar")
# contents_at_revision is expected to return a byte array (str)
# so we encode our unicode ChangeLog down to a utf-8 stream.
# The ChangeLog utf-8 decoding should ignore invalid codepoints.
invalid_utf8 = "\255"
return _changelog1.encode("utf-8") + invalid_utf8
checkout = self._make_checkout()
checkout._scm.contents_at_revision = mock_contents_at_revision
entry = checkout._latest_entry_for_changelog_at_revision("foo", "bar")
self.assertMultiLineEqual(entry.contents(), _changelog1entry1) # Pylint is confused about this line, pylint: disable=E1101
# FIXME: This tests a hack around our current changed_files handling.
# Right now changelog_entries_for_revision tries to fetch deleted files
# from revisions, resulting in a ScriptError exception. Test that we
# recover from those and still return the other ChangeLog entries.
def test_changelog_entries_for_revision(self):
checkout = self._make_checkout()
checkout._scm.changed_files_for_revision = lambda revision: ['foo/ChangeLog', 'bar/ChangeLog']
def mock_latest_entry_for_changelog_at_revision(path, revision):
if path == "foo/ChangeLog":
return 'foo'
raise ScriptError()
checkout._latest_entry_for_changelog_at_revision = mock_latest_entry_for_changelog_at_revision
# Even though fetching one of the entries failed, the other should succeed.
entries = checkout.changelog_entries_for_revision(1)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], 'foo')
def test_commit_info_for_revision(self):
checkout = self._make_checkout()
checkout._scm.changed_files_for_revision = lambda revision: ['path/to/file', 'another/file']
checkout._scm.committer_email_for_revision = lambda revision, changed_files=None: "[email protected]"
checkout.changelog_entries_for_revision = lambda revision, changed_files=None: [ChangeLogEntry(_changelog1entry1)]
commitinfo = checkout.commit_info_for_revision(4)
self.assertEqual(commitinfo.bug_id(), 36629)
self.assertEqual(commitinfo.author_name(), u"Tor Arne Vestb\u00f8")
self.assertEqual(commitinfo.author_email(), "[email protected]")
self.assertIsNone(commitinfo.reviewer_text())
self.assertIsNone(commitinfo.reviewer())
self.assertEqual(commitinfo.committer_email(), "[email protected]")
self.assertIsNone(commitinfo.committer())
self.assertEqual(commitinfo.to_json(), {
'bug_id': 36629,
'author_email': '[email protected]',
'changed_files': [
'path/to/file',
'another/file',
],
'reviewer_text': None,
'author_name': u'Tor Arne Vestb\xf8',
})
checkout.changelog_entries_for_revision = lambda revision, changed_files=None: []
self.assertIsNone(checkout.commit_info_for_revision(1))
def test_bug_id_for_revision(self):
checkout = self._make_checkout()
checkout._scm.committer_email_for_revision = lambda revision: "[email protected]"
checkout.changelog_entries_for_revision = lambda revision, changed_files=None: [ChangeLogEntry(_changelog1entry1)]
self.assertEqual(checkout.bug_id_for_revision(4), 36629)
def test_bug_id_for_this_commit(self):
checkout = self._make_checkout()
checkout.commit_message_for_this_commit = lambda git_commit, changed_files=None: CommitMessage(ChangeLogEntry(_changelog1entry1).contents().splitlines())
self.assertEqual(checkout.bug_id_for_this_commit(git_commit=None), 36629)
def test_modified_changelogs(self):
checkout = self._make_checkout()
checkout._scm.checkout_root = "/foo/bar"
checkout._scm.changed_files = lambda git_commit: ["file1", "ChangeLog", "relative/path/ChangeLog"]
expected_changlogs = ["/foo/bar/ChangeLog", "/foo/bar/relative/path/ChangeLog"]
self.assertEqual(checkout.modified_changelogs(git_commit=None), expected_changlogs)
def test_suggested_reviewers(self):
def mock_changelog_entries_for_revision(revision, changed_files=None):
if revision % 2 == 0:
return [ChangeLogEntry(_changelog1entry1)]
return [ChangeLogEntry(_changelog1entry2)]
def mock_revisions_changing_file(path, limit=5):
if path.endswith("ChangeLog"):
return [3]
return [4, 8]
checkout = self._make_checkout()
checkout._scm.checkout_root = "/foo/bar"
checkout._scm.changed_files = lambda git_commit: ["file1", "file2", "relative/path/ChangeLog"]
checkout._scm.revisions_changing_file = mock_revisions_changing_file
checkout.changelog_entries_for_revision = mock_changelog_entries_for_revision
reviewers = checkout.suggested_reviewers(git_commit=None)
reviewer_names = [reviewer.full_name for reviewer in reviewers]
self.assertEqual(reviewer_names, [u'Tor Arne Vestb\xf8'])
def test_apply_patch(self):
checkout = self._make_checkout()
checkout._executive = MockExecutive(should_log=True)
checkout._scm.script_path = lambda script: script
mock_patch = Mock()
mock_patch.contents = lambda: "foo"
mock_patch.reviewer = lambda: None
expected_logs = "MOCK run_command: ['svn-apply', '--force'], cwd=/mock-checkout, input=foo\n"
OutputCapture().assert_outputs(self, checkout.apply_patch, [mock_patch], expected_logs=expected_logs)
| bsd-3-clause |
ukanga/SickRage | lib/libgrowl/gntp.py | 150 | 12750 | import re
import hashlib
import time
import platform
__version__ = '0.1'
class BaseError(Exception):
pass
class ParseError(BaseError):
def gntp_error(self):
error = GNTPError(errorcode=500,errordesc='Error parsing the message')
return error.encode()
class AuthError(BaseError):
def gntp_error(self):
error = GNTPError(errorcode=400,errordesc='Error with authorization')
return error.encode()
class UnsupportedError(BaseError):
def gntp_error(self):
error = GNTPError(errorcode=500,errordesc='Currently unsupported by gntp.py')
return error.encode()
class _GNTPBase(object):
def __init__(self,messagetype):
self.info = {
'version':'1.0',
'messagetype':messagetype,
'encryptionAlgorithmID':None
}
self.requiredHeaders = []
self.headers = {}
def add_origin_info(self):
self.add_header('Origin-Machine-Name',platform.node())
self.add_header('Origin-Software-Name','gntp.py')
self.add_header('Origin-Software-Version',__version__)
self.add_header('Origin-Platform-Name',platform.system())
self.add_header('Origin-Platform-Version',platform.platform())
def send(self):
print self.encode()
def __str__(self):
return self.encode()
def parse_info(self,data):
'''
Parse the first line of a GNTP message to get security and other info values
@param data: GNTP Message
@return: GNTP Message information in a dictionary
'''
#GNTP/<version> <messagetype> <encryptionAlgorithmID>[:<ivValue>][ <keyHashAlgorithmID>:<keyHash>.<salt>]
match = re.match('GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)'+
' (?P<encryptionAlgorithmID>[A-Z0-9]+(:(?P<ivValue>[A-F0-9]+))?) ?'+
'((?P<keyHashAlgorithmID>[A-Z0-9]+):(?P<keyHash>[A-F0-9]+).(?P<salt>[A-F0-9]+))?\r\n', data,re.IGNORECASE)
if not match:
raise ParseError('ERROR_PARSING_INFO_LINE')
info = match.groupdict()
if info['encryptionAlgorithmID'] == 'NONE':
info['encryptionAlgorithmID'] = None
return info
def set_password(self,password,encryptAlgo='MD5'):
'''
Set a password for a GNTP Message
@param password: Null to clear password
@param encryptAlgo: Currently only supports MD5
@todo: Support other hash functions
'''
self.password = password
if not password:
self.info['encryptionAlgorithmID'] = None
			self.info['keyHashAlgorithmID'] = None
return
password = password.encode('utf8')
seed = time.ctime()
salt = hashlib.md5(seed).hexdigest()
saltHash = hashlib.md5(seed).digest()
keyBasis = password+saltHash
key = hashlib.md5(keyBasis).digest()
keyHash = hashlib.md5(key).hexdigest()
self.info['keyHashAlgorithmID'] = encryptAlgo.upper()
self.info['keyHash'] = keyHash.upper()
self.info['salt'] = salt.upper()
	def _decode_hex(self,value):
		'''
		Helper function to decode a hex string into the raw byte string
		it represents (e.g. '4142' -> 'AB')
		@param value: Hex string to decode
		@return: Decoded byte string
		'''
result = ''
for i in range(0,len(value),2):
tmp = int(value[i:i+2],16)
result += chr(tmp)
return result
def _decode_binary(self,rawIdentifier,identifier):
rawIdentifier += '\r\n\r\n'
dataLength = int(identifier['Length'])
pointerStart = self.raw.find(rawIdentifier)+len(rawIdentifier)
pointerEnd = pointerStart + dataLength
data = self.raw[pointerStart:pointerEnd]
if not len(data) == dataLength:
			raise ParseError('INVALID_DATA_LENGTH Expected: %s Received %s'%(dataLength,len(data)))
return data
def validate_password(self,password):
'''
Validate GNTP Message against stored password
'''
self.password = password
		if password is None: raise AuthError('Missing password')
keyHash = self.info.get('keyHash',None)
if keyHash is None and self.password is None:
return True
if keyHash is None:
raise AuthError('Invalid keyHash')
if self.password is None:
raise AuthError('Missing password')
password = self.password.encode('utf8')
saltHash = self._decode_hex(self.info['salt'])
keyBasis = password+saltHash
key = hashlib.md5(keyBasis).digest()
keyHash = hashlib.md5(key).hexdigest()
if not keyHash.upper() == self.info['keyHash'].upper():
raise AuthError('Invalid Hash')
return True
def validate(self):
'''
Verify required headers
'''
for header in self.requiredHeaders:
if not self.headers.get(header,False):
raise ParseError('Missing Notification Header: '+header)
def format_info(self):
'''
Generate info line for GNTP Message
@return: Info line string
'''
info = u'GNTP/%s %s'%(
self.info.get('version'),
self.info.get('messagetype'),
)
if self.info.get('encryptionAlgorithmID',None):
info += ' %s:%s'%(
self.info.get('encryptionAlgorithmID'),
self.info.get('ivValue'),
)
else:
info+=' NONE'
if self.info.get('keyHashAlgorithmID',None):
info += ' %s:%s.%s'%(
self.info.get('keyHashAlgorithmID'),
self.info.get('keyHash'),
self.info.get('salt')
)
return info
def parse_dict(self,data):
'''
Helper function to parse blocks of GNTP headers into a dictionary
@param data:
@return: Dictionary of headers
'''
dict = {}
for line in data.split('\r\n'):
match = re.match('([\w-]+):(.+)', line)
if not match: continue
key = match.group(1).strip()
val = match.group(2).strip()
dict[key] = val
#print key,'\t\t\t',val
return dict
def add_header(self,key,value):
self.headers[key] = value
def decode(self,data,password=None):
'''
Decode GNTP Message
@param data:
'''
self.password = password
self.raw = data
parts = self.raw.split('\r\n\r\n')
self.info = self.parse_info(data)
self.headers = self.parse_dict(parts[0])
def encode(self):
'''
Encode a GNTP Message
@return: GNTP Message ready to be sent
'''
self.validate()
SEP = u': '
EOL = u'\r\n'
message = self.format_info() + EOL
#Headers
for k,v in self.headers.iteritems():
message += k.encode('utf8') + SEP + str(v).encode('utf8') + EOL
message += EOL
return message
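# Illustrative round-trip (an editorial sketch, not part of the library):
#
#     notice = GNTPNotice(app='MyApp', name='build', title='Build finished')
#     notice.set_password('secret')
#     payload = notice.encode()
#     # payload starts with: GNTP/1.0 NOTIFY NONE MD5:<keyHash>.<salt>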
class GNTPRegister(_GNTPBase):
'''
GNTP Registration Message
'''
def __init__(self,data=None,password=None):
'''
@param data: (Optional) See decode()
@param password: (Optional) Password to use while encoding/decoding messages
'''
_GNTPBase.__init__(self,'REGISTER')
self.notifications = []
self.resources = {}
self.requiredHeaders = [
'Application-Name',
'Notifications-Count'
]
self.requiredNotification = [
'Notification-Name',
]
if data:
self.decode(data,password)
else:
self.set_password(password)
self.headers['Application-Name'] = 'pygntp'
self.headers['Notifications-Count'] = 0
self.add_origin_info()
def validate(self):
'''
Validate required headers and validate notification headers
'''
for header in self.requiredHeaders:
if not self.headers.get(header,False):
raise ParseError('Missing Registration Header: '+header)
for notice in self.notifications:
for header in self.requiredNotification:
if not notice.get(header,False):
raise ParseError('Missing Notification Header: '+header)
def decode(self,data,password):
'''
Decode existing GNTP Registration message
@param data: Message to decode.
'''
self.raw = data
parts = self.raw.split('\r\n\r\n')
self.info = self.parse_info(data)
self.validate_password(password)
self.headers = self.parse_dict(parts[0])
for i,part in enumerate(parts):
if i==0: continue #Skip Header
if part.strip()=='': continue
notice = self.parse_dict(part)
if notice.get('Notification-Name',False):
self.notifications.append(notice)
elif notice.get('Identifier',False):
notice['Data'] = self._decode_binary(part,notice)
#open('register.png','wblol').write(notice['Data'])
self.resources[ notice.get('Identifier') ] = notice
def add_notification(self,name,enabled=True):
'''
Add new Notification to Registration message
@param name: Notification Name
@param enabled: Default Notification to Enabled
'''
notice = {}
notice['Notification-Name'] = name
notice['Notification-Enabled'] = str(enabled)
self.notifications.append(notice)
self.headers['Notifications-Count'] = len(self.notifications)
def encode(self):
'''
Encode a GNTP Registration Message
@return: GNTP Registration Message ready to be sent
'''
self.validate()
SEP = u': '
EOL = u'\r\n'
message = self.format_info() + EOL
#Headers
for k,v in self.headers.iteritems():
message += k.encode('utf8') + SEP + str(v).encode('utf8') + EOL
#Notifications
if len(self.notifications)>0:
for notice in self.notifications:
message += EOL
for k,v in notice.iteritems():
message += k.encode('utf8') + SEP + str(v).encode('utf8') + EOL
message += EOL
return message
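# For reference, the unencrypted wire format produced by encode() looks
# roughly like this (a sketch; header values are illustrative):
#
#     GNTP/1.0 REGISTER NONE
#     Application-Name: pygntp
#     Notifications-Count: 1
#
#     Notification-Name: build-finished
#     Notification-Enabled: True
#
# i.e. an info line, one header block, then one blank-line-separated block
# per registered notification.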
class GNTPNotice(_GNTPBase):
'''
GNTP Notification Message
'''
def __init__(self,data=None,app=None,name=None,title=None,password=None):
'''
@param data: (Optional) See decode()
@param app: (Optional) Set Application-Name
@param name: (Optional) Set Notification-Name
@param title: (Optional) Set Notification Title
@param password: (Optional) Password to use while encoding/decoding messages
'''
_GNTPBase.__init__(self,'NOTIFY')
self.resources = {}
self.requiredHeaders = [
'Application-Name',
'Notification-Name',
'Notification-Title'
]
if data:
self.decode(data,password)
else:
self.set_password(password)
if app:
self.headers['Application-Name'] = app
if name:
self.headers['Notification-Name'] = name
if title:
self.headers['Notification-Title'] = title
self.add_origin_info()
def decode(self,data,password):
'''
Decode existing GNTP Notification message
@param data: Message to decode.
'''
self.raw = data
parts = self.raw.split('\r\n\r\n')
self.info = self.parse_info(data)
self.validate_password(password)
self.headers = self.parse_dict(parts[0])
for i,part in enumerate(parts):
if i==0: continue #Skip Header
if part.strip()=='': continue
notice = self.parse_dict(part)
if notice.get('Identifier',False):
notice['Data'] = self._decode_binary(part,notice)
#open('notice.png','wblol').write(notice['Data'])
self.resources[ notice.get('Identifier') ] = notice
def encode(self):
'''
Encode a GNTP Notification Message
@return: GNTP Notification Message ready to be sent
'''
self.validate()
SEP = u': '
EOL = u'\r\n'
message = self.format_info() + EOL
#Headers
for k,v in self.headers.iteritems():
message += k + SEP + unicode(v) + EOL
message += EOL
return message.encode('utf-8')
class GNTPSubscribe(_GNTPBase):
def __init__(self,data=None,password=None):
_GNTPBase.__init__(self, 'SUBSCRIBE')
self.requiredHeaders = [
'Subscriber-ID',
'Subscriber-Name',
]
if data:
self.decode(data,password)
else:
self.set_password(password)
self.add_origin_info()
class GNTPOK(_GNTPBase):
def __init__(self,data=None,action=None):
'''
@param data: (Optional) See _GNTPResponse.decode()
@param action: (Optional) Set type of action the OK Response is for
'''
_GNTPBase.__init__(self,'-OK')
self.requiredHeaders = ['Response-Action']
if data:
self.decode(data)
if action:
self.headers['Response-Action'] = action
self.add_origin_info()
class GNTPError(_GNTPBase):
def __init__(self,data=None,errorcode=None,errordesc=None):
'''
@param data: (Optional) See _GNTPResponse.decode()
@param errorcode: (Optional) Error code
@param errordesc: (Optional) Error Description
'''
_GNTPBase.__init__(self,'-ERROR')
self.requiredHeaders = ['Error-Code','Error-Description']
if data:
self.decode(data)
if errorcode:
self.headers['Error-Code'] = errorcode
self.headers['Error-Description'] = errordesc
self.add_origin_info()
def parse_gntp(data,password=None,debug=False):
'''
Attempt to parse a message as a GNTP message
@param data: Message to be parsed
@param password: Optional password to be used to verify the message
@param debug: Print out extra debugging information
'''
match = re.match('GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)',data,re.IGNORECASE)
if not match:
if debug:
print '----'
print data
print '----'
raise ParseError('INVALID_GNTP_INFO')
info = match.groupdict()
if info['messagetype'] == 'REGISTER':
return GNTPRegister(data,password=password)
elif info['messagetype'] == 'NOTIFY':
return GNTPNotice(data,password=password)
elif info['messagetype'] == 'SUBSCRIBE':
return GNTPSubscribe(data,password=password)
elif info['messagetype'] == '-OK':
return GNTPOK(data)
elif info['messagetype'] == '-ERROR':
return GNTPError(data)
if debug: print info
raise ParseError('INVALID_GNTP_MESSAGE')
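# Usage sketch for the parser above (the socket handling shown here is
# illustrative and not part of this module):
#
#     data = conn.recv(4096)
#     try:
#         message = parse_gntp(data, password='secret')
#         message.validate()
#     except (ParseError, AuthError):
#         conn.send(GNTPError(errorcode=400, errordesc='Bad Request').encode())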
| gpl-3.0 |
tdjordan/tortoisegit | tortoisegit/test/testiconoverlay.py | 1 | 3213 | #
# development and stub-testing module for overlay icon handlers
#
import os, sys, time, atexit
from mercurial import ui, util
from mercurial.i18n import _
# FIXME: quick & dirty hack to add tortoise to module search path
import __init__
moddir = os.path.dirname(__init__.__file__)
sys.path.insert(0, os.path.join(moddir, os.pardir))
from iconoverlay import IconOverlayExtension
def lsprof(checkargs):
try:
from mercurial import lsprof
except ImportError:
raise util.Abort(_(
'lsprof not available - install from '
'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
p = lsprof.Profiler()
p.enable(subcalls=True)
try:
return checkargs()
finally:
p.disable()
stats = lsprof.Stats(p.getstats())
stats.sort()
stats.pprint(top=10, file=sys.stderr, climit=5)
def profile(checkargs):
import hotshot, hotshot.stats
prof = hotshot.Profile("hg.prof")
try:
try:
return prof.runcall(checkargs)
except:
try:
ui.ui().warn(_('exception raised - generating '
'profile anyway\n'))
except:
pass
raise
finally:
prof.close()
stats = hotshot.stats.load("hg.prof")
stats.strip_dirs()
stats.sort_stats('cumulative', 'time', 'calls')
stats.print_stats(40)
def timeit():
u = ui.ui()
def get_times():
t = os.times()
if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
t = (t[0], t[1], t[2], t[3], time.clock())
return t
s = get_times()
def print_time():
t = get_times()
u.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
(t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
atexit.register(print_time)
def get_option(args):
import getopt
long_opt_list = ('time', 'lsprof', 'profile')
opts, args = getopt.getopt(args, "", long_opt_list)
options = {}
for o, a in opts:
options[o] = a
return options, args
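# Invocation sketch (the paths are illustrative):
#
#   python testiconoverlay.py --time /path/to/working/copy
#   python testiconoverlay.py --lsprof /path/to/working/copy
#   python testiconoverlay.py --profile /path/to/working/copy
#
# With no path argument the current working directory is used (see below).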
if __name__=='__main__':
ovh = IconOverlayExtension()
option, argv = get_option(sys.argv[1:])
path = len(argv) and argv[0] or os.getcwd()
path = os.path.abspath(path)
# get the list of files residing in the target directory
dirname = os.path.dirname(path)
if dirname == path:
dirlist = [path]
else:
dirlist = [os.path.join(dirname, x) for x in os.listdir(dirname)]
# first call to _get_state() is usually longer...
def get_state_1st():
ovh._get_state(path)
# subsequent call to _get_state() using the files in
# the target directory
def get_state_2nd():
for f in dirlist:
ovh._get_state(f)
# 'master' function for profiling purpose
def get_states():
get_state_1st()
get_state_2nd()
if option.has_key('--time'):
timeit()
if option.has_key('--lsprof'):
lsprof(get_states)
elif option.has_key('--profile'):
profile(get_states)
else:
get_states()
| gpl-2.0 |
migue/voltdb | tests/sqlcoverage/schema/matview-advanced-nonjoin-schema.py | 2 | 2228 | #!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2017 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
{
"V_P2": {
"columns": (("V_G1", FastSerializer.VOLTTYPE_INTEGER),
("V_G2", FastSerializer.VOLTTYPE_INTEGER),
("V_CNT", FastSerializer.VOLTTYPE_INTEGER),
("V_SUM_AGE", FastSerializer.VOLTTYPE_INTEGER),
("V_SUM_RENT", FastSerializer.VOLTTYPE_INTEGER),
),
},
"V_R2_ABS": {
"columns": (("V_G1", FastSerializer.VOLTTYPE_INTEGER),
("V_G2", FastSerializer.VOLTTYPE_INTEGER),
("V_CNT", FastSerializer.VOLTTYPE_INTEGER),
("V_SUM_AGE", FastSerializer.VOLTTYPE_INTEGER),
("V_SUM_RENT", FastSerializer.VOLTTYPE_INTEGER),
),
},
"V_R2": {
"columns": (("V_G1", FastSerializer.VOLTTYPE_INTEGER),
("V_G2", FastSerializer.VOLTTYPE_INTEGER),
("V_CNT", FastSerializer.VOLTTYPE_INTEGER),
("V_SUM_AGE", FastSerializer.VOLTTYPE_INTEGER),
("V_SUM_RENT", FastSerializer.VOLTTYPE_INTEGER),
),
},
}
| agpl-3.0 |
terbolous/SickRage | lib/sqlalchemy/engine/threadlocal.py | 79 | 4097 | # engine/threadlocal.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides a thread-local transactional wrapper around the root Engine class.
The ``threadlocal`` module is invoked when using the
``strategy="threadlocal"`` flag with :func:`~sqlalchemy.engine.create_engine`.
This module is semi-private and is invoked automatically when the threadlocal
engine strategy is used.
"""
from .. import util
from . import base
import weakref
class TLConnection(base.Connection):
def __init__(self, *arg, **kw):
super(TLConnection, self).__init__(*arg, **kw)
self.__opencount = 0
def _increment_connect(self):
self.__opencount += 1
return self
def close(self):
if self.__opencount == 1:
base.Connection.close(self)
self.__opencount -= 1
def _force_close(self):
self.__opencount = 0
base.Connection.close(self)
class TLEngine(base.Engine):
"""An Engine that includes support for thread-local managed
transactions.
"""
_tl_connection_cls = TLConnection
def __init__(self, *args, **kwargs):
super(TLEngine, self).__init__(*args, **kwargs)
self._connections = util.threading.local()
def contextual_connect(self, **kw):
if not hasattr(self._connections, 'conn'):
connection = None
else:
connection = self._connections.conn()
if connection is None or connection.closed:
# guards against pool-level reapers, if desired.
# or not connection.connection.is_valid:
connection = self._tl_connection_cls(
self, self.pool.connect(), **kw)
self._connections.conn = weakref.ref(connection)
return connection._increment_connect()
def begin_twophase(self, xid=None):
if not hasattr(self._connections, 'trans'):
self._connections.trans = []
self._connections.trans.append(
self.contextual_connect().begin_twophase(xid=xid))
return self
def begin_nested(self):
if not hasattr(self._connections, 'trans'):
self._connections.trans = []
self._connections.trans.append(
self.contextual_connect().begin_nested())
return self
def begin(self):
if not hasattr(self._connections, 'trans'):
self._connections.trans = []
self._connections.trans.append(self.contextual_connect().begin())
return self
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if type is None:
self.commit()
else:
self.rollback()
def prepare(self):
if not hasattr(self._connections, 'trans') or \
not self._connections.trans:
return
self._connections.trans[-1].prepare()
def commit(self):
if not hasattr(self._connections, 'trans') or \
not self._connections.trans:
return
trans = self._connections.trans.pop(-1)
trans.commit()
def rollback(self):
if not hasattr(self._connections, 'trans') or \
not self._connections.trans:
return
trans = self._connections.trans.pop(-1)
trans.rollback()
def dispose(self):
self._connections = util.threading.local()
super(TLEngine, self).dispose()
@property
def closed(self):
return not hasattr(self._connections, 'conn') or \
self._connections.conn() is None or \
self._connections.conn().closed
def close(self):
if not self.closed:
self.contextual_connect().close()
connection = self._connections.conn()
connection._force_close()
del self._connections.conn
self._connections.trans = []
def __repr__(self):
return 'TLEngine(%s)' % str(self.url)
| gpl-3.0 |
mkmelin/bedrock | tests/redirects/map_htaccess.py | 6 | 8752 | from __future__ import absolute_import
from .base import flatten, url_test
URLS = flatten((
# Bug 774331 - European press pages
# en-GB
url_test('/en-GB/press/', 'https://blog.mozilla.org/press-uk/'),
url_test('/en-GB/press/media/', 'https://blog.mozilla.org/press-uk/media-library/'),
url_test('/en-GB/press/media/logos/', 'https://blog.mozilla.org/press-uk/media-library/'),
url_test('/en-GB/press/media/screenshots/',
'https://blog.mozilla.org/press-uk/media-library/product-screenshots/'),
url_test('/en-GB/press/media/images/', 'https://blog.mozilla.org/press-uk/media-library/'),
url_test('/en-GB/press/media/videos/',
'https://blog.mozilla.org/press-uk/media-library/videos/'),
# de
url_test('/de/press/', 'https://blog.mozilla.org/press-de/'),
url_test('/de/press/media/', 'https://blog.mozilla.org/press-de/medienbibliothek/'),
url_test('/de/press/media/logos/', 'https://blog.mozilla.org/press-de/medienbibliothek/'),
url_test('/de/press/media/screenshots/',
'https://blog.mozilla.org/press-de/medienbibliothek/produkt-screenshots/'),
url_test('/de/press/media/images/', 'https://blog.mozilla.org/press-de/medienbibliothek/'),
url_test('/de/press/media/videos/',
'https://blog.mozilla.org/press-de/medienbibliothek/videos/'),
url_test('/de/press/media/bios/', 'https://blog.mozilla.org/press/media-library/bios/'),
# fr
url_test('/fr/press/', 'https://blog.mozilla.org/press-fr/'),
url_test('/fr/press/media/', 'https://blog.mozilla.org/press-fr/bibliotheque-mozilla/'),
url_test('/fr/press/media/logos/', 'https://blog.mozilla.org/press-fr/bibliotheque-mozilla/'),
url_test('/fr/press/media/screenshots/',
'https://blog.mozilla.org/press-fr/bibliotheque-mozilla/captures-decran-produits/'),
url_test('/fr/press/media/images/',
'https://blog.mozilla.org/press-fr/bibliotheque-mozilla/'),
url_test('/fr/press/media/videos/',
'https://blog.mozilla.org/press-fr/bibliotheque-mozilla/videos/'),
url_test('/fr/press/media/bios/', 'https://blog.mozilla.org/press/media-library/bios/'),
# it
url_test('/it/press/', 'https://blog.mozilla.org/press-it/'),
url_test('/it/press/media/', 'https://blog.mozilla.org/press-it/galleria-multimediale/'),
url_test('/it/press/media/logos/',
'https://blog.mozilla.org/press-it/galleria-multimediale/'),
url_test('/it/press/media/screenshots/',
'https://blog.mozilla.org/press-it/galleria-multimediale/immagini-del-prodotto/'),
url_test('/it/press/media/images/',
'https://blog.mozilla.org/press-it/galleria-multimediale/'),
url_test('/it/press/media/videos/',
'https://blog.mozilla.org/press-it/galleria-multimediale/videos/'),
url_test('/it/press/media/bios/', 'https://blog.mozilla.org/press/media-library/bios/'),
# es
url_test('/es{,-ES,-AR,-MX}/press/', 'https://blog.mozilla.org/press-es/'),
url_test('/es{,-ES,-AR,-MX}/press/media/',
'https://blog.mozilla.org/press-es/galeria-multimedia-de-mozilla/'),
url_test('/es{,-ES,-AR,-MX}/press/media/logos/',
'https://blog.mozilla.org/press-es/galeria-multimedia-de-mozilla/'),
url_test('/es{,-ES,-AR,-MX}/press/media/screenshots/',
'https://blog.mozilla.org/press-es/galeria-multimedia-de-mozilla/imagenes-del-producto/'),
url_test('/es{,-ES,-AR,-MX}/press/media/images/',
'https://blog.mozilla.org/press-es/galeria-multimedia-de-mozilla/'),
url_test('/es{,-ES,-AR,-MX}/press/media/videos/',
'https://blog.mozilla.org/press-es/galeria-multimedia-de-mozilla/videos/'),
url_test('/es{,-ES,-AR,-MX}/press/media/bios/', 'https://blog.mozilla.org/press/media-library/bios/'),
# pl
url_test('/pl/press/', 'https://blog.mozilla.org/press-pl/'),
url_test('/pl/press/media/', 'https://blog.mozilla.org/press-pl/galeria-multimediow/'),
url_test('/pl/press/media/logos/', 'https://blog.mozilla.org/press-pl/galeria-multimediow/'),
url_test('/pl/press/media/screenshots/',
'https://blog.mozilla.org/press-pl/galeria-multimediow/screenshoty-produktow/'),
url_test('/pl/press/media/images/', 'https://blog.mozilla.org/press-pl/galeria-multimediow/'),
url_test('/pl/press/media/videos/',
'https://blog.mozilla.org/press-pl/galeria-multimediow/videos/'),
url_test('/pl/press/media/bios/', 'https://blog.mozilla.org/press/media-library/bios/'),
# rest
# Bug 747565
url_test('/press/', 'https://blog.mozilla.org/press/'),
url_test('/press/ataglance/', 'https://blog.mozilla.org/press/ataglance/'),
url_test('/press/bios/', 'https://blog.mozilla.org/press/bios/'),
url_test('/press/kits/', 'https://blog.mozilla.org/press/kits/'),
url_test('/press/media/', 'https://blog.mozilla.org/press/media-library/'),
url_test('/press/media/logos/', 'https://blog.mozilla.org/press/media-library/'),
url_test('/press/media/bios/', 'https://blog.mozilla.org/press/media-library/bios/'),
url_test('/press/media/screenshots/',
'https://blog.mozilla.org/press/media-library/screenshots/'),
url_test('/press/media/videos/', 'https://blog.mozilla.org/press/media-library/videos/'),
# Redirects for SeaMonkey project website, now living at seamonkey-project.org
url_test('/projects/seamonkey/', 'http://www.seamonkey-project.org/'),
url_test('/projects/seamonkey/artwork.html',
'http://www.seamonkey-project.org/dev/artwork'),
url_test('/projects/seamonkey/community.html',
'http://www.seamonkey-project.org/community'),
url_test('/projects/seamonkey/get-involved.html',
'http://www.seamonkey-project.org/dev/get-involved'),
url_test('/projects/seamonkey/index.html', 'http://www.seamonkey-project.org/'),
url_test('/projects/seamonkey/news.html', 'http://www.seamonkey-project.org/news'),
url_test('/projects/seamonkey/project-areas.html',
'http://www.seamonkey-project.org/dev/project-areas'),
url_test('/projects/seamonkey/releases/', 'http://www.seamonkey-project.org/releases/'),
url_test('/projects/seamonkey/releases/index.html',
'http://www.seamonkey-project.org/releases/'),
url_test('/projects/seamonkey/review-and-flags.html',
'http://www.seamonkey-project.org/dev/review-and-flags'),
url_test('/projects/seamonkey/releases/1.2.3.html',
'http://www.seamonkey-project.org/releases/1.2.3'),
url_test('/projects/seamonkey/releases/seamonkey-man/index.html',
'http://www.seamonkey-project.org/releases/seamonkey-man/'),
url_test('/projects/seamonkey/releases/seamonkey-dude/walter.html',
'http://www.seamonkey-project.org/releases/seamonkey-dude/walter'),
url_test('/projects/seamonkey/releases/updates/so-good',
'http://www.seamonkey-project.org/releases/updates/so-good'),
url_test('/projects/seamonkey/start/', 'http://www.seamonkey-project.org/start/'),
# bug 1236910
url_test('/support/anything', 'https://support.mozilla.org/'),
# Bug 682619
url_test('/support/thunderbird/problem', 'https://support.mozilla.org/products/thunderbird'),
url_test('/support/firefox/bug', 'https://support.mozilla.org/products/firefox'),
# Bug 638948 redirect beta privacy policy link
url_test('/firefox/beta/feedbackprivacypolicy/', '/privacy/firefox/'),
# Bug 424204
url_test('/en-US/firefox/help/', 'https://support.mozilla.org/'),
# Bug 1255882
url_test('/some/url///', '/some/url/'),
url_test('////', '/en-US/'),
url_test('/en-US///', '/en-US/'),
url_test('/de/firefox/about/', '/de/about/'),
# bug 1300373
url_test('/%2fgoogle.com//', '/google.com/'),
# bug 453506, 1255882
url_test('/editor/editor-embedding.html',
'https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla/Embedding_the_editor'),
url_test('/editor/midasdemo/securityprefs.html',
'https://developer.mozilla.org/docs/Mozilla/Projects/Midas/Security_preferences'),
url_test('/editor/random/page.html', 'http://www-archive.mozilla.org/editor/random/page.html'),
# bug 726217, 1255882
url_test('/projects/bonecho/anti-phishing/',
'https://support.mozilla.org/kb/how-does-phishing-and-malware-protection-work'),
# Bug 453876, 840416
url_test('/add-ons/kodakcd', 'https://addons.mozilla.org/en-US/firefox/addon/4441'),
# Bug 1255882
url_test('/firefox/personal.html', '/firefox/new/'),
url_test('/firefox/upgrade.html', '/firefox/new/'),
url_test('/firefox/ie.html', '/firefox/new/'),
))
| mpl-2.0 |
hochem/osmdeviationfinder | web/basic.py | 1 | 4276 | # -*- coding: utf-8 -*-
"""
OSM Deviation Finder - Web Interface
~~~~~~~~~~~~~~~~~~~~
Implementation of a web interface for the OSM Deviation Finder library.
It uses the flask microframework by Armin Ronacher
For more information see https://github.com/mitsuhiko/flask/
To interact with the GeoServer REST API, the GeoServer configuration client library by boundlessgeo is used, see:
https://github.com/boundlessgeo/gsconfig
On the client side it uses jquery.js, leaflet.js, nprogress.js, DataTables and the UIKit framework,
for further information see the README.md file.
:copyright: (c) 2015 by Martin Hochenwarter
:license: MIT
"""
__author__ = 'Martin Hochenwarter'
__version__ = '0.1'
from web import db, login_manager
from models import User, DevMap
from flask import Blueprint, request, redirect, url_for, render_template, Response, make_response
from flask.ext.login import login_required, login_user, logout_user
#: Blueprint for basic functions and simple user management
basic = Blueprint('basic', __name__, template_folder='templates')
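# Registration sketch: this blueprint is expected to be attached in the
# application factory (the `app` name is illustrative, not from this file):
#
#     from web.basic import basic
#     app.register_blueprint(basic)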
@basic.route('/')
def index():
print 'Index viewed by: '+request.remote_addr
return render_template('index.html')
@basic.route('/about', methods=['POST', 'GET'])
def about():
return render_template('about.html')
@basic.route('/browse', methods=['POST', 'GET'])
def browse():
return render_template('browse.html')
@basic.route('/<uid>', methods=['POST', 'GET'])
def deviationmap(uid):
if uid is None:
return render_template('index.html')
uid = uid.encode('ISO-8859-1')
devmap = DevMap.query.filter_by(uid=uid).first_or_404()
return render_template('map.html', uid=uid, devmap=devmap)
@basic.route('/manage', methods=['GET', 'POST'])
@login_required
def manage():
return render_template('manage.html')
@basic.route('/login', methods=['POST', 'GET'])
def login():
if request.method == "POST" and "email" in request.form and "password" in request.form:
email = request.form["email"]
password = request.form["password"]
user = User.query.filter_by(email=email).first()
remember = False
if user is None:
print 'User not found!'
return Response('User not found')
if "remember" in request.form:
remember = True
if user.check_password(password):
if login_user(user, remember=remember):
return redirect(url_for('basic.index'))
else:
print("Unable to log in user")
return render_template("index.html")
@basic.route("/register", methods=["GET", "POST"])
def register():
if request.method == 'POST' and 'email' in request.form:
email = request.form['email']
username = request.form['username']
password = request.form['password']
remember = False
if 'remember' in request.form:
remember = True
if User.query.filter_by(email=email).first() is not None:
print 'Error: User already exists!'
return Response('Error! User already exists!')
user = User(username, email, password)
try:
db.session.add(user)
db.session.commit()
if login_user(user, remember):
return redirect(url_for('basic.index'))
else:
print("Unable to log you in")
except:
print("Unable to register with the given email address")
return redirect(url_for('basic.index'))
@basic.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('basic.index'))
@basic.route('/robots.txt', methods=['GET'])
def robotstxt():
print 'Robots viewed by: ' + request.remote_addr
robots = render_template('robots.txt')
response = make_response(robots)
response.headers['Content-Type'] = 'text/plain'
return response
@login_manager.unauthorized_handler
def unauthorized_callback():
return redirect(url_for('basic.index'))
@login_manager.user_loader
def load_user(id):
if id is None:
redirect('/login')
user = User.query.filter_by(id=id).first_or_404()
if user is not None:
return user
else:
return None
| mit |
nirmeshk/oh-mainline | vendor/packages/requests/requests/packages/chardet/compat.py | 2943 | 1157 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
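# Usage sketch: wrap_ord() lets byte-oriented detection code iterate input
# uniformly, since iterating a Python 2 str yields 1-char strings while
# iterating Python 3 bytes already yields ints:
#
#     for byte in data:
#         code_point = wrap_ord(byte)   # int on both Python 2 and 3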
| agpl-3.0 |
edx/configuration | util/jenkins/missing_alerts_checker/missing_alerts_checker.py | 4 | 10313 | from __future__ import absolute_import
from __future__ import print_function
import boto3
import requests
import click
from botocore.exceptions import ClientError
import sys
import re
class NewRelic:
def __init__(self, new_relic_api_key):
self.url_alert_extractor = "https://api.newrelic.com/v2/alerts_policies.json"
self.headers = {'X-Api-Key': new_relic_api_key}
def new_relic_policies_extractor(self):
"""
Return:
Return list of all alert policies extracted from New relic
{
"policy": {
"id": int,
"incident_preference": str,
"name": str,
"created_at": int,
"updated_at": int
}
}
"""
response = requests.get(self.url_alert_extractor, headers=self.headers)
if response.status_code != 200:
print("Unable to communicate with New relic.")
sys.exit(1)
try:
alert_policies = response.json()
except ValueError:
print(("Failed to parse response json. Got:\n{}".format(response.text)))
sys.exit(1)
return alert_policies
class InfraAlerts:
def edc_extractor(self):
"""
Return list of all EC2 instances with EDC's tags across all the regions
Returns:
[
{
'name': name,
'ID': instance.id
}
]
"""
client_region = boto3.client('ec2')
filter_tags = [
{
"Name": "tag:environment",
"Values": ["*"]
},
{
"Name": "tag:deployment",
"Values": ["*"]
},
{
"Name": "tag:cluster",
"Values": ["*"]
},
{
'Name': 'instance-state-name',
'Values': ['running']
}
]
instance_list = []
try:
regions_list = client_region.describe_regions()
except ClientError as e:
print(("Unable to connect to AWS with error :{}".format(e)))
sys.exit(1)
for region in regions_list['Regions']:
client = boto3.resource('ec2', region_name=region['RegionName'])
response = client.instances.filter(Filters=filter_tags)
for instance in response:
temp_dict = {}
for tag in instance.tags:
if tag['Key'] == "Name":
name = tag['Value']
temp_dict = {
'name': name,
'ID': instance.id
}
break
else:
pass
instance_list.append(temp_dict)
return instance_list
def missing_alerts_checker(self, instance_list, alert_policies):
"""
Arguments:
instance_list (list):
List of all instances for which we find alerts
alert_policies list(dict):
List of all existing alerts new relic
Return:
Return list of all instances which have no alert in new Relic
[
{
'name': name,
'ID': instance.id
}
]
"""
result_instance = []
for instance in instance_list:
if not any(policy["name"] == instance["name"] + "-infrastructure" for policy in alert_policies["policies"]):
result_instance.append(instance)
return result_instance
class AppAlerts:
def __init__(self, new_relic_api_key):
self.url_app_extractor = "https://api.newrelic.com/v2/applications.json"
self.headers = {'X-Api-Key': new_relic_api_key}
def new_relic_app_extractor(self):
"""
Return:
Return list of all applications in New Relic
"""
response = requests.get(self.url_app_extractor, headers=self.headers)
if response.status_code != 200:
print("Unable to communicate with New relic.")
sys.exit(1)
try:
apps_list = response.json()
except ValueError:
print(("Failed to parse response json. Got:\n{}".format(response.text)))
sys.exit(1)
return apps_list["applications"]
def missing_alerts_checker(self, app_list, alert_policies):
"""
Arguments:
app_list (list):
List of all applications for which we find alerts
alert_policies list(dict):
List of all existing alerts new relic
Return:
Return list of all applications which have no alert in new Relic
"""
result_apps = []
for apps in app_list:
if not any(policy["name"] == apps["name"] + "-application" for policy in alert_policies["policies"]):
result_apps.append(apps)
return result_apps
class BrowserAlerts:
def __init__(self, new_relic_api_key):
self.url_browser_extractor = "https://api.newrelic.com/v2/browser_applications.json"
self.headers = {'X-Api-Key': new_relic_api_key}
def new_relic_browser_extractor(self):
"""
Return:
Return list of all browser applications in New Relic
[
{
"id": "integer",
"name": "string",
"browser_monitoring_key": "string",
"loader_script": "string"
}
]
"""
response = requests.get(self.url_browser_extractor, headers=self.headers)
if response.status_code != 200:
print("Unable to communicate with New relic.")
sys.exit(1)
try:
browser_list = response.json()
except ValueError:
raise Exception("Failed to parse response json. Got:\n{}".format(response.text))
return browser_list["browser_applications"]
def missing_alerts_checker(self, browser_list, alert_policies):
"""
Arguments:
browser_list (list):
List of all browser applications for which we find alerts
alert_policies list(dict):
List of all existing alerts new relic
Return:
Return list of all browser applications which have no alert in new Relic
[
{
"id": "integer",
"name": "string",
"browser_monitoring_key": "string",
"loader_script": "string"
}
]
"""
result_browser = []
for browser in browser_list:
if not any(policy["name"] == browser["name"].rstrip() + "-browser" for policy in alert_policies["policies"]):
result_browser.append(browser)
return result_browser
@click.command()
@click.option('--new-relic-api-key', required=True, help='API Key to use to speak with NewRelic.')
@click.option('--ignore', '-i', multiple=True, help='App name regex to filter out, can be specified multiple times')
def controller(new_relic_api_key,ignore):
"""
Control execution of all other functions
Arguments:
new_relic_api_key (str):
Get this from cli args
"""
flag = 0
# Initializing object of classes
infracheck = InfraAlerts()
new_relic_obj = NewRelic(new_relic_api_key)
# Get list of all instances in different regions
instance_list = infracheck.edc_extractor()
# Get list of all alert policies in new relic
alert_policies = new_relic_obj.new_relic_policies_extractor()
# Get list of all instances without alerts
missing_alerts_list = infracheck.missing_alerts_checker(instance_list, alert_policies)
filtered_missing_alerts_list = [x for x in missing_alerts_list if not any(re.search(r, x['name']) for r in ignore)]
format_string = "{:<30}{}"
print((format_string.format("Instance ID", "Instance Name")))
for instance_wo_alerts in filtered_missing_alerts_list:
print((format_string.format(instance_wo_alerts["ID"], instance_wo_alerts["name"])))
flag = 1
# Initializing object of classes
appcheck = AppAlerts(new_relic_api_key)
new_relic_obj = NewRelic(new_relic_api_key)
# Get list of all applications from new relic
apps_list = appcheck.new_relic_app_extractor()
# Get list of all applications without alerts
missing_alerts_list_app = appcheck.missing_alerts_checker(apps_list, alert_policies)
filtered_missing_alerts_list_app = [x for x in missing_alerts_list_app if not any(re.search(r, x['name']) for r in ignore)]
format_string = "{:<20}{}"
print("")
print((format_string.format("Application ID", "Application Name")))
for instance_wo_alerts in filtered_missing_alerts_list_app:
print((format_string.format(instance_wo_alerts["id"], instance_wo_alerts["name"])))
flag = 1
# Initializing object of classes
browsercheck = BrowserAlerts(new_relic_api_key)
new_relic_obj = NewRelic(new_relic_api_key)
# Get list of all browser applications from new relic
browser_list = browsercheck.new_relic_browser_extractor()
# Get list of all browser applications without alerts
missing_alerts_list_browser = browsercheck.missing_alerts_checker(browser_list, alert_policies)
filtered_missing_alerts_list_browser = [x for x in missing_alerts_list_browser if not any(re.search(r, x['name']) for r in ignore)]
format_string = "{:<20}{}"
print("")
print((format_string.format("Browser ID", "Browser Name")))
for instance_wo_alerts in filtered_missing_alerts_list_browser:
print((format_string.format(instance_wo_alerts["id"], instance_wo_alerts["name"])))
flag = 1
sys.exit(flag)
if __name__ == '__main__':
controller()
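# Invocation sketch (the API key value and ignore patterns are illustrative):
#
#   python missing_alerts_checker.py --new-relic-api-key NRAK-XXXX \
#       -i '^stage-' -i '^sandbox-'
#
# The script exits non-zero when any instance, application or browser app
# is missing its expected '<name>-infrastructure' / '-application' /
# '-browser' alert policy.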
| agpl-3.0 |
jonnor/FreeCAD | src/Mod/Draft/WorkingPlane.py | 16 | 18204 | #***************************************************************************
#* *
#* Copyright (c) 2009, 2010 *
#* Ken Cline <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD, math, DraftVecUtils
from FreeCAD import Vector
__title__="FreeCAD Working Plane utility"
__author__ = "Ken Cline"
__url__ = "http://www.freecadweb.org"
'''
This module provides a class called plane to assist in selecting and maintaining a working plane.
'''
class plane:
'''A WorkPlane object'''
def __init__(self,u=Vector(1,0,0),v=Vector(0,1,0),w=Vector(0,0,1),pos=Vector(0,0,0)):
# keep track of active document. Reset view when doc changes.
self.doc = None
# self.weak is true if the plane has been defined by self.setup or has been reset
self.weak = True
# u, v axes and position define plane, perpendicular axis is handy, though redundant.
self.u = u
self.v = v
self.axis = w
self.position = pos
# a placeholder for a stored state
self.stored = None
def __repr__(self):
return "Workplane x="+str(DraftVecUtils.rounded(self.u))+" y="+str(DraftVecUtils.rounded(self.v))+" z="+str(DraftVecUtils.rounded(self.axis))
def offsetToPoint(self, p, direction=None):
'''
Return the signed distance from p to the plane, such
that p + offsetToPoint(p)*direction lies on the plane.
direction defaults to plane.axis
'''
'''
A picture will help explain the computation:
p
//|
/ / |
/ / |
/ / |
/ / |
-------------------- plane -----c-----x-----a--------
Here p is the specified point,
c is a point (in this case plane.position) on the plane
x is the intercept on the plane from p in the specified direction, and
a is the perpendicular intercept on the plane (i.e. along plane.axis)
Using vertical bars to denote the length operator,
|ap| = |cp| * cos(apc) = |xp| * cos(apx)
so
|xp| = |cp| * cos(apc) / cos(apx)
= (cp . axis) / (direction . axis)
'''
if direction is None: direction = self.axis
return direction.dot(self.position.sub(p))
def projectPoint(self, p, direction=None):
'''project point onto plane, default direction is orthogonal'''
if not direction:
direction = self.axis
lp = self.getLocalCoords(p)
gp = self.getGlobalCoords(Vector(lp.x,lp.y,0))
a = direction.getAngle(gp.sub(p))
if a > math.pi/2:
direction = direction.negative()
a = math.pi - a
ld = self.getLocalRot(direction)
gd = self.getGlobalRot(Vector(ld.x,ld.y,0))
hyp = abs(math.tan(a) * lp.z)
return gp.add(DraftVecUtils.scaleTo(gd,hyp))
def projectPointOld(self, p, direction=None):
'''project point onto plane, default direction is orthogonal. Obsolete'''
if not direction:
direction = self.axis
t = Vector(direction)
#t.normalize()
a = round(t.getAngle(self.axis),DraftVecUtils.precision())
pp = round((math.pi)/2,DraftVecUtils.precision())
if a == pp:
return p
t.multiply(self.offsetToPoint(p, direction))
return p.add(t)
def alignToPointAndAxis(self, point, axis, offset=0, upvec=None):
self.doc = FreeCAD.ActiveDocument
self.axis = axis;
self.axis.normalize()
if (DraftVecUtils.equals(axis, Vector(1,0,0))):
self.u = Vector(0,1,0)
self.v = Vector(0,0,1)
elif (DraftVecUtils.equals(axis, Vector(-1,0,0))):
self.u = Vector(0,-1,0)
self.v = Vector(0,0,1)
elif upvec:
self.v = upvec
self.v.normalize()
self.u = self.v.cross(self.axis)
else:
self.v = axis.cross(Vector(1,0,0))
self.v.normalize()
self.u = DraftVecUtils.rotate(self.v, -math.pi/2, self.axis)
offsetVector = Vector(axis); offsetVector.multiply(offset)
self.position = point.add(offsetVector)
self.weak = False
# FreeCAD.Console.PrintMessage("(position = " + str(self.position) + ")\n")
# FreeCAD.Console.PrintMessage("Current workplane: x="+str(DraftVecUtils.rounded(self.u))+" y="+str(DraftVecUtils.rounded(self.v))+" z="+str(DraftVecUtils.rounded(self.axis))+"\n")
def alignToPointAndAxis_SVG(self, point, axis, offset):
# based on cases table
self.doc = FreeCAD.ActiveDocument
self.axis = axis;
self.axis.normalize()
ref_vec = Vector(0.0, 1.0, 0.0)
if ((abs(axis.x) > abs(axis.y)) and (abs(axis.y) > abs(axis.z))):
ref_vec = Vector(0.0, 0., 1.0)
self.u = axis.negative().cross(ref_vec)
self.u.normalize()
self.v = DraftVecUtils.rotate(self.u, math.pi/2, self.axis)
#projcase = "Case new"
elif ((abs(axis.y) > abs(axis.z)) and (abs(axis.z) >= abs(axis.x))):
ref_vec = Vector(1.0, 0.0, 0.0)
self.u = axis.negative().cross(ref_vec)
self.u.normalize()
self.v = DraftVecUtils.rotate(self.u, math.pi/2, self.axis)
#projcase = "Y>Z, View Y"
elif ((abs(axis.y) >= abs(axis.x)) and (abs(axis.x) > abs(axis.z))):
ref_vec = Vector(0.0, 0., 1.0)
self.u = axis.cross(ref_vec)
self.u.normalize()
self.v = DraftVecUtils.rotate(self.u, math.pi/2, self.axis)
#projcase = "ehem. XY, Case XY"
elif ((abs(axis.x) > abs(axis.z)) and (abs(axis.z) >= abs(axis.y))):
self.u = axis.cross(ref_vec)
self.u.normalize()
self.v = DraftVecUtils.rotate(self.u, math.pi/2, self.axis)
#projcase = "X>Z, View X"
elif ((abs(axis.z) >= abs(axis.y)) and (abs(axis.y) > abs(axis.x))):
ref_vec = Vector(1.0, 0., 0.0)
self.u = axis.cross(ref_vec)
self.u.normalize()
self.v = DraftVecUtils.rotate(self.u, math.pi/2, self.axis)
#projcase = "Y>X, Case YZ"
else:
self.u = axis.negative().cross(ref_vec)
self.u.normalize()
self.v = DraftVecUtils.rotate(self.u, math.pi/2, self.axis)
#projcase = "else"
#spat_vec = self.u.cross(self.v)
#spat_res = spat_vec.dot(axis)
#FreeCAD.Console.PrintMessage(projcase + " spat Prod = " + str(spat_res) + "\n")
offsetVector = Vector(axis); offsetVector.multiply(offset)
self.position = point.add(offsetVector)
self.weak = False
# FreeCAD.Console.PrintMessage("(position = " + str(self.position) + ")\n")
# FreeCAD.Console.PrintMessage("Current workplane: x="+str(DraftVecUtils.rounded(self.u))+" y="+str(DraftVecUtils.rounded(self.v))+" z="+str(DraftVecUtils.rounded(self.axis))+"\n")
def alignToCurve(self, shape, offset):
if shape.ShapeType == 'Edge':
#??? TODO: process curve here. look at shape.edges[0].Curve
return False
elif shape.ShapeType == 'Wire':
#??? TODO: determine if edges define a plane
return False
else:
return False
def alignToEdges(self,edges):
# use a list of edges to find a plane position
if len(edges) > 2:
return False
# for axes systems, we suppose the 2 first edges are parallel
# ??? TODO: exclude other cases first
v1 = edges[0].Vertexes[-1].Point.sub(edges[0].Vertexes[0].Point)
v2 = edges[1].Vertexes[0].Point.sub(edges[0].Vertexes[0].Point)
v3 = v1.cross(v2)
v1.normalize()
v2.normalize()
v3.normalize()
#print v1,v2,v3
self.u = v1
self.v = v2
self.axis = v3
def alignToFace(self, shape, offset=0):
# Set face to the unique selected face, if found
if shape.ShapeType == 'Face':
self.alignToPointAndAxis(shape.Faces[0].CenterOfMass, shape.Faces[0].normalAt(0,0), offset)
import DraftGeomUtils
q = DraftGeomUtils.getQuad(shape)
if q:
self.u = q[1]
self.v = q[2]
if not DraftVecUtils.equals(self.u.cross(self.v),self.axis):
self.u = q[2]
self.v = q[1]
self.weak = False
return True
else:
return False
def alignToSelection(self, offset):
'''If selection uniquely defines a plane, align working plane to it. Return success (bool)'''
import FreeCADGui
sex = FreeCADGui.Selection.getSelectionEx(FreeCAD.ActiveDocument.Name)
if len(sex) == 0:
return False
elif len(sex) == 1:
if not sex[0].Object.isDerivedFrom("Part::Shape"):
return False
return self.alignToCurve(sex[0].Object.Shape, offset) \
or self.alignToFace(sex[0].Object.Shape, offset) \
or (len(sex[0].SubObjects) == 1 and self.alignToFace(sex[0].SubObjects[0], offset))
else:
# len(sex) > 2, look for point and line, three points, etc.
return False
def setup(self, direction=None, point=None, upvec=None):
'''If working plane is undefined, define it!'''
if self.weak:
if direction and point:
self.alignToPointAndAxis(point, direction, 0, upvec)
else:
try:
import FreeCADGui
from pivy import coin
rot = FreeCADGui.ActiveDocument.ActiveView.getCameraNode().getField("orientation").getValue()
upvec = Vector(rot.multVec(coin.SbVec3f(0,1,0)).getValue())
vdir = FreeCADGui.ActiveDocument.ActiveView.getViewDirection()
self.alignToPointAndAxis(Vector(0,0,0), vdir.negative(), 0, upvec)
except:
pass
self.weak = True
def reset(self):
self.doc = None
self.weak = True
def getRotation(self):
"returns a placement describing the working plane orientation ONLY"
m = DraftVecUtils.getPlaneRotation(self.u,self.v,self.axis)
return FreeCAD.Placement(m)
def getPlacement(self,rotated=False):
"returns the placement of the working plane"
if rotated:
m = FreeCAD.Matrix(
self.u.x,self.axis.x,-self.v.x,self.position.x,
self.u.y,self.axis.y,-self.v.y,self.position.y,
self.u.z,self.axis.z,-self.v.z,self.position.z,
0.0,0.0,0.0,1.0)
else:
m = FreeCAD.Matrix(
self.u.x,self.v.x,self.axis.x,self.position.x,
self.u.y,self.v.y,self.axis.y,self.position.y,
self.u.z,self.v.z,self.axis.z,self.position.z,
0.0,0.0,0.0,1.0)
return FreeCAD.Placement(m)
def setFromPlacement(self,pl):
"sets the working plane from a placement (rotaton ONLY)"
rot = FreeCAD.Placement(pl).Rotation
self.u = rot.multVec(FreeCAD.Vector(1,0,0))
self.v = rot.multVec(FreeCAD.Vector(0,1,0))
self.axis = rot.multVec(FreeCAD.Vector(0,0,1))
def inverse(self):
"inverts the direction of the working plane"
self.u = self.u.negative()
self.axis = self.axis.negative()
def save(self):
"stores the current plane state"
self.stored = [self.u,self.v,self.axis,self.position,self.weak]
def restore(self):
"restores a previously saved plane state, if exists"
if self.stored:
self.u = self.stored[0]
self.v = self.stored[1]
self.axis = self.stored[2]
self.position = self.stored[3]
self.weak = self.stored[4]
self.stored = None
def getLocalCoords(self,point):
"returns the coordinates of a given point on the working plane"
pt = point.sub(self.position)
xv = DraftVecUtils.project(pt,self.u)
x = xv.Length
if xv.getAngle(self.u) > 1:
x = -x
yv = DraftVecUtils.project(pt,self.v)
y = yv.Length
if yv.getAngle(self.v) > 1:
y = -y
zv = DraftVecUtils.project(pt,self.axis)
z = zv.Length
if zv.getAngle(self.axis) > 1:
z = -z
return Vector(x,y,z)
def getGlobalCoords(self,point):
"returns the global coordinates of the given point, taken relatively to this working plane"
vx = Vector(self.u).multiply(point.x)
vy = Vector(self.v).multiply(point.y)
vz = Vector(self.axis).multiply(point.z)
pt = (vx.add(vy)).add(vz)
return pt.add(self.position)
def getLocalRot(self,point):
"Same as getLocalCoords, but discards the WP position"
xv = DraftVecUtils.project(point,self.u)
x = xv.Length
if xv.getAngle(self.u) > 1:
x = -x
yv = DraftVecUtils.project(point,self.v)
y = yv.Length
if yv.getAngle(self.v) > 1:
y = -y
zv = DraftVecUtils.project(point,self.axis)
z = zv.Length
if zv.getAngle(self.axis) > 1:
z = -z
return Vector(x,y,z)
def getGlobalRot(self,point):
"Same as getGlobalCoords, but discards the WP position"
vx = Vector(self.u).multiply(point.x)
vy = Vector(self.v).multiply(point.y)
vz = Vector(self.axis).multiply(point.z)
pt = (vx.add(vy)).add(vz)
return pt
def getClosestAxis(self,point):
"returns which of the workingplane axes is closest from the given vector"
ax = point.getAngle(self.u)
ay = point.getAngle(self.v)
az = point.getAngle(self.axis)
bx = point.getAngle(self.u.negative())
by = point.getAngle(self.v.negative())
bz = point.getAngle(self.axis.negative())
b = min(ax,ay,az,bx,by,bz)
if b in [ax,bx]:
return "x"
elif b in [ay,by]:
return "y"
elif b in [az,bz]:
return "z"
else:
return None
def isGlobal(self):
"returns True if the plane axes are equal to the global axes"
if self.u != Vector(1,0,0):
return False
if self.v != Vector(0,1,0):
return False
if self.axis != Vector(0,0,1):
return False
return True
def isOrtho(self):
"returns True if the plane axes are following the global axes"
if round(self.u.getAngle(Vector(0,1,0)),6) in [0,-1.570796,1.570796,-3.141593,3.141593,-4.712389,4.712389,6.283185]:
if round(self.v.getAngle(Vector(0,1,0)),6) in [0,-1.570796,1.570796,-3.141593,3.141593,-4.712389,4.712389,6.283185]:
if round(self.axis.getAngle(Vector(0,1,0)),6) in [0,-1.570796,1.570796,-3.141593,3.141593,-4.712389,4.712389,6.283185]:
return True
return False
def getDeviation(self):
"returns the deviation angle between the u axis and the horizontal plane"
proj = Vector(self.u.x,self.u.y,0)
if self.u.getAngle(proj) == 0:
return 0
else:
norm = proj.cross(self.u)
return DraftVecUtils.angle(self.u,proj,norm)
def getPlacementFromPoints(points):
"returns a placement from a list of 3 or 4 vectors"
pl = plane()
try:
pl.position = points[0]
pl.u = (points[1].sub(points[0]).normalize())
pl.v = (points[2].sub(points[0]).normalize())
if len(points) == 4:
pl.axis = (points[3].sub(points[0]).normalize())
else:
pl.axis = ((pl.u).cross(pl.v)).normalize()
except:
return None
p = pl.getPlacement()
del pl
return p
def getPlacementFromFace(face,rotated=False):
"returns a placement from a face"
pl = plane()
try:
pl.alignToFace(face)
except:
return None
p = pl.getPlacement(rotated)
del pl
return p
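# Usage sketch (FreeCAD Python console; the vectors are illustrative):
#
#     import WorkingPlane
#     from FreeCAD import Vector
#     wp = WorkingPlane.plane()
#     wp.alignToPointAndAxis(Vector(0, 0, 0), Vector(0, 0, 1), offset=0)
#     local = wp.getLocalCoords(Vector(2, 2, 5))
#     wp.getGlobalCoords(local)   # round-trips back to the original point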
| lgpl-2.1 |
AuyaJackie/odoo | addons/calendar/calendar.py | 81 | 86439 | # -*- coding: utf-8 -*-
import pytz
import re
import time
import openerp
import openerp.service.report
import uuid
import collections
import babel.dates
from werkzeug.exceptions import BadRequest
from datetime import datetime, timedelta
from dateutil import parser
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from openerp import api
from openerp import tools, SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.http import request
from operator import itemgetter
import logging
_logger = logging.getLogger(__name__)
def calendar_id2real_id(calendar_id=None, with_date=False):
"""
Convert a "virtual/recurring event id" (type string) into a real event id (type int).
E.g. virtual/recurring event id is 4-20091201100000, so it will return 4.
@param calendar_id: id of calendar
@param with_date: if set, also return the occurrence start date encoded in the virtual id and a stop date with_date hours later
@return: real event id
"""
if calendar_id and isinstance(calendar_id, (basestring)):
res = calendar_id.split('-')
if len(res) >= 2:
real_id = res[0]
if with_date:
real_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT, time.strptime(res[1], "%Y%m%d%H%M%S"))
start = datetime.strptime(real_date, DEFAULT_SERVER_DATETIME_FORMAT)
end = start + timedelta(hours=with_date)
return (int(real_id), real_date, end.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
return int(real_id)
return calendar_id and int(calendar_id) or calendar_id
def get_real_ids(ids):
if isinstance(ids, (basestring, int, long)):
return calendar_id2real_id(ids)
if isinstance(ids, (list, tuple)):
return [calendar_id2real_id(id) for id in ids]
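# Example: a single occurrence of recurring event 4 starting on
# 2009-12-01 10:00 is addressed by the virtual id '4-20091201100000', so
# calendar_id2real_id('4-20091201100000') returns 4, and with with_date=2 it
# returns (4, '2009-12-01 10:00:00', '2009-12-01 12:00:00').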
class calendar_attendee(osv.Model):
"""
Calendar Attendee Information
"""
_name = 'calendar.attendee'
_rec_name = 'cn'
_description = 'Attendee information'
def _compute_data(self, cr, uid, ids, name, arg, context=None):
"""
Compute data on function fields for attendee values.
@param ids: list of calendar attendee's IDs
@param name: name of field
@return: dictionary of form {id: {'field Name': value'}}
"""
name = name[0]
result = {}
for attdata in self.browse(cr, uid, ids, context=context):
id = attdata.id
result[id] = {}
if name == 'cn':
if attdata.partner_id:
result[id][name] = attdata.partner_id.name or False
else:
result[id][name] = attdata.email or ''
return result
STATE_SELECTION = [
('needsAction', 'Needs Action'),
('tentative', 'Uncertain'),
('declined', 'Declined'),
('accepted', 'Accepted'),
]
_columns = {
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="Status of the attendee's participation"),
'cn': fields.function(_compute_data, string='Common name', type="char", multi='cn', store=True),
'partner_id': fields.many2one('res.partner', 'Contact', readonly="True"),
'email': fields.char('Email', help="Email of Invited Person"),
'availability': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Free/Busy', readonly="True"),
'access_token': fields.char('Invitation Token'),
'event_id': fields.many2one('calendar.event', 'Meeting linked', ondelete='cascade'),
}
_defaults = {
'state': 'needsAction',
}
def copy(self, cr, uid, id, default=None, context=None):
raise osv.except_osv(_('Warning!'), _('You cannot duplicate a calendar attendee.'))
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
"""
Make entry on email and availability on change of partner_id field.
@param partner_id: changed value of partner id
"""
if not partner_id:
return {'value': {'email': ''}}
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
return {'value': {'email': partner.email}}
def get_ics_file(self, cr, uid, event_obj, context=None):
"""
Returns iCalendar file for the event invitation.
@param event_obj: event object (browse record)
@return: .ics file content
"""
res = None
def ics_datetime(idate, allday=False):
if idate:
if allday:
return openerp.fields.Date.from_string(idate)
else:
return openerp.fields.Datetime.from_string(idate).replace(tzinfo=pytz.timezone('UTC'))
return False
try:
# FIXME: why isn't this in CalDAV?
import vobject
except ImportError:
return res
cal = vobject.iCalendar()
event = cal.add('vevent')
if not event_obj.start or not event_obj.stop:
raise osv.except_osv(_('Warning!'), _("First you have to specify the date of the invitation."))
event.add('created').value = ics_datetime(time.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
event.add('dtstart').value = ics_datetime(event_obj.start, event_obj.allday)
event.add('dtend').value = ics_datetime(event_obj.stop, event_obj.allday)
event.add('summary').value = event_obj.name
if event_obj.description:
event.add('description').value = event_obj.description
if event_obj.location:
event.add('location').value = event_obj.location
if event_obj.rrule:
event.add('rrule').value = event_obj.rrule
if event_obj.alarm_ids:
for alarm in event_obj.alarm_ids:
valarm = event.add('valarm')
interval = alarm.interval
duration = alarm.duration
trigger = valarm.add('TRIGGER')
trigger.params['related'] = ["START"]
if interval == 'days':
delta = timedelta(days=duration)
elif interval == 'hours':
delta = timedelta(hours=duration)
elif interval == 'minutes':
delta = timedelta(minutes=duration)
trigger.value = delta
valarm.add('DESCRIPTION').value = alarm.name or 'Odoo'
for attendee in event_obj.attendee_ids:
attendee_add = event.add('attendee')
attendee_add.value = 'MAILTO:' + (attendee.email or '')
res = cal.serialize()
return res
def _send_mail_to_attendees(self, cr, uid, ids, email_from=tools.config.get('email_from', False),
template_xmlid='calendar_template_meeting_invitation', force=False, context=None):
"""
Send mail for event invitation to event attendees.
@param email_from: email address for user sending the mail
@param force: If set to True, email will be sent to the user himself. Useful for example for alerts, ...
"""
res = False
if self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_mail', default=False) or context.get("no_mail_to_attendees"):
return res
mail_ids = []
data_pool = self.pool['ir.model.data']
mailmess_pool = self.pool['mail.message']
mail_pool = self.pool['mail.mail']
template_pool = self.pool['email.template']
local_context = context.copy()
color = {
'needsAction': 'grey',
'accepted': 'green',
'tentative': '#FFFF00',
'declined': 'red'
}
if not isinstance(ids, (tuple, list)):
ids = [ids]
dummy, template_id = data_pool.get_object_reference(cr, uid, 'calendar', template_xmlid)
dummy, act_id = data_pool.get_object_reference(cr, uid, 'calendar', "view_calendar_event_calendar")
local_context.update({
'color': color,
'action_id': self.pool['ir.actions.act_window'].search(cr, uid, [('view_id', '=', act_id)], context=context)[0],
'dbname': cr.dbname,
'base_url': self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url', default='http://localhost:8069', context=context)
})
for attendee in self.browse(cr, uid, ids, context=context):
if attendee.email and email_from and (attendee.email != email_from or force):
ics_file = self.get_ics_file(cr, uid, attendee.event_id, context=context)
mail_id = template_pool.send_mail(cr, uid, template_id, attendee.id, context=local_context)
vals = {}
if ics_file:
vals['attachment_ids'] = [(0, 0, {'name': 'invitation.ics',
'datas_fname': 'invitation.ics',
'datas': str(ics_file).encode('base64')})]
vals['model'] = None # We don't want to have the mail in the chatter while in queue!
the_mailmess = mail_pool.browse(cr, uid, mail_id, context=context).mail_message_id
mailmess_pool.write(cr, uid, [the_mailmess.id], vals, context=context)
mail_ids.append(mail_id)
if mail_ids:
res = mail_pool.send(cr, uid, mail_ids, context=context)
return res
def onchange_user_id(self, cr, uid, ids, user_id, *args, **argv):
"""
Make entry on email and availability on change of user_id field.
@param ids: list of attendee's IDs
@param user_id: changed value of User id
@return: dictionary of values which put value in email and availability fields
"""
if not user_id:
return {'value': {'email': ''}}
user = self.pool['res.users'].browse(cr, uid, user_id, *args)
return {'value': {'email': user.email, 'availability': user.availability}}
def do_tentative(self, cr, uid, ids, context=None, *args):
"""
Marks event invitation as Tentative.
@param ids: list of attendee's IDs
"""
return self.write(cr, uid, ids, {'state': 'tentative'}, context)
def do_accept(self, cr, uid, ids, context=None, *args):
"""
Marks event invitation as Accepted.
@param ids: list of attendee's IDs
"""
if context is None:
context = {}
meeting_obj = self.pool['calendar.event']
res = self.write(cr, uid, ids, {'state': 'accepted'}, context)
for attendee in self.browse(cr, uid, ids, context=context):
meeting_obj.message_post(cr, uid, attendee.event_id.id, body=_(("%s has accepted invitation") % (attendee.cn)),
subtype="calendar.subtype_invitation", context=context)
return res
def do_decline(self, cr, uid, ids, context=None, *args):
"""
Marks event invitation as Declined.
@param ids: list of calendar attendee's IDs
"""
if context is None:
context = {}
meeting_obj = self.pool['calendar.event']
res = self.write(cr, uid, ids, {'state': 'declined'}, context)
for attendee in self.browse(cr, uid, ids, context=context):
meeting_obj.message_post(cr, uid, attendee.event_id.id, body=_(("%s has declined invitation") % (attendee.cn)), subtype="calendar.subtype_invitation", context=context)
return res
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if not vals.get("email") and vals.get("cn"):
cnval = vals.get("cn").split(':')
email = filter(lambda x: x.__contains__('@'), cnval)
vals['email'] = email and email[0] or ''
vals['cn'] = vals.get("cn")
res = super(calendar_attendee, self).create(cr, uid, vals, context=context)
return res
class res_partner(osv.Model):
_inherit = 'res.partner'
_columns = {
'calendar_last_notif_ack': fields.datetime('Last notification marked as read from base Calendar'),
}
def get_attendee_detail(self, cr, uid, ids, meeting_id, context=None):
"""
Return a list of tuple (id, name, status)
Used by web_calendar.js : Many2ManyAttendee
"""
datas = []
meeting = None
if meeting_id:
meeting = self.pool['calendar.event'].browse(cr, uid, get_real_ids(meeting_id), context=context)
for partner in self.browse(cr, uid, ids, context=context):
data = self.name_get(cr, uid, [partner.id], context)[0]
if meeting:
for attendee in meeting.attendee_ids:
if attendee.partner_id.id == partner.id:
data = (data[0], data[1], attendee.state)
datas.append(data)
return datas
def _set_calendar_last_notif_ack(self, cr, uid, context=None):
partner = self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id
self.write(cr, uid, partner.id, {'calendar_last_notif_ack': datetime.now()}, context=context)
return
class calendar_alarm_manager(osv.AbstractModel):
_name = 'calendar.alarm_manager'
def get_next_potential_limit_alarm(self, cr, uid, seconds, notif=True, mail=True, partner_id=None, context=None):
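# Returns a dict keyed by event id for every event that has at least one
# alarm of the requested type(s). Illustrative shape (values hypothetical):
# {42: {'event_id': 42, 'first_alarm': <datetime>, 'last_alarm': <datetime>,
#       'first_meeting': <datetime>, 'last_meeting': <datetime>,
#       'min_duration': 15, 'max_duration': 60, 'rrule': 'FREQ=WEEKLY'}}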
res = {}
base_request = """
SELECT
cal.id,
cal.start - interval '1' minute * calcul_delta.max_delta AS first_alarm,
CASE
WHEN cal.recurrency THEN cal.final_date - interval '1' minute * calcul_delta.min_delta
ELSE cal.stop - interval '1' minute * calcul_delta.min_delta
END as last_alarm,
cal.start as first_event_date,
CASE
WHEN cal.recurrency THEN cal.final_date
ELSE cal.stop
END as last_event_date,
calcul_delta.min_delta,
calcul_delta.max_delta,
cal.rrule AS rule
FROM
calendar_event AS cal
RIGHT JOIN
(
SELECT
rel.calendar_event_id, max(alarm.duration_minutes) AS max_delta,min(alarm.duration_minutes) AS min_delta
FROM
calendar_alarm_calendar_event_rel AS rel
LEFT JOIN calendar_alarm AS alarm ON alarm.id = rel.calendar_alarm_id
WHERE alarm.type in %s
GROUP BY rel.calendar_event_id
) AS calcul_delta ON calcul_delta.calendar_event_id = cal.id
"""
filter_user = """
RIGHT JOIN calendar_event_res_partner_rel AS part_rel ON part_rel.calendar_event_id = cal.id
AND part_rel.res_partner_id = %s
"""
#Add filter on type
type_to_read = ()
if notif:
type_to_read += ('notification',)
if mail:
type_to_read += ('email',)
tuple_params = (type_to_read,)
# ADD FILTER ON PARTNER_ID
if partner_id:
base_request += filter_user
tuple_params += (partner_id, )
#Add filter on hours
tuple_params += (seconds,)
cr.execute("""SELECT *
FROM ( %s WHERE cal.active = True ) AS ALL_EVENTS
WHERE ALL_EVENTS.first_alarm < (now() at time zone 'utc' + interval '%%s' second )
AND ALL_EVENTS.last_event_date > (now() at time zone 'utc')
""" % base_request, tuple_params)
for event_id, first_alarm, last_alarm, first_meeting, last_meeting, min_duration, max_duration, rule in cr.fetchall():
res[event_id] = {
'event_id': event_id,
'first_alarm': first_alarm,
'last_alarm': last_alarm,
'first_meeting': first_meeting,
'last_meeting': last_meeting,
'min_duration': min_duration,
'max_duration': max_duration,
'rrule': rule
}
return res
def do_check_alarm_for_one_date(self, cr, uid, one_date, event, event_maxdelta, in_the_next_X_seconds, after=False, notif=True, mail=True, missing=False, context=None):
# one_date: date of the event to check (not the same that in the event browse if recurrent)
# event: Event browse record
# event_maxdelta: biggest duration from alarms for this event
# in_the_next_X_seconds: looking in the future (in seconds)
# after: if not False: will return alert if after this date (date as string - todo: change in master)
# missing: if not False: will return alert even if we are too late
# notif: Looking for type notification
# mail: looking for type email
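# Illustrative call (hypothetical values): for a 30-minute alarm on an
# occurrence at 2014-03-06 10:00 falling inside the window, the result is
# [{'alarm_id': <id>, 'event_id': <id>,
#   'notify_at': datetime(2014, 3, 6, 9, 30)}].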
res = []
# TODO: replace notif and email in master by alarm_type + remove event_maxdelta and if using it
alarm_type = []
if notif:
alarm_type.append('notification')
if mail:
alarm_type.append('email')
if one_date - timedelta(minutes=(missing and 0 or event_maxdelta)) < datetime.now() + timedelta(seconds=in_the_next_X_seconds): # if an alarm is possible for this date
for alarm in event.alarm_ids:
if alarm.type in alarm_type and \
one_date - timedelta(minutes=(missing and 0 or alarm.duration_minutes)) < datetime.now() + timedelta(seconds=in_the_next_X_seconds) and \
(not after or one_date - timedelta(minutes=alarm.duration_minutes) > openerp.fields.Datetime.from_string(after)):
alert = {
'alarm_id': alarm.id,
'event_id': event.id,
'notify_at': one_date - timedelta(minutes=alarm.duration_minutes),
}
res.append(alert)
return res
def get_next_mail(self, cr, uid, context=None):
now = openerp.fields.Datetime.to_string(datetime.now())
icp = self.pool['ir.config_parameter']
last_notif_mail = icp.get_param(cr, SUPERUSER_ID, 'calendar.last_notif_mail', default=False) or now
try:
cron = self.pool['ir.model.data'].get_object(cr, uid, 'calendar', 'ir_cron_scheduler_alarm', context=context)
except ValueError:
_logger.error("Cron for " + self._name + " can not be identified !")
return False
interval_to_second = {
"weeks": 7 * 24 * 60 * 60,
"days": 24 * 60 * 60,
"hours": 60 * 60,
"minutes": 60,
"seconds": 1
}
if cron.interval_type not in interval_to_second.keys():
_logger.error("Cron delay can not be computed !")
return False
cron_interval = cron.interval_number * interval_to_second[cron.interval_type]
all_events = self.get_next_potential_limit_alarm(cr, uid, cron_interval, notif=False, context=context)
for curEvent in self.pool.get('calendar.event').browse(cr, uid, all_events.keys(), context=context):
max_delta = all_events[curEvent.id]['max_duration']
if curEvent.recurrency:
at_least_one = False
last_found = False
for one_date in self.pool.get('calendar.event').get_recurrent_date_by_event(cr, uid, curEvent, context=context):
in_date_format = one_date.replace(tzinfo=None)
last_found = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, 0, after=last_notif_mail, notif=False, missing=True, context=context)
for alert in last_found:
self.do_mail_reminder(cr, uid, alert, context=context)
at_least_one = True # at least one occurrence of this recurrent event had an alarm
if at_least_one and not last_found: # if a previous occurrence had an alarm but this one does not, we can stop the search for this event
break
else:
in_date_format = datetime.strptime(curEvent.start, DEFAULT_SERVER_DATETIME_FORMAT)
last_found = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, 0, after=last_notif_mail, notif=False, missing=True, context=context)
for alert in last_found:
self.do_mail_reminder(cr, uid, alert, context=context)
icp.set_param(cr, SUPERUSER_ID, 'calendar.last_notif_mail', now)
def get_next_notif(self, cr, uid, context=None):
ajax_check_every_seconds = 300
partner = self.pool['res.users'].read(cr, SUPERUSER_ID, uid, ['partner_id', 'calendar_last_notif_ack'], context=context)
all_notif = []
if not partner:
return []
all_events = self.get_next_potential_limit_alarm(cr, uid, ajax_check_every_seconds, partner_id=partner['partner_id'][0], mail=False, context=context)
for event in all_events: # iterate over event ids (the dict keys)
max_delta = all_events[event]['max_duration']
curEvent = self.pool.get('calendar.event').browse(cr, uid, event, context=context)
if curEvent.recurrency:
bFound = False
LastFound = False
for one_date in self.pool.get("calendar.event").get_recurrent_date_by_event(cr, uid, curEvent, context=context):
in_date_format = one_date.replace(tzinfo=None)
LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, after=partner['calendar_last_notif_ack'], mail=False, context=context)
if LastFound:
for alert in LastFound:
all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
if not bFound: # if it's the first alarm for this recurrent event
bFound = True
if bFound and not LastFound: # if a previous occurrence had an alarm but this one does not, we can stop the search for this event
break
else:
in_date_format = datetime.strptime(curEvent.start, DEFAULT_SERVER_DATETIME_FORMAT)
LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, after=partner['calendar_last_notif_ack'], mail=False, context=context)
if LastFound:
for alert in LastFound:
all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
return all_notif
def do_mail_reminder(self, cr, uid, alert, context=None):
if context is None:
context = {}
res = False
event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)
alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
if alarm.type == 'email':
res = self.pool['calendar.attendee']._send_mail_to_attendees(
cr,
uid,
[att.id for att in event.attendee_ids],
email_from=event.user_id.partner_id.email,
template_xmlid='calendar_template_meeting_reminder',
force=True,
context=context
)
return res
def do_notif_reminder(self, cr, uid, alert, context=None):
alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)
if alarm.type == 'notification':
message = event.display_time
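# The timer sent to the client is the number of seconds between now and
# 'notify_at'; the days and seconds parts of the timedelta are combined
# manually below.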
delta = alert['notify_at'] - datetime.now()
delta = delta.seconds + delta.days * 3600 * 24
return {
'event_id': event.id,
'title': event.name,
'message': message,
'timer': delta,
'notify_at': alert['notify_at'].strftime(DEFAULT_SERVER_DATETIME_FORMAT),
}
class calendar_alarm(osv.Model):
_name = 'calendar.alarm'
_description = 'Event alarm'
def _get_duration(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for alarm in self.browse(cr, uid, ids, context=context):
if alarm.interval == "minutes":
res[alarm.id] = alarm.duration
elif alarm.interval == "hours":
res[alarm.id] = alarm.duration * 60
elif alarm.interval == "days":
res[alarm.id] = alarm.duration * 60 * 24
else:
res[alarm.id] = 0
return res
_columns = {
'name': fields.char('Name', required=True),
'type': fields.selection([('notification', 'Notification'), ('email', 'Email')], 'Type', required=True),
'duration': fields.integer('Amount', required=True),
'interval': fields.selection([('minutes', 'Minutes'), ('hours', 'Hours'), ('days', 'Days')], 'Unit', required=True),
'duration_minutes': fields.function(_get_duration, type='integer', string='Duration in minutes', store=True),
}
_defaults = {
'type': 'notification',
'duration': 1,
'interval': 'hours',
}
def _update_cron(self, cr, uid, context=None):
try:
cron = self.pool['ir.model.data'].get_object(
cr, SUPERUSER_ID, 'calendar', 'ir_cron_scheduler_alarm', context=context)
except ValueError:
return False
return cron.toggle(model=self._name, domain=[('type', '=', 'email')])
def create(self, cr, uid, values, context=None):
res = super(calendar_alarm, self).create(cr, uid, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(calendar_alarm, self).write(cr, uid, ids, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(calendar_alarm, self).unlink(cr, uid, ids, context=context)
self._update_cron(cr, uid, context=context)
return res
class ir_values(osv.Model):
_inherit = 'ir.values'
def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
new_model = []
for data in models:
if type(data) in (list, tuple):
new_model.append((data[0], calendar_id2real_id(data[1])))
else:
new_model.append(data)
return super(ir_values, self).set(cr, uid, key, key2, name, new_model,
value, replace, isobject, meta, preserve_user, company)
def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
if context is None:
context = {}
new_model = []
for data in models:
if type(data) in (list, tuple):
new_model.append((data[0], calendar_id2real_id(data[1])))
else:
new_model.append(data)
return super(ir_values, self).get(cr, uid, key, key2, new_model,
meta, context, res_id_req, without_user, key2_req)
class ir_model(osv.Model):
_inherit = 'ir.model'
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
new_ids = isinstance(ids, (basestring, int, long)) and [ids] or ids
if context is None:
context = {}
data = super(ir_model, self).read(cr, uid, new_ids, fields=fields, context=context, load=load)
if data:
for val in data:
val['id'] = calendar_id2real_id(val['id'])
return isinstance(ids, (basestring, int, long)) and data[0] or data
original_exp_report = openerp.service.report.exp_report
def exp_report(db, uid, object, ids, datas=None, context=None):
"""
Export Report
"""
if object == 'printscreen.list':
return original_exp_report(db, uid, object, ids, datas, context)
new_ids = []
for id in ids:
new_ids.append(calendar_id2real_id(id))
if datas.get('id', False):
datas['id'] = calendar_id2real_id(datas['id'])
return original_exp_report(db, uid, object, new_ids, datas, context)
openerp.service.report.exp_report = exp_report
class calendar_event_type(osv.Model):
_name = 'calendar.event.type'
_description = 'Meeting Type'
_columns = {
'name': fields.char('Name', required=True, translate=True),
}
class calendar_event(osv.Model):
""" Model for Calendar Event """
_name = 'calendar.event'
_description = "Event"
_order = "id desc"
_inherit = ["mail.thread", "ir.needaction_mixin"]
def do_run_scheduler(self, cr, uid, id, context=None):
self.pool['calendar.alarm_manager'].get_next_mail(cr, uid, context=context)
def get_recurrent_date_by_event(self, cr, uid, event, context=None):
"""Get recurrent dates based on Rule string and all event where recurrent_id is child
"""
def todate(date):
val = parser.parse(''.join(re.compile(r'\d').findall(date)))
## Dates are localized to saved timezone if any, else current timezone.
if not val.tzinfo:
val = pytz.UTC.localize(val)
return val.astimezone(timezone)
if context is None:
context = {}
timezone = pytz.timezone(context.get('tz') or 'UTC')
startdate = pytz.UTC.localize(datetime.strptime(event.start, DEFAULT_SERVER_DATETIME_FORMAT)) # Add "+hh:mm" timezone
if not startdate:
startdate = datetime.now()
## Convert the start date to saved timezone (or context tz) as it'll
## define the correct hour/day asked by the user to repeat for recurrence.
startdate = startdate.astimezone(timezone) # transform "+hh:mm" timezone
rset1 = rrule.rrulestr(str(event.rrule), dtstart=startdate, forceset=True)
ids_depending = self.search(cr, uid, [('recurrent_id', '=', event.id), '|', ('active', '=', False), ('active', '=', True)], context=context)
all_events = self.browse(cr, uid, ids_depending, context=context)
for ev in all_events:
rset1._exdate.append(todate(ev.recurrent_id_date))
return [d.astimezone(pytz.UTC) for d in rset1]
def _get_recurrency_end_date(self, cr, uid, id, context=None):
data = self.read(cr, uid, id, ['final_date', 'recurrency', 'rrule_type', 'count', 'end_type', 'stop'], context=context)
if not data.get('recurrency'):
return False
end_type = data.get('end_type')
final_date = data.get('final_date')
if end_type == 'count' and all(data.get(key) for key in ['count', 'rrule_type', 'stop']):
count = data['count'] + 1
delay, mult = {
'daily': ('days', 1),
'weekly': ('days', 7),
'monthly': ('months', 1),
'yearly': ('years', 1),
}[data['rrule_type']]
deadline = datetime.strptime(data['stop'], tools.DEFAULT_SERVER_DATETIME_FORMAT)
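# Worked example (illustrative): a weekly recurrence with count=3 and
# stop='2014-01-06 10:00:00' gives count=4, delay='days', mult=7,
# so the recurrency ends on 2014-01-06 + 28 days = 2014-02-03.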
return deadline + relativedelta(**{delay: count * mult})
return final_date
def _find_my_attendee(self, cr, uid, meeting_ids, context=None):
"""
Return the first attendee record matching the connected user among all the meeting_ids given as parameters
"""
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
for meeting_id in meeting_ids:
for attendee in self.browse(cr, uid, meeting_id, context).attendee_ids:
if user.partner_id.id == attendee.partner_id.id:
return attendee
return False
def get_date_formats(self, cr, uid, context):
lang = context.get("lang")
res_lang = self.pool.get('res.lang')
lang_params = {}
if lang:
ids = res_lang.search(cr, uid, [("code", "=", lang)])
if ids:
lang_params = res_lang.read(cr, uid, ids[0], ["date_format", "time_format"])
# formats will be used for str{f,p}time() which do not support unicode in Python 2, coerce to str
format_date = lang_params.get("date_format", '%B-%d-%Y').encode('utf-8')
format_time = lang_params.get("time_format", '%I-%M %p').encode('utf-8')
return (format_date, format_time)
def get_display_time_tz(self, cr, uid, ids, tz=False, context=None):
context = dict(context or {})
if tz:
context["tz"] = tz
ev = self.browse(cr, uid, ids, context=context)[0]
return self._get_display_time(cr, uid, ev.start, ev.stop, ev.duration, ev.allday, context=context)
def _get_display_time(self, cr, uid, start, stop, zduration, zallday, context=None):
"""
Return the event's date and time range (from/to) as a string with timezone, based on its duration:
eg.
1) if the user sets a 2-hour duration, return: August-23-2013 at (04-30 To 06-30) (Europe/Brussels)
2) if the event lasts all day, return: AllDay, July-31-2013
"""
context = dict(context or {})
tz = context.get('tz', False)
if not tz: # tz can be False, so don't rely on the default value of get()!
context['tz'] = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
tz = context['tz']
tz = tools.ustr(tz).encode('utf-8') # make safe for str{p,f}time()
format_date, format_time = self.get_date_formats(cr, uid, context=context)
date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
date_deadline = fields.datetime.context_timestamp(cr, uid, datetime.strptime(stop, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
event_date = date.strftime(format_date)
display_time = date.strftime(format_time)
if zallday:
time = _("AllDay , %s") % (event_date)
elif zduration < 24:
duration = date + timedelta(hours=zduration)
time = ("%s at (%s To %s) (%s)") % (event_date, display_time, duration.strftime(format_time), tz)
else:
time = ("%s at %s To\n %s at %s (%s)") % (event_date, display_time, date_deadline.strftime(format_date), date_deadline.strftime(format_time), tz)
return time
def _compute(self, cr, uid, ids, fields, arg, context=None):
res = {}
if not isinstance(fields, list):
fields = [fields]
for meeting in self.browse(cr, uid, ids, context=context):
meeting_data = {}
res[meeting.id] = meeting_data
attendee = self._find_my_attendee(cr, uid, [meeting.id], context)
for field in fields:
if field == 'is_attendee':
meeting_data[field] = bool(attendee)
elif field == 'attendee_status':
meeting_data[field] = attendee.state if attendee else 'needsAction'
elif field == 'display_time':
meeting_data[field] = self._get_display_time(cr, uid, meeting.start, meeting.stop, meeting.duration, meeting.allday, context=context)
elif field == "display_start":
meeting_data[field] = meeting.start_date if meeting.allday else meeting.start_datetime
elif field == 'start':
meeting_data[field] = meeting.start_date if meeting.allday else meeting.start_datetime
elif field == 'stop':
meeting_data[field] = meeting.stop_date if meeting.allday else meeting.stop_datetime
return res
def _get_rulestring(self, cr, uid, ids, name, arg, context=None):
"""
Gets Recurrence rule string according to value type RECUR of iCalendar from the values given.
@return: dictionary of rrule value.
"""
result = {}
if not isinstance(ids, list):
ids = [ids]
#read these fields as SUPERUSER because if the record is private a normal search could raise an error
events = self.read(cr, SUPERUSER_ID, ids,
['id', 'byday', 'recurrency', 'final_date', 'rrule_type', 'month_by',
'interval', 'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa',
'su', 'day', 'week_list'], context=context)
for event in events:
if event['recurrency']:
result[event['id']] = self.compute_rule_string(event)
else:
result[event['id']] = ''
return result
# retro-compatibility wrapper
def _rrule_write(self, cr, uid, ids, field_name, field_value, args, context=None):
return self._set_rulestring(cr, uid, ids, field_name, field_value, args, context=context)
def _set_rulestring(self, cr, uid, ids, field_name, field_value, args, context=None):
if not isinstance(ids, list):
ids = [ids]
data = self._get_empty_rrule_data()
if field_value:
data['recurrency'] = True
for event in self.browse(cr, uid, ids, context=context):
rdate = event.start
update_data = self._parse_rrule(field_value, dict(data), rdate)
data.update(update_data)
self.write(cr, uid, ids, data, context=context)
return True
def _set_date(self, cr, uid, values, id=False, context=None):
if context is None:
context = {}
if values.get('start_datetime') or values.get('start_date') or values.get('start') \
or values.get('stop_datetime') or values.get('stop_date') or values.get('stop'):
allday = values.get("allday", None)
event = self.browse(cr, uid, id, context=context)
if allday is None:
if id:
allday = event.allday
else:
allday = False
_logger.warning("Calendar - All day is not specified, arbitrarily set to False")
#raise osv.except_osv(_('Error!'), ("Need to know if it's an allday or not..."))
key = "date" if allday else "datetime"
notkey = "datetime" if allday else "date"
for fld in ('start', 'stop'):
if values.get('%s_%s' % (fld, key)) or values.get(fld):
values['%s_%s' % (fld, key)] = values.get('%s_%s' % (fld, key)) or values.get(fld)
values['%s_%s' % (fld, notkey)] = None
if fld not in values.keys():
values[fld] = values['%s_%s' % (fld, key)]
diff = False
if allday and (values.get('stop_date') or values.get('start_date')):
stop_date = values.get('stop_date') or event.stop_date
start_date = values.get('start_date') or event.start_date
if stop_date and start_date:
diff = openerp.fields.Date.from_string(stop_date) - openerp.fields.Date.from_string(start_date)
elif values.get('stop_datetime') or values.get('start_datetime'):
stop_datetime = values.get('stop_datetime') or event.stop_datetime
start_datetime = values.get('start_datetime') or event.start_datetime
if stop_datetime and start_datetime:
diff = openerp.fields.Datetime.from_string(stop_datetime) - openerp.fields.Datetime.from_string(start_datetime)
if diff:
duration = float(diff.days) * 24 + (float(diff.seconds) / 3600)
values['duration'] = round(duration, 2)
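# Worked example (illustrative): start_datetime='2014-01-01 09:00:00' and
# stop_datetime='2014-01-01 11:30:00' give diff=2:30:00, hence
# duration = 0 * 24 + 9000.0 / 3600 = 2.5 hours.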
_track = {
'location': {
'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
},
'start': {
'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
},
}
_columns = {
'id': fields.integer('ID', readonly=True),
'state': fields.selection([('draft', 'Unconfirmed'), ('open', 'Confirmed')], string='Status', readonly=True, track_visibility='onchange'),
'name': fields.char('Meeting Subject', required=True, states={'done': [('readonly', True)]}),
'is_attendee': fields.function(_compute, string='Attendee', type="boolean", multi='attendee'),
'attendee_status': fields.function(_compute, string='Attendee Status', type="selection", selection=calendar_attendee.STATE_SELECTION, multi='attendee'),
'display_time': fields.function(_compute, string='Event Time', type="char", multi='attendee'),
'display_start': fields.function(_compute, string='Date', type="char", multi='attendee', store=True),
'allday': fields.boolean('All Day', states={'done': [('readonly', True)]}),
'start': fields.function(_compute, string='Calculated start', type="datetime", multi='attendee', store=True, required=True),
'stop': fields.function(_compute, string='Calculated stop', type="datetime", multi='attendee', store=True, required=True),
'start_date': fields.date('Start Date', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'start_datetime': fields.datetime('Start DateTime', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'stop_date': fields.date('End Date', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'stop_datetime': fields.datetime('End Datetime', states={'done': [('readonly', True)]}, track_visibility='onchange'), # old date_deadline
'duration': fields.float('Duration', states={'done': [('readonly', True)]}),
'description': fields.text('Description', states={'done': [('readonly', True)]}),
'class': fields.selection([('public', 'Public'), ('private', 'Private'), ('confidential', 'Public for Employees')], 'Privacy', states={'done': [('readonly', True)]}),
'location': fields.char('Location', help="Location of Event", track_visibility='onchange', states={'done': [('readonly', True)]}),
'show_as': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Show Time as', states={'done': [('readonly', True)]}),
# RECURRENCE FIELD
'rrule': fields.function(_get_rulestring, type='char', fnct_inv=_set_rulestring, store=True, string='Recurrent Rule'),
'rrule_type': fields.selection([('daily', 'Day(s)'), ('weekly', 'Week(s)'), ('monthly', 'Month(s)'), ('yearly', 'Year(s)')], 'Recurrency', states={'done': [('readonly', True)]}, help="Let the event automatically repeat at that interval"),
'recurrency': fields.boolean('Recurrent', help="Recurrent Meeting"),
'recurrent_id': fields.integer('Recurrent ID'),
'recurrent_id_date': fields.datetime('Recurrent ID date'),
'end_type': fields.selection([('count', 'Number of repetitions'), ('end_date', 'End date')], 'Recurrence Termination'),
'interval': fields.integer('Repeat Every', help="Repeat every (Days/Week/Month/Year)"),
'count': fields.integer('Repeat', help="Repeat x times"),
'mo': fields.boolean('Mon'),
'tu': fields.boolean('Tue'),
'we': fields.boolean('Wed'),
'th': fields.boolean('Thu'),
'fr': fields.boolean('Fri'),
'sa': fields.boolean('Sat'),
'su': fields.boolean('Sun'),
'month_by': fields.selection([('date', 'Date of month'), ('day', 'Day of month')], 'Option', oldname='select1'),
'day': fields.integer('Date of month'),
'week_list': fields.selection([('MO', 'Monday'), ('TU', 'Tuesday'), ('WE', 'Wednesday'), ('TH', 'Thursday'), ('FR', 'Friday'), ('SA', 'Saturday'), ('SU', 'Sunday')], 'Weekday'),
'byday': fields.selection([('1', 'First'), ('2', 'Second'), ('3', 'Third'), ('4', 'Fourth'), ('5', 'Fifth'), ('-1', 'Last')], 'By day'),
'final_date': fields.date('Repeat Until'), # The last event of a recurrence
'user_id': fields.many2one('res.users', 'Responsible', states={'done': [('readonly', True)]}),
'color_partner_id': fields.related('user_id', 'partner_id', 'id', type="integer", string="colorize", store=False), # Color of creator
'active': fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the event alarm information without removing it."),
'categ_ids': fields.many2many('calendar.event.type', 'meeting_category_rel', 'event_id', 'type_id', 'Tags'),
'attendee_ids': fields.one2many('calendar.attendee', 'event_id', 'Attendees', ondelete='cascade'),
'partner_ids': fields.many2many('res.partner', 'calendar_event_res_partner_rel', string='Attendees', states={'done': [('readonly', True)]}),
'alarm_ids': fields.many2many('calendar.alarm', 'calendar_alarm_calendar_event_rel', string='Reminders', ondelete="restrict", copy=False),
}
def _get_default_partners(self, cr, uid, ctx=None):
ctx = ctx or {}
ret = [self.pool['res.users'].browse(cr, uid, uid, context=ctx).partner_id.id]
active_id = ctx.get('active_id')
if ctx.get('active_model') == 'res.partner' and active_id:
if active_id not in ret:
ret.append(active_id)
return ret
_defaults = {
'end_type': 'count',
'count': 1,
'rrule_type': False,
'allday': False,
'state': 'draft',
'class': 'public',
'show_as': 'busy',
'month_by': 'date',
'interval': 1,
'active': 1,
'user_id': lambda self, cr, uid, ctx: uid,
'partner_ids': _get_default_partners,
}
def _check_closing_date(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context=context):
if event.start_datetime and event.stop_datetime < event.start_datetime:
return False
if event.start_date and event.stop_date < event.start_date:
return False
return True
_constraints = [
(_check_closing_date, 'Error! End date cannot be set before start date.', ['start_datetime', 'stop_datetime', 'start_date', 'stop_date'])
]
def onchange_allday(self, cr, uid, ids, start=False, end=False, starttime=False, endtime=False, startdatetime=False, enddatetime=False, checkallday=False, context=None):
value = {}
if not ((starttime and endtime) or (start and end)): # on first initialization, we do not have a datetime yet
return value
if checkallday: # from datetime to date
startdatetime = startdatetime or start
if startdatetime:
start = datetime.strptime(startdatetime, DEFAULT_SERVER_DATETIME_FORMAT)
value['start_date'] = datetime.strftime(start, DEFAULT_SERVER_DATE_FORMAT)
enddatetime = enddatetime or end
if enddatetime:
end = datetime.strptime(enddatetime, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop_date'] = datetime.strftime(end, DEFAULT_SERVER_DATE_FORMAT)
else: # from date to datetime
user = self.pool['res.users'].browse(cr, uid, uid, context)
tz = pytz.timezone(user.tz) if user.tz else pytz.utc
if starttime:
start = openerp.fields.Datetime.from_string(starttime)
startdate = tz.localize(start) # Add "+hh:mm" timezone
startdate = startdate.replace(hour=8) # Set 8 AM in localtime
startdate = startdate.astimezone(pytz.utc) # Convert to UTC
value['start_datetime'] = datetime.strftime(startdate, DEFAULT_SERVER_DATETIME_FORMAT)
elif start:
value['start_datetime'] = start
if endtime:
end = datetime.strptime(endtime.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT)
enddate = tz.localize(end).replace(hour=18).astimezone(pytz.utc)
value['stop_datetime'] = datetime.strftime(enddate, DEFAULT_SERVER_DATETIME_FORMAT)
elif end:
value['stop_datetime'] = end
return {'value': value}
def onchange_dates(self, cr, uid, ids, fromtype, start=False, end=False, checkallday=False, allday=False, context=None):
"""Returns duration and end date based on values passed
@param ids: List of calendar event's IDs.
"""
value = {}
if checkallday != allday:
return value
value['allday'] = checkallday # force the value to be rewritten
if allday:
if fromtype == 'start' and start:
start = datetime.strptime(start, DEFAULT_SERVER_DATE_FORMAT)
value['start_datetime'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
value['start'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
if fromtype == 'stop' and end:
end = datetime.strptime(end, DEFAULT_SERVER_DATE_FORMAT)
value['stop_datetime'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
else:
if fromtype == 'start' and start:
start = datetime.strptime(start, DEFAULT_SERVER_DATETIME_FORMAT)
value['start_date'] = datetime.strftime(start, DEFAULT_SERVER_DATE_FORMAT)
value['start'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
if fromtype == 'stop' and end:
end = datetime.strptime(end, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop_date'] = datetime.strftime(end, DEFAULT_SERVER_DATE_FORMAT)
value['stop'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
return {'value': value}
def new_invitation_token(self, cr, uid, record, partner_id):
return uuid.uuid4().hex
def create_attendees(self, cr, uid, ids, context=None):
if context is None:
context = {}
user_obj = self.pool['res.users']
current_user = user_obj.browse(cr, uid, uid, context=context)
res = {}
for event in self.browse(cr, uid, ids, context):
attendees = {}
for att in event.attendee_ids:
attendees[att.partner_id.id] = True
new_attendees = []
new_att_partner_ids = []
for partner in event.partner_ids:
if partner.id in attendees:
continue
access_token = self.new_invitation_token(cr, uid, event, partner.id)
values = {
'partner_id': partner.id,
'event_id': event.id,
'access_token': access_token,
'email': partner.email,
}
if partner.id == current_user.partner_id.id:
values['state'] = 'accepted'
att_id = self.pool['calendar.attendee'].create(cr, uid, values, context=context)
new_attendees.append(att_id)
new_att_partner_ids.append(partner.id)
if not current_user.email or current_user.email != partner.email:
mail_from = current_user.email or tools.config.get('email_from', False)
if not context.get('no_email'):
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, att_id, email_from=mail_from, context=context):
self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee %s") % (partner.name,), subtype="calendar.subtype_invitation", context=context)
if new_attendees:
self.write(cr, uid, [event.id], {'attendee_ids': [(4, att) for att in new_attendees]}, context=context)
if new_att_partner_ids:
self.message_subscribe(cr, uid, [event.id], new_att_partner_ids, context=context)
# We remove old attendees who are not in partner_ids now.
all_partner_ids = [part.id for part in event.partner_ids]
all_part_attendee_ids = [att.partner_id.id for att in event.attendee_ids]
all_attendee_ids = [att.id for att in event.attendee_ids]
partner_ids_to_remove = list(set(all_part_attendee_ids + new_att_partner_ids) - set(all_partner_ids))
attendee_ids_to_remove = []
if partner_ids_to_remove:
attendee_ids_to_remove = self.pool["calendar.attendee"].search(cr, uid, [('partner_id.id', 'in', partner_ids_to_remove), ('event_id.id', '=', event.id)], context=context)
if attendee_ids_to_remove:
self.pool['calendar.attendee'].unlink(cr, uid, attendee_ids_to_remove, context)
res[event.id] = {
'new_attendee_ids': new_attendees,
'old_attendee_ids': all_attendee_ids,
'removed_attendee_ids': attendee_ids_to_remove
}
return res
def get_search_fields(self, browse_event, order_fields, r_date=None):
sort_fields = {}
for ord in order_fields:
if ord == 'id' and r_date:
sort_fields[ord] = '%s-%s' % (browse_event[ord], r_date.strftime("%Y%m%d%H%M%S"))
else:
sort_fields[ord] = browse_event[ord]
if type(browse_event[ord]) is openerp.osv.orm.browse_record:
name_get = browse_event[ord].name_get()
if len(name_get) and len(name_get[0]) >= 2:
sort_fields[ord] = name_get[0][1]
if r_date:
sort_fields['sort_start'] = r_date.strftime("%Y%m%d%H%M%S")
else:
sort_fields['sort_start'] = browse_event['display_start'].replace(' ', '').replace('-', '')
return sort_fields
def get_recurrent_ids(self, cr, uid, event_id, domain, order=None, context=None):
"""Gives virtual event ids for recurring events
This method gives ids of dates that comes between start date and end date of calendar views
@param order: The fields (comma separated, format "FIELD {DESC|ASC}") on which the events should be sorted
"""
if not context:
context = {}
if isinstance(event_id, (basestring, int, long)):
ids_to_browse = [event_id] # keep select for return
else:
ids_to_browse = event_id
if order:
order_fields = [field.split()[0] for field in order.split(',')]
else:
# fallback on self._order defined on the model
order_fields = [field.split()[0] for field in self._order.split(',')]
if 'id' not in order_fields:
order_fields.append('id')
result_data = []
result = []
for ev in self.browse(cr, uid, ids_to_browse, context=context):
if not ev.recurrency or not ev.rrule:
result.append(ev.id)
result_data.append(self.get_search_fields(ev, order_fields))
continue
rdates = self.get_recurrent_date_by_event(cr, uid, ev, context=context)
for r_date in rdates:
# evaluate the domain against this occurrence date
# step 1: replace date conditions by True or False, and every other condition by True
# step 2: evaluate the '&' and '|' operators
# then check whether any result is False
pile = []
ok = True
for arg in domain:
if str(arg[0]) in ('start', 'stop', 'final_date'):
if (arg[1] == '='):
ok = r_date.strftime('%Y-%m-%d') == arg[2]
if (arg[1] == '>'):
ok = r_date.strftime('%Y-%m-%d') > arg[2]
if (arg[1] == '<'):
ok = r_date.strftime('%Y-%m-%d') < arg[2]
if (arg[1] == '>='):
ok = r_date.strftime('%Y-%m-%d') >= arg[2]
if (arg[1] == '<='):
ok = r_date.strftime('%Y-%m-%d') <= arg[2]
pile.append(ok)
elif str(arg) == str('&') or str(arg) == str('|'):
pile.append(arg)
else:
pile.append(True)
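# Worked example (illustrative): domain ['&', ('start', '>=', '2014-01-01'),
# ('user_id', '=', 1)] becomes pile = ['&', <date check result>, True];
# after the reverse, the postfix evaluation below reduces it to one boolean.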
pile.reverse()
new_pile = []
for item in pile:
if not isinstance(item, basestring):
res = item
elif str(item) == str('&'):
first = new_pile.pop()
second = new_pile.pop()
res = first and second
elif item == '|':
first = new_pile.pop()
second = new_pile.pop()
res = first or second
new_pile.append(res)
if not all(new_pile):
continue
result_data.append(self.get_search_fields(ev, order_fields, r_date=r_date))
if order_fields:
uniq = lambda it: collections.OrderedDict((id(x), x) for x in it).values()
def comparer(left, right):
for fn, mult in comparers:
result = cmp(fn(left), fn(right))
if result:
return mult * result
return 0
sort_params = [key.split()[0] if key[-4:].lower() != 'desc' else '-%s' % key.split()[0] for key in (order or self._order).split(',')]
sort_params = uniq([comp if comp not in ['start', 'start_date', 'start_datetime'] else 'sort_start' for comp in sort_params])
sort_params = uniq([comp if comp not in ['-start', '-start_date', '-start_datetime'] else '-sort_start' for comp in sort_params])
comparers = [((itemgetter(col[1:]), -1) if col[0] == '-' else (itemgetter(col), 1)) for col in sort_params]
ids = [r['id'] for r in sorted(result_data, cmp=comparer)]
if isinstance(event_id, (basestring, int, long)):
return ids and ids[0] or False
else:
return ids
def compute_rule_string(self, data):
"""
Compute rule string according to value type RECUR of iCalendar from the values given.
@param self: the object pointer
@param data: dictionary of freq and interval value
@return: string containing recurring rule (empty if no rule)
"""
if data['interval'] and data['interval'] < 0:
raise osv.except_osv(_('warning!'), _('interval cannot be negative.'))
if data['count'] and data['count'] <= 0:
raise osv.except_osv(_('warning!'), _('count cannot be negative or 0.'))
def get_week_string(freq, data):
weekdays = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
if freq == 'weekly':
byday = map(lambda x: x.upper(), filter(lambda x: data.get(x) and x in weekdays, data))
if byday:
return ';BYDAY=' + ','.join(byday)
return ''
def get_month_string(freq, data):
if freq == 'monthly':
if data.get('month_by') == 'date' and (data.get('day') < 1 or data.get('day') > 31):
raise osv.except_osv(_('Error!'), _("Please select a proper day of the month."))
if data.get('month_by') == 'day': # Eg : Second Monday of the month
return ';BYDAY=' + data.get('byday') + data.get('week_list')
elif data.get('month_by') == 'date': # Eg : 16th of the month
return ';BYMONTHDAY=' + str(data.get('day'))
return ''
def get_end_date(data):
if data.get('final_date'):
data['end_date_new'] = ''.join(re.compile(r'\d').findall(data.get('final_date'))) + 'T235959Z'
return (data.get('end_type') == 'count' and (';COUNT=' + str(data.get('count'))) or '') +\
((data.get('end_date_new') and data.get('end_type') == 'end_date' and (';UNTIL=' + data.get('end_date_new'))) or '')
freq = data.get('rrule_type', False) # day/week/month/year
res = ''
if freq:
interval_string = data.get('interval') and (';INTERVAL=' + str(data.get('interval'))) or ''
res = 'FREQ=' + freq.upper() + get_week_string(freq, data) + interval_string + get_end_date(data) + get_month_string(freq, data)
return res
def _get_empty_rrule_data(self):
return {
'byday': False,
'recurrency': False,
'final_date': False,
'rrule_type': False,
'month_by': False,
'interval': 0,
'count': False,
'end_type': False,
'mo': False,
'tu': False,
'we': False,
'th': False,
'fr': False,
'sa': False,
'su': False,
'day': False,
'week_list': False
}
def _parse_rrule(self, rule, data, date_start):
day_list = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
rrule_type = ['yearly', 'monthly', 'weekly', 'daily']
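# Illustrative example (hypothetical rule, not from the original source):
# parsing 'FREQ=WEEKLY;INTERVAL=2;BYDAY=MO,FR;COUNT=3' sets
# data['rrule_type'] = 'weekly', data['interval'] = 2,
# data['mo'] = data['fr'] = True, data['count'] = 3 and data['end_type'] = 'count'.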
r = rrule.rrulestr(rule, dtstart=datetime.strptime(date_start, DEFAULT_SERVER_DATETIME_FORMAT))
if r._freq > 0 and r._freq < 4:
data['rrule_type'] = rrule_type[r._freq]
data['count'] = r._count
data['interval'] = r._interval
data['final_date'] = r._until and r._until.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
#repeat weekly
if r._byweekday:
for i in xrange(0, 7):
if i in r._byweekday:
data[day_list[i]] = True
data['rrule_type'] = 'weekly'
#repeat monthly by nweekday ((weekday, weeknumber), )
if r._bynweekday:
data['week_list'] = day_list[r._bynweekday[0][0]].upper()
data['byday'] = str(r._bynweekday[0][1])
data['month_by'] = 'day'
data['rrule_type'] = 'monthly'
if r._bymonthday:
data['day'] = r._bymonthday[0]
data['month_by'] = 'date'
data['rrule_type'] = 'monthly'
# a yearly repeat is stored as monthly in OpenERP: take the same information as monthly, but multiply the interval by 12
if r._bymonth:
data['interval'] = data['interval'] * 12
# FIXME: handle the 'forever' case
# end of recurrence
# repeating forever is not supported right now, so cap the count at 100
if not (data.get('count') or data.get('final_date')):
data['count'] = 100
if data.get('count'):
data['end_type'] = 'count'
else:
data['end_type'] = 'end_date'
return data
def message_get_subscription_data(self, cr, uid, ids, user_pid=None, context=None):
res = {}
for virtual_id in ids:
real_id = calendar_id2real_id(virtual_id)
result = super(calendar_event, self).message_get_subscription_data(cr, uid, [real_id], user_pid=None, context=context)
res[virtual_id] = result[real_id]
return res
def onchange_partner_ids(self, cr, uid, ids, value, context=None):
""" The basic purpose of this method is to check that destination partners
effectively have email addresses. Otherwise a warning is thrown.
:param value: value format: [[6, 0, [3, 4]]]
"""
res = {'value': {}}
if not value or not value[0] or not value[0][0] == 6:
return res
res.update(self.check_partners_email(cr, uid, value[0][2], context=context))
return res
def check_partners_email(self, cr, uid, partner_ids, context=None):
""" Verify that selected partner_ids have an email_address defined.
Otherwise throw a warning. """
partner_wo_email_lst = []
for partner in self.pool['res.partner'].browse(cr, uid, partner_ids, context=context):
if not partner.email:
partner_wo_email_lst.append(partner)
if not partner_wo_email_lst:
return {}
warning_msg = _('The following contacts have no email address:')
for partner in partner_wo_email_lst:
warning_msg += '\n- %s' % (partner.name)
return {'warning': {
'title': _('Email addresses not found'),
'message': warning_msg,
}}
# shows events of the day for this user
def _needaction_domain_get(self, cr, uid, context=None):
return [
('stop', '<=', time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 23:59:59')),
('start', '>=', time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 00:00:00')),
('user_id', '=', uid),
]
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, **kwargs):
if isinstance(thread_id, basestring):
thread_id = get_real_ids(thread_id)
if context is not None and context.get('default_date'):
del context['default_date']
return super(calendar_event, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, **kwargs)
def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
return super(calendar_event, self).message_subscribe(cr, uid, get_real_ids(ids), partner_ids, subtype_ids=subtype_ids, context=context)
def message_unsubscribe(self, cr, uid, ids, partner_ids, context=None):
return super(calendar_event, self).message_unsubscribe(cr, uid, get_real_ids(ids), partner_ids, context=context)
def do_sendmail(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if current_user.email:
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, [att.id for att in event.attendee_ids], email_from=current_user.email, context=context):
self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee(s)"), subtype="calendar.subtype_invitation", context=context)
return
def get_attendee(self, cr, uid, meeting_id, context=None):
# Used for view in controller
invitation = {'meeting': {}, 'attendee': []}
meeting = self.browse(cr, uid, int(meeting_id), context=context)
invitation['meeting'] = {
'event': meeting.name,
'where': meeting.location,
'when': meeting.display_time
}
for attendee in meeting.attendee_ids:
invitation['attendee'].append({'name': attendee.cn, 'status': attendee.state})
return invitation
def get_interval(self, cr, uid, ids, date, interval, tz=None, context=None):
''' Format and localize some dates to be used in email templates
:param string date: date/time to be formatted
:param string interval: Among 'day', 'month', 'dayname' and 'time' indicating the desired formatting
:param string tz: Timezone indicator (optional)
:return unicode: Formatted date or time (as unicode string, to prevent jinja2 crash)
(Function used only in calendar_event_data.xml) '''
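# Illustrative examples (hypothetical values, en_US locale):
# get_interval(..., '2015-06-24 14:30:00', 'day') -> u'24'
# get_interval(..., '2015-06-24 14:30:00', 'month') -> u'June 2015'
# get_interval(..., '2015-06-24 14:30:00', 'dayname') -> u'Wednesday'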
date = openerp.fields.Datetime.from_string(date)
if tz:
timezone = pytz.timezone(tz or 'UTC')
date = date.replace(tzinfo=pytz.timezone('UTC')).astimezone(timezone)
if interval == 'day':
# Day number (1-31)
res = unicode(date.day)
elif interval == 'month':
# Localized month name and year
res = babel.dates.format_date(date=date, format='MMMM y', locale=context.get('lang', 'en_US'))
elif interval == 'dayname':
# Localized day name
res = babel.dates.format_date(date=date, format='EEEE', locale=context.get('lang', 'en_US'))
elif interval == 'time':
# Localized time
dummy, format_time = self.get_date_formats(cr, uid, context=context)
res = tools.ustr(date.strftime(format_time + " %Z"))
return res
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('mymeetings', False):
partner_id = self.pool['res.users'].browse(cr, uid, uid, context).partner_id.id
args += [('partner_ids', 'in', [partner_id])]
new_args = []
for arg in args:
new_arg = arg
if arg[0] in ('start_date', 'start_datetime', 'start',) and arg[1] == ">=":
if context.get('virtual_id', True):
new_args += ['|', '&', ('recurrency', '=', 1), ('final_date', arg[1], arg[2])]
elif arg[0] == "id":
new_id = get_real_ids(arg[2])
new_arg = (arg[0], arg[1], new_id)
new_args.append(new_arg)
if not context.get('virtual_id', True):
return super(calendar_event, self).search(cr, uid, new_args, offset=offset, limit=limit, order=order, count=count, context=context)
# offset, limit, order and count must be treated separately as we may need to deal with virtual ids
res = super(calendar_event, self).search(cr, uid, new_args, offset=0, limit=0, order=None, context=context, count=False)
res = self.get_recurrent_ids(cr, uid, res, args, order=order, context=context)
if count:
return len(res)
elif limit:
return res[offset: offset + limit]
return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
self._set_date(cr, uid, default, id=default.get('id'), context=context)
return super(calendar_event, self).copy(cr, uid, calendar_id2real_id(id), default, context)
def _detach_one_event(self, cr, uid, id, values=dict(), context=None):
real_event_id = calendar_id2real_id(id)
data = self.read(cr, uid, id, ['allday', 'start', 'stop', 'rrule', 'duration'], context=context)
data['start_date' if data['allday'] else 'start_datetime'] = data['start']
data['stop_date' if data['allday'] else 'stop_datetime'] = data['stop']
if data.get('rrule'):
data.update(
values,
recurrent_id=real_event_id,
recurrent_id_date=data.get('start'),
rrule_type=False,
rrule='',
recurrency=False,
final_date=datetime.strptime(data.get('start'), DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(hours=values.get('duration', False) or data.get('duration'))
)
#do not copy the id
if data.get('id'):
del(data['id'])
new_id = self.copy(cr, uid, real_event_id, default=data, context=context)
return new_id
def open_after_detach_event(self, cr, uid, ids, context=None):
if context is None:
context = {}
new_id = self._detach_one_event(cr, uid, ids[0], context=context)
return {
'type': 'ir.actions.act_window',
'res_model': 'calendar.event',
'view_mode': 'form',
'res_id': new_id,
'target': 'current',
'flags': {'form': {'action_buttons': True, 'options': {'mode': 'edit'}}}
}
def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
for arg in args:
if arg[0] == 'id':
for n, calendar_id in enumerate(arg[2]):
if isinstance(calendar_id, basestring):
arg[2][n] = calendar_id.split('-')[0]
return super(calendar_event, self)._name_search(cr, user, name=name, args=args, operator=operator, context=context, limit=limit, name_get_uid=name_get_uid)
def write(self, cr, uid, ids, values, context=None):
def _only_changes_to_apply_on_real_ids(field_names):
''' return True if changes are only to be made on the real ids'''
for field in field_names:
if field in ['start', 'start_date', 'start_datetime', 'stop', 'stop_date', 'stop_datetime', 'active']:
return True
return False
if not isinstance(ids, (tuple, list)):
ids = [ids]
context = context or {}
self._set_date(cr, uid, values, id=ids[0], context=context)
for one_ids in ids:
if isinstance(one_ids, (basestring, int, long)):
if len(str(one_ids).split('-')) == 1:
ids = [int(one_ids)]
else:
ids = [one_ids]
res = False
new_id = False
# Special write of complex IDS
for event_id in list(ids):
if len(str(event_id).split('-')) == 1:
continue
ids.remove(event_id)
real_event_id = calendar_id2real_id(event_id)
# if we are setting the recurrency flag to False or if we are only changing fields that
# should be only updated on the real ID and not on the virtual (like message_follower_ids):
# then set real ids to be updated.
if not values.get('recurrency', True) or not _only_changes_to_apply_on_real_ids(values.keys()):
ids.append(real_event_id)
continue
else:
data = self.read(cr, uid, event_id, ['start', 'stop', 'rrule', 'duration'])
if data.get('rrule'):
new_id = self._detach_one_event(cr, uid, event_id, values, context=None)
res = super(calendar_event, self).write(cr, uid, [int(event_id) for event_id in ids], values, context=context)
# set end_date for calendar searching
if values.get('recurrency', True) and values.get('end_type', 'count') == 'count' and \
(values.get('rrule_type') or values.get('count') or values.get('start') or values.get('stop')):
for id in ids:
final_date = self._get_recurrency_end_date(cr, uid, id, context=context)
super(calendar_event, self).write(cr, uid, [id], {'final_date': final_date}, context=context)
attendees_create = False
if values.get('partner_ids', False):
attendees_create = self.create_attendees(cr, uid, ids, context)
if (values.get('start_date') or values.get('start_datetime', False)) and values.get('active', True):
the_id = new_id or (ids and int(ids[0]))
if the_id:
if attendees_create:
attendees_create = attendees_create[the_id]
mail_to_ids = list(set(attendees_create['old_attendee_ids']) - set(attendees_create['removed_attendee_ids']))
else:
mail_to_ids = [att.id for att in self.browse(cr, uid, the_id, context=context).attendee_ids]
if mail_to_ids:
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, mail_to_ids, template_xmlid='calendar_template_meeting_changedate', email_from=current_user.email, context=context):
self.message_post(cr, uid, the_id, body=_("An email has been sent to notify attendees that the date has been changed!"), subtype="calendar.subtype_invitation", context=context)
return res
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
self._set_date(cr, uid, vals, id=False, context=context)
if 'user_id' not in vals: # otherwise quick_create breaks when filtering on another user
vals['user_id'] = uid
res = super(calendar_event, self).create(cr, uid, vals, context=context)
final_date = self._get_recurrency_end_date(cr, uid, res, context=context)
self.write(cr, uid, [res], {'final_date': final_date}, context=context)
self.create_attendees(cr, uid, [res], context=context)
return res
def export_data(self, cr, uid, ids, *args, **kwargs):
""" Override to convert virtual ids to ids """
real_ids = []
for real_id in get_real_ids(ids):
if real_id not in real_ids:
real_ids.append(real_id)
return super(calendar_event, self).export_data(cr, uid, real_ids, *args, **kwargs)
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
context = dict(context or {})
if 'date' in groupby:
raise osv.except_osv(_('Warning!'), _('Group by date is not supported, use the calendar view instead.'))
virtual_id = context.get('virtual_id', True)
context.update({'virtual_id': False})
res = super(calendar_event, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
return res
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
if context is None:
context = {}
fields2 = fields and fields[:] or None
EXTRAFIELDS = ('class', 'user_id', 'duration', 'allday', 'start', 'start_date', 'start_datetime', 'rrule')
for f in EXTRAFIELDS:
if fields and (f not in fields):
fields2.append(f)
if isinstance(ids, (basestring, int, long)):
select = [ids]
else:
select = ids
select = map(lambda x: (x, calendar_id2real_id(x)), select)
result = []
real_data = super(calendar_event, self).read(cr, uid, [real_id for calendar_id, real_id in select], fields=fields2, context=context, load=load)
real_data = dict(zip([x['id'] for x in real_data], real_data))
for calendar_id, real_id in select:
res = real_data[real_id].copy()
ls = calendar_id2real_id(calendar_id, with_date=res and res.get('duration', 0) > 0 and res.get('duration') or 1)
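# Assumption inferred from the usage below: with with_date set,
# calendar_id2real_id returns a (real_id, start, stop) tuple for virtual
# occurrence ids, and a plain id otherwise.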
if not isinstance(ls, (basestring, int, long)) and len(ls) >= 2:
res['start'] = ls[1]
res['stop'] = ls[2]
if res['allday']:
res['start_date'] = ls[1]
res['stop_date'] = ls[2]
else:
res['start_datetime'] = ls[1]
res['stop_datetime'] = ls[2]
if 'display_time' in fields:
res['display_time'] = self._get_display_time(cr, uid, ls[1], ls[2], res['duration'], res['allday'], context=context)
res['id'] = calendar_id
result.append(res)
for r in result:
if r['user_id']:
user_id = type(r['user_id']) in (tuple, list) and r['user_id'][0] or r['user_id']
if user_id == uid:
continue
if r['class'] == 'private':
for f in r.keys():
if f not in ('id', 'allday', 'start', 'stop', 'duration', 'user_id', 'state', 'interval', 'count', 'recurrent_id_date', 'rrule'):
if isinstance(r[f], list):
r[f] = []
else:
r[f] = False
if f == 'name':
r[f] = _('Busy')
for r in result:
for k in EXTRAFIELDS:
if (k in r) and (fields and (k not in fields)):
del r[k]
if isinstance(ids, (basestring, int, long)):
return result and result[0] or False
return result
def unlink(self, cr, uid, ids, can_be_deleted=True, context=None):
if not isinstance(ids, list):
ids = [ids]
res = False
ids_to_exclure = []
ids_to_unlink = []
for event_id in ids:
if can_be_deleted and len(str(event_id).split('-')) == 1: # real (non-virtual) ID
if self.browse(cr, uid, int(event_id), context).recurrent_id:
ids_to_exclure.append(event_id)
else:
ids_to_unlink.append(int(event_id))
else:
ids_to_exclure.append(event_id)
if ids_to_unlink:
res = super(calendar_event, self).unlink(cr, uid, ids_to_unlink, context=context)
if ids_to_exclure:
for id_to_exclure in ids_to_exclure:
res = self.write(cr, uid, id_to_exclure, {'active': False}, context=context)
return res
class mail_message(osv.Model):
_inherit = "mail.message"
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
'''
        Convert a search on virtual ids into a search on the corresponding real ids, then call super().
'''
for index in range(len(args)):
if args[index][0] == "res_id" and isinstance(args[index][2], basestring):
args[index][2] = get_real_ids(args[index][2])
return super(mail_message, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
def _find_allowed_model_wise(self, cr, uid, doc_model, doc_dict, context=None):
if context is None:
context = {}
if doc_model == 'calendar.event':
order = context.get('order', self._order)
for virtual_id in self.pool[doc_model].get_recurrent_ids(cr, uid, doc_dict.keys(), [], order=order, context=context):
doc_dict.setdefault(virtual_id, doc_dict[get_real_ids(virtual_id)])
return super(mail_message, self)._find_allowed_model_wise(cr, uid, doc_model, doc_dict, context=context)
class ir_attachment(osv.Model):
_inherit = "ir.attachment"
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
'''
        Convert a search on virtual ids into a search on the corresponding real ids, then call super().
'''
for index in range(len(args)):
if args[index][0] == "res_id" and isinstance(args[index][2], basestring):
args[index][2] = get_real_ids(args[index][2])
return super(ir_attachment, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
def write(self, cr, uid, ids, vals, context=None):
'''
        When posting an attachment (new or not), convert virtual ids into real ids.
'''
if isinstance(vals.get('res_id'), basestring):
vals['res_id'] = get_real_ids(vals.get('res_id'))
return super(ir_attachment, self).write(cr, uid, ids, vals, context=context)
class ir_http(osv.AbstractModel):
_inherit = 'ir.http'
def _auth_method_calendar(self):
token = request.params['token']
db = request.params['db']
registry = openerp.modules.registry.RegistryManager.get(db)
attendee_pool = registry.get('calendar.attendee')
error_message = False
with registry.cursor() as cr:
attendee_id = attendee_pool.search(cr, openerp.SUPERUSER_ID, [('access_token', '=', token)])
if not attendee_id:
error_message = """Invalid Invitation Token."""
elif request.session.uid and request.session.login != 'anonymous':
                # valid session, but the logged-in user does not match the attendee
attendee = attendee_pool.browse(cr, openerp.SUPERUSER_ID, attendee_id[0])
user = registry.get('res.users').browse(cr, openerp.SUPERUSER_ID, request.session.uid)
if attendee.partner_id.id != user.partner_id.id:
error_message = """Invitation cannot be forwarded via email. This event/meeting belongs to %s and you are logged in as %s. Please ask organizer to add you.""" % (attendee.email, user.email)
if error_message:
raise BadRequest(error_message)
return True
class invite_wizard(osv.osv_memory):
_inherit = 'mail.wizard.invite'
def default_get(self, cr, uid, fields, context=None):
'''
        If someone clicked the 'invite others' wizard in the followers widget, transform virtual ids into real ids.
'''
if 'default_res_id' in context:
context = dict(context, default_res_id=get_real_ids(context['default_res_id']))
result = super(invite_wizard, self).default_get(cr, uid, fields, context=context)
if 'res_id' in result:
result['res_id'] = get_real_ids(result['res_id'])
return result
| agpl-3.0 |
bastik/youtube-dl | youtube_dl/extractor/francetv.py | 44 | 12637 | # encoding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
clean_html,
ExtractorError,
int_or_none,
parse_duration,
determine_ext,
)
from .dailymotion import DailymotionCloudIE
class FranceTVBaseInfoExtractor(InfoExtractor):
def _extract_video(self, video_id, catalogue):
info = self._download_json(
'http://webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=%s&catalogue=%s'
% (video_id, catalogue),
video_id, 'Downloading video JSON')
if info.get('status') == 'NOK':
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, info['message']), expected=True)
allowed_countries = info['videos'][0].get('geoblocage')
if allowed_countries:
georestricted = True
geo_info = self._download_json(
'http://geo.francetv.fr/ws/edgescape.json', video_id,
'Downloading geo restriction info')
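            # Expected JSON shape (inferred from the lookup below):
            #   {'reponse': {'geo_info': {'country_code': 'FR', ...}}}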
country = geo_info['reponse']['geo_info']['country_code']
if country not in allowed_countries:
raise ExtractorError(
'The video is not available from your location',
expected=True)
else:
georestricted = False
formats = []
for video in info['videos']:
if video['statut'] != 'ONLINE':
continue
video_url = video['url']
if not video_url:
continue
format_id = video['format']
ext = determine_ext(video_url)
if ext == 'f4m':
if georestricted:
# See https://github.com/rg3/youtube-dl/issues/3963
# m3u8 urls work fine
continue
f4m_url = self._download_webpage(
'http://hdfauth.francetv.fr/esi/TA?url=%s' % video_url,
video_id, 'Downloading f4m manifest token', fatal=False)
if f4m_url:
formats.extend(self._extract_f4m_formats(
f4m_url + '&hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, 1, format_id))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(video_url, video_id, 'mp4', m3u8_id=format_id))
elif video_url.startswith('rtmp'):
formats.append({
'url': video_url,
'format_id': 'rtmp-%s' % format_id,
'ext': 'flv',
'preference': 1,
})
else:
formats.append({
'url': video_url,
'format_id': format_id,
'preference': -1,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': info['titre'],
'description': clean_html(info['synopsis']),
'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']),
'timestamp': int_or_none(info['diffusion']['timestamp']),
'formats': formats,
}
class PluzzIE(FranceTVBaseInfoExtractor):
IE_NAME = 'pluzz.francetv.fr'
_VALID_URL = r'https?://pluzz\.francetv\.fr/videos/(.*?)\.html'
# Can't use tests, videos expire in 7 days
def _real_extract(self, url):
title = re.match(self._VALID_URL, url).group(1)
webpage = self._download_webpage(url, title)
video_id = self._search_regex(
r'data-diffusion="(\d+)"', webpage, 'ID')
return self._extract_video(video_id, 'Pluzz')
class FranceTvInfoIE(FranceTVBaseInfoExtractor):
IE_NAME = 'francetvinfo.fr'
_VALID_URL = r'https?://(?:www|mobile)\.francetvinfo\.fr/.*/(?P<title>.+)\.html'
_TESTS = [{
'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
'info_dict': {
'id': '84981923',
'ext': 'flv',
'title': 'Soir 3',
'upload_date': '20130826',
'timestamp': 1377548400,
},
}, {
'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',
'info_dict': {
'id': 'EV_20019',
'ext': 'mp4',
'title': 'Débat des candidats à la Commission européenne',
'description': 'Débat des candidats à la Commission européenne',
},
'params': {
            'skip_download': 'HLS (requires ffmpeg)'
},
'skip': 'Ce direct est terminé et sera disponible en rattrapage dans quelques minutes.',
}, {
'url': 'http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html',
'md5': 'f485bda6e185e7d15dbc69b72bae993e',
'info_dict': {
'id': '556e03339473995ee145930c',
'ext': 'mp4',
'title': 'Les entreprises familiales : le secret de la réussite',
'thumbnail': 're:^https?://.*\.jpe?g$',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
page_title = mobj.group('title')
webpage = self._download_webpage(url, page_title)
dmcloud_url = DailymotionCloudIE._extract_dmcloud_url(webpage)
if dmcloud_url:
return self.url_result(dmcloud_url, 'DailymotionCloud')
video_id, catalogue = self._search_regex(
r'id-video=([^@]+@[^"]+)', webpage, 'video id').split('@')
return self._extract_video(video_id, catalogue)
class FranceTVIE(FranceTVBaseInfoExtractor):
IE_NAME = 'francetv'
IE_DESC = 'France 2, 3, 4, 5 and Ô'
_VALID_URL = r'''(?x)
https?://
(?:
(?:www\.)?france[2345o]\.fr/
(?:
emissions/[^/]+/(?:videos|diffusions)|
emission/[^/]+|
videos|
jt
)
/|
embed\.francetv\.fr/\?ue=
)
(?P<id>[^/?]+)
'''
_TESTS = [
# france2
{
'url': 'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104',
'md5': 'c03fc87cb85429ffd55df32b9fc05523',
'info_dict': {
'id': '109169362',
'ext': 'flv',
'title': '13h15, le dimanche...',
'description': 'md5:9a0932bb465f22d377a449be9d1a0ff7',
'upload_date': '20140914',
'timestamp': 1410693600,
},
},
# france3
{
'url': 'http://www.france3.fr/emissions/pieces-a-conviction/diffusions/13-11-2013_145575',
'md5': '679bb8f8921f8623bd658fa2f8364da0',
'info_dict': {
'id': '000702326_CAPP_PicesconvictionExtrait313022013_120220131722_Au',
'ext': 'mp4',
'title': 'Le scandale du prix des médicaments',
'description': 'md5:1384089fbee2f04fc6c9de025ee2e9ce',
'upload_date': '20131113',
'timestamp': 1384380000,
},
},
# france4
{
'url': 'http://www.france4.fr/emissions/hero-corp/videos/rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
'md5': 'a182bf8d2c43d88d46ec48fbdd260c1c',
'info_dict': {
'id': 'rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
'ext': 'mp4',
'title': 'Hero Corp Making of - Extrait 1',
'description': 'md5:c87d54871b1790679aec1197e73d650a',
'upload_date': '20131106',
'timestamp': 1383766500,
},
},
# france5
{
'url': 'http://www.france5.fr/emissions/c-a-dire/videos/92837968',
'md5': '78f0f4064f9074438e660785bbf2c5d9',
'info_dict': {
'id': '108961659',
'ext': 'flv',
'title': 'C à dire ?!',
'description': 'md5:1a4aeab476eb657bf57c4ff122129f81',
'upload_date': '20140915',
'timestamp': 1410795000,
},
},
# franceo
{
'url': 'http://www.franceo.fr/jt/info-soir/18-07-2015',
'md5': '47d5816d3b24351cdce512ad7ab31da8',
'info_dict': {
'id': '125377621',
'ext': 'flv',
'title': 'Infô soir',
'description': 'md5:01b8c6915a3d93d8bbbd692651714309',
'upload_date': '20150718',
'timestamp': 1437241200,
'duration': 414,
},
},
{
# francetv embed
'url': 'http://embed.francetv.fr/?ue=8d7d3da1e3047c42ade5a5d7dfd3fc87',
'info_dict': {
'id': 'EV_30231',
'ext': 'flv',
'title': 'Alcaline, le concert avec Calogero',
'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
'upload_date': '20150226',
'timestamp': 1424989860,
'duration': 5400,
},
},
{
'url': 'http://www.france4.fr/emission/highlander/diffusion-du-17-07-2015-04h05',
'only_matching': True,
},
{
'url': 'http://www.franceo.fr/videos/125377617',
'only_matching': True,
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_id, catalogue = self._html_search_regex(
r'href="http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
webpage, 'video ID').split('@')
return self._extract_video(video_id, catalogue)
class GenerationQuoiIE(InfoExtractor):
IE_NAME = 'france2.fr:generation-quoi'
_VALID_URL = r'https?://generation-quoi\.france2\.fr/portrait/(?P<id>[^/?#]+)'
_TEST = {
'url': 'http://generation-quoi.france2.fr/portrait/garde-a-vous',
'info_dict': {
'id': 'k7FJX8VBcvvLmX4wA5Q',
'ext': 'mp4',
'title': 'Génération Quoi - Garde à Vous',
'uploader': 'Génération Quoi',
},
'params': {
# It uses Dailymotion
'skip_download': True,
},
}
def _real_extract(self, url):
display_id = self._match_id(url)
info_url = compat_urlparse.urljoin(url, '/medias/video/%s.json' % display_id)
info_json = self._download_webpage(info_url, display_id)
info = json.loads(info_json)
return self.url_result('http://www.dailymotion.com/video/%s' % info['id'],
ie='Dailymotion')
class CultureboxIE(FranceTVBaseInfoExtractor):
IE_NAME = 'culturebox.francetvinfo.fr'
_VALID_URL = r'https?://(?:m\.)?culturebox\.francetvinfo\.fr/(?P<name>.*?)(\?|$)'
_TEST = {
'url': 'http://culturebox.francetvinfo.fr/live/musique/musique-classique/le-livre-vermeil-de-montserrat-a-la-cathedrale-delne-214511',
'md5': '9b88dc156781c4dbebd4c3e066e0b1d6',
'info_dict': {
'id': 'EV_50111',
'ext': 'flv',
'title': "Le Livre Vermeil de Montserrat à la Cathédrale d'Elne",
'description': 'md5:f8a4ad202e8fe533e2c493cc12e739d9',
'upload_date': '20150320',
'timestamp': 1426892400,
'duration': 2760.9,
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
name = mobj.group('name')
webpage = self._download_webpage(url, name)
if ">Ce live n'est plus disponible en replay<" in webpage:
raise ExtractorError('Video %s is not available' % name, expected=True)
video_id, catalogue = self._search_regex(
r'"http://videos\.francetv\.fr/video/([^@]+@[^"]+)"', webpage, 'video id').split('@')
return self._extract_video(video_id, catalogue)
| unlicense |
TeamHG-Memex/agnostic | tests/test_sqlite.py | 1 | 2220 | import os
import shutil
import sqlite3
import tempfile
import unittest
from tests.abstract import AbstractDatabaseTest
class TestSqlLite(AbstractDatabaseTest, unittest.TestCase):
''' Integration tests for SQLite '''
def __init__(self, *args, **kwargs):
''' Override super class: set param style. '''
super().__init__(*args, **kwargs)
self._param = '?'
@property
def db_type(self):
''' The database type as a string. '''
return 'sqlite'
@property
def default_db(self):
        ''' We don't drop databases in this class, so this isn't used. '''
        raise NotImplementedError()
def connect_db(self, user, password, database):
''' Return a connection to the specified database. '''
db = sqlite3.connect(database)
db.isolation_level = None # Equivalent to autocommit
return db
def get_credentials_from_env(self):
'''
Override super class: SQLite does not use credentials, so we stub this
out.
'''
return None, None
def get_base_command(self):
''' Override super class: omit non-SQLite options. '''
command = [
'-t', self.db_type,
'-d', self._test_db,
]
return command
def setUp(self):
''' Override super class: don't need to drop or create database, just
create a temp file and delete it later. '''
_, self._test_db = tempfile.mkstemp(suffix='.db')
def tearDown(self):
''' Remove temporary DB file. '''
        os.unlink(self._test_db)
def table_columns(self, cursor, database, table_name):
''' Return a list of columns in the specified table. '''
sql = "pragma table_info('{}')".format(table_name)
cursor.execute(sql)
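        # Each pragma table_info row is (cid, name, type, notnull, dflt_value,
        # pk), so row[1] below is the column name.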
columns = [row[1] for row in cursor.fetchall()]
return columns
def table_exists(self, cursor, database, table_name):
''' Return true if the specified table exists. '''
table_query = '''
SELECT COUNT(*)
FROM sqlite_master
WHERE name = ?
'''
cursor.execute(table_query, (table_name,))
return cursor.fetchone()[0] == 1
| mit |
rosarior/rua | rua/apps/acls/models.py | 6 | 3257 | from __future__ import absolute_import
import logging
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from permissions.models import StoredPermission
from common.models import Singleton, SingletonManager
from .managers import AccessEntryManager, DefaultAccessEntryManager
from .classes import AccessObjectClass
from .api import get_classes
logger = logging.getLogger(__name__)
class AccessEntry(models.Model):
"""
    Model that holds the permission/object/actor relationship
"""
permission = models.ForeignKey(StoredPermission, verbose_name=_(u'permission'))
holder_type = models.ForeignKey(
ContentType,
related_name='access_holder',
limit_choices_to={'model__in': ('user', 'group', 'role')}
)
holder_id = models.PositiveIntegerField()
holder_object = generic.GenericForeignKey(
ct_field='holder_type',
fk_field='holder_id'
)
content_type = models.ForeignKey(
ContentType,
related_name='object_content_type'
)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey(
ct_field='content_type',
fk_field='object_id'
)
objects = AccessEntryManager()
class Meta:
verbose_name = _(u'access entry')
verbose_name_plural = _(u'access entries')
def __unicode__(self):
return u'%s: %s' % (self.content_type, self.content_object)
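# Illustrative usage (assumed, based on the generic relations above):
#
#     AccessEntry.objects.create(
#         permission=stored_perm,        # a StoredPermission instance
#         holder_object=some_user,       # fills holder_type / holder_id
#         content_object=some_document,  # fills content_type / object_id
#     )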
class DefaultAccessEntry(models.Model):
"""
Model that holds the permission, class, actor relationship, that will
be added upon the creation of an instance of said class
"""
@classmethod
def get_classes(cls):
return [AccessObjectClass.encapsulate(cls) for cls in get_classes()]
permission = models.ForeignKey(StoredPermission, verbose_name=_(u'permission'))
holder_type = models.ForeignKey(
ContentType,
limit_choices_to={'model__in': ('user', 'group', 'role')},
related_name='default_access_entry_holder'
)
holder_id = models.PositiveIntegerField()
holder_object = generic.GenericForeignKey(
ct_field='holder_type',
fk_field='holder_id'
)
content_type = models.ForeignKey(
ContentType,
related_name='default_access_entry_class'
)
objects = DefaultAccessEntryManager()
class Meta:
verbose_name = _(u'default access entry')
verbose_name_plural = _(u'default access entries')
def __unicode__(self):
        return u'%s' % self.content_type  # this model has no content_object field
class CreatorSingletonManager(SingletonManager):
def passthru_check(self, holder, creator=None):
if isinstance(holder, self.model):
            # TODO: raise an explicit error when holder is an instance and creator is None
return creator
else:
return holder
class CreatorSingleton(Singleton):
objects = CreatorSingletonManager()
def __unicode__(self):
return ugettext('Creator')
class Meta:
verbose_name = _(u'creator')
verbose_name_plural = _(u'creator')
| gpl-3.0 |
fillycheezstake/MissionPlanner | Lib/io.py | 53 | 3722 | """The io module provides the Python interfaces to stream handling. The
builtin open function is defined in this module.
At the top of the I/O hierarchy is the abstract base class IOBase. It
defines the basic interface to a stream. Note, however, that there is no
separation between reading and writing to streams; implementations are
allowed to throw an IOError if they do not support a given operation.
Extending IOBase is RawIOBase which deals simply with the reading and
writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide
an interface to OS files.
BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its
subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer
streams that are readable, writable, and both respectively.
BufferedRandom provides a buffered interface to random access
streams. BytesIO is a simple stream of in-memory bytes.
Another IOBase subclass, TextIOBase, deals with the encoding and decoding
of streams into text. TextIOWrapper, which extends it, is a buffered text
interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO
is an in-memory stream for text.
Argument names are not part of the specification, and only the arguments
of open() are intended to be used as keyword arguments.
data:
DEFAULT_BUFFER_SIZE
An int containing the default buffer size used by the module's buffered
I/O classes. open() uses the file's blksize (as obtained by os.stat) if
possible.
"""
# New I/O library conforming to PEP 3116.
# XXX edge cases when switching between reading/writing
# XXX need to support 1 meaning line-buffered
# XXX whenever an argument is None, use the default value
# XXX read/write ops should check readable/writable
# XXX buffered readinto should work with arbitrary buffer objects
# XXX use incremental encoder for text output, at least for UTF-16 and UTF-8-SIG
# XXX check writable, readable and seekable in appropriate places
__author__ = ("Guido van Rossum <[email protected]>, "
"Mike Verdone <[email protected]>, "
"Mark Russell <[email protected]>, "
"Antoine Pitrou <[email protected]>, "
"Amaury Forgeot d'Arc <[email protected]>, "
"Benjamin Peterson <[email protected]>")
__all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO",
"BytesIO", "StringIO", "BufferedIOBase",
"BufferedReader", "BufferedWriter", "BufferedRWPair",
"BufferedRandom", "TextIOBase", "TextIOWrapper",
"UnsupportedOperation", "SEEK_SET", "SEEK_CUR", "SEEK_END"]
import _io
import abc
from _io import (DEFAULT_BUFFER_SIZE, BlockingIOError, UnsupportedOperation,
open, FileIO, BytesIO, StringIO, BufferedReader,
BufferedWriter, BufferedRWPair, BufferedRandom,
IncrementalNewlineDecoder, TextIOWrapper)
OpenWrapper = _io.open # for compatibility with _pyio
# for seek()
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Declaring ABCs in C is tricky so we do it here.
# Method descriptions and default implementations are inherited from the C
# version however.
class IOBase(_io._IOBase):
__metaclass__ = abc.ABCMeta
class RawIOBase(_io._RawIOBase, IOBase):
pass
class BufferedIOBase(_io._BufferedIOBase, IOBase):
pass
class TextIOBase(_io._TextIOBase, IOBase):
pass
RawIOBase.register(FileIO)
for klass in (BytesIO, BufferedReader, BufferedWriter, BufferedRandom,
BufferedRWPair):
BufferedIOBase.register(klass)
for klass in (StringIO, TextIOWrapper):
TextIOBase.register(klass)
del klass
| gpl-3.0 |
damanjitsingh/StackExchange-python- | demo/versus.py | 3 | 1296 | #!/usr/bin/env python
from __future__ import print_function
import sys
sys.path.append('.')
sys.path.append('..')
import stackexchange, stackauth
if len(sys.argv) < 3:
print('Usage: versus.py YOUR_SO_UID THEIR_SO_UID')
sys.exit(1)
so = stackexchange.Site(stackexchange.StackOverflow, impose_throttling=True)
user1, user2 = (int(x) for x in sys.argv[1:])
rep1, rep2 = {}, {}
username1, username2 = (so.user(x).display_name for x in (user1, user2))
total_rep1, total_rep2 = 0, 0
sites = []
for site in stackauth.StackAuth().api_associated(so, user1):
rep1[site.on_site.name] = site.reputation
sites.append(site.on_site.name)
for site in stackauth.StackAuth().api_associated(so, user2):
rep2[site.on_site.name] = site.reputation
for site in sites:
total_rep1 += rep1[site]
if site in rep2:
total_rep2 += rep2[site]
max_user = username1
max_rep, other_rep = rep1[site], rep2.get(site, 0)
if rep2.get(site, 0) > rep1[site]:
max_user = username2
max_rep, other_rep = other_rep, max_rep
diff = max_rep - other_rep
print('%s: %s wins (+%d)' % (site, max_user, diff))
print('Overall: %s wins (+%d)' % (username1 if total_rep1 >= total_rep2 else username2, max(total_rep1, total_rep2) - min(total_rep1, total_rep2)))
| bsd-3-clause |
beiko-lab/gengis | bin/Lib/site-packages/wx-2.8-msw-unicode/wx/tools/Editra/src/extern/pygments/plugin.py | 27 | 1841 | # -*- coding: utf-8 -*-
"""
pygments.plugin
~~~~~~~~~~~~~~~
Pygments setuptools plugin interface. The methods defined
here also work if setuptools isn't installed but they just
return nothing.
lexer plugins::
[pygments.lexers]
yourlexer = yourmodule:YourLexer
formatter plugins::
[pygments.formatters]
yourformatter = yourformatter:YourFormatter
/.ext = yourformatter:YourFormatter
As you can see, you can define extensions for the formatter
with a leading slash.
    style plugins::
[pygments.styles]
yourstyle = yourstyle:YourStyle
    filter plugins::
        [pygments.filters]
yourfilter = yourfilter:YourFilter
:copyright: 2006-2007 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
try:
import pkg_resources
except ImportError:
pkg_resources = None
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
def find_plugin_lexers():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(LEXER_ENTRY_POINT):
yield entrypoint.load()
def find_plugin_formatters():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(FORMATTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_styles():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(STYLE_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_filters():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(FILTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
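# Illustrative setup.py entry-point declaration for a third-party plugin
# (assumed names; mirrors the entry points documented above):
#
#     setup(
#         ...,
#         entry_points={
#             'pygments.lexers': ['yourlexer = yourmodule:YourLexer'],
#             'pygments.styles': ['yourstyle = yourstyle:YourStyle'],
#         },
#     )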
| gpl-3.0 |
htwenhe/DJOA | env/Lib/site-packages/markdown/extensions/abbr.py | 123 | 2738 | '''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
See <https://pythonhosted.org/Markdown/extensions/abbreviations.html>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/) and
[Seemant Kulleen](http://www.kulleen.org/)
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
from ..inlinepatterns import Pattern
from ..util import etree, AtomicString
import re
# Global Vars
ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
class AbbrExtension(Extension):
""" Abbreviation Extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """
md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
class AbbrPreprocessor(Preprocessor):
""" Abbreviation Preprocessor - parse text for abbr references. """
def run(self, lines):
'''
Find and remove all Abbreviation references from the text.
Each reference is set as a new AbbrPattern in the markdown instance.
'''
new_text = []
for line in lines:
m = ABBR_REF_RE.match(line)
if m:
abbr = m.group('abbr').strip()
title = m.group('title').strip()
self.markdown.inlinePatterns['abbr-%s' % abbr] = \
AbbrPattern(self._generate_pattern(abbr), title)
else:
new_text.append(line)
return new_text
def _generate_pattern(self, text):
'''
        Given a string, returns a regex pattern to match that string.
'HTML' -> r'(?P<abbr>[H][T][M][L])'
Note: we force each char as a literal match (in brackets) as we don't
know what they will be beforehand.
'''
chars = list(text)
for i in range(len(chars)):
chars[i] = r'[%s]' % chars[i]
return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
class AbbrPattern(Pattern):
""" Abbreviation inline pattern. """
def __init__(self, pattern, title):
super(AbbrPattern, self).__init__(pattern)
self.title = title
def handleMatch(self, m):
abbr = etree.Element('abbr')
abbr.text = AtomicString(m.group('abbr'))
abbr.set('title', self.title)
return abbr
def makeExtension(*args, **kwargs):
return AbbrExtension(*args, **kwargs)
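# Illustrative usage (assumes the ``markdown`` package is importable):
#
#     import markdown
#     text = "The HTML spec\n\n*[HTML]: Hyper Text Markup Language"
#     markdown.markdown(text, extensions=[makeExtension()])
#     # -> '<p>The <abbr title="Hyper Text Markup Language">HTML</abbr> spec</p>'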
| mit |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/distutils/tests/test_spawn.py | 138 | 1924 | """Tests for distutils.spawn."""
import unittest
import os
import time
from test.test_support import captured_stdout, run_unittest
from distutils.spawn import _nt_quote_args
from distutils.spawn import spawn, find_executable
from distutils.errors import DistutilsExecError
from distutils.tests import support
class SpawnTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_nt_quote_args(self):
for (args, wanted) in ((['with space', 'nospace'],
['"with space"', 'nospace']),
(['nochange', 'nospace'],
['nochange', 'nospace'])):
res = _nt_quote_args(args)
self.assertEqual(res, wanted)
@unittest.skipUnless(os.name in ('nt', 'posix'),
'Runs only under posix or nt')
def test_spawn(self):
tmpdir = self.mkdtemp()
# creating something executable
# through the shell that returns 1
if os.name == 'posix':
exe = os.path.join(tmpdir, 'foo.sh')
self.write_file(exe, '#!/bin/sh\nexit 1')
os.chmod(exe, 0777)
else:
exe = os.path.join(tmpdir, 'foo.bat')
self.write_file(exe, 'exit 1')
os.chmod(exe, 0777)
self.assertRaises(DistutilsExecError, spawn, [exe])
# now something that works
if os.name == 'posix':
exe = os.path.join(tmpdir, 'foo.sh')
self.write_file(exe, '#!/bin/sh\nexit 0')
os.chmod(exe, 0777)
else:
exe = os.path.join(tmpdir, 'foo.bat')
self.write_file(exe, 'exit 0')
os.chmod(exe, 0777)
spawn([exe]) # should work without any error
def test_suite():
return unittest.makeSuite(SpawnTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| bsd-3-clause |
ritzk/ansible-modules-extras | cloud/rackspace/rax_mon_entity.py | 123 | 6171 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_mon_entity
short_description: Create or delete a Rackspace Cloud Monitoring entity
description:
- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
to monitor. Entities associate checks and alarms with a target system and
provide a convenient, centralized place to store IP addresses. Rackspace
monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
version_added: "2.0"
options:
label:
description:
- Defines a name for this entity. Must be a non-empty string between 1 and
255 characters long.
required: true
state:
description:
- Ensure that an entity with this C(name) exists or does not exist.
choices: ["present", "absent"]
agent_id:
description:
- Rackspace monitoring agent on the target device to which this entity is
bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
named_ip_addresses:
description:
- Hash of IP addresses that may be referenced by name by rax_mon_checks
added to this entity. Must be a dictionary of with keys that are names
between 1 and 64 characters long, and values that are valid IPv4 or IPv6
addresses.
metadata:
description:
- Hash of arbitrary C(name), C(value) pairs that are passed to associated
rax_mon_alarms. Names and values must all be between 1 and 255 characters
long.
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Entity example
gather_facts: False
hosts: local
connection: local
tasks:
- name: Ensure an entity exists
rax_mon_entity:
credentials: ~/.rax_pub
state: present
label: my_entity
named_ip_addresses:
web_box: 192.168.0.10
db_box: 192.168.0.11
meta:
hurf: durf
register: the_entity
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
metadata):
if len(label) < 1 or len(label) > 255:
module.fail_json(msg='label must be between 1 and 255 characters long')
changed = False
cm = pyrax.cloud_monitoring
if not cm:
module.fail_json(msg='Failed to instantiate client. This typically '
'indicates an invalid region or an incorrectly '
'capitalized region name.')
existing = []
for entity in cm.list_entities():
if label == entity.label:
existing.append(entity)
entity = None
if existing:
entity = existing[0]
if state == 'present':
should_update = False
should_delete = False
should_create = False
if len(existing) > 1:
module.fail_json(msg='%s existing entities have the label %s.' %
(len(existing), label))
if entity:
if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
should_delete = should_create = True
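                # The monitoring API offers no way to update ip_addresses in
                # place (inferred from this flow), so the entity is recreated.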
# Change an existing Entity, unless there's nothing to do.
should_update = agent_id and agent_id != entity.agent_id or \
(metadata and metadata != entity.metadata)
if should_update and not should_delete:
entity.update(agent_id, metadata)
changed = True
if should_delete:
entity.delete()
else:
should_create = True
if should_create:
# Create a new Entity.
entity = cm.create_entity(label=label, agent=agent_id,
ip_addresses=named_ip_addresses,
metadata=metadata)
changed = True
else:
# Delete the existing Entities.
for e in existing:
e.delete()
changed = True
if entity:
entity_dict = {
"id": entity.id,
"name": entity.name,
"agent_id": entity.agent_id,
}
module.exit_json(changed=changed, entity=entity_dict)
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
label=dict(required=True),
agent_id=dict(),
named_ip_addresses=dict(type='dict', default={}),
metadata=dict(type='dict', default={})
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
agent_id = module.params.get('agent_id')
named_ip_addresses = module.params.get('named_ip_addresses')
metadata = module.params.get('metadata')
setup_rax_module(module, pyrax)
cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata)
# Import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# Invoke the module.
main()
| gpl-3.0 |
jotes/ansible | v2/ansible/plugins/action/pause.py | 16 | 5387 | # Copyright 2012, Tim Bielawa <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import datetime
import sys
import time
from termios import tcflush, TCIFLUSH
from ansible.errors import *
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
''' pauses execution for a length or time, or until input is received '''
PAUSE_TYPES = ['seconds', 'minutes', 'prompt', '']
BYPASS_HOST_LOOP = True
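    # Illustrative playbook usage (assumed; matches the argument handling in
    # run() below):
    #
    #     - pause: minutes=5
    #     - pause: seconds=30
    #     - pause: prompt="Press enter when the maintenance window opens"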
def run(self, tmp=None, task_vars=dict()):
''' run the pause action module '''
duration_unit = 'minutes'
prompt = None
seconds = None
result = dict(
changed = False,
rc = 0,
stderr = '',
stdout = '',
start = None,
stop = None,
delta = None,
)
# FIXME: not sure if we can get this info directly like this anymore?
#hosts = ', '.join(self.runner.host_set)
# Is 'args' empty, then this is the default prompted pause
if self._task.args is None or len(self._task.args.keys()) == 0:
pause_type = 'prompt'
#prompt = "[%s]\nPress enter to continue:\n" % hosts
prompt = "[%s]\nPress enter to continue:\n" % self._task.get_name().strip()
# Are 'minutes' or 'seconds' keys that exist in 'args'?
elif 'minutes' in self._task.args or 'seconds' in self._task.args:
try:
if 'minutes' in self._task.args:
pause_type = 'minutes'
# The time() command operates in seconds so we need to
# recalculate for minutes=X values.
seconds = int(self._task.args['minutes']) * 60
else:
pause_type = 'seconds'
seconds = int(self._task.args['seconds'])
duration_unit = 'seconds'
except ValueError, e:
return dict(failed=True, msg="non-integer value given for prompt duration:\n%s" % str(e))
# Is 'prompt' a key in 'args'?
elif 'prompt' in self._task.args:
pause_type = 'prompt'
#prompt = "[%s]\n%s:\n" % (hosts, self._task.args['prompt'])
prompt = "[%s]\n%s:\n" % (self._task.get_name().strip(), self._task.args['prompt'])
# I have no idea what you're trying to do. But it's so wrong.
else:
return dict(failed=True, msg="invalid pause type given. must be one of: %s" % ", ".join(self.PAUSE_TYPES))
#vv("created 'pause' ActionModule: pause_type=%s, duration_unit=%s, calculated_seconds=%s, prompt=%s" % \
# (self.pause_type, self.duration_unit, self.seconds, self.prompt))
########################################################################
# Begin the hard work!
start = time.time()
result['start'] = str(datetime.datetime.now())
# FIXME: this is all very broken right now, as prompting from the worker side
# is not really going to be supported, and actions marked as BYPASS_HOST_LOOP
# probably should not be run through the executor engine at all. Also, ctrl+c
# is now captured on the parent thread, so it can't be caught here via the
# KeyboardInterrupt exception.
try:
if not pause_type == 'prompt':
print "(^C-c = continue early, ^C-a = abort)"
#print("[%s]\nPausing for %s seconds" % (hosts, seconds))
print("[%s]\nPausing for %s seconds" % (self._task.get_name().strip(), seconds))
time.sleep(seconds)
else:
# Clear out any unflushed buffered input which would
# otherwise be consumed by raw_input() prematurely.
#tcflush(sys.stdin, TCIFLUSH)
result['user_input'] = raw_input(prompt.encode(sys.stdout.encoding))
except KeyboardInterrupt:
while True:
print '\nAction? (a)bort/(c)ontinue: '
                c = sys.stdin.read(1)  # read a single character; getch() was undefined here
if c == 'c':
# continue playbook evaluation
break
elif c == 'a':
# abort further playbook evaluation
                    raise AnsibleError('user requested abort!')
finally:
duration = time.time() - start
result['stop'] = str(datetime.datetime.now())
result['delta'] = int(duration)
if duration_unit == 'minutes':
duration = round(duration / 60.0, 2)
else:
duration = round(duration, 2)
result['stdout'] = "Paused for %s %s" % (duration, duration_unit)
return result
| gpl-3.0 |
heeraj123/oh-mainline | vendor/packages/twisted/twisted/names/dns.py | 18 | 53586 | # -*- test-case-name: twisted.names.test.test_dns -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
DNS protocol implementation.
Future Plans:
- Get rid of some toplevels, maybe.
@author: Moshe Zadka
@author: Jean-Paul Calderone
"""
__all__ = [
'IEncodable', 'IRecord',
'A', 'A6', 'AAAA', 'AFSDB', 'CNAME', 'DNAME', 'HINFO',
'MAILA', 'MAILB', 'MB', 'MD', 'MF', 'MG', 'MINFO', 'MR', 'MX',
'NAPTR', 'NS', 'NULL', 'PTR', 'RP', 'SOA', 'SPF', 'SRV', 'TXT', 'WKS',
'ANY', 'CH', 'CS', 'HS', 'IN',
'ALL_RECORDS', 'AXFR', 'IXFR',
'EFORMAT', 'ENAME', 'ENOTIMP', 'EREFUSED', 'ESERVER',
'Record_A', 'Record_A6', 'Record_AAAA', 'Record_AFSDB', 'Record_CNAME',
'Record_DNAME', 'Record_HINFO', 'Record_MB', 'Record_MD', 'Record_MF',
'Record_MG', 'Record_MINFO', 'Record_MR', 'Record_MX', 'Record_NAPTR',
'Record_NS', 'Record_NULL', 'Record_PTR', 'Record_RP', 'Record_SOA',
'Record_SPF', 'Record_SRV', 'Record_TXT', 'Record_WKS',
'QUERY_CLASSES', 'QUERY_TYPES', 'REV_CLASSES', 'REV_TYPES', 'EXT_QUERIES',
'Charstr', 'Message', 'Name', 'Query', 'RRHeader', 'SimpleRecord',
'DNSDatagramProtocol', 'DNSMixin', 'DNSProtocol',
'OK', 'OP_INVERSE', 'OP_NOTIFY', 'OP_QUERY', 'OP_STATUS', 'OP_UPDATE',
'PORT',
'AuthoritativeDomainError', 'DNSQueryTimeoutError', 'DomainError',
]
# System imports
import warnings
import struct, random, types, socket
try:
import cStringIO as StringIO
except ImportError:
import StringIO
AF_INET6 = socket.AF_INET6
from zope.interface import implements, Interface, Attribute
# Twisted imports
from twisted.internet import protocol, defer
from twisted.internet.error import CannotListenError
from twisted.python import log, failure
from twisted.python import util as tputil
from twisted.python import randbytes
def randomSource():
"""
Wrapper around L{randbytes.secureRandom} to return 2 random chars.
"""
return struct.unpack('H', randbytes.secureRandom(2, fallback=True))[0]
PORT = 53
(A, NS, MD, MF, CNAME, SOA, MB, MG, MR, NULL, WKS, PTR, HINFO, MINFO, MX, TXT,
RP, AFSDB) = range(1, 19)
AAAA = 28
SRV = 33
NAPTR = 35
A6 = 38
DNAME = 39
SPF = 99
QUERY_TYPES = {
A: 'A',
NS: 'NS',
MD: 'MD',
MF: 'MF',
CNAME: 'CNAME',
SOA: 'SOA',
MB: 'MB',
MG: 'MG',
MR: 'MR',
NULL: 'NULL',
WKS: 'WKS',
PTR: 'PTR',
HINFO: 'HINFO',
MINFO: 'MINFO',
MX: 'MX',
TXT: 'TXT',
RP: 'RP',
AFSDB: 'AFSDB',
# 19 through 27? Eh, I'll get to 'em.
AAAA: 'AAAA',
SRV: 'SRV',
NAPTR: 'NAPTR',
A6: 'A6',
DNAME: 'DNAME',
SPF: 'SPF'
}
IXFR, AXFR, MAILB, MAILA, ALL_RECORDS = range(251, 256)
# "Extended" queries (Hey, half of these are deprecated, good job)
EXT_QUERIES = {
IXFR: 'IXFR',
AXFR: 'AXFR',
MAILB: 'MAILB',
MAILA: 'MAILA',
ALL_RECORDS: 'ALL_RECORDS'
}
REV_TYPES = dict([
(v, k) for (k, v) in QUERY_TYPES.items() + EXT_QUERIES.items()
])
IN, CS, CH, HS = range(1, 5)
ANY = 255
QUERY_CLASSES = {
IN: 'IN',
CS: 'CS',
CH: 'CH',
HS: 'HS',
ANY: 'ANY'
}
REV_CLASSES = dict([
(v, k) for (k, v) in QUERY_CLASSES.items()
])
# Opcodes
OP_QUERY, OP_INVERSE, OP_STATUS = range(3)
OP_NOTIFY = 4 # RFC 1996
OP_UPDATE = 5 # RFC 2136
# Response Codes
OK, EFORMAT, ESERVER, ENAME, ENOTIMP, EREFUSED = range(6)
class IRecord(Interface):
"""
    A single entry in a zone of authority.
"""
TYPE = Attribute("An indicator of what kind of record this is.")
# Backwards compatibility aliases - these should be deprecated or something I
# suppose. -exarkun
from twisted.names.error import DomainError, AuthoritativeDomainError
from twisted.names.error import DNSQueryTimeoutError
def str2time(s):
suffixes = (
('S', 1), ('M', 60), ('H', 60 * 60), ('D', 60 * 60 * 24),
('W', 60 * 60 * 24 * 7), ('Y', 60 * 60 * 24 * 365)
)
if isinstance(s, types.StringType):
s = s.upper().strip()
for (suff, mult) in suffixes:
if s.endswith(suff):
return int(float(s[:-1]) * mult)
try:
s = int(s)
except ValueError:
raise ValueError, "Invalid time interval specifier: " + s
return s
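# Illustrative behavior of str2time (not from the original source):
#     str2time('1H') == 3600; str2time('2D') == 172800; str2time(90) == 90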
def readPrecisely(file, l):
buff = file.read(l)
if len(buff) < l:
raise EOFError
return buff
class IEncodable(Interface):
"""
Interface for something which can be encoded to and decoded
from a file object.
"""
def encode(strio, compDict = None):
"""
Write a representation of this object to the given
file object.
@type strio: File-like object
@param strio: The stream to which to write bytes
@type compDict: C{dict} or C{None}
@param compDict: A dictionary of backreference addresses that have
have already been written to this stream and that may be used for
compression.
"""
def decode(strio, length = None):
"""
Reconstruct an object from data read from the given
file object.
@type strio: File-like object
@param strio: The stream from which bytes may be read
@type length: C{int} or C{None}
@param length: The number of bytes in this RDATA field. Most
implementations can ignore this value. Only in the case of
records similar to TXT where the total length is in no way
encoded in the data is it necessary.
"""
class Charstr(object):
implements(IEncodable)
def __init__(self, string=''):
if not isinstance(string, str):
raise ValueError("%r is not a string" % (string,))
self.string = string
def encode(self, strio, compDict=None):
"""
Encode this Character string into the appropriate byte format.
@type strio: file
@param strio: The byte representation of this Charstr will be written
to this file.
"""
string = self.string
ind = len(string)
strio.write(chr(ind))
strio.write(string)
def decode(self, strio, length=None):
"""
Decode a byte string into this Name.
@type strio: file
@param strio: Bytes will be read from this file until the full string
is decoded.
@raise EOFError: Raised when there are not enough bytes available from
C{strio}.
"""
self.string = ''
l = ord(readPrecisely(strio, 1))
self.string = readPrecisely(strio, l)
def __eq__(self, other):
if isinstance(other, Charstr):
return self.string == other.string
return False
def __hash__(self):
return hash(self.string)
def __str__(self):
return self.string
class Name:
implements(IEncodable)
def __init__(self, name=''):
assert isinstance(name, types.StringTypes), "%r is not a string" % (name,)
self.name = name
def encode(self, strio, compDict=None):
"""
Encode this Name into the appropriate byte format.
@type strio: file
@param strio: The byte representation of this Name will be written to
this file.
@type compDict: dict
@param compDict: dictionary of Names that have already been encoded
and whose addresses may be backreferenced by this Name (for the purpose
of reducing the message size).
"""
name = self.name
while name:
if compDict is not None:
if name in compDict:
strio.write(
struct.pack("!H", 0xc000 | compDict[name]))
return
else:
compDict[name] = strio.tell() + Message.headerSize
ind = name.find('.')
if ind > 0:
label, name = name[:ind], name[ind + 1:]
else:
label, name = name, ''
ind = len(label)
strio.write(chr(ind))
strio.write(label)
strio.write(chr(0))
def decode(self, strio, length=None):
"""
Decode a byte string into this Name.
@type strio: file
@param strio: Bytes will be read from this file until the full Name
is decoded.
@raise EOFError: Raised when there are not enough bytes available
from C{strio}.
"""
self.name = ''
off = 0
while 1:
l = ord(readPrecisely(strio, 1))
if l == 0:
if off > 0:
strio.seek(off)
return
if (l >> 6) == 3:
new_off = ((l&63) << 8
| ord(readPrecisely(strio, 1)))
if off == 0:
off = strio.tell()
strio.seek(new_off)
continue
label = readPrecisely(strio, l)
if self.name == '':
self.name = label
else:
self.name = self.name + '.' + label
def __eq__(self, other):
if isinstance(other, Name):
return str(self) == str(other)
return 0
def __hash__(self):
return hash(str(self))
def __str__(self):
return self.name
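# A minimal round-trip sketch for Name (illustrative, not part of the original
# module; relies on the StringIO import at the top of this file):
#
#     s = StringIO.StringIO()
#     Name('example.com').encode(s)   # writes '\x07example\x03com\x00'
#     s.seek(0)
#     n = Name(); n.decode(s)
#     str(n)                          # -> 'example.com'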
class Query:
"""
Represent a single DNS query.
@ivar name: The name about which this query is requesting information.
@ivar type: The query type.
@ivar cls: The query class.
"""
implements(IEncodable)
name = None
type = None
cls = None
def __init__(self, name='', type=A, cls=IN):
"""
@type name: C{str}
@param name: The name about which to request information.
@type type: C{int}
@param type: The query type.
@type cls: C{int}
@param cls: The query class.
"""
self.name = Name(name)
self.type = type
self.cls = cls
def encode(self, strio, compDict=None):
self.name.encode(strio, compDict)
strio.write(struct.pack("!HH", self.type, self.cls))
def decode(self, strio, length = None):
self.name.decode(strio)
buff = readPrecisely(strio, 4)
self.type, self.cls = struct.unpack("!HH", buff)
def __hash__(self):
return hash((str(self.name).lower(), self.type, self.cls))
def __cmp__(self, other):
return isinstance(other, Query) and cmp(
(str(self.name).lower(), self.type, self.cls),
(str(other.name).lower(), other.type, other.cls)
) or cmp(self.__class__, other.__class__)
def __str__(self):
t = QUERY_TYPES.get(self.type, EXT_QUERIES.get(self.type, 'UNKNOWN (%d)' % self.type))
c = QUERY_CLASSES.get(self.cls, 'UNKNOWN (%d)' % self.cls)
return '<Query %s %s %s>' % (self.name, t, c)
def __repr__(self):
return 'Query(%r, %r, %r)' % (str(self.name), self.type, self.cls)
class RRHeader(tputil.FancyEqMixin):
"""
A resource record header.
@cvar fmt: C{str} specifying the byte format of an RR.
@ivar name: The name about which this reply contains information.
@ivar type: The query type of the original request.
@ivar cls: The query class of the original request.
@ivar ttl: The time-to-live for this record.
@ivar payload: An object that implements the IEncodable interface
@ivar auth: Whether this header is authoritative or not.
"""
implements(IEncodable)
compareAttributes = ('name', 'type', 'cls', 'ttl', 'payload', 'auth')
fmt = "!HHIH"
name = None
type = None
cls = None
ttl = None
payload = None
rdlength = None
cachedResponse = None
def __init__(self, name='', type=A, cls=IN, ttl=0, payload=None, auth=False):
"""
@type name: C{str}
@param name: The name about which this reply contains information.
@type type: C{int}
@param type: The query type.
@type cls: C{int}
@param cls: The query class.
@type ttl: C{int}
@param ttl: Time to live for this record.
@type payload: An object implementing C{IEncodable}
@param payload: A Query Type specific data object.
"""
assert (payload is None) or (payload.TYPE == type)
self.name = Name(name)
self.type = type
self.cls = cls
self.ttl = ttl
self.payload = payload
self.auth = auth
def encode(self, strio, compDict=None):
self.name.encode(strio, compDict)
strio.write(struct.pack(self.fmt, self.type, self.cls, self.ttl, 0))
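        # The trailing 0 packed above is a placeholder for rdlength; once the
        # payload has been encoded, its real length is patched in two bytes back.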
if self.payload:
prefix = strio.tell()
self.payload.encode(strio, compDict)
aft = strio.tell()
strio.seek(prefix - 2, 0)
strio.write(struct.pack('!H', aft - prefix))
strio.seek(aft, 0)
def decode(self, strio, length = None):
self.name.decode(strio)
l = struct.calcsize(self.fmt)
buff = readPrecisely(strio, l)
r = struct.unpack(self.fmt, buff)
self.type, self.cls, self.ttl, self.rdlength = r
def isAuthoritative(self):
return self.auth
def __str__(self):
t = QUERY_TYPES.get(self.type, EXT_QUERIES.get(self.type, 'UNKNOWN (%d)' % self.type))
c = QUERY_CLASSES.get(self.cls, 'UNKNOWN (%d)' % self.cls)
return '<RR name=%s type=%s class=%s ttl=%ds auth=%s>' % (self.name, t, c, self.ttl, self.auth and 'True' or 'False')
__repr__ = __str__
class SimpleRecord(tputil.FancyStrMixin, tputil.FancyEqMixin):
"""
A Resource Record which consists of a single RFC 1035 domain-name.
@type name: L{Name}
@ivar name: The name associated with this record.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
"""
implements(IEncodable, IRecord)
showAttributes = (('name', 'name', '%s'), 'ttl')
compareAttributes = ('name', 'ttl')
TYPE = None
name = None
def __init__(self, name='', ttl=None):
self.name = Name(name)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
self.name.encode(strio, compDict)
def decode(self, strio, length = None):
self.name = Name()
self.name.decode(strio)
def __hash__(self):
return hash(self.name)
# Kinds of RRs - oh my!
class Record_NS(SimpleRecord):
"""
An authoritative nameserver.
"""
TYPE = NS
fancybasename = 'NS'
class Record_MD(SimpleRecord):
"""
A mail destination.
This record type is obsolete.
@see: L{Record_MX}
"""
TYPE = MD
fancybasename = 'MD'
class Record_MF(SimpleRecord):
"""
A mail forwarder.
This record type is obsolete.
@see: L{Record_MX}
"""
TYPE = MF
fancybasename = 'MF'
class Record_CNAME(SimpleRecord):
"""
The canonical name for an alias.
"""
TYPE = CNAME
fancybasename = 'CNAME'
class Record_MB(SimpleRecord):
"""
A mailbox domain name.
This is an experimental record type.
"""
TYPE = MB
fancybasename = 'MB'
class Record_MG(SimpleRecord):
"""
A mail group member.
This is an experimental record type.
"""
TYPE = MG
fancybasename = 'MG'
class Record_MR(SimpleRecord):
"""
A mail rename domain name.
This is an experimental record type.
"""
TYPE = MR
fancybasename = 'MR'
class Record_PTR(SimpleRecord):
"""
A domain name pointer.
"""
TYPE = PTR
fancybasename = 'PTR'
class Record_DNAME(SimpleRecord):
"""
A non-terminal DNS name redirection.
This record type provides the capability to map an entire subtree of the
DNS name space to another domain. It differs from the CNAME record which
maps a single node of the name space.
@see: U{http://www.faqs.org/rfcs/rfc2672.html}
@see: U{http://www.faqs.org/rfcs/rfc3363.html}
"""
TYPE = DNAME
fancybasename = 'DNAME'
class Record_A(tputil.FancyEqMixin):
"""
An IPv4 host address.
@type address: C{str}
@ivar address: The packed network-order representation of the IPv4 address
associated with this record.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
"""
implements(IEncodable, IRecord)
compareAttributes = ('address', 'ttl')
TYPE = A
address = None
def __init__(self, address='0.0.0.0', ttl=None):
address = socket.inet_aton(address)
self.address = address
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(self.address)
def decode(self, strio, length = None):
self.address = readPrecisely(strio, 4)
def __hash__(self):
return hash(self.address)
def __str__(self):
return '<A address=%s ttl=%s>' % (self.dottedQuad(), self.ttl)
__repr__ = __str__
def dottedQuad(self):
return socket.inet_ntoa(self.address)
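# Illustrative (not from the original source):
#     Record_A('127.0.0.1').dottedQuad() == '127.0.0.1'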
class Record_SOA(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
Marks the start of a zone of authority.
This record describes parameters which are shared by all records within a
particular zone.
@type mname: L{Name}
@ivar mname: The domain-name of the name server that was the original or
primary source of data for this zone.
@type rname: L{Name}
@ivar rname: A domain-name which specifies the mailbox of the person
responsible for this zone.
@type serial: C{int}
@ivar serial: The unsigned 32 bit version number of the original copy of
the zone. Zone transfers preserve this value. This value wraps and
should be compared using sequence space arithmetic.
@type refresh: C{int}
@ivar refresh: A 32 bit time interval before the zone should be refreshed.
@type minimum: C{int}
@ivar minimum: The unsigned 32 bit minimum TTL field that should be
exported with any RR from this zone.
@type expire: C{int}
@ivar expire: A 32 bit time value that specifies the upper limit on the
time interval that can elapse before the zone is no longer
authoritative.
@type retry: C{int}
@ivar retry: A 32 bit time interval that should elapse before a failed
refresh should be retried.
@type ttl: C{int}
@ivar ttl: The default TTL to use for records served from this zone.
"""
implements(IEncodable, IRecord)
fancybasename = 'SOA'
compareAttributes = ('serial', 'mname', 'rname', 'refresh', 'expire', 'retry', 'minimum', 'ttl')
showAttributes = (('mname', 'mname', '%s'), ('rname', 'rname', '%s'), 'serial', 'refresh', 'retry', 'expire', 'minimum', 'ttl')
TYPE = SOA
def __init__(self, mname='', rname='', serial=0, refresh=0, retry=0, expire=0, minimum=0, ttl=None):
self.mname, self.rname = Name(mname), Name(rname)
self.serial, self.refresh = str2time(serial), str2time(refresh)
self.minimum, self.expire = str2time(minimum), str2time(expire)
self.retry = str2time(retry)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
self.mname.encode(strio, compDict)
self.rname.encode(strio, compDict)
strio.write(
struct.pack(
'!LlllL',
self.serial, self.refresh, self.retry, self.expire,
self.minimum
)
)
def decode(self, strio, length = None):
self.mname, self.rname = Name(), Name()
self.mname.decode(strio)
self.rname.decode(strio)
r = struct.unpack('!LlllL', readPrecisely(strio, 20))
self.serial, self.refresh, self.retry, self.expire, self.minimum = r
def __hash__(self):
return hash((
self.serial, self.mname, self.rname,
self.refresh, self.expire, self.retry
))
class Record_NULL(tputil.FancyStrMixin, tputil.FancyEqMixin):
"""
A null record.
This is an experimental record type.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
"""
implements(IEncodable, IRecord)
fancybasename = 'NULL'
showAttributes = compareAttributes = ('payload', 'ttl')
TYPE = NULL
def __init__(self, payload=None, ttl=None):
self.payload = payload
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(self.payload)
def decode(self, strio, length = None):
self.payload = readPrecisely(strio, length)
def __hash__(self):
return hash(self.payload)
class Record_WKS(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
A well known service description.
This record type is obsolete. See L{Record_SRV}.
@type address: C{str}
@ivar address: The packed network-order representation of the IPv4 address
associated with this record.
@type protocol: C{int}
@ivar protocol: The 8 bit IP protocol number for which this service map is
relevant.
@type map: C{str}
@ivar map: A bitvector indicating the services available at the specified
address.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
"""
implements(IEncodable, IRecord)
fancybasename = "WKS"
compareAttributes = ('address', 'protocol', 'map', 'ttl')
showAttributes = [('_address', 'address', '%s'), 'protocol', 'ttl']
TYPE = WKS
_address = property(lambda self: socket.inet_ntoa(self.address))
def __init__(self, address='0.0.0.0', protocol=0, map='', ttl=None):
self.address = socket.inet_aton(address)
self.protocol, self.map = protocol, map
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(self.address)
strio.write(struct.pack('!B', self.protocol))
strio.write(self.map)
def decode(self, strio, length = None):
self.address = readPrecisely(strio, 4)
self.protocol = struct.unpack('!B', readPrecisely(strio, 1))[0]
self.map = readPrecisely(strio, length - 5)
def __hash__(self):
return hash((self.address, self.protocol, self.map))
class Record_AAAA(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
An IPv6 host address.
@type address: C{str}
@ivar address: The packed network-order representation of the IPv6 address
associated with this record.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
@see: U{http://www.faqs.org/rfcs/rfc1886.html}
"""
implements(IEncodable, IRecord)
TYPE = AAAA
fancybasename = 'AAAA'
showAttributes = (('_address', 'address', '%s'), 'ttl')
compareAttributes = ('address', 'ttl')
_address = property(lambda self: socket.inet_ntop(AF_INET6, self.address))
def __init__(self, address = '::', ttl=None):
self.address = socket.inet_pton(AF_INET6, address)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(self.address)
def decode(self, strio, length = None):
self.address = readPrecisely(strio, 16)
def __hash__(self):
return hash(self.address)
class Record_A6(tputil.FancyStrMixin, tputil.FancyEqMixin):
"""
An IPv6 address.
This is an experimental record type.
@type prefixLen: C{int}
@ivar prefixLen: The length of the suffix.
@type suffix: C{str}
@ivar suffix: An IPv6 address suffix in network order.
@type prefix: L{Name}
@ivar prefix: If specified, a name which will be used as a prefix for other
A6 records.
@type bytes: C{int}
@ivar bytes: The length of the prefix.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
@see: U{http://www.faqs.org/rfcs/rfc2874.html}
@see: U{http://www.faqs.org/rfcs/rfc3363.html}
@see: U{http://www.faqs.org/rfcs/rfc3364.html}
"""
implements(IEncodable, IRecord)
TYPE = A6
fancybasename = 'A6'
showAttributes = (('_suffix', 'suffix', '%s'), ('prefix', 'prefix', '%s'), 'ttl')
compareAttributes = ('prefixLen', 'prefix', 'suffix', 'ttl')
_suffix = property(lambda self: socket.inet_ntop(AF_INET6, self.suffix))
def __init__(self, prefixLen=0, suffix='::', prefix='', ttl=None):
self.prefixLen = prefixLen
self.suffix = socket.inet_pton(AF_INET6, suffix)
self.prefix = Name(prefix)
self.bytes = int((128 - self.prefixLen) / 8.0)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(struct.pack('!B', self.prefixLen))
if self.bytes:
strio.write(self.suffix[-self.bytes:])
if self.prefixLen:
# This may not be compressed
self.prefix.encode(strio, None)
def decode(self, strio, length = None):
self.prefixLen = struct.unpack('!B', readPrecisely(strio, 1))[0]
self.bytes = int((128 - self.prefixLen) / 8.0)
if self.bytes:
self.suffix = '\x00' * (16 - self.bytes) + readPrecisely(strio, self.bytes)
if self.prefixLen:
self.prefix.decode(strio)
def __eq__(self, other):
if isinstance(other, Record_A6):
return (self.prefixLen == other.prefixLen and
self.suffix[-self.bytes:] == other.suffix[-self.bytes:] and
self.prefix == other.prefix and
self.ttl == other.ttl)
return NotImplemented
def __hash__(self):
return hash((self.prefixLen, self.suffix[-self.bytes:], self.prefix))
def __str__(self):
return '<A6 %s %s (%d) ttl=%s>' % (
self.prefix,
socket.inet_ntop(AF_INET6, self.suffix),
self.prefixLen, self.ttl
)
class Record_SRV(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
The location of the server(s) for a specific protocol and domain.
This is an experimental record type.
@type priority: C{int}
@ivar priority: The priority of this target host. A client MUST attempt to
contact the target host with the lowest-numbered priority it can reach;
target hosts with the same priority SHOULD be tried in an order defined
by the weight field.
@type weight: C{int}
@ivar weight: Specifies a relative weight for entries with the same
priority. Larger weights SHOULD be given a proportionately higher
probability of being selected.
@type port: C{int}
@ivar port: The port on this target host of this service.
@type target: L{Name}
@ivar target: The domain name of the target host. There MUST be one or
more address records for this name, the name MUST NOT be an alias (in
the sense of RFC 1034 or RFC 2181). Implementors are urged, but not
required, to return the address record(s) in the Additional Data
section. Unless and until permitted by future standards action, name
compression is not to be used for this field.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
@see: U{http://www.faqs.org/rfcs/rfc2782.html}
"""
implements(IEncodable, IRecord)
TYPE = SRV
fancybasename = 'SRV'
compareAttributes = ('priority', 'weight', 'target', 'port', 'ttl')
showAttributes = ('priority', 'weight', ('target', 'target', '%s'), 'port', 'ttl')
def __init__(self, priority=0, weight=0, port=0, target='', ttl=None):
self.priority = int(priority)
self.weight = int(weight)
self.port = int(port)
self.target = Name(target)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(struct.pack('!HHH', self.priority, self.weight, self.port))
# This can't be compressed
self.target.encode(strio, None)
def decode(self, strio, length = None):
r = struct.unpack('!HHH', readPrecisely(strio, struct.calcsize('!HHH')))
self.priority, self.weight, self.port = r
self.target = Name()
self.target.decode(strio)
def __hash__(self):
return hash((self.priority, self.weight, self.port, self.target))
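# Illustrative sketch (not part of Twisted): the RFC 2782 selection rule
# described in Record_SRV's docstring -- take the lowest priority first,
# then make a weighted random choice among records sharing that priority.
# Assumes a non-empty list of Record_SRV-like objects and the module-level
# `random` import already used elsewhere in this file.
def _pick_srv_sketch(records):
    best = min(r.priority for r in records)
    candidates = [r for r in records if r.priority == best]
    point = random.uniform(0, sum(r.weight for r in candidates))
    for record in candidates:
        point -= record.weight
        if point <= 0:
            return record
    return candidates[-1]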
class Record_NAPTR(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
The location of the server(s) for a specific protocol and domain.
@type order: C{int}
@ivar order: An integer specifying the order in which the NAPTR records
MUST be processed to ensure the correct ordering of rules. Low numbers
are processed before high numbers.
@type preference: C{int}
@ivar preference: An integer that specifies the order in which NAPTR
records with equal "order" values SHOULD be processed, low numbers
being processed before high numbers.
@type flag: L{Charstr}
@ivar flag: A <character-string> containing flags to control aspects of the
rewriting and interpretation of the fields in the record. Flags
        are single characters from the set [A-Z0-9]. The case of the alphabetic
characters is not significant.
At this time only four flags, "S", "A", "U", and "P", are defined.
@type service: L{Charstr}
@ivar service: Specifies the service(s) available down this rewrite path.
It may also specify the particular protocol that is used to talk with a
service. A protocol MUST be specified if the flags field states that
the NAPTR is terminal.
@type regexp: L{Charstr}
@ivar regexp: A STRING containing a substitution expression that is applied
to the original string held by the client in order to construct the
next domain name to lookup.
@type replacement: L{Name}
@ivar replacement: The next NAME to query for NAPTR, SRV, or address
records depending on the value of the flags field. This MUST be a
fully qualified domain-name.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
@see: U{http://www.faqs.org/rfcs/rfc2915.html}
"""
implements(IEncodable, IRecord)
TYPE = NAPTR
compareAttributes = ('order', 'preference', 'flags', 'service', 'regexp',
'replacement')
fancybasename = 'NAPTR'
showAttributes = ('order', 'preference', ('flags', 'flags', '%s'),
('service', 'service', '%s'), ('regexp', 'regexp', '%s'),
('replacement', 'replacement', '%s'), 'ttl')
def __init__(self, order=0, preference=0, flags='', service='', regexp='',
replacement='', ttl=None):
self.order = int(order)
self.preference = int(preference)
self.flags = Charstr(flags)
self.service = Charstr(service)
self.regexp = Charstr(regexp)
self.replacement = Name(replacement)
self.ttl = str2time(ttl)
def encode(self, strio, compDict=None):
strio.write(struct.pack('!HH', self.order, self.preference))
# This can't be compressed
self.flags.encode(strio, None)
self.service.encode(strio, None)
self.regexp.encode(strio, None)
self.replacement.encode(strio, None)
def decode(self, strio, length=None):
r = struct.unpack('!HH', readPrecisely(strio, struct.calcsize('!HH')))
self.order, self.preference = r
self.flags = Charstr()
self.service = Charstr()
self.regexp = Charstr()
self.replacement = Name()
self.flags.decode(strio)
self.service.decode(strio)
self.regexp.decode(strio)
self.replacement.decode(strio)
def __hash__(self):
return hash((
self.order, self.preference, self.flags,
self.service, self.regexp, self.replacement))
class Record_AFSDB(tputil.FancyStrMixin, tputil.FancyEqMixin):
"""
Map from a domain name to the name of an AFS cell database server.
@type subtype: C{int}
@ivar subtype: In the case of subtype 1, the host has an AFS version 3.0
Volume Location Server for the named AFS cell. In the case of subtype
2, the host has an authenticated name server holding the cell-root
directory node for the named DCE/NCA cell.
@type hostname: L{Name}
@ivar hostname: The domain name of a host that has a server for the cell
named by this record.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
@see: U{http://www.faqs.org/rfcs/rfc1183.html}
"""
implements(IEncodable, IRecord)
TYPE = AFSDB
fancybasename = 'AFSDB'
compareAttributes = ('subtype', 'hostname', 'ttl')
showAttributes = ('subtype', ('hostname', 'hostname', '%s'), 'ttl')
def __init__(self, subtype=0, hostname='', ttl=None):
self.subtype = int(subtype)
self.hostname = Name(hostname)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(struct.pack('!H', self.subtype))
self.hostname.encode(strio, compDict)
def decode(self, strio, length = None):
r = struct.unpack('!H', readPrecisely(strio, struct.calcsize('!H')))
self.subtype, = r
self.hostname.decode(strio)
def __hash__(self):
return hash((self.subtype, self.hostname))
class Record_RP(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
The responsible person for a domain.
@type mbox: L{Name}
@ivar mbox: A domain name that specifies the mailbox for the responsible
person.
@type txt: L{Name}
@ivar txt: A domain name for which TXT RR's exist (indirection through
which allows information sharing about the contents of this RP record).
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
@see: U{http://www.faqs.org/rfcs/rfc1183.html}
"""
implements(IEncodable, IRecord)
TYPE = RP
fancybasename = 'RP'
compareAttributes = ('mbox', 'txt', 'ttl')
showAttributes = (('mbox', 'mbox', '%s'), ('txt', 'txt', '%s'), 'ttl')
def __init__(self, mbox='', txt='', ttl=None):
self.mbox = Name(mbox)
self.txt = Name(txt)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
self.mbox.encode(strio, compDict)
self.txt.encode(strio, compDict)
def decode(self, strio, length = None):
self.mbox = Name()
self.txt = Name()
self.mbox.decode(strio)
self.txt.decode(strio)
def __hash__(self):
return hash((self.mbox, self.txt))
class Record_HINFO(tputil.FancyStrMixin, tputil.FancyEqMixin):
"""
Host information.
@type cpu: C{str}
@ivar cpu: Specifies the CPU type.
@type os: C{str}
@ivar os: Specifies the OS.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
"""
implements(IEncodable, IRecord)
TYPE = HINFO
fancybasename = 'HINFO'
showAttributes = compareAttributes = ('cpu', 'os', 'ttl')
def __init__(self, cpu='', os='', ttl=None):
self.cpu, self.os = cpu, os
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(struct.pack('!B', len(self.cpu)) + self.cpu)
strio.write(struct.pack('!B', len(self.os)) + self.os)
def decode(self, strio, length = None):
cpu = struct.unpack('!B', readPrecisely(strio, 1))[0]
self.cpu = readPrecisely(strio, cpu)
os = struct.unpack('!B', readPrecisely(strio, 1))[0]
self.os = readPrecisely(strio, os)
def __eq__(self, other):
if isinstance(other, Record_HINFO):
return (self.os.lower() == other.os.lower() and
self.cpu.lower() == other.cpu.lower() and
self.ttl == other.ttl)
return NotImplemented
def __hash__(self):
return hash((self.os.lower(), self.cpu.lower()))
class Record_MINFO(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
Mailbox or mail list information.
This is an experimental record type.
@type rmailbx: L{Name}
@ivar rmailbx: A domain-name which specifies a mailbox which is responsible
for the mailing list or mailbox. If this domain name names the root,
the owner of the MINFO RR is responsible for itself.
@type emailbx: L{Name}
@ivar emailbx: A domain-name which specifies a mailbox which is to receive
error messages related to the mailing list or mailbox specified by the
owner of the MINFO record. If this domain name names the root, errors
should be returned to the sender of the message.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
"""
implements(IEncodable, IRecord)
TYPE = MINFO
rmailbx = None
emailbx = None
fancybasename = 'MINFO'
compareAttributes = ('rmailbx', 'emailbx', 'ttl')
showAttributes = (('rmailbx', 'responsibility', '%s'),
('emailbx', 'errors', '%s'),
'ttl')
def __init__(self, rmailbx='', emailbx='', ttl=None):
self.rmailbx, self.emailbx = Name(rmailbx), Name(emailbx)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
self.rmailbx.encode(strio, compDict)
self.emailbx.encode(strio, compDict)
def decode(self, strio, length = None):
self.rmailbx, self.emailbx = Name(), Name()
self.rmailbx.decode(strio)
self.emailbx.decode(strio)
def __hash__(self):
return hash((self.rmailbx, self.emailbx))
class Record_MX(tputil.FancyStrMixin, tputil.FancyEqMixin):
"""
Mail exchange.
@type preference: C{int}
@ivar preference: Specifies the preference given to this RR among others at
the same owner. Lower values are preferred.
@type name: L{Name}
@ivar name: A domain-name which specifies a host willing to act as a mail
exchange.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
"""
implements(IEncodable, IRecord)
TYPE = MX
fancybasename = 'MX'
compareAttributes = ('preference', 'name', 'ttl')
showAttributes = ('preference', ('name', 'name', '%s'), 'ttl')
def __init__(self, preference=0, name='', ttl=None, **kwargs):
self.preference, self.name = int(preference), Name(kwargs.get('exchange', name))
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(struct.pack('!H', self.preference))
self.name.encode(strio, compDict)
def decode(self, strio, length = None):
self.preference = struct.unpack('!H', readPrecisely(strio, 2))[0]
self.name = Name()
self.name.decode(strio)
def exchange(self):
warnings.warn("use Record_MX.name instead", DeprecationWarning, stacklevel=2)
return self.name
exchange = property(exchange)
def __hash__(self):
return hash((self.preference, self.name))
# Oh god, Record_TXT how I hate thee.
class Record_TXT(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
Freeform text.
@type data: C{list} of C{str}
@ivar data: Freeform text which makes up this record.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be cached.
"""
implements(IEncodable, IRecord)
TYPE = TXT
fancybasename = 'TXT'
showAttributes = compareAttributes = ('data', 'ttl')
def __init__(self, *data, **kw):
self.data = list(data)
# arg man python sucks so bad
self.ttl = str2time(kw.get('ttl', None))
def encode(self, strio, compDict = None):
for d in self.data:
strio.write(struct.pack('!B', len(d)) + d)
def decode(self, strio, length = None):
soFar = 0
self.data = []
while soFar < length:
L = struct.unpack('!B', readPrecisely(strio, 1))[0]
self.data.append(readPrecisely(strio, L))
soFar += L + 1
if soFar != length:
log.msg(
"Decoded %d bytes in %s record, but rdlength is %d" % (
soFar, self.fancybasename, length
)
)
def __hash__(self):
return hash(tuple(self.data))
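# For reference: a Record_TXT('foo', 'bar') built from the class above
# serializes each string behind a one-byte length prefix, so the wire
# payload is '\x03foo\x03bar'; decode() walks those prefixes back out and
# logs a warning when they do not add up to the advertised rdlength.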
class Record_SPF(Record_TXT):
"""
Structurally, freeform text. Semantically, a policy definition, formatted
as defined in U{rfc 4408<http://www.faqs.org/rfcs/rfc4408.html>}.
@type data: C{list} of C{str}
@ivar data: Freeform text which makes up this record.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be cached.
"""
TYPE = SPF
fancybasename = 'SPF'
class Message:
"""
L{Message} contains all the information represented by a single
DNS request or response.
"""
headerFmt = "!H2B4H"
headerSize = struct.calcsize(headerFmt)
# Question, answer, additional, and nameserver lists
queries = answers = add = ns = None
def __init__(self, id=0, answer=0, opCode=0, recDes=0, recAv=0,
auth=0, rCode=OK, trunc=0, maxSize=512):
self.maxSize = maxSize
self.id = id
self.answer = answer
self.opCode = opCode
self.auth = auth
self.trunc = trunc
self.recDes = recDes
self.recAv = recAv
self.rCode = rCode
self.queries = []
self.answers = []
self.authority = []
self.additional = []
def addQuery(self, name, type=ALL_RECORDS, cls=IN):
"""
Add another query to this Message.
@type name: C{str}
@param name: The name to query.
@type type: C{int}
@param type: Query type
@type cls: C{int}
@param cls: Query class
"""
self.queries.append(Query(name, type, cls))
def encode(self, strio):
compDict = {}
body_tmp = StringIO.StringIO()
for q in self.queries:
q.encode(body_tmp, compDict)
for q in self.answers:
q.encode(body_tmp, compDict)
for q in self.authority:
q.encode(body_tmp, compDict)
for q in self.additional:
q.encode(body_tmp, compDict)
body = body_tmp.getvalue()
size = len(body) + self.headerSize
if self.maxSize and size > self.maxSize:
self.trunc = 1
body = body[:self.maxSize - self.headerSize]
byte3 = (( ( self.answer & 1 ) << 7 )
| ((self.opCode & 0xf ) << 3 )
| ((self.auth & 1 ) << 2 )
| ((self.trunc & 1 ) << 1 )
| ( self.recDes & 1 ) )
byte4 = ( ( (self.recAv & 1 ) << 7 )
| (self.rCode & 0xf ) )
strio.write(struct.pack(self.headerFmt, self.id, byte3, byte4,
len(self.queries), len(self.answers),
len(self.authority), len(self.additional)))
strio.write(body)
def decode(self, strio, length=None):
self.maxSize = 0
header = readPrecisely(strio, self.headerSize)
r = struct.unpack(self.headerFmt, header)
self.id, byte3, byte4, nqueries, nans, nns, nadd = r
self.answer = ( byte3 >> 7 ) & 1
self.opCode = ( byte3 >> 3 ) & 0xf
self.auth = ( byte3 >> 2 ) & 1
self.trunc = ( byte3 >> 1 ) & 1
self.recDes = byte3 & 1
self.recAv = ( byte4 >> 7 ) & 1
self.rCode = byte4 & 0xf
self.queries = []
for i in range(nqueries):
q = Query()
try:
q.decode(strio)
except EOFError:
return
self.queries.append(q)
items = ((self.answers, nans), (self.authority, nns), (self.additional, nadd))
for (l, n) in items:
self.parseRecords(l, n, strio)
def parseRecords(self, list, num, strio):
for i in range(num):
header = RRHeader()
try:
header.decode(strio)
except EOFError:
return
t = self.lookupRecordType(header.type)
if not t:
continue
header.payload = t(ttl=header.ttl)
try:
header.payload.decode(strio, header.rdlength)
except EOFError:
return
list.append(header)
# Create a mapping from record types to their corresponding Record_*
# classes. This relies on the global state which has been created so
# far in initializing this module (so don't define Record classes after
# this).
_recordTypes = {}
for name in globals():
if name.startswith('Record_'):
_recordTypes[globals()[name].TYPE] = globals()[name]
# Clear the iteration variable out of the class namespace so it
# doesn't become an attribute.
del name
def lookupRecordType(self, type):
"""
Retrieve the L{IRecord} implementation for the given record type.
@param type: A record type, such as L{A} or L{NS}.
@type type: C{int}
@return: An object which implements L{IRecord} or C{None} if none
can be found for the given type.
@rtype: L{types.ClassType}
"""
return self._recordTypes.get(type, None)
def toStr(self):
strio = StringIO.StringIO()
self.encode(strio)
return strio.getvalue()
def fromStr(self, str):
strio = StringIO.StringIO(str)
self.decode(strio)
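# Illustrative sketch (not part of Twisted): round-trip one query through
# the wire format implemented by Message.encode()/decode() above.
def _message_roundtrip_sketch():
    m = Message(id=1234, recDes=1)
    m.addQuery('example.com', type=MX, cls=IN)
    wire = m.toStr()             # 12-byte header plus one question entry
    parsed = Message()
    parsed.fromStr(wire)
    assert parsed.id == 1234 and len(parsed.queries) == 1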
class DNSMixin(object):
"""
DNS protocol mixin shared by UDP and TCP implementations.
@ivar _reactor: A L{IReactorTime} and L{IReactorUDP} provider which will
be used to issue DNS queries and manage request timeouts.
"""
id = None
liveMessages = None
def __init__(self, controller, reactor=None):
self.controller = controller
self.id = random.randrange(2 ** 10, 2 ** 15)
if reactor is None:
from twisted.internet import reactor
self._reactor = reactor
def pickID(self):
"""
Return a unique ID for queries.
"""
while True:
id = randomSource()
if id not in self.liveMessages:
return id
def callLater(self, period, func, *args):
"""
        Wrapper around reactor.callLater, mainly for test purposes.
"""
return self._reactor.callLater(period, func, *args)
def _query(self, queries, timeout, id, writeMessage):
"""
Send out a message with the given queries.
@type queries: C{list} of C{Query} instances
@param queries: The queries to transmit
@type timeout: C{int} or C{float}
@param timeout: How long to wait before giving up
@type id: C{int}
@param id: Unique key for this request
@type writeMessage: C{callable}
@param writeMessage: One-parameter callback which writes the message
@rtype: C{Deferred}
@return: a C{Deferred} which will be fired with the result of the
query, or errbacked with any errors that could happen (exceptions
during writing of the query, timeout errors, ...).
"""
m = Message(id, recDes=1)
m.queries = queries
try:
writeMessage(m)
except:
return defer.fail()
resultDeferred = defer.Deferred()
cancelCall = self.callLater(timeout, self._clearFailed, resultDeferred, id)
self.liveMessages[id] = (resultDeferred, cancelCall)
return resultDeferred
def _clearFailed(self, deferred, id):
"""
Clean the Deferred after a timeout.
"""
try:
del self.liveMessages[id]
except KeyError:
pass
deferred.errback(failure.Failure(DNSQueryTimeoutError(id)))
class DNSDatagramProtocol(DNSMixin, protocol.DatagramProtocol):
"""
DNS protocol over UDP.
"""
resends = None
def stopProtocol(self):
"""
Stop protocol: reset state variables.
"""
self.liveMessages = {}
self.resends = {}
self.transport = None
def startProtocol(self):
"""
Upon start, reset internal state.
"""
self.liveMessages = {}
self.resends = {}
def writeMessage(self, message, address):
"""
Send a message holding DNS queries.
@type message: L{Message}
"""
self.transport.write(message.toStr(), address)
def startListening(self):
self._reactor.listenUDP(0, self, maxPacketSize=512)
def datagramReceived(self, data, addr):
"""
Read a datagram, extract the message in it and trigger the associated
Deferred.
"""
m = Message()
try:
m.fromStr(data)
except EOFError:
log.msg("Truncated packet (%d bytes) from %s" % (len(data), addr))
return
except:
# Nothing should trigger this, but since we're potentially
# invoking a lot of different decoding methods, we might as well
# be extra cautious. Anything that triggers this is itself
# buggy.
log.err(failure.Failure(), "Unexpected decoding error")
return
if m.id in self.liveMessages:
d, canceller = self.liveMessages[m.id]
del self.liveMessages[m.id]
canceller.cancel()
# XXX we shouldn't need this hack of catching exception on callback()
try:
d.callback(m)
except:
log.err()
else:
if m.id not in self.resends:
self.controller.messageReceived(m, self, addr)
def removeResend(self, id):
"""
Mark message ID as no longer having duplication suppression.
"""
try:
del self.resends[id]
except KeyError:
pass
def query(self, address, queries, timeout=10, id=None):
"""
Send out a message with the given queries.
@type address: C{tuple} of C{str} and C{int}
@param address: The address to which to send the query
@type queries: C{list} of C{Query} instances
@param queries: The queries to transmit
@rtype: C{Deferred}
"""
if not self.transport:
# XXX transport might not get created automatically, use callLater?
try:
self.startListening()
except CannotListenError:
return defer.fail()
if id is None:
id = self.pickID()
else:
self.resends[id] = 1
def writeMessage(m):
self.writeMessage(m, address)
return self._query(queries, timeout, id, writeMessage)
class DNSProtocol(DNSMixin, protocol.Protocol):
"""
DNS protocol over TCP.
"""
length = None
buffer = ''
def writeMessage(self, message):
"""
Send a message holding DNS queries.
@type message: L{Message}
"""
s = message.toStr()
self.transport.write(struct.pack('!H', len(s)) + s)
def connectionMade(self):
"""
Connection is made: reset internal state, and notify the controller.
"""
self.liveMessages = {}
self.controller.connectionMade(self)
def connectionLost(self, reason):
"""
Notify the controller that this protocol is no longer
connected.
"""
self.controller.connectionLost(self)
def dataReceived(self, data):
self.buffer += data
while self.buffer:
if self.length is None and len(self.buffer) >= 2:
self.length = struct.unpack('!H', self.buffer[:2])[0]
self.buffer = self.buffer[2:]
if len(self.buffer) >= self.length:
myChunk = self.buffer[:self.length]
m = Message()
m.fromStr(myChunk)
try:
d, canceller = self.liveMessages[m.id]
except KeyError:
self.controller.messageReceived(m, self)
else:
del self.liveMessages[m.id]
canceller.cancel()
# XXX we shouldn't need this hack
try:
d.callback(m)
except:
log.err()
self.buffer = self.buffer[self.length:]
self.length = None
else:
break
def query(self, queries, timeout=60):
"""
Send out a message with the given queries.
@type queries: C{list} of C{Query} instances
@param queries: The queries to transmit
@rtype: C{Deferred}
"""
id = self.pickID()
return self._query(queries, timeout, id, self.writeMessage)
| agpl-3.0 |
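DNSProtocol above frames each DNS message for TCP with a two-byte big-endian length prefix (writeMessage prepends it, dataReceived strips it). A minimal standard-library sketch of that framing, independent of Twisted:

import struct

def frame(payload):
    # Prepend the 16-bit network-order length used by DNS-over-TCP.
    return struct.pack('!H', len(payload)) + payload

def unframe(buf):
    # Return (message, remainder) once a whole frame is buffered, else None.
    if len(buf) < 2:
        return None
    n = struct.unpack('!H', buf[:2])[0]
    if len(buf) < 2 + n:
        return None
    return buf[2:2 + n], buf[2 + n:]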
shriyanka/daemo-forum | spirit/core/utils/timezone.py | 9 | 2362 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext as _
TIMEZONE_CHOICES = [
('Etc/GMT+12', _("(GMT -12:00) Eniwetok, Kwajalein")),
('Etc/GMT+11', _("(GMT -11:00) Midway Island, Samoa")),
('Etc/GMT+10', _("(GMT -10:00) Hawaii")),
('Pacific/Marquesas', _("(GMT -9:30) Marquesas Islands")),
('Etc/GMT+9', _("(GMT -9:00) Alaska")),
('Etc/GMT+8', _("(GMT -8:00) Pacific Time (US & Canada)")),
('Etc/GMT+7', _("(GMT -7:00) Mountain Time (US & Canada)")),
('Etc/GMT+6', _("(GMT -6:00) Central Time (US & Canada), Mexico City")),
('Etc/GMT+5', _("(GMT -5:00) Eastern Time (US & Canada), Bogota, Lima")),
('America/Caracas', _("(GMT -4:30) Venezuela")),
('Etc/GMT+4', _("(GMT -4:00) Atlantic Time (Canada), Caracas, La Paz")),
('Etc/GMT+3', _("(GMT -3:00) Brazil, Buenos Aires, Georgetown")),
('Etc/GMT+2', _("(GMT -2:00) Mid-Atlantic")),
('Etc/GMT+1', _("(GMT -1:00) Azores, Cape Verde Islands")),
('UTC', _("(GMT) Western Europe Time, London, Lisbon, Casablanca")),
('Etc/GMT-1', _("(GMT +1:00) Brussels, Copenhagen, Madrid, Paris")),
('Etc/GMT-2', _("(GMT +2:00) Kaliningrad, South Africa")),
('Etc/GMT-3', _("(GMT +3:00) Baghdad, Riyadh, Moscow, St. Petersburg")),
('Etc/GMT-4', _("(GMT +4:00) Abu Dhabi, Muscat, Baku, Tbilisi")),
('Asia/Kabul', _("(GMT +4:30) Afghanistan")),
('Etc/GMT-5', _("(GMT +5:00) Ekaterinburg, Islamabad, Karachi, Tashkent")),
('Asia/Kolkata', _("(GMT +5:30) India, Sri Lanka")),
('Asia/Kathmandu', _("(GMT +5:45) Nepal")),
('Etc/GMT-6', _("(GMT +6:00) Almaty, Dhaka, Colombo")),
('Indian/Cocos', _("(GMT +6:30) Cocos Islands, Myanmar")),
('Etc/GMT-7', _("(GMT +7:00) Bangkok, Hanoi, Jakarta")),
('Etc/GMT-8', _("(GMT +8:00) Beijing, Perth, Singapore, Hong Kong")),
('Australia/Eucla', _("(GMT +8:45) Australia (Eucla)")),
('Etc/GMT-9', _("(GMT +9:00) Tokyo, Seoul, Osaka, Sapporo, Yakutsk")),
('Australia/North', _("(GMT +9:30) Australia (Northern Territory)")),
('Etc/GMT-10', _("(GMT +10:00) Eastern Australia, Guam, Vladivostok")),
('Etc/GMT-11', _("(GMT +11:00) Magadan, Solomon Islands, New Caledonia")),
('Pacific/Norfolk', _("(GMT +11:30) Norfolk Island")),
('Etc/GMT-12', _("(GMT +12:00) Auckland, Wellington, Fiji, Kamchatka")),
]
| mit |
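A hedged usage sketch for TIMEZONE_CHOICES above: the list is shaped as (tz database name, translated label) pairs, which is what a Django ChoiceField expects. The form and field names here are invented for illustration:

from django import forms
from spirit.core.utils.timezone import TIMEZONE_CHOICES

class UserSettingsForm(forms.Form):
    # Each entry is a (tz database name, translated label) pair.
    timezone = forms.ChoiceField(choices=TIMEZONE_CHOICES)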
wuhengzhi/chromium-crosswalk | tools/cr/cr/base/arch.py | 113 | 1544 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the basic architectures supported by cr."""
import cr
DEFAULT = cr.Config.From(
CR_ENVSETUP_ARCH='{CR_ARCH}',
)
class Arch(cr.Plugin, cr.Plugin.Type):
"""Base class for implementing cr architecture targets."""
SELECTOR = 'CR_ARCH'
@classmethod
def AddArguments(cls, parser):
parser.add_argument(
'--architecture', dest=cls.SELECTOR,
choices=cls.Choices(),
default=None,
help='Sets the target architecture to use. Overrides ' + cls.SELECTOR
)
class IA32Arch(Arch):
ACTIVE = cr.Config.From(
CR_ENVSETUP_ARCH='ia32',
)
class Mips32Arch(Arch):
ACTIVE = cr.Config.From(
CR_ENVSETUP_ARCH='mipsel',
)
@property
def enabled(self):
return cr.AndroidPlatform.GetInstance().is_active
class X64Arch(Arch):
ACTIVE = cr.Config.From(
CR_ENVSETUP_ARCH='x64',
)
@property
def priority(self):
return super(X64Arch, self).priority + 1
class Arm32Arch(Arch):
ACTIVE = cr.Config.From(
CR_ENVSETUP_ARCH='arm',
)
@property
def priority(self):
return super(Arm32Arch, self).priority + 2
@property
def enabled(self):
return cr.AndroidPlatform.GetInstance().is_active
class Arm64Arch(Arch):
ACTIVE = cr.Config.From(
CR_ENVSETUP_ARCH='arm64',
)
@property
def enabled(self):
return cr.AndroidPlatform.GetInstance().is_active
| bsd-3-clause |
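The Arch subclasses above rely on cr's plugin machinery (not shown here) to pick the active architecture from their `enabled` and `priority` properties. A standalone sketch of that selection pattern, assuming only objects carrying those two attributes:

def pick_active_sketch(arches):
    enabled = [a for a in arches if a.enabled]
    # Highest priority among enabled candidates wins; None if nothing fits.
    return max(enabled, key=lambda a: a.priority) if enabled else None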
tchernomax/ansible | lib/ansible/modules/network/eos/eos_lldp.py | 58 | 2962 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_lldp
version_added: "2.5"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage LLDP configuration on Arista EOS network devices
description:
- This module provides declarative management of LLDP service
on Arista EOS network devices.
notes:
- Tested against EOS 4.15
options:
state:
description:
      - State of the LLDP configuration. If the value is I(present), LLDP will be
        enabled; if it is I(absent), it will be disabled.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: eos
"""
EXAMPLES = """
- name: Enable LLDP service
eos_lldp:
state: present
- name: Disable LLDP service
eos_lldp:
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- lldp run
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.eos.eos import get_config, load_config, run_commands
from ansible.module_utils.network.eos.eos import eos_argument_spec
def has_lldp(module):
config = get_config(module, flags=['| section lldp'])
is_lldp_enable = False
if "no lldp run" not in config:
is_lldp_enable = True
return is_lldp_enable
def main():
""" main entry point for module execution
"""
argument_spec = dict(
state=dict(default='present',
choices=['present', 'absent',
'enabled', 'disabled'])
)
argument_spec.update(eos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
HAS_LLDP = has_lldp(module)
commands = []
if module.params['state'] == 'absent' and HAS_LLDP:
commands.append('no lldp run')
elif module.params['state'] == 'present' and not HAS_LLDP:
commands.append('lldp run')
result['commands'] = commands
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
if response.get('diff') and module._diff:
result['diff'] = {'prepared': response.get('diff')}
result['session_name'] = response.get('session')
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
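The module's idempotency reduces to comparing the requested state with what `show running-config | section lldp` reports. A device-free sketch of that decision, mirroring main() above:

def lldp_commands_sketch(state, lldp_running):
    if state == 'absent' and lldp_running:
        return ['no lldp run']
    if state == 'present' and not lldp_running:
        return ['lldp run']
    return []   # already in the desired state -> no change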
deadblue/baidupan_shell | baidupan/vcode.py | 1 | 2375 | # -*- coding: utf-8 -*-
from StringIO import StringIO
try:
import Image
except:
from PIL import Image
__author__ = 'deadblue'
def convert_ascii(img_data):
return _matrix_to_ascii(
_crop_and_border(
_image_to_matrix(img_data)
)
)
def _image_to_matrix(img_data):
img = Image.open(StringIO(img_data)).convert('L')
w,h = img.size
    # Build the matrix: each output row covers two pixel rows, and the
    # thresholded pixel pair selects one of four glyph states (0..3)
martix = []
for y in xrange(h / 2):
row = []
for x in xrange(w):
p1 = img.getpixel((x, y * 2))
p2 = img.getpixel((x, y * 2 + 1))
if p1 > 192 and p2 > 192:
row.append(0)
elif p1 > 192:
row.append(1)
elif p2 > 192:
row.append(2)
else:
row.append(3)
martix.append(row)
return martix
def _crop_and_border(matrix):
    # Measure the blank margins on all four sides
t,b,l,r = 0,0,0,0
for y in xrange(len(matrix)):
if sum(matrix[y]) == 0:
t += 1
else: break
for y in xrange(len(matrix)):
if sum(matrix[-1 - y]) == 0:
b += 1
else: break
for x in xrange(len(matrix[0])):
if sum( map(lambda row:row[x], matrix) ) == 0:
l += 1
else: break
for x in xrange(len(matrix[0])):
if sum( map(lambda row:row[-1 - x], matrix) ) == 0:
r += 1
else: break
    # Crop top/bottom down to a single blank border row (adding one if missing)
w = len(matrix[0])
if t > 0:
matrix = matrix[t-1:]
else:
matrix.insert(0, [0] * w)
if b > 1:
matrix = matrix[:1-b]
elif b == 0:
matrix.append([0] * w)
    # Crop left/right down to a single blank border column (adding one if missing)
for ri in xrange(len(matrix)):
row = matrix[ri]
if l > 0:
row = row[l-1:]
else:
row.insert(0, 0)
if r > 1:
row = row[:1-r]
elif r == 0:
row.append(0)
matrix[ri] = row
return matrix
def _matrix_to_ascii(matrix):
buf = []
for row in matrix:
rbuf = []
for cell in row:
if cell == 0:
rbuf.append('#')
elif cell == 1:
rbuf.append('"')
elif cell == 2:
rbuf.append(',')
elif cell == 3:
rbuf.append(' ')
buf.append(''.join(rbuf))
return '\n'.join(buf) | gpl-2.0 |
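A hedged usage sketch for the converter above (the sample file name is made up); the module is Python 2, matching its use of `xrange` and `StringIO`:

from baidupan import vcode

with open('captcha.jpg', 'rb') as f:   # hypothetical captcha image
    print vcode.convert_ascii(f.read())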
eltonsantos/django | tests/generic_views/test_edit.py | 2 | 16597 | from __future__ import absolute_import
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django import forms
from django.test import TestCase
from django.utils.unittest import expectedFailure
from django.views.generic.base import View
from django.views.generic.edit import FormMixin, CreateView, UpdateView
from . import views
from .models import Artist, Author
class FormMixinTests(TestCase):
def test_initial_data(self):
""" Test instance independence of initial data dict (see #16138) """
initial_1 = FormMixin().get_initial()
initial_1['foo'] = 'bar'
initial_2 = FormMixin().get_initial()
self.assertNotEqual(initial_1, initial_2)
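# The regression checked above (#16138) is why FormMixin.get_initial()
# must hand out a copy: a mixin returning self.initial directly would let
# one instance's mutation leak into every later call.  Sketch of the safe
# shape, assuming a class-level ``initial = {}`` attribute:
#
#     def get_initial(self):
#         return self.initial.copy()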
class BasicFormTests(TestCase):
urls = 'generic_views.urls'
def test_post_data(self):
res = self.client.post('/contact/', {'name': "Me", 'message': "Hello"})
self.assertRedirects(res, 'http://testserver/list/authors/')
class ModelFormMixinTests(TestCase):
def test_get_form(self):
form_class = views.AuthorGetQuerySetFormView().get_form_class()
self.assertEqual(form_class._meta.model, Author)
class CreateViewTests(TestCase):
urls = 'generic_views.urls'
def test_create(self):
res = self.client.get('/edit/authors/create/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertIsInstance(res.context['view'], View)
self.assertFalse('object' in res.context)
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
res = self.client.post('/edit/authors/create/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_invalid(self):
res = self.client.post('/edit/authors/create/',
{'name': 'A' * 101, 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
self.assertEqual(len(res.context['form'].errors), 1)
self.assertEqual(Author.objects.count(), 0)
def test_create_with_object_url(self):
res = self.client.post('/edit/artists/create/',
{'name': 'Rene Magritte'})
self.assertEqual(res.status_code, 302)
artist = Artist.objects.get(name='Rene Magritte')
self.assertRedirects(res, 'http://testserver/detail/artist/%d/' % artist.pk)
self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])
def test_create_with_redirect(self):
res = self.client.post('/edit/authors/create/redirect/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_with_interpolated_redirect(self):
res = self.client.post('/edit/authors/create/interpolate_redirect/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
self.assertEqual(res.status_code, 302)
pk = Author.objects.all()[0].pk
self.assertRedirects(res, 'http://testserver/edit/author/%d/update/' % pk)
def test_create_with_special_properties(self):
res = self.client.get('/edit/authors/create/special/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], views.AuthorForm)
self.assertFalse('object' in res.context)
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/form.html')
res = self.client.post('/edit/authors/create/special/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
obj = Author.objects.get(slug='randall-munroe')
self.assertRedirects(res, reverse('author_detail', kwargs={'pk': obj.pk}))
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_without_redirect(self):
try:
res = self.client.post('/edit/authors/create/naive/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.fail('Should raise exception -- No redirect URL provided, and no get_absolute_url provided')
except ImproperlyConfigured:
pass
def test_create_restricted(self):
res = self.client.post('/edit/authors/create/restricted/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/accounts/login/?next=/edit/authors/create/restricted/')
def test_create_view_with_restricted_fields(self):
class MyCreateView(CreateView):
model = Author
fields = ['name']
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name'])
def test_create_view_all_fields(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", PendingDeprecationWarning)
class MyCreateView(CreateView):
model = Author
fields = '__all__'
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name', 'slug'])
self.assertEqual(len(w), 0)
def test_create_view_without_explicit_fields(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", PendingDeprecationWarning)
class MyCreateView(CreateView):
model = Author
# Until end of the deprecation cycle, should still create the form
# as before:
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name', 'slug'])
# but with a warning:
self.assertEqual(w[0].category, PendingDeprecationWarning)
class UpdateViewTests(TestCase):
urls = 'generic_views.urls'
def test_update_post(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_form.html')
# Modification with both POST and PUT (browser compatible)
res = self.client.post('/edit/author/%d/update/' % a.pk,
{'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
@expectedFailure
def test_update_put(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
res = self.client.put('/edit/author/%d/update/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
# Here is the expected failure. PUT data are not processed in any special
        # way by Django, so the request will be equivalent to a POST without data, hence
# the form will be invalid and redisplayed with errors (status code 200).
# See also #12635
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_invalid(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/' % a.pk,
{'name': 'A' * 101, 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
self.assertEqual(len(res.context['form'].errors), 1)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_update_with_object_url(self):
a = Artist.objects.create(name='Rene Magritte')
res = self.client.post('/edit/artists/%d/update/' % a.pk,
{'name': 'Rene Magritte'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/detail/artist/%d/' % a.pk)
self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])
def test_update_with_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/redirect/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_with_interpolated_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/interpolate_redirect/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
self.assertEqual(res.status_code, 302)
pk = Author.objects.all()[0].pk
self.assertRedirects(res, 'http://testserver/edit/author/%d/update/' % pk)
def test_update_with_special_properties(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/special/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], views.AuthorForm)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/form.html')
res = self.client.post('/edit/author/%d/update/special/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/detail/author/%d/' % a.pk)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_without_redirect(self):
try:
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/naive/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.fail('Should raise exception -- No redirect URL provided, and no get_absolute_url provided')
except ImproperlyConfigured:
pass
def test_update_get_object(self):
a = Author.objects.create(
pk=1,
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/update/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertIsInstance(res.context['view'], View)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_form.html')
# Modification with both POST and PUT (browser compatible)
res = self.client.post('/edit/author/update/',
{'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
class DeleteViewTests(TestCase):
urls = 'generic_views.urls'
def test_delete_by_post(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.get('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_confirm_delete.html')
# Deletion with POST
res = self.client.post('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_by_delete(self):
# Deletion with browser compatible DELETE method
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.delete('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_redirect(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/%d/delete/redirect/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_interpolated_redirect(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/%d/delete/interpolate_redirect/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/?deleted=%d' % a.pk)
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_special_properties(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.get('/edit/author/%d/delete/special/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/confirm_delete.html')
res = self.client.post('/edit/author/%d/delete/special/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_without_redirect(self):
try:
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/delete/naive/' % a.pk)
self.fail('Should raise exception -- No redirect URL provided, and no get_absolute_url provided')
except ImproperlyConfigured:
pass
| bsd-3-clause |
ace8957/SeniorDesignKernel | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
        has the name given by func. If this function is not
        found among the parents, then create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
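# For example, a function-tracer line such as
#   bash-1234 [000] 6543.210987: do_sys_open <-sys_open
# parses to ('6543.210987', 'do_sys_open', 'sys_open'): the callee named
# after the timestamp was entered from the caller following "<-".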
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
saturday-shi/spark | python/pyspark/find_spark_home.py | 66 | 2761 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script attempts to determine the correct setting for SPARK_HOME given
# that Spark may have been installed on the system with pip.
from __future__ import print_function
import os
import sys
def _find_spark_home():
"""Find the SPARK_HOME."""
    # If the environment has SPARK_HOME set, trust it.
if "SPARK_HOME" in os.environ:
return os.environ["SPARK_HOME"]
def is_spark_home(path):
"""Takes a path and returns true if the provided path could be a reasonable SPARK_HOME"""
return (os.path.isfile(os.path.join(path, "bin/spark-submit")) and
(os.path.isdir(os.path.join(path, "jars")) or
os.path.isdir(os.path.join(path, "assembly"))))
paths = ["../", os.path.dirname(os.path.realpath(__file__))]
# Add the path of the PySpark module if it exists
if sys.version < "3":
import imp
try:
module_home = imp.find_module("pyspark")[1]
paths.append(module_home)
# If we are installed in edit mode also look two dirs up
paths.append(os.path.join(module_home, "../../"))
except ImportError:
# Not pip installed no worries
pass
else:
from importlib.util import find_spec
try:
module_home = os.path.dirname(find_spec("pyspark").origin)
paths.append(module_home)
# If we are installed in edit mode also look two dirs up
paths.append(os.path.join(module_home, "../../"))
except ImportError:
# Not pip installed no worries
pass
# Normalize the paths
paths = [os.path.abspath(p) for p in paths]
try:
return next(path for path in paths if is_spark_home(path))
except StopIteration:
print("Could not find valid SPARK_HOME while searching {0}".format(paths), file=sys.stderr)
exit(-1)
if __name__ == "__main__":
print(_find_spark_home())
| apache-2.0 |
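The search order above is: an explicit SPARK_HOME, then paths relative to this file, then the installed pyspark module. A hedged sketch of the environment short-circuit (the install path is made up):

import os
from pyspark.find_spark_home import _find_spark_home

os.environ['SPARK_HOME'] = '/opt/spark'    # hypothetical install location
assert _find_spark_home() == '/opt/spark'  # env var wins before any probing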
product-definition-center/pdc-client | .tito/lib/copr_user/releasers.py | 8 | 2716 | import os.path
from tito.release import CoprReleaser
from tito.common import run_command, find_git_root, tito_config_dir, info_out
from tito.compat import RawConfigParser
class CoprUserReleaser(CoprReleaser):
def _load_user_config(self):
"""
Try to load copr user configs if any
# 1. copr config file should be located at `~/.config/copr`
# or
        # 2. user-defined overrides should be stored in `copr_user.conf`
# next to `releasers.conf`
"""
config = RawConfigParser()
config.add_section('copr-user')
config.set('copr-user', 'ssh_key', '~/.ssh/id_rsa')
copr_conf = os.path.expanduser("~/.config/copr")
if os.path.exists(copr_conf):
config.read(copr_conf)
config.set('copr-user', 'username', config.get('copr-cli', 'username'))
tito_dir = os.path.join(find_git_root(), tito_config_dir())
copr_local = os.path.join(tito_dir, "copr_user.conf")
if os.path.exists(copr_local):
config.read(copr_local)
if not config.has_option('copr-user', 'username'):
raise Exception("Can not load username from '~/.config/copr' and 'copr_user.conf'")
return config
def _submit_build(self, executable, koji_opts, tag, srpm_location):
""" Copy srpm to remote destination and submit it to Copr """
cmd = self.releaser_config.get(self.target, "upload_command")
url = self.releaser_config.get(self.target, "remote_location")
if self.srpm_submitted:
srpm_location = self.srpm_submitted
srpm_base_name = os.path.basename(srpm_location)
copr_user_config = self._load_user_config()
# e.g. "scp -i %(private_key)s %(srpm)s %(user)[email protected]:public_html/my_srpm/"
cmd_upload = cmd % {"srpm": srpm_location,
"user": copr_user_config.get("copr-user", "username"),
"private_key": copr_user_config.get("copr-user", "ssh_key")}
cmd_submit = "/usr/bin/copr-cli build %s %s%s" % (
self.releaser_config.get(self.target, "project_name"),
url % {'user': copr_user_config.get("copr-user", "username")},
srpm_base_name)
if self.dry_run:
self.print_dry_run_warning(cmd_upload)
self.print_dry_run_warning(cmd_submit)
return
# TODO: no error handling when run_command fails:
if not self.srpm_submitted:
print("Uploading src.rpm.")
print(run_command(cmd_upload))
self.srpm_submitted = srpm_location
info_out("Submiting build into %s." % self.NAME)
print(run_command(cmd_submit))
| mit |
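_load_user_config() above layers three sources -- hard-coded defaults, the user-wide `~/.config/copr`, then the repo-local `copr_user.conf` -- with each later read() overriding earlier values for the keys it defines. A stdlib-only sketch of the same pattern (paths illustrative, Python 2 module name assumed):

from ConfigParser import RawConfigParser

config = RawConfigParser()
config.add_section('copr-user')
config.set('copr-user', 'ssh_key', '~/.ssh/id_rsa')   # built-in default
config.read('/home/user/.config/copr')                # user-wide, if present
config.read('.tito/copr_user.conf')                   # repo-local override
print(config.get('copr-user', 'ssh_key'))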
Adel-Magebinary/odoo | addons/sale/edi/sale_order.py | 403 | 10861 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.addons.edi import EDIMixin
from openerp.tools.translate import _
from werkzeug import url_encode
SALE_ORDER_LINE_EDI_STRUCT = {
'sequence': True,
'name': True,
#custom: 'date_planned'
'product_id': True,
'product_uom': True,
'price_unit': True,
#custom: 'product_qty'
'discount': True,
# fields used for web preview only - discarded on import
'price_subtotal': True,
}
SALE_ORDER_EDI_STRUCT = {
'name': True,
'origin': True,
'company_id': True, # -> to be changed into partner
#custom: 'partner_ref'
'date_order': True,
'partner_id': True,
#custom: 'partner_address'
#custom: 'notes'
'order_line': SALE_ORDER_LINE_EDI_STRUCT,
# fields used for web preview only - discarded on import
'amount_total': True,
'amount_untaxed': True,
'amount_tax': True,
'payment_term': True,
'order_policy': True,
'user_id': True,
'state': True,
}
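# In these structs a True value exports the field verbatim, a nested dict
# (as for 'order_line') recurses into the related records with that
# sub-struct, and the "#custom:" entries are filled in manually inside
# edi_export() below; anything not listed is skipped.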
class sale_order(osv.osv, EDIMixin):
_inherit = 'sale.order'
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Exports a Sale order"""
edi_struct = dict(edi_struct or SALE_ORDER_EDI_STRUCT)
res_company = self.pool.get('res.company')
res_partner_obj = self.pool.get('res.partner')
edi_doc_list = []
for order in records:
# generate the main report
self._edi_generate_report_attachment(cr, uid, order, context=context)
# Get EDI doc based on struct. The result will also contain all metadata fields and attachments.
edi_doc = super(sale_order,self).edi_export(cr, uid, [order], edi_struct, context)[0]
edi_doc.update({
# force trans-typing to purchase.order upon import
'__import_model': 'purchase.order',
'__import_module': 'purchase',
'company_address': res_company.edi_export_address(cr, uid, order.company_id, context=context),
'partner_address': res_partner_obj.edi_export(cr, uid, [order.partner_id], context=context)[0],
'currency': self.pool.get('res.currency').edi_export(cr, uid, [order.pricelist_id.currency_id],
context=context)[0],
'partner_ref': order.client_order_ref or False,
'notes': order.note or False,
})
edi_doc_list.append(edi_doc)
return edi_doc_list
def _edi_import_company(self, cr, uid, edi_document, context=None):
# TODO: for multi-company setups, we currently import the document in the
# user's current company, but we should perhaps foresee a way to select
# the desired company among the user's allowed companies
self._edi_requires_attributes(('company_id','company_address'), edi_document)
res_partner = self.pool.get('res.partner')
xid, company_name = edi_document.pop('company_id')
# Retrofit address info into a unified partner info (changed in v7 - used to keep them separate)
company_address_edi = edi_document.pop('company_address')
company_address_edi['name'] = company_name
company_address_edi['is_company'] = True
company_address_edi['__import_model'] = 'res.partner'
company_address_edi['__id'] = xid # override address ID, as of v7 they should be the same anyway
if company_address_edi.get('logo'):
company_address_edi['image'] = company_address_edi.pop('logo')
company_address_edi['customer'] = True
partner_id = res_partner.edi_import(cr, uid, company_address_edi, context=context)
# modify edi_document to refer to new partner
partner = res_partner.browse(cr, uid, partner_id, context=context)
partner_edi_m2o = self.edi_m2o(cr, uid, partner, context=context)
edi_document['partner_id'] = partner_edi_m2o
edi_document['partner_invoice_id'] = partner_edi_m2o
edi_document['partner_shipping_id'] = partner_edi_m2o
edi_document.pop('partner_address', None) # ignored, that's supposed to be our own address!
return partner_id
def _edi_get_pricelist(self, cr, uid, partner_id, currency, context=None):
# TODO: refactor into common place for purchase/sale, e.g. into product module
partner_model = self.pool.get('res.partner')
partner = partner_model.browse(cr, uid, partner_id, context=context)
pricelist = partner.property_product_pricelist
if not pricelist:
pricelist = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'list0', context=context)
if not pricelist.currency_id == currency:
# look for a pricelist with the right type and currency, or make a new one
pricelist_type = 'sale'
product_pricelist = self.pool.get('product.pricelist')
match_pricelist_ids = product_pricelist.search(cr, uid,[('type','=',pricelist_type),
('currency_id','=',currency.id)])
if match_pricelist_ids:
pricelist_id = match_pricelist_ids[0]
else:
pricelist_name = _('EDI Pricelist (%s)') % (currency.name,)
pricelist_id = product_pricelist.create(cr, uid, {'name': pricelist_name,
'type': pricelist_type,
'currency_id': currency.id,
})
self.pool.get('product.pricelist.version').create(cr, uid, {'name': pricelist_name,
'pricelist_id': pricelist_id})
pricelist = product_pricelist.browse(cr, uid, pricelist_id)
return self.edi_m2o(cr, uid, pricelist, context=context)
def edi_import(self, cr, uid, edi_document, context=None):
self._edi_requires_attributes(('company_id','company_address','order_line','date_order','currency'), edi_document)
#import company as a new partner
partner_id = self._edi_import_company(cr, uid, edi_document, context=context)
# currency for rounding the discount calculations and for the pricelist
res_currency = self.pool.get('res.currency')
currency_info = edi_document.pop('currency')
currency_id = res_currency.edi_import(cr, uid, currency_info, context=context)
order_currency = res_currency.browse(cr, uid, currency_id)
partner_ref = edi_document.pop('partner_ref', False)
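# keep the sender's order name as our client reference; adopt their partner_ref (if given) as this order's name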
edi_document['client_order_ref'] = edi_document['name']
edi_document['name'] = partner_ref or edi_document['name']
edi_document['note'] = edi_document.pop('notes', False)
edi_document['pricelist_id'] = self._edi_get_pricelist(cr, uid, partner_id, order_currency, context=context)
# discard web preview fields, if present
edi_document.pop('amount_total', None)
edi_document.pop('amount_tax', None)
edi_document.pop('amount_untaxed', None)
order_lines = edi_document['order_line']
for order_line in order_lines:
self._edi_requires_attributes(('product_id', 'product_uom', 'product_qty', 'price_unit'), order_line)
order_line['product_uom_qty'] = order_line['product_qty']
del order_line['product_qty']
# discard web preview fields, if present
order_line.pop('price_subtotal', None)
return super(sale_order,self).edi_import(cr, uid, edi_document, context=context)
def _edi_paypal_url(self, cr, uid, ids, field, arg, context=None):
res = dict.fromkeys(ids, False)
for order in self.browse(cr, uid, ids, context=context):
if order.order_policy in ('prepaid', 'manual') and \
order.company_id.paypal_account and order.state != 'draft':
params = {
"cmd": "_xclick",
"business": order.company_id.paypal_account,
"item_name": order.company_id.name + " Order " + order.name,
"invoice": order.name,
"amount": order.amount_total,
"currency_code": order.pricelist_id.currency_id.name,
"button_subtype": "services",
"no_note": "1",
"bn": "OpenERP_Order_PayNow_" + order.pricelist_id.currency_id.name,
}
res[order.id] = "https://www.paypal.com/cgi-bin/webscr?" + url_encode(params)
return res
_columns = {
'paypal_url': fields.function(_edi_paypal_url, type='char', string='Paypal Url'),
}
class sale_order_line(osv.osv, EDIMixin):
_inherit='sale.order.line'
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Overridden to provide sale order line fields with the expected names
(sale and purchase orders have different column names)"""
edi_struct = dict(edi_struct or SALE_ORDER_LINE_EDI_STRUCT)
edi_doc_list = []
for line in records:
edi_doc = super(sale_order_line,self).edi_export(cr, uid, [line], edi_struct, context)[0]
edi_doc['__import_model'] = 'purchase.order.line'
edi_doc['product_qty'] = line.product_uom_qty
if line.product_uos:
edi_doc.update(product_uom=line.product_uos,
product_qty=line.product_uos_qty)
edi_doc_list.append(edi_doc)
return edi_doc_list
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sencha/chromium-spacewalk | chrome/common/extensions/docs/examples/apps/hello-python/httplib2/iri2uri.py | 885 | 3850 | """
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
escape_range = [
(0xA0, 0xD7FF),
(0xE000, 0xF8FF),
(0xF900, 0xFDCF),
(0xFDF0, 0xFFEF),
(0x10000, 0x1FFFD),
(0x20000, 0x2FFFD),
(0x30000, 0x3FFFD),
(0x40000, 0x4FFFD),
(0x50000, 0x5FFFD),
(0x60000, 0x6FFFD),
(0x70000, 0x7FFFD),
(0x80000, 0x8FFFD),
(0x90000, 0x9FFFD),
(0xA0000, 0xAFFFD),
(0xB0000, 0xBFFFD),
(0xC0000, 0xCFFFD),
(0xD0000, 0xDFFFD),
(0xE1000, 0xEFFFD),
(0xF0000, 0xFFFFD),
(0x100000, 0x10FFFD)
]
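# e.g. encode(u'\u2604') (COMET) returns '%E2%98%84', the percent-encoded UTF-8 bytes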
def encode(c):
retval = c
i = ord(c)
for low, high in escape_range:
if i < low:
break
if i >= low and i <= high:
retval = "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')])
break
return retval
def iri2uri(uri):
"""Convert an IRI to a URI. Note that IRIs must be
passed in as unicode strings. That is, do not utf-8 encode
the IRI before passing it into the function."""
if isinstance(uri, unicode):
(scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
authority = authority.encode('idna')
# For each character in 'ucschar' or 'iprivate'
# 1. encode as utf-8
# 2. then %-encode each octet of that utf-8
uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
uri = "".join([encode(c) for c in uri])
return uri
if __name__ == "__main__":
import unittest
class Test(unittest.TestCase):
def test_uris(self):
"""Test that URIs are invariant under the transformation."""
invariant = [
u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
u"http://www.ietf.org/rfc/rfc2396.txt",
u"ldap://[2001:db8::7]/c=GB?objectClass?one",
u"mailto:[email protected]",
u"news:comp.infosystems.www.servers.unix",
u"tel:+1-816-555-1212",
u"telnet://192.0.2.16:80/",
u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
for uri in invariant:
self.assertEqual(uri, iri2uri(uri))
def test_iri(self):
""" Test that the right type of escaping is done for each part of the URI."""
self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))
unittest.main()
| bsd-3-clause |
firebase/grpc-SwiftPM | bazel/test/python_test_repo/helloworld_moved.py | 13 | 2584 | # Copyright 2019 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter client."""
import contextlib
import datetime
import logging
import unittest
import grpc
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from concurrent import futures
from google.cloud import helloworld_pb2
from google.cloud import helloworld_pb2_grpc
_HOST = 'localhost'
_SERVER_ADDRESS = '{}:0'.format(_HOST)
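# binding to port 0 asks the OS for any free port; add_insecure_port() returns the port actually chosen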
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def SayHello(self, request, context):
request_in_flight = datetime.datetime.now() - \
request.request_initiation.ToDatetime()
request_duration = duration_pb2.Duration()
request_duration.FromTimedelta(request_in_flight)
return helloworld_pb2.HelloReply(
message='Hello, %s!' % request.name,
request_duration=request_duration,
)
@contextlib.contextmanager
def _listening_server():
server = grpc.server(futures.ThreadPoolExecutor())
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
port = server.add_insecure_port(_SERVER_ADDRESS)
server.start()
try:
yield port
finally:
server.stop(0)
class ImportTest(unittest.TestCase):
def test_import(self):
with _listening_server() as port:
with grpc.insecure_channel('{}:{}'.format(_HOST, port)) as channel:
stub = helloworld_pb2_grpc.GreeterStub(channel)
request_timestamp = timestamp_pb2.Timestamp()
request_timestamp.GetCurrentTime()
response = stub.SayHello(helloworld_pb2.HelloRequest(
name='you',
request_initiation=request_timestamp,
),
wait_for_ready=True)
self.assertEqual(response.message, "Hello, you!")
self.assertGreater(response.request_duration.nanos, 0)
if __name__ == '__main__':
logging.basicConfig()
unittest.main()
| apache-2.0 |
0xffea/keystone | keystone/service.py | 1 | 2930 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import routes
from keystone import auth
from keystone import catalog
from keystone.common import logging
from keystone.common import wsgi
from keystone.contrib import ec2
from keystone import identity
from keystone import policy
from keystone import routers
from keystone import token
from keystone import trust
LOG = logging.getLogger(__name__)
DRIVERS = dict(
catalog_api=catalog.Manager(),
ec2_api=ec2.Manager(),
identity_api=identity.Manager(),
policy_api=policy.Manager(),
token_api=token.Manager(),
trust_api=trust.Manager())
@logging.fail_gracefully
def public_app_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
return wsgi.ComposingRouter(routes.Mapper(),
[identity.routers.Public(),
token.routers.Router(),
routers.Version('public'),
routers.Extension(False)])
@logging.fail_gracefully
def admin_app_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
return wsgi.ComposingRouter(routes.Mapper(),
[identity.routers.Admin(),
token.routers.Router(),
routers.Version('admin'),
routers.Extension()])
@logging.fail_gracefully
def public_version_app_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
return wsgi.ComposingRouter(routes.Mapper(),
[routers.Versions('public')])
@logging.fail_gracefully
def admin_version_app_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
return wsgi.ComposingRouter(routes.Mapper(),
[routers.Versions('admin')])
@logging.fail_gracefully
def v3_app_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
mapper = routes.Mapper()
v3routers = []
for module in [auth, catalog, identity, policy, trust]:
module.routers.append_v3_routers(mapper, v3routers)
# TODO(ayoung): put token routes here
return wsgi.ComposingRouter(mapper, v3routers)
| apache-2.0 |
vynncentgm/limbo-android | jni/qemu/roms/seabios/tools/readserial.py | 118 | 6006 | #!/usr/bin/env python
# Script that can read from a serial device and show timestamps.
#
# Copyright (C) 2009 Kevin O'Connor <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
# Usage:
# tools/readserial.py /dev/ttyUSB0 115200
import sys
import time
import select
import optparse
# Reset time counter after this much idle time.
RESTARTINTERVAL = 60
# Number of bits in a transmitted byte - 8N1 is 1 start bit + 8 data
# bits + 1 stop bit.
BITSPERBYTE = 10
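# e.g. at 115200 baud one 8N1 byte occupies 10/115200 s, roughly 86.8us on the wire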
def calibrateserialwrite(outfile, byteadjust):
# Build 4000 bytes of dummy data.
data = "0123456789" * 4 + "012345678" + "\n"
data = data * 80
while 1:
st = time.time()
outfile.write(data)
outfile.flush()
et = time.time()
sys.stdout.write(
"Wrote %d - %.1fus per char (theory states %.1fus)\n" % (
len(data), (et-st) / len(data) * 1000000, byteadjust * 1000000))
sys.stdout.flush()
time.sleep(3)
def calibrateserialread(infile, byteadjust):
starttime = lasttime = 0
totalchars = 0
while 1:
select.select([infile], [], [])
d = infile.read(4096)
curtime = time.time()
if curtime - lasttime > 1.0:
if starttime and totalchars:
sys.stdout.write(
"Calibrating on %d bytes - %.1fus per char"
" (theory states %.1fus)\n" % (
totalchars,
float(lasttime - starttime) * 1000000 / totalchars,
byteadjust * 1000000))
totalchars = 0
starttime = curtime
else:
totalchars += len(d)
lasttime = curtime
def readserial(infile, logfile, byteadjust):
lasttime = 0
while 1:
# Read data
try:
res = select.select([infile, sys.stdin], [], [])
except KeyboardInterrupt:
sys.stdout.write("\n")
break
if sys.stdin in res[0]:
# Got keyboard input - force reset on next serial input
sys.stdin.read(1)
lasttime = 0
if len(res[0]) == 1:
continue
d = infile.read(4096)
if not d:
break
datatime = time.time()
datatime -= len(d) * byteadjust
# Reset start time if no data for some time
if datatime - lasttime > RESTARTINTERVAL:
starttime = datatime
charcount = 0
isnewline = 1
msg = "\n\n======= %s (adjust=%.1fus)\n" % (
time.asctime(time.localtime(datatime)), byteadjust * 1000000)
sys.stdout.write(msg)
logfile.write(msg)
lasttime = datatime
# Translate unprintable chars; add timestamps
out = ""
for c in d:
if isnewline:
delta = datatime - starttime - (charcount * byteadjust)
out += "%06.3f: " % delta
isnewline = 0
oc = ord(c)
charcount += 1
datatime += byteadjust
if oc == 0x0d:
continue
if oc == 0x00:
out += "<00>\n"
isnewline = 1
continue
if oc == 0x0a:
out += "\n"
isnewline = 1
continue
if oc < 0x20 or oc >= 0x7f and oc != 0x09:
out += "<%02x>" % oc
continue
out += c
sys.stdout.write(out)
sys.stdout.flush()
logfile.write(out)
logfile.flush()
def main():
usage = "%prog [options] [<serialdevice> [<baud>]]"
opts = optparse.OptionParser(usage)
opts.add_option("-f", "--file",
action="store_false", dest="serial", default=True,
help="read from file instead of serialdevice")
opts.add_option("-n", "--no-adjust",
action="store_false", dest="adjustbaud", default=True,
help="don't adjust times by serial rate")
opts.add_option("-c", "--calibrate-read",
action="store_true", dest="calibrate_read", default=False,
help="read from serial port to calibrate it")
opts.add_option("-C", "--calibrate-write",
action="store_true", dest="calibrate_write", default=False,
help="write to serial port to calibrate it")
opts.add_option("-t", "--time",
type="float", dest="time", default=None,
help="time to write one byte on serial port (in us)")
options, args = opts.parse_args()
serialport = 0
baud = 115200
if len(args) > 2:
opts.error("Too many arguments")
if len(args) > 0:
serialport = args[0]
if len(args) > 1:
baud = int(args[1])
byteadjust = float(BITSPERBYTE) / baud
if options.time is not None:
byteadjust = options.time / 1000000.0
if not options.adjustbaud:
byteadjust = 0.0
if options.serial:
# Read from serial port
try:
import serial
except ImportError:
print """
Unable to find pyserial package ( http://pyserial.sourceforge.net/ ).
On Linux machines try: yum install pyserial
Or: apt-get install python-serial
"""
sys.exit(1)
ser = serial.Serial(serialport, baud, timeout=0)
else:
# Read from a file
ser = open(serialport, 'rb')
import fcntl
import os
fcntl.fcntl(ser, fcntl.F_SETFL
, fcntl.fcntl(ser, fcntl.F_GETFL) | os.O_NONBLOCK)
if options.calibrate_read:
calibrateserialread(ser, byteadjust)
return
if options.calibrate_write:
calibrateserialwrite(ser, byteadjust)
return
logname = time.strftime("seriallog-%Y%m%d_%H%M%S.log")
f = open(logname, 'wb')
readserial(ser, f, byteadjust)
if __name__ == '__main__':
main()
| gpl-2.0 |
nathanaevitas/odoo | openerp/addons/auth_signup/res_users.py | 8 | 15061 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
import random
from urlparse import urljoin
import werkzeug
from openerp.addons.base.ir.ir_mail_server import MailDeliveryException
from openerp.osv import osv, fields
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT, ustr
from ast import literal_eval
from openerp.tools.translate import _
class SignupError(Exception):
pass
def random_token():
# the token has an entropy of about 120 bits (6 bits/char * 20 chars)
chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
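# e.g. 'kzAqR3Kc0TrNJsCQ6Tb4' (illustrative output: 20 characters drawn from 62 alternatives)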
return ''.join(random.SystemRandom().choice(chars) for i in xrange(20))
def now(**kwargs):
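# e.g. now(days=+1) gives the same moment tomorrow, formatted with DEFAULT_SERVER_DATETIME_FORMAT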
dt = datetime.now() + timedelta(**kwargs)
return dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
class res_partner(osv.Model):
_inherit = 'res.partner'
def _get_signup_valid(self, cr, uid, ids, name, arg, context=None):
dt = now()
res = {}
for partner in self.browse(cr, uid, ids, context):
res[partner.id] = bool(partner.signup_token) and \
(not partner.signup_expiration or dt <= partner.signup_expiration)
return res
def _get_signup_url_for_action(self, cr, uid, ids, action=None, view_type=None, menu_id=None, res_id=None, model=None, context=None):
""" generate a signup url for the given partner ids and action, possibly overriding
the url state components (menu_id, id, view_type) """
if context is None:
context = {}
res = dict.fromkeys(ids, False)
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
for partner in self.browse(cr, uid, ids, context):
# when required, make sure the partner has a valid signup token
if context.get('signup_valid') and not partner.user_ids:
self.signup_prepare(cr, uid, [partner.id], context=context)
route = 'login'
# the parameters to encode for the query
query = dict(db=cr.dbname)
signup_type = context.get('signup_force_type_in_url', partner.signup_type or '')
if signup_type:
route = 'reset_password' if signup_type == 'reset' else signup_type
if partner.signup_token and signup_type:
query['token'] = partner.signup_token
elif partner.user_ids:
query['login'] = partner.user_ids[0].login
else:
continue # no signup token, no user, thus no signup url!
fragment = dict()
if action:
fragment['action'] = action
if view_type:
fragment['view_type'] = view_type
if menu_id:
fragment['menu_id'] = menu_id
if model:
fragment['model'] = model
if res_id:
fragment['id'] = res_id
if fragment:
query['redirect'] = '/web#' + werkzeug.url_encode(fragment)
res[partner.id] = urljoin(base_url, "/web/%s?%s" % (route, werkzeug.url_encode(query)))
return res
def _get_signup_url(self, cr, uid, ids, name, arg, context=None):
""" proxy for function field towards actual implementation """
return self._get_signup_url_for_action(cr, uid, ids, context=context)
_columns = {
'signup_token': fields.char('Signup Token', copy=False),
'signup_type': fields.char('Signup Token Type', copy=False),
'signup_expiration': fields.datetime('Signup Expiration', copy=False),
'signup_valid': fields.function(_get_signup_valid, type='boolean', string='Signup Token is Valid'),
'signup_url': fields.function(_get_signup_url, type='char', string='Signup URL'),
}
def action_signup_prepare(self, cr, uid, ids, context=None):
return self.signup_prepare(cr, uid, ids, context=context)
def signup_cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'signup_token': False, 'signup_type': False, 'signup_expiration': False}, context=context)
def signup_prepare(self, cr, uid, ids, signup_type="signup", expiration=False, context=None):
""" generate a new token for the partners with the given validity, if necessary
:param expiration: the expiration datetime of the token (string, optional)
"""
for partner in self.browse(cr, uid, ids, context):
if expiration or not partner.signup_valid:
token = random_token()
while self._signup_retrieve_partner(cr, uid, token, context=context):
token = random_token()
partner.write({'signup_token': token, 'signup_type': signup_type, 'signup_expiration': expiration})
return True
def _signup_retrieve_partner(self, cr, uid, token,
check_validity=False, raise_exception=False, context=None):
""" find the partner corresponding to a token, and possibly check its validity
:param token: the token to resolve
:param check_validity: if True, also check validity
:param raise_exception: if True, raise exception instead of returning False
:return: partner (browse record) or False (if raise_exception is False)
"""
partner_ids = self.search(cr, uid, [('signup_token', '=', token)], context=context)
if not partner_ids:
if raise_exception:
raise SignupError("Signup token '%s' is not valid" % token)
return False
partner = self.browse(cr, uid, partner_ids[0], context)
if check_validity and not partner.signup_valid:
if raise_exception:
raise SignupError("Signup token '%s' is no longer valid" % token)
return False
return partner
def signup_retrieve_info(self, cr, uid, token, context=None):
""" retrieve the user info about the token
:return: a dictionary with the user information:
- 'db': the name of the database
- 'token': the token, if token is valid
- 'name': the name of the partner, if token is valid
- 'login': the user login, if the user already exists
- 'email': the partner email, if the user does not exist
"""
partner = self._signup_retrieve_partner(cr, uid, token, raise_exception=True, context=context)
res = {'db': cr.dbname}
if partner.signup_valid:
res['token'] = token
res['name'] = partner.name
if partner.user_ids:
res['login'] = partner.user_ids[0].login
else:
res['email'] = partner.email or ''
return res
class res_users(osv.Model):
_inherit = 'res.users'
def _get_state(self, cr, uid, ids, name, arg, context=None):
res = {}
for user in self.browse(cr, uid, ids, context):
res[user.id] = ('active' if user.login_date else 'new')
return res
_columns = {
'state': fields.function(_get_state, string='Status', type='selection',
selection=[('new', 'Never Connected'), ('active', 'Activated')]),
}
def signup(self, cr, uid, values, token=None, context=None):
""" signup a user, to either:
- create a new user (no token), or
- create a user for a partner (with token, but no user for partner), or
- change the password of a user (with token, and existing user).
:param values: a dictionary with field values that are written on user
:param token: signup token (optional)
:return: (dbname, login, password) for the signed up user
"""
if token:
# signup with a token: find the corresponding partner id
res_partner = self.pool.get('res.partner')
partner = res_partner._signup_retrieve_partner(
cr, uid, token, check_validity=True, raise_exception=True, context=context)
# invalidate signup token
partner.write({'signup_token': False, 'signup_type': False, 'signup_expiration': False})
partner_user = partner.user_ids and partner.user_ids[0] or False
# avoid overwriting existing (presumably correct) values with geolocation data
if partner.country_id or partner.zip or partner.city:
values.pop('city', None)
values.pop('country_id', None)
if partner.lang:
values.pop('lang', None)
if partner_user:
# user exists, modify it according to values
values.pop('login', None)
values.pop('name', None)
partner_user.write(values)
return (cr.dbname, partner_user.login, values.get('password'))
else:
# user does not exist: sign up invited user
values.update({
'name': partner.name,
'partner_id': partner.id,
'email': values.get('email') or values.get('login'),
})
if partner.company_id:
values['company_id'] = partner.company_id.id
values['company_ids'] = [(6, 0, [partner.company_id.id])]
self._signup_create_user(cr, uid, values, context=context)
else:
# no token, sign up an external user
values['email'] = values.get('email') or values.get('login')
self._signup_create_user(cr, uid, values, context=context)
return (cr.dbname, values.get('login'), values.get('password'))
def _signup_create_user(self, cr, uid, values, context=None):
""" create a new user from the template user """
ir_config_parameter = self.pool.get('ir.config_parameter')
template_user_id = literal_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.template_user_id', 'False'))
assert template_user_id and self.exists(cr, uid, template_user_id, context=context), 'Signup: invalid template user'
# check that uninvited users may sign up
if 'partner_id' not in values:
if not literal_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.allow_uninvited', 'False')):
raise SignupError('Signup is not allowed for uninvited users')
assert values.get('login'), "Signup: no login given for new user"
assert values.get('partner_id') or values.get('name'), "Signup: no name or partner given for new user"
# create a copy of the template user (attached to a specific partner_id if given)
values['active'] = True
context = dict(context or {}, no_reset_password=True)
try:
with cr.savepoint():
return self.copy(cr, uid, template_user_id, values, context=context)
except Exception, e:
# copy may fail if the requested login is not available.
raise SignupError(ustr(e))
def reset_password(self, cr, uid, login, context=None):
""" retrieve the user corresponding to login (login or email),
and reset their password
"""
user_ids = self.search(cr, uid, [('login', '=', login)], context=context)
if not user_ids:
user_ids = self.search(cr, uid, [('email', '=', login)], context=context)
if len(user_ids) != 1:
raise Exception(_('Reset password: invalid username or email'))
return self.action_reset_password(cr, uid, user_ids, context=context)
def action_reset_password(self, cr, uid, ids, context=None):
""" create signup token for each user, and send their signup url by email """
# prepare reset password signup
res_partner = self.pool.get('res.partner')
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context)]
res_partner.signup_prepare(cr, uid, partner_ids, signup_type="reset", expiration=now(days=+1), context=context)
if not context:
context = {}
# send email to users with their signup url
template = False
if context.get('create_user'):
try:
# get_object() raises ValueError if record does not exist
template = self.pool.get('ir.model.data').get_object(cr, uid, 'auth_signup', 'set_password_email')
except ValueError:
pass
if not bool(template):
template = self.pool.get('ir.model.data').get_object(cr, uid, 'auth_signup', 'reset_password_email')
assert template._name == 'email.template'
for user in self.browse(cr, uid, ids, context):
if not user.email:
raise osv.except_osv(_("Cannot send email: user has no email address."), user.name)
self.pool.get('email.template').send_mail(cr, uid, template.id, user.id, force_send=True, raise_exception=True, context=context)
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
# overridden to automatically invite user to sign up
user_id = super(res_users, self).create(cr, uid, values, context=context)
user = self.browse(cr, uid, user_id, context=context)
if user.email and not context.get('no_reset_password'):
context = dict(context, create_user=True)
try:
self.action_reset_password(cr, uid, [user.id], context=context)
except MailDeliveryException:
self.pool.get('res.partner').signup_cancel(cr, uid, [user.partner_id.id], context=context)
return user_id
def copy(self, cr, uid, id, default=None, context=None):
if not default or not default.get('email'):
# avoid sending email to the user we are duplicating
context = dict(context or {}, reset_password=False)
return super(res_users, self).copy(cr, uid, id, default=default, context=context)
| agpl-3.0 |
mozilla/normandy | normandy/recipes/migrations/0007_convert_simple_filters_to_filter_objects.py | 1 | 2458 | import json
from django.db import migrations
def simple_filters_to_filter_objects(apps, schema_editor):
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
for revision in RecipeRevision.objects.all():
filter_object = []
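# collect entries in the filter-object shape, e.g. {"type": "locale", "locales": ["en-US"]}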
if revision.locales.count():
filter_object.append(
{"type": "locale", "locales": [locale.code for locale in revision.locales.all()]}
)
revision.locales.set([])
if revision.countries.count():
filter_object.append(
{"type": "country", "countries": [c.code for c in revision.countries.all()]}
)
revision.countries.set([])
if revision.channels.count():
filter_object.append(
{"type": "channel", "channels": [c.slug for c in revision.channels.all()]}
)
revision.channels.set([])
if filter_object:
revision.filter_object_json = json.dumps(filter_object)
revision.save()
def filter_objects_to_simple_filters(apps, schema_editor):
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
Channel = apps.get_model("recipes", "Channel")
Country = apps.get_model("recipes", "Country")
Locale = apps.get_model("recipes", "Locale")
for revision in RecipeRevision.objects.all():
filter_object = json.loads(revision.filter_object_json)
remaining_filters = []
for filter in filter_object:
if filter["type"] == "channel":
revision.channels.set([Channel.objects.get(slug=c) for c in filter["channels"]])
elif filter["type"] == "country":
revision.countries.set([Country.objects.get(code=c) for c in filter["countries"]])
elif filter["type"] == "locale":
revision.locales.set(
[Locale.objects.get(code=locale) for locale in filter["locales"]]
)
else:
remaining_filters.append(filter)
if remaining_filters:
revision.filter_object_json = json.dumps(remaining_filters)
else:
revision.filter_object_json = None
revision.save()
class Migration(migrations.Migration):
dependencies = [("recipes", "0006_reciperevision_filter_object_json")]
operations = [
migrations.RunPython(simple_filters_to_filter_objects, filter_objects_to_simple_filters)
]
| mpl-2.0 |
AndreasMadsen/gensim | gensim/test/test_lee.py | 15 | 5404 | #!/usr/bin/env python
# encoding: utf-8
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated test to reproduce the results of Lee et al. (2005)
Lee et al. (2005) compares different models for semantic
similarity and verifies the results with similarity judgements from humans.
As a validation of the gensim implementation we reproduced the results
of Lee et al. (2005) in this test.
Many thanks to Michael D. Lee ([email protected]) who provided us
with his corpus and similarity data.
If you need to reference this dataset, please cite:
Lee, M., Pincombe, B., & Welsh, M. (2005).
An empirical evaluation of models of text document similarity.
Proceedings of the 27th Annual Conference of the Cognitive Science Society
"""
from __future__ import with_statement
import logging
import os.path
import unittest
import numpy as np
from gensim import corpora, models, utils, matutils
from gensim.parsing.preprocessing import preprocess_documents, preprocess_string, DEFAULT_FILTERS
bg_corpus = None
corpus = None
human_sim_vector = None
class TestLeeTest(unittest.TestCase):
def setUp(self):
"""setup lee test corpora"""
global bg_corpus, corpus, human_sim_vector, bg_corpus2, corpus2
pre_path = os.path.join(os.path.dirname(__file__), 'test_data')
bg_corpus_file = 'lee_background.cor'
corpus_file = 'lee.cor'
sim_file = 'similarities0-1.txt'
# read in the corpora
latin1 = lambda line: utils.to_unicode(line, encoding='latin1')
with utils.smart_open(os.path.join(pre_path, bg_corpus_file)) as f:
bg_corpus = preprocess_documents(latin1(line) for line in f)
with utils.smart_open(os.path.join(pre_path, corpus_file)) as f:
corpus = preprocess_documents(latin1(line) for line in f)
with utils.smart_open(os.path.join(pre_path, bg_corpus_file)) as f:
bg_corpus2 = [preprocess_string(latin1(s), filters=DEFAULT_FILTERS[:-1]) for s in f]
with utils.smart_open(os.path.join(pre_path, corpus_file)) as f:
corpus2 = [preprocess_string(latin1(s), filters=DEFAULT_FILTERS[:-1]) for s in f]
# read the human similarity data
sim_matrix = np.loadtxt(os.path.join(pre_path, sim_file))
sim_m_size = np.shape(sim_matrix)[0]
human_sim_vector = sim_matrix[matutils.triu_indices(sim_m_size, 1)]
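# triu_indices(n, 1) selects the strictly upper triangle, i.e. each unordered document pair exactly once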
def test_corpus(self):
"""availability and integrity of corpus"""
documents_in_bg_corpus = 300
documents_in_corpus = 50
len_sim_vector = 1225
self.assertEqual(len(bg_corpus), documents_in_bg_corpus)
self.assertEqual(len(corpus), documents_in_corpus)
self.assertEqual(len(human_sim_vector), len_sim_vector)
def test_lee(self):
"""correlation with human data > 0.6
(this is the value which was achieved in the original paper)
"""
global bg_corpus, corpus
# create a dictionary and corpus (bag of words)
dictionary = corpora.Dictionary(bg_corpus)
bg_corpus = [dictionary.doc2bow(text) for text in bg_corpus]
corpus = [dictionary.doc2bow(text) for text in corpus]
# transform the bag of words with log_entropy normalization
log_ent = models.LogEntropyModel(bg_corpus)
bg_corpus_ent = log_ent[bg_corpus]
# initialize an LSI transformation from background corpus
lsi = models.LsiModel(bg_corpus_ent, id2word=dictionary, num_topics=200)
# transform small corpus to lsi bow->log_ent->fold-in-lsi
corpus_lsi = lsi[log_ent[corpus]]
# compute pairwise similarity matrix and extract upper triangular
res = np.zeros((len(corpus), len(corpus)))
for i, par1 in enumerate(corpus_lsi):
for j, par2 in enumerate(corpus_lsi):
res[i, j] = matutils.cossim(par1, par2)
flat = res[matutils.triu_indices(len(corpus), 1)]
cor = np.corrcoef(flat, human_sim_vector)[0, 1]
logging.info("LSI correlation coefficient is %s" % cor)
self.assertTrue(cor > 0.6)
# def test_lee_mallet(self):
# global bg_corpus, corpus, bg_corpus2, corpus2
# # create a dictionary and corpus (bag of words)
# dictionary = corpora.Dictionary(bg_corpus2)
# bg_corpus = [dictionary.doc2bow(text) for text in bg_corpus2]
# corpus = [dictionary.doc2bow(text) for text in corpus2]
# # initialize an LDA transformation from background corpus
# lda = models.LdaMallet('/Users/kofola/Downloads/mallet-2.0.7/bin/mallet',
# corpus=bg_corpus, id2word=dictionary, num_topics=200, optimize_interval=10)
# corpus_lda = lda[corpus]
# # compute pairwise similarity matrix and extract upper triangular
# res = np.zeros((len(corpus), len(corpus)))
# for i, par1 in enumerate(corpus_lda):
# for j, par2 in enumerate(corpus_lda):
# res[i, j] = matutils.cossim(par1, par2)
# flat = res[matutils.triu_indices(len(corpus), 1)]
# cor = np.corrcoef(flat, human_sim_vector)[0, 1]
# logging.info("LDA correlation coefficient is %s" % cor)
# self.assertTrue(cor > 0.35)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| gpl-3.0 |
oliverlee/sympy | sympy/interactive/session.py | 43 | 15119 | """Tools for setting up interactive sessions. """
from __future__ import print_function, division
from distutils.version import LooseVersion as V
from sympy.core.compatibility import range
from sympy.external import import_module
from sympy.interactive.printing import init_printing
preexec_source = """\
from __future__ import division
from sympy import *
x, y, z, t = symbols('x y z t')
k, m, n = symbols('k m n', integer=True)
f, g, h = symbols('f g h', cls=Function)
init_printing()
"""
verbose_message = """\
These commands were executed:
%(source)s
Documentation can be found at http://docs.sympy.org/%(version)s
"""
no_ipython = """\
Couldn't locate IPython. Having IPython installed is greatly recommended.
See http://ipython.scipy.org for more details. If you use Debian/Ubuntu,
just install the 'ipython' package and start isympy again.
"""
def _make_message(ipython=True, quiet=False, source=None):
"""Create a banner for an interactive session. """
from sympy import __version__ as sympy_version
from sympy.polys.domains import GROUND_TYPES
from sympy.utilities.misc import ARCH
from sympy import SYMPY_DEBUG
import sys
import os
python_version = "%d.%d.%d" % sys.version_info[:3]
if ipython:
shell_name = "IPython"
else:
shell_name = "Python"
info = ['ground types: %s' % GROUND_TYPES]
cache = os.getenv('SYMPY_USE_CACHE')
if cache is not None and cache.lower() == 'no':
info.append('cache: off')
if SYMPY_DEBUG:
info.append('debugging: on')
args = shell_name, sympy_version, python_version, ARCH, ', '.join(info)
message = "%s console for SymPy %s (Python %s-%s) (%s)\n" % args
if not quiet:
if source is None:
source = preexec_source
_source = ""
for line in source.split('\n')[:-1]:
if not line:
_source += '\n'
else:
_source += '>>> ' + line + '\n'
doc_version = sympy_version
if 'dev' in doc_version:
doc_version = "dev"
else:
doc_version = "%s.%s.%s/" % tuple(doc_version.split('.')[:3])
message += '\n' + verbose_message % {'source': _source,
'version': doc_version}
return message
def int_to_Integer(s):
"""
Wrap integer literals with Integer.
This is based on the decistmt example from
http://docs.python.org/library/tokenize.html.
Only integer literals are converted. Float literals are left alone.
Examples
========
>>> from __future__ import division
>>> from sympy.interactive.session import int_to_Integer
>>> from sympy import Integer
>>> s = '1.2 + 1/2 - 0x12 + a1'
>>> int_to_Integer(s)
'1.2 +Integer (1 )/Integer (2 )-Integer (0x12 )+a1 '
>>> s = 'print (1/2)'
>>> int_to_Integer(s)
'print (Integer (1 )/Integer (2 ))'
>>> exec(s)
0.5
>>> exec(int_to_Integer(s))
1/2
"""
from tokenize import generate_tokens, untokenize, NUMBER, NAME, OP
from sympy.core.compatibility import StringIO
def _is_int(num):
"""
Returns true if string value num (with token NUMBER) represents an integer.
"""
# XXX: Is there something in the standard library that will do this?
if '.' in num or 'j' in num.lower() or 'e' in num.lower():
return False
return True
result = []
g = generate_tokens(StringIO(s).readline) # tokenize the string
for toknum, tokval, _, _, _ in g:
if toknum == NUMBER and _is_int(tokval): # replace NUMBER tokens
result.extend([
(NAME, 'Integer'),
(OP, '('),
(NUMBER, tokval),
(OP, ')')
])
else:
result.append((toknum, tokval))
return untokenize(result)
def enable_automatic_int_sympification(app):
"""
Allow IPython to automatically convert integer literals to Integer.
"""
hasshell = hasattr(app, 'shell')
import ast
if hasshell:
old_run_cell = app.shell.run_cell
else:
old_run_cell = app.run_cell
def my_run_cell(cell, *args, **kwargs):
try:
# Check the cell for syntax errors. This way, the syntax error
# will show the original input, not the transformed input. The
# downside here is that IPython magic like %timeit will not work
# with transformed input (but on the other hand, IPython magic
# that doesn't expect transformed input will continue to work).
ast.parse(cell)
except SyntaxError:
pass
else:
cell = int_to_Integer(cell)
old_run_cell(cell, *args, **kwargs)
if hasshell:
app.shell.run_cell = my_run_cell
else:
app.run_cell = my_run_cell
def enable_automatic_symbols(app):
"""Allow IPython to automatially create symbols (``isympy -a``). """
# XXX: This should perhaps use tokenize, like int_to_Integer() above.
# This would avoid re-executing the code, which can lead to subtle
# issues. For example:
#
# In [1]: a = 1
#
# In [2]: for i in range(10):
# ...: a += 1
# ...:
#
# In [3]: a
# Out[3]: 11
#
# In [4]: a = 1
#
# In [5]: for i in range(10):
# ...: a += 1
# ...: print b
# ...:
# b
# b
# b
# b
# b
# b
# b
# b
# b
# b
#
# In [6]: a
# Out[6]: 12
#
# Note how the for loop is executed again because `b` was not defined, but `a`
# was already incremented once, so the result is that it is incremented
# multiple times.
import re
re_nameerror = re.compile(
"name '(?P<symbol>[A-Za-z_][A-Za-z0-9_]*)' is not defined")
def _handler(self, etype, value, tb, tb_offset=None):
"""Handle :exc:`NameError` exception and allow injection of missing symbols. """
if etype is NameError and tb.tb_next and not tb.tb_next.tb_next:
match = re_nameerror.match(str(value))
if match is not None:
# XXX: Make sure Symbol is in scope. Otherwise you'll get infinite recursion.
self.run_cell("%(symbol)s = Symbol('%(symbol)s')" %
{'symbol': match.group("symbol")}, store_history=False)
try:
code = self.user_ns['In'][-1]
except (KeyError, IndexError):
pass
else:
self.run_cell(code, store_history=False)
return None
finally:
self.run_cell("del %s" % match.group("symbol"),
store_history=False)
stb = self.InteractiveTB.structured_traceback(
etype, value, tb, tb_offset=tb_offset)
self._showtraceback(etype, value, stb)
if hasattr(app, 'shell'):
app.shell.set_custom_exc((NameError,), _handler)
else:
# This was restructured in IPython 0.13
app.set_custom_exc((NameError,), _handler)
def init_ipython_session(argv=[], auto_symbols=False, auto_int_to_Integer=False):
"""Construct new IPython session. """
import IPython
if V(IPython.__version__) >= '0.11':
# use an app to parse the command line, and init config
# IPython 1.0 deprecates the frontend module, so we import directly
# from the terminal module to prevent a deprecation message from being
# shown.
if V(IPython.__version__) >= '1.0':
from IPython.terminal import ipapp
else:
from IPython.frontend.terminal import ipapp
app = ipapp.TerminalIPythonApp()
# don't draw IPython banner during initialization:
app.display_banner = False
app.initialize(argv)
if auto_symbols:
readline = import_module("readline")
if readline:
enable_automatic_symbols(app)
if auto_int_to_Integer:
enable_automatic_int_sympification(app)
return app.shell
else:
from IPython.Shell import make_IPython
return make_IPython(argv)
def init_python_session():
"""Construct new Python session. """
from code import InteractiveConsole
class SymPyConsole(InteractiveConsole):
"""An interactive console with readline support. """
def __init__(self):
InteractiveConsole.__init__(self)
try:
import readline
except ImportError:
pass
else:
import os
import atexit
readline.parse_and_bind('tab: complete')
if hasattr(readline, 'read_history_file'):
history = os.path.expanduser('~/.sympy-history')
try:
readline.read_history_file(history)
except IOError:
pass
atexit.register(readline.write_history_file, history)
return SymPyConsole()
def init_session(ipython=None, pretty_print=True, order=None,
use_unicode=None, use_latex=None, quiet=False, auto_symbols=False,
auto_int_to_Integer=False, argv=[]):
"""
Initialize an embedded IPython or Python session. The IPython session is
initiated with the --pylab option, without the numpy imports, so that
matplotlib plotting can be interactive.
Parameters
==========
pretty_print: boolean
If True, use pretty_print to stringify;
if False, use sstrrepr to stringify.
order: string or None
There are a few different settings for this parameter:
lex (default), which is lexicographic order;
grlex, which is graded lexicographic order;
grevlex, which is reversed graded lexicographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode: boolean or None
If True, use unicode characters;
if False, do not use unicode characters.
use_latex: boolean or None
If True, use latex rendering if IPython GUI's;
if False, do not use latex rendering.
quiet: boolean
If True, init_session will not print messages regarding its status;
if False, init_session will print messages regarding its status.
auto_symbols: boolean
If True, IPython will automatically create symbols for you.
If False, it will not.
The default is False.
auto_int_to_Integer: boolean
If True, IPython will automatically wrap int literals with Integer, so
that things like 1/2 give Rational(1, 2).
If False, it will not.
The default is False.
ipython: boolean or None
If True, printing will initialize for an IPython console;
if False, printing will initialize for a normal console;
The default is None, which automatically determines whether we are in
an ipython instance or not.
argv: list of arguments for IPython
See sympy.bin.isympy for options that can be used to initialize IPython.
See Also
========
sympy.interactive.printing.init_printing: for examples and the rest of the parameters.
Examples
========
>>> from sympy import init_session, Symbol, sin, sqrt
>>> sin(x) #doctest: +SKIP
NameError: name 'x' is not defined
>>> init_session() #doctest: +SKIP
>>> sin(x) #doctest: +SKIP
sin(x)
>>> sqrt(5) #doctest: +SKIP
___
\/ 5
>>> init_session(pretty_print=False) #doctest: +SKIP
>>> sqrt(5) #doctest: +SKIP
sqrt(5)
>>> y + x + y**2 + x**2 #doctest: +SKIP
x**2 + x + y**2 + y
>>> init_session(order='grlex') #doctest: +SKIP
>>> y + x + y**2 + x**2 #doctest: +SKIP
x**2 + y**2 + x + y
>>> init_session(order='grevlex') #doctest: +SKIP
>>> y * x**2 + x * y**2 #doctest: +SKIP
x**2*y + x*y**2
>>> init_session(order='old') #doctest: +SKIP
>>> x**2 + y**2 + x + y #doctest: +SKIP
x + y + x**2 + y**2
>>> theta = Symbol('theta') #doctest: +SKIP
>>> theta #doctest: +SKIP
theta
>>> init_session(use_unicode=True) #doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
"""
import sys
in_ipython = False
if ipython is not False:
try:
import IPython
except ImportError:
if ipython is True:
raise RuntimeError("IPython is not available on this system")
ip = None
else:
if V(IPython.__version__) >= '0.11':
try:
ip = get_ipython()
except NameError:
ip = None
else:
ip = IPython.ipapi.get()
if ip:
ip = ip.IP
in_ipython = bool(ip)
if ipython is None:
ipython = in_ipython
if ipython is False:
ip = init_python_session()
mainloop = ip.interact
else:
if ip is None:
ip = init_ipython_session(argv=argv, auto_symbols=auto_symbols,
auto_int_to_Integer=auto_int_to_Integer)
if V(IPython.__version__) >= '0.11':
# runsource is gone, use run_cell instead, which doesn't
# take a symbol arg. The second arg is `store_history`,
# and False means don't add the line to IPython's history.
ip.runsource = lambda src, symbol='exec': ip.run_cell(src, False)
# Enable interactive plotting using pylab.
try:
ip.enable_pylab(import_all=False)
except Exception:
# Causes an import error if matplotlib is not installed.
# Causes other errors (depending on the backend) if there
# is no display, or if there is some problem in the
# backend, so we have a bare "except Exception" here
pass
if not in_ipython:
mainloop = ip.mainloop
readline = import_module("readline")
if auto_symbols and (not ipython or V(IPython.__version__) < '0.11' or not readline):
raise RuntimeError("automatic construction of symbols is possible only in IPython 0.11 or above with readline support")
if auto_int_to_Integer and (not ipython or V(IPython.__version__) < '0.11'):
raise RuntimeError("automatic int to Integer transformation is possible only in IPython 0.11 or above")
_preexec_source = preexec_source
ip.runsource(_preexec_source, symbol='exec')
init_printing(pretty_print=pretty_print, order=order,
use_unicode=use_unicode, use_latex=use_latex, ip=ip)
message = _make_message(ipython, quiet, _preexec_source)
if not in_ipython:
mainloop(message)
sys.exit('Exiting ...')
else:
ip.write(message)
import atexit
atexit.register(lambda ip: ip.write("Exiting ...\n"), ip)
| bsd-3-clause |
sandeepgupta2k4/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/test_util.py | 139 | 1741 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for writing tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class Base(test.TestCase):
"""A class with some useful methods for testing."""
def eval(self, tensors):
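# runs the tensors inside an active queue-runner scope so queue-fed inputs keep producing values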
with self.test_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
try:
results = sess.run(tensors)
finally:
coord.request_stop()
coord.join(threads)
return results
def assertTensorsEqual(self, tensor_0, tensor_1):
[tensor_0_eval, tensor_1_eval] = self.eval([tensor_0, tensor_1])
self.assertAllEqual(tensor_0_eval, tensor_1_eval)
def assertLabeledTensorsEqual(self, tensor_0, tensor_1):
self.assertEqual(tensor_0.axes, tensor_1.axes)
self.assertTensorsEqual(tensor_0.tensor, tensor_1.tensor)
| apache-2.0 |
garrettcap/Bulletproof-Backup | Python2.7/lib/python2.7/site-packages/setuptools/command/upload_docs.py | 77 | 6975 | # -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
import os
import socket
import zipfile
import tempfile
import sys
import shutil
from base64 import standard_b64encode
from pkg_resources import iter_entry_points
from distutils import log
from distutils.errors import DistutilsOptionError
try:
from distutils.command.upload import upload
except ImportError:
from setuptools.command.upload import upload
from setuptools.compat import httplib, urlparse, unicode, iteritems
_IS_PYTHON3 = sys.version > '3'
if _IS_PYTHON3:
errors = 'surrogateescape'
else:
errors = 'strict'
# This is not just a replacement for byte literals
# but works as a general purpose encoder
def b(s, encoding='utf-8'):
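# e.g. b(u'caf\xe9') returns 'caf\xc3\xa9' (the UTF-8 bytes); plain str input passes through unchanged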
if isinstance(s, unicode):
return s.encode(encoding, errors)
return s
class upload_docs(upload):
description = 'Upload documentation to PyPI'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('upload-dir=', None, 'directory to upload'),
]
boolean_options = upload.boolean_options
def has_sphinx(self):
if self.upload_dir is None:
for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
return True
sub_commands = [('build_sphinx', has_sphinx)]
def initialize_options(self):
upload.initialize_options(self)
self.upload_dir = None
self.target_dir = None
def finalize_options(self):
upload.finalize_options(self)
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command('build_sphinx')
self.target_dir = build_sphinx.builder_target_dir
else:
build = self.get_finalized_command('build')
self.target_dir = os.path.join(build.build_base, 'docs')
else:
self.ensure_dirname('upload_dir')
self.target_dir = self.upload_dir
self.announce('Using upload directory %s' % self.target_dir)
def create_zipfile(self, filename):
zip_file = zipfile.ZipFile(filename, "w")
try:
self.mkpath(self.target_dir) # just in case
for root, dirs, files in os.walk(self.target_dir):
if root == self.target_dir and not files:
raise DistutilsOptionError(
"no files found in upload directory '%s'"
% self.target_dir)
for name in files:
full = os.path.join(root, name)
relative = root[len(self.target_dir):].lstrip(os.path.sep)
dest = os.path.join(relative, name)
zip_file.write(full, dest)
finally:
zip_file.close()
def run(self):
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
tmp_dir = tempfile.mkdtemp()
name = self.distribution.metadata.get_name()
zip_file = os.path.join(tmp_dir, "%s.zip" % name)
try:
self.create_zipfile(zip_file)
self.upload_file(zip_file)
finally:
shutil.rmtree(tmp_dir)
def upload_file(self, filename):
f = open(filename, 'rb')
content = f.read()
f.close()
meta = self.distribution.metadata
data = {
':action': 'doc_upload',
'name': meta.get_name(),
'content': (os.path.basename(filename), content),
}
# set up the authentication
credentials = b(self.username + ':' + self.password)
credentials = standard_b64encode(credentials)
if sys.version_info >= (3,):
credentials = credentials.decode('ascii')
auth = "Basic " + credentials
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b('\n--') + b(boundary)
end_boundary = sep_boundary + b('--')
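# multipart/form-data framing: each field starts after a '--boundary' line and
# the final boundary carries a trailing '--' (see RFC 2046)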
body = []
for key, values in iteritems(data):
title = '\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if type(values) != type([]):
values = [values]
for value in values:
if type(value) is tuple:
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = b(value)
body.append(sep_boundary)
body.append(b(title))
body.append(b("\n\n"))
body.append(value)
if value and value[-1:] == b('\r'):
body.append(b('\n')) # write an extra newline (lurve Macs)
body.append(end_boundary)
body.append(b("\n"))
body = b('').join(body)
self.announce("Submitting documentation to %s" % (self.repository),
log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
conn = httplib.HTTPConnection(netloc)
elif schema == 'https':
conn = httplib.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema "+schema)
data = ''
loglevel = log.INFO
try:
conn.connect()
conn.putrequest("POST", url)
conn.putheader('Content-type',
'multipart/form-data; boundary=%s'%boundary)
conn.putheader('Content-length', str(len(body)))
conn.putheader('Authorization', auth)
conn.endheaders()
conn.send(body)
except socket.error:
e = sys.exc_info()[1]
self.announce(str(e), log.ERROR)
return
r = conn.getresponse()
if r.status == 200:
self.announce('Server response (%s): %s' % (r.status, r.reason),
log.INFO)
elif r.status == 301:
location = r.getheader('Location')
if location is None:
location = 'https://pythonhosted.org/%s/' % meta.get_name()
self.announce('Upload successful. Visit %s' % location,
log.INFO)
else:
self.announce('Upload failed (%s): %s' % (r.status, r.reason),
log.ERROR)
if self.show_response:
print('-'*75, r.read(), '-'*75)
| gpl-2.0 |
jabibi/sale-workflow | sale_payment_term_interest/__openerp__.py | 15 | 1227 | # -*- coding: utf-8 -*-
#
#
# Authors: Guewen Baconnier
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{'name': 'Sales Payment Term Interests',
'version': '8.0.1.0.0',
'author': 'Camptocamp,Odoo Community Association (OCA)',
'license': 'AGPL-3',
'category': 'Accounting & Finance',
'depends': ['sale'],
'website': 'http://www.camptocamp.com',
'data': ['data/product_data.xml',
'view/sale_order_view.xml',
'view/account_payment_term_view.xml',
],
'test': [],
'installable': True,
'auto_install': False,
}
| agpl-3.0 |
Qalthos/ansible | lib/ansible/modules/windows/win_domain_membership.py | 51 | 3428 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
module: win_domain_membership
short_description: Manage domain/workgroup membership for a Windows host
version_added: '2.3'
description:
- Manages domain membership or workgroup membership for a Windows host. Also supports hostname changes.
- This module may require subsequent use of the M(win_reboot) action if changes are made.
options:
dns_domain_name:
description:
- When C(state) is C(domain), the DNS name of the domain to which the targeted Windows host should be joined.
type: str
domain_admin_user:
description:
- Username of a domain admin for the target domain (required to join or leave the domain).
type: str
required: yes
domain_admin_password:
description:
- Password for the specified C(domain_admin_user).
type: str
hostname:
description:
- The desired hostname for the Windows host.
type: str
domain_ou_path:
description:
- The desired OU path for adding the computer object.
    - This is only used when adding the target host to a domain; if it is already a member, it is ignored.
type: str
version_added: "2.4"
state:
description:
- Whether the target host should be a member of a domain or workgroup.
type: str
choices: [ domain, workgroup ]
workgroup_name:
description:
- When C(state) is C(workgroup), the name of the workgroup that the Windows host should be in.
type: str
seealso:
- module: win_domain
- module: win_domain_controller
- module: win_domain_computer
- module: win_domain_group
- module: win_domain_user
- module: win_group
- module: win_group_membership
- module: win_user
author:
- Matt Davis (@nitzmahone)
'''
RETURN = r'''
reboot_required:
description: True if changes were made that require a reboot.
returned: always
type: bool
sample: true
'''
EXAMPLES = r'''
# host should be a member of domain ansible.vagrant; module will ensure the hostname is mydomainclient
# and will use the passed credentials to join domain if necessary.
# Ansible connection should use local credentials if possible.
# If a reboot is required, the second task will trigger one and wait until the host is available.
- hosts: winclient
gather_facts: no
tasks:
- win_domain_membership:
dns_domain_name: ansible.vagrant
hostname: mydomainclient
domain_admin_user: [email protected]
domain_admin_password: password123!
domain_ou_path: "OU=Windows,OU=Servers,DC=ansible,DC=vagrant"
state: domain
register: domain_state
- win_reboot:
when: domain_state.reboot_required
# Host should be in workgroup mywg; the module will use the passed credentials to cleanly unjoin the domain if possible.
# Ansible connection should use local credentials if possible.
# The domain admin credentials can be sourced from a vault-encrypted variable
- hosts: winclient
gather_facts: no
tasks:
- win_domain_membership:
workgroup_name: mywg
domain_admin_user: '{{ win_domain_admin_user }}'
domain_admin_password: '{{ win_domain_admin_password }}'
state: workgroup
'''
| gpl-3.0 |
koditr/xbmc-tr-team-turkish-addons | script.module.html5lib/lib/html5lib/inputstream.py | 618 | 30855 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
from six.moves import http_client
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1, 0] # chunk number, offset
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
def seek(self, pos):
assert pos <= self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
def read(self, bytes):
if not self.buffer:
return self._readStream(bytes)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(bytes)
else:
return self._readFromBuffer(bytes)
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
def _readStream(self, bytes):
data = self.stream.read(bytes)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
def _readFromBuffer(self, bytes):
remainingBytes = bytes
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return b"".join(rv)
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
if isinstance(source, http_client.HTTPResponse):
# Work around Python bug #20007: read(0) closes the connection.
# http://bugs.python.org/issue20007
isUnicode = False
elif hasattr(source, "read"):
isUnicode = isinstance(source.read(0), text_type)
else:
isUnicode = isinstance(source, text_type)
if isUnicode:
if encoding is not None:
raise TypeError("Cannot explicitly set an encoding with a unicode string")
return HTMLUnicodeInputStream(source)
else:
return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
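# Hedged example of the factory above (not part of html5lib): byte input
# goes through encoding sniffing, so the confidence is only "tentative".
def _html_input_stream_demo():
    stream = HTMLInputStream(b'<meta charset="utf-8"><p>hi</p>')
    assert stream.charEncoding == ("utf-8", "tentative")
    assert stream.char() == "<"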
class HTMLUnicodeInputStream(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
        # Craziness: distinguish wide (UCS-4) from narrow (UCS-2) builds
if len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]")
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
# List of where new lines occur
self.newLines = [0]
self.charEncoding = ("utf-8", "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
self.reportCharacterErrors(data)
# Replace invalid characters
# Note U+0000 is dealt with in the tokenizer
data = self.replaceCharactersRegexp.sub("\ufffd", data)
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for i in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
        # Narrow (UCS-2) build: surrogate pairs have to be validated by hand
skip = False
for match in invalid_unicode_re.finditer(data):
if skip:
continue
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = "".join(["\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = "^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = "".join(rv)
return r
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
HTMLUnicodeInputStream.__init__(self, self.rawStream)
self.charEncoding = (codecName(encoding), "certain")
# Encoding Information
# Number of bytes to use when looking for a meta element with
# encoding information
self.numBytesMeta = 512
# Number of bytes to use when using detecting encoding using chardet
self.numBytesChardet = 100
# Encoding to use if no other information can be found
self.defaultEncoding = "windows-1252"
# Detect encoding iff no explicit "transport level" encoding is supplied
if (self.charEncoding[0] is None):
self.charEncoding = self.detectEncoding(parseMeta, chardet)
# Call superclass
self.reset()
def reset(self):
self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
'replace')
HTMLUnicodeInputStream.reset(self)
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = BytesIO(source)
try:
stream.seek(stream.tell())
except:
stream = BufferedStream(stream)
return stream
def detectEncoding(self, parseMeta=True, chardet=True):
# First look for a BOM
# This will also read past the BOM if present
encoding = self.detectBOM()
confidence = "certain"
# If there is no BOM need to look for meta elements with encoding
# information
if encoding is None and parseMeta:
encoding = self.detectEncodingMeta()
confidence = "tentative"
        # Guess with chardet, if available
if encoding is None and chardet:
confidence = "tentative"
try:
try:
from charade.universaldetector import UniversalDetector
except ImportError:
from chardet.universaldetector import UniversalDetector
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
assert isinstance(buffer, bytes)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = detector.result['encoding']
self.rawStream.seek(0)
except ImportError:
pass
# If all else fails use the default encoding
if encoding is None:
confidence = "tentative"
encoding = self.defaultEncoding
# Substitute for equivalent encodings:
encodingSub = {"iso-8859-1": "windows-1252"}
if encoding.lower() in encodingSub:
encoding = encodingSub[encoding.lower()]
return encoding, confidence
def changeEncoding(self, newEncoding):
assert self.charEncoding[1] != "certain"
newEncoding = codecName(newEncoding)
if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
newEncoding = "utf-8"
if newEncoding is None:
return
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
else:
self.rawStream.seek(0)
self.reset()
self.charEncoding = (newEncoding, "certain")
raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
assert isinstance(string, bytes)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
self.rawStream.seek(encoding and seek or 0)
return encoding
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, bytes)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
encoding = "utf-8"
return encoding
class EncodingBytes(bytes):
"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
def __new__(self, value):
assert isinstance(value, bytes)
return bytes.__new__(self, value.lower())
def __init__(self, value):
self._position = -1
def __iter__(self):
return self
def __next__(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p:p + 1]
def next(self):
# Py2 compat
return self.__next__()
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p:p + 1]
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position:self.position + 1]
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p:p + 1]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p:p + 1]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p + len(bytes)]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
newPosition = self[self.position:].find(bytes)
if newPosition > -1:
# XXX: This is ugly, but I can't see a nicer way to fix this.
if self._position == -1:
self._position = 0
self._position += (newPosition + len(bytes) - 1)
return True
else:
raise StopIteration
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
methodDispatch = (
(b"<!--", self.handleComment),
(b"<meta", self.handleMeta),
(b"</", self.handlePossibleEndTag),
(b"<!", self.handleOther),
(b"<?", self.handleOther),
(b"<", self.handlePossibleStartTag))
for byte in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing = False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo(b"-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
            # <meta is not followed by a space, so this is not a valid
            # meta element; just keep going
return True
# We have a valid meta element we want to search for attributes
hasPragma = False
pendingEncoding = None
while True:
# Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == b"http-equiv":
hasPragma = attr[1] == b"content-type"
if hasPragma and pendingEncoding is not None:
self.encoding = pendingEncoding
return False
elif attr[0] == b"charset":
tentativeEncoding = attr[1]
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == b"content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
if tentativeEncoding is not None:
codec = codecName(tentativeEncoding)
if codec is not None:
if hasPragma:
self.encoding = codec
return False
else:
pendingEncoding = codec
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
next(self.data)
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
# If the next byte is not an ascii letter either ignore this
# fragment (possible start tag case) or treat it according to
# handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == b"<":
# return to the first step in the overall "two step" algorithm
# reprocessing the < byte
data.previous()
else:
# Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(b">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
assert c is None or len(c) == 1
# Step 2
if c in (b">", None):
return None
# Step 3
attrName = []
attrValue = []
# Step 4 attribute name
while True:
if c == b"=" and attrName:
break
elif c in spaceCharactersBytes:
# Step 6!
c = data.skip()
break
elif c in (b"/", b">"):
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
elif c is None:
return None
else:
attrName.append(c)
# Step 5
c = next(data)
# Step 7
if c != b"=":
data.previous()
return b"".join(attrName), b""
# Step 8
next(data)
# Step 9
c = data.skip()
# Step 10
if c in (b"'", b'"'):
# 10.1
quoteChar = c
while True:
# 10.2
c = next(data)
# 10.3
if c == quoteChar:
next(data)
return b"".join(attrName), b"".join(attrValue)
# 10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
# 10.5
else:
attrValue.append(c)
elif c == b">":
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = next(data)
if c in spacesAngleBrackets:
return b"".join(attrName), b"".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
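# Hedged example (not part of html5lib): the mini-parser scans raw bytes
# for a charset declaration and reports the canonical codec name.
def _encoding_parser_demo():
    assert EncodingParser(b'<meta charset="UTF-8">').getEncoding() == 'utf-8'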
class ContentAttrParser(object):
def __init__(self, data):
assert isinstance(data, bytes)
self.data = data
def parse(self):
try:
# Check if the attr name is charset
# otherwise return
self.data.jumpTo(b"charset")
self.data.position += 1
self.data.skip()
if not self.data.currentByte == b"=":
# If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
# Look for an encoding between matching quote marks
if self.data.currentByte in (b'"', b"'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
# Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
# Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
def codecName(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if isinstance(encoding, bytes):
try:
encoding = encoding.decode("ascii")
except UnicodeDecodeError:
return None
if encoding:
canonicalName = ascii_punctuation_re.sub("", encoding).lower()
return encodings.get(canonicalName, None)
else:
return None
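# Hedged examples, assuming the usual label table in html5lib.constants:
# labels are lowercased and stripped of ASCII punctuation before lookup.
def _codec_name_demo():
    assert codecName("UTF-8") == "utf-8"
    assert codecName("no-such-encoding") is None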
| gpl-2.0 |
patsissons/Flexget | tests/test_whatcd.py | 13 | 1146 | from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase, use_vcr
class TestInputWhatCD(FlexGetBase):
__yaml__ = """
tasks:
no_fields:
whatcd:
no_user:
whatcd:
password: test
no_pass:
whatcd:
username: test
"""
def test_missing_fields(self):
self.execute_task('no_fields', abort_ok=True)
assert self.task.aborted, 'Task not aborted with no fields present'
self.execute_task('no_user', abort_ok=True)
assert self.task.aborted, 'Task not aborted with no username'
self.execute_task('no_pass', abort_ok=True)
assert self.task.aborted, 'Task not aborted with no password'
class TestWhatCDOnline(FlexGetBase):
__yaml__ = """
tasks:
badlogin:
whatcd:
username: invalid
password: invalid
"""
@use_vcr
def test_invalid_login(self):
self.execute_task("badlogin", abort_ok=True)
assert self.task.aborted, 'Task not aborted with invalid login credentials'
| mit |
madhurrajn/samashthi | lib/gevent/builtins.py | 9 | 4440 | # Copyright (c) 2015 gevent contributors. See LICENSE for details.
"""gevent friendly implementations of builtin functions."""
from __future__ import absolute_import
import imp # deprecated since 3.4; issues PendingDeprecationWarning in 3.5
import sys
import weakref
from gevent.lock import RLock
# Normally we'd have the "expected" case inside the try
# (Python 3, because Python 3 is the way forward). But
# under Python 2, the popular `future` library *also* provides
# a `builtins` module---which lacks the __import__ attribute.
# So we test for the old, deprecated version first
try: # Py2
import __builtin__ as builtins
_allowed_module_name_types = (basestring,)
__target__ = '__builtin__'
except ImportError:
import builtins
_allowed_module_name_types = (str,)
__target__ = 'builtins'
_import = builtins.__import__
# We need to protect imports both across threads and across greenlets.
# And the order matters. Note that under 3.4, the global import lock
# and imp module are deprecated. It seems that in all Py3 versions, a
# module lock is used such that this fix is not necessary.
# We emulate the per-module locking system under Python 2 in order to
# avoid issues acquiring locks in multiple-level-deep imports
# that attempt to use the gevent blocking API at runtime; using one lock
# could lead to a LoopExit error as a greenlet attempts to block on it while
# it's already held by the main greenlet (issue #798).
# We base this approach on a simplification of what `importlib._bootstrap`
# does; notably, we don't check for deadlocks
_g_import_locks = {} # name -> wref of RLock
__lock_imports = True
def __module_lock(name):
# Return the lock for the given module, creating it if necessary.
# It will be removed when no longer needed.
# Nothing in this function yields, so we're multi-greenlet safe
# (But not multi-threading safe.)
# XXX: What about on PyPy, where the GC is asynchronous (not ref-counting)?
# (Does it stop-the-world first?)
lock = None
try:
lock = _g_import_locks[name]()
except KeyError:
pass
if lock is None:
lock = RLock()
def cb(_):
# We've seen a KeyError on PyPy on RPi2
_g_import_locks.pop(name, None)
_g_import_locks[name] = weakref.ref(lock, cb)
return lock
def __import__(*args, **kwargs):
"""
__import__(name, globals=None, locals=None, fromlist=(), level=0) -> object
Normally python protects imports against concurrency by doing some locking
at the C level (at least, it does that in CPython). This function just
wraps the normal __import__ functionality in a recursive lock, ensuring that
we're protected against greenlet import concurrency as well.
"""
if len(args) > 0 and not issubclass(type(args[0]), _allowed_module_name_types):
# if a builtin has been acquired as a bound instance method,
# python knows not to pass 'self' when the method is called.
# No such protection exists for monkey-patched builtins,
# however, so this is necessary.
args = args[1:]
if not __lock_imports:
return _import(*args, **kwargs)
module_lock = __module_lock(args[0]) # Get a lock for the module name
imp.acquire_lock()
try:
module_lock.acquire()
try:
result = _import(*args, **kwargs)
finally:
module_lock.release()
finally:
imp.release_lock()
return result
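# Hedged sanity check (not part of gevent): the wrapper above is a drop-in
# replacement for the builtin __import__, serialising imports per module.
if __name__ == '__main__':
    _json = __import__('json')
    assert _json.loads('[1, 2]') == [1, 2]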
def _unlock_imports():
"""
Internal function, called when gevent needs to perform imports
lazily, but does not know the state of the system. It may be impossible
to take the import lock because there are no other running greenlets, for
    example. This causes a monkey-patched __import__ to avoid taking any locks
    until the corresponding call to _lock_imports. This should only be done for limited
amounts of time and when the set of imports is statically known to be "safe".
"""
global __lock_imports
# This could easily become a list that we push/pop from or an integer
# we increment if we need to do this recursively, but we shouldn't get
# that complex.
__lock_imports = False
def _lock_imports():
global __lock_imports
__lock_imports = True
if sys.version_info[:2] >= (3, 3):
__implements__ = []
else:
__implements__ = ['__import__']
__all__ = __implements__
| bsd-3-clause |
madhusudancs/test-infra | gubernator/github/classifier_test.py | 2 | 10021 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import classifier
class DeduperTest(unittest.TestCase):
@staticmethod
def dedup(obj):
return classifier.Deduper().dedup(obj)
def test_types(self):
a = (u'foo', 2, {'bar': ['foo', 'bar']})
self.assertEqual(self.dedup(a), a)
def test_dedupe(self):
# Python interns strings in structs, so...
a = ['foo', 'foo']
self.assertIs(a[0], a[1])
# Use json.loads to get around it
b = json.loads('["foo", "foo"]')
self.assertIsNot(b[0], b[1])
# When deduplicated, the strings are now the same object.
c = self.dedup(b)
self.assertIs(c[0], c[1])
class MergedTest(unittest.TestCase):
def test_merged(self):
self.assertEqual(classifier.get_merged(zip('abcd', [
{'issue': {'n': 1, 'a': 2}},
{'pull_request': {'n': 2, 'b': 3}},
{'c': 4},
{'issue': {'n': 3, 'd': 4},
'pull_request': {'n': 4, 'e': 5}}
], [0] * 4)), {'n': 4, 'a': 2, 'b': 3, 'd': 4, 'e': 5})
def diffs_to_events(*diffs):
events = []
for diff in diffs:
label = {'name': diff[1:], 'color': '#fff'}
if diff[0] == '+':
action = 'labeled'
elif diff[0] == '-':
action = 'unlabeled'
events.append(('pull_request',
{'action': action,
'label': label}, 0))
return events
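# Hedged illustration of the helper above: '+lgtm' turns into a synthetic
# "labeled" pull_request event ('-lgtm' would give an "unlabeled" one).
def _diffs_to_events_demo():
    assert diffs_to_events('+lgtm') == [
        ('pull_request',
         {'action': 'labeled', 'label': {'name': 'lgtm', 'color': '#fff'}},
         0)]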
class LabelsTest(unittest.TestCase):
def expect_labels(self, events, names):
labels = classifier.get_labels(events)
self.assertEqual(sorted(labels.keys()), sorted(names))
def test_empty(self):
self.expect_labels([('comment', {'body': 'no labels here'}, 0)], [])
def test_colors(self):
self.assertEqual(classifier.get_labels(
[('c', {'issue':
{'labels': [{'name': 'foo', 'color': '#abc'}]}
}, 0)]),
{'foo': '#abc'})
def test_labeled_action(self):
self.expect_labels(diffs_to_events('+a'), ['a'])
self.expect_labels(diffs_to_events('+a', '+a'), ['a'])
self.expect_labels(diffs_to_events('+a', '-a'), [])
self.expect_labels(diffs_to_events('+a', '+b', '-c', '-b'), ['a'])
def test_issue_overrides_action(self):
labels = [{'name': 'x', 'color': 'y'}]
self.expect_labels(diffs_to_events('+a') +
[('other_event', {'issue': {'labels': labels}}, 0)], ['x'])
def test_labeled_action_missing_label(self):
self.expect_labels([('pull_request', {'action': 'labeled'}, 0)], [])
def make_comment_event(num, name, msg='', event='issue_comment',
action='created', ts=None):
return event, {
'action': action,
'sender': {'login': name},
'comment': {
'id': num,
'user': {'login': name},
'body': msg,
'created_at': ts,
}
}, ts
class CalculateTest(unittest.TestCase):
def test_classify(self):
# A quick integration test to ensure that all the sub-parts are included.
# If this test fails, a smaller unit test SHOULD fail as well.
self.assertEqual(classifier.classify([
('pull_request', {
'pull_request': {
'state': 'open',
'user': {'login': 'a'},
'assignees': [{'login': 'b'}],
'title': 'some fix',
'head': {'sha': 'abcdef'},
'additions': 1,
'deletions': 1,
}
}, 1),
make_comment_event(1, 'k8s-bot',
'failure in https://k8s-gubernator.appspot.com/build/bucket/job/123/', ts=2),
('pull_request', {
'action': 'labeled',
'label': {'name': 'release-note-none', 'color': 'orange'},
}, 3)
], {'e2e': ['failure', None, 'stuff is broken']}
),
(True, True, ['a', 'b'],
{
'author': 'a',
'assignees': ['b'],
'additions': 1,
'deletions': 1,
'attn': {'a': 'fix tests', 'b': 'needs review#0#0'},
'title': 'some fix',
'labels': {'release-note-none': 'orange'},
'head': 'abcdef',
'needs_rebase': False,
'status': {'e2e': ['failure', None, 'stuff is broken']},
'xrefs': ['/bucket/job/123'],
}))
def test_distill(self):
self.assertEqual(classifier.distill_events([
make_comment_event(1, 'a', ts=1),
make_comment_event(2, 'b', ts=2),
make_comment_event(1, 'a', action='deleted', ts=3),
make_comment_event(3, 'c', event='pull_request_review_comment', ts=4),
make_comment_event(4, 'k8s-bot', ts=4),
('pull_request', {'action': 'synchronize', 'sender': {'login': 'auth'}}, 5),
('pull_request', {'action': 'labeled', 'sender': {'login': 'rev'},
'label': {'name': 'lgtm'}}, 6),
]),
[
('comment', 'b', 2),
('comment', 'c', 4),
('push', 'auth', 5),
('label lgtm', 'rev', 6),
])
def test_calculate_attention(self):
def expect(payload, events, expected_attn):
self.assertEqual(classifier.calculate_attention(events, payload),
expected_attn)
def make_payload(author, assignees=None, labels=None, **kwargs):
ret = {'author': author, 'assignees': assignees or [], 'labels': labels or []}
ret.update(kwargs)
return ret
expect(make_payload('alpha', needs_rebase=True), [],
{'alpha': 'needs rebase'})
expect(make_payload('beta', labels={'release-note-label-needed'}), [],
{'beta': 'needs release-note label'})
expect(make_payload('gamma', status={'ci': ['failure', '', '']}), [],
{'gamma': 'fix tests'})
expect(make_payload('gamma', status={'ci': ['failure', '', '']}),
[('comment', 'other', 1)],
{'gamma': 'address comments#1#1'})
expect(make_payload('delta', ['epsilon']), [],
{'epsilon': 'needs review#0#0'})
expect(make_payload('alpha', ['alpha']), [('comment', 'other', 1)],
{'alpha': 'address comments#1#1'})
def test_author_state(self):
def expect(events, result):
self.assertEqual(classifier.get_author_state('author', events),
result)
expect([], ('waiting', 0, 0))
expect([('comment', 'author', 1)], ('waiting', 0, 0))
expect([('comment', 'other', 1)], ('address comments', 1, 1))
expect([('comment', 'other', 1), ('push', 'author', 2)], ('waiting', 2, 2))
expect([('comment', 'other', 1), ('comment', 'author', 2)], ('waiting', 2, 2))
expect([('comment', 'other', 1), ('comment', 'other', 2)], ('address comments', 1, 2))
def test_assignee_state(self):
def expect(events, result):
self.assertEqual(classifier.get_assignee_state('me', 'author', events),
result)
expect([], ('needs review', 0, 0))
expect([('comment', 'other', 1)], ('needs review', 0, 0))
expect([('comment', 'me', 1)], ('waiting', 1, 1))
expect([('label lgtm', 'other', 1)], ('needs review', 0, 0))
expect([('label lgtm', 'me', 1)], ('waiting', 1, 1))
expect([('comment', 'me', 1), ('push', 'author', 2)], ('needs review', 2, 2))
expect([('comment', 'me', 1), ('comment', 'author', 2)], ('needs review', 2, 2))
expect([('comment', 'me', 1), ('comment', 'author', 2), ('comment', 'author', 3)],
('needs review', 2, 3))
def test_xrefs(self):
def expect(body, comments, result):
self.assertEqual(result, classifier.get_xrefs(
[{'comment': c} for c in comments], {'body': body}))
def fail(path):
return 'foobar https://k8s-gubernator.appspot.com/build%s asdf' % path
expect(None, [], [])
expect('something', [], [])
expect(fail('/a/b/34/'), [], ['/a/b/34'])
expect(None, [fail('/a/b/34/')], ['/a/b/34'])
expect(fail('/a/b/34/'), [fail('/a/b/34]')], ['/a/b/34'])
expect(fail('/a/b/34/)'), [fail('/a/b/35]')], ['/a/b/34', '/a/b/35'])
class CommentsTest(unittest.TestCase):
def test_basic(self):
self.assertEqual(classifier.get_comments([make_comment_event(1, 'aaa', 'msg', ts=2016)]),
[{'author': 'aaa', 'comment': 'msg', 'timestamp': 2016}])
def test_deleted(self):
self.assertEqual(classifier.get_comments([
make_comment_event(1, 'aaa', 'msg', 2016),
make_comment_event(1, None, None, None, action='deleted'),
make_comment_event(2, '', '', '', action='deleted')]),
[])
def test_edited(self):
self.assertEqual(classifier.get_comments([
make_comment_event(1, 'aaa', 'msg', ts=2016),
make_comment_event(1, 'aaa', 'redacted', ts=2016.1, action='edited')]),
[{'author': 'aaa', 'comment': 'redacted', 'timestamp': 2016.1}])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
dxwu/BinderFilter | resources/android-toolchain-16/lib/python2.7/lib2to3/fixes/fix_itertools.py | 148 | 1549 | """ Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
imports from itertools are fixed in fix_itertools_import.py
If itertools is imported under another name (e.g. import itertools as it;
it.izip(spam, eggs)), those method calls will not get fixed.
"""
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixItertools(fixer_base.BaseFix):
BM_compatible = True
it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')"
PATTERN = """
power< it='itertools'
trailer<
dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
|
power< func=%(it_funcs)s trailer< '(' [any] ')' > >
""" %(locals())
# Needs to be run after fix_(map|zip|filter)
run_order = 6
def transform(self, node, results):
prefix = None
func = results['func'][0]
if ('it' in results and
func.value not in (u'ifilterfalse', u'izip_longest')):
dot, it = (results['dot'], results['it'])
# Remove the 'itertools'
prefix = it.prefix
it.remove()
            # Replace the node which contains ('.', 'function') with the
            # function (to be consistent with the second part of the pattern)
dot.remove()
func.parent.replace(func)
prefix = prefix or func.prefix
func.replace(Name(func.value[1:], prefix=prefix))
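# Hedged usage sketch (not part of lib2to3): feeding this fixer to the
# refactoring engine rewrites itertools.imap(...) into plain map(...).
if __name__ == '__main__':
    from lib2to3.refactor import RefactoringTool
    rt = RefactoringTool(['lib2to3.fixes.fix_itertools'])
    tree = rt.refactor_string(u'import itertools\nitertools.imap(f, x)\n',
                              '<example>')
    assert str(tree) == 'import itertools\nmap(f, x)\n'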
| mit |
WillieMaddox/numpy | numpy/f2py/crackfortran.py | 44 | 126845 | #!/usr/bin/env python
"""
crackfortran --- read fortran (77,90) code and extract declaration information.
Copyright 1999-2004 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/09/27 07:13:49 $
Pearu Peterson
Usage of crackfortran:
======================
Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
-m <module name for f77 routines>,--ignore-contains
Functions: crackfortran, crack2fortran
The following Fortran statements/constructions are supported
(or will be if needed):
block data,byte,call,character,common,complex,contains,data,
dimension,double complex,double precision,end,external,function,
implicit,integer,intent,interface,intrinsic,
logical,module,optional,parameter,private,public,
program,real,(sequence?),subroutine,type,use,virtual,
include,pythonmodule
Note: 'virtual' is mapped to 'dimension'.
Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is a minor bug).
Note: code after 'contains' will be ignored until its scope ends.
Note: 'common' statement is extended: dimensions are moved to variable definitions
Note: f2py directive: <commentchar>f2py<line> is read as <line>
Note: pythonmodule is introduced to represent a Python module
Usage:
`postlist=crackfortran(files,funcs)`
`postlist` contains declaration information read from the list of files `files`.
`crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
`postlist` has the following structure:
*** it is a list of dictionaries containing `blocks':
B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
'implicit','externals','interfaced','common','sortvars',
'commonvars','note']}
B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
'program' | 'block data' | 'type' | 'pythonmodule'
B['body'] --- list containing `subblocks' with the same structure as `blocks'
B['parent_block'] --- dictionary of a parent block:
C['body'][<index>]['parent_block'] is C
B['vars'] --- dictionary of variable definitions
B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
B['name'] --- name of the block (not if B['block']=='interface')
B['prefix'] --- prefix string (only if B['block']=='function')
B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
B['result'] --- name of the return value (only if B['block']=='function')
B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
B['externals'] --- list of variables being external
B['interfaced'] --- list of variables being external and defined
B['common'] --- dictionary of common blocks (list of objects)
B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
B['from'] --- string showing the 'parents' of the current block
B['use'] --- dictionary of modules used in current block:
{<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
B['note'] --- list of LaTeX comments on the block
B['f2pyenhancements'] --- optional dictionary
{'threadsafe':'','fortranname':<name>,
'callstatement':<C-expr>|<multi-line block>,
'callprotoargument':<C-expr-list>,
'usercode':<multi-line block>|<list of multi-line blocks>,
'pymethoddef:<multi-line block>'
}
B['entry'] --- dictionary {entryname:argslist,..}
B['varnames'] --- list of variable names given in the order of reading the
Fortran code, useful for derived types.
B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
*** Variable definition is a dictionary
D = B['vars'][<variable name>] =
{'typespec'[,'attrspec','kindselector','charselector','=','typename']}
D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
'double precision' | 'integer' | 'logical' | 'real' | 'type'
D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
'optional','required', etc)
K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
'complex' | 'integer' | 'logical' | 'real' )
C = D['charselector'] = {['*','len','kind']}
(only if D['typespec']=='character')
D['='] --- initialization expression string
D['typename'] --- name of the type if D['typespec']=='type'
D['dimension'] --- list of dimension bounds
D['intent'] --- list of intent specifications
D['depend'] --- list of variable names on which current variable depends on
D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
D['note'] --- list of LaTeX comments on the variable
*** Meaning of kind/char selectors (few examples):
        D['typespec']*K['*']
D['typespec'](kind=K['kind'])
character*C['*']
character(len=C['len'],kind=C['kind'])
(see also fortran type declaration statement formats below)
Fortran 90 type declaration statement format (F77 is subset of F90)
====================================================================
(Main source: IBM XL Fortran 5.1 Language Reference Manual)
type declaration = <typespec> [[<attrspec>]::] <entitydecl>
<typespec> = byte |
character[<charselector>] |
complex[<kindselector>] |
double complex |
double precision |
integer[<kindselector>] |
logical[<kindselector>] |
real[<kindselector>] |
type(<typename>)
<charselector> = * <charlen> |
([len=]<len>[,[kind=]<kind>]) |
(kind=<kind>[,len=<len>])
<kindselector> = * <intlen> |
([kind=]<kind>)
<attrspec> = comma separated list of attributes.
Only the following attributes are used in
building up the interface:
external
(parameter --- affects '=' key)
optional
intent
Other attributes are ignored.
<intentspec> = in | out | inout
<arrayspec> = comma separated list of dimension bounds.
<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
[/<init_expr>/ | =<init_expr>] [,<entitydecl>]
In addition, the following attributes are used: check,depend,note
TODO:
* Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
-> 'real x(2)')
The above may be solved by creating an appropriate preprocessor program, for example.
"""
from __future__ import division, absolute_import, print_function
import sys
import string
import fileinput
import re
import os
import copy
import platform
from . import __version__
# The environment provided by auxfuncs.py is needed for some calls to eval.
# As the needed functions cannot be determined by static inspection of the
# code, it is safest to use import * pending a major refactoring of f2py.
from .auxfuncs import *
f2py_version = __version__.version
# Global flags:
strictf77 = 1 # Ignore `!' comments unless line[0]=='!'
sourcecodeform = 'fix' # 'fix','free'
quiet = 0 # Be verbose if 0 (Obsolete: not used any more)
verbose = 1 # Be quiet if 0, extra verbose if > 1.
tabchar = 4 * ' '
pyffilename = ''
f77modulename = ''
skipemptyends = 0 # for old F77 programs without 'program' statement
ignorecontains = 1
dolowercase = 1
debug = []
# Global variables
beginpattern = ''
currentfilename = ''
expectbegin = 1
f90modulevars = {}
filepositiontext = ''
gotnextfile = 1
groupcache = None
groupcounter = 0
grouplist = {groupcounter: []}
groupname = ''
include_paths = []
neededmodule = -1
onlyfuncs = []
previous_context = None
skipblocksuntil = -1
skipfuncs = []
skipfunctions = []
usermodules = []
def reset_global_f2py_vars():
global groupcounter, grouplist, neededmodule, expectbegin
global skipblocksuntil, usermodules, f90modulevars, gotnextfile
global filepositiontext, currentfilename, skipfunctions, skipfuncs
global onlyfuncs, include_paths, previous_context
global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename
global f77modulename, skipemptyends, ignorecontains, dolowercase, debug
# flags
strictf77 = 1
sourcecodeform = 'fix'
quiet = 0
verbose = 1
tabchar = 4 * ' '
pyffilename = ''
f77modulename = ''
skipemptyends = 0
ignorecontains = 1
dolowercase = 1
debug = []
# variables
groupcounter = 0
grouplist = {groupcounter: []}
neededmodule = -1
expectbegin = 1
skipblocksuntil = -1
usermodules = []
f90modulevars = {}
gotnextfile = 1
filepositiontext = ''
currentfilename = ''
skipfunctions = []
skipfuncs = []
onlyfuncs = []
include_paths = []
previous_context = None
def outmess(line, flag=1):
global filepositiontext
if not verbose:
return
if not quiet:
if flag:
sys.stdout.write(filepositiontext)
sys.stdout.write(line)
re._MAXCACHE = 50
defaultimplicitrules = {}
for c in "abcdefghopqrstuvwxyz$_":
defaultimplicitrules[c] = {'typespec': 'real'}
for c in "ijklmn":
defaultimplicitrules[c] = {'typespec': 'integer'}
del c
badnames = {}
invbadnames = {}
for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while',
'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union',
'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch',
'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto',
'len', 'rank', 'shape', 'index', 'slen', 'size', '_i',
'max', 'min',
'flen', 'fshape',
'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout',
'type', 'default']:
badnames[n] = n + '_bn'
invbadnames[n + '_bn'] = n
def rmbadname1(name):
if name in badnames:
errmess('rmbadname1: Replacing "%s" with "%s".\n' %
(name, badnames[name]))
return badnames[name]
return name
def rmbadname(names):
return [rmbadname1(_m) for _m in names]
def undo_rmbadname1(name):
if name in invbadnames:
errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'
% (name, invbadnames[name]))
return invbadnames[name]
return name
def undo_rmbadname(names):
return [undo_rmbadname1(_m) for _m in names]
def getextension(name):
i = name.rfind('.')
if i == -1:
return ''
if '\\' in name[i:]:
return ''
if '/' in name[i:]:
return ''
return name[i + 1:]
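# Hedged examples (not part of f2py): identifiers that collide with C
# keywords get a ``_bn`` suffix, and getextension splits off a suffix.
def _name_helpers_demo():
    assert rmbadname1('max') == 'max_bn'
    assert undo_rmbadname1('max_bn') == 'max'
    assert getextension('foo.f90') == 'f90'
    assert getextension('no_suffix') == ''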
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match
def is_free_format(file):
"""Check if file is in free format Fortran."""
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
f = open(file, 'r')
line = f.readline()
n = 15 # the number of non-comment lines to scan for hints
if _has_f_header(line):
n = 0
elif _has_f90_header(line):
n = 0
result = 1
while n > 0 and line:
if line[0] != '!' and line.strip():
n -= 1
if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&':
result = 1
break
line = f.readline()
f.close()
return result
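# Hedged demo on a hypothetical temporary file: a keyword starting in
# column one makes the heuristic report free-form source.
def _is_free_format_demo():
    import tempfile
    fd, path = tempfile.mkstemp(suffix='.f90')
    os.write(fd, b'module m\nend module m\n')
    os.close(fd)
    try:
        assert is_free_format(path) == 1
    finally:
        os.remove(path)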
# Read fortran (77,90) code
def readfortrancode(ffile, dowithline=show, istop=1):
"""
Read fortran codes from files and
    1) Get rid of comments, line continuations, and empty lines; lowercase the source.
2) Call dowithline(line) on every line.
3) Recursively call itself when statement \"include '<filename>'\" is met.
"""
global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77
global beginpattern, quiet, verbose, dolowercase, include_paths
if not istop:
saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
beginpattern, quiet, verbose, dolowercase
if ffile == []:
return
localdolowercase = dolowercase
cont = 0
finalline = ''
ll = ''
commentline = re.compile(
r'(?P<line>([^"]*["][^"]*["][^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!\'"]*))!{1}(?P<rest>.*)')
includeline = re.compile(
r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I)
cont1 = re.compile(r'(?P<line>.*)&\s*\Z')
cont2 = re.compile(r'(\s*&|)(?P<line>.*)')
mline_mark = re.compile(r".*?'''")
if istop:
dowithline('', -1)
ll, l1 = '', ''
spacedigits = [' '] + [str(_m) for _m in range(10)]
filepositiontext = ''
fin = fileinput.FileInput(ffile)
while True:
l = fin.readline()
if not l:
break
if fin.isfirstline():
filepositiontext = ''
currentfilename = fin.filename()
gotnextfile = 1
l1 = l
strictf77 = 0
sourcecodeform = 'fix'
ext = os.path.splitext(currentfilename)[1]
if is_f_file(currentfilename) and \
not (_has_f90_header(l) or _has_fix_header(l)):
strictf77 = 1
elif is_free_format(currentfilename) and not _has_fix_header(l):
sourcecodeform = 'free'
if strictf77:
beginpattern = beginpattern77
else:
beginpattern = beginpattern90
outmess('\tReading file %s (format:%s%s)\n'
% (repr(currentfilename), sourcecodeform,
strictf77 and ',strict' or ''))
l = l.expandtabs().replace('\xa0', ' ')
# Get rid of newline characters
while not l == '':
if l[-1] not in "\n\r\f":
break
l = l[:-1]
if not strictf77:
r = commentline.match(l)
if r:
l = r.group('line') + ' ' # Strip comments starting with `!'
rl = r.group('rest')
if rl[:4].lower() == 'f2py': # f2py directive
l = l + 4 * ' '
r = commentline.match(rl[4:])
if r:
l = l + r.group('line')
else:
l = l + rl[4:]
if l.strip() == '': # Skip empty line
cont = 0
continue
if sourcecodeform == 'fix':
if l[0] in ['*', 'c', '!', 'C', '#']:
if l[1:5].lower() == 'f2py': # f2py directive
l = ' ' + l[5:]
else: # Skip comment line
cont = 0
continue
elif strictf77:
if len(l) > 72:
l = l[:72]
if not (l[0] in spacedigits):
raise Exception('readfortrancode: Found non-(space,digit) char '
'in the first column.\n\tAre you sure that '
'this code is in fix form?\n\tline=%s' % repr(l))
if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '):
# Continuation of a previous line
ll = ll + l[6:]
finalline = ''
origfinalline = ''
else:
if not strictf77:
# F90 continuation
r = cont1.match(l)
if r:
l = r.group('line') # Continuation follows ..
if cont:
ll = ll + cont2.match(l).group('line')
finalline = ''
origfinalline = ''
else:
# clean up line beginning from possible digits.
l = ' ' + l[5:]
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
cont = (r is not None)
else:
# clean up line beginning from possible digits.
l = ' ' + l[5:]
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
elif sourcecodeform == 'free':
if not cont and ext == '.pyf' and mline_mark.match(l):
l = l + '\n'
while True:
lc = fin.readline()
if not lc:
errmess(
'Unexpected end of file when reading multiline\n')
break
l = l + lc
if mline_mark.match(lc):
break
l = l.rstrip()
r = cont1.match(l)
if r:
l = r.group('line') # Continuation follows ..
if cont:
ll = ll + cont2.match(l).group('line')
finalline = ''
origfinalline = ''
else:
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
cont = (r is not None)
else:
raise ValueError(
"Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform))
filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
fin.filelineno() - 1, currentfilename, l1)
m = includeline.match(origfinalline)
if m:
fn = m.group('name')
if os.path.isfile(fn):
readfortrancode(fn, dowithline=dowithline, istop=0)
else:
include_dirs = [
os.path.dirname(currentfilename)] + include_paths
foundfile = 0
for inc_dir in include_dirs:
fn1 = os.path.join(inc_dir, fn)
if os.path.isfile(fn1):
foundfile = 1
readfortrancode(fn1, dowithline=dowithline, istop=0)
break
if not foundfile:
outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
repr(fn), os.pathsep.join(include_dirs)))
else:
dowithline(finalline)
l1 = ll
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
fin.filelineno() - 1, currentfilename, l1)
m = includeline.match(origfinalline)
if m:
fn = m.group('name')
if os.path.isfile(fn):
readfortrancode(fn, dowithline=dowithline, istop=0)
else:
include_dirs = [os.path.dirname(currentfilename)] + include_paths
foundfile = 0
for inc_dir in include_dirs:
fn1 = os.path.join(inc_dir, fn)
if os.path.isfile(fn1):
foundfile = 1
readfortrancode(fn1, dowithline=dowithline, istop=0)
break
if not foundfile:
outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
repr(fn), os.pathsep.join(include_dirs)))
else:
dowithline(finalline)
filepositiontext = ''
fin.close()
if istop:
dowithline('', 1)
else:
gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
beginpattern, quiet, verbose, dolowercase = saveglobals
# Crack line
beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' + \
r'\s*(?P<this>(\b(%s)\b))' + \
r'\s*(?P<after>%s)\s*\Z'
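# Editor's note (illustrative addition): instantiating the template, e.g.
#   re.compile(beforethisafter % ('', 'dimension', 'dimension', '.*'), re.I)
# matches 'dimension(n) :: x' with this='dimension' and after='(n) :: x';
# the lookahead in 'before' keeps the keyword itself out of that group.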
##
fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
typespattern = re.compile(
beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
typespattern4implicit = re.compile(beforethisafter % (
'', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I)
#
functionpattern = re.compile(beforethisafter % (
    r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
subroutinepattern = re.compile(beforethisafter % (
    r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
#
groupbegins77 = r'program|block\s*data'
beginpattern77 = re.compile(
beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
groupbegins90 = groupbegins77 + \
r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()'
beginpattern90 = re.compile(
beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
groupends = r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface'
endpattern = re.compile(
    beforethisafter % ('', groupends, groupends, r'[\w\s]*'), re.I), 'end'
# endifs='end\s*(if|do|where|select|while|forall)'
endifs = r'(end\s*(if|do|where|select|while|forall))|(module\s*procedure)'
endifpattern = re.compile(
    beforethisafter % (r'[\w]*?', endifs, endifs, r'[\w\s]*'), re.I), 'endif'
#
implicitpattern = re.compile(
beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
dimensionpattern = re.compile(beforethisafter % (
'', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
externalpattern = re.compile(
beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external'
optionalpattern = re.compile(
beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional'
requiredpattern = re.compile(
beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required'
publicpattern = re.compile(
beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public'
privatepattern = re.compile(
beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private'
intrinsicpattern = re.compile(
    beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic'
intentpattern = re.compile(beforethisafter % (
    '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent'
parameterpattern = re.compile(
    beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter'
datapattern = re.compile(
beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data'
callpattern = re.compile(
beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call'
entrypattern = re.compile(
beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry'
callfunpattern = re.compile(
beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
commonpattern = re.compile(
beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common'
usepattern = re.compile(
beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use'
containspattern = re.compile(
beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains'
formatpattern = re.compile(
beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format'
# Non-fortran and f2py-specific statements
f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef',
'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements'
multilinepattern = re.compile(
r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
##
def _simplifyargs(argsline):
a = []
for n in markoutercomma(argsline).split('@,@'):
for r in '(),':
n = n.replace(r, '_')
a.append(n)
return ','.join(a)
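# Editor's illustrative sketch (hypothetical helper, never called by the
# module and safe to delete): shows how argument lists are flattened so the
# synthetic 'callfun' lines stay parseable by the name/args patterns.
def _demo_simplifyargs():
    assert _simplifyargs('n,x(n)') == 'n,x_n_'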
crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*', re.I)
def crackline(line, reset=0):
"""
reset=-1 --- initialize
reset=0 --- crack the line
    reset=1 --- final check if mismatch of blocks occurred
Cracked data is saved in grouplist[0].
"""
global beginpattern, groupcounter, groupname, groupcache, grouplist
global filepositiontext, currentfilename, neededmodule, expectbegin
global skipblocksuntil, skipemptyends, previous_context, gotnextfile
if ';' in line and not (f2pyenhancementspattern[0].match(line) or
multilinepattern[0].match(line)):
for l in line.split(';'):
# XXX: non-zero reset values need testing
assert reset == 0, repr(reset)
crackline(l, reset)
return
if reset < 0:
groupcounter = 0
groupname = {groupcounter: ''}
groupcache = {groupcounter: {}}
grouplist = {groupcounter: []}
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['block'] = ''
groupcache[groupcounter]['name'] = ''
neededmodule = -1
skipblocksuntil = -1
return
if reset > 0:
fl = 0
if f77modulename and neededmodule == groupcounter:
fl = 2
while groupcounter > fl:
outmess('crackline: groupcounter=%s groupname=%s\n' %
(repr(groupcounter), repr(groupname)))
outmess(
'crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1
if f77modulename and neededmodule == groupcounter:
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end interface
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end module
neededmodule = -1
return
if line == '':
return
flag = 0
for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
requiredpattern,
parameterpattern, datapattern, publicpattern, privatepattern,
                intrinsicpattern,
endifpattern, endpattern,
formatpattern,
beginpattern, functionpattern, subroutinepattern,
implicitpattern, typespattern, commonpattern,
callpattern, usepattern, containspattern,
entrypattern,
f2pyenhancementspattern,
multilinepattern
]:
m = pat[0].match(line)
if m:
break
flag = flag + 1
if not m:
re_1 = crackline_re_1
if 0 <= skipblocksuntil <= groupcounter:
return
if 'externals' in groupcache[groupcounter]:
for name in groupcache[groupcounter]['externals']:
if name in invbadnames:
name = invbadnames[name]
if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
continue
m1 = re.match(
r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I)
if m1:
m2 = re_1.match(m1.group('before'))
a = _simplifyargs(m1.group('args'))
if m2:
line = 'callfun %s(%s) result (%s)' % (
name, a, m2.group('result'))
else:
line = 'callfun %s(%s)' % (name, a)
m = callfunpattern[0].match(line)
if not m:
outmess(
'crackline: could not resolve function call for line=%s.\n' % repr(line))
return
analyzeline(m, 'callfun', line)
return
if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')):
previous_context = None
outmess('crackline:%d: No pattern for line\n' % (groupcounter))
return
elif pat[1] == 'end':
if 0 <= skipblocksuntil < groupcounter:
groupcounter = groupcounter - 1
if skipblocksuntil <= groupcounter:
return
if groupcounter <= 0:
raise Exception('crackline: groupcounter(=%s) is nonpositive. '
'Check the blocks.'
% (groupcounter))
        m1 = beginpattern[0].match(line)
        if m1 and (not m1.group('this') == groupname[groupcounter]):
raise Exception('crackline: End group %s does not match with '
'previous Begin group %s\n\t%s' %
(repr(m1.group('this')), repr(groupname[groupcounter]),
filepositiontext)
)
if skipblocksuntil == groupcounter:
skipblocksuntil = -1
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1
if not skipemptyends:
expectbegin = 1
elif pat[1] == 'begin':
if 0 <= skipblocksuntil <= groupcounter:
groupcounter = groupcounter + 1
return
gotnextfile = 0
analyzeline(m, pat[1], line)
expectbegin = 0
elif pat[1] == 'endif':
pass
elif pat[1] == 'contains':
if ignorecontains:
return
if 0 <= skipblocksuntil <= groupcounter:
return
skipblocksuntil = groupcounter
else:
if 0 <= skipblocksuntil <= groupcounter:
return
analyzeline(m, pat[1], line)
def markouterparen(line):
l = ''
f = 0
for c in line:
if c == '(':
f = f + 1
if f == 1:
l = l + '@(@'
continue
elif c == ')':
f = f - 1
if f == 0:
l = l + '@)@'
continue
l = l + c
return l
def markoutercomma(line, comma=','):
l = ''
f = 0
cc = ''
for c in line:
if (not cc or cc == ')') and c == '(':
f = f + 1
cc = ')'
elif not cc and c == '\'' and (not l or l[-1] != '\\'):
f = f + 1
cc = '\''
elif c == cc:
f = f - 1
if f == 0:
cc = ''
elif c == comma and f == 0:
l = l + '@' + comma + '@'
continue
l = l + c
assert not f, repr((f, line, l, cc))
return l
def unmarkouterparen(line):
r = line.replace('@(@', '(').replace('@)@', ')')
return r
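# Editor's illustrative sketch (hypothetical helper, never called by the
# module): only commas and parentheses at nesting depth 0 are tagged, so
# argument lists can be split without breaking nested calls.
def _demo_outer_markers():
    assert markouterparen('a(b(c),d)') == 'a@(@b(c),d@)@'
    assert unmarkouterparen('a@(@b(c),d@)@') == 'a(b(c),d)'
    assert markoutercomma('a,b(1,2),c') == 'a@,@b(1,2)@,@c'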
def appenddecl(decl, decl2, force=1):
if not decl:
decl = {}
if not decl2:
return decl
if decl is decl2:
return decl
for k in list(decl2.keys()):
if k == 'typespec':
if force or k not in decl:
decl[k] = decl2[k]
elif k == 'attrspec':
for l in decl2[k]:
decl = setattrspec(decl, l, force)
elif k == 'kindselector':
decl = setkindselector(decl, decl2[k], force)
elif k == 'charselector':
decl = setcharselector(decl, decl2[k], force)
elif k in ['=', 'typename']:
if force or k not in decl:
decl[k] = decl2[k]
elif k == 'note':
pass
elif k in ['intent', 'check', 'dimension', 'optional', 'required']:
errmess('appenddecl: "%s" not implemented.\n' % k)
else:
raise Exception('appenddecl: Unknown variable definition key:' +
str(k))
return decl
selectpattern = re.compile(
r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I)
nameargspattern = re.compile(
r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I)
callnameargspattern = re.compile(
r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I)
real16pattern = re.compile(
r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
real8pattern = re.compile(
r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)
def _is_intent_callback(vdecl):
for a in vdecl.get('attrspec', []):
if _intentcallbackpattern.match(a):
return 1
return 0
def _resolvenameargspattern(line):
line = markouterparen(line)
m1 = nameargspattern.match(line)
if m1:
return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind')
m1 = callnameargspattern.match(line)
if m1:
return m1.group('name'), m1.group('args'), None, None
return None, [], None, None
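# Editor's illustrative sketch (hypothetical helper, never called by the
# module): name, argument list, result clause and bind clause are peeled
# off a routine header in one pass over the paren-marked line.
def _demo_resolvenameargs():
    assert _resolvenameargspattern('foo (a, b) result (c)') == \
        ('foo', 'a, b', 'c', None)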
def analyzeline(m, case, line):
global groupcounter, groupname, groupcache, grouplist, filepositiontext
global currentfilename, f77modulename, neededinterface, neededmodule
global expectbegin, gotnextfile, previous_context
block = m.group('this')
if case != 'multiline':
previous_context = None
if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \
and not skipemptyends and groupcounter < 1:
newname = os.path.basename(currentfilename).split('.')[0]
outmess(
'analyzeline: no group yet. Creating program group with name "%s".\n' % newname)
gotnextfile = 0
groupcounter = groupcounter + 1
groupname[groupcounter] = 'program'
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['block'] = 'program'
groupcache[groupcounter]['name'] = newname
groupcache[groupcounter]['from'] = 'fromsky'
expectbegin = 0
if case in ['begin', 'call', 'callfun']:
# Crack line => block,name,args,result
block = block.lower()
if re.match(r'block\s*data', block, re.I):
block = 'block data'
if re.match(r'python\s*module', block, re.I):
block = 'python module'
name, args, result, bind = _resolvenameargspattern(m.group('after'))
if name is None:
if block == 'block data':
name = '_BLOCK_DATA_'
else:
name = ''
if block not in ['interface', 'block data']:
outmess('analyzeline: No name/args pattern found for line.\n')
previous_context = (block, name, groupcounter)
if args:
args = rmbadname([x.strip()
for x in markoutercomma(args).split('@,@')])
else:
args = []
if '' in args:
while '' in args:
args.remove('')
outmess(
'analyzeline: argument list is malformed (missing argument).\n')
# end of crack line => block,name,args,result
needmodule = 0
needinterface = 0
if case in ['call', 'callfun']:
needinterface = 1
if 'args' not in groupcache[groupcounter]:
return
if name not in groupcache[groupcounter]['args']:
return
for it in grouplist[groupcounter]:
if it['name'] == name:
return
if name in groupcache[groupcounter]['interfaced']:
return
block = {'call': 'subroutine', 'callfun': 'function'}[case]
if f77modulename and neededmodule == -1 and groupcounter <= 1:
neededmodule = groupcounter + 2
needmodule = 1
if block != 'interface':
needinterface = 1
# Create new block(s)
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
if needmodule:
if verbose > 1:
outmess('analyzeline: Creating module block %s\n' %
repr(f77modulename), 0)
groupname[groupcounter] = 'module'
groupcache[groupcounter]['block'] = 'python module'
groupcache[groupcounter]['name'] = f77modulename
groupcache[groupcounter]['from'] = ''
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
if needinterface:
if verbose > 1:
outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (
groupcounter), 0)
groupname[groupcounter] = 'interface'
groupcache[groupcounter]['block'] = 'interface'
groupcache[groupcounter]['name'] = 'unknown_interface'
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
groupname[groupcounter] = block
groupcache[groupcounter]['block'] = block
if not name:
name = 'unknown_' + block
groupcache[groupcounter]['prefix'] = m.group('before')
groupcache[groupcounter]['name'] = rmbadname1(name)
groupcache[groupcounter]['result'] = result
if groupcounter == 1:
groupcache[groupcounter]['from'] = currentfilename
else:
if f77modulename and groupcounter == 3:
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], currentfilename)
else:
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
for k in list(groupcache[groupcounter].keys()):
if not groupcache[groupcounter][k]:
del groupcache[groupcounter][k]
groupcache[groupcounter]['args'] = args
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['entry'] = {}
# end of creation
if block == 'type':
groupcache[groupcounter]['varnames'] = []
if case in ['call', 'callfun']: # set parents variables
if name not in groupcache[groupcounter - 2]['externals']:
groupcache[groupcounter - 2]['externals'].append(name)
groupcache[groupcounter]['vars'] = copy.deepcopy(
groupcache[groupcounter - 2]['vars'])
try:
del groupcache[groupcounter]['vars'][name][
groupcache[groupcounter]['vars'][name]['attrspec'].index('external')]
            except Exception:
pass
if block in ['function', 'subroutine']: # set global attributes
try:
groupcache[groupcounter]['vars'][name] = appenddecl(
groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars'][''])
            except Exception:
pass
if case == 'callfun': # return type
if result and result in groupcache[groupcounter]['vars']:
if not name == result:
groupcache[groupcounter]['vars'][name] = appenddecl(
groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result])
# if groupcounter>1: # name is interfaced
try:
groupcache[groupcounter - 2]['interfaced'].append(name)
        except Exception:
pass
if block == 'function':
t = typespattern[0].match(m.group('before') + ' ' + name)
if t:
typespec, selector, attr, edecl = cracktypespec0(
t.group('this'), t.group('after'))
updatevars(typespec, selector, attr, edecl)
if case in ['call', 'callfun']:
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end routine
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end interface
elif case == 'entry':
name, args, result, bind = _resolvenameargspattern(m.group('after'))
if name is not None:
if args:
args = rmbadname([x.strip()
for x in markoutercomma(args).split('@,@')])
else:
args = []
assert result is None, repr(result)
groupcache[groupcounter]['entry'][name] = args
previous_context = ('entry', name, groupcounter)
elif case == 'type':
typespec, selector, attr, edecl = cracktypespec0(
block, m.group('after'))
last_name = updatevars(typespec, selector, attr, edecl)
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
    elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']:
edecl = groupcache[groupcounter]['vars']
ll = m.group('after').strip()
i = ll.find('::')
if i < 0 and case == 'intent':
i = markouterparen(ll).find('@)@') - 2
ll = ll[:i + 1] + '::' + ll[i + 1:]
i = ll.find('::')
if ll[i:] == '::' and 'args' in groupcache[groupcounter]:
outmess('All arguments will have attribute %s%s\n' %
(m.group('this'), ll[:i]))
ll = ll + ','.join(groupcache[groupcounter]['args'])
if i < 0:
i = 0
pl = ''
else:
pl = ll[:i].strip()
ll = ll[i + 2:]
ch = markoutercomma(pl).split('@,@')
if len(ch) > 1:
pl = ch[0]
outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (
','.join(ch[1:])))
last_name = None
for e in [x.strip() for x in markoutercomma(ll).split('@,@')]:
m1 = namepattern.match(e)
if not m1:
if case in ['public', 'private']:
k = ''
else:
print(m.groupdict())
outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % (
case, repr(e)))
continue
else:
k = rmbadname1(m1.group('name'))
if k not in edecl:
edecl[k] = {}
if case == 'dimension':
ap = case + m1.group('after')
if case == 'intent':
ap = m.group('this') + pl
if _intentcallbackpattern.match(ap):
if k not in groupcache[groupcounter]['args']:
if groupcounter > 1:
if '__user__' not in groupcache[groupcounter - 2]['name']:
outmess(
'analyzeline: missing __user__ module (could be nothing)\n')
# fixes ticket 1693
if k != groupcache[groupcounter]['name']:
outmess('analyzeline: appending intent(callback) %s'
' to %s arguments\n' % (k, groupcache[groupcounter]['name']))
groupcache[groupcounter]['args'].append(k)
else:
errmess(
'analyzeline: intent(callback) %s is ignored' % (k))
else:
errmess('analyzeline: intent(callback) %s is already'
' in argument list' % (k))
            if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']:
ap = case
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append(ap)
else:
edecl[k]['attrspec'] = [ap]
if case == 'external':
if groupcache[groupcounter]['block'] == 'program':
outmess('analyzeline: ignoring program arguments\n')
continue
if k not in groupcache[groupcounter]['args']:
continue
if 'externals' not in groupcache[groupcounter]:
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['externals'].append(k)
last_name = k
groupcache[groupcounter]['vars'] = edecl
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'parameter':
edecl = groupcache[groupcounter]['vars']
ll = m.group('after').strip()[1:-1]
last_name = None
for e in markoutercomma(ll).split('@,@'):
try:
k, initexpr = [x.strip() for x in e.split('=')]
            except Exception:
outmess(
'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll))
continue
params = get_parameters(edecl)
k = rmbadname1(k)
if k not in edecl:
edecl[k] = {}
if '=' in edecl[k] and (not edecl[k]['='] == initexpr):
outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % (
k, edecl[k]['='], initexpr))
t = determineexprtype(initexpr, params)
if t:
if t.get('typespec') == 'real':
tt = list(initexpr)
for m in real16pattern.finditer(initexpr):
tt[m.start():m.end()] = list(
initexpr[m.start():m.end()].lower().replace('d', 'e'))
initexpr = ''.join(tt)
elif t.get('typespec') == 'complex':
initexpr = initexpr[1:].lower().replace('d', 'e').\
replace(',', '+1j*(')
try:
v = eval(initexpr, {}, params)
except (SyntaxError, NameError, TypeError) as msg:
errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'
% (initexpr, msg))
continue
edecl[k]['='] = repr(v)
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append('parameter')
else:
edecl[k]['attrspec'] = ['parameter']
last_name = k
groupcache[groupcounter]['vars'] = edecl
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'implicit':
if m.group('after').strip().lower() == 'none':
groupcache[groupcounter]['implicit'] = None
elif m.group('after'):
if 'implicit' in groupcache[groupcounter]:
impl = groupcache[groupcounter]['implicit']
else:
impl = {}
if impl is None:
outmess(
'analyzeline: Overwriting earlier "implicit none" statement.\n')
impl = {}
for e in markoutercomma(m.group('after')).split('@,@'):
decl = {}
m1 = re.match(
r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I)
if not m1:
outmess(
'analyzeline: could not extract info of implicit statement part "%s"\n' % (e))
continue
m2 = typespattern4implicit.match(m1.group('this'))
if not m2:
outmess(
'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e))
continue
typespec, selector, attr, edecl = cracktypespec0(
m2.group('this'), m2.group('after'))
kindselect, charselect, typename = cracktypespec(
typespec, selector)
decl['typespec'] = typespec
decl['kindselector'] = kindselect
decl['charselector'] = charselect
decl['typename'] = typename
for k in list(decl.keys()):
if not decl[k]:
del decl[k]
for r in markoutercomma(m1.group('after')).split('@,@'):
if '-' in r:
try:
begc, endc = [x.strip() for x in r.split('-')]
                        except Exception:
outmess(
'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n' % r)
continue
else:
begc = endc = r.strip()
if not len(begc) == len(endc) == 1:
outmess(
'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n' % r)
continue
for o in range(ord(begc), ord(endc) + 1):
impl[chr(o)] = decl
groupcache[groupcounter]['implicit'] = impl
elif case == 'data':
ll = []
dl = ''
il = ''
f = 0
fc = 1
inp = 0
for c in m.group('after'):
if not inp:
if c == "'":
fc = not fc
if c == '/' and fc:
f = f + 1
continue
if c == '(':
inp = inp + 1
elif c == ')':
inp = inp - 1
if f == 0:
dl = dl + c
elif f == 1:
il = il + c
elif f == 2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl, il])
dl = c
il = ''
f = 0
if f == 2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl, il])
vars = {}
if 'vars' in groupcache[groupcounter]:
vars = groupcache[groupcounter]['vars']
last_name = None
for l in ll:
l = [x.strip() for x in l]
if l[0][0] == ',':
l[0] = l[0][1:]
if l[0][0] == '(':
outmess(
'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0])
continue
i = 0
j = 0
llen = len(l[1])
for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]):
if v[0] == '(':
outmess(
'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v)
# XXX: subsequent init expressions may get wrong values.
# Ignoring since data statements are irrelevant for
# wrapping.
continue
fc = 0
while (i < llen) and (fc or not l[1][i] == ','):
if l[1][i] == "'":
fc = not fc
i = i + 1
i = i + 1
if v not in vars:
vars[v] = {}
if '=' in vars[v] and not vars[v]['='] == l[1][j:i - 1]:
outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % (
v, vars[v]['='], l[1][j:i - 1]))
vars[v]['='] = l[1][j:i - 1]
j = i
last_name = v
groupcache[groupcounter]['vars'] = vars
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'common':
line = m.group('after').strip()
if not line[0] == '/':
line = '//' + line
cl = []
f = 0
bn = ''
ol = ''
for c in line:
if c == '/':
f = f + 1
continue
if f >= 3:
bn = bn.strip()
if not bn:
bn = '_BLNK_'
cl.append([bn, ol])
f = f - 2
bn = ''
ol = ''
if f % 2:
bn = bn + c
else:
ol = ol + c
bn = bn.strip()
if not bn:
bn = '_BLNK_'
cl.append([bn, ol])
commonkey = {}
if 'common' in groupcache[groupcounter]:
commonkey = groupcache[groupcounter]['common']
for c in cl:
if c[0] not in commonkey:
commonkey[c[0]] = []
for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]:
if i:
commonkey[c[0]].append(i)
groupcache[groupcounter]['common'] = commonkey
previous_context = ('common', bn, groupcounter)
elif case == 'use':
m1 = re.match(
r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I)
if m1:
mm = m1.groupdict()
if 'use' not in groupcache[groupcounter]:
groupcache[groupcounter]['use'] = {}
name = m1.group('name')
groupcache[groupcounter]['use'][name] = {}
isonly = 0
if 'list' in mm and mm['list'] is not None:
if 'notonly' in mm and mm['notonly'] is None:
isonly = 1
groupcache[groupcounter]['use'][name]['only'] = isonly
ll = [x.strip() for x in mm['list'].split(',')]
rl = {}
for l in ll:
if '=' in l:
m2 = re.match(
r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z', l, re.I)
if m2:
rl[m2.group('local').strip()] = m2.group(
'use').strip()
else:
outmess(
                                'analyzeline: No local=>use pattern found in %s\n' % repr(l))
else:
rl[l] = l
groupcache[groupcounter]['use'][name]['map'] = rl
else:
pass
else:
print(m.groupdict())
outmess('analyzeline: Could not crack the use statement.\n')
elif case in ['f2pyenhancements']:
if 'f2pyenhancements' not in groupcache[groupcounter]:
groupcache[groupcounter]['f2pyenhancements'] = {}
d = groupcache[groupcounter]['f2pyenhancements']
if m.group('this') == 'usercode' and 'usercode' in d:
if isinstance(d['usercode'], str):
d['usercode'] = [d['usercode']]
d['usercode'].append(m.group('after'))
else:
d[m.group('this')] = m.group('after')
elif case == 'multiline':
if previous_context is None:
if verbose:
outmess('analyzeline: No context for multiline block.\n')
return
gc = groupcounter
appendmultiline(groupcache[gc],
previous_context[:2],
m.group('this'))
else:
if verbose > 1:
print(m.groupdict())
outmess('analyzeline: No code implemented for line.\n')
def appendmultiline(group, context_name, ml):
if 'f2pymultilines' not in group:
group['f2pymultilines'] = {}
d = group['f2pymultilines']
if context_name not in d:
d[context_name] = []
d[context_name].append(ml)
return
def cracktypespec0(typespec, ll):
selector = None
attr = None
if re.match(r'double\s*complex', typespec, re.I):
typespec = 'double complex'
elif re.match(r'double\s*precision', typespec, re.I):
typespec = 'double precision'
else:
typespec = typespec.strip().lower()
m1 = selectpattern.match(markouterparen(ll))
if not m1:
outmess(
'cracktypespec0: no kind/char_selector pattern found for line.\n')
return
d = m1.groupdict()
for k in list(d.keys()):
d[k] = unmarkouterparen(d[k])
if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']:
selector = d['this']
ll = d['after']
i = ll.find('::')
if i >= 0:
attr = ll[:i].strip()
ll = ll[i + 2:]
return typespec, selector, attr, ll
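# Editor's illustrative sketch (hypothetical helper, never called by the
# module): the selector is split off the type keyword, anything before '::'
# becomes the attribute string, and the rest is the entity list.
def _demo_cracktypespec0():
    assert cracktypespec0('real', '*8 x') == ('real', '*8', None, ' x')
    assert cracktypespec0('integer', '(kind=8) :: n') == \
        ('integer', '(kind=8)', '', ' n')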
#####
namepattern = re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z', re.I)
kindselector = re.compile(
r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z', re.I)
charselector = re.compile(
r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z', re.I)
lenkindpattern = re.compile(
r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z', re.I)
lenarraypattern = re.compile(
r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I)
def removespaces(expr):
expr = expr.strip()
if len(expr) <= 1:
return expr
expr2 = expr[0]
for i in range(1, len(expr) - 1):
if (expr[i] == ' ' and
((expr[i + 1] in "()[]{}=+-/* ") or
(expr[i - 1] in "()[]{}=+-/* "))):
continue
expr2 = expr2 + expr[i]
expr2 = expr2 + expr[-1]
return expr2
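# Editor's illustrative sketch (hypothetical helper, never called by the
# module): blanks next to operators and brackets are dropped, while blanks
# between two bare words (e.g. 'double precision') are preserved.
def _demo_removespaces():
    assert removespaces('a + b * ( c )') == 'a+b*(c)'
    assert removespaces('double precision') == 'double precision'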
def markinnerspaces(line):
    # Replace blanks that occur inside quoted strings by '@_@' so that
    # later blank-based splitting cannot break string literals apart.
    l = ''
    f = 0
    cc = '\''
    cb = ''
    for c in line:
        if cb == '\\' and c in ['\\', '\'', '"']:
            l = l + c
            cb = c
            continue
        if f == 0 and c in ['\'', '"']:
            cc = c
        if c == cc:
            # Toggle the inside-string flag; a counter that only grows
            # would leave spaces in any second quoted string on the same
            # line unprotected.
            f = 1 - f
        elif c == ' ' and f == 1:
            l = l + '@_@'
            continue
        l = l + c
        cb = c
    return l
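# Editor's illustrative sketch (hypothetical helper, never called by the
# module): blanks inside quoted strings are protected; the second literal
# also survives thanks to the in/out toggle above.
def _demo_markinnerspaces():
    assert markinnerspaces("a 'b c' d") == "a 'b@_@c' d"
    assert markinnerspaces("x 'a b' 'c d'") == "x 'a@_@b' 'c@_@d'"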
def updatevars(typespec, selector, attrspec, entitydecl):
global groupcache, groupcounter
last_name = None
kindselect, charselect, typename = cracktypespec(typespec, selector)
if attrspec:
attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')]
l = []
c = re.compile(r'(?P<start>[a-zA-Z]+)')
for a in attrspec:
if not a:
continue
m = c.match(a)
if m:
s = m.group('start').lower()
a = s + a[len(s):]
l.append(a)
attrspec = l
el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')]
el1 = []
for e in el:
for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]:
if e1:
el1.append(e1.replace('@_@', ' '))
for e in el1:
m = namepattern.match(e)
if not m:
outmess(
'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e)))
continue
ename = rmbadname1(m.group('name'))
edecl = {}
if ename in groupcache[groupcounter]['vars']:
edecl = groupcache[groupcounter]['vars'][ename].copy()
not_has_typespec = 'typespec' not in edecl
if not_has_typespec:
edecl['typespec'] = typespec
elif typespec and (not typespec == edecl['typespec']):
outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['typespec'], typespec))
if 'kindselector' not in edecl:
edecl['kindselector'] = copy.copy(kindselect)
elif kindselect:
for k in list(kindselect.keys()):
if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]):
outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
k, ename, edecl['kindselector'][k], kindselect[k]))
else:
edecl['kindselector'][k] = copy.copy(kindselect[k])
if 'charselector' not in edecl and charselect:
if not_has_typespec:
edecl['charselector'] = charselect
else:
errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n'
% (ename, charselect))
elif charselect:
for k in list(charselect.keys()):
if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]):
outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
k, ename, edecl['charselector'][k], charselect[k]))
else:
edecl['charselector'][k] = copy.copy(charselect[k])
if 'typename' not in edecl:
edecl['typename'] = typename
elif typename and (not edecl['typename'] == typename):
outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['typename'], typename))
if 'attrspec' not in edecl:
edecl['attrspec'] = copy.copy(attrspec)
elif attrspec:
for a in attrspec:
if a not in edecl['attrspec']:
edecl['attrspec'].append(a)
else:
edecl['typespec'] = copy.copy(typespec)
edecl['kindselector'] = copy.copy(kindselect)
edecl['charselector'] = copy.copy(charselect)
edecl['typename'] = typename
edecl['attrspec'] = copy.copy(attrspec)
if m.group('after'):
m1 = lenarraypattern.match(markouterparen(m.group('after')))
if m1:
d1 = m1.groupdict()
for lk in ['len', 'array', 'init']:
if d1[lk + '2'] is not None:
d1[lk] = d1[lk + '2']
del d1[lk + '2']
for k in list(d1.keys()):
if d1[k] is not None:
d1[k] = unmarkouterparen(d1[k])
else:
del d1[k]
if 'len' in d1 and 'array' in d1:
if d1['len'] == '':
d1['len'] = d1['array']
del d1['array']
else:
d1['array'] = d1['array'] + ',' + d1['len']
del d1['len']
errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % (
typespec, e, typespec, ename, d1['array']))
if 'array' in d1:
dm = 'dimension(%s)' % d1['array']
if 'attrspec' not in edecl or (not edecl['attrspec']):
edecl['attrspec'] = [dm]
else:
edecl['attrspec'].append(dm)
for dm1 in edecl['attrspec']:
if dm1[:9] == 'dimension' and dm1 != dm:
del edecl['attrspec'][-1]
errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n'
% (ename, dm1, dm))
break
if 'len' in d1:
if typespec in ['complex', 'integer', 'logical', 'real']:
if ('kindselector' not in edecl) or (not edecl['kindselector']):
edecl['kindselector'] = {}
edecl['kindselector']['*'] = d1['len']
elif typespec == 'character':
if ('charselector' not in edecl) or (not edecl['charselector']):
edecl['charselector'] = {}
if 'len' in edecl['charselector']:
del edecl['charselector']['len']
edecl['charselector']['*'] = d1['len']
if 'init' in d1:
if '=' in edecl and (not edecl['='] == d1['init']):
outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['='], d1['init']))
else:
edecl['='] = d1['init']
else:
outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n' % (
ename + m.group('after')))
for k in list(edecl.keys()):
if not edecl[k]:
del edecl[k]
groupcache[groupcounter]['vars'][ename] = edecl
if 'varnames' in groupcache[groupcounter]:
groupcache[groupcounter]['varnames'].append(ename)
last_name = ename
return last_name
def cracktypespec(typespec, selector):
kindselect = None
charselect = None
typename = None
if selector:
if typespec in ['complex', 'integer', 'logical', 'real']:
kindselect = kindselector.match(selector)
if not kindselect:
outmess(
'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector)))
return
kindselect = kindselect.groupdict()
kindselect['*'] = kindselect['kind2']
del kindselect['kind2']
for k in list(kindselect.keys()):
if not kindselect[k]:
del kindselect[k]
for k, i in list(kindselect.items()):
kindselect[k] = rmbadname1(i)
elif typespec == 'character':
charselect = charselector.match(selector)
if not charselect:
outmess(
'cracktypespec: no charselector pattern found for %s\n' % (repr(selector)))
return
charselect = charselect.groupdict()
charselect['*'] = charselect['charlen']
del charselect['charlen']
if charselect['lenkind']:
lenkind = lenkindpattern.match(
markoutercomma(charselect['lenkind']))
lenkind = lenkind.groupdict()
for lk in ['len', 'kind']:
if lenkind[lk + '2']:
lenkind[lk] = lenkind[lk + '2']
charselect[lk] = lenkind[lk]
del lenkind[lk + '2']
del charselect['lenkind']
for k in list(charselect.keys()):
if not charselect[k]:
del charselect[k]
for k, i in list(charselect.items()):
charselect[k] = rmbadname1(i)
elif typespec == 'type':
typename = re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I)
if typename:
typename = typename.group('name')
else:
outmess('cracktypespec: no typename found in %s\n' %
(repr(typespec + selector)))
else:
outmess('cracktypespec: no selector used for %s\n' %
(repr(selector)))
return kindselect, charselect, typename
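# Editor's illustrative sketch (hypothetical helper, never called by the
# module): 'real*8' style selectors land under the '*' key, while 'kind='
# selectors land under 'kind'.
def _demo_cracktypespec():
    assert cracktypespec('real', '*8') == ({'*': '8'}, None, None)
    assert cracktypespec('integer', '(kind=8)') == ({'kind': '8'}, None, None)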
######
def setattrspec(decl, attr, force=0):
if not decl:
decl = {}
if not attr:
return decl
if 'attrspec' not in decl:
decl['attrspec'] = [attr]
return decl
if force:
decl['attrspec'].append(attr)
if attr in decl['attrspec']:
return decl
if attr == 'static' and 'automatic' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'automatic' and 'static' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'public' and 'private' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'private' and 'public' not in decl['attrspec']:
decl['attrspec'].append(attr)
else:
decl['attrspec'].append(attr)
return decl
def setkindselector(decl, sel, force=0):
if not decl:
decl = {}
if not sel:
return decl
if 'kindselector' not in decl:
decl['kindselector'] = sel
return decl
for k in list(sel.keys()):
if force or k not in decl['kindselector']:
decl['kindselector'][k] = sel[k]
return decl
def setcharselector(decl, sel, force=0):
if not decl:
decl = {}
if not sel:
return decl
if 'charselector' not in decl:
decl['charselector'] = sel
return decl
for k in list(sel.keys()):
if force or k not in decl['charselector']:
decl['charselector'][k] = sel[k]
return decl
def getblockname(block, unknown='unknown'):
if 'name' in block:
return block['name']
return unknown
# post processing
def setmesstext(block):
global filepositiontext
try:
filepositiontext = 'In: %s:%s\n' % (block['from'], block['name'])
    except Exception:
pass
def get_usedict(block):
usedict = {}
if 'parent_block' in block:
usedict = get_usedict(block['parent_block'])
if 'use' in block:
usedict.update(block['use'])
return usedict
def get_useparameters(block, param_map=None):
global f90modulevars
if param_map is None:
param_map = {}
usedict = get_usedict(block)
if not usedict:
return param_map
for usename, mapping in list(usedict.items()):
usename = usename.lower()
if usename not in f90modulevars:
outmess('get_useparameters: no module %s info used by %s\n' %
(usename, block.get('name')))
continue
mvars = f90modulevars[usename]
params = get_parameters(mvars)
if not params:
continue
# XXX: apply mapping
if mapping:
            errmess('get_useparameters: mapping for %s not implemented.\n' % (mapping))
for k, v in list(params.items()):
if k in param_map:
outmess('get_useparameters: overriding parameter %s with'
                        ' value from module %s\n' % (repr(k), repr(usename)))
param_map[k] = v
return param_map
def postcrack2(block, tab='', param_map=None):
global f90modulevars
if not f90modulevars:
return block
if isinstance(block, list):
ret = []
for g in block:
g = postcrack2(g, tab=tab + '\t', param_map=param_map)
ret.append(g)
return ret
setmesstext(block)
outmess('%sBlock: %s\n' % (tab, block['name']), 0)
if param_map is None:
param_map = get_useparameters(block)
if param_map is not None and 'vars' in block:
vars = block['vars']
for n in list(vars.keys()):
var = vars[n]
if 'kindselector' in var:
kind = var['kindselector']
if 'kind' in kind:
val = kind['kind']
if val in param_map:
kind['kind'] = param_map[val]
new_body = []
for b in block['body']:
b = postcrack2(b, tab=tab + '\t', param_map=param_map)
new_body.append(b)
block['body'] = new_body
return block
def postcrack(block, args=None, tab=''):
"""
TODO:
function return values
determine expression types if in argument list
"""
global usermodules, onlyfunctions
if isinstance(block, list):
gret = []
uret = []
for g in block:
setmesstext(g)
g = postcrack(g, tab=tab + '\t')
# sort user routines to appear first
if 'name' in g and '__user__' in g['name']:
uret.append(g)
else:
gret.append(g)
return uret + gret
setmesstext(block)
    if not isinstance(block, dict) or 'block' not in block:
raise Exception('postcrack: Expected block dictionary instead of ' +
str(block))
if 'name' in block and not block['name'] == 'unknown_interface':
outmess('%sBlock: %s\n' % (tab, block['name']), 0)
block = analyzeargs(block)
block = analyzecommon(block)
block['vars'] = analyzevars(block)
block['sortvars'] = sortvarnames(block['vars'])
if 'args' in block and block['args']:
args = block['args']
block['body'] = analyzebody(block, args, tab=tab)
userisdefined = []
if 'use' in block:
useblock = block['use']
for k in list(useblock.keys()):
if '__user__' in k:
userisdefined.append(k)
else:
useblock = {}
name = ''
if 'name' in block:
name = block['name']
# and not userisdefined: # Build a __user__ module
if 'externals' in block and block['externals']:
interfaced = []
if 'interfaced' in block:
interfaced = block['interfaced']
mvars = copy.copy(block['vars'])
if name:
mname = name + '__user__routines'
else:
mname = 'unknown__user__routines'
if mname in userisdefined:
i = 1
while '%s_%i' % (mname, i) in userisdefined:
i = i + 1
mname = '%s_%i' % (mname, i)
interface = {'block': 'interface', 'body': [],
'vars': {}, 'name': name + '_user_interface'}
for e in block['externals']:
if e in interfaced:
edef = []
j = -1
for b in block['body']:
j = j + 1
if b['block'] == 'interface':
i = -1
for bb in b['body']:
i = i + 1
if 'name' in bb and bb['name'] == e:
edef = copy.copy(bb)
del b['body'][i]
break
if edef:
if not b['body']:
del block['body'][j]
del interfaced[interfaced.index(e)]
break
interface['body'].append(edef)
else:
if e in mvars and not isexternal(mvars[e]):
interface['vars'][e] = mvars[e]
if interface['vars'] or interface['body']:
block['interfaced'] = interfaced
mblock = {'block': 'python module', 'body': [
interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']}
useblock[mname] = {}
usermodules.append(mblock)
if useblock:
block['use'] = useblock
return block
def sortvarnames(vars):
indep = []
dep = []
for v in list(vars.keys()):
if 'depend' in vars[v] and vars[v]['depend']:
dep.append(v)
else:
indep.append(v)
n = len(dep)
i = 0
while dep: # XXX: How to catch dependence cycles correctly?
v = dep[0]
fl = 0
for w in dep[1:]:
if w in vars[v]['depend']:
fl = 1
break
if fl:
dep = dep[1:] + [v]
i = i + 1
if i > n:
errmess('sortvarnames: failed to compute dependencies because'
' of cyclic dependencies between '
+ ', '.join(dep) + '\n')
indep = indep + dep
break
else:
indep.append(v)
dep = dep[1:]
n = len(dep)
i = 0
return indep
def analyzecommon(block):
if not hascommon(block):
return block
commonvars = []
for k in list(block['common'].keys()):
comvars = []
for e in block['common'][k]:
m = re.match(
r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I)
if m:
dims = []
if m.group('dims'):
dims = [x.strip()
for x in markoutercomma(m.group('dims')).split('@,@')]
n = m.group('name').strip()
if n in block['vars']:
if 'attrspec' in block['vars'][n]:
block['vars'][n]['attrspec'].append(
'dimension(%s)' % (','.join(dims)))
else:
block['vars'][n]['attrspec'] = [
'dimension(%s)' % (','.join(dims))]
else:
if dims:
block['vars'][n] = {
'attrspec': ['dimension(%s)' % (','.join(dims))]}
else:
block['vars'][n] = {}
if n not in commonvars:
commonvars.append(n)
else:
n = e
errmess(
'analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n' % (e, k))
comvars.append(n)
block['common'][k] = comvars
if 'commonvars' not in block:
block['commonvars'] = commonvars
else:
block['commonvars'] = block['commonvars'] + commonvars
return block
def analyzebody(block, args, tab=''):
global usermodules, skipfuncs, onlyfuncs, f90modulevars
setmesstext(block)
body = []
for b in block['body']:
b['parent_block'] = block
if b['block'] in ['function', 'subroutine']:
if args is not None and b['name'] not in args:
continue
else:
as_ = b['args']
if b['name'] in skipfuncs:
continue
if onlyfuncs and b['name'] not in onlyfuncs:
continue
b['saved_interface'] = crack2fortrangen(
b, '\n' + ' ' * 6, as_interface=True)
else:
as_ = args
b = postcrack(b, as_, tab=tab + '\t')
if b['block'] == 'interface' and not b['body']:
if 'f2pyenhancements' not in b:
continue
if b['block'].replace(' ', '') == 'pythonmodule':
usermodules.append(b)
else:
if b['block'] == 'module':
f90modulevars[b['name']] = b['vars']
body.append(b)
return body
def buildimplicitrules(block):
setmesstext(block)
implicitrules = defaultimplicitrules
attrrules = {}
if 'implicit' in block:
if block['implicit'] is None:
implicitrules = None
if verbose > 1:
outmess(
'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name']))
else:
for k in list(block['implicit'].keys()):
if block['implicit'][k].get('typespec') not in ['static', 'automatic']:
implicitrules[k] = block['implicit'][k]
else:
attrrules[k] = block['implicit'][k]['typespec']
return implicitrules, attrrules
def myeval(e, g=None, l=None):
    """Like eval, but only int or float results are accepted."""
    r = eval(e, g, l)
    if type(r) in [int, float]:
return r
raise ValueError('r=%r' % (r))
getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I)
def getlincoef(e, xset): # e = a*x+b ; x in xset
try:
c = int(myeval(e, {}, {}))
return 0, c, None
    except Exception:
pass
if getlincoef_re_1.match(e):
return 1, 0, e
len_e = len(e)
for x in xset:
if len(x) > len_e:
continue
if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e):
# skip function calls having x as an argument, e.g max(1, x)
continue
re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I)
m = re_1.match(e)
if m:
try:
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 0, m1.group('after'))
m1 = re_1.match(ee)
b = myeval(ee, {}, {})
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 1, m1.group('after'))
m1 = re_1.match(ee)
a = myeval(ee, {}, {}) - b
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 0.5, m1.group('after'))
m1 = re_1.match(ee)
c = myeval(ee, {}, {})
# computing another point to be sure that expression is linear
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 1.5, m1.group('after'))
m1 = re_1.match(ee)
c2 = myeval(ee, {}, {})
if (a * 0.5 + b == c and a * 1.5 + b == c2):
return a, b, x
            except Exception:
pass
break
return None, None, None
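# Editor's illustrative sketch (hypothetical helper, never called by the
# module): the linear coefficients are recovered by probing the expression
# at x = 0, 1, 0.5 and 1.5; a non-linear expression fails the consistency
# check and yields (None, None, None).
def _demo_getlincoef():
    assert getlincoef('2*n+1', {'n'}) == (2, 1, 'n')
    assert getlincoef('n*n', {'n'}) == (None, None, None)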
_varname_match = re.compile(r'\A[a-z]\w*\Z').match
def getarrlen(dl, args, star='*'):
edl = []
try:
edl.append(myeval(dl[0], {}, {}))
    except Exception:
edl.append(dl[0])
try:
edl.append(myeval(dl[1], {}, {}))
    except Exception:
edl.append(dl[1])
if isinstance(edl[0], int):
p1 = 1 - edl[0]
if p1 == 0:
d = str(dl[1])
elif p1 < 0:
d = '%s-%s' % (dl[1], -p1)
else:
d = '%s+%s' % (dl[1], p1)
elif isinstance(edl[1], int):
p1 = 1 + edl[1]
if p1 == 0:
d = '-(%s)' % (dl[0])
else:
d = '%s-(%s)' % (p1, dl[0])
else:
d = '%s-(%s)+1' % (dl[1], dl[0])
try:
return repr(myeval(d, {}, {})), None, None
    except Exception:
pass
d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args)
if None not in [d1[0], d2[0]]:
if (d1[0], d2[0]) == (0, 0):
return repr(d2[1] - d1[1] + 1), None, None
b = d2[1] - d1[1] + 1
d1 = (d1[0], 0, d1[2])
d2 = (d2[0], b, d2[2])
if d1[0] == 0 and d2[2] in args:
if b < 0:
return '%s * %s - %s' % (d2[0], d2[2], -b), d2[2], '+%s)/(%s)' % (-b, d2[0])
elif b:
return '%s * %s + %s' % (d2[0], d2[2], b), d2[2], '-%s)/(%s)' % (b, d2[0])
else:
return '%s * %s' % (d2[0], d2[2]), d2[2], ')/(%s)' % (d2[0])
if d2[0] == 0 and d1[2] in args:
if b < 0:
return '%s * %s - %s' % (-d1[0], d1[2], -b), d1[2], '+%s)/(%s)' % (-b, -d1[0])
elif b:
return '%s * %s + %s' % (-d1[0], d1[2], b), d1[2], '-%s)/(%s)' % (b, -d1[0])
else:
return '%s * %s' % (-d1[0], d1[2]), d1[2], ')/(%s)' % (-d1[0])
if d1[2] == d2[2] and d1[2] in args:
a = d2[0] - d1[0]
if not a:
return repr(b), None, None
if b < 0:
return '%s * %s - %s' % (a, d1[2], -b), d2[2], '+%s)/(%s)' % (-b, a)
elif b:
return '%s * %s + %s' % (a, d1[2], b), d2[2], '-%s)/(%s)' % (b, a)
else:
return '%s * %s' % (a, d1[2]), d2[2], ')/(%s)' % (a)
if d1[0] == d2[0] == 1:
c = str(d1[2])
if c not in args:
if _varname_match(c):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c))
c = '(%s)' % c
if b == 0:
d = '%s-%s' % (d2[2], c)
elif b < 0:
d = '%s-%s-%s' % (d2[2], c, -b)
else:
d = '%s-%s+%s' % (d2[2], c, b)
elif d1[0] == 0:
c2 = str(d2[2])
if c2 not in args:
if _varname_match(c2):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
c2 = '(%s)' % c2
if d2[0] == 1:
pass
elif d2[0] == -1:
c2 = '-%s' % c2
else:
c2 = '%s*%s' % (d2[0], c2)
if b == 0:
d = c2
elif b < 0:
d = '%s-%s' % (c2, -b)
else:
d = '%s+%s' % (c2, b)
elif d2[0] == 0:
c1 = str(d1[2])
if c1 not in args:
if _varname_match(c1):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
c1 = '(%s)' % c1
if d1[0] == 1:
c1 = '-%s' % c1
elif d1[0] == -1:
c1 = '+%s' % c1
elif d1[0] < 0:
c1 = '+%s*%s' % (-d1[0], c1)
else:
c1 = '-%s*%s' % (d1[0], c1)
if b == 0:
d = c1
elif b < 0:
d = '%s-%s' % (c1, -b)
else:
d = '%s+%s' % (c1, b)
else:
c1 = str(d1[2])
if c1 not in args:
if _varname_match(c1):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
c1 = '(%s)' % c1
if d1[0] == 1:
c1 = '-%s' % c1
elif d1[0] == -1:
c1 = '+%s' % c1
elif d1[0] < 0:
c1 = '+%s*%s' % (-d1[0], c1)
else:
c1 = '-%s*%s' % (d1[0], c1)
c2 = str(d2[2])
if c2 not in args:
if _varname_match(c2):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
c2 = '(%s)' % c2
if d2[0] == 1:
pass
elif d2[0] == -1:
c2 = '-%s' % c2
else:
c2 = '%s*%s' % (d2[0], c2)
if b == 0:
d = '%s%s' % (c2, c1)
elif b < 0:
d = '%s%s-%s' % (c2, c1, -b)
else:
d = '%s%s+%s' % (c2, c1, b)
return d, None, None
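# Editor's illustrative sketch (hypothetical helper, never called by the
# module): dimension(1:n) has extent '1 * n' with an inverse suffix that
# lets the wrapper solve n back from a passed array's extent; analyzevars
# later strips the trivial '1 * ' prefix and '/(1)' suffix.
def _demo_getarrlen():
    assert getarrlen(['1', 'n'], ['n']) == ('1 * n', 'n', ')/(1)')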
word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I)
def _get_depend_dict(name, vars, deps):
if name in vars:
words = vars[name].get('depend', [])
if '=' in vars[name] and not isstring(vars[name]):
for word in word_pattern.findall(vars[name]['=']):
if word not in words and word in vars:
words.append(word)
for word in words[:]:
for w in deps.get(word, []) \
or _get_depend_dict(word, vars, deps):
if w not in words:
words.append(w)
else:
outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name)))
words = []
deps[name] = words
return words
def _calc_depend_dict(vars):
names = list(vars.keys())
depend_dict = {}
for n in names:
_get_depend_dict(n, vars, depend_dict)
return depend_dict
def get_sorted_names(vars):
"""
"""
depend_dict = _calc_depend_dict(vars)
names = []
for name in list(depend_dict.keys()):
if not depend_dict[name]:
names.append(name)
del depend_dict[name]
while depend_dict:
for name, lst in list(depend_dict.items()):
new_lst = [n for n in lst if n in depend_dict]
if not new_lst:
names.append(name)
del depend_dict[name]
else:
depend_dict[name] = new_lst
return [name for name in names if name in vars]
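# Editor's illustrative sketch (hypothetical helper, never called by the
# module): names come out dependencies-first, so a bound 'n' precedes the
# array 'x' whose dimension depends on it.
def _demo_sorted_names():
    assert get_sorted_names({'x': {'depend': ['n']}, 'n': {}}) == ['n', 'x']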
def _kind_func(string):
# XXX: return something sensible.
if string[0] in "'\"":
string = string[1:-1]
if real16pattern.match(string):
return 8
elif real8pattern.match(string):
return 4
return 'kind(' + string + ')'
def _selected_int_kind_func(r):
# XXX: This should be processor dependent
m = 10 ** r
if m <= 2 ** 8:
return 1
if m <= 2 ** 16:
return 2
if m <= 2 ** 32:
return 4
if m <= 2 ** 63:
return 8
if m <= 2 ** 128:
return 16
return -1
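# Editor's illustrative sketch (hypothetical helper, never called by the
# module): the requested decimal range 10**r is mapped onto the smallest
# of the usual 1/2/4/8/16-byte integer kinds.
def _demo_selected_int_kind():
    assert _selected_int_kind_func(2) == 1
    assert _selected_int_kind_func(5) == 4
    assert _selected_int_kind_func(18) == 8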
def _selected_real_kind_func(p, r=0, radix=0):
# XXX: This should be processor dependent
# This is only good for 0 <= p <= 20
if p < 7:
return 4
if p < 16:
return 8
if platform.machine().lower().startswith('power'):
if p <= 20:
return 16
else:
if p < 19:
return 10
elif p <= 20:
return 16
return -1
def get_parameters(vars, global_params=None):
    # Use a None default: a shared mutable default dict is a classic pitfall.
    if global_params is None:
        global_params = {}
    params = copy.copy(global_params)
    g_params = copy.copy(global_params)
for name, func in [('kind', _kind_func),
('selected_int_kind', _selected_int_kind_func),
('selected_real_kind', _selected_real_kind_func), ]:
if name not in g_params:
g_params[name] = func
param_names = []
for n in get_sorted_names(vars):
if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
param_names.append(n)
kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I)
selected_int_kind_re = re.compile(
r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
selected_kind_re = re.compile(
r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
for n in param_names:
if '=' in vars[n]:
v = vars[n]['=']
if islogical(vars[n]):
v = v.lower()
for repl in [
('.false.', 'False'),
('.true.', 'True'),
# TODO: test .eq., .neq., etc replacements.
]:
v = v.replace(*repl)
v = kind_re.sub(r'kind("\1")', v)
v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v)
if isinteger(vars[n]) and not selected_kind_re.match(v):
v = v.split('_')[0]
if isdouble(vars[n]):
tt = list(v)
for m in real16pattern.finditer(v):
tt[m.start():m.end()] = list(
v[m.start():m.end()].lower().replace('d', 'e'))
v = ''.join(tt)
if iscomplex(vars[n]):
if v[0] == '(' and v[-1] == ')':
# FIXME, unused l looks like potential bug
l = markoutercomma(v[1:-1]).split('@,@')
try:
params[n] = eval(v, g_params, params)
except Exception as msg:
params[n] = v
outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v)))
if isstring(vars[n]) and isinstance(params[n], int):
params[n] = chr(params[n])
nl = n.lower()
if nl != n:
params[nl] = params[n]
else:
print(vars[n])
outmess(
'get_parameters:parameter %s does not have value?!\n' % (repr(n)))
return params
def _eval_length(length, params):
if length in ['(:)', '(*)', '*']:
return '(*)'
return _eval_scalar(length, params)
_is_kind_number = re.compile(r'\d+_').match
def _eval_scalar(value, params):
if _is_kind_number(value):
value = value.split('_')[0]
try:
value = str(eval(value, {}, params))
except (NameError, SyntaxError):
return value
except Exception as msg:
errmess('"%s" in evaluating %r '
'(available names: %s)\n'
% (msg, value, list(params.keys())))
return value
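# Editor's illustrative sketch (hypothetical helper, never called by the
# module): kind-tagged literals are evaluated from their digits, and names
# that cannot be resolved are passed through unchanged.
def _demo_eval_scalar():
    assert _eval_scalar('8_ikind', {}) == '8'
    assert _eval_scalar('n+1', {'n': 2}) == '3'
    assert _eval_scalar('m+1', {}) == 'm+1'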
def analyzevars(block):
global f90modulevars
setmesstext(block)
implicitrules, attrrules = buildimplicitrules(block)
vars = copy.copy(block['vars'])
if block['block'] == 'function' and block['name'] not in vars:
vars[block['name']] = {}
if '' in block['vars']:
del vars['']
if 'attrspec' in block['vars']['']:
gen = block['vars']['']['attrspec']
for n in list(vars.keys()):
for k in ['public', 'private']:
if k in gen:
vars[n] = setattrspec(vars[n], k)
svars = []
args = block['args']
for a in args:
try:
vars[a]
svars.append(a)
except KeyError:
pass
for n in list(vars.keys()):
if n not in args:
svars.append(n)
params = get_parameters(vars, get_useparameters(block))
dep_matches = {}
name_match = re.compile(r'\w[\w\d_$]*').match
for v in list(vars.keys()):
m = name_match(v)
if m:
n = v[m.start():m.end()]
try:
dep_matches[n]
except KeyError:
dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match
for n in svars:
if n[0] in list(attrrules.keys()):
vars[n] = setattrspec(vars[n], attrrules[n[0]])
if 'typespec' not in vars[n]:
if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
if implicitrules:
ln0 = n[0].lower()
for k in list(implicitrules[ln0].keys()):
if k == 'typespec' and implicitrules[ln0][k] == 'undefined':
continue
if k not in vars[n]:
vars[n][k] = implicitrules[ln0][k]
elif k == 'attrspec':
for l in implicitrules[ln0][k]:
vars[n] = setattrspec(vars[n], l)
elif n in block['args']:
outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % (
repr(n), block['name']))
if 'charselector' in vars[n]:
if 'len' in vars[n]['charselector']:
l = vars[n]['charselector']['len']
try:
l = str(eval(l, {}, params))
                except Exception:
pass
vars[n]['charselector']['len'] = l
if 'kindselector' in vars[n]:
if 'kind' in vars[n]['kindselector']:
l = vars[n]['kindselector']['kind']
try:
l = str(eval(l, {}, params))
                except Exception:
pass
vars[n]['kindselector']['kind'] = l
savelindims = {}
if 'attrspec' in vars[n]:
attr = vars[n]['attrspec']
attr.reverse()
vars[n]['attrspec'] = []
dim, intent, depend, check, note = None, None, None, None, None
for a in attr:
if a[:9] == 'dimension':
dim = (a[9:].strip())[1:-1]
elif a[:6] == 'intent':
intent = (a[6:].strip())[1:-1]
elif a[:6] == 'depend':
depend = (a[6:].strip())[1:-1]
elif a[:5] == 'check':
check = (a[5:].strip())[1:-1]
elif a[:4] == 'note':
note = (a[4:].strip())[1:-1]
else:
vars[n] = setattrspec(vars[n], a)
if intent:
if 'intent' not in vars[n]:
vars[n]['intent'] = []
for c in [x.strip() for x in markoutercomma(intent).split('@,@')]:
# Remove spaces so that 'in out' becomes 'inout'
tmp = c.replace(' ', '')
if tmp not in vars[n]['intent']:
vars[n]['intent'].append(tmp)
intent = None
if note:
note = note.replace('\\n\\n', '\n\n')
note = note.replace('\\n ', '\n')
if 'note' not in vars[n]:
vars[n]['note'] = [note]
else:
vars[n]['note'].append(note)
note = None
if depend is not None:
if 'depend' not in vars[n]:
vars[n]['depend'] = []
for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]):
if c not in vars[n]['depend']:
vars[n]['depend'].append(c)
depend = None
if check is not None:
if 'check' not in vars[n]:
vars[n]['check'] = []
for c in [x.strip() for x in markoutercomma(check).split('@,@')]:
if c not in vars[n]['check']:
vars[n]['check'].append(c)
check = None
if dim and 'dimension' not in vars[n]:
vars[n]['dimension'] = []
for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]):
star = '*'
if d == ':':
star = ':'
if d in params:
d = str(params[d])
for p in list(params.keys()):
re_1 = re.compile(r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I)
m = re_1.match(d)
while m:
d = m.group('before') + \
str(params[p]) + m.group('after')
m = re_1.match(d)
if d == star:
dl = [star]
else:
dl = markoutercomma(d, ':').split('@:@')
if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*)
dl = ['*']
d = '*'
if len(dl) == 1 and not dl[0] == star:
dl = ['1', dl[0]]
if len(dl) == 2:
d, v, di = getarrlen(dl, list(block['vars'].keys()))
if d[:4] == '1 * ':
d = d[4:]
if di and di[-4:] == '/(1)':
di = di[:-4]
if v:
savelindims[d] = v, di
vars[n]['dimension'].append(d)
if 'dimension' in vars[n]:
if isintent_c(vars[n]):
shape_macro = 'shape'
else:
shape_macro = 'shape' # 'fshape'
if isstringarray(vars[n]):
if 'charselector' in vars[n]:
d = vars[n]['charselector']
if '*' in d:
d = d['*']
errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'
% (d, n,
','.join(vars[n]['dimension']),
n, ','.join(vars[n]['dimension'] + [d])))
vars[n]['dimension'].append(d)
del vars[n]['charselector']
if 'intent' not in vars[n]:
vars[n]['intent'] = []
if 'c' not in vars[n]['intent']:
vars[n]['intent'].append('c')
else:
errmess(
"analyzevars: charselector=%r unhandled." % (d))
if 'check' not in vars[n] and 'args' in block and n in block['args']:
flag = 'depend' not in vars[n]
if flag:
vars[n]['depend'] = []
vars[n]['check'] = []
if 'dimension' in vars[n]:
#/----< no check
i = -1
ni = len(vars[n]['dimension'])
for d in vars[n]['dimension']:
                ddeps = []  # dependencies of 'd'
ad = ''
pd = ''
if d not in vars:
if d in savelindims:
pd, ad = '(', savelindims[d][1]
d = savelindims[d][0]
else:
for r in block['args']:
if r not in vars:
continue
if re.match(r'.*?\b' + r + r'\b', d, re.I):
ddeps.append(r)
if d in vars:
if 'attrspec' in vars[d]:
for aa in vars[d]['attrspec']:
if aa[:6] == 'depend':
ddeps += aa[6:].strip()[1:-1].split(',')
if 'depend' in vars[d]:
ddeps = ddeps + vars[d]['depend']
i = i + 1
if d in vars and ('depend' not in vars[d]) \
and ('=' not in vars[d]) and (d not in vars[n]['depend']) \
and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]):
vars[d]['depend'] = [n]
if ni > 1:
vars[d]['='] = '%s%s(%s,%s)%s' % (
pd, shape_macro, n, i, ad)
else:
vars[d]['='] = '%slen(%s)%s' % (pd, n, ad)
# /---< no check
if 1 and 'check' not in vars[d]:
if ni > 1:
vars[d]['check'] = ['%s%s(%s,%i)%s==%s'
% (pd, shape_macro, n, i, ad, d)]
else:
vars[d]['check'] = [
'%slen(%s)%s>=%s' % (pd, n, ad, d)]
if 'attrspec' not in vars[d]:
vars[d]['attrspec'] = ['optional']
if ('optional' not in vars[d]['attrspec']) and\
('required' not in vars[d]['attrspec']):
vars[d]['attrspec'].append('optional')
elif d not in ['*', ':']:
#/----< no check
if flag:
if d in vars:
if n not in ddeps:
vars[n]['depend'].append(d)
else:
vars[n]['depend'] = vars[n]['depend'] + ddeps
elif isstring(vars[n]):
length = '1'
if 'charselector' in vars[n]:
if '*' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['*'],
params)
vars[n]['charselector']['*'] = length
elif 'len' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['len'],
params)
del vars[n]['charselector']['len']
vars[n]['charselector']['*'] = length
if not vars[n]['check']:
del vars[n]['check']
if flag and not vars[n]['depend']:
del vars[n]['depend']
if '=' in vars[n]:
if 'attrspec' not in vars[n]:
vars[n]['attrspec'] = []
if ('optional' not in vars[n]['attrspec']) and \
('required' not in vars[n]['attrspec']):
vars[n]['attrspec'].append('optional')
if 'depend' not in vars[n]:
vars[n]['depend'] = []
for v, m in list(dep_matches.items()):
if m(vars[n]['=']):
vars[n]['depend'].append(v)
if not vars[n]['depend']:
del vars[n]['depend']
if isscalar(vars[n]):
vars[n]['='] = _eval_scalar(vars[n]['='], params)
for n in list(vars.keys()):
if n == block['name']: # n is block name
if 'note' in vars[n]:
block['note'] = vars[n]['note']
if block['block'] == 'function':
if 'result' in block and block['result'] in vars:
vars[n] = appenddecl(vars[n], vars[block['result']])
if 'prefix' in block:
pr = block['prefix']
ispure = 0
isrec = 1
pr1 = pr.replace('pure', '')
ispure = (not pr == pr1)
pr = pr1.replace('recursive', '')
isrec = (not pr == pr1)
m = typespattern[0].match(pr)
if m:
typespec, selector, attr, edecl = cracktypespec0(
m.group('this'), m.group('after'))
kindselect, charselect, typename = cracktypespec(
typespec, selector)
vars[n]['typespec'] = typespec
if kindselect:
if 'kind' in kindselect:
try:
kindselect['kind'] = eval(
kindselect['kind'], {}, params)
                                except Exception:
pass
vars[n]['kindselector'] = kindselect
if charselect:
vars[n]['charselector'] = charselect
if typename:
vars[n]['typename'] = typename
if ispure:
vars[n] = setattrspec(vars[n], 'pure')
if isrec:
vars[n] = setattrspec(vars[n], 'recursive')
else:
outmess(
'analyzevars: prefix (%s) were not used\n' % repr(block['prefix']))
if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']:
if 'commonvars' in block:
neededvars = copy.copy(block['args'] + block['commonvars'])
else:
neededvars = copy.copy(block['args'])
for n in list(vars.keys()):
if l_or(isintent_callback, isintent_aux)(vars[n]):
neededvars.append(n)
if 'entry' in block:
neededvars.extend(list(block['entry'].keys()))
for k in list(block['entry'].keys()):
for n in block['entry'][k]:
if n not in neededvars:
neededvars.append(n)
if block['block'] == 'function':
if 'result' in block:
neededvars.append(block['result'])
else:
neededvars.append(block['name'])
if block['block'] in ['subroutine', 'function']:
name = block['name']
if name in vars and 'intent' in vars[name]:
block['intent'] = vars[name]['intent']
if block['block'] == 'type':
neededvars.extend(list(vars.keys()))
for n in list(vars.keys()):
if n not in neededvars:
del vars[n]
return vars
analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I)
def expr2name(a, block, args=[]):
orig_a = a
a_is_expr = not analyzeargs_re_1.match(a)
if a_is_expr: # `a` is an expression
implicitrules, attrrules = buildimplicitrules(block)
at = determineexprtype(a, block['vars'], implicitrules)
na = 'e_'
for c in a:
c = c.lower()
if c not in string.ascii_lowercase + string.digits:
c = '_'
na = na + c
if na[-1] == '_':
na = na + 'e'
else:
na = na + '_e'
a = na
while a in block['vars'] or a in block['args']:
a = a + 'r'
if a in args:
k = 1
while a + str(k) in args:
k = k + 1
a = a + str(k)
if a_is_expr:
block['vars'][a] = at
else:
if a not in block['vars']:
if orig_a in block['vars']:
block['vars'][a] = block['vars'][orig_a]
else:
block['vars'][a] = {}
if 'externals' in block and orig_a in block['externals'] + block['interfaced']:
block['vars'][a] = setattrspec(block['vars'][a], 'external')
return a
def analyzeargs(block):
setmesstext(block)
implicitrules, attrrules = buildimplicitrules(block)
if 'args' not in block:
block['args'] = []
args = []
for a in block['args']:
a = expr2name(a, block, args)
args.append(a)
block['args'] = args
if 'entry' in block:
for k, args1 in list(block['entry'].items()):
for a in args1:
if a not in block['vars']:
block['vars'][a] = {}
for b in block['body']:
if b['name'] in args:
if 'externals' not in block:
block['externals'] = []
if b['name'] not in block['externals']:
block['externals'].append(b['name'])
if 'result' in block and block['result'] not in block['vars']:
block['vars'][block['result']] = {}
return block
determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I)
determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>[\w]+)|)\Z', re.I)
determineexprtype_re_3 = re.compile(
    r'\A[+-]?[\d.]+[\d+-de.]*(_(?P<name>[\w]+)|)\Z', re.I)
determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I)
determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I)
def _ensure_exprdict(r):
if isinstance(r, int):
return {'typespec': 'integer'}
if isinstance(r, float):
return {'typespec': 'real'}
if isinstance(r, complex):
return {'typespec': 'complex'}
if isinstance(r, dict):
return r
raise AssertionError(repr(r))
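# Editor's sketch: _ensure_exprdict normalises raw Python constants (as
# produced by eval'ing parameter expressions) into typespec dictionaries.
assert _ensure_exprdict(3) == {'typespec': 'integer'}
assert _ensure_exprdict(1.5) == {'typespec': 'real'}
assert _ensure_exprdict(2j) == {'typespec': 'complex'}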
def determineexprtype(expr, vars, rules={}):
if expr in vars:
return _ensure_exprdict(vars[expr])
expr = expr.strip()
if determineexprtype_re_1.match(expr):
return {'typespec': 'complex'}
m = determineexprtype_re_2.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
outmess(
'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))
return {'typespec': 'integer'}
m = determineexprtype_re_3.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
outmess(
'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))
return {'typespec': 'real'}
for op in ['+', '-', '*', '/']:
for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]:
if e in vars:
return _ensure_exprdict(vars[e])
t = {}
if determineexprtype_re_4.match(expr): # in parenthesis
t = determineexprtype(expr[1:-1], vars, rules)
else:
m = determineexprtype_re_5.match(expr)
if m:
rn = m.group('name')
t = determineexprtype(m.group('name'), vars, rules)
if t and 'attrspec' in t:
del t['attrspec']
if not t:
if rn[0] in rules:
return _ensure_exprdict(rules[rn[0]])
if expr[0] in '\'"':
return {'typespec': 'character', 'charselector': {'*': '*'}}
if not t:
outmess(
            'determineexprtype: could not determine the type of expression %s.\n' % (repr(expr)))
return t
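# Editor's sketch: literal classification by determineexprtype. These three
# cases exercise the complex/integer/real regexes above without touching
# vars or the implicit rules.
assert determineexprtype('42', {}) == {'typespec': 'integer'}
assert determineexprtype('1.5e0', {}) == {'typespec': 'real'}
assert determineexprtype('(1.0,2.0)', {}) == {'typespec': 'complex'}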
######
def crack2fortrangen(block, tab='\n', as_interface=False):
global skipfuncs, onlyfuncs
setmesstext(block)
ret = ''
if isinstance(block, list):
for g in block:
if g and g['block'] in ['function', 'subroutine']:
if g['name'] in skipfuncs:
continue
if onlyfuncs and g['name'] not in onlyfuncs:
continue
ret = ret + crack2fortrangen(g, tab, as_interface=as_interface)
return ret
prefix = ''
name = ''
args = ''
blocktype = block['block']
if blocktype == 'program':
return ''
argsl = []
if 'name' in block:
name = block['name']
if 'args' in block:
vars = block['vars']
for a in block['args']:
a = expr2name(a, block, argsl)
if not isintent_callback(vars[a]):
argsl.append(a)
if block['block'] == 'function' or argsl:
args = '(%s)' % ','.join(argsl)
f2pyenhancements = ''
if 'f2pyenhancements' in block:
for k in list(block['f2pyenhancements'].keys()):
f2pyenhancements = '%s%s%s %s' % (
f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k])
intent_lst = block.get('intent', [])[:]
if blocktype == 'function' and 'callback' in intent_lst:
intent_lst.remove('callback')
if intent_lst:
f2pyenhancements = '%s%sintent(%s) %s' %\
(f2pyenhancements, tab + tabchar,
','.join(intent_lst), name)
use = ''
if 'use' in block:
use = use2fortran(block['use'], tab + tabchar)
common = ''
if 'common' in block:
common = common2fortran(block['common'], tab + tabchar)
if name == 'unknown_interface':
name = ''
result = ''
if 'result' in block:
result = ' result (%s)' % block['result']
if block['result'] not in argsl:
argsl.append(block['result'])
body = crack2fortrangen(block['body'], tab + tabchar)
vars = vars2fortran(
block, block['vars'], argsl, tab + tabchar, as_interface=as_interface)
mess = ''
if 'from' in block and not as_interface:
mess = '! in %s' % block['from']
if 'entry' in block:
entry_stmts = ''
for k, i in list(block['entry'].items()):
entry_stmts = '%s%sentry %s(%s)' \
% (entry_stmts, tab + tabchar, k, ','.join(i))
body = body + entry_stmts
if blocktype == 'block data' and name == '_BLOCK_DATA_':
name = ''
ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % (
tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name)
return ret
def common2fortran(common, tab=''):
ret = ''
for k in list(common.keys()):
if k == '_BLNK_':
ret = '%s%scommon %s' % (ret, tab, ','.join(common[k]))
else:
ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k]))
return ret
def use2fortran(use, tab=''):
ret = ''
for m in list(use.keys()):
ret = '%s%suse %s,' % (ret, tab, m)
if use[m] == {}:
if ret and ret[-1] == ',':
ret = ret[:-1]
continue
if 'only' in use[m] and use[m]['only']:
ret = '%s only:' % (ret)
if 'map' in use[m] and use[m]['map']:
c = ' '
for k in list(use[m]['map'].keys()):
if k == use[m]['map'][k]:
ret = '%s%s%s' % (ret, c, k)
c = ','
else:
ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k])
c = ','
if ret and ret[-1] == ',':
ret = ret[:-1]
return ret
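# Editor's sketch: use2fortran renders a parsed USE mapping back to Fortran
# source; the dict layout is assumed from the loop above.
assert use2fortran({'mymod': {'only': 1, 'map': {'a': 'b'}}}) == \
    'use mymod, only: a=>b'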
def true_intent_list(var):
lst = var['intent']
ret = []
for intent in lst:
try:
c = eval('isintent_%s(var)' % intent)
except NameError:
c = 0
if c:
ret.append(intent)
return ret
def vars2fortran(block, vars, args, tab='', as_interface=False):
"""
TODO:
public sub
...
"""
setmesstext(block)
ret = ''
nout = []
for a in args:
if a in block['vars']:
nout.append(a)
if 'commonvars' in block:
for a in block['commonvars']:
if a in vars:
if a not in nout:
nout.append(a)
else:
errmess(
'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a)
if 'varnames' in block:
nout.extend(block['varnames'])
if not as_interface:
for a in list(vars.keys()):
if a not in nout:
nout.append(a)
for a in nout:
if 'depend' in vars[a]:
for d in vars[a]['depend']:
if d in vars and 'depend' in vars[d] and a in vars[d]['depend']:
errmess(
'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d))
if 'externals' in block and a in block['externals']:
if isintent_callback(vars[a]):
ret = '%s%sintent(callback) %s' % (ret, tab, a)
ret = '%s%sexternal %s' % (ret, tab, a)
if isoptional(vars[a]):
ret = '%s%soptional %s' % (ret, tab, a)
if a in vars and 'typespec' not in vars[a]:
continue
cont = 1
for b in block['body']:
if a == b['name'] and b['block'] == 'function':
cont = 0
break
if cont:
continue
if a not in vars:
show(vars)
outmess('vars2fortran: No definition for argument "%s".\n' % a)
continue
if a == block['name'] and not block['block'] == 'function':
continue
if 'typespec' not in vars[a]:
if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']:
if a in args:
ret = '%s%sexternal %s' % (ret, tab, a)
continue
show(vars[a])
outmess('vars2fortran: No typespec for argument "%s".\n' % a)
continue
vardef = vars[a]['typespec']
if vardef == 'type' and 'typename' in vars[a]:
vardef = '%s(%s)' % (vardef, vars[a]['typename'])
selector = {}
if 'kindselector' in vars[a]:
selector = vars[a]['kindselector']
elif 'charselector' in vars[a]:
selector = vars[a]['charselector']
if '*' in selector:
if selector['*'] in ['*', ':']:
vardef = '%s*(%s)' % (vardef, selector['*'])
else:
vardef = '%s*%s' % (vardef, selector['*'])
else:
if 'len' in selector:
vardef = '%s(len=%s' % (vardef, selector['len'])
if 'kind' in selector:
vardef = '%s,kind=%s)' % (vardef, selector['kind'])
else:
vardef = '%s)' % (vardef)
elif 'kind' in selector:
vardef = '%s(kind=%s)' % (vardef, selector['kind'])
c = ' '
if 'attrspec' in vars[a]:
attr = []
for l in vars[a]['attrspec']:
if l not in ['external']:
attr.append(l)
if attr:
vardef = '%s, %s' % (vardef, ','.join(attr))
c = ','
if 'dimension' in vars[a]:
vardef = '%s%sdimension(%s)' % (
vardef, c, ','.join(vars[a]['dimension']))
c = ','
if 'intent' in vars[a]:
lst = true_intent_list(vars[a])
if lst:
vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst))
c = ','
if 'check' in vars[a]:
vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check']))
c = ','
if 'depend' in vars[a]:
vardef = '%s%sdepend(%s)' % (
vardef, c, ','.join(vars[a]['depend']))
c = ','
if '=' in vars[a]:
v = vars[a]['=']
if vars[a]['typespec'] in ['complex', 'double complex']:
try:
v = eval(v)
v = '(%s,%s)' % (v.real, v.imag)
                except Exception:
pass
vardef = '%s :: %s=%s' % (vardef, a, v)
else:
vardef = '%s :: %s' % (vardef, a)
ret = '%s%s%s' % (ret, tab, vardef)
return ret
######
def crackfortran(files):
global usermodules
outmess('Reading fortran codes...\n', 0)
readfortrancode(files, crackline)
outmess('Post-processing...\n', 0)
usermodules = []
postlist = postcrack(grouplist[0])
outmess('Post-processing (stage 2)...\n', 0)
postlist = postcrack2(postlist)
return usermodules + postlist
def crack2fortran(block):
global f2py_version
pyf = crack2fortrangen(block) + '\n'
header = """! -*- f90 -*-
! Note: the context of this file is case sensitive.
"""
footer = """
! This file was auto-generated with f2py (version:%s).
! See http://cens.ioc.ee/projects/f2py2e/
""" % (f2py_version)
return header + pyf + footer
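# Editor's sketch (hypothetical filename): the round trip these two entry
# points provide --
#     blocks = crackfortran(['example.f'])
#     print(crack2fortran(blocks))   # regenerated signature-file text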
if __name__ == "__main__":
files = []
funcs = []
f = 1
f2 = 0
f3 = 0
    showblocklist = 0
    pyffilename = ''   # ensure defined even when -h is not given
for l in sys.argv[1:]:
if l == '':
pass
elif l[0] == ':':
f = 0
elif l == '-quiet':
quiet = 1
verbose = 0
elif l == '-verbose':
verbose = 2
quiet = 0
elif l == '-fix':
if strictf77:
outmess(
'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0)
skipemptyends = 1
sourcecodeform = 'fix'
elif l == '-skipemptyends':
skipemptyends = 1
elif l == '--ignore-contains':
ignorecontains = 1
elif l == '-f77':
strictf77 = 1
sourcecodeform = 'fix'
elif l == '-f90':
strictf77 = 0
sourcecodeform = 'free'
skipemptyends = 1
elif l == '-h':
f2 = 1
elif l == '-show':
showblocklist = 1
elif l == '-m':
f3 = 1
elif l[0] == '-':
errmess('Unknown option %s\n' % repr(l))
elif f2:
f2 = 0
pyffilename = l
elif f3:
f3 = 0
f77modulename = l
elif f:
try:
open(l).close()
files.append(l)
except IOError as detail:
errmess('IOError: %s\n' % str(detail))
else:
funcs.append(l)
if not strictf77 and f77modulename and not skipemptyends:
outmess("""\
Warning: You have specifyied module name for non Fortran 77 code
that should not need one (expect if you are scanning F90 code
for non module blocks but then you should use flag -skipemptyends
and also be sure that the files do not contain programs without program statement).
""", 0)
    postlist = crackfortran(files)  # crackfortran() takes only the file list; 'funcs' is collected above but unused
if pyffilename:
outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0)
pyf = crack2fortran(postlist)
f = open(pyffilename, 'w')
f.write(pyf)
f.close()
if showblocklist:
show(postlist)
| bsd-3-clause |
Jelloeater/forgeLandWallGUI | wallGui/requests/packages/urllib3/request.py | 567 | 5808 | # urllib3/request.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .filepost import encode_multipart_formdata
__all__ = ['RequestMethods']
class RequestMethods(object):
"""
    Convenience mixin for classes that implement a :meth:`urlopen` method, such
as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
:class:`~urllib3.poolmanager.PoolManager`.
Provides behavior for making common types of HTTP request methods and
decides which type of request field encoding to use.
Specifically,
:meth:`.request_encode_url` is for sending requests whose fields are encoded
in the URL (such as GET, HEAD, DELETE).
:meth:`.request_encode_body` is for sending requests whose fields are
encoded in the *body* of the request using multipart or www-form-urlencoded
(such as for POST, PUT, PATCH).
    :meth:`.request` is for making any kind of request; it will look up the
appropriate encoding format and use one of the above two methods to make
the request.
Initializer parameters:
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
_encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
def __init__(self, headers=None):
self.headers = headers or {}
def urlopen(self, method, url, body=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**kw): # Abstract
        raise NotImplementedError("Classes extending RequestMethods must "
                                  "implement their own ``urlopen`` method.")
def request(self, method, url, fields=None, headers=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the appropriate encoding of
``fields`` based on the ``method`` used.
This is a convenience method that requires the least amount of manual
effort. It can be used in most situations, while still having the option
to drop down to more specific methods when necessary, such as
:meth:`request_encode_url`, :meth:`request_encode_body`,
or even the lowest level :meth:`urlopen`.
"""
method = method.upper()
if method in self._encode_url_methods:
return self.request_encode_url(method, url, fields=fields,
headers=headers,
**urlopen_kw)
else:
return self.request_encode_body(method, url, fields=fields,
headers=headers,
**urlopen_kw)
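    # Editor's sketch (not part of urllib3): how the dispatch above plays
    # out through a PoolManager subclass; the URL is a placeholder.
    #
    #     http = urllib3.PoolManager()
    #     http.request('GET', 'http://example.com/', fields={'q': 'x'})
    #     # -> request_encode_url: GET http://example.com/?q=x
    #     http.request('POST', 'http://example.com/', fields={'q': 'x'})
    #     # -> request_encode_body: fields carried in the request body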
def request_encode_url(self, method, url, fields=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
"""
if fields:
url += '?' + urlencode(fields)
return self.urlopen(method, url, **urlopen_kw)
def request_encode_body(self, method, url, fields=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:meth:`urllib3.filepost.encode_multipart_formdata` is used to encode the
payload with the appropriate content type. Otherwise
:meth:`urllib.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
        safe to use it at other times too. However, it may break request signing,
such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example: ::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic the behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will be
overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
"""
if encode_multipart:
body, content_type = encode_multipart_formdata(fields or {},
boundary=multipart_boundary)
else:
body, content_type = (urlencode(fields or {}),
'application/x-www-form-urlencoded')
if headers is None:
headers = self.headers
headers_ = {'Content-Type': content_type}
headers_.update(headers)
return self.urlopen(method, url, body=body, headers=headers_,
**urlopen_kw)
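# Editor's sketch (not part of urllib3): contrast the two encodings that
# request_encode_body chooses between. Run in a package context (e.g. via
# ``python -m``), since the module uses relative imports.
if __name__ == '__main__':
    body, ctype = encode_multipart_formdata({'foo': 'bar'})
    assert ctype.startswith('multipart/form-data; boundary=')
    assert urlencode({'foo': 'bar'}) == 'foo=bar'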
| mit |
riklaunim/django-custom-multisite | tests/modeltests/tablespaces/models.py | 150 | 1819 | from django.db import models
# Since the test database doesn't have tablespaces, it's impossible for Django
# to create the tables for models where db_tablespace is set. To avoid this
# problem, we mark the models as unmanaged, and temporarily revert them to
# managed during each test. We also set them to use the same tables as the
# "reference" models to avoid errors when other tests run 'syncdb'
# (proxy_models_inheritance does).
class ScientistRef(models.Model):
name = models.CharField(max_length=50)
class ArticleRef(models.Model):
title = models.CharField(max_length=50, unique=True)
code = models.CharField(max_length=50, unique=True)
authors = models.ManyToManyField(ScientistRef, related_name='articles_written_set')
reviewers = models.ManyToManyField(ScientistRef, related_name='articles_reviewed_set')
class Scientist(models.Model):
name = models.CharField(max_length=50)
class Meta:
db_table = 'tablespaces_scientistref'
db_tablespace = 'tbl_tbsp'
managed = False
class Article(models.Model):
title = models.CharField(max_length=50, unique=True)
code = models.CharField(max_length=50, unique=True, db_tablespace='idx_tbsp')
authors = models.ManyToManyField(Scientist, related_name='articles_written_set')
reviewers = models.ManyToManyField(Scientist, related_name='articles_reviewed_set', db_tablespace='idx_tbsp')
class Meta:
db_table = 'tablespaces_articleref'
db_tablespace = 'tbl_tbsp'
managed = False
# Also set the tables for automatically created models
Authors = Article._meta.get_field('authors').rel.through
Authors._meta.db_table = 'tablespaces_articleref_authors'
Reviewers = Article._meta.get_field('reviewers').rel.through
Reviewers._meta.db_table = 'tablespaces_articleref_reviewers'
| bsd-3-clause |
akpotter/OWASP-ZSC | lib/encoder/linux_x86/xor_random.py | 2 | 16830 | #!/usr/bin/env python
'''
ZCR Shellcoder
ZeroDay Cyber Research
Z3r0D4y.Com
Ali Razmjoo
'''
import random,binascii,string
chars = string.digits + string.ascii_letters
def start(shellcode,job):
if 'chmod(' in job:
t = True
eax = str('0x0f')
while t:
eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1)))
eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16))
if '00' not in eax_1 and '00' not in eax_2:
t = False
eax = 'push $%s'%(str(eax))
eax_xor = 'push $0x%s\npop %%eax\npush $0x%s\npop %%ebx\nxor %%eax,%%ebx\npush %%ebx\n'%(eax_1,eax_2)
shellcode = shellcode.replace(eax,eax_xor)
ecx = str(shellcode.rsplit('\n')[8])
ecx_value = str(shellcode.rsplit('\n')[8].rsplit()[1][1:])
t = True
while t:
ecx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
ecx_2 = "%x" % (int(ecx_value, 16) ^ int(ecx_1, 16))
if '00' not in ecx_1 and '00' not in ecx_2:
t = False
ecx_xor = 'push $0x%s\npop %%ebx\npush $0x%s\npop %%ecx\nxor %%ecx,%%ebx\npush %%ebx\n_z3r0d4y_\n'%(str(ecx_1),str(ecx_2))
shellcode = shellcode.replace(ecx,ecx_xor)
n = 0
start = ''
middle = ''
end = ''
add = 0
for l in shellcode.rsplit('\n'):
n += 1
            if add == 0:
if '_z3r0d4y_' not in l:
start += l + '\n'
else:
add = 1
            if add == 1:
if '_z3r0d4y_' not in l:
if '%esp,%ebx' not in l:
middle += l + '\n'
else:
add = 2
            if add == 2:
end += l + '\n'
for l in middle.rsplit('\n'):
if 'push $0x' in l:
ebx = l.rsplit()[1][1:]
ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
ebx_2 = "%x" % (int(ebx, 16) ^ int(ebx_1, 16))
command = 'push $0x%s\npop %%ebx\npush $0x%s\npop %%edx\nxor %%ebx,%%edx\npush %%edx'%(str(ebx_1),str(ebx_2))
middle = middle.replace(l,command)
shellcode = start + middle + end
if 'dir_create(' in job:
shellcode = 'xor %edx,%edx\n' + shellcode.replace('push $0xb\npop %eax\ncltd','').replace('push %ebx\nmov %esp,%ecx','push %ebx\nmov %esp,%ecx'+'\n'+'push $0xb\npop %eax\ncltd')
t = True
eax = str('0xb')
while t:
eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1)))
eax_1 = str('0') + str(eax_1[1])
eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16))
if eax>eax_1:
if '00' not in str(eax_1) and '00' not in str(eax_2):
t = False
A = 0
eax = 'push $%s'%(str(eax))
if '-' in eax_2:
A = 1
eax_2 = eax_2.replace('-','')
eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
        if A == 0:
eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
shellcode = shellcode.replace('push $0xb\npop %eax\ncltd',eax_xor+'\ncltd\n')
for line in shellcode.rsplit('\n'):
if 'push' in line and '$0x' in line and ',' not in line and len(line) > 14:
data = line.rsplit('push')[1].rsplit('$0x')[1]
t = True
while t:
ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16))
if str('00') not in str(ebx_1) and str('00') not in str(ebx_2) and len(ebx_2) >=7 and len(ebx_1) >= 7 and '-' not in ebx_1:
ebx_2 = ebx_2.replace('-','')
command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nxor %%ebx,%%eax\npush %%eax\n'%(str(ebx_1),str(ebx_2))
shellcode = shellcode.replace(line,command)
t = False
if 'download_execute(' in job:
shellcode = 'xor %edx,%edx\n' + shellcode.replace('push $0xb\npop %eax\ncltd','').replace('push %ebx\nmov %esp,%ecx','push %ebx\nmov %esp,%ecx'+'\n'+'push $0xb\npop %eax\ncltd')
t = True
eax = str('0xb')
while t:
eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1)))
eax_1 = str('0') + str(eax_1[1])
eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16))
if eax>eax_1:
if '00' not in str(eax_1) and '00' not in str(eax_2):
t = False
A = 0
eax = 'push $%s'%(str(eax))
if '-' in eax_2:
A = 1
eax_2 = eax_2.replace('-','')
eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
        if A == 0:
eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
shellcode = shellcode.replace('push $0xb\npop %eax\ncltd',eax_xor+'\ncltd\n')
for line in shellcode.rsplit('\n'):
if 'push' in line and '$0x' in line and ',' not in line and len(line) > 14:
data = line.rsplit('push')[1].rsplit('$0x')[1]
t = True
while t:
ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16))
if str('00') not in str(ebx_1) and str('00') not in str(ebx_2) and len(ebx_2) >=7 and len(ebx_1) >= 7 and '-' not in ebx_1:
ebx_2 = ebx_2.replace('-','')
command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nxor %%ebx,%%eax\npush %%eax\n'%(str(ebx_1),str(ebx_2))
shellcode = shellcode.replace(line,command)
t = False
if 'download(' in job:
shellcode = 'xor %edx,%edx\n' + shellcode.replace('push $0xb\npop %eax\ncltd','').replace('push %ebx\nmov %esp,%ecx','push %ebx\nmov %esp,%ecx'+'\n'+'push $0xb\npop %eax\ncltd')
t = True
eax = str('0xb')
while t:
eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1)))
eax_1 = str('0') + str(eax_1[1])
eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16))
if eax>eax_1:
if '00' not in str(eax_1) and '00' not in str(eax_2):
t = False
A = 0
eax = 'push $%s'%(str(eax))
if '-' in eax_2:
A = 1
eax_2 = eax_2.replace('-','')
eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
        if A == 0:
eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
shellcode = shellcode.replace('push $0xb\npop %eax\ncltd',eax_xor+'\ncltd\n')
for line in shellcode.rsplit('\n'):
if 'push' in line and '$0x' in line and ',' not in line and len(line) > 14:
data = line.rsplit('push')[1].rsplit('$0x')[1]
t = True
while t:
ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16))
if str('00') not in str(ebx_1) and str('00') not in str(ebx_2) and len(ebx_2) >=7 and len(ebx_1) >= 7 and '-' not in ebx_1:
ebx_2 = ebx_2.replace('-','')
command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nxor %%ebx,%%eax\npush %%eax\n'%(str(ebx_1),str(ebx_2))
shellcode = shellcode.replace(line,command)
t = False
if 'exec(' in job:
t = True
eax = str('0x46')
while t:
eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1)))
eax_1 = str('0') + str(eax_1[1])
eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16))
if eax>eax_1:
if '00' not in str(eax_1) and '00' not in str(eax_2):
t = False
A = 0
eax = 'push $%s'%(str(eax))
if '-' in eax_2:
A = 1
eax_2 = eax_2.replace('-','')
eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
        if A == 0:
eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
shellcode = shellcode.replace('mov $0x46,%al',eax_xor)
for line in shellcode.rsplit('\n'):
if 'push' in line and '$0x' in line and ',' not in line and len(line) > 14:
data = line.rsplit('push')[1].rsplit('$0x')[1]
t = True
while t:
ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16))
if str('00') not in str(ebx_1) and str('00') not in str(ebx_2) and len(ebx_2) >=7 and len(ebx_1) >= 7 and '-' not in ebx_1:
ebx_2 = ebx_2.replace('-','')
command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nxor %%ebx,%%eax\npush %%eax\n'%(str(ebx_1),str(ebx_2))
shellcode = shellcode.replace(line,command)
t = False
if 'file_create(' in job:
shellcode = 'xor %edx,%edx\n' + shellcode.replace('push $0xb\npop %eax\ncltd','').replace('push %ebx\nmov %esp,%ecx','push %ebx\nmov %esp,%ecx'+'\n'+'push $0xb\npop %eax\ncltd')
t = True
eax = str('0xb')
while t:
eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1)))
eax_1 = str('0') + str(eax_1[1])
eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16))
if eax>eax_1:
if '00' not in str(eax_1) and '00' not in str(eax_2):
t = False
A = 0
eax = 'push $%s'%(str(eax))
if '-' in eax_2:
A = 1
eax_2 = eax_2.replace('-','')
eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
        if A == 0:
eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
shellcode = shellcode.replace('push $0xb\npop %eax\ncltd',eax_xor+'\ncltd\n')
for line in shellcode.rsplit('\n'):
if 'push' in line and '$0x' in line and ',' not in line and len(line) > 14:
data = line.rsplit('push')[1].rsplit('$0x')[1]
t = True
while t:
ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16))
if str('00') not in str(ebx_1) and str('00') not in str(ebx_2) and len(ebx_2) >=7 and len(ebx_1) >= 7 and '-' not in ebx_1:
ebx_2 = ebx_2.replace('-','')
command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nxor %%ebx,%%eax\npush %%eax\n'%(str(ebx_1),str(ebx_2))
shellcode = shellcode.replace(line,command)
t = False
if 'script_executor(' in job:
shellcode = 'xor %edx,%edx\n' + shellcode.replace('push $0xb\npop %eax\ncltd','').replace('push %ebx\nmov %esp,%ecx','push %ebx\nmov %esp,%ecx'+'\n'+'push $0xb\npop %eax\ncltd')
t = True
eax = str('0xb')
while t:
eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1)))
eax_1 = str('0') + str(eax_1[1])
eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16))
if eax>eax_1:
if '00' not in str(eax_1) and '00' not in str(eax_2):
t = False
A = 0
eax = 'push $%s'%(str(eax))
if '-' in eax_2:
A = 1
eax_2 = eax_2.replace('-','')
eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
        if A == 0:
eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
shellcode = shellcode.replace('push $0xb\npop %eax\ncltd',eax_xor+'\ncltd\n')
for line in shellcode.rsplit('\n'):
if 'push' in line and '$0x' in line and ',' not in line and len(line) > 14:
data = line.rsplit('push')[1].rsplit('$0x')[1]
t = True
while t:
ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16))
if str('00') not in str(ebx_1) and str('00') not in str(ebx_2) and len(ebx_2) >=7 and len(ebx_1) >= 7 and '-' not in ebx_1:
ebx_2 = ebx_2.replace('-','')
command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nxor %%ebx,%%eax\npush %%eax\n'%(str(ebx_1),str(ebx_2))
shellcode = shellcode.replace(line,command)
t = False
if 'system(' in job:
shellcode = 'xor %edx,%edx\n' + shellcode.replace('push $0xb\npop %eax\ncltd','').replace('push %ebx\nmov %esp,%ecx','push %ebx\nmov %esp,%ecx'+'\n'+'push $0xb\npop %eax\ncltd')
t = True
eax = str('0xb')
while t:
eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1)))
eax_1 = str('0') + str(eax_1[1])
eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16))
if eax>eax_1:
if '00' not in str(eax_1) and '00' not in str(eax_2):
t = False
A = 0
eax = 'push $%s'%(str(eax))
if '-' in eax_2:
A = 1
eax_2 = eax_2.replace('-','')
eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
        if A == 0:
eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
shellcode = shellcode.replace('push $0xb\npop %eax\ncltd',eax_xor+'\ncltd\n')
for line in shellcode.rsplit('\n'):
if 'push' in line and '$0x' in line and ',' not in line and len(line) > 14:
data = line.rsplit('push')[1].rsplit('$0x')[1]
t = True
while t:
ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16))
if str('00') not in str(ebx_1) and str('00') not in str(ebx_2) and len(ebx_2) >=7 and len(ebx_1) >= 7 and '-' not in ebx_1:
ebx_2 = ebx_2.replace('-','')
command = '\npush $0x%s\npop %%ebx\npush $0x%s\npop %%eax\nxor %%ebx,%%eax\npush %%eax\n'%(str(ebx_1),str(ebx_2))
shellcode = shellcode.replace(line,command)
t = False
if 'write(' in job:
t = True
eax = str('0x5')
while t:
eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1)))
eax_1 = str('0') + str(eax_1[1])
eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16))
if eax>eax_1:
if '00' not in str(eax_1) and '00' not in str(eax_2):
t = False
A = 0
eax = 'push $%s'%(str(eax))
if '-' in eax_2:
A = 1
eax_2 = eax_2.replace('-','')
eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
        if A == 0:
eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
shellcode = shellcode.replace('push $0x5\npop %eax',eax_xor)
t = True
eax = str('0x4')
while t:
eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(1)))
eax_1 = str('0') + str(eax_1[1])
eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16))
if eax>eax_1:
if '00' not in str(eax_1) and '00' not in str(eax_2):
t = False
A = 0
eax = 'push $%s'%(str(eax))
if '-' in eax_2:
A = 1
eax_2 = eax_2.replace('-','')
eax_xor = 'push $0x%s\npop %%eax\nneg %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
        if A == 0:
eax_xor = 'push $0x%s\npop %%eax\nxor $0x%s,%%eax\n'%(eax_2,eax_1)
shellcode = shellcode.replace('push $0x4\npop %eax',eax_xor)
A = 0
for line in shellcode.rsplit('\n'):
if 'mov %esp,%ebx' in line:
A = 1
shellcode = shellcode.replace(line,'\nmov %esp,%ebx\n_z3r0d4y_\n')
            if A == 0:
if 'push' in line and '$0x' in line and ',' not in line and len(line) > 14:
data = line.rsplit('push')[1].rsplit('$0x')[1]
t = True
while t:
ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16))
if str('00') not in str(ebx_1) and str('00') not in str(ebx_2) and len(ebx_2) >=7 and len(ebx_1) >= 7 and '-' not in ebx_1:
ebx_2 = ebx_2.replace('-','')
command = '\npush $0x%s\npop %%ebx\nxor $0x%s,%%ebx\npush %%ebx\n'%(str(ebx_2),str(ebx_1))
shellcode = shellcode.replace(line,command)
t = False
shellcode = shellcode.replace('_z3r0d4y_','')
t = True
eax = str('4014141')
while t:
eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16))
if eax>eax_1:
if '00' not in str(eax_1) and '00' not in str(eax_2):
t = False
A = 0
eax = 'push $%s'%(str(eax))
if '-' in eax_2:
A = 1
eax_2 = eax_2.replace('-','')
eax_xor = 'push $0x%s\npop %%ecx\nneg %%ecx\nxor $0x%s,%%ecx\n'%(eax_2,eax_1)
        if A == 0:
eax_xor = 'push $0x%s\npop %%ecx\nxor $0x%s,%%ecx\n'%(eax_2,eax_1)
shellcode = shellcode.replace('push $0x4014141\npop %ecx',eax_xor+'\n_z3r0d4y_\n').replace('mov %esp,%ecx\npush $0x0b909090','\n_z3r0|d4y_\nmov %esp,%ecx\npush $0x0b909090\n')
A = 1
for line in shellcode.rsplit('\n'):
if '_z3r0d4y_' in line:
A = 0
if '_z3r0|d4y_' in line:
A = 1
            if A == 0:
if 'push' in line and '$0x' in line and ',' not in line and len(line) > 14:
data = line.rsplit('push')[1].rsplit('$0x')[1]
t = True
while t:
ebx_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
ebx_2 = "%x" % (int(data, 16) ^ int(ebx_1, 16))
if str('00') not in str(ebx_1) and str('00') not in str(ebx_2) and len(ebx_2) >=7 and len(ebx_1) >= 7 and '-' not in ebx_1:
ebx_2 = ebx_2.replace('-','')
command = '\npush $0x%s\npop %%ecx\nxor $0x%s,%%ecx\npush %%ecx\n'%(str(ebx_2),str(ebx_1))
shellcode = shellcode.replace(line,command)
t = False
shellcode = shellcode.replace('_z3r0d4y_','').replace('_z3r0|d4y_','')
t = True
eax = str('0b909090')
while t:
eax_1 = binascii.b2a_hex(''.join(random.choice(chars) for i in range(4)))
eax_2 = "%x" % (int(eax, 16) ^ int(eax_1, 16))
if '00' not in str(eax_1) and '00' not in str(eax_2):
t = False
A = 0
eax = 'push $%s'%(str(eax))
if '-' in eax_2:
A = 1
eax_2 = eax_2.replace('-','')
eax_xor = 'push $0x%s\npop %%edx\nneg %%edx\nxor $0x%s,%%edx\n'%(eax_2,eax_1)
        if A == 0:
eax_xor = 'push $0x%s\npop %%edx\nxor $0x%s,%%edx\n'%(eax_2,eax_1)
shellcode = shellcode.replace('push $0x0b909090\n\npop %edx\n',eax_xor)
return shellcode
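# Editor's sketch (not part of ZSC): the invariant behind every rewrite in
# start() -- a pushed constant is replaced by two operands whose XOR
# restores it, with both hex strings kept free of '00' (a NUL byte would
# truncate the shellcode). The mask generation is simplified here: drawing
# only non-zero hex digits guarantees the mask itself never contains '00'.
def _xor_split_demo(value_hex):
    while True:
        mask = ''.join(random.choice('123456789abcdef') for i in range(8))
        other = '%x' % (int(value_hex, 16) ^ int(mask, 16))
        if '00' not in other:
            return mask, other

if __name__ == '__main__':
    m, o = _xor_split_demo('0b909090')
    assert int(m, 16) ^ int(o, 16) == 0x0b909090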
| gpl-3.0 |
mpare002/HackTech_2017 | env/Lib/site-packages/pip/_vendor/distlib/database.py | 203 | 49199 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport
from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import Metadata, METADATA_FILENAME
from .util import (parse_requirement, cached_property, parse_name_and_version,
read_exports, write_exports, CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'
DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
'RESOURCES', EXPORTS_FILENAME, 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
"""
Represents a set of distributions installed on a path (typically sys.path).
"""
def __init__(self, path=None, include_egg=False):
"""
Create an instance from a path, optionally including legacy (distutils/
setuptools/distribute) distributions.
:param path: The path to use, as a list of directories. If not specified,
sys.path is used.
:param include_egg: If True, this instance will look for and return legacy
distributions as well as those based on PEP 376.
"""
if path is None:
path = sys.path
self.path = path
self._include_dist = True
self._include_egg = include_egg
self._cache = _Cache()
self._cache_egg = _Cache()
self._cache_enabled = True
self._scheme = get_scheme('default')
def _get_cache_enabled(self):
return self._cache_enabled
def _set_cache_enabled(self, value):
self._cache_enabled = value
cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
def clear_cache(self):
"""
Clears the internal cache.
"""
self._cache.clear()
self._cache_egg.clear()
def _yield_distributions(self):
"""
Yield .dist-info and/or .egg(-info) distributions.
"""
# We need to check if we've seen some resources already, because on
# some Linux systems (e.g. some Debian/Ubuntu variants) there are
# symlinks which alias other files in the environment.
seen = set()
for path in self.path:
finder = resources.finder_for_path(path)
if finder is None:
continue
r = finder.find('')
if not r or not r.is_container:
continue
rset = sorted(r.resources)
for entry in rset:
r = finder.find(entry)
if not r or r.path in seen:
continue
if self._include_dist and entry.endswith(DISTINFO_EXT):
metadata_path = posixpath.join(entry, METADATA_FILENAME)
pydist = finder.find(metadata_path)
if not pydist:
continue
metadata = Metadata(fileobj=pydist.as_stream(),
scheme='legacy')
logger.debug('Found %s', r.path)
seen.add(r.path)
yield new_dist_class(r.path, metadata=metadata,
env=self)
elif self._include_egg and entry.endswith(('.egg-info',
'.egg')):
logger.debug('Found %s', r.path)
seen.add(r.path)
yield old_dist_class(r.path, self)
def _generate_cache(self):
"""
Scan the path for distributions and populate the cache with
those that are found.
"""
gen_dist = not self._cache.generated
gen_egg = self._include_egg and not self._cache_egg.generated
if gen_dist or gen_egg:
for dist in self._yield_distributions():
if isinstance(dist, InstalledDistribution):
self._cache.add(dist)
else:
self._cache_egg.add(dist)
if gen_dist:
self._cache.generated = True
if gen_egg:
self._cache_egg.generated = True
@classmethod
def distinfo_dirname(cls, name, version):
"""
The *name* and *version* parameters are converted into their
filename-escaped form, i.e. any ``'-'`` characters are replaced
with ``'_'`` other than the one in ``'dist-info'`` and the one
separating the name from the version number.
:parameter name: is converted to a standard distribution name by replacing
any runs of non- alphanumeric characters with a single
``'-'``.
:type name: string
:parameter version: is converted to a standard version string. Spaces
become dots, and all other non-alphanumeric characters
(except dots) become dashes, with runs of multiple
dashes condensed to a single dash.
:type version: string
:returns: directory name
:rtype: string"""
name = name.replace('-', '_')
return '-'.join([name, version]) + DISTINFO_EXT
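    # Editor's sketch: e.g.
    #     DistributionPath.distinfo_dirname('pip', '7.0.1')
    #         -> 'pip-7.0.1.dist-info'
    #     DistributionPath.distinfo_dirname('my-dist', '1.0')
    #         -> 'my_dist-1.0.dist-info'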
def get_distributions(self):
"""
Provides an iterator that looks for distributions and returns
:class:`InstalledDistribution` or
:class:`EggInfoDistribution` instances for each one of them.
:rtype: iterator of :class:`InstalledDistribution` and
:class:`EggInfoDistribution` instances
"""
if not self._cache_enabled:
for dist in self._yield_distributions():
yield dist
else:
self._generate_cache()
for dist in self._cache.path.values():
yield dist
if self._include_egg:
for dist in self._cache_egg.path.values():
yield dist
def get_distribution(self, name):
"""
Looks for a named distribution on the path.
This function only returns the first result found, as no more than one
value is expected. If nothing is found, ``None`` is returned.
:rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
or ``None``
"""
result = None
name = name.lower()
if not self._cache_enabled:
for dist in self._yield_distributions():
if dist.key == name:
result = dist
break
else:
self._generate_cache()
if name in self._cache.name:
result = self._cache.name[name][0]
elif self._include_egg and name in self._cache_egg.name:
result = self._cache_egg.name[name][0]
return result
def provides_distribution(self, name, version=None):
"""
Iterates over all distributions to find which distributions provide *name*.
If a *version* is provided, it will be used to filter the results.
        This is a generator: it yields every distribution that provides
        *name*; if none does, nothing is yielded.
:parameter version: a version specifier that indicates the version
required, conforming to the format in ``PEP-345``
:type name: string
:type version: string
"""
matcher = None
        if version is not None:
try:
matcher = self._scheme.matcher('%s (%s)' % (name, version))
except ValueError:
raise DistlibException('invalid name or version: %r, %r' %
(name, version))
for dist in self.get_distributions():
provided = dist.provides
for p in provided:
p_name, p_ver = parse_name_and_version(p)
if matcher is None:
if p_name == name:
yield dist
break
else:
if p_name == name and matcher.match(p_ver):
yield dist
break
def get_file_path(self, name, relative_path):
"""
Return the path to a resource file.
"""
dist = self.get_distribution(name)
if dist is None:
raise LookupError('no distribution named %r found' % name)
return dist.get_resource_path(relative_path)
def get_exported_entries(self, category, name=None):
"""
Return all of the exported entries in a particular category.
:param category: The category to search for entries.
:param name: If specified, only entries with that name are returned.
"""
for dist in self.get_distributions():
r = dist.exports
if category in r:
d = r[category]
if name is not None:
if name in d:
yield d[name]
else:
for v in d.values():
yield v
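# Editor's sketch (not part of distlib): typical read-only use of
# DistributionPath; run in a package context (e.g. ``python -m``) because
# of the relative imports above.
if __name__ == '__main__':
    dp = DistributionPath(include_egg=True)
    for d in dp.get_distributions():
        print('%s %s' % (d.name, d.version))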
class Distribution(object):
"""
A base class for distributions, whether installed or from indexes.
Either way, it must have some metadata, so that's all that's needed
for construction.
"""
build_time_dependency = False
"""
Set to True if it's known to be only a build-time dependency (i.e.
not needed after installation).
"""
requested = False
"""A boolean that indicates whether the ``REQUESTED`` metadata file is
present (in other words, whether the package was installed by user
request or it was installed as a dependency)."""
def __init__(self, metadata):
"""
Initialise an instance.
:param metadata: The instance of :class:`Metadata` describing this
distribution.
"""
self.metadata = metadata
self.name = metadata.name
self.key = self.name.lower() # for case-insensitive comparisons
self.version = metadata.version
self.locator = None
self.digest = None
self.extras = None # additional features requested
self.context = None # environment marker overrides
self.download_urls = set()
self.digests = {}
@property
def source_url(self):
"""
The source archive download URL for this distribution.
"""
return self.metadata.source_url
download_url = source_url # Backward compatibility
@property
def name_and_version(self):
"""
A utility property which displays the name and version in parentheses.
"""
return '%s (%s)' % (self.name, self.version)
@property
def provides(self):
"""
A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings.
"""
plist = self.metadata.provides
s = '%s (%s)' % (self.name, self.version)
if s not in plist:
plist.append(s)
return plist
def _get_requirements(self, req_attr):
reqts = getattr(self.metadata, req_attr)
return set(self.metadata.get_requirements(reqts, extras=self.extras,
env=self.context))
@property
def run_requires(self):
return self._get_requirements('run_requires')
@property
def meta_requires(self):
return self._get_requirements('meta_requires')
@property
def build_requires(self):
return self._get_requirements('build_requires')
@property
def test_requires(self):
return self._get_requirements('test_requires')
@property
def dev_requires(self):
return self._get_requirements('dev_requires')
def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
"""
# Requirement may contain extras - parse to lose those
# from what's passed to the matcher
r = parse_requirement(req)
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(r.requirement)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
for p in self.provides:
p_name, p_ver = parse_name_and_version(p)
if p_name != name:
continue
try:
result = matcher.match(p_ver)
break
except UnsupportedVersionError:
pass
return result
def __repr__(self):
"""
Return a textual representation of this instance,
"""
if self.source_url:
suffix = ' [%s]' % self.source_url
else:
suffix = ''
return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
def __eq__(self, other):
"""
See if this distribution is the same as another.
:param other: The distribution to compare with. To be equal to one
                      another, distributions must have the same type, name,
version and source_url.
:return: True if it is the same, else False.
"""
if type(other) is not type(self):
result = False
else:
result = (self.name == other.name and
self.version == other.version and
self.source_url == other.source_url)
return result
def __hash__(self):
"""
Compute hash in a way which matches the equality test.
"""
return hash(self.name) + hash(self.version) + hash(self.source_url)
class BaseInstalledDistribution(Distribution):
"""
This is the base class for installed distributions (whether PEP 376 or
legacy).
"""
hasher = None
def __init__(self, metadata, path, env=None):
"""
Initialise an instance.
:param metadata: An instance of :class:`Metadata` which describes the
distribution. This will normally have been initialised
from a metadata file in the ``path``.
:param path: The path of the ``.dist-info`` or ``.egg-info``
directory for the distribution.
:param env: This is normally the :class:`DistributionPath`
instance where this distribution was found.
"""
super(BaseInstalledDistribution, self).__init__(metadata)
self.path = path
self.dist_path = env
def get_hash(self, data, hasher=None):
"""
Get the hash of some data, using a particular hash algorithm, if
specified.
:param data: The data to be hashed.
:type data: bytes
:param hasher: The name of a hash implementation, supported by hashlib,
or ``None``. Examples of valid values are ``'sha1'``,
``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
``'sha512'``. If no hasher is specified, the ``hasher``
attribute of the :class:`InstalledDistribution` instance
is used. If the hasher is determined to be ``None``, MD5
is used as the hashing algorithm.
:returns: The hash of the data. If a hasher was explicitly specified,
the returned hash will be prefixed with the specified hasher
followed by '='.
:rtype: str
"""
if hasher is None:
hasher = self.hasher
if hasher is None:
hasher = hashlib.md5
prefix = ''
else:
hasher = getattr(hashlib, hasher)
prefix = '%s=' % self.hasher
digest = hasher(data).digest()
digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
return '%s%s' % (prefix, digest)
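# Editor's sketch: the RECORD-style hash format produced by get_hash above,
# reproduced standalone (constructing a real distribution would need an
# on-disk .dist-info directory).
if __name__ == '__main__':
    _digest = hashlib.sha256(b'data').digest()
    _b64 = base64.urlsafe_b64encode(_digest).rstrip(b'=').decode('ascii')
    print('sha256=%s' % _b64)   # the form stored in RECORD files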
class InstalledDistribution(BaseInstalledDistribution):
"""
Created with the *path* of the ``.dist-info`` directory provided to the
constructor. It reads the metadata contained in ``pydist.json`` when it is
    instantiated, or uses a passed-in Metadata instance (useful for when
dry-run mode is being used).
"""
hasher = 'sha256'
def __init__(self, path, metadata=None, env=None):
self.finder = finder = resources.finder_for_path(path)
if finder is None:
            raise ValueError('finder unavailable for %s' % path)
if env and env._cache_enabled and path in env._cache.path:
metadata = env._cache.path[path].metadata
elif metadata is None:
r = finder.find(METADATA_FILENAME)
# Temporary - for legacy support
if r is None:
r = finder.find('METADATA')
if r is None:
raise ValueError('no %s found in %s' % (METADATA_FILENAME,
path))
with contextlib.closing(r.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
super(InstalledDistribution, self).__init__(metadata, path, env)
if env and env._cache_enabled:
env._cache.add(self)
        r = finder.find('REQUESTED')
        self.requested = r is not None
def __repr__(self):
return '<InstalledDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def _get_records(self):
"""
Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376).
"""
results = []
r = self.get_distinfo_resource('RECORD')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as record_reader:
# Base location is parent dir of .dist-info dir
#base_location = os.path.dirname(self.path)
#base_location = os.path.abspath(base_location)
for row in record_reader:
missing = [None for i in range(len(row), 3)]
path, checksum, size = row + missing
#if not os.path.isabs(path):
# path = path.replace('/', os.sep)
# path = os.path.join(base_location, path)
results.append((path, checksum, size))
return results
@cached_property
def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
result = self.read_exports()
return result
def read_exports(self):
"""
Read exports data from a file in .ini format.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
with contextlib.closing(r.as_stream()) as stream:
result = read_exports(stream)
return result
def write_exports(self, exports):
"""
Write a dictionary of exports to a file in .ini format.
:param exports: A dictionary of exports, mapping an export category to
a list of :class:`ExportEntry` instances describing the
individual export entries.
"""
rf = self.get_distinfo_file(EXPORTS_FILENAME)
with open(rf, 'w') as f:
write_exports(exports, f)
def get_resource_path(self, relative_path):
"""
NOTE: This API may change in the future.
Return the absolute path to a resource file with the given relative
path.
:param relative_path: The path, relative to .dist-info, of the resource
of interest.
:return: The absolute path where the resource is to be found.
"""
r = self.get_distinfo_resource('RESOURCES')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as resources_reader:
for relative, destination in resources_reader:
if relative == relative_path:
return destination
raise KeyError('no resource file with relative path %r '
'is installed' % relative_path)
def list_installed_files(self):
"""
Iterates over the ``RECORD`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: iterator of (path, hash, size)
"""
for result in self._get_records():
yield result
def write_installed_files(self, paths, prefix, dry_run=False):
"""
Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
existing ``RECORD`` file is silently overwritten.
prefix is used to determine when to write absolute paths.
"""
prefix = os.path.join(prefix, '')
base = os.path.dirname(self.path)
base_under_prefix = base.startswith(prefix)
base = os.path.join(base, '')
record_path = self.get_distinfo_file('RECORD')
logger.info('creating %s', record_path)
if dry_run:
return None
with CSVWriter(record_path) as writer:
for path in paths:
if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
# do not put size and hash, as in PEP-376
hash_value = size = ''
else:
size = '%d' % os.path.getsize(path)
with open(path, 'rb') as fp:
hash_value = self.get_hash(fp.read())
if path.startswith(base) or (base_under_prefix and
path.startswith(prefix)):
path = os.path.relpath(path, base)
writer.writerow((path, hash_value, size))
# add the RECORD file itself
if record_path.startswith(base):
record_path = os.path.relpath(record_path, base)
writer.writerow((record_path, '', ''))
return record_path
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
base = os.path.dirname(self.path)
record_path = self.get_distinfo_file('RECORD')
for path, hash_value, size in self.list_installed_files():
if not os.path.isabs(path):
path = os.path.join(base, path)
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
elif os.path.isfile(path):
actual_size = str(os.path.getsize(path))
if size and actual_size != size:
mismatches.append((path, 'size', size, actual_size))
elif hash_value:
if '=' in hash_value:
hasher = hash_value.split('=', 1)[0]
else:
hasher = None
with open(path, 'rb') as f:
actual_hash = self.get_hash(f.read(), hasher)
if actual_hash != hash_value:
mismatches.append((path, 'hash', hash_value, actual_hash))
return mismatches
@cached_property
def shared_locations(self):
"""
A dictionary of shared locations whose keys are in the set 'prefix',
'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
The corresponding value is the absolute path of that category for
this distribution, and takes into account any paths selected by the
user at installation time (e.g. via command-line arguments). In the
case of the 'namespace' key, this would be a list of absolute paths
for the roots of namespace packages in this distribution.
The first time this property is accessed, the relevant information is
read from the SHARED file in the .dist-info directory.
"""
result = {}
shared_path = os.path.join(self.path, 'SHARED')
if os.path.isfile(shared_path):
with codecs.open(shared_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
for line in lines:
key, value = line.split('=', 1)
if key == 'namespace':
result.setdefault(key, []).append(value)
else:
result[key] = value
return result
def write_shared_locations(self, paths, dry_run=False):
"""
Write shared location information to the SHARED file in .dist-info.
:param paths: A dictionary as described in the documentation for
:meth:`shared_locations`.
:param dry_run: If True, the action is logged but no file is actually
written.
:return: The path of the file written to.
"""
shared_path = os.path.join(self.path, 'SHARED')
logger.info('creating %s', shared_path)
if dry_run:
return None
lines = []
for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
path = paths[key]
if os.path.isdir(paths[key]):
lines.append('%s=%s' % (key, path))
for ns in paths.get('namespace', ()):
lines.append('namespace=%s' % ns)
with codecs.open(shared_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
return shared_path
def get_distinfo_resource(self, path):
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
finder = resources.finder_for_path(self.path)
if finder is None:
raise DistlibException('Unable to get a finder for %s' % self.path)
return finder.find(path)
def get_distinfo_file(self, path):
"""
Returns a path located under the ``.dist-info`` directory. Returns a
string representing the path.
:parameter path: a ``'/'``-separated path relative to the
``.dist-info`` directory or an absolute path;
If *path* is an absolute path and doesn't start
with the ``.dist-info`` directory path,
a :class:`DistlibException` is raised
:type path: str
:rtype: str
"""
# Check if it is an absolute path # XXX use relpath, add tests
if path.find(os.sep) >= 0:
# it's an absolute path?
distinfo_dirname, path = path.split(os.sep)[-2:]
if distinfo_dirname != self.path.split(os.sep)[-1]:
raise DistlibException(
'dist-info file %r does not belong to the %r %s '
'distribution' % (path, self.name, self.version))
# The file must be relative
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
return os.path.join(self.path, path)
def list_distinfo_files(self):
"""
Iterates over the ``RECORD`` entries and returns paths for each line if
the path is pointing to a file located in the ``.dist-info`` directory
or one of its subdirectories.
:returns: iterator of paths
"""
base = os.path.dirname(self.path)
for path, checksum, size in self._get_records():
# XXX add separator or use real relpath algo
if not os.path.isabs(path):
path = os.path.join(base, path)
if path.startswith(self.path):
yield path
def __eq__(self, other):
return (isinstance(other, InstalledDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
class EggInfoDistribution(BaseInstalledDistribution):
"""Created with the *path* of the ``.egg-info`` directory or file provided
to the constructor. It reads the metadata contained in the file itself, or
if the given path happens to be a directory, the metadata is read from the
file ``PKG-INFO`` under that directory."""
requested = True # as we have no way of knowing, assume it was
shared_locations = {}
def __init__(self, path, env=None):
def set_name_and_version(s, n, v):
s.name = n
s.key = n.lower() # for case-insensitive comparisons
s.version = v
self.path = path
self.dist_path = env
if env and env._cache_enabled and path in env._cache_egg.path:
metadata = env._cache_egg.path[path].metadata
set_name_and_version(self, metadata.name, metadata.version)
else:
metadata = self._get_metadata(path)
# Need to be set before caching
set_name_and_version(self, metadata.name, metadata.version)
if env and env._cache_enabled:
env._cache_egg.add(self)
super(EggInfoDistribution, self).__init__(metadata, path, env)
def _get_metadata(self, path):
requires = None
def parse_requires_data(data):
"""Create a list of dependencies from a requires.txt file.
*data*: the contents of a setuptools-produced requires.txt file.
"""
reqs = []
lines = data.splitlines()
for line in lines:
line = line.strip()
if line.startswith('['):
logger.warning('Unexpected line: quitting requirement scan: %r',
line)
break
r = parse_requirement(line)
if not r:
logger.warning('Not recognised as a requirement: %r', line)
continue
if r.extras:
logger.warning('extra requirements in requires.txt are '
'not supported')
if not r.constraints:
reqs.append(r.name)
else:
cons = ', '.join('%s%s' % c for c in r.constraints)
reqs.append('%s (%s)' % (r.name, cons))
return reqs
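        # For illustration (hypothetical input): feeding the text
        #     "foo >= 1.0\nbar\n[extra]\nbaz"
        # to parse_requires_data() returns roughly ['foo (>=1.0)', 'bar'];
        # the '[extra]' section header stops the scan.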
def parse_requires_path(req_path):
"""Create a list of dependencies from a requires.txt file.
*req_path*: the path to a setuptools-produced requires.txt file.
"""
reqs = []
try:
with codecs.open(req_path, 'r', 'utf-8') as fp:
reqs = parse_requires_data(fp.read())
except IOError:
pass
return reqs
if path.endswith('.egg'):
if os.path.isdir(path):
meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
metadata = Metadata(path=meta_path, scheme='legacy')
req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
requires = parse_requires_path(req_path)
else:
# FIXME handle the case where zipfile is not available
zipf = zipimport.zipimporter(path)
fileobj = StringIO(
zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
metadata = Metadata(fileobj=fileobj, scheme='legacy')
try:
data = zipf.get_data('EGG-INFO/requires.txt')
requires = parse_requires_data(data.decode('utf-8'))
except IOError:
requires = None
elif path.endswith('.egg-info'):
if os.path.isdir(path):
req_path = os.path.join(path, 'requires.txt')
requires = parse_requires_path(req_path)
path = os.path.join(path, 'PKG-INFO')
metadata = Metadata(path=path, scheme='legacy')
else:
raise DistlibException('path must end with .egg-info or .egg, '
'got %r' % path)
if requires:
metadata.add_requirements(requires)
return metadata
def __repr__(self):
return '<EggInfoDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
for path, _, _ in self.list_installed_files():
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
return mismatches
def list_installed_files(self):
"""
Iterates over the ``installed-files.txt`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: a list of (path, hash, size)
"""
def _md5(path):
f = open(path, 'rb')
try:
content = f.read()
finally:
f.close()
return hashlib.md5(content).hexdigest()
def _size(path):
return os.stat(path).st_size
record_path = os.path.join(self.path, 'installed-files.txt')
result = []
if os.path.exists(record_path):
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
p = os.path.normpath(os.path.join(self.path, line))
# "./" is present as a marker between installed files
# and installation metadata files
if not os.path.exists(p):
logger.warning('Non-existent file: %s', p)
if p.endswith(('.pyc', '.pyo')):
continue
#otherwise fall through and fail
if not os.path.isdir(p):
result.append((p, _md5(p), _size(p)))
result.append((record_path, None, None))
return result
def list_distinfo_files(self, absolute=False):
"""
Iterates over the ``installed-files.txt`` entries and returns paths for
each line if the path is pointing to a file located in the
``.egg-info`` directory or one of its subdirectories.
:parameter absolute: If *absolute* is ``True``, each returned path is
transformed into a local absolute path. Otherwise the
raw value from ``installed-files.txt`` is returned.
:type absolute: boolean
:returns: iterator of paths
"""
record_path = os.path.join(self.path, 'installed-files.txt')
skip = True
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line == './':
skip = False
continue
if not skip:
p = os.path.normpath(os.path.join(self.path, line))
if p.startswith(self.path):
if absolute:
yield p
else:
yield line
def __eq__(self, other):
return (isinstance(other, EggInfoDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
class DependencyGraph(object):
"""
Represents a dependency graph between distributions.
The dependency relationships are stored in an ``adjacency_list`` that maps
distributions to a list of ``(other, label)`` tuples where ``other``
is a distribution and the edge is labeled with ``label`` (i.e. the version
specifier, if such was provided). Also, for more efficient traversal, for
every distribution ``x``, a list of predecessors is kept in
``reverse_list[x]``. An edge from distribution ``a`` to
distribution ``b`` means that ``a`` depends on ``b``. If any missing
dependencies are found, they are stored in ``missing``, which is a
dictionary that maps distributions to a list of requirements that were not
provided by any other distributions.
"""
def __init__(self):
self.adjacency_list = {}
self.reverse_list = {}
self.missing = {}
def add_distribution(self, distribution):
"""Add the *distribution* to the graph.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
"""
self.adjacency_list[distribution] = []
self.reverse_list[distribution] = []
#self.missing[distribution] = []
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x)
def add_missing(self, distribution, requirement):
"""
Add a missing *requirement* for the given *distribution*.
:type distribution: :class:`distutils2.database.InstalledDistribution`
or :class:`distutils2.database.EggInfoDistribution`
:type requirement: ``str``
"""
logger.debug('%s missing %r', distribution, requirement)
self.missing.setdefault(distribution, []).append(requirement)
def _repr_dist(self, dist):
return '%s %s' % (dist.name, dist.version)
def repr_node(self, dist, level=1):
"""Prints only a subgraph"""
output = [self._repr_dist(dist)]
for other, label in self.adjacency_list[dist]:
dist = self._repr_dist(other)
if label is not None:
dist = '%s [%s]' % (dist, label)
output.append(' ' * level + str(dist))
suboutput = self.repr_node(other, level + 1)
subs = suboutput.split('\n')
output.extend(subs[1:])
return '\n'.join(output)
def to_dot(self, f, skip_disconnected=True):
"""Writes a DOT output for the graph to the provided file *f*.
If *skip_disconnected* is set to ``True``, then all distributions
that are not dependent on any other distribution are skipped.
:type f: has to support ``file``-like operations
:type skip_disconnected: ``bool``
"""
disconnected = []
f.write("digraph dependencies {\n")
for dist, adjs in self.adjacency_list.items():
if len(adjs) == 0 and not skip_disconnected:
disconnected.append(dist)
for other, label in adjs:
                if label is not None:
f.write('"%s" -> "%s" [label="%s"]\n' %
(dist.name, other.name, label))
else:
f.write('"%s" -> "%s"\n' % (dist.name, other.name))
if not skip_disconnected and len(disconnected) > 0:
f.write('subgraph disconnected {\n')
f.write('label = "Disconnected"\n')
f.write('bgcolor = red\n')
for dist in disconnected:
f.write('"%s"' % dist.name)
f.write('\n')
f.write('}\n')
f.write('}\n')
def topological_sort(self):
"""
Perform a topological sort of the graph.
:return: A tuple, the first element of which is a topologically sorted
list of distributions, and the second element of which is a
list of distributions that cannot be sorted because they have
circular dependencies and so form a cycle.
"""
result = []
# Make a shallow copy of the adjacency list
alist = {}
for k, v in self.adjacency_list.items():
alist[k] = v[:]
while True:
# See what we can remove in this run
to_remove = []
            for k, v in list(alist.items()):
if not v:
to_remove.append(k)
del alist[k]
if not to_remove:
# What's left in alist (if anything) is a cycle.
break
# Remove from the adjacency list of others
for k, v in alist.items():
alist[k] = [(d, r) for d, r in v if d not in to_remove]
logger.debug('Moving to result: %s',
['%s (%s)' % (d.name, d.version) for d in to_remove])
result.extend(to_remove)
return result, list(alist.keys())
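    # Illustrative behaviour: if A depends on B and B depends on C (edges
    # A->B and B->C), topological_sort() returns ([C, B, A], []); members of
    # a dependency cycle come back in the second list instead of the first.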
def __repr__(self):
"""Representation of the graph"""
output = []
for dist, adjs in self.adjacency_list.items():
output.append(self.repr_node(dist))
return '\n'.join(output)
def make_graph(dists, scheme='default'):
"""Makes a dependency graph from the given distributions.
:parameter dists: a list of distributions
:type dists: list of :class:`distutils2.database.InstalledDistribution` and
:class:`distutils2.database.EggInfoDistribution` instances
:rtype: a :class:`DependencyGraph` instance
"""
scheme = get_scheme(scheme)
graph = DependencyGraph()
provided = {} # maps names to lists of (version, dist) tuples
# first, build the graph and find out what's provided
for dist in dists:
graph.add_distribution(dist)
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
provided.setdefault(name, []).append((version, dist))
# now make the edges
for dist in dists:
requires = (dist.run_requires | dist.meta_requires |
dist.build_requires | dist.dev_requires)
for req in requires:
try:
matcher = scheme.matcher(req)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
matched = False
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
graph.add_edge(dist, provider, req)
matched = True
break
if not matched:
graph.add_missing(dist, req)
return graph
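# Sketch of typical use (names here are illustrative, not part of the API):
#
#     graph = make_graph(installed_dists)
#     for dist, reqs in graph.missing.items():
#         print('%s is missing: %s' % (dist, ', '.join(reqs)))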
def get_dependent_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
dependent on *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
dep = [dist] # dependent distributions
todo = graph.reverse_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()
dep.append(d)
for succ in graph.reverse_list[d]:
if succ not in dep:
todo.append(succ)
dep.pop(0) # remove dist from dep, was there to prevent infinite loops
return dep
def get_required_dists(dists, dist):
"""Recursively generate a list of distributions from *dists* that are
required by *dist*.
:param dists: a list of distributions
:param dist: a distribution, member of *dists* for which we are interested
"""
if dist not in dists:
raise DistlibException('given distribution %r is not a member '
'of the list' % dist.name)
graph = make_graph(dists)
req = [] # required distributions
todo = graph.adjacency_list[dist] # list of nodes we should inspect
while todo:
d = todo.pop()[0]
req.append(d)
for pred in graph.adjacency_list[d]:
if pred not in req:
todo.append(pred)
return req
def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
"""
summary = kwargs.pop('summary', 'Placeholder for summary')
md = Metadata(**kwargs)
md.name = name
md.version = version
    md.summary = summary or 'Placeholder for summary'
return Distribution(md)
| mit |
catapult-project/catapult-csm | third_party/google-endpoints/rsa/varblock.py | 82 | 5406 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VARBLOCK file support
.. deprecated:: 3.4
The VARBLOCK format is NOT recommended for general use, has been deprecated since
Python-RSA 3.4, and will be removed in a future release. It's vulnerable to a
number of attacks:
1. decrypt/encrypt_bigfile() does not implement `Authenticated encryption`_ nor
uses MACs to verify messages before decrypting public key encrypted messages.
2. decrypt/encrypt_bigfile() does not use hybrid encryption (it uses plain RSA)
and has no method for chaining, so block reordering is possible.
See `issue #19 on Github`_ for more information.
.. _Authenticated encryption: https://en.wikipedia.org/wiki/Authenticated_encryption
.. _issue #19 on Github: https://github.com/sybrenstuvel/python-rsa/issues/13
The VARBLOCK file format is as follows, where || denotes byte concatenation:
FILE := VERSION || BLOCK || BLOCK ...
BLOCK := LENGTH || DATA
LENGTH := varint-encoded length of the subsequent data. Varint comes from
Google Protobuf, and encodes an integer into a variable number of bytes.
Each byte uses the 7 lowest bits to encode the value. The highest bit set
to 1 indicates the next byte is also part of the varint. The last byte will
have this bit set to 0.
This file format is called the VARBLOCK format, in line with the varint format
used to denote the block sizes.
"""
import warnings
from rsa._compat import byte, b
ZERO_BYTE = b('\x00')
VARBLOCK_VERSION = 1
warnings.warn("The 'rsa.varblock' module was deprecated in Python-RSA version "
"3.4 due to security issues in the VARBLOCK format. See "
"https://github.com/sybrenstuvel/python-rsa/issues/13 for more information.",
DeprecationWarning)
def read_varint(infile):
"""Reads a varint from the file.
When the first byte to be read indicates EOF, (0, 0) is returned. When an
EOF occurs when at least one byte has been read, an EOFError exception is
raised.
:param infile: the file-like object to read from. It should have a read()
method.
:returns: (varint, length), the read varint and the number of read bytes.
"""
varint = 0
read_bytes = 0
while True:
char = infile.read(1)
if len(char) == 0:
if read_bytes == 0:
return 0, 0
raise EOFError('EOF while reading varint, value is %i so far' %
varint)
        byte_value = ord(char)  # avoid shadowing rsa._compat.byte
        varint += (byte_value & 0x7F) << (7 * read_bytes)
        read_bytes += 1
        if not byte_value & 0x80:
return varint, read_bytes
def write_varint(outfile, value):
"""Writes a varint to a file.
:param outfile: the file-like object to write to. It should have a write()
method.
:returns: the number of written bytes.
"""
# there is a big difference between 'write the value 0' (this case) and
# 'there is nothing left to write' (the false-case of the while loop)
if value == 0:
outfile.write(ZERO_BYTE)
return 1
written_bytes = 0
while value > 0:
to_write = value & 0x7f
value >>= 7
if value > 0:
to_write |= 0x80
outfile.write(byte(to_write))
written_bytes += 1
return written_bytes
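# Round-trip illustration (a sketch, not part of the library): 300 is
# 0b100101100, encoded little-endian in 7-bit groups as 0xAC
# (0b1_0101100, continuation bit set) followed by 0x02 (0b0_0000010).
#
#     from io import BytesIO
#     buf = BytesIO()
#     write_varint(buf, 300)   # -> 2 (bytes written: b'\xac\x02')
#     buf.seek(0)
#     read_varint(buf)         # -> (300, 2)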
def yield_varblocks(infile):
"""Generator, yields each block in the input file.
:param infile: file to read, is expected to have the VARBLOCK format as
described in the module's docstring.
    :yields: the contents of each block.
"""
# Check the version number
first_char = infile.read(1)
if len(first_char) == 0:
raise EOFError('Unable to read VARBLOCK version number')
version = ord(first_char)
if version != VARBLOCK_VERSION:
raise ValueError('VARBLOCK version %i not supported' % version)
while True:
(block_size, read_bytes) = read_varint(infile)
# EOF at block boundary, that's fine.
if read_bytes == 0 and block_size == 0:
break
block = infile.read(block_size)
read_size = len(block)
if read_size != block_size:
raise EOFError('Block size is %i, but could read only %i bytes' %
(block_size, read_size))
yield block
def yield_fixedblocks(infile, blocksize):
"""Generator, yields each block of ``blocksize`` bytes in the input file.
:param infile: file to read and separate in blocks.
:returns: a generator that yields the contents of each block
"""
while True:
block = infile.read(blocksize)
read_bytes = len(block)
if read_bytes == 0:
break
yield block
if read_bytes < blocksize:
break
| bsd-3-clause |
kantlove/flask-simple-page | Lib/site-packages/pip/_vendor/requests/cookies.py | 413 | 17191 | # -*- coding: utf-8 -*-
"""
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import copy
import time
import collections
from .compat import cookielib, urlparse, urlunparse, Morsel
try:
import threading
# grr, pyflakes: this fixes "redefinition of unused 'threading'"
threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
"""Wraps a `requests.Request` to mimic a `urllib2.Request`.
The code in `cookielib.CookieJar` expects this interface in order to correctly
manage cookie policies, i.e., determine whether a cookie can be set, given the
domains of the request and the cookie.
The original request object is read-only. The client is responsible for collecting
the new headers via `get_new_headers()` and interpreting them appropriately. You
probably want `get_cookie_header`, defined below.
"""
def __init__(self, request):
self._r = request
self._new_headers = {}
self.type = urlparse(self._r.url).scheme
def get_type(self):
return self.type
def get_host(self):
return urlparse(self._r.url).netloc
def get_origin_req_host(self):
return self.get_host()
def get_full_url(self):
# Only return the response's URL if the user hadn't set the Host
# header
if not self._r.headers.get('Host'):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
host = self._r.headers['Host']
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
return urlunparse([
parsed.scheme, host, parsed.path, parsed.params, parsed.query,
parsed.fragment
])
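    # Illustrative note: for a request to http://203.0.113.5/x sent with
    # headers={'Host': 'other.example'}, get_full_url() reconstructs
    # 'http://other.example/x', so cookie domain checks honour the Host header.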
def is_unverifiable(self):
return True
def has_header(self, name):
return name in self._r.headers or name in self._new_headers
def get_header(self, name, default=None):
return self._r.headers.get(name, self._new_headers.get(name, default))
def add_header(self, key, val):
"""cookielib has no legitimate use for this method; add it back if you find one."""
raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
def add_unredirected_header(self, name, value):
self._new_headers[name] = value
def get_new_headers(self):
return self._new_headers
@property
def unverifiable(self):
return self.is_unverifiable()
@property
def origin_req_host(self):
return self.get_origin_req_host()
@property
def host(self):
return self.get_host()
class MockResponse(object):
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
...what? Basically, expose the parsed HTTP headers from the server response
the way `cookielib` expects to see them.
"""
def __init__(self, headers):
"""Make a MockResponse for `cookielib` to read.
:param headers: a httplib.HTTPMessage or analogous carrying the headers
"""
self._headers = headers
def info(self):
return self._headers
def getheaders(self, name):
        return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
"""Extract the cookies from the response into a CookieJar.
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
if not (hasattr(response, '_original_response') and
response._original_response):
return
    # the _original_response field is the wrapped httplib.HTTPResponse object.
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
res = MockResponse(response._original_response.msg)
jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
"""Produce an appropriate Cookie header string to be sent with `request`, or None."""
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get('Cookie')
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n).
"""
clearables = []
for cookie in cookiejar:
if cookie.name == name:
if domain is None or domain == cookie.domain:
if path is None or path == cookie.path:
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name)
class CookieConflictError(RuntimeError):
"""There are two cookies that meet the criteria specified in the cookie jar.
Use .get and .set and include domain and path args in order to be more specific."""
class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
"""Compatibility class; is a cookielib.CookieJar, but exposes a dict
interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
Requests does not use the dict interface internally; it's just for
compatibility with external client code. All requests code should work
out of the box with externally provided instances of ``CookieJar``, e.g.
``LWPCookieJar`` and ``FileCookieJar``.
Unlike a regular CookieJar, this class is pickleable.
.. warning:: dictionary operations that are normally O(1) may be O(n).
"""
def get(self, name, default=None, domain=None, path=None):
"""Dict-like get() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
.. warning:: operation is O(n), not O(1)."""
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
return default
def set(self, name, value, **kwargs):
"""Dict-like set() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains."""
# support client code that unsets cookies by assignment of a None value:
if value is None:
remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
return
if isinstance(value, Morsel):
c = morsel_to_cookie(value)
else:
c = create_cookie(name, value, **kwargs)
self.set_cookie(c)
return c
def iterkeys(self):
"""Dict-like iterkeys() that returns an iterator of names of cookies
from the jar. See itervalues() and iteritems()."""
for cookie in iter(self):
yield cookie.name
def keys(self):
"""Dict-like keys() that returns a list of names of cookies from the
jar. See values() and items()."""
return list(self.iterkeys())
def itervalues(self):
"""Dict-like itervalues() that returns an iterator of values of cookies
from the jar. See iterkeys() and iteritems()."""
for cookie in iter(self):
yield cookie.value
def values(self):
"""Dict-like values() that returns a list of values of cookies from the
jar. See keys() and items()."""
return list(self.itervalues())
def iteritems(self):
"""Dict-like iteritems() that returns an iterator of name-value tuples
from the jar. See iterkeys() and itervalues()."""
for cookie in iter(self):
yield cookie.name, cookie.value
def items(self):
"""Dict-like items() that returns a list of name-value tuples from the
jar. See keys() and values(). Allows client-code to call
``dict(RequestsCookieJar)`` and get a vanilla python dict of key value
pairs."""
return list(self.iteritems())
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
def list_paths(self):
"""Utility method to list all the paths in the jar."""
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise."""
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
"""Takes as an argument an optional domain and path and returns a plain
old Python dict of name-value pairs of cookies that meet the
requirements."""
dictionary = {}
for cookie in iter(self):
if (domain is None or cookie.domain == domain) and (path is None
or cookie.path == path):
dictionary[cookie.name] = cookie.value
return dictionary
def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws
exception if there are more than one cookie with name. In that case,
use the more explicit get() method instead.
.. warning:: operation is O(n), not O(1)."""
return self._find_no_duplicates(name)
def __setitem__(self, name, value):
"""Dict-like __setitem__ for compatibility with client code. Throws
exception if there is already a cookie of that name in the jar. In that
case, use the more explicit set() method instead."""
self.set(name, value)
def __delitem__(self, name):
"""Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
``remove_cookie_by_name()``."""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
cookie.value = cookie.value.replace('\\"', '')
return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
super(RequestsCookieJar, self).update(other)
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values. Takes as
args name and optional domain and path. Returns a cookie.value. If
there are conflicting cookies, _find arbitrarily chooses one. See
_find_no_duplicates if you want an exception thrown if there are
conflicting cookies."""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def _find_no_duplicates(self, name, domain=None, path=None):
"""Both ``__get_item__`` and ``get`` call this function: it's never
used elsewhere in Requests. Takes as args name and optional domain and
path. Returns a cookie.value. Throws KeyError if cookie is not found
and CookieConflictError if there are multiple cookies that match name
and optionally domain and path."""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
if toReturn is not None: # if there are multiple cookies that meet passed in criteria
raise CookieConflictError('There are multiple cookies with name, %r' % (name))
toReturn = cookie.value # we will eventually return this as long as no cookie conflict
        if toReturn is not None:  # an empty-string cookie value is still a match
return toReturn
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def __getstate__(self):
"""Unlike a normal CookieJar, this class is pickleable."""
state = self.__dict__.copy()
# remove the unpickleable RLock object
state.pop('_cookies_lock')
return state
def __setstate__(self, state):
"""Unlike a normal CookieJar, this class is pickleable."""
self.__dict__.update(state)
if '_cookies_lock' not in self.__dict__:
self._cookies_lock = threading.RLock()
def copy(self):
"""Return a copy of this RequestsCookieJar."""
new_cj = RequestsCookieJar()
new_cj.update(self)
return new_cj
def _copy_cookie_jar(jar):
if jar is None:
return None
if hasattr(jar, 'copy'):
        # We're dealing with an instance of RequestsCookieJar
return jar.copy()
# We're dealing with a generic CookieJar instance
new_jar = copy.copy(jar)
new_jar.clear()
for cookie in jar:
new_jar.set_cookie(copy.copy(cookie))
return new_jar
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
"""
result = dict(
version=0,
name=name,
value=value,
port=None,
domain='',
path='/',
secure=False,
expires=None,
discard=True,
comment=None,
comment_url=None,
rest={'HttpOnly': None},
rfc2109=False,)
badargs = set(kwargs) - set(result)
if badargs:
err = 'create_cookie() got unexpected keyword arguments: %s'
raise TypeError(err % list(badargs))
result.update(kwargs)
result['port_specified'] = bool(result['port'])
result['domain_specified'] = bool(result['domain'])
result['domain_initial_dot'] = result['domain'].startswith('.')
result['path_specified'] = bool(result['path'])
return cookielib.Cookie(**result)
def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
expires = None
if morsel['max-age']:
expires = time.time() + morsel['max-age']
elif morsel['expires']:
time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
expires = time.mktime(
time.strptime(morsel['expires'], time_template)) - time.timezone
return create_cookie(
comment=morsel['comment'],
comment_url=bool(morsel['comment']),
discard=False,
domain=morsel['domain'],
expires=expires,
name=morsel.key,
path=morsel['path'],
port=None,
rest={'HttpOnly': morsel['httponly']},
rfc2109=False,
secure=bool(morsel['secure']),
value=morsel.value,
version=morsel['version'] or 0,
)
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:param cookiejar: (optional) A cookiejar to add the cookies to.
:param overwrite: (optional) If False, will not replace cookies
already in the jar with new ones.
"""
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookie_dict:
if overwrite or (name not in names_from_jar):
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
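# Illustrative usage (a sketch, not exercised by requests itself):
#
#     jar = cookiejar_from_dict({'session': 'abc123'})
#     jar.get('session')                                  # -> 'abc123'
#     jar = cookiejar_from_dict({'session': 'new'}, jar, overwrite=False)
#     jar.get('session')                                  # -> 'abc123' (kept)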
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
| mit |
jonashaag/ansible | lib/ansible/plugins/lookup/indexed_items.py | 103 | 1323 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, variables, **kwargs):
### FIXME: Is this needed now that listify is run on all lookup plugin terms?
if not isinstance(terms, list):
raise AnsibleError("with_indexed_items expects a list")
items = self._flatten(terms)
return zip(range(len(items)), items)
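# Illustrative playbook usage (hypothetical task): each yielded pair exposes
# the index as item.0 and the value as item.1.
#
#   - debug: msg="item {{ item.0 }} is {{ item.1 }}"
#     with_indexed_items:
#       - foo
#       - bar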
| gpl-3.0 |
ojengwa/odoo | addons/email_template/__init__.py | 381 | 1144 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import email_template
import wizard
import res_partner
import ir_actions
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
joergkappes/opengm | src/interfaces/python/examples/ad3_posteriors.py | 13 | 2384 | import opengm
import numpy
# do not get used to this example, api might change
length = 6 # slow if large and model == '3OrderRandomChain'
numLabels = 2 # slow if more than 2 or 3 for large length
ilp = False # slow if True for '3OrderRandomChain' with large length
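# choose one of the two model names below; the second assignment wins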
model = '2OrderSubmodularGrid'
model = '3OrderRandomChain'
# beta of 0.005 will lead to almost no fractional labels;
# beta of 0.5 will lead to fractional solutions for the
# '3OrderRandomChain' model, but the Potts model
# is still integral
beta = 0.005
if opengm.configuration.withAd3:
rnd = numpy.random.rand
# second order example
    if model == '2OrderSubmodularGrid':
unaries = rnd(length , length, numLabels)
potts = opengm.PottsFunction([numLabels,numLabels],0.0, beta)
gm = opengm.grid2d2Order(unaries=unaries, regularizer=potts)
# third order example
elif model == '3OrderRandomChain':
numberOfStates = numpy.ones(length, dtype=opengm.label_type)*numLabels
gm = opengm.gm(numberOfStates, operator='adder')
#add some random unaries
for vi in range(gm.numberOfVariables):
unaryFuction = rnd(numLabels)
gm.addFactor(gm.addFunction(unaryFuction), vi)
        #add a 3rd-order function for every triple of variables
for vi0 in range(length):
for vi1 in range(vi0+1, length):
for vi2 in range(vi1+1, length):
highOrderFunction = rnd(numLabels, numLabels,
numLabels)*beta
gm.addFactor(gm.addFunction(highOrderFunction),[vi0,vi1,vi2])
else :
raise RuntimeError("wrong model type")
# inference parameter
if ilp:
ad3Solver = 'ad3_ilp'
else:
ad3Solver = 'ad3_lp'
param = opengm.InfParam(solverType=ad3Solver, adaptEta=True,
steps=1000, residualThreshold=1e-6,
verbose=1)
inf = opengm.inference.Ad3(gm, parameter=param)
# do inference
inf.infer()
# get results
arg = inf.arg()
posteriors = inf.posteriors()
# grid or chain ?
    if model == '2OrderSubmodularGrid':
        #print as grid
print posteriors
print arg.reshape([length, length])
else:
# print as chain
print posteriors
print arg
else:
raise RuntimeError("this example needs WITH_AD3 enabled") | mit |
rcharp/toyota-flask | venv/lib/python2.7/site-packages/numpy/core/tests/test_shape_base.py | 47 | 8240 | from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (TestCase, assert_, assert_raises, assert_array_equal,
assert_equal, run_module_suite)
from numpy.core import (array, arange, atleast_1d, atleast_2d, atleast_3d,
vstack, hstack, newaxis, concatenate)
from numpy.compat import long
class TestAtleast1d(TestCase):
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1]), array([2])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1, 2]), array([2, 3])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_r1array(self):
""" Test to make sure equivalent Travis O's r1array function
"""
assert_(atleast_1d(3).shape == (1,))
assert_(atleast_1d(3j).shape == (1,))
assert_(atleast_1d(long(3)).shape == (1,))
assert_(atleast_1d(3.0).shape == (1,))
assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
class TestAtleast2d(TestCase):
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_2d(a), atleast_2d(b)]
desired = [array([[1]]), array([[2]])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_2d(a), atleast_2d(b)]
desired = [array([[1, 2]]), array([[2, 3]])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_2d(a), atleast_2d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_2d(a), atleast_2d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_r2array(self):
""" Test to make sure equivalent Travis O's r2array function
"""
assert_(atleast_2d(3).shape == (1, 1))
assert_(atleast_2d([3j, 1]).shape == (1, 2))
assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
class TestAtleast3d(TestCase):
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_3d(a), atleast_3d(b)]
desired = [array([[[1]]]), array([[[2]]])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_3d(a), atleast_3d(b)]
desired = [array([[[1], [2]]]), array([[[2], [3]]])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_3d(a), atleast_3d(b)]
desired = [a[:,:, newaxis], b[:,:, newaxis]]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_3d(a), atleast_3d(b)]
desired = [a, b]
assert_array_equal(res, desired)
class TestHstack(TestCase):
def test_0D_array(self):
a = array(1)
b = array(2)
        res = hstack([a, b])
desired = array([1, 2])
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1])
b = array([2])
        res = hstack([a, b])
desired = array([1, 2])
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1], [2]])
b = array([[1], [2]])
        res = hstack([a, b])
desired = array([[1, 1], [2, 2]])
assert_array_equal(res, desired)
class TestVstack(TestCase):
def test_0D_array(self):
a = array(1)
b = array(2)
        res = vstack([a, b])
desired = array([[1], [2]])
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1])
b = array([2])
        res = vstack([a, b])
desired = array([[1], [2]])
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1], [2]])
b = array([[1], [2]])
        res = vstack([a, b])
desired = array([[1], [2], [1], [2]])
assert_array_equal(res, desired)
def test_2D_array2(self):
a = array([1, 2])
b = array([1, 2])
        res = vstack([a, b])
desired = array([[1, 2], [1, 2]])
assert_array_equal(res, desired)
def test_concatenate_axis_None():
a = np.arange(4, dtype=np.float64).reshape((2, 2))
b = list(range(3))
c = ['x']
r = np.concatenate((a, a), axis=None)
assert_equal(r.dtype, a.dtype)
assert_equal(r.ndim, 1)
r = np.concatenate((a, b), axis=None)
assert_equal(r.size, a.size + len(b))
assert_equal(r.dtype, a.dtype)
r = np.concatenate((a, b, c), axis=None)
d = array(['0.0', '1.0', '2.0', '3.0',
'0', '1', '2', 'x'])
assert_array_equal(r, d)
def test_concatenate():
# Test concatenate function
# No arrays raise ValueError
assert_raises(ValueError, concatenate, ())
# Scalars cannot be concatenated
assert_raises(ValueError, concatenate, (0,))
assert_raises(ValueError, concatenate, (array(0),))
# One sequence returns unmodified (but as array)
r4 = list(range(4))
assert_array_equal(concatenate((r4,)), r4)
# Any sequence
assert_array_equal(concatenate((tuple(r4),)), r4)
assert_array_equal(concatenate((array(r4),)), r4)
# 1D default concatenation
r3 = list(range(3))
assert_array_equal(concatenate((r4, r3)), r4 + r3)
# Mixed sequence types
assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
# Explicit axis specification
assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
# Including negative
assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
# 2D
a23 = array([[10, 11, 12], [13, 14, 15]])
a13 = array([[0, 1, 2]])
res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
assert_array_equal(concatenate((a23, a13)), res)
assert_array_equal(concatenate((a23, a13), 0), res)
assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
    # Arrays must match shape
assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
# 3D
res = arange(2 * 3 * 7).reshape((2, 3, 7))
a0 = res[..., :4]
a1 = res[..., 4:6]
a2 = res[..., 6:]
assert_array_equal(concatenate((a0, a1, a2), 2), res)
assert_array_equal(concatenate((a0, a1, a2), -1), res)
assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
def test_concatenate_sloppy0():
# Versions of numpy < 1.7.0 ignored axis argument value for 1D arrays. We
# allow this for now, but in due course we will raise an error
r4 = list(range(4))
r3 = list(range(3))
assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
assert_array_equal(concatenate((r4, r3), -10), r4 + r3)
assert_array_equal(concatenate((r4, r3), 10), r4 + r3)
# Confirm DeprecationWarning raised
warnings.simplefilter('error', DeprecationWarning)
assert_raises(DeprecationWarning, concatenate, (r4, r3), 10)
if __name__ == "__main__":
run_module_suite()
| apache-2.0 |
jesramirez/odoo | addons/website_membership/models/product.py | 338 | 1264 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class product_template(osv.Model):
_inherit = 'product.template'
_columns = {
'website_published': fields.boolean('Available in the website', copy=False),
}
_defaults = {
'website_published': False,
}
| agpl-3.0 |
dendisuhubdy/tensorflow | tensorflow/python/ops/linalg_ops.py | 10 | 23635 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for linear algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops_impl
from tensorflow.python.ops import math_ops
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_linalg_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# Names below are lower_case.
# pylint: disable=invalid-name
def _RegularizedGramianCholesky(matrix, l2_regularizer, first_kind):
r"""Computes Cholesky factorization of regularized gramian matrix.
Below we will use the following notation for each pair of matrix and
right-hand sides in the batch:
`matrix`=\\(A \in \Re^{m \times n}\\),
`output`=\\(C \in \Re^{\min(m, n) \times \min(m,n)}\\),
`l2_regularizer`=\\(\lambda\\).
If `first_kind` is True, returns the Cholesky factorization \\(L\\) such that
\\(L L^H = A^H A + \lambda I\\).
If `first_kind` is False, returns the Cholesky factorization \\(L\\) such that
\\(L L^H = A A^H + \lambda I\\).
Args:
matrix: `Tensor` of shape `[..., M, N]`.
l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`.
first_kind: bool. Controls what gramian matrix to factor.
Returns:
output: `Tensor` of shape `[..., min(M,N), min(M,N)]` whose inner-most 2
dimensions contain the Cholesky factors \\(L\\) described above.
"""
gramian = math_ops.matmul(
matrix, matrix, adjoint_a=first_kind, adjoint_b=not first_kind)
if isinstance(l2_regularizer, ops.Tensor) or l2_regularizer != 0:
matrix_shape = array_ops.shape(matrix)
batch_shape = matrix_shape[:-2]
if first_kind:
small_dim = matrix_shape[-1]
else:
small_dim = matrix_shape[-2]
identity = eye(small_dim, batch_shape=batch_shape, dtype=matrix.dtype)
small_dim_static = matrix.shape[-1 if first_kind else -2]
identity.set_shape(
matrix.shape[:-2].concatenate([small_dim_static, small_dim_static]))
gramian += l2_regularizer * identity
return gen_linalg_ops.cholesky(gramian)
@tf_export('cholesky_solve', 'linalg.cholesky_solve')
def cholesky_solve(chol, rhs, name=None):
"""Solves systems of linear eqns `A X = RHS`, given Cholesky factorizations.
```python
# Solve 10 separate 2x2 linear systems:
A = ... # shape 10 x 2 x 2
RHS = ... # shape 10 x 2 x 1
chol = tf.cholesky(A) # shape 10 x 2 x 2
X = tf.cholesky_solve(chol, RHS) # shape 10 x 2 x 1
# tf.matmul(A, X) ~ RHS
X[3, :, 0] # Solution to the linear system A[3, :, :] x = RHS[3, :, 0]
# Solve five linear systems (K = 5) for every member of the length 10 batch.
A = ... # shape 10 x 2 x 2
RHS = ... # shape 10 x 2 x 5
...
X[3, :, 2] # Solution to the linear system A[3, :, :] x = RHS[3, :, 2]
```
Args:
chol: A `Tensor`. Must be `float32` or `float64`, shape is `[..., M, M]`.
Cholesky factorization of `A`, e.g. `chol = tf.cholesky(A)`.
For that reason, only the lower triangular parts (including the diagonal)
of the last two dimensions of `chol` are used. The strictly upper part is
assumed to be zero and not accessed.
rhs: A `Tensor`, same type as `chol`, shape is `[..., M, K]`.
name: A name to give this `Op`. Defaults to `cholesky_solve`.
Returns:
Solution to `A x = rhs`, shape `[..., M, K]`.
"""
# To solve C C^* x = rhs, we
# 1. Solve C y = rhs for y, thus y = C^* x
# 2. Solve C^* x = y for x
with ops.name_scope(name, 'cholesky_solve', [chol, rhs]):
y = gen_linalg_ops.matrix_triangular_solve(
chol, rhs, adjoint=False, lower=True)
x = gen_linalg_ops.matrix_triangular_solve(
chol, y, adjoint=True, lower=True)
return x
@tf_export('eye', 'linalg.eye')
def eye(num_rows,
num_columns=None,
batch_shape=None,
dtype=dtypes.float32,
name=None):
"""Construct an identity matrix, or a batch of matrices.
```python
# Construct one identity matrix.
tf.eye(2)
==> [[1., 0.],
[0., 1.]]
  # Construct a batch of 3 identity matrices, each 2 x 2.
# batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2.
batch_identity = tf.eye(2, batch_shape=[3])
# Construct one 2 x 3 "identity" matrix
tf.eye(2, num_columns=3)
==> [[ 1., 0., 0.],
[ 0., 1., 0.]]
```
Args:
num_rows: Non-negative `int32` scalar `Tensor` giving the number of rows
in each batch matrix.
num_columns: Optional non-negative `int32` scalar `Tensor` giving the number
of columns in each batch matrix. Defaults to `num_rows`.
batch_shape: A list or tuple of Python integers or a 1-D `int32` `Tensor`.
If provided, the returned `Tensor` will have leading batch dimensions of
this shape.
dtype: The type of an element in the resulting `Tensor`
name: A name for this `Op`. Defaults to "eye".
Returns:
A `Tensor` of shape `batch_shape + [num_rows, num_columns]`
"""
return linalg_ops_impl.eye(num_rows,
num_columns=num_columns,
batch_shape=batch_shape,
dtype=dtype,
name=name)
@tf_export('matrix_solve_ls', 'linalg.lstsq')
def matrix_solve_ls(matrix, rhs, l2_regularizer=0.0, fast=True, name=None):
r"""Solves one or more linear least-squares problems.
`matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
form `M`-by-`N` matrices. Rhs is a tensor of shape `[..., M, K]` whose
inner-most 2 dimensions form `M`-by-`K` matrices. The computed output is a
`Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form `M`-by-`K`
matrices that solve the equations
`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least squares
sense.
Below we will use the following notation for each pair of matrix and
right-hand sides in the batch:
`matrix`=\\(A \in \Re^{m \times n}\\),
`rhs`=\\(B \in \Re^{m \times k}\\),
`output`=\\(X \in \Re^{n \times k}\\),
`l2_regularizer`=\\(\lambda\\).
If `fast` is `True`, then the solution is computed by solving the normal
equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
\\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the least-squares
problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 +
\lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
\\(X = A^T (A A^T + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is
the minimum-norm solution to the under-determined linear system, i.e.
\\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2 \\), subject to
\\(A Z = B\\). Notice that the fast path is only numerically stable when
\\(A\\) is numerically full rank and has a condition number
  \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\\) or \\(\lambda\\)
is sufficiently large.
If `fast` is `False` an algorithm based on the numerically robust complete
orthogonal decomposition is used. This computes the minimum-norm
least-squares solution, even when \\(A\\) is rank deficient. This path is
typically 6-7 times slower than the fast path. If `fast` is `False` then
`l2_regularizer` is ignored.
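  A minimal usage sketch (illustrative only; assumes `a` and `b` are
  `float64` tensors of compatible shapes):

  ```python
  a = tf.random_normal([10, 5, 3], dtype=tf.float64)  # batch of 10 5x3 matrices
  b = tf.random_normal([10, 5, 2], dtype=tf.float64)  # matching right-hand sides
  x = tf.matrix_solve_ls(a, b, l2_regularizer=0.1)    # shape 10 x 3 x 2
  ```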
Args:
matrix: `Tensor` of shape `[..., M, N]`.
rhs: `Tensor` of shape `[..., M, K]`.
l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`.
fast: bool. Defaults to `True`.
name: string, optional name of the operation.
Returns:
output: `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form
`M`-by-`K` matrices that solve the equations
`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least
squares sense.
Raises:
NotImplementedError: matrix_solve_ls is currently disabled for complex128
and l2_regularizer != 0 due to poor accuracy.
"""
# pylint: disable=long-lambda
def _use_composite_impl(fast, tensor_shape):
"""Determines whether to use the composite or specialized CPU kernel.
When the total size of the tensor is larger than the cache size and the
batch size is large compared to the smallest matrix dimension, then the
composite implementation is inefficient since it has to read the entire
tensor from memory multiple times. In this case we fall back to the
original CPU kernel, which does all the computational steps on each
matrix separately.
Only fast mode is supported by the composite impl, so `False` is returned
if `fast` is `False`.
Args:
fast: bool indicating if fast mode in the solver was requested.
tensor_shape: The shape of the tensor.
Returns:
True if the composite impl should be used. False otherwise.
"""
if fast is False:
return False
batch_shape = tensor_shape[:-2]
matrix_shape = tensor_shape[-2:]
if not tensor_shape.is_fully_defined():
return True
tensor_size = tensor_shape.num_elements() * matrix.dtype.size
is_io_bound = batch_shape.num_elements() > np.min(matrix_shape)
L2_CACHE_SIZE_GUESSTIMATE = 256000
if tensor_size > L2_CACHE_SIZE_GUESSTIMATE and is_io_bound:
return False
else:
return True
def _overdetermined(matrix, rhs, l2_regularizer):
"""Computes (A^H*A + l2_regularizer)^{-1} * A^H * rhs."""
chol = _RegularizedGramianCholesky(
matrix, l2_regularizer=l2_regularizer, first_kind=True)
return cholesky_solve(chol, math_ops.matmul(matrix, rhs, adjoint_a=True))
def _underdetermined(matrix, rhs, l2_regularizer):
"""Computes A^H * (A*A^H + l2_regularizer)^{-1} * rhs."""
chol = _RegularizedGramianCholesky(
matrix, l2_regularizer=l2_regularizer, first_kind=False)
return math_ops.matmul(matrix, cholesky_solve(chol, rhs), adjoint_a=True)
def _composite_impl(matrix, rhs, l2_regularizer):
"""Composite implementation of matrix_solve_ls that supports GPU."""
with ops.name_scope(name, 'matrix_solve_ls', [matrix, rhs, l2_regularizer]):
matrix_shape = matrix.get_shape()[-2:]
if matrix_shape.is_fully_defined():
if matrix_shape[-2] >= matrix_shape[-1]:
return _overdetermined(matrix, rhs, l2_regularizer)
else:
return _underdetermined(matrix, rhs, l2_regularizer)
else:
# We have to defer determining the shape to runtime and use
# conditional execution of the appropriate graph.
matrix_shape = array_ops.shape(matrix)[-2:]
return control_flow_ops.cond(
matrix_shape[-2] >= matrix_shape[-1],
lambda: _overdetermined(matrix, rhs, l2_regularizer),
lambda: _underdetermined(matrix, rhs, l2_regularizer))
matrix = ops.convert_to_tensor(matrix, name='matrix')
if matrix.dtype == dtypes.complex128 and l2_regularizer != 0:
# TODO(rmlarsen): Investigate and fix accuracy bug.
raise NotImplementedError('matrix_solve_ls is currently disabled for '
'complex128 and l2_regularizer != 0 due to '
'poor accuracy.')
tensor_shape = matrix.get_shape()
if _use_composite_impl(fast, tensor_shape):
return _composite_impl(matrix, rhs, l2_regularizer)
else:
return gen_linalg_ops.matrix_solve_ls(
matrix, rhs, l2_regularizer, fast=fast, name=name)
@tf_export('self_adjoint_eig', 'linalg.eigh')
def self_adjoint_eig(tensor, name=None):
"""Computes the eigen decomposition of a batch of self-adjoint matrices.
Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices
in `tensor` such that
`tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i]`, for i=0...N-1.
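  A brief sketch (illustrative; assumes `a` is a batch of self-adjoint
  matrices):

  ```python
  e, v = tf.self_adjoint_eig(a)
  # tf.matmul(a, v)[..., :, i] is approximately e[..., i] * v[..., :, i]
  ```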
Args:
tensor: `Tensor` of shape `[..., N, N]`. Only the lower triangular part of
      each inner matrix is referenced.
name: string, optional name of the operation.
Returns:
e: Eigenvalues. Shape is `[..., N]`. Sorted in non-decreasing order.
    v: Eigenvectors. Shape is `[..., N, N]`. The columns of the innermost
      matrices contain eigenvectors of the corresponding matrices in `tensor`.
"""
e, v = gen_linalg_ops.self_adjoint_eig_v2(tensor, compute_v=True, name=name)
return e, v
@tf_export('self_adjoint_eigvals', 'linalg.eigvalsh')
def self_adjoint_eigvals(tensor, name=None):
"""Computes the eigenvalues of one or more self-adjoint matrices.
Note: If your program backpropagates through this function, you should replace
it with a call to tf.self_adjoint_eig (possibly ignoring the second output) to
avoid computing the eigen decomposition twice. This is because the
eigenvectors are used to compute the gradient w.r.t. the eigenvalues. See
_SelfAdjointEigV2Grad in linalg_grad.py.
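  For example, the following sketch (assuming `a` is self-adjoint) obtains
  only the eigenvalues while remaining safe to differentiate:

  ```python
  e, _ = tf.self_adjoint_eig(a)  # ignore the eigenvectors
  ```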
Args:
tensor: `Tensor` of shape `[..., N, N]`.
name: string, optional name of the operation.
Returns:
e: Eigenvalues. Shape is `[..., N]`. The vector `e[..., :]` contains the `N`
eigenvalues of `tensor[..., :, :]`.
"""
e, _ = gen_linalg_ops.self_adjoint_eig_v2(tensor, compute_v=False, name=name)
return e
@tf_export('svd', 'linalg.svd')
def svd(tensor, full_matrices=False, compute_uv=True, name=None):
r"""Computes the singular value decompositions of one or more matrices.
Computes the SVD of each inner matrix in `tensor` such that
`tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) *
transpose(conj(v[..., :, :]))`
```python
# a is a tensor.
# s is a tensor of singular values.
# u is a tensor of left singular vectors.
# v is a tensor of right singular vectors.
s, u, v = svd(a)
s = svd(a, compute_uv=False)
```
Args:
tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
`N`.
full_matrices: If true, compute full-sized `u` and `v`. If false
(the default), compute only the leading `P` singular vectors.
Ignored if `compute_uv` is `False`.
compute_uv: If `True` then left and right singular vectors will be
computed and returned in `u` and `v`, respectively. Otherwise, only the
singular values will be computed, which can be significantly faster.
name: string, optional name of the operation.
Returns:
s: Singular values. Shape is `[..., P]`. The values are sorted in reverse
order of magnitude, so s[..., 0] is the largest value, s[..., 1] is the
second largest, etc.
u: Left singular vectors. If `full_matrices` is `False` (default) then
shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
`[..., M, M]`. Not returned if `compute_uv` is `False`.
v: Right singular vectors. If `full_matrices` is `False` (default) then
shape is `[..., N, P]`. If `full_matrices` is `True` then shape is
`[..., N, N]`. Not returned if `compute_uv` is `False`.
@compatibility(numpy)
Mostly equivalent to numpy.linalg.svd, except that
* The order of output arguments here is `s`, `u`, `v` when `compute_uv` is
`True`, as opposed to `u`, `s`, `v` for numpy.linalg.svd.
* full_matrices is `False` by default as opposed to `True` for
numpy.linalg.svd.
* tf.linalg.svd uses the standard definition of the SVD
\\(A = U \Sigma V^H\\), such that the left singular vectors of `a` are
the columns of `u`, while the right singular vectors of `a` are the
columns of `v`. On the other hand, numpy.linalg.svd returns the adjoint
\\(V^H\\) as the third output argument.
```python
import tensorflow as tf
import numpy as np
s, u, v = tf.linalg.svd(a)
tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_v=True))
u, s, v_adj = np.linalg.svd(a, full_matrices=False)
np_a_approx = np.dot(u, np.dot(np.diag(s), v_adj))
# tf_a_approx and np_a_approx should be numerically close.
```
@end_compatibility
"""
s, u, v = gen_linalg_ops.svd(
tensor, compute_uv=compute_uv, full_matrices=full_matrices, name=name)
if compute_uv:
return math_ops.real(s), u, v
else:
return math_ops.real(s)
# pylint: disable=redefined-builtin
@tf_export('norm', 'linalg.norm')
@deprecation.deprecated_args(
None, 'keep_dims is deprecated, use keepdims instead', 'keep_dims')
def norm(tensor,
ord='euclidean',
axis=None,
keepdims=None,
name=None,
keep_dims=None):
r"""Computes the norm of vectors, matrices, and tensors.
This function can compute several different vector norms (the 1-norm, the
Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and
matrix norms (Frobenius, 1-norm, 2-norm and inf-norm).
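  For example (an illustrative sketch; the commented values follow from the
  definitions above):

  ```python
  m = tf.constant([[3., 4.], [0., 0.]])
  tf.norm(m)                             # Frobenius norm over all entries: 5.0
  tf.norm(m, ord=1, axis=1)              # 1-norm of each row: [7.0, 0.0]
  tf.norm(m, ord=np.inf, axis=[-2, -1])  # matrix inf-norm: 7.0
  ```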
Args:
tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128`
ord: Order of the norm. Supported values are 'fro', 'euclidean',
`1`, `2`, `np.inf` and any positive real number yielding the corresponding
p-norm. Default is 'euclidean' which is equivalent to Frobenius norm if
`tensor` is a matrix and equivalent to 2-norm for vectors.
Some restrictions apply:
a) The Frobenius norm `fro` is not defined for vectors,
b) If axis is a 2-tuple (matrix norm), only 'euclidean', 'fro', `1`,
`2`, `np.inf` are supported.
See the description of `axis` on how to compute norms for a batch of
vectors or matrices stored in a tensor.
axis: If `axis` is `None` (the default), the input is considered a vector
and a single vector norm is computed over the entire set of values in the
tensor, i.e. `norm(tensor, ord=ord)` is equivalent to
`norm(reshape(tensor, [-1]), ord=ord)`.
If `axis` is a Python integer, the input is considered a batch of vectors,
and `axis` determines the axis in `tensor` over which to compute vector
norms.
If `axis` is a 2-tuple of Python integers it is considered a batch of
matrices and `axis` determines the axes in `tensor` over which to compute
a matrix norm.
Negative indices are supported. Example: If you are passing a tensor that
can be either a matrix or a batch of matrices at runtime, pass
`axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are
computed.
    keepdims: If True, the axes indicated in `axis` are kept with size 1.
Otherwise, the dimensions in `axis` are removed from the output shape.
name: The name of the op.
keep_dims: Deprecated alias for `keepdims`.
Returns:
output: A `Tensor` of the same type as tensor, containing the vector or
matrix norms. If `keepdims` is True then the rank of output is equal to
      the rank of `tensor`. Otherwise, if `axis` is `None` the output is a scalar,
if `axis` is an integer, the rank of `output` is one less than the rank
of `tensor`, if `axis` is a 2-tuple the rank of `output` is two less
than the rank of `tensor`.
Raises:
ValueError: If `ord` or `axis` is invalid.
@compatibility(numpy)
Mostly equivalent to numpy.linalg.norm.
  Not supported: ord <= 0, nuclear norm.
Other differences:
a) If axis is `None`, treats the flattened `tensor` as a vector
regardless of rank.
b) Explicitly supports 'euclidean' norm as the default, including for
higher order tensors.
@end_compatibility
"""
keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims,
'keep_dims', keep_dims)
if keepdims is None:
keepdims = False
is_matrix_norm = ((isinstance(axis, tuple) or isinstance(axis, list)) and
len(axis) == 2)
if is_matrix_norm:
axis = tuple(axis)
if (not isinstance(axis[0], int) or not isinstance(axis[1], int) or
axis[0] == axis[1]):
raise ValueError(
"'axis' must be None, an integer, or a tuple of 2 unique integers")
supported_matrix_norms = ['euclidean', 'fro', 1, 2, np.inf]
if ord not in supported_matrix_norms:
raise ValueError("'ord' must be a supported matrix norm in %s, got %s" %
(supported_matrix_norms, ord))
else:
if not (isinstance(axis, int) or axis is None):
raise ValueError(
"'axis' must be None, an integer, or a tuple of 2 unique integers")
supported_vector_norms = ['euclidean', 1, 2, np.inf]
if (not np.isreal(ord) or ord <= 0) and ord not in supported_vector_norms:
raise ValueError("'ord' must be a supported vector norm, got %s" % ord)
if axis is not None:
axis = (axis,)
with ops.name_scope(name, 'norm', [tensor]):
tensor = ops.convert_to_tensor(tensor)
if ord in ['fro', 'euclidean', 2, 2.0]:
if is_matrix_norm and ord in [2, 2.0]:
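        # The matrix 2-norm is the largest singular value: move the two
        # reduction axes to the end, take the SVD there, then restore the
        # original axis order.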
rank = array_ops.rank(tensor)
positive_axis = functional_ops.map_fn(
lambda i: control_flow_ops.cond(i >= 0, lambda: i, lambda: i + rank),
ops.convert_to_tensor(axis))
axes = math_ops.range(rank)
perm_before = array_ops.concat(
[array_ops.setdiff1d(axes, positive_axis)[0], positive_axis],
axis=0)
perm_after = functional_ops.map_fn(
lambda i: math_ops.cast(
array_ops.squeeze(
array_ops.where(math_ops.equal(perm_before, i))),
dtype=dtypes.int32), axes)
permed = array_ops.transpose(tensor, perm=perm_before)
matrix_2_norm = array_ops.expand_dims(
math_ops.reduce_max(
math_ops.abs(gen_linalg_ops.svd(permed, compute_uv=False)[0]),
axis=-1,
keepdims=True),
axis=-1)
result = array_ops.transpose(matrix_2_norm, perm=perm_after)
else:
result = math_ops.sqrt(
math_ops.reduce_sum(
tensor * math_ops.conj(tensor), axis, keepdims=True))
else:
result = math_ops.abs(tensor)
if ord == 1:
sum_axis = None if axis is None else axis[0]
result = math_ops.reduce_sum(result, sum_axis, keepdims=True)
if is_matrix_norm:
result = math_ops.reduce_max(result, axis[-1], keepdims=True)
elif ord == np.inf:
if is_matrix_norm:
result = math_ops.reduce_sum(result, axis[1], keepdims=True)
max_axis = None if axis is None else axis[0]
result = math_ops.reduce_max(result, max_axis, keepdims=True)
else:
# General p-norms (positive p only)
result = math_ops.pow(
math_ops.reduce_sum(math_ops.pow(result, ord), axis, keepdims=True),
1.0 / ord)
if not keepdims:
result = array_ops.squeeze(result, axis)
return result
# pylint: enable=invalid-name,redefined-builtin
| apache-2.0 |
fxfitz/ansible | test/units/modules/network/cnos/test_cnos_config.py | 9 | 5237 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.cnos import cnos_config
from .cnos_module import TestCnosModule, load_fixture, set_module_args
class TestCnosConfigModule(TestCnosModule):
module = cnos_config
def setUp(self):
self.patcher_get_config = patch('ansible.modules.network.cnos.cnos_config.get_config')
self.mock_get_config = self.patcher_get_config.start()
self.patcher_exec_command = patch('ansible.modules.network.cnos.cnos_config.load_config')
self.mock_exec_command = self.patcher_exec_command.start()
def tearDown(self):
self.patcher_get_config.stop()
self.patcher_exec_command.stop()
def load_fixtures(self, commands=None):
config_file = 'cnos_config_config.cfg'
self.mock_get_config.return_value = load_fixture(config_file)
self.mock_exec_command.return_value = 'dummy diff'
def test_cnos_config_unchanged(self):
src = load_fixture('cnos_config_config.cfg')
set_module_args(dict(src=src))
self.execute_module()
def test_cnos_config_src(self):
src = load_fixture('cnos_config_src.cfg')
set_module_args(dict(src=src))
commands = ['hostname foo', 'interface ethernet 1/13',
'speed 10000']
self.execute_module(changed=True, commands=commands)
def test_cnos_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_cnos_config_lines_wo_parents(self):
set_module_args(dict(lines=['hostname foo']))
commands = ['hostname foo']
self.execute_module(changed=True, commands=commands)
def test_cnos_config_lines_w_parents(self):
set_module_args(dict(lines=['shutdown'], parents=['interface ethernet 1/13']))
commands = ['interface ethernet 1/13', 'shutdown']
self.execute_module(changed=True, commands=commands)
def test_cnos_config_before(self):
set_module_args(dict(lines=['hostname foo'], before=['test1', 'test2']))
commands = ['test1', 'test2', 'hostname foo']
self.execute_module(changed=True, commands=commands, sort=False)
def test_cnos_config_after(self):
set_module_args(dict(lines=['hostname foo'], after=['test1', 'test2']))
commands = ['hostname foo', 'test1', 'test2']
self.execute_module(changed=True, commands=commands, sort=False)
def test_cnos_config_before_after_no_change(self):
set_module_args(dict(lines=['hostname ip10-241-107-39'],
before=['test1', 'test2'],
after=['test2', 'test3']))
self.execute_module()
def test_cnos_config_config(self):
config = 'hostname localhost'
set_module_args(dict(lines=['hostname ip10-241-107-39'], config=config))
commands = ['hostname ip10-241-107-39']
self.execute_module(changed=True, commands=commands)
def test_cnos_config_replace_block(self):
lines = ['description test string', 'test string']
parents = ['interface ethernet 1/13']
set_module_args(dict(lines=lines, replace='block', parents=parents))
commands = parents + lines
self.execute_module(changed=True, commands=commands)
def test_cnos_config_match_none(self):
lines = ['ip address 1.2.3.4 255.255.255.0', 'description test string']
parents = ['interface ethernet 1/13']
set_module_args(dict(lines=lines, parents=parents, match='none'))
commands = parents + lines
self.execute_module(changed=True, commands=commands, sort=False)
def test_cnos_config_match_strict(self):
lines = ['ip address 100.10.10.10/24', 'no switchport']
parents = ['interface Ethernet1/12']
set_module_args(dict(lines=lines, parents=parents, match='strict'))
commands = parents + ['no switchport']
self.execute_module(changed=True, commands=commands, sort=False)
def test_cnos_config_match_exact(self):
lines = ['ip address 1.2.3.4 255.255.255.0', 'description test string',
'no shutdown']
parents = ['interface ethernet 1/13']
set_module_args(dict(lines=lines, parents=parents, match='exact'))
commands = parents + lines
self.execute_module(changed=True, commands=commands, sort=False)
| gpl-3.0 |
Yelp/pootle | tests/models/translationproject.py | 1 | 2685 | import os
import shutil
import pytest
from django.db import IntegrityError
from pootle_language.models import Language
from pootle_project.models import Project
from pootle_translationproject.models import TranslationProject
@pytest.mark.django_db
def test_tp_create_fail(tutorial, english):
# Trying to create a TP with no Project raises a RelatedObjectDoesNotExist
# which can be caught with Project.DoesNotExist
with pytest.raises(Project.DoesNotExist):
TranslationProject.objects.create()
# TP needs a lang set too...
with pytest.raises(Language.DoesNotExist):
TranslationProject.objects.create(project=tutorial)
    # An English tutorial was already automagically set up
with pytest.raises(IntegrityError):
TranslationProject.objects.create(project=tutorial, language=english)
@pytest.mark.django_db
def test_tp_create_no_files(tutorial, fish):
# There are no files on disk so TP was not automagically created
# and there are no templates loaded
tp = TranslationProject.objects.create(project=tutorial, language=fish)
assert list(tp.stores.all()) == []
def _test_tp_stores_match(tp1, tp2):
# For testing tp creation from templates
assert tp1.stores.count() == tp2.stores.count()
tp1_stores = tp1.stores.all()
tp2_stores = tp2.stores.all()
for i, store1 in enumerate(tp1_stores):
store2 = tp2_stores[i]
assert store1.units == store2.units
@pytest.mark.django_db
def test_tp_create_templates(tutorial, klingon_vpw, templates):
# As there is a tutorial template it will automatically create stores for
# our new TP
template_tp = TranslationProject.objects.get(
language=templates, project=tutorial)
tp = TranslationProject.objects.create(
project=tutorial, language=klingon_vpw)
assert tp.stores.count() == template_tp.stores.count()
assert (
[(s, t)
for s, t
in template_tp.stores.first().units.values_list("source_f",
"target_f")]
== [(s, t)
for s, t
in tp.stores.first().units.values_list("source_f",
"target_f")])
@pytest.mark.django_db
def test_tp_create_with_files(tutorial, english, klingon, settings):
# lets add some files by hand
trans_dir = settings.POOTLE_TRANSLATION_DIRECTORY
shutil.copytree(
os.path.join(trans_dir, "tutorial/en"),
os.path.join(trans_dir, "tutorial/kl"))
TranslationProject.objects.create(project=tutorial, language=klingon)
shutil.rmtree(os.path.join(trans_dir, "tutorial/kl"))
| gpl-3.0 |
shoopio/shoop | shuup/front/views/checkout.py | 2 | 3591 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.views.generic import View
from six.moves import urllib
from shuup.front.checkout import CheckoutProcess, VerticalCheckoutProcess
from shuup.utils.importing import cached_load
__all__ = ["BaseCheckoutView"]
class BaseCheckoutView(View):
url_namespace = 'shuup:checkout'
phase_specs = []
empty_phase_spec = None
initial_phase = None
process_class = CheckoutProcess
def dispatch(self, request, *args, **kwargs):
if request.basket.is_empty and self.empty_phase_spec:
self.phase_specs = [self.empty_phase_spec]
phase_identifier = "empty"
else:
phase_identifier = kwargs.get("phase", self.initial_phase)
process = self.process_class(
phase_specs=self.phase_specs,
phase_kwargs=dict(request=request, args=args, kwargs=kwargs),
view=self)
request.basket = process.basket
if phase_identifier == "reset":
process.reset()
return redirect(self.get_url())
current_phase = process.prepare_current_phase(phase_identifier)
if not current_phase.final and current_phase.identifier != phase_identifier:
url = current_phase.get_url()
params = ("?" + urllib.parse.urlencode(request.GET)) if request.GET else ""
return redirect(url + params)
return current_phase.dispatch(request, *args, **kwargs)
def get_url(self, **kwargs):
"""
Get URL for given kwargs within the checkout process in this view.
        This can be overridden in a subclass to customize the URLs.
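
        A subclass override might look like this (illustrative sketch; the
        URL namespace is an assumption)::

            def get_url(self, **kwargs):
                return reverse('myshop:checkout', kwargs=kwargs)
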
:rtype: str
"""
return reverse(self.url_namespace, kwargs=kwargs)
def get_phase_url(self, phase):
"""
Get URL for the given phase in the checkout process of this view.
:type phase: shuup.front.checkout.CheckoutPhaseViewMixin
:rtype: str
"""
return self.get_url(phase=phase.identifier)
class DefaultCheckoutView(BaseCheckoutView):
phase_specs = [
"shuup.front.checkout.addresses:AddressesPhase",
"shuup.front.checkout.methods:MethodsPhase",
"shuup.front.checkout.methods:ShippingMethodPhase",
"shuup.front.checkout.methods:PaymentMethodPhase",
"shuup.front.checkout.confirm:ConfirmPhase",
]
empty_phase_spec = "shuup.front.checkout.empty:EmptyPhase"
class SinglePageCheckoutView(DefaultCheckoutView):
initial_phase = "addresses"
process_class = VerticalCheckoutProcess
class CheckoutViewWithLoginAndRegister(BaseCheckoutView):
phase_specs = [
"shuup.front.checkout.checkout_method:CheckoutMethodPhase",
"shuup.front.checkout.checkout_method:RegisterPhase",
"shuup.front.checkout.addresses:AddressesPhase",
"shuup.front.checkout.methods:MethodsPhase",
"shuup.front.checkout.methods:ShippingMethodPhase",
"shuup.front.checkout.methods:PaymentMethodPhase",
"shuup.front.checkout.confirm:ConfirmPhase",
]
empty_phase_spec = "shuup.front.checkout.empty:EmptyPhase"
def get_checkout_view():
view = cached_load("SHUUP_CHECKOUT_VIEW_SPEC")
if hasattr(view, "as_view"): # pragma: no branch
view = view.as_view()
return view
| agpl-3.0 |
ConnorGBrewster/servo | tests/wpt/web-platform-tests/css/tools/apiclient/apiclient/apiclient.py | 79 | 10723 | # coding=utf-8
#
# Copyright © 2013 Hewlett-Packard Development Company, L.P.
#
# This work is distributed under the W3C® Software License [1]
# in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# [1] http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231
#
# Process URI templates per http://tools.ietf.org/html/rfc6570
import urllib
import urllib2
import urlparse
import json
import base64
import contextlib
import collections
import UserString
import uritemplate
class MimeType(UserString.MutableString):
def __init__(self, mimeType):
UserString.MutableString.__init__(self, mimeType)
self._type = None
self._subtype = None
self._structure = None
slashIndex = mimeType.find('/')
if (-1 < slashIndex):
self._type = mimeType[:slashIndex]
mimeType = mimeType[slashIndex + 1:]
plusIndex = mimeType.find('+')
if (-1 < plusIndex):
self._subtype = mimeType[:plusIndex]
self._structure = mimeType[plusIndex + 1:]
else:
self._structure = mimeType
else:
self._type = mimeType
def _update(self):
if (self._structure):
if (self._subtype):
self.data = self._type + '/' + self._subtype + '+' + self._structure
else:
self.data = self._type + '/' + self._structure
else:
self.data = self._type
def set(self, type, structure, subtype = None):
self._type = type
self._subtype = subtype
self._structure = structure
self._update()
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
self._update()
@property
def subtype(self):
return self._subtype
@subtype.setter
def subtype(self, value):
self._subtype = value
self._update()
@property
def structure(self):
return self._structure
@structure.setter
def structure(self, value):
self._structure = value
self._update()
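# MimeType parsing example (illustrative):
#   MimeType('application/vnd.api+json') yields type='application',
#   subtype='vnd.api', structure='json'.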
class APIResponse(object):
def __init__(self, response):
self.status = response.getcode() if (response) else 0
self.headers = response.info() if (response) else {}
self.data = response.read() if (200 == self.status) else None
if (self.data and
(('json' == self.contentType.structure) or ('json-home' == self.contentType.structure))):
try:
self.data = json.loads(self.data, object_pairs_hook = collections.OrderedDict)
except:
pass
@property
def contentType(self):
        contentType = self.headers.get('content-type', '') if (self.headers) else ''
        return MimeType(contentType.split(';')[0]) if (';' in contentType) else MimeType(contentType)
@property
def encoding(self):
contentType = self.headers.get('content-type') if (self.headers) else None
if (contentType and (';' in contentType)):
encoding = contentType.split(';', 1)[1]
if ('=' in encoding):
return encoding.split('=', 1)[1].strip()
return 'utf-8'
class APIHints(object):
def __init__(self, data):
self.httpMethods = [method.upper() for method in data['allow'] if method] if ('allow' in data) else ['GET']
self.formats = {}
formats = [MimeType(format) for format in data['formats']] if ('formats' in data) else []
if (formats):
if ('GET' in self.httpMethods):
self.formats['GET'] = formats
if ('PUT' in self.httpMethods):
self.formats['PUT'] = formats
if (('PATCH' in self.httpMethods) and ('accept-patch' in data)):
self.formats['PATCH'] = [MimeType(format) for format in data['accept-patch']]
if (('POST' in self.httpMethods) and ('accept-post' in data)):
self.formats['POST'] = [MimeType(format) for format in data['accept-post']]
# TODO: ranges from 'accept-ranges'; preferece tokens from 'accept-prefer';
# preconditions from 'precondition-req'; auth from 'auth-req'
self.ranges = None
self.preferences = None
self.preconditions = None
self.auth = None
self.docs = data.get('docs')
self.status = data.get('status')
class APIResource(object):
def __init__(self, baseURI, uri, variables = None, hints = None):
try:
self.template = uritemplate.URITemplate(urlparse.urljoin(baseURI, uri))
if (variables):
self.variables = {variable: urlparse.urljoin(baseURI, variables[variable]) for variable in variables}
else:
self.variables = {variable: '' for variable in self.template.variables}
self.hints = hints
except Exception as e:
self.template = uritemplate.URITemplate('')
self.variables = {}
self.hints = None
class APIClient(object):
def __init__(self, baseURI, version = None, username = None, password = None):
self._baseURI = baseURI
self.defaultVersion = version
self.defaultAccept = 'application/json'
self.username = username
self.password = password
self._resources = {}
self._versions = {}
self._accepts = {}
self._loadHome()
@property
def baseURI(self):
return self._baseURI
def _loadHome(self):
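        # Fetch the service's home document and index each resource by its
        # absolute URI (the "apiKey" used throughout this class). Plain JSON
        # maps and the json-home format are both understood.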
home = self._callURI('GET', self.baseURI, 'application/home+json, application/json-home, application/json')
if (home):
if ('application/json' == home.contentType):
for name in home.data:
apiKey = urlparse.urljoin(self.baseURI, name)
self._resources[apiKey] = APIResource(self.baseURI, home.data[name])
elif (('application/home+json' == home.contentType) or
('application/json-home' == home.contentType)):
resources = home.data.get('resources')
if (resources):
for name in resources:
apiKey = urlparse.urljoin(self.baseURI, name)
data = resources[name]
uri = data['href'] if ('href' in data) else data.get('href-template')
variables = data.get('href-vars')
hints = APIHints(data['hints']) if ('hints' in data) else None
self._resources[apiKey] = APIResource(self.baseURI, uri, variables, hints)
def relativeURI(self, uri):
if (uri.startswith(self.baseURI)):
relative = uri[len(self.baseURI):]
if (relative.startswith('/') and not self.baseURI.endswith('/')):
relative = relative[1:]
return relative
return uri
@property
def resourceNames(self):
return [self.relativeURI(apiKey) for apiKey in self._resources]
def resource(self, name):
return self._resources.get(urlparse.urljoin(self.baseURI, name))
def addResource(self, name, uri):
resource = APIResource(self.baseURI, uri)
apiKey = urlparse.urljoin(self.baseURI, name)
self._resources[apiKey] = resource
    def _accept(self, apiKey):
        # Resolve the Accept header for a resource URI (apiKey), honouring a
        # version pinned via setVersion() and falling back to the default.
        version = None
        if (apiKey and (apiKey in self._versions)):
            version = self._versions[apiKey]
        if (not version):
            version = self.defaultVersion
        return ('application/' + version + '+json, application/json') if (version) else 'application/json'
def _callURI(self, method, uri, accept, payload = None, payloadType = None):
try:
request = urllib2.Request(uri, data = payload, headers = { 'Accept' : accept })
if (self.username and self.password):
request.add_header('Authorization', b'Basic ' + base64.b64encode(self.username + b':' + self.password))
if (payload and payloadType):
request.add_header('Content-Type', payloadType)
request.get_method = lambda: method
with contextlib.closing(urllib2.urlopen(request)) as response:
return APIResponse(response)
except Exception as e:
pass
return None
def _call(self, method, name, arguments, payload = None, payloadType = None):
apiKey = urlparse.urljoin(self.baseURI, name)
resource = self._resources.get(apiKey)
if (resource):
uri = resource.template.expand(**arguments)
if (uri):
version = self._versions.get(apiKey) if (apiKey in self._versions) else self.defaultVersion
                accept = MimeType(self._accepts[apiKey] if (apiKey in self._accepts) else self.defaultAccept)
if (version):
accept.subtype = version
return self._callURI(method, uri, accept, payload, payloadType)
return None
def setVersion(self, name, version):
apiKey = urlparse.urljoin(self.baseURI, name)
self._versions[apiKey] = version
def setAccept(self, name, mimeType):
apiKey = urlparse.urljoin(self.baseURI, name)
self._accepts[apiKey] = mimeType
def get(self, name, **kwargs):
return self._call('GET', name, kwargs)
def post(self, name, payload = None, payloadType = None, **kwargs):
return self._call('POST', name, kwargs, payload, payloadType)
def postForm(self, name, payload = None, **kwargs):
return self._call('POST', name, kwargs, urllib.urlencode(payload), 'application/x-www-form-urlencoded')
def postJSON(self, name, payload = None, **kwargs):
return self._call('POST', name, kwargs, json.dumps(payload), 'application/json')
def put(self, name, payload = None, payloadType = None, **kwargs):
return self._call('PUT', name, kwargs, payload, payloadType)
def patch(self, name, patch = None, **kwargs):
return self._call('PATCH', name, kwargs, json.dumps(patch), 'application/json-patch')
def delete(self, name, **kwargs):
return self._call('DELETE', name, kwargs)
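# Illustrative usage (the service URI and resource name below are assumptions,
# not part of this module):
#
#   client = APIClient('https://example.org/api/')
#   response = client.get('tests', status='approved')
#   if (response and (200 == response.status)):
#       print response.data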
| mpl-2.0 |
Dr-Bean/spkrepo | spkrepo/tests/test_admin.py | 1 | 12182 | # -*- coding: utf-8 -*-
import os
from unittest import TestSuite, TestLoader
from flask import url_for, current_app
from spkrepo.ext import db
from spkrepo.models import Version, Package
from spkrepo.tests.common import BaseTestCase, BuildFactory, PackageFactory, create_image, VersionFactory
class IndexTestCase(BaseTestCase):
def test_anonymous(self):
self.assert403(self.client.get(url_for('admin.index')))
def test_user(self):
with self.logged_user():
self.assert403(self.client.get(url_for('admin.index')))
def test_developer(self):
with self.logged_user('developer'):
self.assert200(self.client.get(url_for('admin.index')))
def test_package_admin(self):
with self.logged_user('package_admin'):
self.assert200(self.client.get(url_for('admin.index')))
def test_admin(self):
with self.logged_user('admin'):
self.assert200(self.client.get(url_for('admin.index')))
class UserTestCase(BaseTestCase):
def test_anonymous(self):
self.assert403(self.client.get(url_for('userview.index_view')))
def test_user(self):
with self.logged_user():
self.assert403(self.client.get(url_for('userview.index_view')))
def test_developer(self):
with self.logged_user('developer'):
self.assert403(self.client.get(url_for('userview.index_view')))
def test_package_admin(self):
with self.logged_user('package_admin'):
self.assert403(self.client.get(url_for('userview.index_view')))
def test_admin(self):
with self.logged_user('admin'):
self.assert200(self.client.get(url_for('userview.index_view')))
def test_action_activate_one(self):
with self.logged_user('admin'):
user = self.create_user()
user.active = False
db.session.commit()
response = self.client.post(url_for('userview.action_view'), follow_redirects=True,
data=dict(action='activate', rowid=[user.id]))
self.assert200(response)
self.assertIn('User was successfully activated.', response.data.decode(response.charset))
self.assertTrue(user.active)
def test_action_activate_multi(self):
with self.logged_user('admin'):
user1 = self.create_user()
user1.active = False
user2 = self.create_user()
user2.active = False
db.session.commit()
response = self.client.post(url_for('userview.action_view'), follow_redirects=True,
data=dict(action='activate', rowid=[user1.id, user2.id]))
self.assert200(response)
self.assertIn('2 users were successfully activated.', response.data.decode(response.charset))
self.assertTrue(user1.active)
self.assertTrue(user2.active)
def test_action_deactivate(self):
with self.logged_user('admin'):
user = self.create_user()
user.active = True
db.session.commit()
response = self.client.post(url_for('userview.action_view'), follow_redirects=True,
data=dict(action='deactivate', rowid=[user.id]))
self.assert200(response)
self.assertIn('User was successfully deactivated.', response.data.decode(response.charset))
self.assertFalse(user.active)
def test_action_deactivate_multi(self):
with self.logged_user('admin'):
user1 = self.create_user()
user1.active = True
user2 = self.create_user()
user2.active = True
db.session.commit()
response = self.client.post(url_for('userview.action_view'), follow_redirects=True,
data=dict(action='deactivate', rowid=[user1.id, user2.id]))
self.assert200(response)
self.assertIn('2 users were successfully deactivated.', response.data.decode(response.charset))
self.assertFalse(user1.active)
self.assertFalse(user2.active)
class PackageTestCase(BaseTestCase):
def test_anonymous(self):
self.assert403(self.client.get(url_for('packageview.index_view')))
def test_user(self):
with self.logged_user():
self.assert403(self.client.get(url_for('packageview.index_view')))
def test_developer(self):
with self.logged_user('developer'):
self.assert403(self.client.get(url_for('packageview.index_view')))
def test_package_admin(self):
with self.logged_user('package_admin'):
self.assert200(self.client.get(url_for('packageview.index_view')))
def test_admin(self):
with self.logged_user('admin'):
self.assert403(self.client.get(url_for('packageview.index_view')))
def test_on_model_create(self):
self.assertEqual(len(Package.query.all()), 0)
with self.logged_user('package_admin'):
self.client.post(url_for('packageview.create_view'), data=dict(name='test'))
self.assertEqual(len(Package.query.all()), 1)
package = Package.query.one()
package_path = os.path.join(current_app.config['DATA_PATH'], package.name)
self.assertTrue(os.path.exists(package_path))
def test_on_model_delete(self):
package = PackageFactory()
db.session.commit()
self.assertEqual(len(Package.query.all()), 1)
package_path = os.path.join(current_app.config['DATA_PATH'], package.name)
self.assertTrue(os.path.exists(package_path))
with self.logged_user('package_admin', 'admin'):
self.client.post(url_for('packageview.delete_view', id=str(package.id)))
self.assertEqual(len(Package.query.all()), 0)
self.assertTrue(not os.path.exists(package_path))
class VersionTestCase(BaseTestCase):
def test_anonymous(self):
self.assert403(self.client.get(url_for('versionview.index_view')))
def test_user(self):
with self.logged_user():
self.assert403(self.client.get(url_for('versionview.index_view')))
def test_developer(self):
with self.logged_user('developer'):
self.assert200(self.client.get(url_for('versionview.index_view')))
def test_package_admin(self):
with self.logged_user('package_admin'):
self.assert200(self.client.get(url_for('versionview.index_view')))
def test_admin(self):
with self.logged_user('admin'):
self.assert403(self.client.get(url_for('versionview.index_view')))
def test_on_model_delete(self):
version = VersionFactory()
db.session.commit()
self.assertEqual(len(Version.query.all()), 1)
version_path = os.path.join(current_app.config['DATA_PATH'], version.package.name, str(version.version))
self.assertTrue(os.path.exists(version_path))
with self.logged_user('package_admin', 'admin'):
self.client.post(url_for('versionview.delete_view', id=str(version.id)))
self.assertEqual(len(Version.query.all()), 0)
self.assertTrue(not os.path.exists(version_path))
class BuildTestCase(BaseTestCase):
def test_anonymous(self):
self.assert403(self.client.get(url_for('buildview.index_view')))
def test_user(self):
with self.logged_user():
self.assert403(self.client.get(url_for('buildview.index_view')))
def test_developer(self):
with self.logged_user('developer'):
self.assert200(self.client.get(url_for('buildview.index_view')))
def test_package_admin(self):
with self.logged_user('package_admin'):
self.assert200(self.client.get(url_for('buildview.index_view')))
def test_admin(self):
with self.logged_user('admin'):
self.assert403(self.client.get(url_for('buildview.index_view')))
def test_action_activate_one(self):
with self.logged_user('package_admin'):
build = BuildFactory(active=False)
db.session.commit()
response = self.client.post(url_for('buildview.action_view'), follow_redirects=True,
data=dict(action='activate', rowid=[build.id]))
self.assert200(response)
self.assertIn('Build was successfully activated.', response.data.decode(response.charset))
self.assertTrue(build.active)
def test_action_activate_multi(self):
with self.logged_user('package_admin'):
build1 = BuildFactory(active=False)
build2 = BuildFactory(active=False)
db.session.commit()
response = self.client.post(url_for('buildview.action_view'), follow_redirects=True,
data=dict(action='activate', rowid=[build1.id, build2.id]))
self.assert200(response)
self.assertIn('2 builds were successfully activated.', response.data.decode(response.charset))
self.assertTrue(build1.active)
self.assertTrue(build2.active)
def test_action_deactivate(self):
with self.logged_user('package_admin'):
build = BuildFactory(active=True)
db.session.commit()
response = self.client.post(url_for('buildview.action_view'), follow_redirects=True,
data=dict(action='deactivate', rowid=[build.id]))
self.assert200(response)
self.assertIn('Build was successfully deactivated.', response.data.decode(response.charset))
self.assertFalse(build.active)
def test_action_deactivate_multi(self):
with self.logged_user('package_admin'):
build1 = BuildFactory(active=True)
build2 = BuildFactory(active=True)
db.session.commit()
response = self.client.post(url_for('buildview.action_view'), follow_redirects=True,
data=dict(action='deactivate', rowid=[build1.id, build2.id]))
self.assert200(response)
self.assertIn('2 builds were successfully deactivated.', response.data.decode(response.charset))
self.assertFalse(build1.active)
self.assertFalse(build2.active)
class ScreenshotTestCase(BaseTestCase):
def test_anonymous(self):
self.assert403(self.client.get(url_for('screenshotview.index_view')))
def test_user(self):
with self.logged_user():
self.assert403(self.client.get(url_for('screenshotview.index_view')))
def test_developer(self):
with self.logged_user('developer'):
self.assert403(self.client.get(url_for('screenshotview.index_view')))
def test_package_admin(self):
with self.logged_user('package_admin'):
self.assert200(self.client.get(url_for('screenshotview.index_view')))
def test_admin(self):
with self.logged_user('admin'):
self.assert403(self.client.get(url_for('screenshotview.index_view')))
def test_create(self):
package = PackageFactory(add_screenshot=False)
db.session.commit()
self.assertEqual(len(package.screenshots), 0)
with self.logged_user('package_admin'):
response = self.client.post(url_for('screenshotview.create_view'),
data=dict(package=str(package.id),
path=(create_image('Test', 1280, 1024), 'test.png')))
self.assertEqual(len(package.screenshots), 1)
self.assertTrue(package.screenshots[0].path.endswith('screenshot_1.png'))
def suite():
suite = TestSuite()
suite.addTest(TestLoader().loadTestsFromTestCase(IndexTestCase))
suite.addTest(TestLoader().loadTestsFromTestCase(UserTestCase))
suite.addTest(TestLoader().loadTestsFromTestCase(PackageTestCase))
suite.addTest(TestLoader().loadTestsFromTestCase(VersionTestCase))
suite.addTest(TestLoader().loadTestsFromTestCase(BuildTestCase))
suite.addTest(TestLoader().loadTestsFromTestCase(ScreenshotTestCase))
return suite
| mit |
tumb1er/django-celery-rpc | celery_rpc/tests/test_tasks.py | 2 | 15994 | from __future__ import absolute_import
from random import randint
from uuid import uuid4
from django.core.exceptions import ObjectDoesNotExist
from celery_rpc.tests import factories
from celery_rpc.tests.utils import (get_model_dict, SimpleModelTestMixin,
get_model_dict_from_list, unpack_exception)
from django.test import TestCase
from django.db.models import Q
from rest_framework import serializers
from .. import tasks
from ..exceptions import ModelTaskError, remote_exception_registry
from ..tests.tasks import CustomModelTask
from .models import SimpleModel, NonAutoPrimaryKeyModel, PartialUpdateModel
class BaseTaskTests(SimpleModelTestMixin, TestCase):
pass
class FilterTaskTests(BaseTaskTests):
""" Tests for selecting models located on RPC server.
"""
def testLimit(self):
r = tasks.filter.delay(self.MODEL_SYMBOL)
self.assertEquals(5, len(r.get()))
r = tasks.filter.delay(self.MODEL_SYMBOL, limit=2)
self.assertEquals(2, len(r.get()))
def testOffset(self):
r = tasks.filter.delay(self.MODEL_SYMBOL, offset=1)
expected = get_model_dict(self.models[1])
self.assertEquals(expected, r.get()[0])
def testFilters(self):
expected = get_model_dict(self.models[0])
r = tasks.filter.delay(self.MODEL_SYMBOL,
filters={'pk': expected['id']})
self.assertEquals(expected, r.get()[0])
def testFiltersWithQ(self):
expected = get_model_dict(self.models[0])
r = tasks.filter.delay(self.MODEL_SYMBOL,
filters_Q=Q(pk=expected['id']))
self.assertEquals(expected, r.get()[0])
def testFiltersWithLookupsAndQ(self):
filter_ids = [m.id for m in self.models[3:]]
filter_Q = Q(pk__lte=self.models[3].pk)
r = tasks.filter.delay(self.MODEL_SYMBOL,
filters={'pk__in': filter_ids},
filters_Q=filter_Q)
expected = get_model_dict(self.models[3])
self.assertEquals(len(r.get()), 1)
self.assertEquals(expected, r.get()[0])
def testExclude(self):
""" Exclude seems good.
"""
exclude_ids = [m.pk for m in self.models[1:]]
r = tasks.filter.delay(self.MODEL_SYMBOL,
exclude={'pk__in': exclude_ids})
expected = get_model_dict(self.models[0])
self.assertEquals(expected, r.get()[0])
def testExcludeWithQ(self):
""" Exclude with Q-object works nice.
"""
r = tasks.filter.delay(self.MODEL_SYMBOL,
exclude_q=Q(pk__gte=self.models[1].pk))
expected = get_model_dict(self.models[0])
self.assertEquals(expected, r.get()[0])
def testExcludeWithLookupsAndQ(self):
""" Exclude all except first and last by mix of `exclude` and
`exclude_Q` seems able.
"""
exclude_char = [m.char for m in self.models[1:]]
exclude_Q = Q(pk__lte=self.models[3].pk)
r = tasks.filter.delay(self.MODEL_SYMBOL,
exclude={'char__in': exclude_char},
exclude_Q=exclude_Q)
result = r.get()
self.assertEquals(len(result), 2)
for i in 4, 0:
expected = get_model_dict(self.models[i])
r = result.pop()
self.assertEquals(expected, r)
def testSerializerFields(self):
expected = get_model_dict(self.models[0])
field = list(expected.keys())[0]
r = tasks.filter.delay(self.MODEL_SYMBOL,
filters={'pk': expected['id']},
fields=[field])
self.assertEquals({field: expected[field]}, r.get()[0])
def testOrdering(self):
self.models[0].char = 'a'
self.models[0].save()
self.models[1].char = 'b'
self.models[1].save()
r = tasks.filter.delay(self.MODEL_SYMBOL,
filters={'char__in': ['a', 'b']},
order_by=['char'])
self.assertEquals(['a', 'b'], [item['char'] for item in r.get()])
def testReverseOrdering(self):
self.models[0].char = 'a'
self.models[0].save()
self.models[1].char = 'b'
self.models[1].save()
r = tasks.filter.delay(self.MODEL_SYMBOL,
filters={'char__in': ['a', 'b']},
order_by='-char')
self.assertEquals(['b', 'a'], [item['char'] for item in r.get()])
class SimpleTaskSerializer(serializers.ModelSerializer):
""" Test serializer
"""
class Meta:
model = SimpleModel
fields = ('id', )
class SingleObjectsDoesNotExistMixin(object):
""" Checks behavior of tasks, which modify existing objects.
"""
def checkSingleObjectDoesNotExist(self, expected_exc=ObjectDoesNotExist):
with self.assertRaisesRegexp(expected_exc,
r'matching query does not exist.'):
with unpack_exception():
self.task.delay(self.MODEL_SYMBOL,
{'char': str(uuid4()),
'id': randint(100, 1000)}).get()
def testSingleObjectDoesNotExist(self):
""" Raise exception if cannot find object in single mode """
tasks.rpc.conf['WRAP_REMOTE_ERRORS'] = False
return self.checkSingleObjectDoesNotExist()
def testSingleObjectDoesNotExistRemoteError(self):
""" Perform testSingleObjectDoesNotExist with remote errors handling
enabled."""
tasks.rpc.conf['WRAP_REMOTE_ERRORS'] = True
return self.checkSingleObjectDoesNotExist(remote_exception_registry.RemoteError)
class UpdateTaskTests(SingleObjectsDoesNotExistMixin, BaseTaskTests):
task = tasks.update
def testUpdateOne(self):
expected = get_model_dict(self.models[0])
expected.update(char=str(uuid4()))
r = self.task.delay(self.MODEL_SYMBOL, expected)
self.assertEquals(expected, r.get())
updated = get_model_dict(SimpleModel.objects.get(pk=expected['id']))
self.assertEquals(expected, updated)
def testUpdateMulti(self):
expected = [get_model_dict(e) for e in self.models[0:2]]
for e in expected:
e.update(char=str(uuid4()))
r = self.task.delay(self.MODEL_SYMBOL, expected)
result = r.get()
self.assertEquals(2, len(result))
self.assertEquals(expected, result)
updated = [get_model_dict(o) for o in SimpleModel.objects.all()[0:2]]
self.assertEquals(expected, updated)
def testUpdatePartial(self):
char_val = str(uuid4())
expected = get_model_dict(self.models[0])
expected.update(char=char_val)
r = self.task.delay(self.MODEL_SYMBOL,
{'char': char_val, 'id': expected['id']})
self.assertDictEqual(expected, r.get())
updated = get_model_dict(SimpleModel.objects.get(pk=expected['id']))
self.assertEquals(expected, updated)
def testSerializer(self):
""" Test serializer_cls """
char_val = str(uuid4())
expected = get_model_dict(self.models[0])
expected.update(char=char_val)
serializer_cls = "{}:{}".format(SimpleTaskSerializer.__module__,
SimpleTaskSerializer.__name__)
r = self.task.delay(self.MODEL_SYMBOL,
{'char': char_val, 'id': expected['id']},
serializer_cls=serializer_cls)
self.assertDictEqual({'id': expected['id']}, r.get())
def testNoExistSerializer(self):
""" Test not existing serializer """
char_val = str(uuid4())
expected = get_model_dict(self.models[0])
with self.assertRaises(ImportError):
with unpack_exception():
self.task.delay(self.MODEL_SYMBOL,
{'char': char_val, 'id': expected['id']},
serializer_cls='not.existing.symbol').get()
def testNoExistSerializerRemoteError(self):
""" Perform testNoExistSerializer with remote errors handling
in another mode."""
old = tasks.rpc.conf['WRAP_REMOTE_ERRORS']
tasks.rpc.conf['WRAP_REMOTE_ERRORS'] = not old
return self.testNoExistSerializer()
def testNoValidSerializer(self):
""" Test not valid serializer """
char_val = str(uuid4())
expected = get_model_dict(self.models[0])
with self.assertRaisesRegexp(TypeError, r'not a DRF serializer'):
serializer_cls = 'celery_rpc.tests.models:SimpleModel'
with unpack_exception():
self.task.delay(self.MODEL_SYMBOL,
{'char': char_val, 'id': expected['id']},
serializer_cls=serializer_cls).get()
def testNoValidSerializerRemoteError(self):
""" Perform testNoValidSerializer with remote errors handling
in another mode."""
old = tasks.rpc.conf['WRAP_REMOTE_ERRORS']
tasks.rpc.conf['WRAP_REMOTE_ERRORS'] = not old
return self.testNoValidSerializer()
class GetSetTaskTests(SingleObjectsDoesNotExistMixin, BaseTaskTests):
task = tasks.getset
def testGetSetOne(self):
new = get_model_dict(self.models[0])
new.update(char=str(uuid4()))
r = self.task.delay(self.MODEL_SYMBOL, new)
old = get_model_dict(self.models[0])
self.assertEquals(old, r.get())
updated = get_model_dict(SimpleModel.objects.get(pk=old['id']))
self.assertEquals(new, updated)
def testGetSetMulti(self):
new = [get_model_dict(e) for e in self.models[0:2]]
for e in new:
e.update(char=str(uuid4()))
r = self.task.delay(self.MODEL_SYMBOL, new)
result = r.get()
self.assertEquals(2, len(result))
old = [get_model_dict(e) for e in self.models[0:2]]
self.assertEquals(old, result)
updated = [get_model_dict(o) for o in SimpleModel.objects.all()[0:2]]
self.assertEquals(new, updated)
def testPartialUpdate(self):
""" Check that getset allow update model partially
"""
m = factories.PartialUpdateModelFactory()
preserve_f2 = m.f2
expected = randint(1, 1000)
r = self.task.delay('celery_rpc.tests.models:PartialUpdateModel',
{'f1': expected, 'pk': m.pk})
r.get()
m = PartialUpdateModel.objects.get(pk=m.pk)
self.assertEquals(expected, m.f1)
self.assertEquals(preserve_f2, m.f2)
class CreateTaskTests(BaseTaskTests):
task = tasks.create
def testCreateOne(self):
expected = str(uuid4())
self.assertEquals(0, SimpleModel.objects.filter(char=expected).count())
r = self.task.delay(self.MODEL_SYMBOL, {'char': expected})
self.assertEquals(expected, r.get()['char'])
self.assertEquals(1, SimpleModel.objects.filter(char=expected).count())
def testCreateMulti(self):
uuids = str(uuid4()), str(uuid4())
expected = [{'char': v} for v in uuids]
self.assertEquals(0, SimpleModel.objects.filter(char__in=uuids).count())
r = self.task.delay(self.MODEL_SYMBOL, expected)
self.assertEquals(expected, [{'char': i['char']} for i in r.get()])
self.assertEquals(2, SimpleModel.objects.filter(char__in=uuids).count())
def checkSingleObjectDoesNotExist(self, *args):
""" Creates new object if provided ID does not exist """
expected = str(uuid4())
self.assertEquals(0, SimpleModel.objects.filter(char=expected).count())
unexpected_id = randint(100, 1000)
r = self.task.delay(self.MODEL_SYMBOL, {'char': expected,
'id': unexpected_id})
self.assertEquals(expected, r.get()['char'])
self.assertNotEquals(unexpected_id, r.get()['id'])
self.assertEquals(0, SimpleModel.objects.filter(
char=unexpected_id).count())
self.assertEquals(1, SimpleModel.objects.filter(
char=expected).count())
def testSingleObjectAlreadyExist(self):
""" Raise exception if object already exists """
pk = randint(1, 1000)
obj = NonAutoPrimaryKeyModel.objects.create(pk=pk)
with self.assertRaisesRegexp(
ModelTaskError,
r'primary key|PRIMARY KEY|This field must be unique'
r'|with this id already exists') as ctx:
with unpack_exception():
r = self.task.delay(
'celery_rpc.tests.models:NonAutoPrimaryKeyModel',
{'id': obj.pk})
r.get()
self.assertNotEquals(self.models[0].id, ctx.exception.args[1]['id'])
def testSingleObjectAlreadyExistRemoteError(self):
""" Perform testSingleObjectAlreadyExist with remote errors handling
in another mode."""
old = tasks.rpc.conf['WRAP_REMOTE_ERRORS']
tasks.rpc.conf['WRAP_REMOTE_ERRORS'] = not old
return self.testSingleObjectAlreadyExist()
class UpdateOrCreateTaskTests(CreateTaskTests, UpdateTaskTests):
task = tasks.update_or_create
def testSingleObjectAlreadyExist(self):
super(UpdateOrCreateTaskTests, self).testUpdateOne()
class DeleteTaskTests(SingleObjectsDoesNotExistMixin, BaseTaskTests):
task = tasks.delete
def testDeleteOne(self):
expected = get_model_dict(self.models[0])
r = self.task.delay(self.MODEL_SYMBOL, expected)
self.assertEquals(None, r.get())
self.assertEquals(0, SimpleModel.objects.filter(
id=expected['id']).count())
def testDeleteMany(self):
expected = (get_model_dict(self.models[0]),
get_model_dict(self.models[1]))
r = self.task.delay(self.MODEL_SYMBOL, expected)
self.assertEquals([], r.get())
ids = [v['id'] for v in expected]
self.assertEquals(0, SimpleModel.objects.filter(id__in=ids).count())
def plus(a, b):
return a + b
class CallTaskTests(TestCase):
def testCallPlus(self):
a = 2
b = 3
expected = a + b
r = tasks.call.delay('celery_rpc.tests.test_tasks:plus', [a, b],
None)
self.assertEquals(expected, r.get())
class OverrideTaskTests(TestCase):
""" Check if base task class overriding is worked.
"""
def testOverrideModelTask(self):
self.assertIsInstance(tasks.filter, CustomModelTask)
class TranslateTaskTests(BaseTaskTests):
task = tasks.translate
transform_map = {'title': 'char'}
def _transform_keys(self, transform_map, data):
result = {}
for new_key, old_key in transform_map.items():
if old_key in data.keys():
result[new_key] = data[old_key]
return result
def testTransformDict(self):
before = get_model_dict(self.models[0])
after = self._transform_keys(self.transform_map, before)
r = self.task.delay(self.transform_map, before)
self.assertEquals(after, r.get())
def testTransformList(self):
before = get_model_dict_from_list(self.models)
after = before[:]
for index, el in enumerate(after):
after[index] = self._transform_keys(self.transform_map, el)
r = self.task.delay(self.transform_map, before)
self.assertEquals(after, r.get())
def testTransformWithDefaults(self):
defaults = dict(bart='simpson')
before = get_model_dict(self.models[0])
after = self._transform_keys(self.transform_map, before)
after.update(defaults)
r = self.task.delay(self.transform_map, before, defaults=defaults)
self.assertEquals(after, r.get())
| unlicense |
chemelnucfin/tensorflow | tensorflow/lite/schema/upgrade_schema_test.py | 26 | 8644 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing for updating TensorFlow lite schema."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tempfile
from tensorflow.lite.schema import upgrade_schema as upgrade_schema_lib
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
EMPTY_TEST_SCHEMA_V1 = {
"version": 1,
"operator_codes": [],
"subgraphs": [],
}
EMPTY_TEST_SCHEMA_V3 = {
"version": 3,
"operator_codes": [],
"subgraphs": [],
"buffers": [{
"data": []
}]
}
TEST_SCHEMA_V0 = {
"operator_codes": [],
"tensors": [],
"inputs": [],
"outputs": [],
"operators": [],
"version": 0
}
TEST_SCHEMA_V3 = {
"operator_codes": [],
"buffers": [{
"data": []
}],
"subgraphs": [{
"tensors": [],
"inputs": [],
"outputs": [],
"operators": [],
}],
"version":
3
}
FULL_TEST_SCHEMA_V1 = {
"version":
1,
"operator_codes": [
{
"builtin_code": "CONVOLUTION"
},
{
"builtin_code": "DEPTHWISE_CONVOLUTION"
},
{
"builtin_code": "AVERAGE_POOL"
},
{
"builtin_code": "MAX_POOL"
},
{
"builtin_code": "L2_POOL"
},
{
"builtin_code": "SIGMOID"
},
{
"builtin_code": "L2NORM"
},
{
"builtin_code": "LOCAL_RESPONSE_NORM"
},
{
"builtin_code": "ADD"
},
{
"builtin_code": "Basic_RNN"
},
],
"subgraphs": [{
"operators": [
{
"builtin_options_type": "PoolOptions"
},
{
"builtin_options_type": "DepthwiseConvolutionOptions"
},
{
"builtin_options_type": "ConvolutionOptions"
},
{
"builtin_options_type": "LocalResponseNormOptions"
},
{
"builtin_options_type": "BasicRNNOptions"
},
],
}],
"description":
"",
}
FULL_TEST_SCHEMA_V3 = {
"version":
3,
"operator_codes": [
{
"builtin_code": "CONV_2D"
},
{
"builtin_code": "DEPTHWISE_CONV_2D"
},
{
"builtin_code": "AVERAGE_POOL_2D"
},
{
"builtin_code": "MAX_POOL_2D"
},
{
"builtin_code": "L2_POOL_2D"
},
{
"builtin_code": "LOGISTIC"
},
{
"builtin_code": "L2_NORMALIZATION"
},
{
"builtin_code": "LOCAL_RESPONSE_NORMALIZATION"
},
{
"builtin_code": "ADD"
},
{
"builtin_code": "RNN"
},
],
"subgraphs": [{
"operators": [
{
"builtin_options_type": "Pool2DOptions"
},
{
"builtin_options_type": "DepthwiseConv2DOptions"
},
{
"builtin_options_type": "Conv2DOptions"
},
{
"builtin_options_type": "LocalResponseNormalizationOptions"
},
{
"builtin_options_type": "RNNOptions"
},
],
}],
"description":
"",
"buffers": [{
"data": []
}]
}
BUFFER_TEST_V2 = {
"operator_codes": [],
"buffers": [],
"subgraphs": [{
"tensors": [
{
"data_buffer": [1, 2, 3, 4]
},
{
"data_buffer": [1, 2, 3, 4, 5, 6, 7, 8]
},
{
"data_buffer": []
},
],
"inputs": [],
"outputs": [],
"operators": [],
}],
"version":
2
}
BUFFER_TEST_V3 = {
"operator_codes": [],
"subgraphs": [{
"tensors": [
{
"buffer": 1
},
{
"buffer": 2
},
{
"buffer": 0
},
],
"inputs": [],
"outputs": [],
"operators": [],
}],
"buffers": [
{
"data": []
},
{
"data": [1, 2, 3, 4]
},
{
"data": [1, 2, 3, 4, 5, 6, 7, 8]
},
],
"version":
3
}
def JsonDumpAndFlush(data, fp):
"""Write the dictionary `data` to a JSON file `fp` (and flush).
Args:
    data: a dictionary that is JSON serializable.
    fp: a file-like object.
"""
json.dump(data, fp)
fp.flush()
class TestSchemaUpgrade(test_util.TensorFlowTestCase):
def testNonExistentFile(self):
converter = upgrade_schema_lib.Converter()
non_existent = tempfile.mktemp(suffix=".json")
with self.assertRaisesRegexp(IOError, "No such file or directory"):
converter.Convert(non_existent, non_existent)
def testInvalidExtension(self):
converter = upgrade_schema_lib.Converter()
invalid_extension = tempfile.mktemp(suffix=".foo")
with self.assertRaisesRegexp(ValueError, "Invalid extension on input"):
converter.Convert(invalid_extension, invalid_extension)
with tempfile.NamedTemporaryFile(suffix=".json", mode="w+") as in_json:
JsonDumpAndFlush(EMPTY_TEST_SCHEMA_V1, in_json)
with self.assertRaisesRegexp(ValueError, "Invalid extension on output"):
converter.Convert(in_json.name, invalid_extension)
def CheckConversion(self, data_old, data_expected):
"""Given a data dictionary, test upgrading to current version.
Args:
data_old: TFLite model as a dictionary (arbitrary version).
data_expected: TFLite model as a dictionary (upgraded).
"""
converter = upgrade_schema_lib.Converter()
with tempfile.NamedTemporaryFile(suffix=".json", mode="w+") as in_json, \
tempfile.NamedTemporaryFile(
suffix=".json", mode="w+") as out_json, \
tempfile.NamedTemporaryFile(
suffix=".bin", mode="w+b") as out_bin, \
tempfile.NamedTemporaryFile(
suffix=".tflite", mode="w+b") as out_tflite:
JsonDumpAndFlush(data_old, in_json)
# Test JSON output
converter.Convert(in_json.name, out_json.name)
# Test binary output
# Convert to .tflite and then to .bin and check if binary is equal
converter.Convert(in_json.name, out_tflite.name)
converter.Convert(out_tflite.name, out_bin.name)
self.assertEqual(
open(out_bin.name, "rb").read(),
open(out_tflite.name, "rb").read())
      # Test that the conversion actually produced valid new JSON.
converted_schema = json.load(out_json)
self.assertEqual(converted_schema, data_expected)
def testAlreadyUpgraded(self):
"""A file already at version 3 should stay at version 3."""
self.CheckConversion(EMPTY_TEST_SCHEMA_V3, EMPTY_TEST_SCHEMA_V3)
self.CheckConversion(TEST_SCHEMA_V3, TEST_SCHEMA_V3)
self.CheckConversion(BUFFER_TEST_V3, BUFFER_TEST_V3)
# Disable this while we have incorrectly versioned structures around.
# def testV0Upgrade_IntroducesSubgraphs(self):
# """V0 did not have subgraphs; check to make sure they get introduced."""
# self.CheckConversion(TEST_SCHEMA_V0, TEST_SCHEMA_V3)
def testV1Upgrade_RenameOps(self):
"""V1 had many different names for ops; check to make sure they rename."""
self.CheckConversion(EMPTY_TEST_SCHEMA_V1, EMPTY_TEST_SCHEMA_V3)
self.CheckConversion(FULL_TEST_SCHEMA_V1, FULL_TEST_SCHEMA_V3)
def testV2Upgrade_CreateBuffers(self):
"""V2 did not have buffers; check to make sure they are created."""
self.CheckConversion(BUFFER_TEST_V2, BUFFER_TEST_V3)
if __name__ == "__main__":
test_lib.main()
| apache-2.0 |
RO-ny9/python-for-android | python3-alpha/extra_modules/gdata/apps/multidomain/client.py | 48 | 13872 | #!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MultiDomainProvisioningClient simplifies Multidomain Provisioning API calls.
MultiDomainProvisioningClient extends gdata.client.GDClient to ease interaction
with the Google Multidomain Provisioning API. These interactions include the
ability to create, retrieve, update and delete users and aliases in multiple
domains.
"""
__author__ = 'Claudio Cherubino <[email protected]>'
import urllib.request, urllib.parse, urllib.error
import gdata.apps.multidomain.data
import gdata.client
# Multidomain URI templates
# The strings in this template are eventually replaced with the feed type
# (user/alias), API version and Google Apps domain name, respectively.
MULTIDOMAIN_URI_TEMPLATE = '/a/feeds/%s/%s/%s'
# The strings in this template are eventually replaced with the API version,
# Google Apps domain name and old email address, respectively.
MULTIDOMAIN_USER_RENAME_URI_TEMPLATE = '/a/feeds/user/userEmail/%s/%s/%s'
# The value for user requests
MULTIDOMAIN_USER_FEED = 'user'
# The value for alias requests
MULTIDOMAIN_ALIAS_FEED = 'alias'
class MultiDomainProvisioningClient(gdata.client.GDClient):
"""Client extension for the Google MultiDomain Provisioning API service.
Attributes:
host: string The hostname for the MultiDomain Provisioning API service.
api_version: string The version of the MultiDomain Provisioning API.
"""
host = 'apps-apis.google.com'
api_version = '2.0'
auth_service = 'apps'
auth_scopes = gdata.gauth.AUTH_SCOPES['apps']
ssl = True
def __init__(self, domain, auth_token=None, **kwargs):
"""Constructs a new client for the MultiDomain Provisioning API.
Args:
domain: string The Google Apps domain with MultiDomain Provisioning.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
          OAuthToken which authorizes this client to manage users and aliases.
kwargs: The other parameters to pass to the gdata.client.GDClient
constructor.
"""
gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
self.domain = domain
def make_multidomain_provisioning_uri(
self, feed_type, email=None, params=None):
"""Creates a resource feed URI for the MultiDomain Provisioning API.
Using this client's Google Apps domain, create a feed URI for multidomain
provisioning in that domain. If an email address is provided, return a
URI for that specific resource. If params are provided, append them as GET
params.
Args:
feed_type: string The type of feed (user/alias)
email: string (optional) The email address of multidomain resource for
which to make a feed URI.
params: dict (optional) key -> value params to append as GET vars to the
URI. Example: params={'start': 'my-resource-id'}
Returns:
A string giving the URI for multidomain provisioning for this client's
Google Apps domain.
"""
uri = MULTIDOMAIN_URI_TEMPLATE % (feed_type, self.api_version, self.domain)
if email:
uri += '/' + email
if params:
uri += '?' + urllib.parse.urlencode(params)
return uri
MakeMultidomainProvisioningUri = make_multidomain_provisioning_uri
def make_multidomain_user_provisioning_uri(self, email=None, params=None):
"""Creates a resource feed URI for the MultiDomain User Provisioning API.
Using this client's Google Apps domain, create a feed URI for multidomain
user provisioning in that domain. If an email address is provided, return a
URI for that specific resource. If params are provided, append them as GET
params.
Args:
email: string (optional) The email address of multidomain user for which
to make a feed URI.
params: dict (optional) key -> value params to append as GET vars to the
URI. Example: params={'start': 'my-resource-id'}
Returns:
      A string giving the URI for multidomain user provisioning for this
      client's Google Apps domain.
"""
return self.make_multidomain_provisioning_uri(
MULTIDOMAIN_USER_FEED, email, params)
MakeMultidomainUserProvisioningUri = make_multidomain_user_provisioning_uri
def make_multidomain_alias_provisioning_uri(self, email=None, params=None):
"""Creates a resource feed URI for the MultiDomain Alias Provisioning API.
Using this client's Google Apps domain, create a feed URI for multidomain
alias provisioning in that domain. If an email address is provided, return a
URI for that specific resource. If params are provided, append them as GET
params.
Args:
email: string (optional) The email address of multidomain alias for which
to make a feed URI.
params: dict (optional) key -> value params to append as GET vars to the
URI. Example: params={'start': 'my-resource-id'}
Returns:
A string giving the URI for multidomain alias provisioning for this
client's Google Apps domain.
"""
return self.make_multidomain_provisioning_uri(
MULTIDOMAIN_ALIAS_FEED, email, params)
MakeMultidomainAliasProvisioningUri = make_multidomain_alias_provisioning_uri
def retrieve_all_pages(self, uri, desired_class=gdata.data.GDFeed, **kwargs):
"""Retrieves all pages from uri.
Args:
uri: The uri where the first page is.
desired_class: Type of feed that is retrieved.
kwargs: The other parameters to pass to gdata.client.GDClient.GetFeed()
Returns:
A desired_class feed object.
"""
feed = self.GetFeed(
uri,
desired_class=desired_class,
**kwargs)
next_link = feed.GetNextLink()
while next_link is not None:
uri = next_link.href
temp_feed = self.GetFeed(
uri, desired_class=desired_class, **kwargs)
feed.entry = feed.entry + temp_feed.entry
next_link = temp_feed.GetNextLink()
return feed
RetrieveAllPages = retrieve_all_pages
def retrieve_all_users(self, **kwargs):
"""Retrieves all users in all domains.
Args:
kwargs: The other parameters to pass to gdata.client.GDClient.GetFeed()
Returns:
A gdata.data.GDFeed of the domain users
"""
uri = self.MakeMultidomainUserProvisioningUri()
return self.RetrieveAllPages(
uri,
desired_class=gdata.apps.multidomain.data.UserFeed,
**kwargs)
RetrieveAllUsers = retrieve_all_users
def retrieve_user(self, email, **kwargs):
"""Retrieves a single user in the domain.
Args:
email: string The email address of the user to be retrieved
kwargs: The other parameters to pass to gdata.client.GDClient.GetEntry()
Returns:
A gdata.apps.multidomain.data.UserEntry representing the user
"""
uri = self.MakeMultidomainUserProvisioningUri(email=email)
return self.GetEntry(
uri,
desired_class=gdata.apps.multidomain.data.UserEntry,
**kwargs)
RetrieveUser = retrieve_user
def create_user(self, email, first_name, last_name, password, is_admin,
hash_function=None, suspended=None, change_password=None,
ip_whitelisted=None, quota=None, **kwargs):
"""Creates an user in the domain with the given properties.
Args:
email: string The email address of the user.
first_name: string The first name of the user.
last_name: string The last name of the user.
password: string The password of the user.
is_admin: Boolean Whether or not the user has administrator privileges.
hash_function: string (optional) The name of the function used to hash the
password.
suspended: Boolean (optional) Whether or not the user is suspended.
change_password: Boolean (optional) Whether or not the user must change
password at first login.
ip_whitelisted: Boolean (optional) Whether or not the user's ip is
whitelisted.
quota: string (optional) The value (in GB) of the user's quota.
kwargs: The other parameters to pass to gdata.client.GDClient.post().
Returns:
A gdata.apps.multidomain.data.UserEntry of the new user
"""
new_user = gdata.apps.multidomain.data.UserEntry(
email=email, first_name=first_name, last_name=last_name,
password=password, is_admin=is_admin, hash_function=hash_function,
suspended=suspended, change_password=change_password,
ip_whitelisted=ip_whitelisted, quota=quota)
return self.post(new_user, self.MakeMultidomainUserProvisioningUri(),
**kwargs)
CreateUser = create_user
def update_user(self, email, user_entry, **kwargs):
"""Deletes the user with the given email address.
Args:
email: string The email address of the user to be updated.
user_entry: UserEntry The user entry with updated values.
kwargs: The other parameters to pass to gdata.client.GDClient.put()
Returns:
A gdata.apps.multidomain.data.UserEntry representing the user
"""
return self.update(user_entry,
uri=self.MakeMultidomainUserProvisioningUri(email),
**kwargs)
UpdateUser = update_user
def delete_user(self, email, **kwargs):
"""Deletes the user with the given email address.
Args:
email: string The email address of the user to delete.
kwargs: The other parameters to pass to gdata.client.GDClient.delete()
Returns:
An HTTP response object. See gdata.client.request().
"""
return self.delete(self.MakeMultidomainUserProvisioningUri(email), **kwargs)
DeleteUser = delete_user
def rename_user(self, old_email, new_email, **kwargs):
"""Renames an user's account to a different domain.
Args:
old_email: string The old email address of the user to rename.
new_email: string The new email address for the user to be renamed.
kwargs: The other parameters to pass to gdata.client.GDClient.put()
Returns:
A gdata.apps.multidomain.data.UserRenameRequest representing the request.
"""
rename_uri = MULTIDOMAIN_USER_RENAME_URI_TEMPLATE % (self.api_version,
self.domain,
old_email)
entry = gdata.apps.multidomain.data.UserRenameRequest(new_email)
return self.update(entry, uri=rename_uri, **kwargs)
RenameUser = rename_user
def retrieve_all_aliases(self, **kwargs):
"""Retrieves all aliases in the domain.
Args:
kwargs: The other parameters to pass to gdata.client.GDClient.GetFeed()
Returns:
A gdata.data.GDFeed of the domain aliases
"""
uri = self.MakeMultidomainAliasProvisioningUri()
return self.RetrieveAllPages(
uri,
desired_class=gdata.apps.multidomain.data.AliasFeed,
**kwargs)
RetrieveAllAliases = retrieve_all_aliases
def retrieve_alias(self, email, **kwargs):
"""Retrieves a single alias in the domain.
Args:
email: string The email address of the alias to be retrieved
kwargs: The other parameters to pass to gdata.client.GDClient.GetEntry()
Returns:
A gdata.apps.multidomain.data.AliasEntry representing the alias
"""
uri = self.MakeMultidomainAliasProvisioningUri(email=email)
return self.GetEntry(
uri,
desired_class=gdata.apps.multidomain.data.AliasEntry,
**kwargs)
RetrieveAlias = retrieve_alias
def retrieve_all_user_aliases(self, user_email, **kwargs):
"""Retrieves all aliases for a given user in the domain.
Args:
user_email: string Email address of the user whose aliases
are to be retrieved
kwargs: The other parameters to pass to gdata.client.GDClient.GetFeed()
Returns:
A gdata.data.GDFeed of the user aliases
"""
uri = self.MakeMultidomainAliasProvisioningUri(
params = {'userEmail' : user_email})
return self.RetrieveAllPages(
uri,
desired_class=gdata.apps.multidomain.data.AliasFeed,
**kwargs)
RetrieveAllUserAliases = retrieve_all_user_aliases
def create_alias(self, user_email, alias_email, **kwargs):
"""Creates an alias in the domain with the given properties.
Args:
user_email: string The email address of the user.
      alias_email: string The alias email address to create for the user.
kwargs: The other parameters to pass to gdata.client.GDClient.post().
Returns:
A gdata.apps.multidomain.data.AliasEntry of the new alias
"""
new_alias = gdata.apps.multidomain.data.AliasEntry(
user_email=user_email, alias_email=alias_email)
return self.post(new_alias, self.MakeMultidomainAliasProvisioningUri(),
**kwargs)
CreateAlias = create_alias
def delete_alias(self, email, **kwargs):
"""Deletes the alias with the given email address.
Args:
email: string The email address of the alias to delete.
kwargs: The other parameters to pass to gdata.client.GDClient.delete()
Returns:
An HTTP response object. See gdata.client.request().
"""
return self.delete(self.MakeMultidomainAliasProvisioningUri(email),
**kwargs)
DeleteAlias = delete_alias
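# Example (hedged): a minimal usage sketch of this client. The domain, token
# and email addresses below are illustrative assumptions, not part of this
# module, and real calls require a valid Google Apps authorization token.
#
#   client = MultiDomainProvisioningClient('example.com', auth_token=token)
#   client.CreateUser('liz@example.com', 'Liz', 'Doe', 'new_password',
#                     is_admin=False)
#   client.CreateAlias('liz@example.com', 'helpdesk@example.com')
#   aliases = client.RetrieveAllUserAliases('liz@example.com')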
| apache-2.0 |
saurabh6790/med_app_rels | setup/utils.py | 21 | 1350 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import _, msgprint
import json
def get_company_currency(company):
currency = webnotes.conn.get_value("Company", company, "default_currency")
if not currency:
currency = webnotes.conn.get_default("currency")
if not currency:
msgprint(_('Please specify Default Currency in Company Master \
and Global Defaults'), raise_exception=True)
return currency
def get_root_of(doctype):
"""Get root element of a DocType with a tree structure"""
result = webnotes.conn.sql_list("""select name from `tab%s`
where lft=1 and rgt=(select max(rgt) from `tab%s` where docstatus < 2)""" %
(doctype, doctype))
return result[0] if result else None
def get_ancestors_of(doctype, name):
"""Get ancestor elements of a DocType with a tree structure"""
lft, rgt = webnotes.conn.get_value(doctype, name, ["lft", "rgt"])
result = webnotes.conn.sql_list("""select name from `tab%s`
where lft<%s and rgt>%s order by lft desc""" % (doctype, "%s", "%s"), (lft, rgt))
return result or []
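# Note (hedged): both helpers above assume the nested-set tree model. With
# illustrative rows Root(lft=1, rgt=6), A(lft=2, rgt=5) and B(lft=3, rgt=4),
# get_ancestors_of(dt, "B") selects names where lft < 3 and rgt > 4, giving
# ["A", "Root"], and get_root_of(dt) returns "Root" (the row with lft=1).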
@webnotes.whitelist()
def get_price_list_currency(price_list):
return {"price_list_currency": webnotes.conn.get_value("Price List", price_list,
"currency")} | agpl-3.0 |
ayushagrawal288/zamboni | mkt/search/tests/test_middleware.py | 19 | 1188 | from django.test.client import RequestFactory
import elasticsearch
import mock
from nose.tools import eq_
import mkt.site.tests
from mkt.search.middleware import ElasticsearchExceptionMiddleware as ESM
class TestElasticsearchExceptionMiddleware(mkt.site.tests.TestCase):
def setUp(self):
self.request = RequestFactory()
@mock.patch('mkt.search.middleware.render')
def test_exceptions_we_catch(self, render_mock):
# These are instantiated with an error string.
for e in [elasticsearch.ElasticsearchException,
elasticsearch.SerializationError,
elasticsearch.TransportError,
elasticsearch.NotFoundError,
elasticsearch.RequestError]:
ESM().process_exception(self.request, e(503, 'ES ERROR'))
render_mock.assert_called_with(self.request, 'search/down.html',
status=503)
render_mock.reset_mock()
@mock.patch('mkt.search.middleware.render')
def test_exceptions_we_do_not_catch(self, render_mock):
ESM().process_exception(self.request, Exception)
eq_(render_mock.called, False)
| bsd-3-clause |
axbaretto/beam | sdks/python/.tox/py27gcp/lib/python2.7/site-packages/google/protobuf/internal/more_extensions_dynamic_pb2.py | 37 | 3979 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/internal/more_extensions_dynamic.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf.internal import more_extensions_pb2 as google_dot_protobuf_dot_internal_dot_more__extensions__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/internal/more_extensions_dynamic.proto',
package='google.protobuf.internal',
syntax='proto2',
serialized_pb=_b('\n6google/protobuf/internal/more_extensions_dynamic.proto\x12\x18google.protobuf.internal\x1a.google/protobuf/internal/more_extensions.proto\"\x1f\n\x12\x44ynamicMessageType\x12\t\n\x01\x61\x18\x01 \x01(\x05:J\n\x17\x64ynamic_int32_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x64 \x01(\x05:z\n\x19\x64ynamic_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x65 \x01(\x0b\x32,.google.protobuf.internal.DynamicMessageType')
,
dependencies=[google_dot_protobuf_dot_internal_dot_more__extensions__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DYNAMIC_INT32_EXTENSION_FIELD_NUMBER = 100
dynamic_int32_extension = _descriptor.FieldDescriptor(
name='dynamic_int32_extension', full_name='google.protobuf.internal.dynamic_int32_extension', index=0,
number=100, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
DYNAMIC_MESSAGE_EXTENSION_FIELD_NUMBER = 101
dynamic_message_extension = _descriptor.FieldDescriptor(
name='dynamic_message_extension', full_name='google.protobuf.internal.dynamic_message_extension', index=1,
number=101, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
_DYNAMICMESSAGETYPE = _descriptor.Descriptor(
name='DynamicMessageType',
full_name='google.protobuf.internal.DynamicMessageType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='a', full_name='google.protobuf.internal.DynamicMessageType.a', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=132,
serialized_end=163,
)
DESCRIPTOR.message_types_by_name['DynamicMessageType'] = _DYNAMICMESSAGETYPE
DESCRIPTOR.extensions_by_name['dynamic_int32_extension'] = dynamic_int32_extension
DESCRIPTOR.extensions_by_name['dynamic_message_extension'] = dynamic_message_extension
DynamicMessageType = _reflection.GeneratedProtocolMessageType('DynamicMessageType', (_message.Message,), dict(
DESCRIPTOR = _DYNAMICMESSAGETYPE,
__module__ = 'google.protobuf.internal.more_extensions_dynamic_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.DynamicMessageType)
))
_sym_db.RegisterMessage(DynamicMessageType)
google_dot_protobuf_dot_internal_dot_more__extensions__pb2.ExtendedMessage.RegisterExtension(dynamic_int32_extension)
dynamic_message_extension.message_type = _DYNAMICMESSAGETYPE
google_dot_protobuf_dot_internal_dot_more__extensions__pb2.ExtendedMessage.RegisterExtension(dynamic_message_extension)
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
sestrella/ansible | test/units/modules/network/f5/test_bigip_firewall_log_profile_network.py | 22 | 5706 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_firewall_log_profile_network import ApiParameters
from library.modules.bigip_firewall_log_profile_network import ModuleParameters
from library.modules.bigip_firewall_log_profile_network import ModuleManager
from library.modules.bigip_firewall_log_profile_network import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_firewall_log_profile_network import ApiParameters
from ansible.modules.network.f5.bigip_firewall_log_profile_network import ModuleParameters
from ansible.modules.network.f5.bigip_firewall_log_profile_network import ModuleManager
from ansible.modules.network.f5.bigip_firewall_log_profile_network import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
profile_name='foo',
rate_limit=150000,
log_publisher='/Common/foobar',
log_tcp_errors=dict(
enabled='yes',
rate_limit=10000,
),
log_tcp_events=dict(
enabled='yes',
rate_limit=30000,
),
log_ip_errors=dict(
enabled='yes',
rate_limit=60000,
),
log_matches_accept_rule=dict(
enabled='yes',
rate_limit=80000,
),
log_matches_drop_rule=dict(
enabled='no',
rate_limit='indefinite',
),
log_matches_reject_rule=dict(
enabled='no',
rate_limit='indefinite',
),
log_format_delimiter='.',
log_storage_format='field-list',
log_message_fields=['vlan', 'translated_vlan', 'src_ip']
)
p = ModuleParameters(params=args)
assert p.profile_name == 'foo'
assert p.rate_limit == 150000
assert p.log_publisher == '/Common/foobar'
assert p.log_tcp_events == 'enabled'
assert p.rate_tcp_events == 30000
assert p.log_ip_errors == 'enabled'
assert p.rate_ip_errors == 60000
assert p.log_tcp_errors == 'enabled'
assert p.rate_tcp_errors == 10000
assert p.log_acl_match_accept == 'enabled'
assert p.rate_acl_match_accept == 80000
assert p.log_acl_match_drop == 'disabled'
assert p.rate_acl_match_drop == 4294967295
assert p.log_acl_match_reject == 'disabled'
assert p.rate_acl_match_reject == 4294967295
assert p.log_format_delimiter == '.'
assert p.log_storage_format == 'field-list'
def test_api_parameters(self):
args = load_fixture('load_afm_global_network_log_network.json')
p = ApiParameters(params=args)
assert p.rate_limit == 4294967295
assert p.log_tcp_events == 'disabled'
assert p.rate_tcp_events == 4294967295
assert p.log_ip_errors == 'disabled'
assert p.rate_ip_errors == 4294967295
assert p.log_tcp_errors == 'disabled'
assert p.rate_tcp_errors == 4294967295
assert p.log_acl_match_accept == 'disabled'
assert p.rate_acl_match_accept == 4294967295
assert p.log_acl_match_drop == 'disabled'
assert p.rate_acl_match_drop == 4294967295
assert p.log_acl_match_reject == 'disabled'
assert p.rate_acl_match_reject == 4294967295
assert p.log_format_delimiter == ','
assert p.log_storage_format == 'none'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
set_module_args(dict(
profile_name='foo',
rate_limit=150000,
log_publisher='/Common/foobar',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['rate_limit'] == 150000
assert results['log_publisher'] == '/Common/foobar'
| gpl-3.0 |
Francis-Liu/animated-broccoli | nova/servicegroup/drivers/mc.py | 19 | 3627 | # Service heartbeat driver using Memcached
# Copyright (c) 2013 Akira Yoshiyama <akirayoshiyama at gmail dot com>
#
# This is derived from nova/servicegroup/drivers/db.py.
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from nova.i18n import _, _LI, _LW
from nova.openstack.common import memorycache
from nova.servicegroup import api
from nova.servicegroup.drivers import base
CONF = cfg.CONF
CONF.import_opt('service_down_time', 'nova.service')
LOG = logging.getLogger(__name__)
class MemcachedDriver(base.Driver):
def __init__(self, *args, **kwargs):
if not CONF.memcached_servers:
raise RuntimeError(_('memcached_servers not defined'))
self.mc = memorycache.get_client()
def join(self, member_id, group_id, service=None):
"""Join the given service with its group."""
LOG.debug('Memcached_Driver: join new ServiceGroup member '
'%(member_id)s to the %(group_id)s group, '
'service = %(service)s',
{'member_id': member_id,
'group_id': group_id,
'service': service})
if service is None:
raise RuntimeError(_('service is a mandatory argument for '
'Memcached based ServiceGroup driver'))
report_interval = service.report_interval
if report_interval:
service.tg.add_timer(report_interval, self._report_state,
api.INITIAL_REPORTING_DELAY, service)
def is_up(self, service_ref):
"""Moved from nova.utils
Check whether a service is up based on last heartbeat.
"""
key = "%(topic)s:%(host)s" % service_ref
is_up = self.mc.get(str(key)) is not None
if not is_up:
LOG.debug('Seems service %s is down' % key)
return is_up
def _report_state(self, service):
"""Update the state of this service in the datastore."""
try:
key = "%(topic)s:%(host)s" % service.service_ref
# memcached has data expiration time capability.
# set(..., time=CONF.service_down_time) uses it and
# reduces key-deleting code.
self.mc.set(str(key),
timeutils.utcnow(),
time=CONF.service_down_time)
# TODO(termie): make this pattern be more elegant.
if getattr(service, 'model_disconnected', False):
service.model_disconnected = False
LOG.info(
_LI('Recovered connection to memcache server '
'for reporting service status.'))
# TODO(vish): this should probably only catch connection errors
except Exception:
if not getattr(service, 'model_disconnected', False):
service.model_disconnected = True
LOG.warn(_LW('Lost connection to memcache server '
'for reporting service status.'))
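# Note (hedged): liveness here rests entirely on memcached key expiry. As an
# illustrative trace, a "compute:host1" key written with
# time=CONF.service_down_time keeps is_up() returning True only while the
# periodic _report_state timer refreshes it; once heartbeats stop, the key
# lapses and is_up() turns False without any explicit cleanup.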
| apache-2.0 |
JaSpa/swift | utils/swift_build_support/swift_build_support/cache_util.py | 48 | 1586 | # swift_build_support/cache_util.py -----------------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
"""
Cache related utilities
"""
# ----------------------------------------------------------------------------
from functools import update_wrapper
__all__ = [
'cached',
'reify'
]
def cached(func):
"""Decorator that caches result of method or function.
Note: Support method or function.
"""
cache = {}
def wrapper(*args, **kwargs):
key = tuple(args) + tuple(kwargs.items())
if key not in cache:
result = func(*args, **kwargs)
cache[key] = result
return result
else:
return cache[key]
return update_wrapper(wrapper, func)
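# Example (hedged): an illustrative use of @cached; `fib` is a hypothetical
# function introduced here only to demonstrate the memoization.
#
#   @cached
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(30)  # computed recursively on the first call
#   fib(30)  # answered from the cache on the second call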
def reify(func):
"""Decorator that replaces the wrapped method with the result after the
first call.
    Note: Supports only methods that take no arguments.
"""
class Wrapper(object):
def __get__(self, obj, objtype=None):
if obj is None:
return self
result = func(obj)
setattr(obj, func.__name__, result)
return result
return update_wrapper(Wrapper(), func)
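# Example (hedged): a sketch of @reify on a zero-argument method; the class
# and the expensive lookup are hypothetical.
#
#   class Toolchain(object):
#       @reify
#       def path(self):
#           return expensive_lookup()  # runs once per instance
#
# After the first access, `path` becomes a plain instance attribute, so
# later reads never re-enter the descriptor.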
| apache-2.0 |
DanePubliczneGovPl/ckanext-archiver | ckanext/archiver/interfaces.py | 3 | 1204 | import logging
import ckan.plugins as plugins
from ckan.plugins.interfaces import Interface
log = logging.getLogger(__name__)
class IPipe(Interface):
"""
Process data in a Data Pipeline.
Inherit this to subscribe to events in the Data Pipeline and be able to
broadcast the results for others to process next. In this way, a number of
IPipes can be linked up in sequence to build up a data processing pipeline.
When a resource is archived, it broadcasts its resource_id, perhaps
triggering a process which transforms the data to another format, or loads
it into a datastore, or checks it against a schema. These processes can in
    turn put the resulting data into the pipeline.
"""
def receive_data(self, operation, queue, **params):
pass
@classmethod
def send_data(cls, operation, queue, **params):
for observer in plugins.PluginImplementations(cls):
try:
observer.receive_data(operation, queue, **params)
except Exception, ex:
log.exception(ex)
                # We reraise all exceptions so it is obvious that
                # something is wrong
raise
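# Example (hedged): a minimal subscriber sketch; `LoggingPipe` is hypothetical
# and only illustrates the receive_data/send_data contract defined above.
#
#   class LoggingPipe(plugins.SingletonPlugin):
#       plugins.implements(IPipe)
#
#       def receive_data(self, operation, queue, **params):
#           log.info('Pipeline event %s on queue %s: %r',
#                    operation, queue, params)
#
# A producer (e.g. the archiver) would then broadcast with
# IPipe.send_data('archived', queue, resource_id=resource_id).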
| mit |