repo_name (stringlengths 5–100) | path (stringlengths 4–375) | copies (stringclasses, 991 values) | size (stringlengths 4–7) | content (stringlengths 666–1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
inspyration/house_booking | booking.py | 4 | 11851 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
from openerp.tools.translate import _
import operator
class Booking(osv.Model):
"""Main model"""
_name = "house_booking.booking"
_description = "booking"
_inherit = ['mail.thread']
_states = [
('pending', "Pending"),
('approved', "Approved"),
('denied', "Denied"),
]
def _get_deposit(self, cr, uid, ids, field, arg, context=None):
"""Deposit"""
# TODO: Rework how the configuration is read.
# TODO: Rewrite in a more Pythonic way.
setting_obj = self.pool.get('booking.config.settings')
config_ids = setting_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
if config_ids:
deposit = setting_obj.read(cr, uid, config_ids[0], ['deposit'], context=context)['deposit']
else:
deposit = 0
res = {}
for reserv in self.browse(cr, uid, ids, context=context):
if reserv.price > 0:
res[reserv.id] = deposit
return res
def _get_advance_ratio(self, cr, uid, ids, field, arg, context=None):
"""Advance Payment ratio"""
# TODO: Rework how the configuration is read.
# TODO: Rewrite in a more Pythonic way.
setting_obj = self.pool.get('booking.config.settings')
config_ids = setting_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
if config_ids:
advance_payment = setting_obj.read(cr, uid, config_ids[0], ['advance_payment'], context=context)['advance_payment']
else:
advance_payment = 0
res = {}
for reserv in self.browse(cr, uid, ids, context=context):
res[reserv.id] = advance_payment
return res
def _get_advance_payment(self, cr, uid, ids, field, arg, context=None):
"""Advance Payment depending on price"""
# TODO: Rework how the configuration is read.
# TODO: Rewrite in a more Pythonic way.
setting_obj = self.pool.get('booking.config.settings')
config_ids = setting_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
if config_ids:
advance_payment = setting_obj.read(cr, uid, config_ids[0], ['advance_payment'], context=context)['advance_payment']
else:
advance_payment = 0
res = {}
for reserv in self.browse(cr, uid, ids, context=context):
if reserv.price > 0:
res[reserv.id] = int(round(reserv.price*advance_payment/100, -2))
else:
res[reserv.id] = 0
return res
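# Worked example (hypothetical figures, not taken from the module's data):
# with reserv.price = 1540 and an advance_payment ratio of 30,
#     round(1540 * 30 / 100, -2)  ->  500.0
# so the stored advance payment is int(500.0) == 500, i.e. the advance is
# rounded to the nearest hundred.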
def _get_balance_due(self, cr, uid, ids, field, arg, context=None):
"""Return the difference between total price and advance payment"""
# TODO: Rework how the configuration is read.
# TODO: Rewrite in a more Pythonic way.
setting_obj = self.pool.get('booking.config.settings')
config_ids = setting_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
if config_ids:
advance_payment = setting_obj.read(cr, uid, config_ids[0], ['advance_payment'], context=context)['advance_payment']
else:
advance_payment = 0
res = {}
for reserv in self.browse(cr, uid, ids, context=context):
if reserv.price > 0:
res[reserv.id] = int(reserv.price - round(reserv.price*advance_payment/100, -2))
else:
res[reserv.id] = 0
return res
def _get_title(self, cr, uid, ids, field, arg, context=None):
"""Return the reservation title"""
# TODO: Rework how the configuration is read.
# TODO: Rewrite in a more Pythonic way.
# TODO: The title must be translatable.
setting_obj = self.pool.get('booking.config.settings')
config_ids = setting_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
if config_ids:
booking_title = setting_obj.read(cr, uid, config_ids[0], ['booking_title'], context=context)['booking_title']
else:
booking_title = ""
res = {}
for reserv in self.browse(cr, uid, ids, context=context):
res[reserv.id] = booking_title
return res
def _date_to_datetime(self, cr, uid, ids, field, arg, context=None):
"""Convert date to datetime (with rules for arrival and departure)"""
# TODO: Rewrite in a more Pythonic way.
if field == 'arrival_date':
f, h = operator.attrgetter('arrival_day'), " 16:00:00"
else: # departure_date
f, h = operator.attrgetter('departure_day'), " 10:00:00"
result = {b.id: f(b) + h for b in self.browse(cr, uid, ids, context=context)}
return result
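# Example (hypothetical values): for a booking with arrival_day = '2014-07-01'
# and departure_day = '2014-07-05', the computed fields become
#     arrival_date   -> '2014-07-01 16:00:00'
#     departure_date -> '2014-07-05 10:00:00'
# i.e. check-in is pinned to 16:00 and check-out to 10:00.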
_columns = {
'name': fields.char(
'Title',
size=256,
required=True,
select=True,
),
'arrival_day': fields.date(
string="Arrival day",
required=True,
),
'arrival_date': fields.function(
_date_to_datetime,
type='datetime',
string="Arrival date",
store=True,
),
'departure_day': fields.date(
string="Departure day",
required=True,
),
'departure_date': fields.function(
_date_to_datetime,
type='datetime',
string="Departure date",
store=True,
),
'persons_number': fields.integer(
string="Number of Persons",
),
'partner_id': fields.many2one(
'res.partner',
string="Client",
required=True,
),
'state': fields.selection(
_states,
string="Booking's state",
),
'price': fields.integer(
string="Price of booking",
),
'advance_payment': fields.function(
_get_advance_payment,
type='integer',
string="Advance payment",
store=True,
),
'balance_due': fields.function(
_get_balance_due,
type='integer',
string="Balance due",
store=True,
),
'advance_ratio': fields.function(
_get_advance_ratio,
type='integer',
string="Advance Ratio",
store=True,
),
'deposit': fields.function(
_get_deposit,
type='integer',
string="Deposit",
store=True,
),
'voucher_title': fields.function(
_get_title,
type='char',
string="Voucher's title",
store=True,
),
}
_order = 'create_date desc'
_defaults = {
'state': 'pending',
}
_sql_constraints = [
(
"house_booking_arrival_before_departure_date_constraint",
"CHECK(arrival_date < departure_date)",
"'Arrival date' should be before 'Departure date'",
),
(
"house_booking_arrival_before_departure_day_constraint",
"CHECK(arrival_day < departure_day)",
"'Arrival day' should be before 'Departure day'",
),
]
def create(self, cr, uid, values, context=None):
"""
Check availability before creating.
"""
arrival_date, departure_date = values['arrival_day'] + " 16:00:00", values['departure_day'] + " 10:00:00"
if not self.check_availability(cr, uid, arrival_date, departure_date, context=context):
raise osv.except_osv(_('Unavailable dates !'), _("Unable to book for the selected dates."))
return osv.Model.create(self, cr, uid, values, context=context)
def write(self, cr, uid, ids, values, context=None):
"""
Check availability before writing.
"""
# Can't change many booking dates at once.
if type(ids) == list and len(ids) > 1 and ('arrival_date' in values or 'departure_date' in values):
raise osv.except_osv(_('Date Change denied !'), _("Changing departure or arrival dates for several bookings at the same time is not allowed."))
elif type(ids) != list:
ids = [ids]
arrival_date, departure_date = None, None
# Get the two dates (if it is true, we are sure that there is one and only one id in 'ids')
if 'arrival_day' in values and 'departure_day' not in values:
read = self.read(cr, uid, ids[0], ['departure_date'], context=context)
arrival_date, departure_date = values['arrival_day'] + " 16:00:00", read['departure_date']
elif 'departure_day' in values and 'arrival_day' not in values:
read = self.read(cr, uid, ids[0], ['arrival_date'], context=context)
arrival_date, departure_date = read['arrival_date'], values['departure_day'] + " 10:00:00"
if arrival_date is not None: # departure_date is not None too !
# Checking available periods. (if it is true, we are sure that there is one and only one id in 'ids')
if not self.check_availability(cr, uid, arrival_date, departure_date, current_id=ids[0], context=context):
raise osv.except_osv(_('Unavailable dates !'), _("Unable to book for the selected dates."))
# self.message_post(cr, uid, ids, _('Booking <b>updated</b>'), context=context)
return osv.Model.write(self, cr, uid, ids, values, context=context)
def accept_booking(self, cr, uid, ids, context=None, *args):
"""
Change state to 'approved'.
"""
if type(ids) != list:
ids = [ids]
read = self.read(cr, uid, ids, ['price'], context=context)
if any(r['price'] <= 0 for r in read):
raise osv.except_osv(_('Price not set !'), _("Booking price has to be set."))
self.write(cr, uid, ids, {'state': 'approved'})
self.message_post(cr, uid, ids, _('Booking <b>approved</b>'), context=context)
self.send_email(cr, uid, ids, context=context)
return True
def send_email(self, cr, uid, ids, context=None):
"""Send email"""
template_id = self.pool.get('email.template').search(cr, uid, [('name', '=', 'House booking - Send by Email')], context=context)[0]
email_obj = self.pool.get('email.template')
email_obj.send_mail(cr, uid, template_id, ids[0], force_send=True)
def refuse_booking(self, cr, uid, ids, context=None, *args):
"""
Change state to 'denied'.
"""
self.write(cr, uid, ids, {'state': 'denied'})
self.message_post(cr, uid, ids, _('Booking <b>denied</b>'), context=context)
return True
def check_availability(self, cr, uid, arrival_date, departure_date, current_id=None, context=None):
"""
Return True if all dates between arrival_date and departure_date are available, False otherwise.
"""
# sch: removed on May 15
#if len(arrival_date) == 10:
# arrival_date += " 16:00:00"
#if len(departure_date) == 10:
# departure_date += " 16:00:00"
# Domain of bookings crossing targeted period.
domaine = [
('state', '!=', 'denied'),
'!',
'|',
('arrival_date','>=',departure_date),
('departure_date','<=',arrival_date),
]
sch = self.search(cr, uid, [], context=context)
brw = self.browse(cr, uid, sch, context=context)
# Remove current booking.
if current_id is not None:
domaine.insert(0, ('id', '!=', current_id))
search = self.search(cr, uid, domaine, context=context)
return len(search) == 0
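# Illustrative sketch (not part of the original module): the '!', '|' domain
# above implements the usual interval-overlap test -- two bookings clash
# unless one ends before the other starts. A plain-Python equivalent:
def _periods_overlap(existing_arrival, existing_departure, new_arrival, new_departure):
    """Return True when the requested stay overlaps the existing one."""
    return not (existing_arrival >= new_departure or existing_departure <= new_arrival)
# e.g. _periods_overlap("2014-05-10 16:00:00", "2014-05-12 10:00:00",
#                       "2014-05-11 16:00:00", "2014-05-13 10:00:00") is True,
# so check_availability() would refuse the second booking.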
| agpl-3.0 |
laumann/servo | tests/wpt/css-tests/tools/pywebsocket/src/example/echo_client.py | 442 | 44484 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Simple WebSocket client named echo_client just because of historical reason.
mod_pywebsocket directory must be in PYTHONPATH.
Example Usage:
# server setup
% cd $pywebsocket
% PYTHONPATH=$cwd/src python ./mod_pywebsocket/standalone.py -p 8880 \
-d $cwd/src/example
# run client
% PYTHONPATH=$cwd/src python ./src/example/echo_client.py -p 8880 \
-s localhost \
-o http://localhost -r /echo -m test
or
# run echo client to test IETF HyBi 00 protocol
run with --protocol-version=hybi00
"""
import base64
import codecs
import logging
from optparse import OptionParser
import os
import random
import re
import socket
import struct
import sys
from mod_pywebsocket import common
from mod_pywebsocket.extensions import DeflateFrameExtensionProcessor
from mod_pywebsocket.extensions import PerMessageDeflateExtensionProcessor
from mod_pywebsocket.extensions import _PerMessageDeflateFramer
from mod_pywebsocket.extensions import _parse_window_bits
from mod_pywebsocket.stream import Stream
from mod_pywebsocket.stream import StreamHixie75
from mod_pywebsocket.stream import StreamOptions
from mod_pywebsocket import util
_TIMEOUT_SEC = 10
_UNDEFINED_PORT = -1
_UPGRADE_HEADER = 'Upgrade: websocket\r\n'
_UPGRADE_HEADER_HIXIE75 = 'Upgrade: WebSocket\r\n'
_CONNECTION_HEADER = 'Connection: Upgrade\r\n'
# Special message that tells the echo server to start closing handshake
_GOODBYE_MESSAGE = 'Goodbye'
_PROTOCOL_VERSION_HYBI13 = 'hybi13'
_PROTOCOL_VERSION_HYBI08 = 'hybi08'
_PROTOCOL_VERSION_HYBI00 = 'hybi00'
_PROTOCOL_VERSION_HIXIE75 = 'hixie75'
# Constants for the --tls_module flag.
_TLS_BY_STANDARD_MODULE = 'ssl'
_TLS_BY_PYOPENSSL = 'pyopenssl'
# Values used by the --tls-version flag.
_TLS_VERSION_SSL23 = 'ssl23'
_TLS_VERSION_SSL3 = 'ssl3'
_TLS_VERSION_TLS1 = 'tls1'
class ClientHandshakeError(Exception):
pass
def _build_method_line(resource):
return 'GET %s HTTP/1.1\r\n' % resource
def _origin_header(header, origin):
# 4.1 13. concatenation of the string "Origin:", a U+0020 SPACE character,
# and the /origin/ value, converted to ASCII lowercase, to /fields/.
return '%s: %s\r\n' % (header, origin.lower())
def _format_host_header(host, port, secure):
# 4.1 9. Let /hostport/ be an empty string.
# 4.1 10. Append the /host/ value, converted to ASCII lowercase, to
# /hostport/
hostport = host.lower()
# 4.1 11. If /secure/ is false, and /port/ is not 80, or if /secure/
# is true, and /port/ is not 443, then append a U+003A COLON character
# (:) followed by the value of /port/, expressed as a base-ten integer,
# to /hostport/
if ((not secure and port != common.DEFAULT_WEB_SOCKET_PORT) or
(secure and port != common.DEFAULT_WEB_SOCKET_SECURE_PORT)):
hostport += ':' + str(port)
# 4.1 12. concatenation of the string "Host:", a U+0020 SPACE
# character, and /hostport/, to /fields/.
return '%s: %s\r\n' % (common.HOST_HEADER, hostport)
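# Example outputs (assuming the usual defaults of port 80 for ws:// and 443
# for wss:// in mod_pywebsocket.common):
#     _format_host_header('Example.COM', 80, False)   -> 'Host: example.com\r\n'
#     _format_host_header('example.com', 8880, False) -> 'Host: example.com:8880\r\n'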
def _receive_bytes(socket, length):
bytes = []
remaining = length
while remaining > 0:
received_bytes = socket.recv(remaining)
if not received_bytes:
raise IOError(
'Connection closed before receiving requested length '
'(requested %d bytes but received only %d bytes)' %
(length, length - remaining))
bytes.append(received_bytes)
remaining -= len(received_bytes)
return ''.join(bytes)
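# Example (illustrative): _receive_bytes(sock, 4) keeps calling sock.recv()
# until exactly 4 bytes have arrived, e.g. two recv() calls returning 'ab'
# and 'cd' yield 'abcd'; if the peer closes the connection first, an IOError
# is raised instead of returning a short read.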
def _get_mandatory_header(fields, name):
"""Gets the value of the header specified by name from fields.
This function expects that there's only one header with the specified name
in fields. Otherwise, it raises a ClientHandshakeError.
"""
values = fields.get(name.lower())
if values is None or len(values) == 0:
raise ClientHandshakeError(
'%s header not found: %r' % (name, values))
if len(values) > 1:
raise ClientHandshakeError(
'Multiple %s headers found: %r' % (name, values))
return values[0]
def _validate_mandatory_header(fields, name,
expected_value, case_sensitive=False):
"""Gets and validates the value of the header specified by name from
fields.
If expected_value is specified, compares expected value and actual value
and raises a ClientHandshakeError on failure. You can control case
sensitivity in this comparison via the case_sensitive parameter. This function
expects that there's only one header with the specified name in fields.
Otherwise, it raises a ClientHandshakeError.
"""
value = _get_mandatory_header(fields, name)
if ((case_sensitive and value != expected_value) or
(not case_sensitive and value.lower() != expected_value.lower())):
raise ClientHandshakeError(
'Illegal value for header %s: %r (expected) vs %r (actual)' %
(name, expected_value, value))
class _TLSSocket(object):
"""Wrapper for a TLS connection."""
def __init__(self,
raw_socket, tls_module, tls_version, disable_tls_compression):
self._logger = util.get_class_logger(self)
if tls_module == _TLS_BY_STANDARD_MODULE:
if tls_version == _TLS_VERSION_SSL23:
version = ssl.PROTOCOL_SSLv23
elif tls_version == _TLS_VERSION_SSL3:
version = ssl.PROTOCOL_SSLv3
elif tls_version == _TLS_VERSION_TLS1:
version = ssl.PROTOCOL_TLSv1
else:
raise ValueError(
'Invalid --tls-version flag: %r' % tls_version)
if disable_tls_compression:
raise ValueError(
'--disable-tls-compression is not available for ssl '
'module')
self._tls_socket = ssl.wrap_socket(raw_socket, ssl_version=version)
# Print cipher in use. Handshake is done on wrap_socket call.
self._logger.info("Cipher: %s", self._tls_socket.cipher())
elif tls_module == _TLS_BY_PYOPENSSL:
if tls_version == _TLS_VERSION_SSL23:
version = OpenSSL.SSL.SSLv23_METHOD
elif tls_version == _TLS_VERSION_SSL3:
version = OpenSSL.SSL.SSLv3_METHOD
elif tls_version == _TLS_VERSION_TLS1:
version = OpenSSL.SSL.TLSv1_METHOD
else:
raise ValueError(
'Invalid --tls-version flag: %r' % tls_version)
context = OpenSSL.SSL.Context(version)
if disable_tls_compression:
# OP_NO_COMPRESSION is not defined in OpenSSL module.
context.set_options(0x00020000)
self._tls_socket = OpenSSL.SSL.Connection(context, raw_socket)
# Client mode.
self._tls_socket.set_connect_state()
self._tls_socket.setblocking(True)
# Do handshake now (not necessary).
self._tls_socket.do_handshake()
else:
raise ValueError('No TLS support module is available')
def send(self, data):
return self._tls_socket.write(data)
def sendall(self, data):
return self._tls_socket.sendall(data)
def recv(self, size=-1):
return self._tls_socket.read(size)
def close(self):
return self._tls_socket.close()
def getpeername(self):
return self._tls_socket.getpeername()
class ClientHandshakeBase(object):
"""A base class for WebSocket opening handshake processors for each
protocol version.
"""
def __init__(self):
self._logger = util.get_class_logger(self)
def _read_fields(self):
# 4.1 32. let /fields/ be a list of name-value pairs, initially empty.
fields = {}
while True: # "Field"
# 4.1 33. let /name/ and /value/ be empty byte arrays
name = ''
value = ''
# 4.1 34. read /name/
name = self._read_name()
if name is None:
break
# 4.1 35. read spaces
# TODO(tyoshino): Skip only one space as described in the spec.
ch = self._skip_spaces()
# 4.1 36. read /value/
value = self._read_value(ch)
# 4.1 37. read a byte from the server
ch = _receive_bytes(self._socket, 1)
if ch != '\n': # 0x0A
raise ClientHandshakeError(
'Expected LF but found %r while reading value %r for '
'header %r' % (ch, value, name))
self._logger.debug('Received %r header', name)
# 4.1 38. append an entry to the /fields/ list that has the name
# given by the string obtained by interpreting the /name/ byte
# array as a UTF-8 stream and the value given by the string
# obtained by interpreting the /value/ byte array as a UTF-8 byte
# stream.
fields.setdefault(name, []).append(value)
# 4.1 39. return to the "Field" step above
return fields
def _read_name(self):
# 4.1 33. let /name/ be empty byte arrays
name = ''
while True:
# 4.1 34. read a byte from the server
ch = _receive_bytes(self._socket, 1)
if ch == '\r': # 0x0D
return None
elif ch == '\n': # 0x0A
raise ClientHandshakeError(
'Unexpected LF when reading header name %r' % name)
elif ch == ':': # 0x3A
return name
elif ch >= 'A' and ch <= 'Z': # Range 0x41 to 0x5A
ch = chr(ord(ch) + 0x20)
name += ch
else:
name += ch
def _skip_spaces(self):
# 4.1 35. read a byte from the server
while True:
ch = _receive_bytes(self._socket, 1)
if ch == ' ': # 0x20
continue
return ch
def _read_value(self, ch):
# 4.1 33. let /value/ be empty byte arrays
value = ''
# 4.1 36. read a byte from server.
while True:
if ch == '\r': # 0x0D
return value
elif ch == '\n': # 0x0A
raise ClientHandshakeError(
'Unexpected LF when reading header value %r' % value)
else:
value += ch
ch = _receive_bytes(self._socket, 1)
def _get_permessage_deflate_framer(extension_response):
"""Validate the response and return a framer object using the parameters in
the response. This method doesn't accept the server_.* parameters.
"""
client_max_window_bits = None
client_no_context_takeover = None
client_max_window_bits_name = (
PerMessageDeflateExtensionProcessor.
_CLIENT_MAX_WINDOW_BITS_PARAM)
client_no_context_takeover_name = (
PerMessageDeflateExtensionProcessor.
_CLIENT_NO_CONTEXT_TAKEOVER_PARAM)
# We didn't send any server_.* parameter.
# Handle those parameters as invalid if found in the response.
for param_name, param_value in extension_response.get_parameters():
if param_name == client_max_window_bits_name:
if client_max_window_bits is not None:
raise ClientHandshakeError(
'Multiple %s found' % client_max_window_bits_name)
parsed_value = _parse_window_bits(param_value)
if parsed_value is None:
raise ClientHandshakeError(
'Bad %s: %r' %
(client_max_window_bits_name, param_value))
client_max_window_bits = parsed_value
elif param_name == client_no_context_takeover_name:
if client_no_context_takeover is not None:
raise ClientHandshakeError(
'Multiple %s found' % client_no_context_takeover_name)
if param_value is not None:
raise ClientHandshakeError(
'Bad %s: Has value %r' %
(client_no_context_takeover_name, param_value))
client_no_context_takeover = True
if client_no_context_takeover is None:
client_no_context_takeover = False
return _PerMessageDeflateFramer(client_max_window_bits,
client_no_context_takeover)
class ClientHandshakeProcessor(ClientHandshakeBase):
"""WebSocket opening handshake processor for
draft-ietf-hybi-thewebsocketprotocol-06 and later.
"""
def __init__(self, socket, options):
super(ClientHandshakeProcessor, self).__init__()
self._socket = socket
self._options = options
self._logger = util.get_class_logger(self)
def handshake(self):
"""Performs opening handshake on the specified socket.
Raises:
ClientHandshakeError: handshake failed.
"""
request_line = _build_method_line(self._options.resource)
self._logger.debug('Client\'s opening handshake Request-Line: %r',
request_line)
self._socket.sendall(request_line)
fields = []
fields.append(_format_host_header(
self._options.server_host,
self._options.server_port,
self._options.use_tls))
fields.append(_UPGRADE_HEADER)
fields.append(_CONNECTION_HEADER)
if self._options.origin is not None:
if self._options.protocol_version == _PROTOCOL_VERSION_HYBI08:
fields.append(_origin_header(
common.SEC_WEBSOCKET_ORIGIN_HEADER,
self._options.origin))
else:
fields.append(_origin_header(common.ORIGIN_HEADER,
self._options.origin))
original_key = os.urandom(16)
self._key = base64.b64encode(original_key)
self._logger.debug(
'%s: %r (%s)',
common.SEC_WEBSOCKET_KEY_HEADER,
self._key,
util.hexify(original_key))
fields.append(
'%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY_HEADER, self._key))
if self._options.version_header > 0:
fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER,
self._options.version_header))
elif self._options.protocol_version == _PROTOCOL_VERSION_HYBI08:
fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER,
common.VERSION_HYBI08))
else:
fields.append('%s: %d\r\n' % (common.SEC_WEBSOCKET_VERSION_HEADER,
common.VERSION_HYBI_LATEST))
extensions_to_request = []
if self._options.deflate_frame:
extensions_to_request.append(
common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION))
if self._options.use_permessage_deflate:
extension = common.ExtensionParameter(
common.PERMESSAGE_DEFLATE_EXTENSION)
# Accept the client_max_window_bits extension parameter by default.
extension.add_parameter(
PerMessageDeflateExtensionProcessor.
_CLIENT_MAX_WINDOW_BITS_PARAM,
None)
extensions_to_request.append(extension)
if len(extensions_to_request) != 0:
fields.append(
'%s: %s\r\n' %
(common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
common.format_extensions(extensions_to_request)))
for field in fields:
self._socket.sendall(field)
self._socket.sendall('\r\n')
self._logger.debug('Sent client\'s opening handshake headers: %r',
fields)
self._logger.debug('Start reading Status-Line')
status_line = ''
while True:
ch = _receive_bytes(self._socket, 1)
status_line += ch
if ch == '\n':
break
m = re.match('HTTP/\\d+\.\\d+ (\\d\\d\\d) .*\r\n', status_line)
if m is None:
raise ClientHandshakeError(
'Wrong status line format: %r' % status_line)
status_code = m.group(1)
if status_code != '101':
self._logger.debug('Unexpected status code %s with following '
'headers: %r', status_code, self._read_fields())
raise ClientHandshakeError(
'Expected HTTP status code 101 but found %r' % status_code)
self._logger.debug('Received valid Status-Line')
self._logger.debug('Start reading headers until we see an empty line')
fields = self._read_fields()
ch = _receive_bytes(self._socket, 1)
if ch != '\n': # 0x0A
raise ClientHandshakeError(
'Expected LF but found %r while reading the empty line that '
'terminates the header fields' % (ch,))
self._logger.debug('Received an empty line')
self._logger.debug('Server\'s opening handshake headers: %r', fields)
_validate_mandatory_header(
fields,
common.UPGRADE_HEADER,
common.WEBSOCKET_UPGRADE_TYPE,
False)
_validate_mandatory_header(
fields,
common.CONNECTION_HEADER,
common.UPGRADE_CONNECTION_TYPE,
False)
accept = _get_mandatory_header(
fields, common.SEC_WEBSOCKET_ACCEPT_HEADER)
# Validate
try:
binary_accept = base64.b64decode(accept)
except TypeError, e:
raise ClientHandshakeError(
'Illegal value for header %s: %r' %
(common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
if len(binary_accept) != 20:
raise ClientHandshakeError(
'Decoded value of %s is not 20-byte long' %
common.SEC_WEBSOCKET_ACCEPT_HEADER)
self._logger.debug(
'Response for challenge : %r (%s)',
accept, util.hexify(binary_accept))
binary_expected_accept = util.sha1_hash(
self._key + common.WEBSOCKET_ACCEPT_UUID).digest()
expected_accept = base64.b64encode(binary_expected_accept)
self._logger.debug(
'Expected response for challenge: %r (%s)',
expected_accept, util.hexify(binary_expected_accept))
if accept != expected_accept:
raise ClientHandshakeError(
'Invalid %s header: %r (expected: %s)' %
(common.SEC_WEBSOCKET_ACCEPT_HEADER, accept, expected_accept))
deflate_frame_accepted = False
permessage_deflate_accepted = False
extensions_header = fields.get(
common.SEC_WEBSOCKET_EXTENSIONS_HEADER.lower())
accepted_extensions = []
if extensions_header is not None and len(extensions_header) != 0:
accepted_extensions = common.parse_extensions(extensions_header[0])
# TODO(bashi): Support the new style perframe compression extension.
for extension in accepted_extensions:
extension_name = extension.name()
if (extension_name == common.DEFLATE_FRAME_EXTENSION and
self._options.deflate_frame):
deflate_frame_accepted = True
processor = DeflateFrameExtensionProcessor(extension)
unused_extension_response = processor.get_extension_response()
self._options.deflate_frame = processor
continue
elif (extension_name == common.PERMESSAGE_DEFLATE_EXTENSION and
self._options.use_permessage_deflate):
permessage_deflate_accepted = True
framer = _get_permessage_deflate_framer(extension)
framer.set_compress_outgoing_enabled(True)
self._options.use_permessage_deflate = framer
continue
raise ClientHandshakeError(
'Unexpected extension %r' % extension_name)
if (self._options.deflate_frame and not deflate_frame_accepted):
raise ClientHandshakeError(
'Requested %s, but the server rejected it' %
common.DEFLATE_FRAME_EXTENSION)
if (self._options.use_permessage_deflate and
not permessage_deflate_accepted):
raise ClientHandshakeError(
'Requested %s, but the server rejected it' %
common.PERMESSAGE_DEFLATE_EXTENSION)
# TODO(tyoshino): Handle Sec-WebSocket-Protocol
# TODO(tyoshino): Handle Cookie, etc.
class ClientHandshakeProcessorHybi00(ClientHandshakeBase):
"""WebSocket opening handshake processor for
draft-ietf-hybi-thewebsocketprotocol-00 (equivalent to
draft-hixie-thewebsocketprotocol-76).
"""
def __init__(self, socket, options):
super(ClientHandshakeProcessorHybi00, self).__init__()
self._socket = socket
self._options = options
self._logger = util.get_class_logger(self)
if (self._options.deflate_frame or
self._options.use_permessage_deflate):
logging.critical('HyBi 00 doesn\'t support extensions.')
sys.exit(1)
def handshake(self):
"""Performs opening handshake on the specified socket.
Raises:
ClientHandshakeError: handshake failed.
"""
# 4.1 5. send request line.
self._socket.sendall(_build_method_line(self._options.resource))
# 4.1 6. Let /fields/ be an empty list of strings.
fields = []
# 4.1 7. Add the string "Upgrade: WebSocket" to /fields/.
fields.append(_UPGRADE_HEADER_HIXIE75)
# 4.1 8. Add the string "Connection: Upgrade" to /fields/.
fields.append(_CONNECTION_HEADER)
# 4.1 9-12. Add Host: field to /fields/.
fields.append(_format_host_header(
self._options.server_host,
self._options.server_port,
self._options.use_tls))
# 4.1 13. Add Origin: field to /fields/.
if not self._options.origin:
raise ClientHandshakeError(
'Specify the origin of the connection by --origin flag')
fields.append(_origin_header(common.ORIGIN_HEADER,
self._options.origin))
# TODO: 4.1 14 Add Sec-WebSocket-Protocol: field to /fields/.
# TODO: 4.1 15 Add cookie headers to /fields/.
# 4.1 16-23. Add Sec-WebSocket-Key<n> to /fields/.
self._number1, key1 = self._generate_sec_websocket_key()
self._logger.debug('Number1: %d', self._number1)
fields.append('%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY1_HEADER, key1))
self._number2, key2 = self._generate_sec_websocket_key()
self._logger.debug('Number2: %d', self._number2)
fields.append('%s: %s\r\n' % (common.SEC_WEBSOCKET_KEY2_HEADER, key2))
fields.append('%s: 0\r\n' % common.SEC_WEBSOCKET_DRAFT_HEADER)
# 4.1 24. For each string in /fields/, in a random order: send the
# string, encoded as UTF-8, followed by a UTF-8 encoded U+000D CARRIAGE
# RETURN U+000A LINE FEED character pair (CRLF).
random.shuffle(fields)
for field in fields:
self._socket.sendall(field)
# 4.1 25. send a UTF-8-encoded U+000D CARRIAGE RETURN U+000A LINE FEED
# character pair (CRLF).
self._socket.sendall('\r\n')
# 4.1 26. let /key3/ be a string consisting of eight random bytes (or
# equivalently, a random 64 bit integer encoded in a big-endian order).
self._key3 = self._generate_key3()
# 4.1 27. send /key3/ to the server.
self._socket.sendall(self._key3)
self._logger.debug(
'Key3: %r (%s)', self._key3, util.hexify(self._key3))
self._logger.info('Sent handshake')
# 4.1 28. Read bytes from the server until either the connection
# closes, or a 0x0A byte is read. let /field/ be these bytes, including
# the 0x0A bytes.
field = ''
while True:
ch = _receive_bytes(self._socket, 1)
field += ch
if ch == '\n':
break
# if /field/ is not at least seven bytes long, or if the last
# two bytes aren't 0x0D and 0x0A respectively, or if it does not
# contain at least two 0x20 bytes, then fail the WebSocket connection
# and abort these steps.
if len(field) < 7 or not field.endswith('\r\n'):
raise ClientHandshakeError('Wrong status line: %r' % field)
m = re.match('[^ ]* ([^ ]*) .*', field)
if m is None:
raise ClientHandshakeError(
'No HTTP status code found in status line: %r' % field)
# 4.1 29. let /code/ be the substring of /field/ that starts from the
# byte after the first 0x20 byte, and ends with the byte before the
# second 0x20 byte.
code = m.group(1)
# 4.1 30. if /code/ is not three bytes long, or if any of the bytes in
# /code/ are not in the range 0x30 to 0x39, then fail the WebSocket
# connection and abort these steps.
if not re.match('[0-9][0-9][0-9]', code):
raise ClientHandshakeError(
'HTTP status code %r is not three digit in status line: %r' %
(code, field))
# 4.1 31. if /code/, interpreted as UTF-8, is "101", then move to the
# next step.
if code != '101':
raise ClientHandshakeError(
'Expected HTTP status code 101 but found %r in status line: '
'%r' % (code, field))
# 4.1 32-39. read fields into /fields/
fields = self._read_fields()
# 4.1 40. _Fields processing_
# read a byte from server
ch = _receive_bytes(self._socket, 1)
if ch != '\n': # 0x0A
raise ClientHandshakeError('Expected LF but found %r' % ch)
# 4.1 41. check /fields/
# TODO(ukai): protocol
# if the entry's name is "upgrade"
# if the value is not exactly equal to the string "WebSocket",
# then fail the WebSocket connection and abort these steps.
_validate_mandatory_header(
fields,
common.UPGRADE_HEADER,
common.WEBSOCKET_UPGRADE_TYPE_HIXIE75,
True)
# if the entry's name is "connection"
# if the value, converted to ASCII lowercase, is not exactly equal
# to the string "upgrade", then fail the WebSocket connection and
# abort these steps.
_validate_mandatory_header(
fields,
common.CONNECTION_HEADER,
common.UPGRADE_CONNECTION_TYPE,
False)
origin = _get_mandatory_header(
fields, common.SEC_WEBSOCKET_ORIGIN_HEADER)
location = _get_mandatory_header(
fields, common.SEC_WEBSOCKET_LOCATION_HEADER)
# TODO(ukai): check origin, location, cookie, ..
# 4.1 42. let /challenge/ be the concatenation of /number_1/,
# expressed as a big endian 32 bit integer, /number_2/, expressed
# as big endian 32 bit integer, and the eight bytes of /key_3/ in the
# order they were sent on the wire.
challenge = struct.pack('!I', self._number1)
challenge += struct.pack('!I', self._number2)
challenge += self._key3
self._logger.debug(
'Challenge: %r (%s)', challenge, util.hexify(challenge))
# 4.1 43. let /expected/ be the MD5 fingerprint of /challenge/ as a
# big-endian 128 bit string.
expected = util.md5_hash(challenge).digest()
self._logger.debug(
'Expected challenge response: %r (%s)',
expected, util.hexify(expected))
# 4.1 44. read sixteen bytes from the server.
# let /reply/ be those bytes.
reply = _receive_bytes(self._socket, 16)
self._logger.debug(
'Actual challenge response: %r (%s)', reply, util.hexify(reply))
# 4.1 45. if /reply/ does not exactly equal /expected/, then fail
# the WebSocket connection and abort these steps.
if expected != reply:
raise ClientHandshakeError(
'Bad challenge response: %r (expected) != %r (actual)' %
(expected, reply))
# 4.1 46. The *WebSocket connection is established*.
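# Worked example of the challenge computation above (hypothetical values, not
# taken from the spec text): with self._number1 = 1, self._number2 = 2 and
# self._key3 = '01234567', the challenge is
#     '\x00\x00\x00\x01' + '\x00\x00\x00\x02' + '01234567'   # 16 bytes
# and the expected 16-byte reply is util.md5_hash(challenge).digest().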
def _generate_sec_websocket_key(self):
# 4.1 16. let /spaces_n/ be a random integer from 1 to 12 inclusive.
spaces = random.randint(1, 12)
# 4.1 17. let /max_n/ be the largest integer not greater than
# 4,294,967,295 divided by /spaces_n/.
maxnum = 4294967295 / spaces
# 4.1 18. let /number_n/ be a random integer from 0 to /max_n/
# inclusive.
number = random.randint(0, maxnum)
# 4.1 19. let /product_n/ be the result of multiplying /number_n/ and
# /spaces_n/ together.
product = number * spaces
# 4.1 20. let /key_n/ be a string consisting of /product_n/, expressed
# in base ten using the numerals in the range U+0030 DIGIT ZERO (0) to
# U+0039 DIGIT NINE (9).
key = str(product)
# 4.1 21. insert between one and twelve random characters from the
# range U+0021 to U+002F and U+003A to U+007E into /key_n/ at random
# positions.
available_chars = range(0x21, 0x2f + 1) + range(0x3a, 0x7e + 1)
n = random.randint(1, 12)
for _ in xrange(n):
ch = random.choice(available_chars)
pos = random.randint(0, len(key))
key = key[0:pos] + chr(ch) + key[pos:]
# 4.1 22. insert /spaces_n/ U+0020 SPACE characters into /key_n/ at
# random positions other than start or end of the string.
for _ in xrange(spaces):
pos = random.randint(1, len(key) - 1)
key = key[0:pos] + ' ' + key[pos:]
return number, key
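# Sketch of how a server recovers number_n from such a key (mirrors the spec
# steps above; the variable names below are illustrative only):
#     digits = int(''.join(c for c in key if c.isdigit()))   # product_n
#     spaces = key.count(' ')                                # spaces_n
#     number = digits / spaces                               # == number returned here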
def _generate_key3(self):
# 4.1 26. let /key3/ be a string consisting of eight random bytes (or
# equivalently, a random 64 bit integer encoded in a big-endian order).
return ''.join([chr(random.randint(0, 255)) for _ in xrange(8)])
class ClientConnection(object):
"""A wrapper for socket object to provide the mp_conn interface.
mod_pywebsocket library is designed to be working on Apache mod_python's
mp_conn object.
"""
def __init__(self, socket):
self._socket = socket
def write(self, data):
self._socket.sendall(data)
def read(self, n):
return self._socket.recv(n)
def get_remote_addr(self):
return self._socket.getpeername()
remote_addr = property(get_remote_addr)
class ClientRequest(object):
"""A wrapper class just to make it able to pass a socket object to
functions that expect a mp_request object.
"""
def __init__(self, socket):
self._logger = util.get_class_logger(self)
self._socket = socket
self.connection = ClientConnection(socket)
def _import_ssl():
global ssl
try:
import ssl
return True
except ImportError:
return False
def _import_pyopenssl():
global OpenSSL
try:
import OpenSSL.SSL
return True
except ImportError:
return False
class EchoClient(object):
"""WebSocket echo client."""
def __init__(self, options):
self._options = options
self._socket = None
self._logger = util.get_class_logger(self)
def run(self):
"""Run the client.
Shake hands and then repeat sending message and receiving its echo.
"""
self._socket = socket.socket()
self._socket.settimeout(self._options.socket_timeout)
try:
self._socket.connect((self._options.server_host,
self._options.server_port))
if self._options.use_tls:
self._socket = _TLSSocket(
self._socket,
self._options.tls_module,
self._options.tls_version,
self._options.disable_tls_compression)
version = self._options.protocol_version
if (version == _PROTOCOL_VERSION_HYBI08 or
version == _PROTOCOL_VERSION_HYBI13):
self._handshake = ClientHandshakeProcessor(
self._socket, self._options)
elif version == _PROTOCOL_VERSION_HYBI00:
self._handshake = ClientHandshakeProcessorHybi00(
self._socket, self._options)
else:
raise ValueError(
'Invalid --protocol-version flag: %r' % version)
self._handshake.handshake()
self._logger.info('Connection established')
request = ClientRequest(self._socket)
version_map = {
_PROTOCOL_VERSION_HYBI08: common.VERSION_HYBI08,
_PROTOCOL_VERSION_HYBI13: common.VERSION_HYBI13,
_PROTOCOL_VERSION_HYBI00: common.VERSION_HYBI00}
request.ws_version = version_map[version]
if (version == _PROTOCOL_VERSION_HYBI08 or
version == _PROTOCOL_VERSION_HYBI13):
stream_option = StreamOptions()
stream_option.mask_send = True
stream_option.unmask_receive = False
if self._options.deflate_frame is not False:
processor = self._options.deflate_frame
processor.setup_stream_options(stream_option)
if self._options.use_permessage_deflate is not False:
framer = self._options.use_permessage_deflate
framer.setup_stream_options(stream_option)
self._stream = Stream(request, stream_option)
elif version == _PROTOCOL_VERSION_HYBI00:
self._stream = StreamHixie75(request, True)
for line in self._options.message.split(','):
self._stream.send_message(line)
if self._options.verbose:
print 'Send: %s' % line
try:
received = self._stream.receive_message()
if self._options.verbose:
print 'Recv: %s' % received
except Exception, e:
if self._options.verbose:
print 'Error: %s' % e
raise
self._do_closing_handshake()
finally:
self._socket.close()
def _do_closing_handshake(self):
"""Perform closing handshake using the specified closing frame."""
if self._options.message.split(',')[-1] == _GOODBYE_MESSAGE:
# requested server initiated closing handshake, so
# expecting closing handshake message from server.
self._logger.info('Wait for server-initiated closing handshake')
message = self._stream.receive_message()
if message is None:
print 'Recv close'
print 'Send ack'
self._logger.info(
'Received closing handshake and sent ack')
return
print 'Send close'
self._stream.close_connection()
self._logger.info('Sent closing handshake')
print 'Recv ack'
self._logger.info('Received ack')
def main():
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
parser = OptionParser()
# We accept --command_line_flag style flags which is the same as Google
# gflags in addition to common --command-line-flag style flags.
parser.add_option('-s', '--server-host', '--server_host',
dest='server_host', type='string',
default='localhost', help='server host')
parser.add_option('-p', '--server-port', '--server_port',
dest='server_port', type='int',
default=_UNDEFINED_PORT, help='server port')
parser.add_option('-o', '--origin', dest='origin', type='string',
default=None, help='origin')
parser.add_option('-r', '--resource', dest='resource', type='string',
default='/echo', help='resource path')
parser.add_option('-m', '--message', dest='message', type='string',
help=('comma-separated messages to send. '
'%s will force close the connection from server.' %
_GOODBYE_MESSAGE))
parser.add_option('-q', '--quiet', dest='verbose', action='store_false',
default=True, help='suppress messages')
parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
default=False, help='use TLS (wss://). By default, '
'it looks for ssl and pyOpenSSL module and uses found '
'one. Use --tls-module option to specify which module '
'to use')
parser.add_option('--tls-module', '--tls_module', dest='tls_module',
type='choice',
choices=[_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL],
help='Use ssl module if "%s" is specified. '
'Use pyOpenSSL module if "%s" is specified' %
(_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL))
parser.add_option('--tls-version', '--tls_version',
dest='tls_version',
type='string', default=_TLS_VERSION_SSL23,
help='TLS/SSL version to use. One of \'' +
_TLS_VERSION_SSL23 + '\' (SSL version 2 or 3), \'' +
_TLS_VERSION_SSL3 + '\' (SSL version 3), \'' +
_TLS_VERSION_TLS1 + '\' (TLS version 1)')
parser.add_option('--disable-tls-compression', '--disable_tls_compression',
dest='disable_tls_compression',
action='store_true', default=False,
help='Disable TLS compression. Available only when '
'pyOpenSSL module is used.')
parser.add_option('-k', '--socket-timeout', '--socket_timeout',
dest='socket_timeout', type='int', default=_TIMEOUT_SEC,
help='Timeout(sec) for sockets')
parser.add_option('--draft75', dest='draft75',
action='store_true', default=False,
help='Obsolete option. Don\'t use this.')
parser.add_option('--protocol-version', '--protocol_version',
dest='protocol_version',
type='string', default=_PROTOCOL_VERSION_HYBI13,
help='WebSocket protocol version to use. One of \'' +
_PROTOCOL_VERSION_HYBI13 + '\', \'' +
_PROTOCOL_VERSION_HYBI08 + '\', \'' +
_PROTOCOL_VERSION_HYBI00 + '\'')
parser.add_option('--version-header', '--version_header',
dest='version_header',
type='int', default=-1,
help='Specify Sec-WebSocket-Version header value')
parser.add_option('--deflate-frame', '--deflate_frame',
dest='deflate_frame',
action='store_true', default=False,
help='Use the deflate-frame extension.')
parser.add_option('--use-permessage-deflate', '--use_permessage_deflate',
dest='use_permessage_deflate',
action='store_true', default=False,
help='Use the permessage-deflate extension.')
parser.add_option('--log-level', '--log_level', type='choice',
dest='log_level', default='warn',
choices=['debug', 'info', 'warn', 'error', 'critical'],
help='Log level.')
(options, unused_args) = parser.parse_args()
logging.basicConfig(level=logging.getLevelName(options.log_level.upper()))
if options.draft75:
logging.critical('--draft75 option is obsolete.')
sys.exit(1)
if options.protocol_version == _PROTOCOL_VERSION_HIXIE75:
logging.critical(
'Value %s is obsolete for --protocol_version options' %
_PROTOCOL_VERSION_HIXIE75)
sys.exit(1)
if options.use_tls:
if options.tls_module is None:
if _import_ssl():
options.tls_module = _TLS_BY_STANDARD_MODULE
logging.debug('Using ssl module')
elif _import_pyopenssl():
options.tls_module = _TLS_BY_PYOPENSSL
logging.debug('Using pyOpenSSL module')
else:
logging.critical(
'TLS support requires ssl or pyOpenSSL module.')
sys.exit(1)
elif options.tls_module == _TLS_BY_STANDARD_MODULE:
if not _import_ssl():
logging.critical('ssl module is not available')
sys.exit(1)
elif options.tls_module == _TLS_BY_PYOPENSSL:
if not _import_pyopenssl():
logging.critical('pyOpenSSL module is not available')
sys.exit(1)
else:
logging.critical('Invalid --tls-module option: %r',
options.tls_module)
sys.exit(1)
if (options.disable_tls_compression and
options.tls_module != _TLS_BY_PYOPENSSL):
logging.critical('You can disable TLS compression only when '
'pyOpenSSL module is used.')
sys.exit(1)
else:
if options.tls_module is not None:
logging.critical('Use --tls-module option only together with '
'--use-tls option.')
sys.exit(1)
if options.disable_tls_compression:
logging.critical('Use --disable-tls-compression only together '
'with --use-tls option.')
sys.exit(1)
# Default port number depends on whether TLS is used.
if options.server_port == _UNDEFINED_PORT:
if options.use_tls:
options.server_port = common.DEFAULT_WEB_SOCKET_SECURE_PORT
else:
options.server_port = common.DEFAULT_WEB_SOCKET_PORT
# optparse doesn't seem to handle non-ascii default values.
# Set default message here.
if not options.message:
options.message = u'Hello,\u65e5\u672c' # "Japan" in Japanese
EchoClient(options).run()
if __name__ == '__main__':
main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
lucasb-eyer/pydensecrf | tests/issue26.py | 2 | 2130 |
# coding: utf-8
# In[1]:
# import sys
# sys.path.insert(0,'/home/dlr16/Applications/anaconda2/envs/PyDenseCRF/lib/python2.7/site-packages')
# In[2]:
import numpy as np
import matplotlib.pyplot as plt
# get_ipython().magic(u'matplotlib inline')
plt.rcParams['figure.figsize'] = (20, 20)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax, create_pairwise_bilateral, create_pairwise_gaussian
# ## Start from scratch
# In[3]:
from scipy.stats import multivariate_normal
x, y = np.mgrid[0:512, 0:512]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x; pos[:, :, 1] = y
rv = multivariate_normal([256, 256], 128*128)
# In[4]:
probs = rv.pdf(pos)
probs = (probs-probs.min()) / (probs.max()-probs.min())
probs = 0.2 * (probs-0.5) + 0.5
probs = np.tile(probs[:,:,np.newaxis],(1,1,2))
probs[:,:,1] = 1 - probs[:,:,0]
# plt.plot(probs[256,:,0])
# transpose for graph
probs = np.transpose(probs,(2,0,1))
# In[17]:
# XX:IF NCHANNELS != 3, I GET ERRONEOUS OUTPUT
nchannels=4
U = unary_from_softmax(probs) # note: num classes is first dim
d = dcrf.DenseCRF2D(probs.shape[1],probs.shape[2],probs.shape[0])
d.setUnaryEnergy(U)
Q_Unary = d.inference(10)
map_soln_Unary = np.argmax(Q_Unary, axis=0).reshape((probs.shape[1],probs.shape[2]))
tmp_img = np.zeros((probs.shape[1],probs.shape[2],nchannels)).astype(np.uint8)
tmp_img[150:362,150:362,:] = 1
energy = create_pairwise_bilateral(sdims=(10,10), schan=0.01, img=tmp_img, chdim=2)
d.addPairwiseEnergy(energy, compat=10)
# This is wrong and will now raise a ValueError:
#d.addPairwiseBilateral(sxy=(10,10),
# srgb=0.01,
# rgbim=tmp_img,
# compat=10)
Q = d.inference(100)
map_soln = np.argmax(Q, axis=0).reshape((probs.shape[1],probs.shape[2]))
plt.subplot(2,2,1)
plt.imshow(probs[0,:,:])
plt.colorbar()
plt.subplot(2,2,2)
plt.imshow(map_soln_Unary)
plt.colorbar()
plt.subplot(2,2,3)
plt.imshow(tmp_img[:,:,0])
plt.colorbar()
plt.subplot(2,2,4)
plt.imshow(map_soln)
plt.colorbar()
plt.show()
| mit |
MypaceEngine/ifttt-line | libs/requests/packages/urllib3/util/retry.py | 198 | 9981 | from __future__ import absolute_import
import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is an HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
:param iterable status_forcelist:
A set of HTTP status codes that we should force a retry on.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts. urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
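# Worked example (illustrative values): with backoff_factor=0.1 the successive
# backoffs are 0 (first error), then 0.1 * 2**1 = 0.2, 0.4, 0.8, ... seconds,
# capped at BACKOFF_MAX (120 seconds).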
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/status code retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
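# Example (hypothetical counters): for Retry(total=None, connect=1, read=None),
# the first connection error decrements connect to 0, which is not yet
# exhausted because filter(None, ...) drops the 0; a second connection error
# leaves connect at -1, min(...) < 0 becomes True and increment() raises
# MaxRetryError.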
def increment(self, method=None, url=None, response=None, error=None,
_pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
cause = 'unknown'
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
else:
# Incrementing because of a server error like a 500 in
# status_forcelist and the given method is in the whitelist
_observed_errors += 1
cause = ResponseError.GENERIC_ERROR
if response and response.status:
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
| apache-2.0 |
chepazzo/ansible-modules-extras | monitoring/datadog_event.py | 76 | 5388 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Author: Artūras 'arturaz' Šlajus <[email protected]>
#
# This module is proudly sponsored by iGeolise (www.igeolise.com) and
# Tiny Lab Productions (www.tinylabproductions.com).
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: datadog_event
short_description: Posts events to DataDog service
description:
- "Allows to post events to DataDog (www.datadoghq.com) service."
- "Uses http://docs.datadoghq.com/api/#events API."
version_added: "1.3"
author: "Artūras `arturaz` Šlajus (@arturaz)"
notes: []
requirements: []
options:
api_key:
description: ["Your DataDog API key."]
required: true
default: null
title:
description: ["The event title."]
required: true
default: null
text:
description: ["The body of the event."]
required: true
default: null
date_happened:
description:
- POSIX timestamp of the event.
- Default value is now.
required: false
default: now
priority:
description: ["The priority of the event."]
required: false
default: normal
choices: [normal, low]
tags:
description: ["Comma separated list of tags to apply to the event."]
required: false
default: null
alert_type:
description: ["Type of alert."]
required: false
default: info
choices: ['error', 'warning', 'info', 'success']
aggregation_key:
description: ["An arbitrary string to use for aggregation."]
required: false
default: null
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
'''
EXAMPLES = '''
# Post an event with low priority
datadog_event: title="Testing from ansible" text="Test!" priority="low"
api_key="6873258723457823548234234234"
# Post an event with several tags
datadog_event: title="Testing from ansible" text="Test!"
api_key="6873258723457823548234234234"
tags=aa,bb,#host:{{ inventory_hostname }}
'''
import socket
def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True),
title=dict(required=True),
text=dict(required=True),
date_happened=dict(required=False, default=None, type='int'),
priority=dict(
required=False, default='normal', choices=['normal', 'low']
),
tags=dict(required=False, default=None, type='list'),
alert_type=dict(
required=False, default='info',
choices=['error', 'warning', 'info', 'success']
),
aggregation_key=dict(required=False, default=None),
source_type_name=dict(
required=False, default='my apps',
choices=['nagios', 'hudson', 'jenkins', 'user', 'my apps',
'feed', 'chef', 'puppet', 'git', 'bitbucket', 'fabric',
'capistrano']
),
validate_certs = dict(default='yes', type='bool'),
)
)
post_event(module)
def post_event(module):
uri = "https://app.datadoghq.com/api/v1/events?api_key=%s" % module.params['api_key']
body = dict(
title=module.params['title'],
text=module.params['text'],
priority=module.params['priority'],
alert_type=module.params['alert_type']
)
if module.params['date_happened'] != None:
body['date_happened'] = module.params['date_happened']
if module.params['tags'] != None:
body['tags'] = module.params['tags']
if module.params['aggregation_key'] != None:
body['aggregation_key'] = module.params['aggregation_key']
if module.params['source_type_name'] != None:
body['source_type_name'] = module.params['source_type_name']
json_body = module.jsonify(body)
headers = {"Content-Type": "application/json"}
(response, info) = fetch_url(module, uri, data=json_body, headers=headers)
if info['status'] == 200:
response_body = response.read()
response_json = module.from_json(response_body)
if response_json['status'] == 'ok':
module.exit_json(changed=True)
else:
module.fail_json(msg=response)
else:
module.fail_json(**info)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
trevor/calendarserver | contrib/performance/benchlib.py | 1 | 6872 | ##
# Copyright (c) 2010-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
import pickle
from time import time
from twisted.internet.defer import (
FirstError, DeferredList, inlineCallbacks, returnValue)
from twisted.web.http_headers import Headers
from twisted.python.log import msg
from twisted.web.http import NO_CONTENT, NOT_FOUND
from stats import Duration
from httpclient import StringProducer, readBody
class CalDAVAccount(object):
def __init__(self, agent, netloc, user, password, root, principal):
self.agent = agent
self.netloc = netloc
self.user = user
self.password = password
self.root = root
self.principal = principal
def _makeURL(self, path):
if not path.startswith('/'):
raise ValueError("Pass a relative URL with an absolute path")
return 'http://%s%s' % (self.netloc, path)
def deleteResource(self, path):
url = self._makeURL(path)
d = self.agent.request('DELETE', url)
def deleted(response):
if response.code not in (NO_CONTENT, NOT_FOUND):
raise Exception(
"Unexpected response to DELETE %s: %d" % (
url, response.code))
d.addCallback(deleted)
return d
def makeCalendar(self, path):
return self.agent.request('MKCALENDAR', self._makeURL(path))
def writeData(self, path, data, contentType):
return self.agent.request(
'PUT',
self._makeURL(path),
Headers({'content-type': [contentType]}),
StringProducer(data))
@inlineCallbacks
def _serial(fs):
for (f, args) in fs:
yield f(*args)
returnValue(None)
def initialize(agent, host, port, user, password, root, principal, calendar):
"""
If the specified calendar exists, delete it. Then re-create it empty.
"""
account = CalDAVAccount(
agent,
"%s:%d" % (host, port),
user=user, password=password,
root=root, principal=principal)
cal = "/calendars/users/%s/%s/" % (user, calendar)
d = _serial([
(account.deleteResource, (cal,)),
(account.makeCalendar, (cal,))])
d.addCallback(lambda ignored: account)
return d
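# Illustrative call (host, port, credentials and calendar name are hypothetical);
# initialize() returns a Deferred that fires with the ready CalDAVAccount:
#
#   d = initialize(agent, 'localhost', 8008, 'user01', 'user01',
#                  '/', '/principals/users/user01/', 'monkeys3')
#   d.addCallback(lambda account: account.writeData(
#       '/calendars/users/user01/monkeys3/event.ics', icsData, 'text/calendar'))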
def firstResult(deferreds):
"""
Return a L{Deferred} which fires when the first L{Deferred} from
C{deferreds} fires.
@param deferreds: A sequence of Deferreds to wait on.
"""
@inlineCallbacks
def sample(dtrace, sampleTime, agent, paramgen, responseCode, concurrency=1):
urlopen = Duration('HTTP')
data = {urlopen: []}
def once():
msg('emitting request')
before = time()
params = paramgen()
d = agent.request(*params)
def cbResponse(response):
if response.code != responseCode:
raise Exception(
"Request %r received unexpected response code: %d" % (
params, response.code))
d = readBody(response)
def cbBody(ignored):
after = time()
msg('response received')
# Give things a moment to settle down. This is a hack
# to try to collect the last of the dtrace output
# which may still be sitting in the write buffer of
# the dtrace process. It would be nice if there were
# a more reliable way to know when we had it all, but
# no luck on that front so far. The implementation of
# mark is supposed to take care of that, but the
# assumption it makes about ordering of events appears
# to be invalid.
# XXX Disabled until I get a chance to seriously
# measure what affect, if any, it has.
# d = deferLater(reactor, 0.5, dtrace.mark)
d = dtrace.mark()
def cbStats(stats):
msg('stats collected')
for k, v in stats.iteritems():
data.setdefault(k, []).append(v)
data[urlopen].append(after - before)
d.addCallback(cbStats)
return d
d.addCallback(cbBody)
return d
d.addCallback(cbResponse)
return d
msg('starting dtrace')
yield dtrace.start()
msg('dtrace started')
start = time()
requests = []
for _ignore_i in range(concurrency):
requests.append(once())
while requests:
try:
_ignore_result, index = yield DeferredList(requests, fireOnOneCallback=True, fireOnOneErrback=True)
except FirstError, e:
e.subFailure.raiseException()
# Get rid of the completed Deferred
del requests[index]
if time() > start + sampleTime:
# Wait for the rest of the outstanding requests to keep things tidy
yield DeferredList(requests)
# And then move on
break
else:
# And start a new operation to replace it
try:
requests.append(once())
except StopIteration:
# Ran out of work to do, so paramgen raised a
# StopIteration. This is pretty sad. Catch it or it
# will demolish inlineCallbacks.
if len(requests) == concurrency - 1:
msg('exhausted parameter generator')
msg('stopping dtrace')
leftOver = yield dtrace.stop()
msg('dtrace stopped')
for (k, v) in leftOver.items():
if v:
print('Extra', k, ':', v)
returnValue(data)
def select(statistics, benchmark, parameter, statistic):
for stat, samples in statistics[benchmark][int(parameter)].iteritems():
if stat.name == statistic:
return (stat, samples)
raise ValueError("Unknown statistic %r" % (statistic,))
def load_stats(statfiles):
data = []
for fname in statfiles:
fname, bench, param, stat = fname.split(',')
stats, samples = select(
pickle.load(file(fname)), bench, param, stat)
data.append((stats, samples))
if data:
assert len(samples) == len(data[0][1])
return data
| apache-2.0 |
Endika/odoomrp-wip | base_partner_references/__openerp__.py | 27 | 1618 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "Partner Reference Codes",
"version": "1.0",
"depends": [
"base"
],
"author": "OdooMRP team,"
"AvanzOSC,"
"Serv. Tecnol. Avanzados - Pedro M. Baeza",
"contributors": [
"Oihane Crucelaegui <[email protected]>",
],
"category": "Custom Module",
"website": "http://www.odoomrp.com",
"summary": "Supplier & Customer codes",
"description": """
    This module adds:
        * A customer reference code when the partner is a supplier
        * A supplier reference code when the partner is a customer
        All of this data is stored on the partner
""",
"data": [
"views/res_partner_view.xml",
],
"installable": True,
"auto_install": False,
}
| agpl-3.0 |
mattstep/ansible | lib/ansible/inventory/host.py | 15 | 3750 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.inventory.group import Group
from ansible.utils.vars import combine_vars
__all__ = ['Host']
class Host:
''' a single ansible host '''
#__slots__ = [ 'name', 'vars', 'groups' ]
def __getstate__(self):
return self.serialize()
def __setstate__(self, data):
return self.deserialize(data)
def __eq__(self, other):
return self.name == other.name
def serialize(self):
groups = []
for group in self.groups:
groups.append(group.serialize())
return dict(
name=self.name,
vars=self.vars.copy(),
ipv4_address=self.ipv4_address,
ipv6_address=self.ipv6_address,
gathered_facts=self._gathered_facts,
groups=groups,
)
def deserialize(self, data):
self.__init__()
self.name = data.get('name')
self.vars = data.get('vars', dict())
self.ipv4_address = data.get('ipv4_address', '')
self.ipv6_address = data.get('ipv6_address', '')
groups = data.get('groups', [])
for group_data in groups:
g = Group()
g.deserialize(group_data)
self.groups.append(g)
def __init__(self, name=None, port=None):
self.name = name
self.vars = {}
self.groups = []
self.ipv4_address = name
self.ipv6_address = name
if port and port != C.DEFAULT_REMOTE_PORT:
self.set_variable('ansible_ssh_port', int(port))
self._gathered_facts = False
def __repr__(self):
return self.get_name()
def get_name(self):
return self.name
@property
def gathered_facts(self):
return self._gathered_facts
def set_gathered_facts(self, gathered):
self._gathered_facts = gathered
def add_group(self, group):
self.groups.append(group)
def set_variable(self, key, value):
self.vars[key]=value
def get_groups(self):
groups = {}
for g in self.groups:
groups[g.name] = g
ancestors = g.get_ancestors()
for a in ancestors:
groups[a.name] = a
return groups.values()
def get_vars(self):
results = {}
groups = self.get_groups()
for group in sorted(groups, key=lambda g: g.depth):
results = combine_vars(results, group.get_vars())
results = combine_vars(results, self.vars)
results['inventory_hostname'] = self.name
results['inventory_hostname_short'] = self.name.split('.')[0]
results['ansible_ssh_host'] = self.ipv4_address
if 'ansible_ssh_port' not in results:
results['ansible_ssh_port'] = C.DEFAULT_REMOTE_PORT
results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
return results
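    # Illustrative precedence sketch (hypothetical names): group vars are merged
    # from shallow to deep groups, then host vars override all of them.
    #
    #   g = Group('webservers')
    #   g.set_variable('http_port', 80)       # group level
    #   h = Host('web1.example.com')
    #   h.add_group(g)
    #   h.set_variable('http_port', 8080)     # host level overrides the group
    #   h.get_vars()['http_port']             # -> 8080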
| gpl-3.0 |
HackBulgaria/Odin | forum/migrations/0001_initial.py | 1 | 1586 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
('title', models.CharField(max_length=128)),
('text', models.CharField(max_length=512)),
('ordering', models.PositiveSmallIntegerField(default=0)),
],
options={
'verbose_name_plural': 'categories',
'ordering': ('ordering',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
('text', models.TextField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
('title', models.CharField(max_length=128)),
('text', models.TextField()),
('date', models.DateField(auto_now=True)),
],
options={
},
bases=(models.Model,),
),
]
| agpl-3.0 |
ShingYang/pefile | peutils.py | 4 | 18239 | # -*- coding: Latin-1 -*-
"""peutils, Portable Executable utilities module
Copyright (c) 2005-2013 Ero Carrera <[email protected]>
All rights reserved.
For detailed copyright information see the file COPYING in
the root of the distribution archive.
"""
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import range
from past.utils import old_div
from builtins import object
import os
import re
import string
import urllib.request, urllib.parse, urllib.error
import pefile
__author__ = 'Ero Carrera'
__version__ = pefile.__version__
__contact__ = '[email protected]'
class SignatureDatabase(object):
"""This class loads and keeps a parsed PEiD signature database.
Usage:
sig_db = SignatureDatabase('/path/to/signature/file')
and/or
sig_db = SignatureDatabase()
sig_db.load('/path/to/signature/file')
Signature databases can be combined by performing multiple loads.
The filename parameter can be a URL too. In that case the
signature database will be downloaded from that location.
"""
def __init__(self, filename=None, data=None):
# RegExp to match a signature block
#
self.parse_sig = re.compile(
'\[(.*?)\]\s+?signature\s*=\s*(.*?)(\s+\?\?)*\s*ep_only\s*=\s*(\w+)(?:\s*section_start_only\s*=\s*(\w+)|)', re.S)
# Signature information
#
# Signatures are stored as trees using dictionaries
# The keys are the byte values while the values for
# each key are either:
#
# - Other dictionaries of the same form for further
# bytes in the signature
#
# - A dictionary with a string as a key (packer name)
# and None as value to indicate a full signature
#
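        # For example, two (illustrative) signatures '60 E8' -> 'Packer A' and
        # '60 61' -> 'Packer B' would be stored as:
        #
        #   {0x60: {0xe8: {'Packer A': None},
        #           0x61: {'Packer B': None}}}
        #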
self.signature_tree_eponly_true = dict ()
self.signature_count_eponly_true = 0
self.signature_tree_eponly_false = dict ()
self.signature_count_eponly_false = 0
self.signature_tree_section_start = dict ()
self.signature_count_section_start = 0
# The depth (length) of the longest signature
#
self.max_depth = 0
self.__load(filename=filename, data=data)
def generate_section_signatures(self, pe, name, sig_length=512):
"""Generates signatures for all the sections in a PE file.
If the section contains any data a signature will be created
for it. The signature name will be a combination of the
parameter 'name' and the section number and its name.
"""
section_signatures = list()
for idx, section in enumerate(pe.sections):
if section.SizeOfRawData < sig_length:
continue
#offset = pe.get_offset_from_rva(section.VirtualAddress)
offset = section.PointerToRawData
sig_name = '%s Section(%d/%d,%s)' % (
name, idx + 1, len(pe.sections),
''.join([c for c in section.Name if c in string.printable]))
section_signatures.append(
self.__generate_signature(
pe, offset, sig_name, ep_only=False,
section_start_only=True,
sig_length=sig_length) )
return '\n'.join(section_signatures)+'\n'
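    # Illustrative call (sample name and file are hypothetical):
    #
    #   sig_db = SignatureDatabase()
    #   print(sig_db.generate_section_signatures(pefile.PE('sample.exe'), 'MySample'))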
def generate_ep_signature(self, pe, name, sig_length=512):
"""Generate signatures for the entry point of a PE file.
        Creates a signature whose name will be the parameter 'name'.
"""
offset = pe.get_offset_from_rva(pe.OPTIONAL_HEADER.AddressOfEntryPoint)
return self.__generate_signature(
pe, offset, name, ep_only=True, sig_length=sig_length)
def __generate_signature(self, pe, offset, name, ep_only=False,
section_start_only=False, sig_length=512):
data = pe.__data__[offset:offset+sig_length]
signature_bytes = ' '.join(['%02x' % ord(c) for c in data])
if ep_only == True:
ep_only = 'true'
else:
ep_only = 'false'
if section_start_only == True:
section_start_only = 'true'
else:
section_start_only = 'false'
signature = '[%s]\nsignature = %s\nep_only = %s\nsection_start_only = %s\n' % (
name, signature_bytes, ep_only, section_start_only)
return signature
def match(self, pe, ep_only=True, section_start_only=False):
"""Matches and returns the exact match(es).
If ep_only is True the result will be a string with
the packer name. Otherwise it will be a list of the
        form (file_offset, packer_name), specifying where
        in the file the signature was found.
"""
matches = self.__match(pe, ep_only, section_start_only)
# The last match (the most precise) from the
# list of matches (if any) is returned
#
if matches:
if ep_only == False:
# Get the most exact match for each list of matches
# at a given offset
#
return [(match[0], match[1][-1]) for match in matches]
return matches[1][-1]
return None
def match_all(self, pe, ep_only=True, section_start_only=False):
"""Matches and returns all the likely matches."""
matches = self.__match(pe, ep_only, section_start_only)
if matches:
if ep_only == False:
# Get the most exact match for each list of matches
# at a given offset
#
return matches
return matches[1]
return None
def __match(self, pe, ep_only, section_start_only):
# Load the corresponding set of signatures
# Either the one for ep_only equal to True or
# to False
#
if section_start_only is True:
# Fetch the data of the executable as it'd
# look once loaded in memory
#
try :
data = pe.__data__
except Exception as excp :
raise
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_section_start
# Set the starting address to start scanning from
#
scan_addresses = [section.PointerToRawData for section in pe.sections]
elif ep_only is True:
# Fetch the data of the executable as it'd
# look once loaded in memory
#
try :
data = pe.get_memory_mapped_image()
except Exception as excp :
raise
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_eponly_true
# Fetch the entry point of the PE file and the data
# at the entry point
#
ep = pe.OPTIONAL_HEADER.AddressOfEntryPoint
# Set the starting address to start scanning from
#
scan_addresses = [ep]
else:
data = pe.__data__
signatures = self.signature_tree_eponly_false
scan_addresses = range( len(data) )
# For each start address, check if any signature matches
#
matches = []
for idx in scan_addresses:
result = self.__match_signature_tree(
signatures,
data[idx:idx+self.max_depth])
if result:
matches.append( (idx, result) )
# Return only the matched items found at the entry point if
# ep_only is True (matches will have only one element in that
# case)
#
if ep_only is True:
if matches:
return matches[0]
return matches
def match_data(self, code_data, ep_only=True, section_start_only=False):
data = code_data
scan_addresses = [ 0 ]
# Load the corresponding set of signatures
# Either the one for ep_only equal to True or
# to False
#
if section_start_only is True:
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_section_start
# Set the starting address to start scanning from
#
elif ep_only is True:
# Load the corresponding tree of signatures
#
signatures = self.signature_tree_eponly_true
# For each start address, check if any signature matches
#
matches = []
for idx in scan_addresses:
result = self.__match_signature_tree(
signatures,
data[idx:idx+self.max_depth])
if result:
matches.append( (idx, result) )
# Return only the matched items found at the entry point if
# ep_only is True (matches will have only one element in that
# case)
#
if ep_only is True:
if matches:
return matches[0]
return matches
def __match_signature_tree(self, signature_tree, data, depth = 0):
"""Recursive function to find matches along the signature tree.
signature_tree is the part of the tree left to walk
data is the data being checked against the signature tree
depth keeps track of how far we have gone down the tree
"""
matched_names = list ()
match = signature_tree
# Walk the bytes in the data and match them
# against the signature
#
for idx, byte in enumerate ( [b if isinstance(b, int) else ord(b) for b in data] ):
# If the tree is exhausted...
#
if match is None :
break
# Get the next byte in the tree
#
match_next = match.get(byte, None)
# If None is among the values for the key
# it means that a signature in the database
# ends here and that there's an exact match.
#
if None in list(match.values()):
# idx represent how deep we are in the tree
#
#names = [idx+depth]
names = list()
# For each of the item pairs we check
# if it has an element other than None,
# if not then we have an exact signature
#
for item in list(match.items()):
if item[1] is None :
names.append (item[0])
matched_names.append(names)
# If a wildcard is found keep scanning the signature
# ignoring the byte.
#
if '??' in match :
match_tree_alternate = match.get ('??', None)
data_remaining = data[idx + 1 :]
if data_remaining:
matched_names.extend(
self.__match_signature_tree(
match_tree_alternate, data_remaining, idx+depth+1))
match = match_next
# If we have any more packer name in the end of the signature tree
# add them to the matches
#
if match is not None and None in list(match.values()):
#names = [idx + depth + 1]
names = list()
for item in list(match.items()) :
if item[1] is None:
names.append(item[0])
matched_names.append(names)
return matched_names
def load(self , filename=None, data=None):
"""Load a PEiD signature file.
Invoking this method on different files combines the signatures.
"""
self.__load(filename=filename, data=data)
def __load(self, filename=None, data=None):
if filename is not None:
# If the path does not exist, attempt to open a URL
#
if not os.path.exists(filename):
try:
sig_f = urllib.request.urlopen(filename)
sig_data = sig_f.read()
sig_f.close()
except IOError:
# Let this be raised back to the user...
raise
else:
# Get the data for a file
#
try:
sig_f = open( filename, 'rt' )
sig_data = sig_f.read()
sig_f.close()
except IOError:
# Let this be raised back to the user...
raise
else:
sig_data = data
# If the file/URL could not be read or no "raw" data
# was provided there's nothing else to do
#
if not sig_data:
return
# Helper function to parse the signature bytes
#
def to_byte(value):
if '?' in value:
return value
return int(value, 16)
# Parse all the signatures in the file
#
matches = self.parse_sig.findall(sig_data)
# For each signature, get the details and load it into the
# signature tree
#
for packer_name, signature, superfluous_wildcards, ep_only, section_start_only in matches:
ep_only = ep_only.strip().lower()
signature = signature.replace('\\n', '').strip()
signature_bytes = [to_byte(b) for b in signature.split()]
if ep_only == 'true':
ep_only = True
else:
ep_only = False
if section_start_only == 'true':
section_start_only = True
else:
section_start_only = False
depth = 0
if section_start_only is True:
tree = self.signature_tree_section_start
self.signature_count_section_start += 1
else:
if ep_only is True :
tree = self.signature_tree_eponly_true
self.signature_count_eponly_true += 1
else :
tree = self.signature_tree_eponly_false
self.signature_count_eponly_false += 1
for idx, byte in enumerate (signature_bytes) :
if idx+1 == len(signature_bytes):
tree[byte] = tree.get( byte, dict() )
tree[byte][packer_name] = None
else :
tree[byte] = tree.get ( byte, dict() )
tree = tree[byte]
depth += 1
if depth > self.max_depth:
self.max_depth = depth
def is_valid( pe ):
""""""
pass
def is_suspicious( pe ):
"""
unusual locations of import tables
non recognized section names
presence of long ASCII strings
"""
relocations_overlap_entry_point = False
sequential_relocs = 0
# If relocation data is found and the entries go over the entry point, and also are very
# continuous or point outside section's boundaries => it might imply that an obfuscation
# trick is being used or the relocations are corrupt (maybe intentionally)
#
if hasattr(pe, 'DIRECTORY_ENTRY_BASERELOC'):
for base_reloc in pe.DIRECTORY_ENTRY_BASERELOC:
last_reloc_rva = None
for reloc in base_reloc.entries:
if reloc.rva <= pe.OPTIONAL_HEADER.AddressOfEntryPoint <= reloc.rva + 4:
relocations_overlap_entry_point = True
if last_reloc_rva is not None and last_reloc_rva <= reloc.rva <= last_reloc_rva + 4:
sequential_relocs += 1
last_reloc_rva = reloc.rva
    # If import tables or strings are pointed to within the header or in the area
    # between the PE header and the first section, that's suspicious
#
# IMPLEMENT
warnings_while_parsing = False
    # If we have warnings, that's suspicious; some of those will be because out-of-the-ordinary
    # values are found in the PE header fields
# Things that are reported in warnings:
# (parsing problems, special section characteristics i.e. W & X, uncommon values of fields,
# unusual entrypoint, suspicious imports)
#
warnings = pe.get_warnings()
if warnings:
        warnings_while_parsing = True
    # If there are few or no longer (>8 character) ASCII sequences (these should come with a standard
    # "density" of strings per kilobyte of data), that might indicate packed data. This is similar to
    # the entropy test in some ways but might help to discard cases of legitimate installer or compressed data
# If compressed data (high entropy) and is_driver => uuuuhhh, nasty
pass
def is_probably_packed( pe ):
"""Returns True is there is a high likelihood that a file is packed or contains compressed data.
The sections of the PE file will be analyzed, if enough sections
look like containing containing compressed data and the data makes
up for more than 20% of the total file size. The function will
return True.
"""
    # Calculate the length of the data up to the end of the last section in the
# file. Overlay data won't be taken into account
#
total_pe_data_length = len( pe.trim() )
has_significant_amount_of_compressed_data = False
    # If some of the sections have high entropy and they make up more than 20% of the file's size
# it's assumed that it could be an installer or a packed file
total_compressed_data = 0
for section in pe.sections:
s_entropy = section.get_entropy()
s_length = len( section.get_data() )
        # The value of 7.4 is empirical, based on looking at a few files packed
# by different packers
if s_entropy > 7.4:
total_compressed_data += s_length
if (old_div((1.0 * total_compressed_data),total_pe_data_length)) > .2:
has_significant_amount_of_compressed_data = True
return has_significant_amount_of_compressed_data
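# Illustrative usage (file name is hypothetical):
#
#   import pefile
#   pe = pefile.PE('suspect.exe')
#   if is_probably_packed(pe):
#       print('high-entropy sections dominate this file; it is probably packed')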
| mit |
piffey/ansible | lib/ansible/modules/network/avi/avi_cloudconnectoruser.py | 41 | 4186 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloudconnectoruser
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of CloudConnectorUser Avi RESTful Object
description:
- This module is used to configure CloudConnectorUser object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
azure_serviceprincipal:
description:
- Field introduced in 17.2.1.
version_added: "2.5"
azure_userpass:
description:
- Field introduced in 17.2.1.
version_added: "2.5"
name:
description:
- Name of the object.
required: true
private_key:
description:
- Private_key of cloudconnectoruser.
public_key:
description:
- Public_key of cloudconnectoruser.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a Cloud connector user that is used for integration into cloud platforms
avi_cloudconnectoruser:
controller: '{{ controller }}'
name: root
password: '{{ password }}'
private_key: |
-----BEGIN RSA PRIVATE KEY-----
-----END RSA PRIVATE KEY-----'
public_key: 'ssh-rsa ...'
tenant_ref: admin
username: '{{ username }}'
"""
RETURN = '''
obj:
description: CloudConnectorUser (api/cloudconnectoruser) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
azure_serviceprincipal=dict(type='dict',),
azure_userpass=dict(type='dict',),
name=dict(type='str', required=True),
private_key=dict(type='str', no_log=True,),
public_key=dict(type='str',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cloudconnectoruser',
set(['private_key']))
if __name__ == '__main__':
main()
| gpl-3.0 |
JohanComparat/nbody-npt-functions | bin/bin_onePT/simulation_vs_data_plot.py | 1 | 7351 | import astropy.units as uu
import astropy.cosmology as co
aa = co.Planck13
import math as m
from scipy.integrate import quad
import os
aah = co.FlatLambdaCDM(H0=100.0 *uu.km / (uu.Mpc *uu.s), Om0=0.307, Tcmb0=2.725 *uu.K, Neff=3.05, m_nu=[ 0. , 0. , 0.06]*uu.eV, Ob0=0.0483)
rhom0 = aah.critical_density0.to(uu.solMass*uu.Mpc**-3).value
"""
#aa.critical_density0.to(uu.solMass*uu.Mpc**-3).value
#aah.critical_density0.to(uu.solMass*uu.Mpc**-3).value
vol = lambda zmin, zmax, area : (aah.comoving_volume(zmax)-aah.comoving_volume(zmin))*n.pi*area/129600.
volELG = n.log10(vol(0.6,1.,1400).value)
volQSO = n.log10(vol(0.9,2.2,7500).value)
volLya = n.log10(vol(2.1,3.5,7500).value)
n.log10(vol(0.9,1.6,24000).value)
n.log10(vol(0.9,2.2,24000).value)
n.log10(vol(0.1,0.4,8000).value)
n.log10(vol(0.75,1.4,2).value)
n.log10(vol(0.2,1.,6).value)
n.log10(vol(0.2,1.4,0.6).value)
"""
from scipy.interpolate import interp1d
import numpy as n
import matplotlib
matplotlib.rcParams['font.size']=12
import matplotlib.pyplot as p
import glob
import sys
from scipy.optimize import curve_fit
import cPickle
from os.path import join
from scipy.optimize import minimize
# limits at z0
Npmin = 1000
limits_04 = [Npmin*9.63 * 10**7, 5e12]
limits_10 = [Npmin*1.51 * 10**9., 5e13]
limits_25 = [Npmin*2.359 * 10**10., 5e14]
limits_40 = [Npmin* 9.6 * 10**10. , 5e15]
zmin = 0.
zmax = 5
NDecimal = 3
# defining directories :
dir = ".." #join("D:","data","MultiDark")
zl_04 = join(dir,"MD_0.4Gpc","output_SMD.list")
zl_10 = join(dir,"MD_1Gpc","output_MDPL.list")
zl_25 = join(dir,"MD_2.5Gpc","output_BigMD.list")
zl_40 = join(dir,"MD_4Gpc","output_HMD.list")
"""
n0_04, a0_04, z0_04 = n.loadtxt(zl_04,unpack=True)
n0_10, z0_10, a0_10 = n.loadtxt(zl_10,unpack=True)
n0_25, a0_25, z0_25 = n.loadtxt(zl_25,unpack=True)
n0_40, z0_40, a0_40 = n.loadtxt(zl_40,unpack=True)
print n.max(z0_04[z0_04<3]), len(z0_04[z0_04<3])
print n.max(z0_10[z0_10<3]), len(z0_10[z0_10<3])
print n.max(z0_25[z0_25<3]), len(z0_25[z0_25<3])
print n.max(z0_40[z0_40<3]), len(z0_40[z0_40<3])
fig = p.figure(0,(13,6))
#fig.axes([0.15,0.3,0.8, 0.5])
ax1 = fig.add_subplot(111)
bins = n.arange(0.,1,0.01)
ax1.hist(n.log10(1+z0_04), bins=bins,label='SMD', histtype='step', lw=4)
ax1.hist(n.log10(1+z0_10), bins=bins,label='MDPL', histtype='step',lw=2)
ax1.hist(n.log10(1+z0_25), bins=bins,label='BigMD', histtype='step')
ax1.hist(n.log10(1+z0_40), bins=bins,label='HMD', histtype='step')
ax1.plot([n.log10(1+0.9),n.log10(1+2.2)],[1.7,1.7], label='QSO', lw=3)
ax1.plot([n.log10(1+0.6),n.log10(1+1.0)],[1.5,1.5], label='ELG', lw=3)
ax1.plot([n.log10(1+0.4),n.log10(1+0.8)],[1.3,1.3], label='LRG', lw=3)
ax1.grid()
xtik = n.hstack((n.arange(0,1.1,0.2), [1.5, 2, 3]))
ages = n.array([n.round(aa.age(xt).value) for xt in xtik ])
ax1.set_xticks(n.log10(1+xtik))
ax1.set_xticklabels(xtik)
ax1.set_xlabel('redshift')
ax1.set_ylabel('N snapshots / dlog(1+z)=0.01')
gl = p.legend(loc=0)
gl.set_frame_on(False)
ax2 = ax1.twiny()
#ax2.set_xlabel('age Gyr')
ax2.set_xticks(ax1.get_xticks())
ax2.set_xticklabels(ages)
ax2.set_xlabel('age (Gyr)')
#ax1.set_xlim((0,0.6))
#ax1.set_ylim((0,10))
p.savefig(join("..","presentationPlots","Nsnapshots.png"))
p.clf()
"""
name, Lbox, Npart, Mp = n.loadtxt( join("..", "data","existing-boxes.txt"), unpack=True, dtype=[('name', '<S20'), ('Lbox', '<i4'), ('Npart', '<i4'), ('Mpart', '<f4')])
nameS, volumeS_i, MhaloS = n.loadtxt(join("..", "data","surveys.txt"), unpack=True, dtype=[('name', '<S12'), ('volumeS', '<f4'), ('MhaloS', '<f4')])
h=0.7
volumeS = volumeS_i - n.log10( h**3.)
logVol, massN100, massN10k, massN1M = n.loadtxt(join("..", "data", "M200c-volume-number.txt"), unpack=True)
NpH = 300
funPP = lambda volume, npart: 100*aa.h**2*rhom0* volume / npart
vols = n.logspace(5,13,100)
fig = p.figure(1,(6,6))
#fig.axes([0.15,0.3,0.8, 0.5])
ax1 = fig.add_subplot(111)
#sel = (massN100>0)
#ax1.plot(10**logVol[sel]/h**3., 1.5*10**massN100[sel], label = r'HMF $10^2$ halos')
#sel = (massN10k>0)
#ax1.plot(10**logVol[sel]/h**3., 1.5*10**massN10k[sel], label = r'HMF $10^4$ halos')
sel = (massN1M>0)
ax1.plot(10**logVol[sel]/h**3., 1.5*10**massN1M[sel], label = r'HMF $10^6$ halos')
for ii, el in enumerate(name):
#ax1.plot(Lbox[ii]**3., 300*10**Mp[ii], 'kx')
ax1.plot(Lbox[ii]**3., NpH*10**Mp[ii], 'k+')
#ax1.plot(Lbox[ii]**3., 300*10**Mp[ii], 'k_')
#ax1.arrow(Lbox[ii]**3.,NpH*10**Mp[ii],0, NpH*10**Mp[ii]*2, fc='k', ec='k',head_width=Lbox[ii]**3.*0.9, head_length=NpH*10**Mp[ii]*1.1)
#ax1.arrow(Lbox[ii]**3.,NpH*10**Mp[ii],-(Lbox[ii])**3./2., 0, fc='k', ec='k')
ax1.annotate(el.replace('\\n','\n'), xy=(1.1*Lbox[ii]**3., NpH*10**Mp[ii]),fontsize=8)#,rotation=45) #, xytext=(Lbox[ii]**3.*1.07, 1.07*NpH*10**Mp[ii])
for ii, el in enumerate(nameS):
#print el
p.plot(10**volumeS[ii], 10**MhaloS[ii], 'b^')
ax1.annotate(el.replace('\\n','\n'), xy=(1.1*10**volumeS[ii], 10**MhaloS[ii]),color='b',fontsize=8)#,rotation=45)#, xytext=(10**volumeS[ii]/10, 100*10**MhaloS[ii]),color='b',fontsize=11,rotation=45)
ax1.plot(vols, funPP(vols, 1000**3.), 'r--', label=r'1000$^3$')
ax1.plot(vols, funPP(vols, 4000**3.), 'm--', label=r'4000$^3$')
ax1.plot(vols, funPP(vols, 10000**3.), 'y--', label=r'10000$^3$')
ax1.plot(vols, funPP(vols, 40000**3.), 'c--', label=r'40000$^3$')
totalVolume = aa.comoving_volume(3.5).value*2./3
ax1.axvline(totalVolume, color='k', ls='dotted', label=r'$\frac{2}{3}V(z<3.5)$')
p.plot(totalVolume, 3e10, 'k*')
p.annotate('Ultimate\nCosmology\nSimulation', xy=(totalVolume*1.1, 3e10), color='k', fontsize=8)
ax1.set_xscale('log')
ax1.set_yscale('log')
p.grid()
ax1.set_xlabel(r'volume [Mpc$^{3}$]')
ax1.set_ylabel(r'Halo mass resolved [$M_\odot$]')
ax1.set_ylim((7e9,1e15))
ax1.set_xlim((1e6, 1e13))
#p.title(str(NpH)+' particles per halo')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.savefig(join(os.environ['MVIR_DIR'],"MassHalo-Volume.png"))
p.clf()
sys.exit()
fig = p.figure(1,(6,6))
#fig.axes([0.15,0.3,0.8, 0.5])
ax1 = fig.add_subplot(111)
for ii, el in enumerate(name):
p.plot(Lbox[ii], 300*10**Mp[ii], 'bo')
ax1.annotate(el, xy=(Lbox[ii], 300*10**Mp[ii]), xytext=(Lbox[ii]*1.07, 1.07*300*10**Mp[ii]))
ax1.set_xscale('log')
ax1.set_yscale('log')
p.grid()
ax1.set_xlabel('box length [Mpc/h]')
ax1.set_ylabel('Halo mass resolved [Msun/h]')
p.savefig(join("..","presentationPlots","Mass-Lbox.png"))
p.clf()
fig = p.figure(1,(6,6))
#fig.axes([0.15,0.3,0.8, 0.5])
ax1 = fig.add_subplot(111)
for ii, el in enumerate(name):
p.plot(Lbox[ii]**3., Npart[ii]**3., 'bo')
ax1.annotate(el, xy=(Lbox[ii]**3., Npart[ii]**3.), xytext=(Lbox[ii]**3.*1.07, 1.07*Npart[ii]**3.))
ax1.set_xscale('log')
ax1.set_yscale('log')
p.grid()
ax1.set_xlabel(r'box volume [Mpc$^{3}/h^{-3}$]')
ax1.set_ylabel('Number of particle')
p.savefig(join("..","presentationPlots","Npart-Volume.png"))
p.clf()
fig = p.figure(1,(6,6))
#fig.axes([0.15,0.3,0.8, 0.5])
ax1 = fig.add_subplot(111)
for ii, el in enumerate(name):
p.plot(Lbox[ii]**3., 10**-Mp[ii], 'bo')
ax1.annotate(el, xy=(Lbox[ii]**3., 10**-Mp[ii]), xytext=(Lbox[ii]**3.*1.07, 1.07*10**-Mp[ii]))
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_ylim((1e-7,1e-12))
ax1.set_xlim((1e7, 1e13))
p.grid()
ax1.set_xlabel(r'box volume [Mpc$^{3}/h^{-3}$]')
ax1.set_ylabel('Particle mass [h/Msun]')
p.savefig(join("..","presentationPlots","mass-1-Volume.png"))
p.clf()
sys.exit()
| cc0-1.0 |
hobbyjobs/photivo | scons-local-2.2.0/SCons/Tool/mslib.py | 14 | 2255 | """SCons.Tool.mslib
Tool-specific initialization for lib (MicroSoft library archiver).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mslib.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Defaults
import SCons.Tool
import SCons.Tool.msvs
import SCons.Tool.msvc
import SCons.Util
from MSCommon import msvc_exists, msvc_setup_env_once
def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
# Set-up ms tools paths
msvc_setup_env_once(env)
env['AR'] = 'lib'
env['ARFLAGS'] = SCons.Util.CLVar('/nologo')
env['ARCOM'] = "${TEMPFILE('$AR $ARFLAGS /OUT:$TARGET $SOURCES')}"
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib'
def exists(env):
return msvc_exists()
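# Illustrative SConstruct usage (assumes an MSVC toolchain is installed):
#
#   env = Environment(tools=['msvc', 'mslib'])
#   env.StaticLibrary('mylib', ['foo.obj', 'bar.obj'])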
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
xzzy/statdev | applications/management/commands/generate_test_users.py | 4 | 6548 | from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from mixer.backend.django import mixer
import logging
logger = logging.getLogger('statdev')
User = get_user_model()
class Command(BaseCommand):
    help = 'Create test user accounts and assign them to groups'
def handle(self, *args, **options):
processor = Group.objects.get(name='Processor')
assessor = Group.objects.get(name='Assessor')
approver = Group.objects.get(name='Approver')
referee = Group.objects.get(name='Referee')
emergency = Group.objects.get(name='Emergency')
director = Group.objects.get(name='Director')
executive = Group.objects.get(name='Executive')
referee = Group.objects.get(name='Referee')
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Admin", last_name="1", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
self.user1.groups.add(processor)
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Assessor", last_name="1", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
self.user1.groups.add(assessor)
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Manager", last_name="1", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
self.user1.groups.add(approver)
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Director", last_name="1", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
self.user1.groups.add(director)
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Exec", last_name="1", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
self.user1.groups.add(executive)
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Emergency", last_name="1", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
self.user1.groups.add(emergency)
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Referee", last_name="1", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
self.user1.groups.add(referee)
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Referee", last_name="1", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
self.user1.groups.add(referee)
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Referee", last_name="1", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
self.user1.groups.add(referee)
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Referee", last_name="1", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
self.user1.groups.add(referee)
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Referee", last_name="1", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
self.user1.groups.add(referee)
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Referee", last_name="1", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
self.user1.groups.add(referee)
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Customer", last_name="1", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Customer", last_name="2", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Customer", last_name="3", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Customer", last_name="4", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
if not User.objects.filter(email="[email protected]").exists():
self.user1 = mixer.blend(User, email="[email protected]", first_name="Customer", last_name="5", is_superuser=False, is_staff=False)
self.user1.set_password('pass')
self.user1.save()
print ("Test Account Creation Completed")
return
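# Illustrative invocation (run from the Django project root):
#
#   python manage.py generate_test_users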
| apache-2.0 |
tdjordan/tortoisegit | gitgtk/datamine.py | 1 | 25938 | #
# Data Mining dialog for TortoiseHg and Mercurial
#
# Copyright (C) 2008 Steve Borho <[email protected]>
import pygtk
pygtk.require('2.0')
import gtk
import gobject
import os
import pango
import Queue
import re
import threading, thread2
import time
from mercurial import hg, ui, util, revlog
from hglib import hgcmd_toq, toutf, fromutf
from gdialog import *
from vis import treemodel
from vis.colormap import AnnotateColorMap, AnnotateColorSaturation
from vis.treeview import TreeView
import gtklib
class DataMineDialog(GDialog):
COL_REVID = 0
COL_TEXT = 1
COL_TOOLTIP = 2
COL_PATH = 3
COL_COLOR = 4
COL_USER = 5
def get_title(self):
return 'DataMining - ' + os.path.basename(self.repo.root)
def get_icon(self):
return 'menurepobrowse.ico'
def parse_opts(self):
pass
def get_tbbuttons(self):
self.stop_button = self.make_toolbutton(gtk.STOCK_STOP, 'Stop',
self._stop_current_search, tip='Stop operation on current tab')
return [
self.make_toolbutton(gtk.STOCK_FIND, 'New Search',
self._search_clicked, tip='Open new search tab'),
self.stop_button
]
def prepare_display(self):
os.chdir(self.repo.root)
def save_settings(self):
settings = GDialog.save_settings(self)
settings['datamine'] = ()
return settings
def load_settings(self, settings):
GDialog.load_settings(self, settings)
# settings['datamine']
def get_body(self):
""" Initialize the Dialog. """
self.grep_cmenu = self.grep_context_menu()
self.ann_cmenu = self.annotate_context_menu()
self.changedesc = {}
self.newpagecount = 1
vbox = gtk.VBox()
notebook = gtk.Notebook()
notebook.set_tab_pos(gtk.POS_TOP)
notebook.set_scrollable(True)
notebook.popup_enable()
notebook.show()
self.notebook = notebook
vbox.pack_start(self.notebook, True, True, 2)
self.stbar = gtklib.StatusBar()
vbox.pack_start(self.stbar, False, False, 2)
self.stop_button.set_sensitive(False)
return vbox
def _destroying(self, gtkobj):
self._stop_all_searches()
GDialog._destroying(self, gtkobj)
def ann_header_context_menu(self, treeview):
_menu = gtk.Menu()
_button = gtk.CheckMenuItem("Filename")
_button.connect("toggled", self.toggle_annatate_columns, treeview, 1)
_menu.append(_button)
_button = gtk.CheckMenuItem("User")
_button.connect("toggled", self.toggle_annatate_columns, treeview, 2)
_menu.append(_button)
_menu.show_all()
return _menu
def grep_context_menu(self):
_menu = gtk.Menu()
_menu.append(create_menu('di_splay change', self._cmenu_display))
_menu.append(create_menu('_annotate file', self._cmenu_annotate))
_menu.append(create_menu('_file history', self._cmenu_file_log))
_menu.show_all()
return _menu
def annotate_context_menu(self):
_menu = gtk.Menu()
_menu.append(create_menu('di_splay change', self._cmenu_display))
_menu.show_all()
return _menu
def _cmenu_display(self, menuitem):
from changeset import ChangeSet
statopts = {'rev' : [self.currev] }
dialog = ChangeSet(self.ui, self.repo, self.cwd, [], statopts, False)
dialog.display()
def _cmenu_annotate(self, menuitem):
self.add_annotate_page(self.curpath, self.currev)
def _cmenu_file_log(self, menuitem):
from history import GLog
dialog = GLog(self.ui, self.repo, self.cwd, [self.repo.root], {}, False)
dialog.open_with_file(self.curpath)
dialog.display()
def _grep_button_release(self, widget, event):
if event.button == 3 and not (event.state & (gtk.gdk.SHIFT_MASK |
gtk.gdk.CONTROL_MASK)):
self._grep_popup_menu(widget, event.button, event.time)
return False
def _grep_popup_menu(self, treeview, button=0, time=0):
self.grep_cmenu.popup(None, None, None, button, time)
return True
def _grep_row_act(self, tree, path, column):
"""Default action is the first entry in the context menu
"""
self.grep_cmenu.get_children()[0].activate()
return True
def get_rev_desc(self, rev):
if rev in self.changedesc:
return self.changedesc[rev]
ctx = self.repo.changectx(rev)
author = util.shortuser(ctx.user())
summary = ctx.description().replace('\0', '')
summary = summary.split('\n')[0]
date = time.strftime("%y-%m-%d %H:%M", time.gmtime(ctx.date()[0]))
desc = author+'@'+str(rev)+' '+date+' "'+summary+'"'
self.changedesc[rev] = (desc, author)
return (desc, author)
def _search_clicked(self, button, data):
self.add_search_page()
def create_tab_close_button(self):
button = gtk.Button()
iconBox = gtk.HBox(False, 0)
image = gtk.Image()
image.set_from_stock(gtk.STOCK_CLOSE, gtk.ICON_SIZE_MENU)
gtk.Button.set_relief(button, gtk.RELIEF_NONE)
settings = gtk.Widget.get_settings(button)
(w,h) = gtk.icon_size_lookup_for_settings(settings, gtk.ICON_SIZE_MENU)
gtk.Widget.set_size_request(button, w + 4, h + 4)
image.show()
iconBox.pack_start(image, True, False, 0)
button.add(iconBox)
iconBox.show()
return button
def add_search_page(self):
frame = gtk.Frame()
frame.set_border_width(10)
vbox = gtk.VBox()
search_hbox = gtk.HBox()
regexp = gtk.Entry()
includes = gtk.Entry()
if self.cwd.startswith(self.repo.root):
includes.set_text(util.canonpath(self.repo.root, self.cwd, '.'))
excludes = gtk.Entry()
search = gtk.Button('Search')
search_hbox.pack_start(gtk.Label('Regexp:'), False, False, 4)
search_hbox.pack_start(regexp, True, True, 4)
search_hbox.pack_start(gtk.Label('Includes:'), False, False, 4)
search_hbox.pack_start(includes, True, True, 4)
search_hbox.pack_start(gtk.Label('Excludes:'), False, False, 4)
search_hbox.pack_start(excludes, True, True, 4)
search_hbox.pack_start(search, False, False)
self.tooltips.set_tip(search, 'Start this search')
self.tooltips.set_tip(regexp, 'Regular expression search pattern')
self.tooltips.set_tip(includes, 'Comma separated list of'
' inclusion patterns. By default, the entire repository'
' is searched.')
self.tooltips.set_tip(excludes, 'Comma separated list of'
' exclusion patterns. Exclusion patterns are applied'
' after inclusion patterns.')
vbox.pack_start(search_hbox, False, False, 4)
hbox = gtk.HBox()
follow = gtk.CheckButton('Follow copies and renames')
ignorecase = gtk.CheckButton('Ignore case')
linenum = gtk.CheckButton('Show line numbers')
showall = gtk.CheckButton('Show all matching revisions')
hbox.pack_start(follow, False, False, 4)
hbox.pack_start(ignorecase, False, False, 4)
hbox.pack_start(linenum, False, False, 4)
hbox.pack_start(showall, False, False, 4)
vbox.pack_start(hbox, False, False, 4)
treeview = gtk.TreeView()
treeview.get_selection().set_mode(gtk.SELECTION_SINGLE)
treeview.set_property('fixed-height-mode', True)
treeview.connect("cursor-changed", self._grep_selection_changed)
treeview.connect('button-release-event', self._grep_button_release)
treeview.connect('popup-menu', self._grep_popup_menu)
treeview.connect('row-activated', self._grep_row_act)
results = gtk.ListStore(str, str, str, str)
treeview.set_model(results)
for title, width, col, emode in (
('Rev', 10, self.COL_REVID, pango.ELLIPSIZE_NONE),
('File', 25, self.COL_PATH, pango.ELLIPSIZE_START),
('Matches', 80, self.COL_TEXT, pango.ELLIPSIZE_END)):
cell = gtk.CellRendererText()
cell.set_property("width-chars", width)
cell.set_property("ellipsize", emode)
cell.set_property("family", "Monospace")
column = gtk.TreeViewColumn(title)
column.set_resizable(True)
column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
column.set_fixed_width(cell.get_size(treeview)[2])
column.pack_start(cell, expand=True)
column.add_attribute(cell, "text", col)
treeview.append_column(column)
if hasattr(treeview, 'set_tooltip_column'):
treeview.set_tooltip_column(self.COL_TOOLTIP)
scroller = gtk.ScrolledWindow()
scroller.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scroller.add(treeview)
vbox.pack_start(scroller, True, True)
frame.add(vbox)
frame.show_all()
hbox = gtk.HBox()
lbl = gtk.Label('Search %d' % self.newpagecount)
close = self.create_tab_close_button()
close.connect('clicked', self.close_page, frame)
hbox.pack_start(lbl, True, True, 2)
hbox.pack_start(close, False, False)
hbox.show_all()
num = self.notebook.append_page(frame, hbox)
self.newpagecount += 1
objs = (treeview.get_model(), frame, regexp, follow, ignorecase,
excludes, includes, linenum, showall, search_hbox)
# Clicking 'search' or hitting Enter in any text entry triggers search
search.connect('clicked', self.trigger_search, objs)
regexp.connect('activate', self.trigger_search, objs)
includes.connect('activate', self.trigger_search, objs)
excludes.connect('activate', self.trigger_search, objs)
if hasattr(self.notebook, 'set_tab_reorderable'):
self.notebook.set_tab_reorderable(frame, True)
self.notebook.set_current_page(num)
regexp.grab_focus()
def trigger_search(self, button, objs):
(model, frame, regexp, follow, ignorecase,
excludes, includes, linenum, showall, search_hbox) = objs
re = regexp.get_text()
if not re:
Prompt('No regular expression given',
'You must provide a search expression', self).run()
regexp.grab_focus()
return
q = Queue.Queue()
args = [self.repo.root, q, 'grep']
if follow.get_active(): args.append('--follow')
if ignorecase.get_active(): args.append('--ignore-case')
if linenum.get_active(): args.append('--line-number')
if showall.get_active(): args.append('--all')
incs = [x.strip() for x in includes.get_text().split(',')]
excs = [x.strip() for x in excludes.get_text().split(',')]
for i in incs:
if i: args.extend(['-I', i])
for x in excs:
if x: args.extend(['-X', x])
args.append(re)
thread = thread2.Thread(target=hgcmd_toq, args=args)
thread.start()
frame._mythread = thread
self.stop_button.set_sensitive(True)
model.clear()
search_hbox.set_sensitive(False)
self.stbar.begin()
self.stbar.set_status_text('hg ' + ' '.join(args[2:]))
hbox = gtk.HBox()
lbl = gtk.Label('Search "%s"' % re.split()[0])
close = self.create_tab_close_button()
close.connect('clicked', self.close_page, frame)
hbox.pack_start(lbl, True, True, 2)
hbox.pack_start(close, False, False)
hbox.show_all()
self.notebook.set_tab_label(frame, hbox)
gobject.timeout_add(50, self.grep_wait, thread, q, model,
search_hbox, regexp, frame)
def grep_wait(self, thread, q, model, search_hbox, regexp, frame):
"""
Handle all the messages currently in the queue (if any).
"""
while q.qsize():
line = q.get(0).rstrip('\r\n')
try:
(path, revid, text) = line.split(':', 2)
except ValueError:
continue
tip, user = self.get_rev_desc(long(revid))
model.append((revid, toutf(text), tip, toutf(path)))
if thread.isAlive():
return True
else:
if threading.activeCount() == 1:
self.stop_button.set_sensitive(False)
frame._mythread = None
search_hbox.set_sensitive(True)
regexp.grab_focus()
self.stbar.end()
return False
def _grep_selection_changed(self, treeview):
"""
Callback for when the user selects grep output.
"""
(path, focus) = treeview.get_cursor()
model = treeview.get_model()
if path is not None and model is not None:
iter = model.get_iter(path)
self.currev = model[iter][self.COL_REVID]
self.curpath = fromutf(model[iter][self.COL_PATH])
self.stbar.set_status_text(toutf(model[iter][self.COL_TOOLTIP]))
def _stop_current_search(self, button, widget):
num = self.notebook.get_current_page()
frame = self.notebook.get_nth_page(num)
self._stop_search(frame)
def _stop_all_searches(self):
for num in xrange(self.notebook.get_n_pages()):
frame = self.notebook.get_nth_page(num)
self._stop_search(frame)
def _stop_search(self, frame):
if hasattr(frame, '_mythread') and frame._mythread:
frame._mythread.terminate()
frame._mythread.join()
frame._mythread = None
def close_page(self, button, widget):
'''Close page button has been pressed'''
num = self.notebook.page_num(widget)
if num != -1 and self.notebook.get_n_pages() > 1:
self.notebook.remove_page(num)
def _add_header_context_menu(self, col, menu):
lb = gtk.Label(col.get_title())
lb.show()
col.set_widget(lb)
wgt = lb.get_parent()
while wgt:
if type(wgt) == gtk.Button:
wgt.connect("button-press-event",
self._tree_header_button_press, menu)
break
wgt = wgt.get_parent()
def _tree_header_button_press(self, widget, event, menu):
if event.button == 3:
menu.popup(None, None, None, event.button, event.time)
return True
return False
def add_annotate_page(self, path, revid):
'''
Add new annotation page to notebook. Start scan of
file 'path' revision history, start annotate of supplied
revision 'revid'.
'''
if revid == '.':
ctx = self.repo.workingctx().parents()[0]
try:
fctx = ctx.filectx(path)
except revlog.LookupError:
Prompt('File is unrevisioned',
'Unable to annotate ' + path, self).run()
return
rev = fctx.filelog().linkrev(fctx.filenode())
revid = str(rev)
else:
rev = long(revid)
frame = gtk.Frame()
frame.set_border_width(10)
vbox = gtk.VBox()
# File log revision graph
graphview = TreeView(self.repo, 5000, self.stbar)
graphview.connect('revisions-loaded', self.revisions_loaded, rev)
graphview.refresh(True, None, {'filehist':path, 'filerev':rev})
graphview.set_property('rev-column-visible', True)
graphview.set_property('date-column-visible', True)
hbox = gtk.HBox()
followlabel = gtk.Label('')
follow = gtk.Button('Follow')
follow.connect('clicked', self.follow_rename)
follow.hide()
follow.set_sensitive(False)
hbox.pack_start(gtk.Label(''), True, True)
hbox.pack_start(followlabel, False, False)
hbox.pack_start(follow, False, False)
# Annotation text tree view
treeview = gtk.TreeView()
treeview.get_selection().set_mode(gtk.SELECTION_SINGLE)
treeview.set_property('fixed-height-mode', True)
treeview.set_border_width(0)
treeview.connect("cursor-changed", self._ann_selection_changed)
treeview.connect('button-release-event', self._ann_button_release)
treeview.connect('popup-menu', self._ann_popup_menu)
treeview.connect('row-activated', self._ann_row_act)
results = gtk.ListStore(str, str, str, str, str, str)
treeview.set_model(results)
context_menu = self.ann_header_context_menu(treeview)
for title, width, col, emode, visible in (
('Rev', 10, self.COL_REVID, pango.ELLIPSIZE_NONE, True),
('File', 15, self.COL_PATH, pango.ELLIPSIZE_START, False),
('User', 15, self.COL_USER, pango.ELLIPSIZE_END, False),
('Matches', 80, self.COL_TEXT, pango.ELLIPSIZE_END, True)):
cell = gtk.CellRendererText()
cell.set_property("width-chars", width)
cell.set_property("ellipsize", emode)
cell.set_property("family", "Monospace")
column = gtk.TreeViewColumn(title)
column.set_resizable(True)
column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
column.set_fixed_width(cell.get_size(treeview)[2])
column.pack_start(cell, expand=True)
column.add_attribute(cell, "text", col)
column.add_attribute(cell, "background", self.COL_COLOR)
column.set_visible(visible)
treeview.append_column(column)
self._add_header_context_menu(column, context_menu)
treeview.set_headers_clickable(True)
if hasattr(treeview, 'set_tooltip_column'):
treeview.set_tooltip_column(self.COL_TOOLTIP)
results.path = path
results.rev = revid
scroller = gtk.ScrolledWindow()
scroller.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scroller.add(treeview)
vpaned = gtk.VPaned()
vpaned.pack1(graphview, True, True)
vpaned.pack2(scroller, True, True)
vbox.pack_start(vpaned, True, True)
vbox.pack_start(hbox, False, False)
frame.add(vbox)
frame.show_all()
hbox = gtk.HBox()
lbl = gtk.Label(toutf(os.path.basename(path) + '@' + revid))
close = self.create_tab_close_button()
close.connect('clicked', self.close_page, frame)
hbox.pack_start(lbl, True, True, 2)
hbox.pack_start(close, False, False)
hbox.show_all()
num = self.notebook.append_page_menu(frame,
hbox, gtk.Label(toutf(path + '@' + revid)))
if hasattr(self.notebook, 'set_tab_reorderable'):
self.notebook.set_tab_reorderable(frame, True)
self.notebook.set_current_page(num)
graphview.connect('revision-selected', self.log_selection_changed,
path, followlabel, follow)
objs = (frame, treeview.get_model(), path)
graphview.treeview.connect('row-activated', self.log_activate, objs)
graphview.treeview.connect('button-release-event',
self._ann_button_release)
graphview.treeview.connect('popup-menu', self._ann_popup_menu)
def toggle_annatate_columns(self, button, treeview, col):
b = button.get_active()
treeview.get_column(col).set_visible(b)
def log_selection_changed(self, graphview, path, label, button):
row = graphview.get_revision()
rev = row[treemodel.REVID]
self.currev = str(rev)
ctx = self.repo.changectx(rev)
filectx = ctx.filectx(path)
info = filectx.renamed()
if info:
(rpath, node) = info
frev = self.repo.file(rpath).linkrev(node)
button.set_label(toutf('%s@%s' % (rpath, frev)))
button.show()
button.set_sensitive(True)
label.set_text('Follow Rename:')
else:
button.hide()
button.set_sensitive(False)
label.set_text('')
def follow_rename(self, button):
path, rev = button.get_label().rsplit('@', 1)
self.add_annotate_page(path, rev)
def log_activate(self, treeview, path, column, objs):
model = treeview.get_model()
iter = model.get_iter(path)
rev = model.get_value(iter, treemodel.REVID)
self.trigger_annotate(rev, objs)
def revisions_loaded(self, graphview, rev):
graphview.set_revision_id(rev)
treeview = graphview.treeview
path, column = treeview.get_cursor()
# It's possible that the requested change was not found in the
# file's filelog history. In that case, no row will be
# selected.
        if path is not None:
treeview.row_activated(path, column)
def trigger_annotate(self, rev, objs):
'''
User has selected a file revision to annotate. Trigger a
background thread to perform the annotation. Disable the select
button until this operation is complete.
'''
(frame, model, path) = objs
q = Queue.Queue()
args = [self.repo.root, q, 'annotate', '--follow', '--number',
'--rev', str(rev), path]
thread = threading.Thread(target=hgcmd_toq, args=args)
thread.start()
frame._mythread = thread
self.stop_button.set_sensitive(True)
# date of selected revision
ctx = self.repo.changectx(long(rev))
curdate = ctx.date()[0]
# date of initial revision
fctx = self.repo.filectx(path, fileid=0)
basedate = fctx.date()[0]
agedays = (curdate - basedate) / (24 * 60 * 60)
colormap = AnnotateColorSaturation(agedays)
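        # agedays spans the history from the file's first revision to the
        # selected one and seeds the colormap used to shade annotated lines
        # by revision age.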
model.clear()
self.stbar.begin()
self.stbar.set_status_text(toutf('hg ' + ' '.join(args[2:])))
hbox = gtk.HBox()
lbl = gtk.Label(toutf(os.path.basename(path) + '@' + str(rev)))
close = self.create_tab_close_button()
close.connect('clicked', self.close_page, frame)
hbox.pack_start(lbl, True, True, 2)
hbox.pack_start(close, False, False)
hbox.show_all()
self.notebook.set_tab_label(frame, hbox)
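        # As with searches, poll the annotate thread every 50ms until it
        # finishes; annotate_wait() drains the output queue on each tick.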
gobject.timeout_add(50, self.annotate_wait, thread, q, model,
curdate, colormap, frame)
def annotate_wait(self, thread, q, model, curdate, colormap, frame):
"""
Handle all the messages currently in the queue (if any).
"""
while q.qsize():
line = q.get(0).rstrip('\r\n')
try:
(revpath, text) = line.split(':', 1)
revid, path = revpath.lstrip().split(' ', 1)
rowrev = long(revid)
except ValueError:
continue
tip, user = self.get_rev_desc(rowrev)
ctx = self.repo.changectx(rowrev)
color = colormap.get_color(ctx, curdate)
model.append((revid, toutf(text), tip, toutf(path.strip()),
color, toutf(user)))
if thread.isAlive():
return True
else:
if threading.activeCount() == 1:
self.stop_button.set_sensitive(False)
frame._mythread = None
self.stbar.end()
return False
def _ann_selection_changed(self, treeview):
"""
User selected line of annotate output, describe revision
responsible for this line in the status bar
"""
(path, focus) = treeview.get_cursor()
model = treeview.get_model()
if path is not None and model is not None:
iter = model.get_iter(path)
self.currev = model[iter][self.COL_REVID]
self.path = model.path
self.stbar.set_status_text(model[iter][self.COL_TOOLTIP])
def _ann_button_release(self, widget, event):
if event.button == 3 and not (event.state & (gtk.gdk.SHIFT_MASK |
gtk.gdk.CONTROL_MASK)):
self._ann_popup_menu(widget, event.button, event.time)
return False
def _ann_popup_menu(self, treeview, button=0, time=0):
self.ann_cmenu.popup(None, None, None, button, time)
return True
def _ann_row_act(self, tree, path, column):
self.ann_cmenu.get_children()[0].activate()
def create_menu(label, callback):
menuitem = gtk.MenuItem(label, True)
menuitem.connect('activate', callback)
menuitem.set_border_width(1)
return menuitem
def run(root='', cwd='', files=[], **opts):
u = ui.ui()
u.updateopts(debug=False, traceback=False)
repo = hg.repository(u, path=root)
cmdoptions = {
'follow':False, 'follow-first':False, 'copies':False, 'keyword':[],
'limit':0, 'rev':[], 'removed':False, 'no_merges':False, 'date':None,
'only_merges':None, 'prune':[], 'git':False, 'verbose':False,
'include':[], 'exclude':[]
}
dialog = DataMineDialog(u, repo, cwd, files, cmdoptions, True)
dialog.display()
for f in files:
if os.path.isfile(f):
cf = util.canonpath(root, cwd, f)
dialog.add_annotate_page(cf, '.')
elif os.path.isdir(f):
Prompt('Invalid path', "Can't annotate directory: %s" % f,
dialog).run()
if not dialog.notebook.get_n_pages():
dialog.add_search_page()
gtk.gdk.threads_init()
gtk.gdk.threads_enter()
gtk.main()
gtk.gdk.threads_leave()
if __name__ == "__main__":
import sys
import hglib
opts = {}
opts['cwd'] = os.getcwd()
opts['root'] = hglib.rootpath()
opts['files'] = sys.argv[1:] or []
run(**opts)
| gpl-2.0 |
Giovanni21M/Text-Playing-Game | base.py | 1 | 15226 | from sys import exit
from random import randint
from time import sleep
class Engine:
def __init__(self, scene_map):
self.scene_map = scene_map
def play(self):
current_scene = self.scene_map.opening_scene()
last_scene = self.scene_map.next_scene('death')
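        # Each scene's enter() returns the key of the next scene; keep
        # advancing until the 'death' scene is reached, then enter it once.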
while current_scene != last_scene:
next_scene_name = current_scene.enter()
current_scene = self.scene_map.next_scene(next_scene_name)
current_scene.enter()
class Beginning:
def enter(self):
global username
username = input("\nPlease enter your name, brave one: ")
print("\n***********************************")
print("Welcome to Text Playing Game, %s." % username)
print("Prepare to begin your journey!")
print("***********************************\n")
return 'guildhall'
class Blacksmith:
def enter(self):
print("\nYou better have coin coming into my shop, %s." % username)
while True:
daggerPurchase = Trading('dagger', None)
swordPurchase = Trading('sword', None)
choice = input("\nWill you be crafting anything today? ")
choice = choice.lower()
if choice == "yes":
print("\nWhat weapon will you be having crafted today?")
print("Minotaur's Horn Dagger - 15 coins")
print("Dragon Fang Sword - 35 coins")
while True:
choice2 = input("Weapon: ")
choice2 = choice2.lower()
if choice2 == "dagger":
daggerPurchase.purchase()
return 'guildhall'
elif choice2 == "sword":
swordPurchase.purchase()
return 'guildhall'
else:
print("We don't have that relic.")
elif choice == "no":
print("Come back wit'more coin ye cheap bastard!")
return 'guildhall'
else:
print("That's not a choice.")
class Death:
quips = [
"\nGood game.",
"\nGet rekt nerd.",
"\nGit gud.",
"\nSleep with the L."
]
def enter(self):
print(Death.quips[randint(0, len(self.quips)-1)])
exit(1)
class Dragon:
def enter(self):
print("\nBefore you can tell what it is you die.")
return 'death'
class Finished:
def enter(self):
print("\nFinished so soon?")
print("See you next time.")
return 'finished'
class GuildHall:
def enter(self):
print("\nWelcome to the Guild Hall, %s." % username)
print("I am your quest guide, Herold. What would you like to do?\n")
print("Go on quest.")
print("Visit the blacksmith.")
while True:
choice = input("Well...? ")
choice = choice.lower()
if choice == "quest":
print("\nWhere does your adventure take you\n")
print("Maze")
print("Mountain")
while True:
choice2 = input("")
choice2 = choice2.lower()
if choice2 == "maze":
return 'maze'
elif choice2 == "mountain":
return 'mountain'
else:
print("\nThere are beasts to be slain, hurry and choose!\n")
elif choice == "blacksmith":
return 'blacksmith'
else:
print("\nThat's not an option.\n")
class Maze:
def enter(self):
print("\nYou enter Pandora's Labyrinth")
print("and hear a light roar coming from inside.")
while True:
choice = input("Do you go turn back or go deeper? ")
choice = choice.lower()
if choice == "back":
return 'guildhall'
elif choice == "deeper":
while True:
print("\nThere are now three directions to choose from.")
choice2 = input("Do you go left, right, or straight? ")
choice2 = choice2.lower()
if choice2 == "left":
print("\nYou've decided to go down the left path.")
print("You head deeper and deeper,")
print("the cries are getting louder and louder.")
while True:
choice3 = input("Head towards the roar or light? ")
                            choice3 = choice3.lower()
if choice3 == "light":
return 'mountain'
elif choice3 == "roar":
return 'minotaur'
else:
print("\nMAKE YOUR CHOICE BEFORE IT'S TOO LATE!\n")
elif choice2 == "right":
print("\nYou've decided to make the right choice")
print("and see a light up ahead. You walk into it.")
return 'maze'
elif choice2 == "straight":
print("\nYou're heading deeper and deeper.")
print("It's now getting hotter as well.")
print("You've fallen through a hole in the floor.")
return 'guildhall'
else:
print("\nMake your choice, dying soul.\n")
else:
print("\nAre you so scared of the unknown?\n")
class Minotaur:
def enter(self):
print("\nYou've encountered the mighty Minotaur,")
print("get ready to engage in battle.")
while (
(Battle.characters['hero']['hp'] != 0) or
(Battle.characters['minotaur']['hp'] != 0)
):
dmg = Battle('minotaur')
gainxp = Leveling('minotaur')
coin = Trading(None, 'minotaur')
choice = input("\nDo you attack? ")
choice = choice.lower()
if (choice == "yes") or (choice == "attack"):
dmg.player_damage()
print("Enemy HP: ", Battle.characters['minotaur']['hp'])
elif choice == "no":
dmg.enemy_damage()
print("Your HP: ", Battle.characters['hero']['hp'])
else:
print("\nYou left yourself open!\n")
dmg.enemy_damage()
print("Your HP: ", Battle.characters['hero']['hp'])
if Battle.characters['hero']['hp'] <= 0:
print("\nYou have been slain by the mighty Minotaur!.")
return 'death'
elif Battle.characters['minotaur']['hp'] <= 0:
Battle.characters['hero']['hp'] = 10
Battle.characters['minotaur']['hp'] = 13
print("\n****************************")
print("You have slain the Minotaur!")
gainxp.exp_boost()
coin.currency_earn()
print("****************************")
return 'guildhall'
class Mountain:
def enter(self):
print("\nThere are two paths to choose from, do so.")
choice = input("Left or right? ")
choice = choice.lower()
while True:
if choice == "left":
print("\nHalf way up you lose your footing and fall off.")
return 'death'
elif choice == "right":
print("\nA gust of wind almost blows you off the mountain.")
print("You brace yourself and soon notice the sky")
print("is engulfed in darkness.")
return 'dragon'
else:
print("\nYou spot bandits in the distance.")
sleep(1)
print("*thump*")
sleep(1)
print("*thump*")
sleep(1)
print("*thump*")
print("They're getting closer.")
sleep(1)
print("*thump*")
sleep(1)
print("The bandits have now surrounded you.")
luck = randint(0,10)
if luck % 2 == 0:
print("\nYou easily slay 3 of them.")
print("Two of them try running away, you grab one")
print("of their bows and shoot them both down.")
print("You've made it out alive.")
return 'mountain'
else:
print("\nThe bandits kill you and take your belongings.")
return 'death'
class Map:
scenes = {
'beginning': Beginning(),
'blacksmith': Blacksmith(),
'death': Death(),
'dragon': Dragon(),
'finished': Finished(),
'guildhall': GuildHall(),
'maze': Maze(),
'minotaur': Minotaur(),
'mountain': Mountain(),
}
def __init__(self, start_scene):
self.start_scene = start_scene
def next_scene(self, scene_name):
val = Map.scenes.get(scene_name)
return val
def opening_scene(self):
return self.next_scene(self.start_scene)
class Battle:
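    # Stats live in a class-level dict shared by every battle; the Minotaur
    # scene resets hero and minotaur hp manually after a victory.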
characters = {
'hero' : {
'hp' : 10,
'attack' : randint(8,12),
'defense' : 10,
'level': 1,
'experience': 0,
'equipment': None,
'currency': 0
},
'minotaur' : {
'hp' : 13,
'attack' : randint(9,14),
'defense' : 9
},
'dragon' : {
'hp' : 50,
'attack' : randint(25,41),
'defense' : 30
}
}
def __init__(self, enemy_data):
self.enemy_data = enemy_data
def enemy_damage(self):
enemy_atk = Battle.characters[self.enemy_data]['attack']
player_def = Battle.characters['hero']['defense']
if enemy_atk > player_def:
enemy_dps = enemy_atk - player_def
Battle.characters['hero']['hp'] -= enemy_dps
else:
Battle.characters['hero']['hp'] -= 1
def player_damage(self):
if Battle.characters['hero']['equipment'] == 'dagger':
Battle.characters['hero']['attack'] += 8
player_atk = Battle.characters['hero']['attack']
elif Battle.characters['hero']['equipment'] == 'sword':
Battle.characters['hero']['attack'] += 25
player_atk = Battle.characters['hero']['attack']
else:
player_atk = Battle.characters['hero']['attack']
enemy_def = Battle.characters[self.enemy_data]['defense']
if player_atk > enemy_def:
player_dps = player_atk - enemy_def
Battle.characters[self.enemy_data]['hp'] -= player_dps
else:
Battle.characters[self.enemy_data]['hp'] -= 1
class Leveling:
def __init__(self, char_exp):
self.char_exp = char_exp
def stat_increase(x, y):
Battle.characters['hero']['hp'] += Battle.characters['hero']['hp'] / 2
Battle.characters['hero']['attack'] = randint(x,y)
Battle.characters['hero']['defense'] += Battle.characters['hero']['defense'] / 2
print("HP: ", Battle.characters['hero']['hp'])
print("Atk: ", Battle.characters['hero']['attack'])
print("Def: ", Battle.characters['hero']['defense'])
def level_up():
Battle.characters['hero']['level'] += 1
print("\n********************")
print("You are now level", Battle.characters['hero']['level'])
print("********************")
if Battle.characters['hero']['level'] == 2:
Leveling.stat_increase(13, 18)
elif Battle.characters['hero']['level'] == 3:
Leveling.stat_increase(17, 24)
elif Battle.characters['hero']['level'] == 4:
Leveling.stat_increase(30, 45)
def exp_reset(value):
Battle.characters['hero']['experience'] = 0 + value
print("\nExperience: ", Battle.characters['hero']['experience'])
def extra_exp(exp):
global extra
extra = Battle.characters['hero']['experience'] - exp
def exp_boost(self):
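        # Award experience for the defeated enemy, then level up once the
        # total crosses the threshold for the current level (20 xp at level 1,
        # 50 at level 2, 80 at level 3); surplus xp carries over via the
        # module-level 'extra' value.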
if self.char_exp == 'minotaur':
Battle.characters['hero']['experience'] += 5
print("You gain 5 experience points.")
elif self.char_exp == 'dragon':
Battle.characters['hero']['experience'] += 15
print("You gain 15 experience points.")
if (
(Battle.characters['hero']['experience'] >= 20) and
(Battle.characters['hero']['level'] == 1)
):
Leveling.extra_exp(20)
Leveling.level_up()
Leveling.exp_reset(extra)
elif (
(Battle.characters['hero']['experience'] >= 50) and
(Battle.characters['hero']['level'] == 2)
):
Leveling.extra_exp(50)
Leveling.level_up()
Leveling.exp_reset(extra)
elif (
(Battle.characters['hero']['experience'] >= 80) and
(Battle.characters['hero']['level'] == 3)
):
Leveling.extra_exp(80)
Leveling.level_up()
Leveling.exp_reset(extra)
class Trading:
def __init__(self, relic, enemy):
self.relic = relic
self.enemy = enemy
def leftover(self):
dagger = "Minotaur's Horn Dagger"
sword = "Dragon Fang Sword"
if self.relic == 'dagger':
print("\nYou have purchased the %s!" % dagger)
print("You have spent 15 coins on this relic.")
elif self.relic == 'sword':
print("\nYou have purchased the %s! %s!" % sword)
print("You have spent 35 coins on this relic.")
print("You have %g coins leftover." % Battle.characters['hero']['currency'])
def currency_earn(self):
if self.enemy == 'minotaur':
Battle.characters['hero']['currency'] += randint(2,6)
elif self.enemy == 'dragon':
Battle.characters['hero']['currency'] += randint(9,16)
print("You currency is ", Battle.characters['hero']['currency'])
def purchase(self):
relicDagger = Trading('dagger', None)
relicSword = Trading('sword', None)
if (
(Battle.characters['hero']['currency'] >= 15) and
(self.relic == 'dagger')
):
Battle.characters['hero']['currency'] -= 15
Battle.characters['hero']['equipment'] = 'dagger'
relicDagger.leftover()
elif (
(Battle.characters['hero']['currency'] >= 35) and
(self.relic == 'sword')
):
Battle.characters['hero']['currency'] -= 35
Battle.characters['hero']['equipment'] = 'sword'
relicSword.leftover()
else:
print("\nYou don't have enough coins for this relic.")
a_map = Map('beginning')
a_game = Engine(a_map)
a_game.play()
| mit |
iulian787/spack | lib/spack/spack/test/spec_syntax.py | 2 | 29060 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import pytest
import shlex
import llnl.util.filesystem as fs
import spack.hash_types as ht
import spack.repo
import spack.store
import spack.spec as sp
from spack.parse import Token
from spack.spec import Spec
from spack.spec import SpecParseError, RedundantSpecError
from spack.spec import AmbiguousHashError, InvalidHashError, NoSuchHashError
from spack.spec import DuplicateArchitectureError
from spack.spec import DuplicateDependencyError, DuplicateCompilerSpecError
from spack.spec import SpecFilenameError, NoSuchSpecFileError
from spack.spec import MultipleVersionError
from spack.variant import DuplicateVariantError
# Sample output for a complex lexing.
complex_lex = [Token(sp.ID, 'mvapich_foo'),
Token(sp.DEP),
Token(sp.ID, '_openmpi'),
Token(sp.AT),
Token(sp.ID, '1.2'),
Token(sp.COLON),
Token(sp.ID, '1.4'),
Token(sp.COMMA),
Token(sp.ID, '1.6'),
Token(sp.PCT),
Token(sp.ID, 'intel'),
Token(sp.AT),
Token(sp.ID, '12.1'),
Token(sp.COLON),
Token(sp.ID, '12.6'),
Token(sp.ON),
Token(sp.ID, 'debug'),
Token(sp.OFF),
Token(sp.ID, 'qt_4'),
Token(sp.DEP),
Token(sp.ID, 'stackwalker'),
Token(sp.AT),
Token(sp.ID, '8.1_1e')]
# Another sample lexer output with a kv pair.
kv_lex = [Token(sp.ID, 'mvapich_foo'),
Token(sp.ID, 'debug'),
Token(sp.EQ),
Token(sp.VAL, '4'),
Token(sp.DEP),
Token(sp.ID, '_openmpi'),
Token(sp.AT),
Token(sp.ID, '1.2'),
Token(sp.COLON),
Token(sp.ID, '1.4'),
Token(sp.COMMA),
Token(sp.ID, '1.6'),
Token(sp.PCT),
Token(sp.ID, 'intel'),
Token(sp.AT),
Token(sp.ID, '12.1'),
Token(sp.COLON),
Token(sp.ID, '12.6'),
Token(sp.ON),
Token(sp.ID, 'debug'),
Token(sp.OFF),
Token(sp.ID, 'qt_4'),
Token(sp.DEP),
Token(sp.ID, 'stackwalker'),
Token(sp.AT),
Token(sp.ID, '8.1_1e')]
class TestSpecSyntax(object):
# ========================================================================
# Parse checks
# ========================================================================
def check_parse(self, expected, spec=None):
"""Assert that the provided spec is able to be parsed.
If this is called with one argument, it assumes that the
string is canonical (i.e., no spaces and ~ instead of - for
variants) and that it will convert back to the string it came
from.
If this is called with two arguments, the first argument is
the expected canonical form and the second is a non-canonical
input to be parsed.
"""
if spec is None:
spec = expected
output = sp.parse(spec)
parsed = (" ".join(str(spec) for spec in output))
assert expected == parsed
def check_lex(self, tokens, spec):
"""Check that the provided spec parses to the provided token list."""
spec = shlex.split(str(spec))
lex_output = sp.SpecLexer().lex(spec)
assert len(tokens) == len(lex_output), "unexpected number of tokens"
for tok, spec_tok in zip(tokens, lex_output):
if tok.type == sp.ID or tok.type == sp.VAL:
assert tok == spec_tok
else:
# Only check the type for non-identifiers.
assert tok.type == spec_tok.type
def _check_raises(self, exc_type, items):
for item in items:
with pytest.raises(exc_type):
print("CHECKING: ", item, "=======================")
Spec(item)
# ========================================================================
# Parse checks
# ========================================================================
def test_package_names(self):
self.check_parse("mvapich")
self.check_parse("mvapich_foo")
self.check_parse("_mvapich_foo")
def test_anonymous_specs(self):
self.check_parse("%intel")
self.check_parse("@2.7")
self.check_parse("^zlib")
self.check_parse("+foo")
self.check_parse("arch=test-None-None", "platform=test")
self.check_parse('@2.7:')
def test_anonymous_specs_with_multiple_parts(self):
# Parse anonymous spec with multiple tokens
self.check_parse('@4.2: languages=go', 'languages=go @4.2:')
self.check_parse('@4.2: languages=go')
def test_simple_dependence(self):
self.check_parse("openmpi ^hwloc")
self.check_parse("openmpi ^hwloc", "openmpi^hwloc")
self.check_parse("openmpi ^hwloc ^libunwind")
self.check_parse("openmpi ^hwloc ^libunwind",
"openmpi^hwloc^libunwind")
def test_version_after_compiler(self):
self.check_parse('[email protected]%[email protected]', 'foo %[email protected] @2.0')
def test_dependencies_with_versions(self):
self.check_parse("openmpi ^[email protected]")
self.check_parse("openmpi ^[email protected]:")
self.check_parse("openmpi ^hwloc@:1.4b7-rc3")
self.check_parse("openmpi ^[email protected]:1.4b7-rc3")
def test_multiple_specs(self):
self.check_parse("mvapich emacs")
def test_multiple_specs_after_kv(self):
self.check_parse('mvapich cppflags="-O3 -fPIC" emacs')
self.check_parse('mvapich cflags="-O3" emacs',
'mvapich cflags=-O3 emacs')
def test_multiple_specs_long_second(self):
self.check_parse('mvapich [email protected]%intel cflags="-O3"',
'mvapich emacs @1.1.1 %intel cflags=-O3')
self.check_parse('mvapich cflags="-O3 -fPIC" emacs ^ncurses%intel')
self.check_parse('mvapich cflags="-O3 -fPIC" emacs ^ncurses%intel',
'mvapich cflags="-O3 -fPIC" emacs^ncurses%intel')
def test_full_specs(self):
self.check_parse(
"mvapich_foo"
" ^[email protected]:1.4,1.6%[email protected]+debug~qt_4"
" ^[email protected]_1e")
self.check_parse(
"mvapich_foo"
" ^[email protected]:1.4,1.6%[email protected]~qt_4 debug=2"
" ^[email protected]_1e")
self.check_parse(
'mvapich_foo'
' ^[email protected]:1.4,1.6%[email protected] cppflags="-O3" +debug~qt_4'
' ^[email protected]_1e')
self.check_parse(
"mvapich_foo"
" ^[email protected]:1.4,1.6%[email protected]~qt_4 debug=2"
" ^[email protected]_1e arch=test-redhat6-x86")
def test_yaml_specs(self):
self.check_parse(
"[email protected]%[email protected]"
" ^[email protected]")
tempspec = r"builtin.yaml-cpp%gcc"
self.check_parse(
tempspec.strip("builtin."),
spec=tempspec)
tempspec = r"testrepo.yaml-cpp%gcc"
self.check_parse(
tempspec.strip("testrepo."),
spec=tempspec)
tempspec = r"[email protected]%gcc"
self.check_parse(
tempspec.strip("builtin."),
spec=tempspec)
tempspec = r"[email protected]%[email protected]"
self.check_parse(
tempspec.strip("builtin."),
spec=tempspec)
tempspec = r"[email protected]%[email protected]" \
r" ^[email protected]"
self.check_parse(
tempspec.strip("builtin."),
spec=tempspec)
def test_canonicalize(self):
self.check_parse(
"mvapich_foo"
" ^[email protected]:1.4,1.6%[email protected]:12.6+debug~qt_4"
" ^[email protected]_1e",
"mvapich_foo "
"^[email protected],1.2:1.4%[email protected]:12.6+debug~qt_4 "
"^[email protected]_1e")
self.check_parse(
"mvapich_foo"
" ^[email protected]:1.4,1.6%[email protected]:12.6+debug~qt_4"
" ^[email protected]_1e",
"mvapich_foo "
"^[email protected]_1e "
"^[email protected],1.2:1.4%[email protected]:12.6~qt_4+debug")
self.check_parse(
"x ^y@1,2:3,4%intel@1,2,3,4+a~b+c~d+e~f",
"x ^y~f+e~d+c~b+a@4,2:3,1%intel@4,3,2,1")
self.check_parse(
"x arch=test-redhat6-None"
" ^y arch=test-None-core2"
" ^z arch=linux-None-None",
"x os=fe "
"^y target=be "
"^z platform=linux")
self.check_parse(
"x arch=test-debian6-core2"
" ^y arch=test-debian6-core2",
"x os=default_os target=default_target"
" ^y os=default_os target=default_target")
self.check_parse("x ^y", "x@: ^y@:")
def test_parse_errors(self):
errors = ['x@@1.2', 'x ^y@@1.2', '[email protected]::', 'x::']
self._check_raises(SpecParseError, errors)
def _check_hash_parse(self, spec):
"""Check several ways to specify a spec by hash."""
# full hash
self.check_parse(str(spec), '/' + spec.dag_hash())
# partial hash
self.check_parse(str(spec), '/ ' + spec.dag_hash()[:5])
# name + hash
self.check_parse(str(spec), spec.name + '/' + spec.dag_hash())
# name + version + space + partial hash
self.check_parse(
str(spec), spec.name + '@' + str(spec.version) +
' /' + spec.dag_hash()[:6])
@pytest.mark.db
def test_spec_by_hash(self, database):
specs = database.query()
assert len(specs) # make sure something's in the DB
for spec in specs:
self._check_hash_parse(spec)
@pytest.mark.db
def test_dep_spec_by_hash(self, database):
mpileaks_zmpi = database.query_one('mpileaks ^zmpi')
zmpi = database.query_one('zmpi')
fake = database.query_one('fake')
assert 'fake' in mpileaks_zmpi
assert 'zmpi' in mpileaks_zmpi
mpileaks_hash_fake = sp.Spec('mpileaks ^/' + fake.dag_hash())
assert 'fake' in mpileaks_hash_fake
assert mpileaks_hash_fake['fake'] == fake
mpileaks_hash_zmpi = sp.Spec(
'mpileaks %' + str(mpileaks_zmpi.compiler) +
' ^ / ' + zmpi.dag_hash())
assert 'zmpi' in mpileaks_hash_zmpi
assert mpileaks_hash_zmpi['zmpi'] == zmpi
assert mpileaks_hash_zmpi.compiler == mpileaks_zmpi.compiler
mpileaks_hash_fake_and_zmpi = sp.Spec(
'mpileaks ^/' + fake.dag_hash()[:4] + '^ / ' + zmpi.dag_hash()[:5])
assert 'zmpi' in mpileaks_hash_fake_and_zmpi
assert mpileaks_hash_fake_and_zmpi['zmpi'] == zmpi
assert 'fake' in mpileaks_hash_fake_and_zmpi
assert mpileaks_hash_fake_and_zmpi['fake'] == fake
@pytest.mark.db
def test_multiple_specs_with_hash(self, database):
mpileaks_zmpi = database.query_one('mpileaks ^zmpi')
callpath_mpich2 = database.query_one('callpath ^mpich2')
# name + hash + separate hash
specs = sp.parse('mpileaks /' + mpileaks_zmpi.dag_hash() +
'/' + callpath_mpich2.dag_hash())
assert len(specs) == 2
# 2 separate hashes
specs = sp.parse('/' + mpileaks_zmpi.dag_hash() +
'/' + callpath_mpich2.dag_hash())
assert len(specs) == 2
# 2 separate hashes + name
specs = sp.parse('/' + mpileaks_zmpi.dag_hash() +
'/' + callpath_mpich2.dag_hash() +
' callpath')
assert len(specs) == 3
# hash + 2 names
specs = sp.parse('/' + mpileaks_zmpi.dag_hash() +
' callpath' +
' callpath')
assert len(specs) == 3
# hash + name + hash
specs = sp.parse('/' + mpileaks_zmpi.dag_hash() +
' callpath' +
' / ' + callpath_mpich2.dag_hash())
assert len(specs) == 2
@pytest.mark.db
def test_ambiguous_hash(self, mutable_database):
x1 = Spec('a')
x1.concretize()
x1._hash = 'xy'
x2 = Spec('a')
x2.concretize()
x2._hash = 'xx'
mutable_database.add(x1, spack.store.layout)
mutable_database.add(x2, spack.store.layout)
# ambiguity in first hash character
self._check_raises(AmbiguousHashError, ['/x'])
# ambiguity in first hash character AND spec name
self._check_raises(AmbiguousHashError, ['a/x'])
@pytest.mark.db
def test_invalid_hash(self, database):
mpileaks_zmpi = database.query_one('mpileaks ^zmpi')
zmpi = database.query_one('zmpi')
mpileaks_mpich = database.query_one('mpileaks ^mpich')
mpich = database.query_one('mpich')
# name + incompatible hash
self._check_raises(InvalidHashError, [
'zmpi /' + mpich.dag_hash(),
'mpich /' + zmpi.dag_hash()])
# name + dep + incompatible hash
self._check_raises(InvalidHashError, [
'mpileaks ^mpich /' + mpileaks_zmpi.dag_hash(),
'mpileaks ^zmpi /' + mpileaks_mpich.dag_hash()])
@pytest.mark.db
def test_nonexistent_hash(self, database):
"""Ensure we get errors for nonexistant hashes."""
specs = database.query()
# This hash shouldn't be in the test DB. What are the odds :)
no_such_hash = 'aaaaaaaaaaaaaaa'
hashes = [s._hash for s in specs]
assert no_such_hash not in [h[:len(no_such_hash)] for h in hashes]
self._check_raises(NoSuchHashError, [
'/' + no_such_hash,
'mpileaks /' + no_such_hash])
@pytest.mark.db
def test_redundant_spec(self, database):
"""Check that redundant spec constraints raise errors.
TODO (TG): does this need to be an error? Or should concrete
specs only raise errors if constraints cause a contradiction?
"""
mpileaks_zmpi = database.query_one('mpileaks ^zmpi')
callpath_zmpi = database.query_one('callpath ^zmpi')
dyninst = database.query_one('dyninst')
mpileaks_mpich2 = database.query_one('mpileaks ^mpich2')
redundant_specs = [
            # redundant compiler
'/' + mpileaks_zmpi.dag_hash() + '%' + str(mpileaks_zmpi.compiler),
            # redundant version
'mpileaks/' + mpileaks_mpich2.dag_hash() +
'@' + str(mpileaks_mpich2.version),
# redundant dependency
'callpath /' + callpath_zmpi.dag_hash() + '^ libelf',
# redundant flags
'/' + dyninst.dag_hash() + ' cflags="-O3 -fPIC"']
self._check_raises(RedundantSpecError, redundant_specs)
def test_duplicate_variant(self):
duplicates = [
'[email protected]+debug+debug',
'x ^[email protected]+debug debug=true',
'x ^[email protected] debug=false debug=true',
'x ^[email protected] debug=false ~debug'
]
self._check_raises(DuplicateVariantError, duplicates)
def test_multiple_versions(self):
multiples = [
'[email protected]@2.3',
'[email protected]:[email protected]',
'[email protected]@2.3:2.4',
'[email protected]@2.3,2.4',
'[email protected] +foo~bar @2.3',
'[email protected]%[email protected]@2.3:2.4',
]
self._check_raises(MultipleVersionError, multiples)
def test_duplicate_dependency(self):
self._check_raises(DuplicateDependencyError, ["x ^y ^y"])
def test_duplicate_compiler(self):
duplicates = [
"x%intel%intel",
"x%intel%gcc",
"x%gcc%intel",
"x ^y%intel%intel",
"x ^y%intel%gcc",
"x ^y%gcc%intel"
]
self._check_raises(DuplicateCompilerSpecError, duplicates)
def test_duplicate_architecture(self):
duplicates = [
"x arch=linux-rhel7-x86_64 arch=linux-rhel7-x86_64",
"x arch=linux-rhel7-x86_64 arch=linux-rhel7-ppc64le",
"x arch=linux-rhel7-ppc64le arch=linux-rhel7-x86_64",
"y ^x arch=linux-rhel7-x86_64 arch=linux-rhel7-x86_64",
"y ^x arch=linux-rhel7-x86_64 arch=linux-rhel7-ppc64le"
]
self._check_raises(DuplicateArchitectureError, duplicates)
def test_duplicate_architecture_component(self):
duplicates = [
"x os=fe os=fe",
"x os=fe os=be",
"x target=fe target=fe",
"x target=fe target=be",
"x platform=test platform=test",
"x os=fe platform=test target=fe os=fe",
"x target=be platform=test os=be os=fe"
]
self._check_raises(DuplicateArchitectureError, duplicates)
@pytest.mark.usefixtures('config')
def test_parse_yaml_simple(self, mock_packages, tmpdir):
s = Spec('libdwarf')
s.concretize()
specfile = tmpdir.join('libdwarf.yaml')
with specfile.open('w') as f:
f.write(s.to_yaml(hash=ht.build_hash))
# Check an absolute path to spec.yaml by itself:
# "spack spec /path/to/libdwarf.yaml"
specs = sp.parse(specfile.strpath)
assert len(specs) == 1
# Check absolute path to spec.yaml mixed with a clispec, e.g.:
# "spack spec mvapich_foo /path/to/libdwarf.yaml"
specs = sp.parse('mvapich_foo {0}'.format(specfile.strpath))
assert len(specs) == 2
@pytest.mark.usefixtures('config')
def test_parse_filename_missing_slash_as_spec(self, mock_packages, tmpdir):
"""Ensure that libelf.yaml parses as a spec, NOT a file."""
s = Spec('libelf')
s.concretize()
specfile = tmpdir.join('libelf.yaml')
# write the file to the current directory to make sure it exists,
# and that we still do not parse the spec as a file.
with specfile.open('w') as f:
f.write(s.to_yaml(hash=ht.build_hash))
# Check the spec `libelf.yaml` in the working directory, which
# should evaluate to a spec called `yaml` in the `libelf`
# namespace, NOT a spec for `libelf`.
with tmpdir.as_cwd():
specs = sp.parse("libelf.yaml")
assert len(specs) == 1
spec = specs[0]
assert spec.name == "yaml"
assert spec.namespace == "libelf"
assert spec.fullname == "libelf.yaml"
# check that if we concretize this spec, we get a good error
# message that mentions we might've meant a file.
with pytest.raises(spack.repo.UnknownPackageError) as exc_info:
spec.concretize()
assert exc_info.value.long_message
assert ("Did you mean to specify a filename with './libelf.yaml'?"
in exc_info.value.long_message)
# make sure that only happens when the spec ends in yaml
with pytest.raises(spack.repo.UnknownPackageError) as exc_info:
Spec('builtin.mock.doesnotexist').concretize()
assert (
not exc_info.value.long_message or (
"Did you mean to specify a filename with" not in
exc_info.value.long_message
)
)
@pytest.mark.usefixtures('config')
def test_parse_yaml_dependency(self, mock_packages, tmpdir):
s = Spec('libdwarf')
s.concretize()
specfile = tmpdir.join('libelf.yaml')
with specfile.open('w') as f:
f.write(s['libelf'].to_yaml(hash=ht.build_hash))
print("")
print("")
print("PARSING HERE")
# Make sure we can use yaml path as dependency, e.g.:
# "spack spec libdwarf ^ /path/to/libelf.yaml"
specs = sp.parse('libdwarf ^ {0}'.format(specfile.strpath))
assert len(specs) == 1
@pytest.mark.usefixtures('config')
def test_parse_yaml_relative_paths(self, mock_packages, tmpdir):
s = Spec('libdwarf')
s.concretize()
specfile = tmpdir.join('libdwarf.yaml')
with specfile.open('w') as f:
f.write(s.to_yaml(hash=ht.build_hash))
file_name = specfile.basename
parent_dir = os.path.basename(specfile.dirname)
# Relative path to specfile
with fs.working_dir(specfile.dirname):
# Test for command like: "spack spec libelf.yaml"
# This should parse a single spec, but should not concretize.
# See test_parse_filename_missing_slash_as_spec()
specs = sp.parse('{0}'.format(file_name))
assert len(specs) == 1
# Make sure this also works: "spack spec ./libelf.yaml"
specs = sp.parse('./{0}'.format(file_name))
assert len(specs) == 1
# Should also be accepted: "spack spec ../<cur-dir>/libelf.yaml"
specs = sp.parse('../{0}/{1}'.format(parent_dir, file_name))
assert len(specs) == 1
# Should also handle mixed clispecs and relative paths, e.g.:
# "spack spec mvapich_foo ../<cur-dir>/libelf.yaml"
specs = sp.parse('mvapich_foo ../{0}/{1}'.format(
parent_dir, file_name))
assert len(specs) == 2
@pytest.mark.usefixtures('config')
def test_parse_yaml_relative_subdir_path(self, mock_packages, tmpdir):
s = Spec('libdwarf')
s.concretize()
specfile = tmpdir.mkdir('subdir').join('libdwarf.yaml')
with specfile.open('w') as f:
f.write(s.to_yaml(hash=ht.build_hash))
file_name = specfile.basename
# Relative path to specfile
with tmpdir.as_cwd():
assert os.path.exists('subdir/{0}'.format(file_name))
# Test for command like: "spack spec libelf.yaml"
specs = sp.parse('subdir/{0}'.format(file_name))
assert len(specs) == 1
@pytest.mark.usefixtures('config')
def test_parse_yaml_dependency_relative_paths(self, mock_packages, tmpdir):
s = Spec('libdwarf')
s.concretize()
specfile = tmpdir.join('libelf.yaml')
with specfile.open('w') as f:
f.write(s['libelf'].to_yaml(hash=ht.build_hash))
file_name = specfile.basename
parent_dir = os.path.basename(specfile.dirname)
# Relative path to specfile
with fs.working_dir(specfile.dirname):
# Test for command like: "spack spec libelf.yaml"
specs = sp.parse('libdwarf^{0}'.format(file_name))
assert len(specs) == 1
# Make sure this also works: "spack spec ./libelf.yaml"
specs = sp.parse('libdwarf^./{0}'.format(file_name))
assert len(specs) == 1
# Should also be accepted: "spack spec ../<cur-dir>/libelf.yaml"
specs = sp.parse('libdwarf^../{0}/{1}'.format(
parent_dir, file_name))
assert len(specs) == 1
def test_parse_yaml_error_handling(self):
self._check_raises(NoSuchSpecFileError, [
# Single spec that looks like a yaml path
'/bogus/path/libdwarf.yaml',
'../../libdwarf.yaml',
'./libdwarf.yaml',
# Dependency spec that looks like a yaml path
'libdwarf^/bogus/path/libelf.yaml',
'libdwarf ^../../libelf.yaml',
'libdwarf^ ./libelf.yaml',
# Multiple specs, one looks like a yaml path
'mvapich_foo /bogus/path/libelf.yaml',
'mvapich_foo ../../libelf.yaml',
'mvapich_foo ./libelf.yaml',
])
def test_nice_error_for_no_space_after_spec_filename(self):
"""Ensure that omitted spaces don't give weird errors about hashes."""
self._check_raises(SpecFilenameError, [
'/bogus/path/libdwarf.yamlfoobar',
'libdwarf^/bogus/path/libelf.yamlfoobar ^/path/to/bogus.yaml',
])
@pytest.mark.usefixtures('config')
def test_yaml_spec_not_filename(self, mock_packages, tmpdir):
with pytest.raises(spack.repo.UnknownPackageError):
Spec('builtin.mock.yaml').concretize()
with pytest.raises(spack.repo.UnknownPackageError):
Spec('builtin.mock.yamlfoobar').concretize()
@pytest.mark.usefixtures('config')
def test_parse_yaml_variant_error(self, mock_packages, tmpdir):
s = Spec('a')
s.concretize()
specfile = tmpdir.join('a.yaml')
with specfile.open('w') as f:
f.write(s.to_yaml(hash=ht.build_hash))
with pytest.raises(RedundantSpecError):
# Trying to change a variant on a concrete spec is an error
sp.parse('{0} ~bvv'.format(specfile.strpath))
# ========================================================================
# Lex checks
# ========================================================================
def test_ambiguous(self):
# This first one is ambiguous because - can be in an identifier AND
# indicate disabling an option.
with pytest.raises(AssertionError):
self.check_lex(
complex_lex,
"mvapich_foo"
"^[email protected]:1.4,1.6%[email protected]:12.6+debug-qt_4"
"^[email protected]_1e"
)
# The following lexes are non-ambiguous (add a space before -qt_4)
# and should all result in the tokens in complex_lex
def test_minimal_spaces(self):
self.check_lex(
complex_lex,
"mvapich_foo"
"^[email protected]:1.4,1.6%[email protected]:12.6+debug -qt_4"
"^[email protected]_1e")
self.check_lex(
complex_lex,
"mvapich_foo"
"^[email protected]:1.4,1.6%[email protected]:12.6+debug~qt_4"
"^[email protected]_1e")
def test_spaces_between_dependences(self):
self.check_lex(
complex_lex,
"mvapich_foo "
"^[email protected]:1.4,1.6%[email protected]:12.6+debug -qt_4 "
"^stackwalker @ 8.1_1e")
self.check_lex(
complex_lex,
"mvapich_foo "
"^[email protected]:1.4,1.6%[email protected]:12.6+debug~qt_4 "
"^stackwalker @ 8.1_1e")
def test_spaces_between_options(self):
self.check_lex(
complex_lex,
"mvapich_foo "
"^_openmpi @1.2:1.4,1.6 %intel @12.1:12.6 +debug -qt_4 "
"^stackwalker @8.1_1e")
def test_way_too_many_spaces(self):
self.check_lex(
complex_lex,
"mvapich_foo "
"^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 "
"^ stackwalker @ 8.1_1e")
self.check_lex(
complex_lex,
"mvapich_foo "
"^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug ~ qt_4 "
"^ stackwalker @ 8.1_1e")
def test_kv_with_quotes(self):
self.check_lex(
kv_lex,
"mvapich_foo debug='4' "
"^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 "
"^ stackwalker @ 8.1_1e")
self.check_lex(
kv_lex,
'mvapich_foo debug="4" '
"^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 "
"^ stackwalker @ 8.1_1e")
self.check_lex(
kv_lex,
"mvapich_foo 'debug = 4' "
"^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 "
"^ stackwalker @ 8.1_1e")
def test_kv_without_quotes(self):
self.check_lex(
kv_lex,
"mvapich_foo debug=4 "
"^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 "
"^ stackwalker @ 8.1_1e")
def test_kv_with_spaces(self):
self.check_lex(
kv_lex,
"mvapich_foo debug = 4 "
"^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 "
"^ stackwalker @ 8.1_1e")
self.check_lex(
kv_lex,
"mvapich_foo debug =4 "
"^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 "
"^ stackwalker @ 8.1_1e")
self.check_lex(
kv_lex,
"mvapich_foo debug= 4 "
"^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 "
"^ stackwalker @ 8.1_1e")
@pytest.mark.parametrize('expected_tokens,spec_string', [
([Token(sp.ID, 'target'),
Token(sp.EQ, '='),
Token(sp.VAL, 'broadwell')],
'target=broadwell'),
([Token(sp.ID, 'target'),
Token(sp.EQ, '='),
Token(sp.VAL, ':broadwell,icelake')],
'target=:broadwell,icelake')
])
def test_target_tokenization(self, expected_tokens, spec_string):
self.check_lex(expected_tokens, spec_string)
| lgpl-2.1 |
stratus-ss/python_scripts | archive/warfile_deployment/deploy_commands.py | 1 | 7371 | import os
import time
import subprocess
class CurlWarfile:
def __init__(self, server_name, warfile_location, **tomcat_information):
self.predeployed_warfile_hashes = []
deployment_path = "path=/%s" % warfile_location.split("/")[-1].replace("#", "/").replace(".war", "")
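        # Tomcat encodes nested context paths with '#' in the war filename
        # (e.g. foo#bar.war deploys to /foo/bar), so translate that back into
        # a URL path and drop the .war suffix.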
warfile_name = deployment_path.split("/")[-1]
# A different curl command is required depending on whether the tomcat version is 6 or 7
if tomcat_information['tomcat_version'] == "7":
deploy_command = '/usr/bin/curl -u%s:%s --anyauth --upload-file %s ' \
'--url "http://%s:%s/manager/text/deploy?%s" -w "Deployed %s"' %\
(tomcat_information['tomcat_user'], tomcat_information['tomcat_password'],
warfile_location, server_name, tomcat_information['tomcat_port'], deployment_path,
warfile_name)
undeploy_command = '/usr/bin/curl -u%s:%s --url "http://%s:%s/manager/text/undeploy?%s" -w "Deleted %s "' %\
(tomcat_information['tomcat_user'], tomcat_information['tomcat_password'], server_name,
tomcat_information['tomcat_port'], deployment_path, warfile_name)
else:
deploy_command = '/usr/bin/curl -u%s:%s --anyauth --form deployWar=@%s ' \
'--url http://%s:%s/manager/html/upload -w "Deployed %s "' %\
(tomcat_information['tomcat_user'], tomcat_information['tomcat_password'],
warfile_location, server_name, tomcat_information['tomcat_port'], warfile_name)
undeploy_command = '/usr/bin/curl -u%s:%s --url "http://%s:%s/manager/html/undeploy?%s" -w \
"Deleted %s "' % (tomcat_information['tomcat_user'],
tomcat_information['tomcat_password'], server_name,
tomcat_information['tomcat_port'], deployment_path, warfile_name)
self.undeploy_warfile(server_name=server_name, command=undeploy_command, warfile_name=warfile_name,
tomcat_port=tomcat_information['tomcat_port'])
self.skip_server = self.deploy_warfile(server_name=server_name, command=deploy_command,
warfile_name=warfile_name, tomcat_port=tomcat_information['tomcat_port'])
def undeploy_warfile(self, **undeployment_arguments):
print("")
print("=======================================")
print("Beginning Undeploy of old version of %s to %s on port %s" % (undeployment_arguments['warfile_name'],
undeployment_arguments['server_name'],
undeployment_arguments['tomcat_port']))
print("")
self.check_curl_success(undeployment_arguments['server_name'], undeployment_arguments['command'])
def deploy_warfile(self, **deployment_arguments):
print("")
print("=======================================")
print("Beginning Deploy of %s to %s on port %s" % (deployment_arguments['warfile_name'],
deployment_arguments['server_name'],
deployment_arguments['tomcat_port']))
did_deployment_fail = self.check_curl_success(deployment_arguments['server_name'],
deployment_arguments['command'])
retry_count = 0
if did_deployment_fail:
while did_deployment_fail:
print("Curl Deployment failed, retrying in 5 seconds. This is attempt number %s" % (retry_count + 1))
time.sleep(5)
                did_deployment_fail = self.check_curl_success(deployment_arguments['server_name'],
                                                              deployment_arguments['command'])
retry_count += 1
if retry_count > 2:
print("I was unable to reach %s. Deployment failed" % deployment_arguments['server_name'])
return(True)
def check_curl_success(self, server_name, command):
curl_output_line_number = 0
# Curl uses stderr to show its progress so subprocess is required to capture this output
for curl_output_line in subprocess.Popen(command, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True).stderr.read().split("\n"):
curl_output_line_number += 1
print(curl_output_line.split("\n")[0].split("curl: ")[0])
if "curl" in curl_output_line:
print(curl_output_line.split("\n")[0].split("curl: ")[1])
print("")
if curl_output_line_number == 3:
# counter refers to the line number of the output that the Xferd stats are printed to
# Because the curl command constantly updates this line until the transfer is complete
# The output is simply appended to the list. Therefore the most reliable way to determine
# Whether the file has transfered is to get the -8th column which is labeled as '% Xferd'
if "refused" in curl_output_line or curl_output_line == "":
print("The connection to the server %s was refused... is the port closed?\n\n" % server_name)
curl_fail = True
return(curl_fail)
if int(curl_output_line.split("\n")[0].rstrip().split()[-8]) != 100:
curl_fail = True
else:
curl_fail = False
return curl_fail
class WhichLocalWarfilesToDeploy:
def __init__(self, warfile_list, warfile_path, server_name_list):
deploy_these_files = []
self.component_to_server_map = {}
for warfile in warfile_list:
full_path_to_warfile = warfile_path + os.sep + warfile
if os.path.exists(full_path_to_warfile):
deploy_these_files.append(full_path_to_warfile)
for server in server_name_list:
self.component_to_server_map[server] = deploy_these_files
class DetermineHowToRestartTomcat:
def __init__(self, tomcat_script_location, server_name):
self.tomcat_script_location = tomcat_script_location
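        # For init-style scripts, hosts whose name prefix (before the first
        # '-') is longer than two characters get a single 'restart'; shorter
        # prefixes get an explicit stop, pause, start. Non-init scripts fall
        # back to the legacy nohup invocation.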
if "init" in self.tomcat_script_location:
if len(server_name.split("-")[0]) > 2:
self.restart_command = self.tomcat_restart()
else:
self.stop_command = self.tomcat_stop()
else:
self.legacy_command = self.tomcat_legacy()
def tomcat_restart(self):
return("sudo nohup %s restart >/dev/null 2>&1" % self.tomcat_script_location)
def tomcat_stop(self):
return("sudo nohup %s stop >/dev/null 2>&1; sleep 10; sudo nohup %s start >/dev/null 2>&1" %
(self.tomcat_script_location, self.tomcat_script_location))
def tomcat_legacy(self):
return("sudo nohup %s > /dev/null 2>&1" % self.tomcat_script_location)
class DeployETLs:
pass
| lgpl-3.0 |
bsmedberg/socorro | alembic/versions/3a5471a358bf_adding_a_migration_f.py | 11 | 1623 | """Adding a migration for the exploitability report.
Revision ID: 3a5471a358bf
Revises: 191d0453cc07
Create Date: 2013-10-25 07:07:33.968691
"""
# revision identifiers, used by Alembic.
revision = '3a5471a358bf'
down_revision = '4aacaea3eb48'
from alembic import op
from socorro.lib import citexttype, jsontype
from socorro.lib.migrations import load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute('TRUNCATE exploitability_reports CASCADE');
op.add_column(u'exploitability_reports', sa.Column(u'version_string', sa.TEXT(), nullable=True))
op.add_column(u'exploitability_reports', sa.Column(u'product_name', sa.TEXT(), nullable=True))
op.add_column(u'exploitability_reports', sa.Column(u'product_version_id', sa.INTEGER(), nullable=False))
### end Alembic commands ###
load_stored_proc(op, ['update_exploitability.sql'])
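    # Backfill the exploitability report one day at a time, covering
    # 2013-11-15 through 2013-11-29.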
for i in range(15, 30):
backfill_date = '2013-11-%s' % i
op.execute("""
SELECT backfill_exploitability('%s')
""" % backfill_date)
op.execute(""" COMMIT """)
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column(u'exploitability_reports', u'product_version_id')
op.drop_column(u'exploitability_reports', u'product_name')
op.drop_column(u'exploitability_reports', u'version_string')
load_stored_proc(op, ['update_exploitability.sql'])
### end Alembic commands ### | mpl-2.0 |
albertomurillo/ansible | lib/ansible/modules/cloud/ovirt/ovirt_affinity_label.py | 75 | 6902 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_affinity_label
short_description: Module to manage affinity labels in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "This module manage affinity labels in oVirt/RHV. It can also manage assignments
of those labels to hosts and VMs."
options:
name:
description:
- "Name of the affinity label to manage."
required: true
state:
description:
- "Should the affinity label be present or absent."
choices: ['present', 'absent']
default: present
cluster:
description:
- "Name of the cluster where vms and hosts resides."
vms:
description:
- "List of the VMs names, which should have assigned this affinity label."
hosts:
description:
- "List of the hosts names, which should have assigned this affinity label."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create(if not exists) and assign affinity label to vms vm1 and vm2 and host host1
- ovirt_affinity_label:
name: mylabel
cluster: mycluster
vms:
- vm1
- vm2
hosts:
- host1
# To detach all VMs from label
- ovirt_affinity_label:
name: mylabel
cluster: mycluster
vms: []
# Remove affinity label
- ovirt_affinity_label:
state: absent
name: mylabel
'''
RETURN = '''
id:
description: ID of the affinity label which is managed
returned: On success if affinity label is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
affinity_label:
description: "Dictionary of all the affinity label attributes. Affinity label attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
type: dict
returned: On success if affinity label is found.
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from collections import defaultdict
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
ovirt_full_argument_spec,
)
class AffinityLabelsModule(BaseModule):
def build_entity(self):
return otypes.AffinityLabel(name=self._module.params['name'])
def post_create(self, entity):
self.update_check(entity)
def pre_remove(self, entity):
self._module.params['vms'] = []
self._module.params['hosts'] = []
self.update_check(entity)
def _update_label_assignments(self, entity, name, label_obj_type):
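        # Reconcile label assignments for 'vms' or 'hosts': index the objects
        # currently carrying the label by name, attach requested objects that
        # are missing, and detach labeled objects that were not requested,
        # honoring check mode throughout.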
objs_service = getattr(self._connection.system_service(), '%s_service' % name)()
if self._module.params[name] is not None:
objs = self._connection.follow_link(getattr(entity, name))
objs_names = defaultdict(list)
for obj in objs:
labeled_entity = objs_service.service(obj.id).get()
if self._module.params['cluster'] is None:
objs_names[labeled_entity.name].append(obj.id)
elif self._connection.follow_link(labeled_entity.cluster).name == self._module.params['cluster']:
objs_names[labeled_entity.name].append(obj.id)
for obj in self._module.params[name]:
if obj not in objs_names:
for obj_id in objs_service.list(
search='name=%s and cluster=%s' % (obj, self._module.params['cluster'])
):
label_service = getattr(self._service.service(entity.id), '%s_service' % name)()
if not self._module.check_mode:
label_service.add(**{
name[:-1]: label_obj_type(id=obj_id.id)
})
self.changed = True
for obj in objs_names:
if obj not in self._module.params[name]:
label_service = getattr(self._service.service(entity.id), '%s_service' % name)()
if not self._module.check_mode:
for obj_id in objs_names[obj]:
label_service.service(obj_id).remove()
self.changed = True
def update_check(self, entity):
self._update_label_assignments(entity, 'vms', otypes.Vm)
self._update_label_assignments(entity, 'hosts', otypes.Host)
return True
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
cluster=dict(default=None),
name=dict(default=None, required=True),
vms=dict(default=None, type='list'),
hosts=dict(default=None, type='list'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
('state', 'present', ['cluster']),
],
)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
affinity_labels_service = connection.system_service().affinity_labels_service()
affinity_labels_module = AffinityLabelsModule(
connection=connection,
module=module,
service=affinity_labels_service,
)
state = module.params['state']
if state == 'present':
ret = affinity_labels_module.create()
elif state == 'absent':
ret = affinity_labels_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 |
trenshy/repo | plugin.video.armagedomfilmes/mechanize/_mozillacookiejar.py | 149 | 6321 | """Mozilla / Netscape cookie loading / saving.
Copyright 2002-2006 John J Lee <[email protected]>
Copyright 1997-1999 Gisle Aas (original libwww-perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import re, time, logging
from _clientcookie import reraise_unmasked_exceptions, FileCookieJar, Cookie, \
MISSING_FILENAME_TEXT, LoadError
debug = logging.getLogger("ClientCookie").debug
class MozillaCookieJar(FileCookieJar):
"""
    WARNING: you may want to back up your browser's cookies file if you use
this class to save cookies. I *think* it works, but there have been
bugs in the past!
This class differs from CookieJar only in the format it uses to save and
load cookies to and from a file. This class uses the Mozilla/Netscape
`cookies.txt' format. lynx uses this file format, too.
Don't expect cookies saved while the browser is running to be noticed by
the browser (in fact, Mozilla on unix will overwrite your saved cookies if
you change them on disk while it's running; on Windows, you probably can't
save at all while the browser is running).
Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
Netscape cookies on saving.
In particular, the cookie version and port number information is lost,
together with information about whether or not Path, Port and Discard were
specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
domain as set in the HTTP header started with a dot (yes, I'm aware some
domains in Netscape files start with a dot and some don't -- trust me, you
really don't want to know any more about this).
Note that though Mozilla and Netscape use the same format, they use
slightly different headers. The class saves cookies using the Netscape
header by default (Mozilla can cope with that).
"""
magic_re = "#( Netscape)? HTTP Cookie File"
header = """\
# Netscape HTTP Cookie File
# http://www.netscape.com/newsref/std/cookie_spec.html
# This is a generated file! Do not edit.
"""
def _really_load(self, f, filename, ignore_discard, ignore_expires):
now = time.time()
magic = f.readline()
if not re.search(self.magic_re, magic):
f.close()
raise LoadError(
"%s does not look like a Netscape format cookies file" %
filename)
try:
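            # Each cookies.txt line is tab-separated:
            # domain, domain_specified flag, path, secure flag, expires, name, value.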
while 1:
line = f.readline()
if line == "": break
# last field may be absent, so keep any trailing tab
if line.endswith("\n"): line = line[:-1]
# skip comments and blank lines XXX what is $ for?
if (line.strip().startswith("#") or
line.strip().startswith("$") or
line.strip() == ""):
continue
domain, domain_specified, path, secure, expires, name, value = \
line.split("\t", 6)
secure = (secure == "TRUE")
domain_specified = (domain_specified == "TRUE")
if name == "":
name = value
value = None
initial_dot = domain.startswith(".")
if domain_specified != initial_dot:
raise LoadError("domain and domain specified flag don't "
"match in %s: %s" % (filename, line))
discard = False
if expires == "":
expires = None
discard = True
# assume path_specified is false
c = Cookie(0, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
{})
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except:
reraise_unmasked_exceptions((IOError, LoadError))
raise LoadError("invalid Netscape format file %s: %s" %
(filename, line))
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename, "w")
try:
debug("Saving Netscape cookies.txt file")
f.write(self.header)
now = time.time()
for cookie in self:
if not ignore_discard and cookie.discard:
debug(" Not saving %s: marked for discard", cookie.name)
continue
if not ignore_expires and cookie.is_expired(now):
debug(" Not saving %s: expired", cookie.name)
continue
if cookie.secure: secure = "TRUE"
else: secure = "FALSE"
if cookie.domain.startswith("."): initial_dot = "TRUE"
else: initial_dot = "FALSE"
if cookie.expires is not None:
expires = str(cookie.expires)
else:
expires = ""
if cookie.value is None:
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas cookielib regards it as a
# cookie with no value.
name = ""
value = cookie.name
else:
name = cookie.name
value = cookie.value
f.write(
"\t".join([cookie.domain, initial_dot, cookie.path,
secure, expires, name, value])+
"\n")
finally:
f.close()
| gpl-2.0 |
ProjectCalla/SomeCrawler | somecrawler/crawler/crawl/OsirisResults.py | 1 | 1777 | __author__ = 'j'
from somecrawler.config import OsirisConfig, LinkConfig, XpathConfig as xpathConf
from lxml import etree
from somecrawler.crawler.crawl import Base
class OsirisResultsProducer(Base.BaseProducer):
name = OsirisConfig.PRODUCER_NAME
user = None
browser = None
def __init__(self, user, selenium_things):
Base.BaseProducer.__init__(self)
self.user = user
self.browser = selenium_things.browser
def start(self):
print "Starting Osiris Results producer"
return self.getResults(self.browser)
def getResults(self, browser):
browser.get(LinkConfig.OSIRIS_RESULTS)
return etree.HTML(browser.page_source)
class OsirisResultsConsumer(Base.BaseConsumer):
name = OsirisConfig.CONSUMER_NAME
def start(self):
result = self.parse()
def parse(self):
items = {}
for i in range(2, 17):
item = {}
item['test_date'] = self.source.xpath(xpathConf.OSIRIS_RESULTS_MAIN.format(i, 1))[0]
item['course_code'] = self.source.xpath(xpathConf.OSIRIS_RESULTS_MAIN.format(i, 2))[0]
item['course'] = self.source.xpath(xpathConf.OSIRIS_RESULTS_MAIN.format(i, 3))[0]
item['exam_type'] = self.source.xpath(xpathConf.OSIRIS_RESULTS_MAIN.format(i, 4))[0]
item['professor'] = self.source.xpath(xpathConf.OSIRIS_RESULTS_MAIN.format(i, 5))[0]
item['weging'] = self.source.xpath(xpathConf.OSIRIS_RESULTS_MAIN.format(i, 6))[0]
item['result'] = self.source.xpath(xpathConf.OSIRIS_RESULTS_MAIN.format(i, 8))[0]
item['mutation_date'] = self.source.xpath(xpathConf.OSIRIS_RESULTS_MAIN.format(i, 10))[0]
items[str(i)] = item
print items
return items
| gpl-3.0 |
rzarzynski/tempest | tempest/api/volume/test_snapshot_metadata.py | 1 | 4473 | # Copyright 2013 Huawei Technologies Co.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest import test
class SnapshotV2MetadataTestJSON(base.BaseVolumeTest):
@classmethod
def setup_clients(cls):
super(SnapshotV2MetadataTestJSON, cls).setup_clients()
cls.client = cls.snapshots_client
@classmethod
def resource_setup(cls):
super(SnapshotV2MetadataTestJSON, cls).resource_setup()
# Create a volume
cls.volume = cls.create_volume()
# Create a snapshot
cls.snapshot = cls.create_snapshot(volume_id=cls.volume['id'])
cls.snapshot_id = cls.snapshot['id']
def tearDown(self):
# Update the metadata to {}
self.client.update_snapshot_metadata(self.snapshot_id, {})
super(SnapshotV2MetadataTestJSON, self).tearDown()
@test.attr(type='gate')
@test.idempotent_id('a2f20f99-e363-4584-be97-bc33afb1a56c')
def test_create_get_delete_snapshot_metadata(self):
# Create metadata for the snapshot
metadata = {"key1": "value1",
"key2": "value2",
"key3": "value3"}
expected = {"key2": "value2",
"key3": "value3"}
body = self.client.create_snapshot_metadata(self.snapshot_id,
metadata)
# Get the metadata of the snapshot
body = self.client.get_snapshot_metadata(self.snapshot_id)
self.assertEqual(metadata, body)
# Delete one item metadata of the snapshot
self.client.delete_snapshot_metadata_item(
self.snapshot_id, "key1")
body = self.client.get_snapshot_metadata(self.snapshot_id)
self.assertEqual(expected, body)
@test.attr(type='gate')
@test.idempotent_id('bd2363bc-de92-48a4-bc98-28943c6e4be1')
def test_update_snapshot_metadata(self):
# Update metadata for the snapshot
metadata = {"key1": "value1",
"key2": "value2",
"key3": "value3"}
update = {"key3": "value3_update",
"key4": "value4"}
# Create metadata for the snapshot
body = self.client.create_snapshot_metadata(self.snapshot_id,
metadata)
# Get the metadata of the snapshot
body = self.client.get_snapshot_metadata(self.snapshot_id)
self.assertEqual(metadata, body)
# Update metadata item
body = self.client.update_snapshot_metadata(
self.snapshot_id, update)
# Get the metadata of the snapshot
body = self.client.get_snapshot_metadata(self.snapshot_id)
self.assertEqual(update, body)
@test.attr(type='gate')
@test.idempotent_id('e8ff85c5-8f97-477f-806a-3ac364a949ed')
def test_update_snapshot_metadata_item(self):
# Update metadata item for the snapshot
metadata = {"key1": "value1",
"key2": "value2",
"key3": "value3"}
update_item = {"key3": "value3_update"}
expect = {"key1": "value1",
"key2": "value2",
"key3": "value3_update"}
# Create metadata for the snapshot
body = self.client.create_snapshot_metadata(self.snapshot_id,
metadata)
# Get the metadata of the snapshot
body = self.client.get_snapshot_metadata(self.snapshot_id)
self.assertEqual(metadata, body)
# Update metadata item
body = self.client.update_snapshot_metadata_item(
self.snapshot_id, "key3", update_item)
# Get the metadata of the snapshot
body = self.client.get_snapshot_metadata(self.snapshot_id)
self.assertEqual(expect, body)
class SnapshotV1MetadataTestJSON(SnapshotV2MetadataTestJSON):
_api_version = 1
| apache-2.0 |
marcinn/midicontrol | midicontrol/controller.py | 1 | 2888 | import os
from subprocess import Popen, PIPE
from .events import (
EventsManager, Event,
EV_KEYUP, EV_KEYDOWN, EV_SLIDE)
from .utils import debug
from . import x11
class Controller(object):
def __init__(self, target_window_name=None):
self.target_window_name = target_window_name
def parse_and_send_commands(event, cmds):
self.send_commands(self.parse_cmds(cmds, event))
self.event_manager = EventsManager(callback=parse_and_send_commands)
FNULL = open(os.devnull, 'w')
self.xte = Popen(['xte'], stdin=PIPE, stdout=FNULL)
@property
def modifiers(self):
return list(map(lambda x: x[0], filter(
lambda x: x[1], self.event_manager._modifiers.items())))
def enable_modifier(self, modifier):
self.event_manager._modifiers[modifier] = True
def disable_modifier(self, modifier):
self.event_manager._modifiers[modifier] = False
def handle_midi_event(self, midi_event, data=None):
code = midi_event.getControllerNumber()
val = midi_event.getControllerValue()
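        # Buttons produce key down/up events; continuous controllers produce slide events.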
if self.is_button(code):
event = Event(
EV_KEYDOWN if val > 0 else EV_KEYUP,
value=int(val>0), code=code)
else:
event = Event(EV_SLIDE, value=val, code=code)
self.event_manager.emit(event)
self.event_manager.update()
def send_commands(self, cmds):
if cmds:
debug("COMMANDS: %s" % cmds)
if (not self.target_window_name or
x11.is_window_in_focus(self.target_window_name)):
for cmd in cmds:
self.xte.stdin.write((cmd+'\n').encode())
self.xte.stdin.flush()
def parse_cmds(self, cmds, event):
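        # Recursively resolve the command spec: callables are invoked with the
        # event, bare strings are wrapped in a list, and nested lists/tuples are
        # flattened into a flat list of xte command strings.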
if callable(cmds):
cmds = self.parse_cmds(cmds(event), event)
if isinstance(cmds, str):
cmds = [cmds]
out = []
for cmd in cmds:
if callable(cmd):
cmd = cmd(event)
if isinstance(cmd, (list, tuple)):
out += self.parse_cmds(cmd, event)
else:
out.append(cmd)
return out
def run(self):
self.event_manager.loop()
def keyup(self, *args, **kwargs):
self.event_manager.keyup(*args, **kwargs)
def keydown(self, *args, **kwargs):
self.event_manager.keydown(*args, **kwargs)
def slideup(self, *args, **kwargs):
self.event_manager.slideup(*args, **kwargs)
def slidedown(self, *args, **kwargs):
self.event_manager.slidedown(*args, **kwargs)
def longpress(self, *args, **kwargs):
self.event_manager.longpress(*args, **kwargs)
def doublehit(self, *args, **kwargs):
self.event_manager.doublehit(*args, **kwargs)
def is_button(self, code):
return True
| bsd-3-clause |
Jobava/pootle | docs/server/apache-wsgi.py | 7 | 1406 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import site
import sys
# You probably will need to change these paths to match your deployment,
# most likely because of the Python version you are using.
ALLDIRS = [
'/var/www/pootle/env/lib/python2.7/site-packages',
'/var/www/pootle/env/lib/python2.7/site-packages/pootle/apps',
]
# Remember original sys.path.
prev_sys_path = list(sys.path)
# Add each new site-packages directory.
for directory in ALLDIRS:
site.addsitedir(directory)
# Reorder sys.path so new directories at the front.
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
# Set the Pootle settings module as DJANGO_SETTINGS_MODULE.
os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
# Set the WSGI application.
def application(environ, start_response):
"""Wrapper for Django's WSGIHandler().
    This makes it possible to read values specified by SetEnv in the Apache
    configuration, or to interpose other changes to that environment, such as
    installing middleware.
"""
try:
os.environ['POOTLE_SETTINGS'] = environ['POOTLE_SETTINGS']
except KeyError:
pass
from django.core.wsgi import get_wsgi_application
_wsgi_application = get_wsgi_application()
return _wsgi_application(environ, start_response)
| gpl-3.0 |
SlimRemix/android_external_chromium_org | tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/smoke.py | 34 | 2466 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class SmokePage(page_module.Page):
def __init__(self, url, page_set, name=''):
super(SmokePage, self).__init__(url=url, page_set=page_set, name=name)
self.archive_data_file = '../data/chrome_proxy_smoke.json'
class Page1(SmokePage):
"""
Why: Check chrome proxy response headers.
"""
def __init__(self, page_set):
super(Page1, self).__init__(
url='http://aws1.mdw.la/fw/',
page_set=page_set,
name='header validation')
class Page2(SmokePage):
"""
Why: Check data compression
"""
def __init__(self, page_set):
super(Page2, self).__init__(
url='http://aws1.mdw.la/static/',
page_set=page_set,
name='compression: image')
class Page3(SmokePage):
"""
Why: Check bypass
"""
def __init__(self, page_set):
super(Page3, self).__init__(
url='http://aws1.mdw.la/bypass/',
page_set=page_set,
name='bypass')
self.restart_after = True
class Page4(SmokePage):
"""
Why: Check data compression
"""
def __init__(self, page_set):
super(Page4, self).__init__(
url='http://aws1.mdw.la/static/',
page_set=page_set,
name='compression: javascript')
class Page5(SmokePage):
"""
Why: Check data compression
"""
def __init__(self, page_set):
super(Page5, self).__init__(
url='http://aws1.mdw.la/static/',
page_set=page_set,
name='compression: css')
class Page6(SmokePage):
"""
Why: Expect 'malware ahead' page. Use a short navigation timeout because no
response will be received.
"""
def __init__(self, page_set):
super(Page6, self).__init__(
url='http://www.ianfette.org/',
page_set=page_set,
name='safebrowsing')
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self, timeout_in_seconds=5)
class SmokePageSet(page_set_module.PageSet):
""" Chrome proxy test sites """
def __init__(self):
super(SmokePageSet, self).__init__(
archive_data_file='../data/chrome_proxy_smoke.json')
self.AddPage(Page1(self))
self.AddPage(Page2(self))
self.AddPage(Page3(self))
self.AddPage(Page4(self))
self.AddPage(Page5(self))
self.AddPage(Page6(self))
| bsd-3-clause |
mikebrevard/UnixAdministration | vagrant/etc/data/genData/venv/lib/python3.4/site-packages/setuptools/command/upload_docs.py | 390 | 6782 | # -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
from distutils.command.upload import upload
import os
import socket
import zipfile
import tempfile
import sys
import shutil
from setuptools.compat import httplib, urlparse, unicode, iteritems, PY3
from pkg_resources import iter_entry_points
errors = 'surrogateescape' if PY3 else 'strict'
# This is not just a replacement for byte literals
# but works as a general purpose encoder
def b(s, encoding='utf-8'):
if isinstance(s, unicode):
return s.encode(encoding, errors)
return s
class upload_docs(upload):
description = 'Upload documentation to PyPI'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('upload-dir=', None, 'directory to upload'),
]
boolean_options = upload.boolean_options
def has_sphinx(self):
if self.upload_dir is None:
for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
return True
sub_commands = [('build_sphinx', has_sphinx)]
def initialize_options(self):
upload.initialize_options(self)
self.upload_dir = None
self.target_dir = None
def finalize_options(self):
upload.finalize_options(self)
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command('build_sphinx')
self.target_dir = build_sphinx.builder_target_dir
else:
build = self.get_finalized_command('build')
self.target_dir = os.path.join(build.build_base, 'docs')
else:
self.ensure_dirname('upload_dir')
self.target_dir = self.upload_dir
self.announce('Using upload directory %s' % self.target_dir)
def create_zipfile(self, filename):
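        # Walk the documentation directory and add every file to the zip,
        # storing paths relative to the target directory.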
zip_file = zipfile.ZipFile(filename, "w")
try:
self.mkpath(self.target_dir) # just in case
for root, dirs, files in os.walk(self.target_dir):
if root == self.target_dir and not files:
raise DistutilsOptionError(
"no files found in upload directory '%s'"
% self.target_dir)
for name in files:
full = os.path.join(root, name)
relative = root[len(self.target_dir):].lstrip(os.path.sep)
dest = os.path.join(relative, name)
zip_file.write(full, dest)
finally:
zip_file.close()
def run(self):
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
tmp_dir = tempfile.mkdtemp()
name = self.distribution.metadata.get_name()
zip_file = os.path.join(tmp_dir, "%s.zip" % name)
try:
self.create_zipfile(zip_file)
self.upload_file(zip_file)
finally:
shutil.rmtree(tmp_dir)
def upload_file(self, filename):
f = open(filename, 'rb')
content = f.read()
f.close()
meta = self.distribution.metadata
data = {
':action': 'doc_upload',
'name': meta.get_name(),
'content': (os.path.basename(filename), content),
}
# set up the authentication
credentials = b(self.username + ':' + self.password)
credentials = standard_b64encode(credentials)
if PY3:
credentials = credentials.decode('ascii')
auth = "Basic " + credentials
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b('\n--') + b(boundary)
end_boundary = sep_boundary + b('--')
body = []
for key, values in iteritems(data):
title = '\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(values, list):
values = [values]
for value in values:
if type(value) is tuple:
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = b(value)
body.append(sep_boundary)
body.append(b(title))
body.append(b("\n\n"))
body.append(value)
if value and value[-1:] == b('\r'):
body.append(b('\n')) # write an extra newline (lurve Macs)
body.append(end_boundary)
body.append(b("\n"))
body = b('').join(body)
self.announce("Submitting documentation to %s" % (self.repository),
log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
conn = httplib.HTTPConnection(netloc)
elif schema == 'https':
conn = httplib.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema " + schema)
data = ''
try:
conn.connect()
conn.putrequest("POST", url)
content_type = 'multipart/form-data; boundary=%s' % boundary
conn.putheader('Content-type', content_type)
conn.putheader('Content-length', str(len(body)))
conn.putheader('Authorization', auth)
conn.endheaders()
conn.send(body)
except socket.error as e:
self.announce(str(e), log.ERROR)
return
r = conn.getresponse()
if r.status == 200:
self.announce('Server response (%s): %s' % (r.status, r.reason),
log.INFO)
elif r.status == 301:
location = r.getheader('Location')
if location is None:
location = 'https://pythonhosted.org/%s/' % meta.get_name()
self.announce('Upload successful. Visit %s' % location,
log.INFO)
else:
self.announce('Upload failed (%s): %s' % (r.status, r.reason),
log.ERROR)
if self.show_response:
print('-' * 75, r.read(), '-' * 75)
| mit |
Changaco/oh-mainline | vendor/packages/PyYaml/tests/lib3/test_resolver.py | 62 | 3298 |
import yaml
import pprint
def test_implicit_resolver(data_filename, detect_filename, verbose=False):
correct_tag = None
node = None
try:
correct_tag = open(detect_filename, 'r').read().strip()
node = yaml.compose(open(data_filename, 'rb'))
assert isinstance(node, yaml.SequenceNode), node
for scalar in node.value:
assert isinstance(scalar, yaml.ScalarNode), scalar
assert scalar.tag == correct_tag, (scalar.tag, correct_tag)
finally:
if verbose:
print("CORRECT TAG:", correct_tag)
if hasattr(node, 'value'):
print("CHILDREN:")
pprint.pprint(node.value)
test_implicit_resolver.unittest = ['.data', '.detect']
def _make_path_loader_and_dumper():
global MyLoader, MyDumper
class MyLoader(yaml.Loader):
pass
class MyDumper(yaml.Dumper):
pass
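    # Register path-based resolvers: each maps a node path (and optional node
    # kind) to an explicit tag on the custom Loader/Dumper pair.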
yaml.add_path_resolver('!root', [],
Loader=MyLoader, Dumper=MyDumper)
yaml.add_path_resolver('!root/scalar', [], str,
Loader=MyLoader, Dumper=MyDumper)
yaml.add_path_resolver('!root/key11/key12/*', ['key11', 'key12'],
Loader=MyLoader, Dumper=MyDumper)
yaml.add_path_resolver('!root/key21/1/*', ['key21', 1],
Loader=MyLoader, Dumper=MyDumper)
yaml.add_path_resolver('!root/key31/*/*/key14/map', ['key31', None, None, 'key14'], dict,
Loader=MyLoader, Dumper=MyDumper)
return MyLoader, MyDumper
def _convert_node(node):
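    # Recursively convert a composed YAML node tree into nested (tag, value)
    # tuples so that two trees can be compared structurally.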
if isinstance(node, yaml.ScalarNode):
return (node.tag, node.value)
elif isinstance(node, yaml.SequenceNode):
value = []
for item in node.value:
value.append(_convert_node(item))
return (node.tag, value)
elif isinstance(node, yaml.MappingNode):
value = []
for key, item in node.value:
value.append((_convert_node(key), _convert_node(item)))
return (node.tag, value)
def test_path_resolver_loader(data_filename, path_filename, verbose=False):
_make_path_loader_and_dumper()
nodes1 = list(yaml.compose_all(open(data_filename, 'rb').read(), Loader=MyLoader))
nodes2 = list(yaml.compose_all(open(path_filename, 'rb').read()))
try:
for node1, node2 in zip(nodes1, nodes2):
data1 = _convert_node(node1)
data2 = _convert_node(node2)
assert data1 == data2, (data1, data2)
finally:
if verbose:
print(yaml.serialize_all(nodes1))
test_path_resolver_loader.unittest = ['.data', '.path']
def test_path_resolver_dumper(data_filename, path_filename, verbose=False):
_make_path_loader_and_dumper()
for filename in [data_filename, path_filename]:
output = yaml.serialize_all(yaml.compose_all(open(filename, 'rb')), Dumper=MyDumper)
if verbose:
print(output)
nodes1 = yaml.compose_all(output)
nodes2 = yaml.compose_all(open(data_filename, 'rb'))
for node1, node2 in zip(nodes1, nodes2):
data1 = _convert_node(node1)
data2 = _convert_node(node2)
assert data1 == data2, (data1, data2)
test_path_resolver_dumper.unittest = ['.data', '.path']
if __name__ == '__main__':
import test_appliance
test_appliance.run(globals())
| agpl-3.0 |
rismalrv/edx-platform | common/lib/capa/capa/tests/__init__.py | 129 | 2700 | """Tools for helping with testing capa."""
import gettext
import os
import os.path
import fs.osfs
from capa.capa_problem import LoncapaProblem, LoncapaSystem
from capa.inputtypes import Status
from mock import Mock, MagicMock
import xml.sax.saxutils as saxutils
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
def tst_render_template(template, context):
"""
A test version of render to template. Renders to the repr of the context, completely ignoring
the template name. To make the output valid xml, quotes the content, and wraps it in a <div>
"""
return '<div>{0}</div>'.format(saxutils.escape(repr(context)))
def calledback_url(dispatch='score_update'):
return dispatch
xqueue_interface = MagicMock()
xqueue_interface.send_to_queue.return_value = (0, 'Success!')
def test_capa_system():
"""
Construct a mock LoncapaSystem instance.
"""
the_system = Mock(
spec=LoncapaSystem,
ajax_url='/dummy-ajax-url',
anonymous_student_id='student',
cache=None,
can_execute_unsafe_code=lambda: False,
get_python_lib_zip=lambda: None,
DEBUG=True,
filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
i18n=gettext.NullTranslations(),
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
render_template=tst_render_template,
seed=0,
STATIC_URL='/dummy-static/',
STATUS_CLASS=Status,
xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10},
)
return the_system
def mock_capa_module():
"""
capa response types needs just two things from the capa_module: location and track_function.
"""
capa_module = Mock()
capa_module.location.to_deprecated_string.return_value = 'i4x://Foo/bar/mock/abc'
# The following comes into existence by virtue of being called
# capa_module.runtime.track_function
return capa_module
def new_loncapa_problem(xml, capa_system=None, seed=723):
"""Construct a `LoncapaProblem` suitable for unit tests."""
return LoncapaProblem(xml, id='1', seed=seed, capa_system=capa_system or test_capa_system(),
capa_module=mock_capa_module())
def load_fixture(relpath):
"""
Return a `unicode` object representing the contents
of the fixture file at the given path within a test_files directory
in the same directory as the test file.
"""
abspath = os.path.join(os.path.dirname(__file__), 'test_files', relpath)
with open(abspath) as fixture_file:
contents = fixture_file.read()
return contents.decode('utf8')
| agpl-3.0 |
nerdymcnerdyson/pythonPlay | converter/TweeUtilities/Nodes/LinkNode.py | 1 | 1431 | #from . import NodeBase
#from . import NodeRegExes
from TweeUtilities.Nodes import *
import logging
class LinkNode(NodeBase.SequenceNode):
def __init__(self, target, delay, text):
self.type = NodeBase.SequenceNodeType.link
self.target = target
self.delay = delay
self.text = text
logger = logging.getLogger(__name__+"."+(type(self).__name__))
# 'application' code
# logger.debug('debug message')
# logger.info('info message')
# logger.warn('warn message')
# logger.error('error message')
# logger.critical('critical message')
#factory method.. returns instance of class or None
@staticmethod
def tryIsNodeType(inputString):
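        # Parse the first [[...]] link token: the optional 'short^' prefix is
        # split off first, then the remainder is split on '|' into the display
        # text and the target passage name.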
links = NodeRegExes.LinkTokenRegex.findall(inputString)
if len(links) == 0:
return None
choice = links[0]
choice = choice.strip().strip('[').strip(']')
shortEndIndex = choice.find('^')
short = ""
if shortEndIndex != -1:
short = choice[:shortEndIndex]
choice = choice[shortEndIndex+1:]
breakIndex = choice.find('|')
full = choice[:breakIndex]
#if len(short) <= 0:
#short = full
target = choice[breakIndex+1:]
return LinkNode(target, short,full)
def javascriptOutputString(self):
return '{"type":"link","js":"%s"}'%self.target
| apache-2.0 |
guaka/trust-metrics | examples/dataset_all_methods.py | 1 | 3434 | """
Create a network and call all its methods on it.
"""
# make sure example can be run from examples/ directory
# import sys
# sys.path.append('../trustlet')
from trustlet import *
def call_all_methods(N):
"""Call a bunch of methods of the network."""
def call_methods(method_names):
"""Helper, works with list or with just one."""
for method_name in list(method_names):
if hasattr(N, method_name):
method = getattr(N, method_name)
if hasattr(method, "__call__"):
if method_name[:4] == "show":
method()
else:
print method_name, method()
else:
print method_name, method #is not actually a method
else:
print N.__class__.__name__, "does not have the method", method_name
print "\n\nNetwork: ", N.__class__.__name__
call_methods(("number_of_nodes", "number_of_edges", "is_directed"))
print "\nDEGREES"
call_methods(("avg_degree", "std_in_degree", "std_out_degree"))
#TODO: print "get_degree_correlation_coefficient()="+N.get_degree_correlation_coefficient()
call_methods(("degree_histogram", "powerlaw_exponent"))
print "\nCOMPONENTS"
call_methods(("is_connected",
"is_strongly_connected",
"connected_components_size",
"strongly_connected_components_size",
"average_clustering",
"transitivity",
"avg_shortest_distance",
))
print "\nWEIGHTS ON EDGES"
call_methods(("is_weighted",
"has_discrete_weights",
"weights",
#"min_weight", #doesn't work
#"max_weight",
))
# TODO: return the number of edges whose value is the smallest of the possible values
# print "get_number_of_edges(get_weights()[0])", N.get_number_of_edges(get_weights()[0])
print "\nRECIPROCATION"
call_methods(("link_reciprocity",
"show_reciprocity_matrix"
))
print "\nCONTROVERSIALITY"
# get the average controversiality of users with at least 3 incoming edges
# controversiality could be simply the standard deviation of received trust statements
call_methods(("avg_controversiality",
"controversial_nodes",
))
# create some datasets
dummy = DummyNetwork()
dummy_weighted = DummyWeightedNetwork()
# unweighted = DummyUnweightedNetwork() #on unweighted networks, some methods should return nothing
# undirected = DummyUndirectedNetwork() #on undirected networks, some methods should return nothing
# unconnected_undirected_unweighted = DummyUnconnectedUndirectedUnweighted() # should read from "data/unconnected_undirected_unweighted.dot"
# unconnected_directed_weighted = DummyUnconnectedUndirectedUnweighted() # should read from "data/unconnected_directed_weighted.net"
squeak_network = SqueakfoundationNetwork(download=True)
# advogato_network = AdvogatoNetwork()
# datasets = [dummy, unweighted, undirected, unconnected_undirected_unweighted, unconnected_directed_weighted, advogato]
datasets = [dummy,
dummy_weighted,
squeak_network,
# advogato_network,
]
for network in datasets:
call_all_methods(network)
| gpl-2.0 |
ssssam/rdflib | rdflib/plugins/parsers/pyRdfa/transform/__init__.py | 23 | 4436 | # -*- coding: utf-8 -*-
"""
Transformer sub-package for the pyRdfa package. It contains modules with transformer functions; each may be
invoked by pyRdfa to transform the dom tree before the "real" RDfa processing.
@summary: RDFa Transformer package
@requires: U{RDFLib package<http://rdflib.net>}
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
"""
"""
$Id: __init__.py,v 1.8 2012/06/12 11:47:19 ivan Exp $
$Date: 2012/06/12 11:47:19 $
"""
__version__ = "3.0"
# Here are the transformer functions that are to be performed for all RDFa files, no matter what
def top_about(root, options, state) :
"""
@param root: a DOM node for the top level element
@param options: invocation options
@type options: L{Options<pyRdfa.options>}
@param state: top level execution state
@type state: L{State<pyRdfa.state>}
"""
def set_about(node) :
if has_one_of_attributes(node, "rel", "rev") :
if not has_one_of_attributes(top, "about", "src") :
node.setAttribute("about","")
else :
if not has_one_of_attributes(node, "href", "resource", "about", "src") :
node.setAttribute("about","")
from ..host import HostLanguage
from ..utils import has_one_of_attributes
if not has_one_of_attributes(root, "about") :
# The situation is a bit complicated: if a @resource is present without anything else, then it sets
# the subject, ie, should be accepted...
if has_one_of_attributes(root, "resource", "href", "src") :
if has_one_of_attributes(root, "rel", "rev","property") :
root.setAttribute("about","")
else :
root.setAttribute("about","")
if options.host_language in [ HostLanguage.xhtml, HostLanguage.html5, HostLanguage.xhtml5 ] :
if state.rdfa_version >= "1.1" :
pass
else :
for top in root.getElementsByTagName("head") :
if not has_one_of_attributes(top, "href", "resource", "about", "src") :
set_about(top)
for top in root.getElementsByTagName("body") :
if not has_one_of_attributes(top, "href", "resource", "about", "src") :
set_about(top)
def empty_safe_curie(node, options, state) :
"""
Remove the attributes whose value is an empty safe curie. It also adds an 'artificial' flag, ie, an
attribute (called 'emptysc') into the node to signal that there _is_ an attribute with an ignored
safe curie value. The name of the attribute is 'about_pruned' or 'resource_pruned'.
@param node: a DOM node for the top level element
@param options: invocation options
@type options: L{Options<pyRdfa.options>}
@param state: top level execution state
@type state: L{State<pyRdfa.state>}
"""
def prune_safe_curie(node,name) :
if node.hasAttribute(name) :
av = node.getAttribute(name)
if av == '[]' :
node.removeAttribute(name)
node.setAttribute(name+'_pruned','')
msg = "Attribute @%s uses an empty safe CURIE; the attribute is ignored" % name
options.add_warning(msg, node=node)
prune_safe_curie(node, "about")
prune_safe_curie(node, "resource")
for n in node.childNodes :
if n.nodeType == node.ELEMENT_NODE :
empty_safe_curie(n, options, state)
def vocab_for_role(node, options, state) :
"""
The value of the @role attribute (defined separately in the U{Role Attribute Specification Lite<http://www.w3.org/TR/role-attribute/#using-role-in-conjunction-with-rdfa>}) should be as if a @vocab value to the
XHTML vocabulary was defined for it. This method turns all terms in role attributes into full URI-s, so that
this would not be an issue for the run-time.
@param node: a DOM node for the top level element
@param options: invocation options
@type options: L{Options<pyRdfa.options>}
@param state: top level execution state
@type state: L{State<pyRdfa.state>}
"""
from ..termorcurie import termname, XHTML_URI
def handle_role(node) :
if node.hasAttribute("role") :
old_values = node.getAttribute("role").strip().split()
new_values = ""
for val in old_values :
if termname.match(val) :
new_values += XHTML_URI + val + ' '
else :
new_values += val + ' '
node.setAttribute("role", new_values.strip())
handle_role(node)
for n in node.childNodes :
if n.nodeType == node.ELEMENT_NODE :
vocab_for_role(n, options, state)
| bsd-3-clause |
bcwaldon/changeling | changeling/storage.py | 1 | 2662 | import json
import boto.s3.connection
import changeling.exception
class S3Storage(object):
def __init__(self, access, secret, bucket):
self.access_key = access
self.secret_key = secret
self.bucket_name = bucket
self._connection = None
def initialize(self):
#NOTE(bcwaldon): This operation is idempotent
self.connection.create_bucket(self.bucket_name)
@property
def connection(self):
if self._connection is None:
self._connection = boto.s3.connection.S3Connection(self.access_key,
self.secret_key)
return self._connection
@property
def bucket(self):
return self.connection.get_bucket(self.bucket_name)
@staticmethod
def _change_key(change_id):
return '%s.change' % change_id
@staticmethod
def _history_key(change_id):
return '%s.history' % change_id
def list_changes(self):
objects = self.bucket.list()
for obj in objects:
if obj.name.endswith('.change'):
yield json.loads(obj.get_contents_as_string())
def get_change(self, change_id):
key = self.bucket.get_key(self._change_key(change_id))
if key is None:
raise changeling.exception.ChangeNotFound(change_id)
return json.loads(key.get_contents_as_string())
def save_change(self, change_id, change_data):
key = self.bucket.new_key(self._change_key(change_id))
key.set_contents_from_string(json.dumps(change_data))
def delete_change(self, change_id):
key = self.bucket.get_key(self._change_key(change_id))
try:
key.delete()
except AttributeError:
raise changeling.exception.ChangeNotFound(change_id)
def extend_change_history(self, change_id, item_data):
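        # Read-modify-write: load the existing history list from S3 (or start a
        # new one), append the item and write the list back.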
key = self.bucket.new_key(self._history_key(change_id))
try:
key_contents = key.get_contents_as_string()
history = json.loads(key_contents)
except boto.exception.S3ResponseError:
history = []
history.append(item_data)
key.set_contents_from_string(json.dumps(history))
def get_change_history(self, change_id):
key = self.bucket.get_key(self._history_key(change_id))
try:
return json.loads(key.get_contents_as_string())
except AttributeError:
return []
def StorageFactory(config):
obj = S3Storage(config['s3.access_key'],
config['s3.secret_key'],
config['s3.bucket'])
obj.initialize()
return obj
| apache-2.0 |
open-synergy/opnsynid-accounting-report | opnsynid_income_statement_aeroo_report/wizards/wizard_income_statement.py | 1 | 2705 | # -*- coding: utf-8 -*-
# Copyright 2015 OpenSynergy Indonesia
# Copyright 2020 PT. Simetri Sinergi Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api, osv
from openerp.tools.translate import _
class WizardIncomeStatement(models.TransientModel):
_name = "account.wizard_income_statement"
_description = "Print Income Statement"
@api.model
def _default_company_id(self):
return self.env.user.company_id.id
@api.model
def _default_fiscalyear_id(self):
fiscalyear_id = self.env["account.fiscalyear"].find()
return fiscalyear_id or False
@api.model
def _default_period_id(self):
period_ids = self.env["account.period"].find()
return period_ids and period_ids[0] or False
company_id = fields.Many2one(
string="Company",
comodel_name="res.company",
required=True,
default=_default_company_id,
)
fiscalyear_id = fields.Many2one(
string="Fiscal Year",
comodel_name="account.fiscalyear",
required=True,
default=_default_fiscalyear_id,
)
period_id = fields.Many2one(
string="Period",
comodel_name="account.period",
required=True,
default=_default_period_id,
)
output_format = fields.Selection(
string="Output Format",
required=True,
default="ods",
selection=[
("xls", "XLS"),
("ods", "ODS")
]
)
show_zero = fields.Boolean(
string="Show Zero Balance",
default=True,
)
state = fields.Selection(
string="State",
selection=[
("all", "All"),
("draft", "Draft"),
("posted", "Posted")
],
required=True,
default="posted",
)
def button_print_report(self, cr, uid, ids, data, context=None):
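        # Read the wizard values and return the report action that matches the
        # chosen output format.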
datas = {}
output_format = ""
if context is None:
context = {}
datas["form"] = self.read(cr, uid, ids)[0]
if datas["form"]["output_format"] == "xls":
output_format = "report_income_statement_xls"
elif datas["form"]["output_format"] == "ods":
output_format = "report_income_statement_ods"
else:
err = "Output Format cannot be empty"
raise osv.except_osv(_("Warning"), _(err))
return {
"type": "ir.actions.report.xml",
"report_name": output_format,
"datas": datas,
}
@api.onchange(
"fiscalyear_id",
)
def onchange_period_id(self):
if self.fiscalyear_id:
self.period_id = False
| agpl-3.0 |
kived/py4a-updater | pyupdater/client.py | 3 | 1455 | from kivy.clock import Clock
from kivy.event import EventDispatcher
from kivy.lib import osc
from kivy.properties import StringProperty, AliasProperty
from pyupdater import CLIENT_PORT, SERVICE_PORT, MESSAGE_UPDATE_AVAILABLE, MESSAGE_DO_UPDATE, MESSAGE_CHECK_FOR_UPDATE, \
SERVICE_PATH
from pyupdater.util import get_current_version
class UpdateClient(EventDispatcher):
update_version = StringProperty('')
def get_update_available(self):
return bool(self.update_version)
update_available = AliasProperty(get_update_available, bind=('update_version',))
current_version = StringProperty('')
def __init__(self, **kwargs):
super(UpdateClient, self).__init__(**kwargs)
osc.init()
oscid = osc.listen('127.0.0.1', CLIENT_PORT)
osc.bind(oscid, self.recv_osc, SERVICE_PATH)
Clock.schedule_interval(lambda _: osc.readQueue(oscid), 0)
self.current_version = str(get_current_version())
def recv_osc(self, message, *args):
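        # message[2] carries the command; for update notifications message[3]
        # carries the available version number.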
print 'client osc message:', message, args
command = message[2]
if command == MESSAGE_UPDATE_AVAILABLE:
version_number = message[3]
self.update_version = str(version_number)
def send_osc(self, *message):
osc.sendMsg(SERVICE_PATH, message, port=SERVICE_PORT)
def do_update(self, *_):
print 'client: do update'
if self.update_available:
self.send_osc(MESSAGE_DO_UPDATE)
def check_for_update(self, *_):
print 'client: check for updates'
self.send_osc(MESSAGE_CHECK_FOR_UPDATE)
| mit |
Zanzibar82/streamondemand.test | channels/casacinema.py | 1 | 4138 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Canal para casacinema
# http://blog.tvalacarta.info/plugin-xbmc/streamondemand.
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "casacinema"
__category__ = "F,S,A"
__type__ = "generic"
__title__ = "casacinema"
__language__ = "IT"
sito="http://casa-cinema.net/"
def isGeneric():
return True
def mainlist( item ):
logger.info( "streamondemand.casacinema mainlist" )
itemlist = []
itemlist.append( Item( channel=__channel__, title="[COLOR azure]Film - Novita'[/COLOR]", action="peliculas", url=sito, thumbnail="http://dc584.4shared.com/img/XImgcB94/s7/13feaf0b538/saquinho_de_pipoca_01" ) )
itemlist.append( Item( channel=__channel__, title="[COLOR azure]Categorie[/COLOR]", action="categorias", url=sito, thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png" ) )
itemlist.append( Item( channel=__channel__, title="[COLOR azure]Film Sub - Ita[/COLOR]", action="peliculas", url="http://casa-cinema.net/genere/sub-ita", thumbnail="http://i.imgur.com/qUENzxl.png" ) )
itemlist.append( Item( channel=__channel__, title="[COLOR yellow]Cerca...[/COLOR]", action="search", thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search" ) )
return itemlist
def search( item, texto ):
logger.info( "[casacinema.py] " + item.url + " search " + texto )
item.url = sito + "?s=" + texto
try:
return peliculas( item )
    # Catch the exception so that a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
def peliculas( item ):
logger.info( "streamondemand.casacinema peliculas" )
itemlist = []
    ## Download the page
data = scrapertools.cache_page( item.url )
    ## Extract the entries (folders)
patron = '<div class="box-single-movies">\s*'
patron += '<a href="([^>"]+)".*?title="([^>"]+)" >.*?<img class.*?<img.*?src="([^>"]+)"'
matches = re.compile( patron, re.DOTALL ).findall( data )
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
title = scrapertools.decodeHtmlentities( scrapedtitle )
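        # Fetch each film's detail page and extract the plot text from its content block.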
html = scrapertools.cache_page(scrapedurl)
start = html.find("<div class=\"row content-post\" >")
end = html.find("<a class=\"addthis_button_facebook_like\" fb:like:layout=\"button_count\"></a>", start)
scrapedplot = html[start:end]
scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
itemlist.append( Item( channel=__channel__, action="findvideos", title="[COLOR azure]" + title + "[/COLOR]", url=scrapedurl, thumbnail=scrapedthumbnail, fulltitle=title, show=title , plot=scrapedplot , viewmode="movie_with_plot") )
    ## Pagination
next_page = scrapertools.find_single_match( data, 'rel="next" href="([^"]+)"' )
if next_page != "":
itemlist.append( Item( channel=__channel__, action="peliculas", title="[COLOR orange]Successivo >>[/COLOR]", url=next_page, thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png") )
return itemlist
def categorias(item):
logger.info("streamondemand.casacinema categorias")
itemlist = []
data = scrapertools.cache_page( item.url )
# The categories are the options for the combo
patron = '<td[^<]+<a href="([^"]+)">[^>]+>([^<]+)</a>[^/]+/td>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append( Item( channel=__channel__, action="peliculas" , title="[COLOR azure]" + scrapedtitle + "[/COLOR]", url=urlparse.urljoin( sito, scrapedurl ) ) )
return itemlist
| gpl-3.0 |
OpenCMISS/neon | src/opencmiss/neon/ui/problems/biomeng321lab1.py | 3 | 2054 | '''
Copyright 2015 University of Auckland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from opencmiss.neon.ui.problems.base import BaseProblem
from opencmiss.neon.ui.problems.ui_biomeng321lab1 import Ui_Biomeng321Lab1
from opencmiss.neon.core.problems.biomeng321lab1 import BOUNDARY_CONDITIONS
class Biomeng321Lab1(BaseProblem):
def __init__(self, shared_opengl_widget, parent=None):
super(Biomeng321Lab1, self).__init__(parent)
self._ui = Ui_Biomeng321Lab1()
self._ui.setupUi(self)
self._ui.comboBoxBoundaryConditions.clear()
self._ui.comboBoxBoundaryConditions.addItems(BOUNDARY_CONDITIONS)
self._makeConnections()
def _makeConnections(self):
self._ui.comboBoxBoundaryConditions.currentIndexChanged.connect(self._boundaryConditionChanged)
def _boundaryConditionChanged(self, index):
self._problem.setBoundaryCondition(self._ui.comboBoxBoundaryConditions.currentText())
def updateUi(self):
boundary_condition = self._problem.getBoundaryCondition()
if boundary_condition in BOUNDARY_CONDITIONS:
index = BOUNDARY_CONDITIONS.index(boundary_condition)
self._ui.comboBoxBoundaryConditions.setCurrentIndex(index)
def serialize(self):
d = {}
d['problem'] = self._problem.serialize()
return json.dumps(d)
def deserialize(self, string):
d = json.loads(string)
if 'problem' in d:
self._problem.deserialize(d['problem'])
self.updateUi()
| apache-2.0 |
anirudhjayaraman/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 138 | 14048 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
YYWen0o0/python-frame-django | tests/http_utils/tests.py | 33 | 2235 | from __future__ import unicode_literals
import io
import gzip
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.http.utils import conditional_content_removal
from django.test import TestCase
# based on Python 3.3's gzip.compress
def gzip_compress(data):
buf = io.BytesIO()
f = gzip.GzipFile(fileobj=buf, mode='wb', compresslevel=0)
try:
f.write(data)
finally:
f.close()
return buf.getvalue()
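# Illustrative usage of the helper above (a sketch only, not exercised by the tests):
#
#     payload = gzip_compress(b'abc')
#     assert gzip.GzipFile(fileobj=io.BytesIO(payload)).read() == b'abc'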
class HttpUtilTests(TestCase):
def test_conditional_content_removal(self):
"""
Tests that content is removed from regular and streaming responses with
a status_code of 100-199, 204, 304 or a method of "HEAD".
"""
req = HttpRequest()
# Do nothing for 200 responses.
res = HttpResponse('abc')
conditional_content_removal(req, res)
self.assertEqual(res.content, b'abc')
res = StreamingHttpResponse(['abc'])
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'abc')
# Strip content for some status codes.
for status_code in (100, 150, 199, 204, 304):
res = HttpResponse('abc', status=status_code)
conditional_content_removal(req, res)
self.assertEqual(res.content, b'')
res = StreamingHttpResponse(['abc'], status=status_code)
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'')
# Issue #20472
abc = gzip_compress(b'abc')
res = HttpResponse(abc, status=304)
res['Content-Encoding'] = 'gzip'
conditional_content_removal(req, res)
self.assertEqual(res.content, b'')
res = StreamingHttpResponse([abc], status=304)
res['Content-Encoding'] = 'gzip'
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'')
# Strip content for HEAD requests.
req.method = 'HEAD'
res = HttpResponse('abc')
conditional_content_removal(req, res)
self.assertEqual(res.content, b'')
res = StreamingHttpResponse(['abc'])
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'')
| bsd-3-clause |
joseguerrero/sembrando | src/presentacion/librerias/contenido.py | 1 | 14608 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Este archivo contiene un diccionario de el contenido del recurso y un diccionario
con las definiciones utilizadas por el glosario de términos
"""
cont = {
"texto3_2":u"Las plantas como todos los seres vivos realizan funciones de crecimiento, alimentación, respiración y reproducción, pero a "
u"diferencia de los otros seres vivos son los únicos que realizan la función de fotosíntesis, que les permite elaborar sus alimentos para "
u"nutrirse. ",
"texto4_2":u"La raíz es el órgano que fija la planta al suelo y a través de los pelos radicales absorbe los minerales, agua y nutrientes necesarios para la alimentación de la planta y sus frutos. ",
"texto4_3":u"El tallo es el órgano que transpira, sostiene las hojas y transporta el agua y los minerales desde la raíz mediante un complejo sistema de conductos. ",
"texto4_4":u"Las hojas son los órganos que realizan la transformación de los componentes básicos (agua y minerales) en los nutrientes. Otras de sus funciones son la respiración, transpiración y la fotosíntesis. ",
"texto4_5":u"La flor es el órgano reproductor de las plantas, en su interior produce el fruto y este a su vez encierra la semilla. ",
"anim4_1": u"La raíz absorbe del suelo la savia bruta y la conduce hasta el tallo. ",
"anim4_2": u"Además de las hojas, el tallo sostiene las ramas, las flores y los frutos. ",
"anim4_3": u"Las plantas también absorben el agua a través de las hojas cuando cae la lluvia. ",
"anim4_4": u"La semilla se produce a través de la unión del polen con el óvulo. Fin de contenido, vuelve al menú. ",
"texto5_2":u"La reproducción es el proceso que permite a los seres vivos crear otros seres vivos semejantes o iguales a ellos mismos. ",
"texto5_3":u"Las plantas se dividen en dos grandes grupos: las plantas con flor y las plantas sin flor. ",
"texto5_4":u"La reproducción de las plantas puede ser asexual, es decir, no posee células sexuales diferenciadas, ello implica que un sólo organismo da origen a un nuevo ser. ",
"texto5_5":u"También, la reproducción de las plantas puede ser sexual, si el nuevo ser se forma por la unión de una célula femenina y una masculina. ",
"texto5_6":u"Vamos a conocer más sobre la reproducción sexual. ",
"anim5_1": u"En todas las plantas se da el proceso de reproducción. ",
"anim5_2": u"En las plantas existen dos tipos de reproducción, ellas llevan por nombre reproducción sexual y reproducción asexual. ",
"anim5_3": u"Las plantas sin flor se reproducen de forma asexual. ",
"anim5_4": u"Las plantas con flor se reproducen de forma sexual. ",
"texto6_2":u"La flor es el órgano reproductor de las plantas donde se realiza la reproducción por semillas, "
u"en el interior de la flor se une una célula sexual femenina y una masculina para formar un fruto "
u"y posteriormente la semilla, luego esa semilla desarrolla una nueva planta. ",
"texto6_3":u"Ahora bien, la germinación es una forma de reproducción que consiste en el desprendimiento "
u"natural o artificial de una semilla que es capaz de crecer hasta formar un nuevo individuo, semejante "
u"a la planta de la cual se desprendió. ",
"texto6_4":u"Hay semillas muy pequeñas como las del pimentón y tomate; hay semillas muy grandes como las de mango y cocotero. ",
"anim6_1": u"La flor está constituida por: pétalo, corola, sépalos, cáliz, pistilos, estambre. "
u"Ahora bien, el pistilo es el órgano sexual femenino y está conformado por: estigma, ovario, óvulo y estilo. "
u"y el estambre es el órgano sexual masculino y está compuesto por: antena y filamento. ",
"anim6_2": u"El proceso de germinación inicia con la semilla, de ella comienzan a salir las raíces, luego que las raíces están bien "
u"formadas inicia el crecimiento del tallo, las ramas y posteriormente el de las hojas. ",
"texto7_2":u"La reproducción asexual se realiza, generalmente, a partir de cualquier órgano de la planta, como los bulbos o los fragmentos de tallos. ",
"texto7_3":u"¿Quieres saber cómo se realiza la reproducción asexual? ¡Vamos a conocerlo! ",
"texto7_4":u"Los humanos recurren al proceso de reproducción asexual para obtener cosechas de mejor calidad y con mayor cantidad. Estos procesos llevan por nombre Acodo e Injerto. ",
"texto7_5":u"Por acodo: consiste en doblar una rama de una planta, enterrarla y cuando tenga raíces separarla de la planta madre. Como ejemplo está la mora. ",
"texto7_6":u"Por injerto: consiste en introducir un fragmento de tallo de una planta a otra planta, ambas de la misma especie, "
u"formando una nueva planta que mantiene las características de las plantas originales. ",
"anim7_1": u"Algunos ejemplos de plantas que se cultivan en Venezuela y se pueden reproducir por acodo son: guanábana, ciruela, níspero. ",
"anim7_2": u"El aguacate es un fruto que se siembra en Venezuela y se puede reproducir por injerto. Fin de contenido, vuelve al menú. ",
"texto8_2":u"La agricultura es el conjunto de técnicas y conocimientos para cultivar la tierra. En ella se engloban los diferentes trabajos de tratamiento del suelo y los cultivos de vegetales. ",
"texto8_3":u"Comprende todo un conjunto de acciones humanas que transforma el ambiente natural, con el fin de hacerlo más apto para el crecimiento de los alimentos que requiere la comunidad. ",
"texto8_4":u"Es una actividad de gran importancia estratégica como base fundamental para la soberanía alimentaria de las naciones. ",
"texto9_2_1": u"Región Capital ",
"texto9_2_1l": u"Región Capital. ",
"texto9_2_2": u"Los productos agrícolas que se cultivan en esta región son: cacao, coco, yuca, cereales, entre otros. ",
"texto9_2_3": u"¿Conoces los nombres de los estados que comprende esta región? Escribe los nombres de estos estados. ",
"texto9_2_4": u"Sabías que... en la región Capital se dan varios tipos de agricultura. ",
"texto9_3_1": u"Región Central ",
"texto9_3_1l": u"Región Central. ",
"texto9_3_2": u"Los productos agrícolas que se cultivan en esta región son: aguacate, café, caraota, cebolla, maíz, pimentón, quinchoncho, arroz, tabaco, papas, cambur, tomate, leguminosa de grano, entre otros. ",
"texto9_3_3": u"¿Has visitado alguno de los estados que comprende esta región? Escribe el nombre del estado que visitaste, cuéntale a tus compañeras y compañeros sobre la agricultura de esta región. ",
"texto9_4_1": u"Región los Llanos ",
"texto9_4_1l": u"Región los Llanos. ",
"texto9_4_2": u"Los productos agrícolas que se cultivan en esta región son: arroz, caraota, frijol, maíz, mango, patilla, tabaco, algodón, sorgo, tomate, cambur, plátano, entre otros. ",
"texto9_4_3": u"El árbol emblemático del estado Apure es el Merecure. ¿Conoces los nombres de los árboles emblemáticos de los estados que conforman esta región? Describe estos árboles y crea un mensaje acerca de la prevención de la deforestación. ",
"texto9_5_1": u"Región Occidental ",
"texto9_5_1l": u"Región Occidental. ",
"texto9_5_2": u"Los productos agrícolas que se cultivan en esta región son: caña de azúcar, coco, maíz, melón, ñame, ocumo, patilla, tomate, sorgo, berenjena, café, frijol, maíz, naranja, palma aceitera, plátano, arroz, algodón, ajonjolí, entre otros. ",
"texto9_5_3": u"Esta región esta constituida por 4 estados. ¿Cuál de estos estados es el más seco de Venezuela? Socializa con tus compañeras y compañeros sobre las características de este estado. ",
"texto9_6_1": u"Región Zuliana ",
"texto9_6_1l": u"Región Zuliana. ",
"texto9_6_2": u"Los productos agrícolas que se cultivan en esta región son: cambur, plátano, caña de azúcar, coco, yuca, algodón, frijol, melón, cacao, maíz, sorgo, entre otros. ",
"texto9_6_3": u"¿Cuál es el lago que se encuentra en esta región? Realiza una composición sobre este lago y compártela con tus compañeras y compañeros. ",
"texto9_7_1": u"Región los Andes ",
"texto9_7_1l": u"Región los Andes. ",
"texto9_7_2": u"Los productos agrícolas que se cultivan en esta región son: café, caña de azúcar, cacao, maíz, papa, ajo, cambur, caraota, cebolla, plátano, tomate, lechuga, apio, coliflor, zanahoria, entre otros. ",
"texto9_7_3": u"¿Cuál de los estados que comprende esta región tiene por capital a San Cristóbal? La región los Andes está constituida por 4 estados, escribe sobre la flora de esta región de Venezuela. ¿Por qué en la región los Andes la agricultura se desarrolla en terrazas? ",
"texto9_8_1": u"Región Nor Oriental ",
"texto9_8_1l": u"Región Nor Oriental. ",
"texto9_8_2": u"Los productos agrícolas que se cultivan en esta región son: maní, maíz, algodón, caña, sorgo, café, cacao, cambur, raíces, tubérculo, caña de azúcar, coco, yuca, entre otros. ",
"texto9_8_3": u"¿Conoces la flora del estado Anzoátegui? Elabora una receta con productos agrícolas cultivados en esta región. ",
"texto9_9_1": u"Región Guayana ",
"texto9_9_1l": u"Región Guayana. ",
"texto9_9_2": u"Los productos agrícolas que se cultivan en esta región son: arroz, maíz, coco, plátano, yuca, piña, palmito, mapuey, ñame, caucho, sarrapia, zarzaparrilla, vainilla, entre otros. ",
"texto9_9_3": u"Guayana es una región particularmente selvática, los alimentos que se cultivan allí requieren de mucha agua. ¿Conoces los ríos más importantes de esta región? Elabora una composición sobre alguno de estos ríos. ",
"texto9_10_1": u"Región Insular ",
"texto9_10_1l": u"Región Insular. ",
"texto9_10_2": u"Los productos agrícolas que se cultivan en esta región son: berenjena, maíz, melón, pimentón y tomate, entre otros. ",
"texto9_10_3": u"¿Conoces cuáles son las islas que constituyen el estado Nueva Esparta? Socializa con tus compañeras y compañeros sobre como se desarrolla la agricultura en esta región. ",
"texto9_10_4": u"¿Cómo se traslada el cultivo de la región Insular a otras regiones de Venezuela? ",
"texto11":u" Pulsa sobre cada botón para que puedas explorar las orientaciones y sugerencias. ",
"texto11_5_1": u"• Socializa con tus compañeras y compañeros las experiencias sobre las actividades desarrolladas en tu hogar en beneficio del cuidado de las áreas verdes ",
"texto11_5_2": u"• Realiza con la ayuda de la o el docente actividades de clasificación de las plantas, tomando en cuenta su tipo de reproducción ",
"texto11_5_3": u"• Elabora germinadores que posteriormente favorezcan a la comunidad ",
"texto11_6_1": u"• Propicia el trabajo en equipo para fortalecer el intercambio de experiencias en cuanto a la clasificación de las plantas ",
"texto11_6_2": u"• Promueve actividades para que las y los estudiantes experimenten y practiquen la siembra de plantas ",
"texto11_6_3": u"• Promueve la recuperación de áreas verdes que impulsen a la conservación del ambiente ",
"texto11_7_1": u"• Desarrolla el conocimiento de las plantas, su reproducción y cuidado en el hogar ",
"texto11_7_2": u"• Contribuye en la participación de la recuperación de áreas verdes en tu comunidad ",
"texto11_7_3": u"• Realiza campañas de concienciación para la conservación de las áreas verdes de tu comunidad ",
"texto11_5_1l": u"Socializa con tus compañeras y compañeros las experiencias sobre las actividades desarrolladas en tu hogar en beneficio del cuidado de las áreas verdes. ",
"texto11_5_2l": u"Realiza con la ayuda de la o el docente actividades de clasificación de las plantas, tomando en cuenta su tipo de reproducción. ",
"texto11_5_3l": u"Elabora germinadores que posteriormente favorezcan a la comunidad. ",
"texto11_6_1l": u"Propicia el trabajo en equipo para fortalecer el intercambio de experiencias en cuanto a la clasificación de las plantas. ",
"texto11_6_2l": u"Promueve actividades para que las y los estudiantes experimenten y practiquen la siembra de plantas. ",
"texto11_6_3l": u"Promueve la recuperación de áreas verdes que impulsen a la conservación del ambiente. ",
"texto11_7_1l": u"Desarrolla el conocimiento de las plantas, su reproducción y cuidado en el hogar. ",
"texto11_7_2l": u"Contribuye en la participación de la recuperación de áreas verdes en tu comunidad. ",
"texto11_7_3l": u"Realiza campañas de concienciación para la conservación de las áreas verdes de tu comunidad. "
}
conceptos = {
"absorber":u" Ejercer atracción sobre un fluido con el que está en contacto, de modo que las moléculas de este "
u"penetren en aquella. ",
"celula":u" Unidad fundamental de los organismos vivos capaz de reproducirse independientemente, de tamaño microscópico y "
u"formada por un citoplasma, núcleo y membrana. ",
"componentes":u" Se trata de elementos que a través de algún tipo de asociación dan lugar a un conjunto uniforme. ",
"fotosintesis":u" Proceso por el cual las plantas verdes, las algas y algunas bacterias utilizan la energía de la luz "
u"para su desarrollo, crecimiento y reproducción. ",
"germinar":u" Comenzar a desarrollarse desde la semilla. ",
"germinacion":u" Acción de germinar. ",
"minerales":u" Elemento químico simple cuya presencia e intervención es imprescindible para la actividad de las células. ",
"nutrientes":u" Producto químico procedente del exterior de la célula que ésta necesita para realizar sus funciones vitales. ",
"organo":u" Cada una de las partes del cuerpo animal o vegetal que ejercen una función. ",
"rasexual":u" Consiste en que a partir de un organismo se desprende una sola célula o trozos del cuerpo, "
u"que por procesos mitóticos, son capaces de formar un individuo completo idéntico a él. ",
"rsexual":u" Proceso mediante el cual se crean nuevos individuos similares a sus progenitores, gracias a la combinación entre sus células sexuales masculinas y femeninas. ",
"transformacion":u" Procedimiento mediante el cual algo se modifica, altera o cambia de forma manteniendo su identidad. ",
"transportar":u" Llevar a alguien o algo de un lugar a otro. "
}
| gpl-3.0 |
Kazade/NeHe-Website | google_appengine/lib/django-1.2/django/contrib/localflavor/id/forms.py | 311 | 6834 | """
ID-specific Form helpers
"""
import re
import time
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, Select
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
postcode_re = re.compile(r'^[1-9]\d{4}$')
phone_re = re.compile(r'^(\+62|0)[2-9]\d{7,10}$')
plate_re = re.compile(r'^(?P<prefix>[A-Z]{1,2}) ' + \
r'(?P<number>\d{1,5})( (?P<suffix>([A-Z]{1,3}|[1-9][0-9]{,2})))?$')
nik_re = re.compile(r'^\d{16}$')
class IDPostCodeField(Field):
"""
An Indonesian post code field.
http://id.wikipedia.org/wiki/Kode_pos
"""
default_error_messages = {
'invalid': _('Enter a valid post code'),
}
def clean(self, value):
super(IDPostCodeField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = value.strip()
if not postcode_re.search(value):
raise ValidationError(self.error_messages['invalid'])
if int(value) < 10110:
raise ValidationError(self.error_messages['invalid'])
        # Post codes in the 1xxxx block must end in 0 (i.e. match 1xxx0).
if value[0] == '1' and value[4] != '0':
raise ValidationError(self.error_messages['invalid'])
return u'%s' % (value, )
class IDProvinceSelect(Select):
"""
A Select widget that uses a list of provinces of Indonesia as its
choices.
"""
def __init__(self, attrs=None):
from id_choices import PROVINCE_CHOICES
super(IDProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class IDPhoneNumberField(Field):
"""
An Indonesian telephone number field.
http://id.wikipedia.org/wiki/Daftar_kode_telepon_di_Indonesia
"""
default_error_messages = {
'invalid': _('Enter a valid phone number'),
}
def clean(self, value):
super(IDPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
phone_number = re.sub(r'[\-\s\(\)]', '', smart_unicode(value))
if phone_re.search(phone_number):
return smart_unicode(value)
raise ValidationError(self.error_messages['invalid'])
class IDLicensePlatePrefixSelect(Select):
"""
A Select widget that uses a list of vehicle license plate prefix code
of Indonesia as its choices.
http://id.wikipedia.org/wiki/Tanda_Nomor_Kendaraan_Bermotor
"""
def __init__(self, attrs=None):
from id_choices import LICENSE_PLATE_PREFIX_CHOICES
super(IDLicensePlatePrefixSelect, self).__init__(attrs,
choices=LICENSE_PLATE_PREFIX_CHOICES)
class IDLicensePlateField(Field):
"""
An Indonesian vehicle license plate field.
http://id.wikipedia.org/wiki/Tanda_Nomor_Kendaraan_Bermotor
Plus: "B 12345 12"
"""
default_error_messages = {
'invalid': _('Enter a valid vehicle license plate number'),
}
def clean(self, value):
super(IDLicensePlateField, self).clean(value)
if value in EMPTY_VALUES:
return u''
plate_number = re.sub(r'\s+', ' ',
smart_unicode(value.strip())).upper()
matches = plate_re.search(plate_number)
if matches is None:
raise ValidationError(self.error_messages['invalid'])
# Make sure prefix is in the list of known codes.
from id_choices import LICENSE_PLATE_PREFIX_CHOICES
prefix = matches.group('prefix')
if prefix not in [choice[0] for choice in LICENSE_PLATE_PREFIX_CHOICES]:
raise ValidationError(self.error_messages['invalid'])
# Only Jakarta (prefix B) can have 3 letter suffix.
suffix = matches.group('suffix')
if suffix is not None and len(suffix) == 3 and prefix != 'B':
raise ValidationError(self.error_messages['invalid'])
# RI plates don't have suffix.
if prefix == 'RI' and suffix is not None and suffix != '':
raise ValidationError(self.error_messages['invalid'])
# Number can't be zero.
number = matches.group('number')
if number == '0':
raise ValidationError(self.error_messages['invalid'])
# CD, CC and B 12345 12
if len(number) == 5 or prefix in ('CD', 'CC'):
# suffix must be numeric and non-empty
if re.match(r'^\d+$', suffix) is None:
raise ValidationError(self.error_messages['invalid'])
# Known codes range is 12-124
if prefix in ('CD', 'CC') and not (12 <= int(number) <= 124):
raise ValidationError(self.error_messages['invalid'])
if len(number) == 5 and not (12 <= int(suffix) <= 124):
raise ValidationError(self.error_messages['invalid'])
else:
# suffix must be non-numeric
if suffix is not None and re.match(r'^[A-Z]{,3}$', suffix) is None:
raise ValidationError(self.error_messages['invalid'])
return plate_number
class IDNationalIdentityNumberField(Field):
"""
An Indonesian national identity number (NIK/KTP#) field.
http://id.wikipedia.org/wiki/Nomor_Induk_Kependudukan
xx.xxxx.ddmmyy.xxxx - 16 digits (excl. dots)
"""
default_error_messages = {
'invalid': _('Enter a valid NIK/KTP number'),
}
def clean(self, value):
super(IDNationalIdentityNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = re.sub(r'[\s.]', '', smart_unicode(value))
if not nik_re.search(value):
raise ValidationError(self.error_messages['invalid'])
if int(value) == 0:
raise ValidationError(self.error_messages['invalid'])
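        # Helper: validates the embedded ddmmyy date by round-tripping it through
        # time.mktime()/time.localtime(); if the normalised date differs from the
        # input (for example day 31 in a 30-day month), the date is rejected.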
def valid_nik_date(year, month, day):
try:
t1 = (int(year), int(month), int(day), 0, 0, 0, 0, 0, -1)
d = time.mktime(t1)
t2 = time.localtime(d)
if t1[:3] != t2[:3]:
return False
else:
return True
except (OverflowError, ValueError):
return False
year = int(value[10:12])
month = int(value[8:10])
day = int(value[6:8])
current_year = time.localtime().tm_year
if year < int(str(current_year)[-2:]):
if not valid_nik_date(2000 + int(year), month, day):
raise ValidationError(self.error_messages['invalid'])
elif not valid_nik_date(1900 + int(year), month, day):
raise ValidationError(self.error_messages['invalid'])
if value[:6] == '000000' or value[12:] == '0000':
raise ValidationError(self.error_messages['invalid'])
return '%s.%s.%s.%s' % (value[:2], value[2:6], value[6:12], value[12:])
| bsd-3-clause |
mconlon17/vivo-1.5-improvement | tools/test_find_person.py | 2 | 1130 | """
test_find_person.py -- from a ufid dictionary, find the ufid and return
the URI of the person.
Version 0.1 MC 2013-12-28
-- Initial version. Make a dictionary and make a dictionary with
debug=True
Version 0.2 MC 2014-08-30
-- PEP 8, support for vivopeople
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2014, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "0.2"
from vivopeople import make_ufid_dictionary
from vivopeople import find_person
from datetime import datetime
print datetime.now(), "Start"
print datetime.now(), "Making ufid dictionary"
ufid_dictionary = make_ufid_dictionary(debug=True)
print datetime.now(), "ufid dictionary has ", len(ufid_dictionary), "entries"
ufids = \
[
"02001000",
"57000000",
"80147616",
"33100000",
"16010000",
"16020000",
"60100000",
"84808900",
"27000000",
"11040000"
]
for ufid in ufids:
[found, uri] = find_person(ufid, ufid_dictionary)
print str(found).ljust(5), ufid, uri
print datetime.now(), "Finished"
| bsd-3-clause |
ToxicFrog/lancow | madcow/modules/djmemebot.py | 3 | 4591 | """Watch URLs in channel, punish people for living under a rock"""
import random
import sys
import re
import os
from madcow.conf import settings
from madcow.util import Module
from madcow.util.text import encode, decode
DEFAULT_INSULTS = ['OLD MEME ALERT!',
'omg, SO OLD!',
'Welcome to yesterday.',
'been there, done that.',
'you missed the mememobile.',
'oldest. meme. EVAR.',
'jesus christ you suck.',
'you need a new memesource, bucko.',
'that was funny the first time i saw it.',
'new to the internet?',
'i think that came installed with the internet.',
'are you serious?',
'CHOO!! CHOO!! ALL ABOARD THE OLD MEME TRAIN!']
url_re = re.compile(r'(https?://\S+)', re.IGNORECASE)
score_request_re = re.compile(r'^\s*score(?:(?:\s+|[:-]+\s*)(\S+?)(?:\s*-\s*(\S+))?)?\s*$', re.I)
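# For reference, score_request_re accepts messages such as "score", "score alice"
# or "score 1-10": the first group captures a name (or range start) and the second
# an optional range end.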
class Memebot(object):
"""Interface to django-memebot"""
def __init__(self, settings_file, logger, insults=None):
if insults is None:
insults = DEFAULT_INSULTS
self.insults = insults
self.log = logger
settings_file = os.path.realpath(settings_file)
project_dir, settings_filename = os.path.split(settings_file)
settings_name = os.path.splitext(settings_filename)[0]
install_dir, project_name = os.path.split(project_dir)
for package_dir in project_dir, install_dir:
while package_dir in sys.path:
sys.path.remove(package_dir)
sys.path.insert(0, package_dir)
os.environ['DJANGO_SETTINGS_MODULE'] = '%s.%s' % (project_name, settings_name)
        sys.dont_write_bytecode = True  # avoid writing .pyc files into the django project
import django.db
def get_scores(self, range=None, name=None):
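        # get_by_score() is assumed to yield profiles already in rank order (ranks are
        # simply assigned by enumeration below); an entry is kept when its zero-based
        # index falls inside the inclusive ``range`` tuple or its username matches
        # ``name`` case-insensitively, and each renders as "#rank: username (score)".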
from gruntle.memebot.models import UserProfile
if name is not None:
name = name.lower()
profiles = [(i + 1, profile) for i, profile in enumerate(UserProfile.objects.get_by_score())
if (range is not None and i >= range[0] and i <= range[1]) or
(profile.user.username.lower() == name)]
return u', '.join(u'#%d: %s (%d)' % (rank, profile.user.username, profile.score) for rank, profile in profiles)
def process_url(self, url, nick, source_name):
from gruntle.memebot.models import Link
from gruntle.memebot.exceptions import OldMeme, BlackListError
try:
link = Link.objects.add_link(url, nick.lower(), source_name.lower(), settings.PROTOCOL)
self.log.info('%s posted a link to %s: %r', nick, source_name, link)
except OldMeme, exc:
return '%s First posted by %s on %s' % (random.choice(self.insults),
exc.link.user.username,
exc.link.created.ctime())
except BlackListError, exc:
self.log.warn('%s posted a link to %s that was blacklisted: %s', nick, source_name, exc)
class MemebotModule(Module):
"""Madcow module for memebot"""
pattern = Module._any
allow_threading = False
priority = 10
terminate = False
require_addressing = False
help = 'score [name | x - y] - get memescore'
def __init__(self, *args, **kwargs):
super(Main, self).__init__(*args, **kwargs)
self.memebot = Memebot(settings.DJMEMEBOT_SETTINGS_FILE, logger=self.log, insults=settings.OLD_MEME_INSULTS)
def response(self, nick, args, kwargs):
message = encode(args[0])
if kwargs['addressed']:
match = score_request_re.search(message)
if match is not None:
start, end = match.groups()
# asking for a username
if end is None and start is not None:
return self.memebot.get_scores(name=start)
start = ((int(start) if start is not None and start.isdigit() else None) or 1) - 1
end = ((int(end) if end is not None and end.isdigit() else None) or start + 10) - 1
if start < 0:
start = 0
if end < start:
end = start
return self.memebot.get_scores(range=(start, end))
match = url_re.search(message)
if match is not None:
url = decode(match.group(1))
return self.memebot.process_url(url, nick, kwargs['channel'])
Main = MemebotModule
| gpl-3.0 |
fernandezcuesta/ansible | lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py | 18 | 15963 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_taskdefinition
short_description: register a task definition in ecs
description:
- Registers or deregisters task definitions in the Amazon Web Services (AWS) EC2 Container Service (ECS)
version_added: "2.0"
author: Mark Chance(@Java1Guy)
requirements: [ json, boto, botocore, boto3 ]
options:
state:
description:
- State whether the task definition should exist or be deleted
required: true
choices: ['present', 'absent']
arn:
description:
      - The arn of the task definition to delete
required: false
family:
description:
- A Name that would be given to the task definition
required: false
revision:
description:
- A revision number for the task definition
required: False
containers:
description:
      - A list of container definitions
required: False
network_mode:
description:
- The Docker networking mode to use for the containers in the task.
required: false
default: bridge
choices: [ 'bridge', 'host', 'none' ]
version_added: 2.3
task_role_arn:
description:
- The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted
the permissions that are specified in this role.
required: false
version_added: 2.3
volumes:
description:
- A list of names of volumes to be attached
required: False
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Create task definition
ecs_taskdefinition:
containers:
- name: simple-app
cpu: 10
essential: true
image: "httpd:2.4"
memory: 300
mountPoints:
- containerPath: /usr/local/apache2/htdocs
sourceVolume: my-vol
portMappings:
- containerPort: 80
hostPort: 80
- name: busybox
command:
- >
/bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><div><h1>Amazon ECS Sample App</h1><h2>Congratulations!
</h2><p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom;
cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done"
cpu: 10
entryPoint:
- sh
- "-c"
essential: false
image: busybox
memory: 200
volumesFrom:
- sourceContainer: simple-app
volumes:
- name: my-vol
family: test-cluster-taskdef
state: present
register: task_output
'''
RETURN = '''
taskdefinition:
description: a reflection of the input parameters
type: dict
returned: always
'''
try:
import boto
import botocore
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info
class EcsTaskManager:
"""Handles ECS Tasks"""
def __init__(self, module):
self.module = module
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg="Can't authorize connection - " % str(e))
def describe_task(self, task_name):
try:
response = self.ecs.describe_task_definition(taskDefinition=task_name)
return response['taskDefinition']
except botocore.exceptions.ClientError:
return None
def register_task(self, family, task_role_arn, network_mode, container_definitions, volumes):
validated_containers = []
        # Ensure the numeric parameters are ints, as required by boto
for container in container_definitions:
for param in ('memory', 'cpu', 'memoryReservation'):
if param in container:
container[param] = int(container[param])
if 'portMappings' in container:
for port_mapping in container['portMappings']:
for port in ('hostPort', 'containerPort'):
if port in port_mapping:
port_mapping[port] = int(port_mapping[port])
validated_containers.append(container)
try:
response = self.ecs.register_task_definition(family=family,
taskRoleArn=task_role_arn,
networkMode=network_mode,
containerDefinitions=container_definitions,
volumes=volumes)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
return response['taskDefinition']
def describe_task_definitions(self, family):
data = {
"taskDefinitionArns": [],
"nextToken": None
}
def fetch():
# Boto3 is weird about params passed, so only pass nextToken if we have a value
params = {
'familyPrefix': family
}
if data['nextToken']:
params['nextToken'] = data['nextToken']
result = self.ecs.list_task_definitions(**params)
data['taskDefinitionArns'] += result['taskDefinitionArns']
data['nextToken'] = result.get('nextToken', None)
return data['nextToken'] is not None
# Fetch all the arns, possibly across multiple pages
while fetch():
pass
# Return the full descriptions of the task definitions, sorted ascending by revision
return list(
sorted(
[self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']],
key=lambda td: td['revision']
)
)
def deregister_task(self, taskArn):
response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
return response['taskDefinition']
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
arn=dict(required=False, type='str'),
family=dict(required=False, type='str'),
revision=dict(required=False, type='int'),
containers=dict(required=False, type='list'),
network_mode=dict(required=False, default='bridge', choices=['bridge', 'host', 'none'], type='str'),
task_role_arn=dict(required=False, default='', type='str'),
volumes=dict(required=False, type='list')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto is required.')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
task_to_describe = None
task_mgr = EcsTaskManager(module)
results = dict(changed=False)
    for container in module.params['containers'] or []:
if 'environment' in container:
for environment in container['environment']:
environment['value'] = str(environment['value'])
if module.params['state'] == 'present':
if 'containers' not in module.params or not module.params['containers']:
module.fail_json(msg="To use task definitions, a list of containers must be specified")
if 'family' not in module.params or not module.params['family']:
module.fail_json(msg="To use task definitions, a family must be specified")
family = module.params['family']
existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family'])
if 'revision' in module.params and module.params['revision']:
            # The definition specifies a revision. We must guarantee that an active revision of that number will result from this.
revision = int(module.params['revision'])
# A revision has been explicitly specified. Attempt to locate a matching revision
tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision]
existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None
if existing and existing['status'] != "ACTIVE":
# We cannot reactivate an inactive revision
module.fail_json(msg="A task in family '%s' already exists for revsion %d, but it is inactive" % (family, revision))
elif not existing:
if not existing_definitions_in_family and revision != 1:
module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision:
module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" %
(revision, existing_definitions_in_family[-1]['revision'] + 1))
else:
existing = None
def _right_has_values_of_left(left, right):
# Make sure the values are equivalent for everything left has
for k, v in left.items():
if not ((not v and (k not in right or not right[k])) or (k in right and v == right[k])):
# We don't care about list ordering because ECS can change things
if isinstance(v, list) and k in right:
left_list = v
right_list = right[k] or []
if len(left_list) != len(right_list):
return False
for list_val in left_list:
if list_val not in right_list:
return False
else:
return False
# Make sure right doesn't have anything that left doesn't
for k, v in right.items():
if v and k not in left:
return False
return True
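            # Illustrative behaviour of _right_has_values_of_left above (sketch only):
            # missing keys and falsy values are treated as equivalent, and lists are
            # compared without regard to order, e.g.
            #   _right_has_values_of_left({'cpu': 10, 'links': []}, {'cpu': 10}) -> True
            #   _right_has_values_of_left({'cpu': 10}, {'cpu': 20}) -> False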
def _task_definition_matches(requested_volumes, requested_containers, existing_task_definition):
if td['status'] != "ACTIVE":
return None
                existing_volumes = existing_task_definition.get('volumes', []) or []
if len(requested_volumes) != len(existing_volumes):
# Nope.
return None
if len(requested_volumes) > 0:
for requested_vol in requested_volumes:
found = False
for actual_vol in existing_volumes:
if _right_has_values_of_left(requested_vol, actual_vol):
found = True
break
if not found:
return None
                existing_containers = existing_task_definition.get('containerDefinitions', []) or []
if len(requested_containers) != len(existing_containers):
# Nope.
return None
for requested_container in requested_containers:
found = False
for actual_container in existing_containers:
if _right_has_values_of_left(requested_container, actual_container):
found = True
break
if not found:
return None
return existing_task_definition
# No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
for td in existing_definitions_in_family:
requested_volumes = module.params.get('volumes', []) or []
requested_containers = module.params.get('containers', []) or []
existing = _task_definition_matches(requested_volumes, requested_containers, td)
if existing:
break
if existing:
# Awesome. Have an existing one. Nothing to do.
results['taskdefinition'] = existing
else:
if not module.check_mode:
# Doesn't exist. create it.
volumes = module.params.get('volumes', []) or []
results['taskdefinition'] = task_mgr.register_task(module.params['family'],
module.params['task_role_arn'],
module.params['network_mode'],
module.params['containers'],
volumes)
results['changed'] = True
elif module.params['state'] == 'absent':
# When de-registering a task definition, we can specify the ARN OR the family and revision.
if module.params['state'] == 'absent':
if 'arn' in module.params and module.params['arn'] is not None:
task_to_describe = module.params['arn']
elif 'family' in module.params and module.params['family'] is not None and 'revision' in module.params and \
module.params['revision'] is not None:
task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
else:
module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")
existing = task_mgr.describe_task(task_to_describe)
if not existing:
pass
else:
# It exists, so we should delete it and mark changed. Return info about the task definition deleted
results['taskdefinition'] = existing
if 'status' in existing and existing['status'] == "INACTIVE":
results['changed'] = False
else:
if not module.check_mode:
task_mgr.deregister_task(task_to_describe)
results['changed'] = True
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
crossroadchurch/paul | tests/interfaces/openlp_core_ui/test_servicemanager.py | 1 | 19346 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
Package to test the openlp.core.lib package.
"""
from unittest import TestCase
from openlp.core.common import Registry
from openlp.core.lib import ScreenList, ServiceItem, ItemCapabilities
from openlp.core.ui.mainwindow import MainWindow
from tests.interfaces import MagicMock, patch
from tests.helpers.testmixin import TestMixin
class TestServiceManager(TestCase, TestMixin):
def setUp(self):
"""
Create the UI
"""
Registry.create()
self.setup_application()
ScreenList.create(self.app.desktop())
Registry().register('application', MagicMock())
# Mock classes and methods used by mainwindow.
with patch('openlp.core.ui.mainwindow.SettingsForm') as mocked_settings_form, \
patch('openlp.core.ui.mainwindow.ImageManager') as mocked_image_manager, \
patch('openlp.core.ui.mainwindow.LiveController') as mocked_live_controller, \
patch('openlp.core.ui.mainwindow.PreviewController') as mocked_preview_controller, \
patch('openlp.core.ui.mainwindow.OpenLPDockWidget') as mocked_dock_widget, \
patch('openlp.core.ui.mainwindow.QtGui.QToolBox') as mocked_q_tool_box_class, \
patch('openlp.core.ui.mainwindow.QtGui.QMainWindow.addDockWidget') as mocked_add_dock_method, \
patch('openlp.core.ui.mainwindow.ThemeManager') as mocked_theme_manager, \
patch('openlp.core.ui.mainwindow.ProjectorManager') as mocked_projector_manager, \
patch('openlp.core.ui.mainwindow.Renderer') as mocked_renderer:
self.main_window = MainWindow()
self.service_manager = Registry().get('service_manager')
def tearDown(self):
"""
Delete all the C++ objects at the end so that we don't have a segfault
"""
del self.main_window
def basic_service_manager_test(self):
"""
Test the Service Manager UI Functionality
"""
# GIVEN: A New Service Manager instance
# WHEN I have set up the display
self.service_manager.setup_ui(self.service_manager)
# THEN the count of items should be zero
self.assertEqual(self.service_manager.service_manager_list.topLevelItemCount(), 0,
'The service manager list should be empty ')
def default_context_menu_test(self):
"""
Test the context_menu() method with a default service item
"""
# GIVEN: A service item added
self.service_manager.setup_ui(self.service_manager)
with patch('PyQt4.QtGui.QTreeWidget.itemAt') as mocked_item_at_method, \
patch('PyQt4.QtGui.QWidget.mapToGlobal'), \
patch('PyQt4.QtGui.QMenu.exec_'):
mocked_item = MagicMock()
mocked_item.parent.return_value = None
mocked_item_at_method.return_value = mocked_item
# We want 1 to be returned for the position
mocked_item.data.return_value = 1
# A service item without capabilities.
service_item = ServiceItem()
self.service_manager.service_items = [{'service_item': service_item}]
q_point = None
# Mocked actions.
self.service_manager.edit_action.setVisible = MagicMock()
self.service_manager.create_custom_action.setVisible = MagicMock()
self.service_manager.maintain_action.setVisible = MagicMock()
self.service_manager.notes_action.setVisible = MagicMock()
self.service_manager.time_action.setVisible = MagicMock()
self.service_manager.auto_start_action.setVisible = MagicMock()
# WHEN: Show the context menu.
self.service_manager.context_menu(q_point)
# THEN: The following actions should be not visible.
self.service_manager.edit_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.create_custom_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.maintain_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.notes_action.setVisible.assert_called_with(True), 'The action should be set visible.'
self.service_manager.time_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.auto_start_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
def edit_context_menu_test(self):
"""
Test the context_menu() method with a edit service item
"""
# GIVEN: A service item added
self.service_manager.setup_ui(self.service_manager)
with patch('PyQt4.QtGui.QTreeWidget.itemAt') as mocked_item_at_method, \
patch('PyQt4.QtGui.QWidget.mapToGlobal'), \
patch('PyQt4.QtGui.QMenu.exec_'):
mocked_item = MagicMock()
mocked_item.parent.return_value = None
mocked_item_at_method.return_value = mocked_item
# We want 1 to be returned for the position
mocked_item.data.return_value = 1
# A service item without capabilities.
service_item = ServiceItem()
service_item.add_capability(ItemCapabilities.CanEdit)
service_item.edit_id = 1
self.service_manager.service_items = [{'service_item': service_item}]
q_point = None
# Mocked actions.
self.service_manager.edit_action.setVisible = MagicMock()
self.service_manager.create_custom_action.setVisible = MagicMock()
self.service_manager.maintain_action.setVisible = MagicMock()
self.service_manager.notes_action.setVisible = MagicMock()
self.service_manager.time_action.setVisible = MagicMock()
self.service_manager.auto_start_action.setVisible = MagicMock()
# WHEN: Show the context menu.
self.service_manager.context_menu(q_point)
# THEN: The following actions should be not visible.
self.service_manager.edit_action.setVisible.assert_called_with(True), \
'The action should be set visible.'
self.service_manager.create_custom_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.maintain_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.notes_action.setVisible.assert_called_with(True), 'The action should be set visible.'
self.service_manager.time_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.auto_start_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
def maintain_context_menu_test(self):
"""
Test the context_menu() method with a maintain
"""
# GIVEN: A service item added
self.service_manager.setup_ui(self.service_manager)
with patch('PyQt4.QtGui.QTreeWidget.itemAt') as mocked_item_at_method, \
patch('PyQt4.QtGui.QWidget.mapToGlobal'), \
patch('PyQt4.QtGui.QMenu.exec_'):
mocked_item = MagicMock()
mocked_item.parent.return_value = None
mocked_item_at_method.return_value = mocked_item
# We want 1 to be returned for the position
mocked_item.data.return_value = 1
# A service item without capabilities.
service_item = ServiceItem()
service_item.add_capability(ItemCapabilities.CanMaintain)
self.service_manager.service_items = [{'service_item': service_item}]
q_point = None
# Mocked actions.
self.service_manager.edit_action.setVisible = MagicMock()
self.service_manager.create_custom_action.setVisible = MagicMock()
self.service_manager.maintain_action.setVisible = MagicMock()
self.service_manager.notes_action.setVisible = MagicMock()
self.service_manager.time_action.setVisible = MagicMock()
self.service_manager.auto_start_action.setVisible = MagicMock()
# WHEN: Show the context menu.
self.service_manager.context_menu(q_point)
# THEN: The following actions should be not visible.
self.service_manager.edit_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.create_custom_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.maintain_action.setVisible.assert_called_with(True), \
'The action should be set visible.'
self.service_manager.notes_action.setVisible.assert_called_with(True), 'The action should be set visible.'
self.service_manager.time_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.auto_start_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
def loopy_context_menu_test(self):
"""
Test the context_menu() method with a loop
"""
# GIVEN: A service item added
self.service_manager.setup_ui(self.service_manager)
with patch('PyQt4.QtGui.QTreeWidget.itemAt') as mocked_item_at_method, \
patch('PyQt4.QtGui.QWidget.mapToGlobal'), \
patch('PyQt4.QtGui.QMenu.exec_'):
mocked_item = MagicMock()
mocked_item.parent.return_value = None
mocked_item_at_method.return_value = mocked_item
# We want 1 to be returned for the position
mocked_item.data.return_value = 1
# A service item without capabilities.
service_item = ServiceItem()
service_item.add_capability(ItemCapabilities.CanLoop)
service_item._raw_frames.append("One")
service_item._raw_frames.append("Two")
self.service_manager.service_items = [{'service_item': service_item}]
q_point = None
# Mocked actions.
self.service_manager.edit_action.setVisible = MagicMock()
self.service_manager.create_custom_action.setVisible = MagicMock()
self.service_manager.maintain_action.setVisible = MagicMock()
self.service_manager.notes_action.setVisible = MagicMock()
self.service_manager.time_action.setVisible = MagicMock()
self.service_manager.auto_start_action.setVisible = MagicMock()
# WHEN: Show the context menu.
self.service_manager.context_menu(q_point)
# THEN: The following actions should be not visible.
self.service_manager.edit_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.create_custom_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.maintain_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.notes_action.setVisible.assert_called_with(True), 'The action should be set visible.'
self.service_manager.time_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.auto_start_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
def start_time_context_menu_test(self):
"""
Test the context_menu() method with a start time
"""
# GIVEN: A service item added
self.service_manager.setup_ui(self.service_manager)
with patch('PyQt4.QtGui.QTreeWidget.itemAt') as mocked_item_at_method, \
patch('PyQt4.QtGui.QWidget.mapToGlobal'), \
patch('PyQt4.QtGui.QMenu.exec_'):
mocked_item = MagicMock()
mocked_item.parent.return_value = None
mocked_item_at_method.return_value = mocked_item
# We want 1 to be returned for the position
mocked_item.data.return_value = 1
# A service item without capabilities.
service_item = ServiceItem()
service_item.add_capability(ItemCapabilities.HasVariableStartTime)
self.service_manager.service_items = [{'service_item': service_item}]
q_point = None
# Mocked actions.
self.service_manager.edit_action.setVisible = MagicMock()
self.service_manager.create_custom_action.setVisible = MagicMock()
self.service_manager.maintain_action.setVisible = MagicMock()
self.service_manager.notes_action.setVisible = MagicMock()
self.service_manager.time_action.setVisible = MagicMock()
self.service_manager.auto_start_action.setVisible = MagicMock()
# WHEN: Show the context menu.
self.service_manager.context_menu(q_point)
# THEN: The following actions should be not visible.
self.service_manager.edit_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.create_custom_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.maintain_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.notes_action.setVisible.assert_called_with(True), 'The action should be set visible.'
self.service_manager.time_action.setVisible.assert_called_with(True), \
'The action should be set visible.'
self.service_manager.auto_start_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
def auto_start_context_menu_test(self):
"""
Test the context_menu() method with can auto start
"""
# GIVEN: A service item added
self.service_manager.setup_ui(self.service_manager)
with patch('PyQt4.QtGui.QTreeWidget.itemAt') as mocked_item_at_method, \
patch('PyQt4.QtGui.QWidget.mapToGlobal'), \
patch('PyQt4.QtGui.QMenu.exec_'):
mocked_item = MagicMock()
mocked_item.parent.return_value = None
mocked_item_at_method.return_value = mocked_item
# We want 1 to be returned for the position
mocked_item.data.return_value = 1
# A service item without capabilities.
service_item = ServiceItem()
service_item.add_capability(ItemCapabilities.CanAutoStartForLive)
self.service_manager.service_items = [{'service_item': service_item}]
q_point = None
# Mocked actions.
self.service_manager.edit_action.setVisible = MagicMock()
self.service_manager.create_custom_action.setVisible = MagicMock()
self.service_manager.maintain_action.setVisible = MagicMock()
self.service_manager.notes_action.setVisible = MagicMock()
self.service_manager.time_action.setVisible = MagicMock()
self.service_manager.auto_start_action.setVisible = MagicMock()
self.service_manager.rename_action.setVisible = MagicMock()
# WHEN: Show the context menu.
self.service_manager.context_menu(q_point)
# THEN: The following actions should be not visible.
self.service_manager.edit_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.create_custom_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.maintain_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.notes_action.setVisible.assert_called_with(True), 'The action should be set visible.'
self.service_manager.time_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
self.service_manager.auto_start_action.setVisible.assert_called_with(True), \
'The action should be set visible.'
self.service_manager.rename_action.setVisible.assert_called_once_with(False), \
'The action should be set invisible.'
def click_on_new_service_test(self):
"""
Test the on_new_service event handler is called by the UI
"""
# GIVEN: An initial form
mocked_event = MagicMock()
self.service_manager.on_new_service_clicked = mocked_event
self.service_manager.setup_ui(self.service_manager)
        # WHEN: displaying the UI and triggering the new service action
new_service = self.service_manager.toolbar.actions['newService']
new_service.trigger()
assert mocked_event.call_count == 1, 'The on_new_service_clicked method should have been called once'
| gpl-2.0 |
foss-transportationmodeling/rettina-server | flask/lib/python2.7/ntpath.py | 127 | 18457 | # Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
# OS/2 w/ VACPP
altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
"""Normalize case of pathname.
Makes all characters lowercase and all slashes into backslashes."""
return s.replace("/", "\\").lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
"""Test whether a path is absolute"""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
# Join two (or more) paths.
def join(a, *p):
"""Join two or more pathname components, inserting "\\" as needed.
If any component is an absolute path, all previous path components
will be discarded."""
path = a
for b in p:
b_wins = 0 # set to 1 iff b makes path irrelevant
if path == "":
b_wins = 1
elif isabs(b):
# This probably wipes out path so far. However, it's more
# complicated if path begins with a drive letter:
# 1. join('c:', '/a') == 'c:/a'
# 2. join('c:/', '/a') == 'c:/a'
# But
# 3. join('c:/a', '/b') == '/b'
# 4. join('c:', 'd:/') = 'd:/'
# 5. join('c:/', 'd:/') = 'd:/'
if path[1:2] != ":" or b[1:2] == ":":
# Path doesn't start with a drive letter, or cases 4 and 5.
b_wins = 1
# Else path has a drive letter, and b doesn't but is absolute.
elif len(path) > 3 or (len(path) == 3 and
path[-1] not in "/\\"):
# case 3
b_wins = 1
if b_wins:
path = b
else:
# Join, and ensure there's a separator.
assert len(path) > 0
if path[-1] in "/\\":
if b and b[0] in "/\\":
path += b[1:]
else:
path += b
elif path[-1] == ":":
path += b
elif b:
if b[0] in "/\\":
path += b
else:
path += "\\" + b
else:
# path is not empty and does not end with a backslash,
# but b is empty; since, e.g., split('a/') produces
# ('a', ''), it's best if join() adds a backslash in
# this case.
path += '\\'
return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty"""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
# Parse UNC paths
def splitunc(p):
"""Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
if p[1:2] == ':':
return '', p # Drive letter present
firstTwo = p[0:2]
if firstTwo == '//' or firstTwo == '\\\\':
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
# \\machine\mountpoint\directories...
# directory ^^^^^^^^^^^^^^^
normp = normcase(p)
index = normp.find('\\', 2)
if index == -1:
##raise RuntimeError, 'illegal UNC path: "' + p + '"'
return ("", p)
index = normp.find('\\', index + 1)
if index == -1:
index = len(p)
return p[:index], p[index:]
return '', p
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
"""Split a pathname.
Return tuple (head, tail) where tail is everything after the final slash.
Either part may be empty."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
"""Returns the final component of a pathname"""
return split(p)[1]
# Return the head (dirname) part of a path.
def dirname(p):
"""Returns the directory component of a pathname"""
return split(p)[0]
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
"""Test for symbolic link.
On WindowsNT/95 and OS/2 always returns false
"""
return False
# alias exists to lexists
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
"""Test whether a path is a mount point (defined as root of drive)"""
unc, rest = splitunc(path)
if unc:
return rest in ("", "/", "\\")
p = splitdrive(path)[1]
return len(p) == 1 and p[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
stacklevel=2)
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = os.environ['HOME']
elif 'USERPROFILE' in os.environ:
userhome = os.environ['USERPROFILE']
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = os.environ['HOMEDRIVE']
except KeyError:
drive = ''
userhome = join(drive, os.environ['HOMEPATH'])
if i != 1: #~user
userhome = join(dirname(userhome), path[1:i])
return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
"""Expand shell variables of the forms $var, ${var} and %var%.
Unknown variables are left unchanged."""
if '$' not in path and '%' not in path:
return path
import string
varchars = string.ascii_letters + string.digits + '_-'
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen - 1
elif c == '%': # variable or '%'
if path[index + 1:index + 2] == '%':
res = res + c
index = index + 1
else:
path = path[index+1:]
pathlen = len(path)
try:
index = path.index('%')
except ValueError:
res = res + '%' + path
index = pathlen - 1
else:
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '%' + var + '%'
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '${' + var + '}'
except ValueError:
res = res + '${' + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '$' + var
if c != '':
index = index - 1
else:
res = res + c
index = index + 1
return res
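# Illustrative behaviour (editorial note, not part of the original module): assuming
# an environment where FOO is set to 'bar' and BAZ is unset, the rules above give:
#   expandvars('$FOO/%FOO%/${FOO}')  -> 'bar/bar/bar'
#   expandvars('$BAZ and %BAZ%')     -> '$BAZ and %BAZ%'    (unknown variables unchanged)
#   expandvars("'$FOO' stays put")   -> "'$FOO' stays put"  (no expansion in single quotes)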
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
# Preserve unicode (if path is unicode)
backslash, dot = (u'\\', u'.') if isinstance(path, unicode) else ('\\', '.')
if path.startswith(('\\\\.\\', '\\\\?\\')):
# in the case of paths with these prefixes:
# \\.\ -> device names
# \\?\ -> literal paths
# do not do any normalization, but return the path unchanged
return path
path = path.replace("/", "\\")
prefix, path = splitdrive(path)
# We need to be careful here. If the prefix is empty, and the path starts
# with a backslash, it could either be an absolute path on the current
# drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
# is therefore imperative NOT to collapse multiple backslashes blindly in
# that case.
# The code below preserves multiple backslashes when there is no drive
# letter. This means that the invalid filename \\\a\b is preserved
# unchanged, where a\\\b is normalised to a\b. It's not clear that there
# is any better behaviour for such edge cases.
if prefix == '':
# No drive letter - preserve initial backslashes
while path[:1] == "\\":
prefix = prefix + backslash
path = path[1:]
else:
# We have a drive letter - collapse initial backslashes
if path.startswith("\\"):
prefix = prefix + backslash
path = path.lstrip("\\")
comps = path.split("\\")
i = 0
while i < len(comps):
if comps[i] in ('.', ''):
del comps[i]
elif comps[i] == '..':
if i > 0 and comps[i-1] != '..':
del comps[i-1:i+1]
i -= 1
elif i == 0 and prefix.endswith("\\"):
del comps[i]
else:
i += 1
else:
i += 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append(dot)
return prefix + backslash.join(comps)
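# Illustrative behaviour (editorial note): normpath('A//B'), normpath('A/./B') and
# normpath('A/foo/../B') all return 'A\\B', while paths starting with the '\\\\.\\'
# (device) or '\\\\?\\' (literal) prefixes are returned unchanged, as described above.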
# Return an absolute path.
try:
from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
def abspath(path):
"""Return the absolute version of a path."""
if not isabs(path):
if isinstance(path, unicode):
cwd = os.getcwdu()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
else: # use native Windows method on Windows
def abspath(path):
"""Return the absolute version of a path."""
if path: # Empty path must return current working directory.
try:
path = _getfullpathname(path)
except WindowsError:
pass # Bad path - return unchanged.
elif isinstance(path, unicode):
path = os.getcwdu()
else:
path = os.getcwd()
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
sys.getwindowsversion()[3] >= 2)
def _abspath_split(path):
abs = abspath(normpath(path))
prefix, rest = splitunc(abs)
is_unc = bool(prefix)
if not is_unc:
prefix, rest = splitdrive(abs)
return is_unc, prefix, [x for x in rest.split(sep) if x]
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_is_unc, start_prefix, start_list = _abspath_split(start)
path_is_unc, path_prefix, path_list = _abspath_split(path)
if path_is_unc ^ start_is_unc:
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
if path_prefix.lower() != start_prefix.lower():
if path_is_unc:
raise ValueError("path is on UNC root %s, start on UNC root %s"
% (path_prefix, start_prefix))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_prefix, start_prefix))
# Work out how much of the filepath is shared by start and path.
i = 0
for e1, e2 in zip(start_list, path_list):
if e1.lower() != e2.lower():
break
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
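# Illustrative behaviour (editorial note):
#   relpath('c:\\a\\b\\file.txt', 'c:\\a')  -> 'b\\file.txt'
#   relpath('c:\\a', 'c:\\a\\b\\c')         -> '..\\..'
#   relpath('c:\\a', 'c:\\a')               -> '.'
# Mixing a UNC path with a drive-letter path raises ValueError.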
try:
# The genericpath.isdir implementation uses os.stat and checks the mode
# attribute to tell whether or not the path is a directory.
# This is overkill on Windows - just pass the path to GetFileAttributes
# and check the attribute from there.
from nt import _isdir as isdir
except ImportError:
# Use genericpath.isdir as imported above.
pass
| apache-2.0 |
jelly/calibre | src/calibre/utils/text2int.py | 1 | 2235 | #!/usr/bin/env python2
__author__ = "stackoverflow community"
__docformat__ = 'restructuredtext en'
"""
Takes english numeric words and converts them to integers.
Returns False if the word isn't a number.
implementation courtesy of the stackoverflow community:
http://stackoverflow.com/questions/493174/is-there-a-way-to-convert-number-words-to-integers-python
"""
import re
numwords = {}
def text2int(textnum):
if not numwords:
units = ["zero", "one", "two", "three", "four", "five", "six",
"seven", "eight", "nine", "ten", "eleven", "twelve",
"thirteen", "fourteen", "fifteen", "sixteen", "seventeen",
"eighteen", "nineteen"]
tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty",
"seventy", "eighty", "ninety"]
scales = ["hundred", "thousand", "million", "billion", "trillion",
                  'quadrillion', 'quintillion', 'sextillion', 'septillion',
'octillion', 'nonillion', 'decillion']
numwords["and"] = (1, 0)
for idx, word in enumerate(units):
numwords[word] = (1, idx)
for idx, word in enumerate(tens):
numwords[word] = (1, idx * 10)
for idx, word in enumerate(scales):
numwords[word] = (10 ** (idx * 3 or 2), 0)
ordinal_words = {'first':1, 'second':2, 'third':3, 'fifth':5,
'eighth':8, 'ninth':9, 'twelfth':12}
ordinal_endings = [('ieth', 'y'), ('th', '')]
current = result = 0
tokens = re.split(r"[\s-]+", textnum)
for word in tokens:
if word in ordinal_words:
scale, increment = (1, ordinal_words[word])
else:
for ending, replacement in ordinal_endings:
if word.endswith(ending):
word = "%s%s" % (word[:-len(ending)], replacement)
if word not in numwords:
# raise Exception("Illegal word: " + word)
return False
scale, increment = numwords[word]
if scale > 1:
current = max(1, current)
current = current * scale + increment
if scale > 100:
result += current
current = 0
return result + current
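# Illustrative usage (editorial note; results follow from the tables above):
#   text2int("seven hundred and eighty nine")  -> 789
#   text2int("two thousand and eleven")        -> 2011
#   text2int("twenty-one")                     -> 21
#   text2int("twelfth")                        -> 12
#   text2int("banana")                         -> False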
| gpl-3.0 |
dirtycold/git-cola | extras/qtpy/qtpy/_patch/qcombobox.py | 3 | 4140 | # The code below, as well as the associated test were adapted from
# qt-helpers, which was released under a 3-Clause BSD license:
#
# Copyright (c) 2015, Chris Beaumont and Thomas Robitaille
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Glue project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def patch_qcombobox(QComboBox):
"""
In PySide, using Python objects as userData in QComboBox causes
Segmentation faults under certain conditions. Even in cases where it
doesn't, findData does not work correctly. Likewise, findData also does not
work correctly with Python objects when using PyQt4. On the other hand,
PyQt5 deals with this case correctly. We therefore patch QComboBox when
using PyQt4 and PySide to avoid issues.
"""
from qtpy.QtGui import QIcon
from qtpy.QtCore import Qt, QObject
class userDataWrapper():
"""
This class is used to wrap any userData object. If we don't do this,
then certain types of objects can cause segmentation faults or issues
depending on whether/how __getitem__ is defined.
"""
def __init__(self, data):
self.data = data
_addItem = QComboBox.addItem
def addItem(self, *args, **kwargs):
if len(args) == 3 or (not isinstance(args[0], QIcon)
and len(args) == 2):
args, kwargs['userData'] = args[:-1], args[-1]
if 'userData' in kwargs:
kwargs['userData'] = userDataWrapper(kwargs['userData'])
_addItem(self, *args, **kwargs)
_insertItem = QComboBox.insertItem
def insertItem(self, *args, **kwargs):
if len(args) == 4 or (not isinstance(args[1], QIcon)
and len(args) == 3):
args, kwargs['userData'] = args[:-1], args[-1]
if 'userData' in kwargs:
kwargs['userData'] = userDataWrapper(kwargs['userData'])
_insertItem(self, *args, **kwargs)
_setItemData = QComboBox.setItemData
def setItemData(self, index, value, role=Qt.UserRole):
value = userDataWrapper(value)
_setItemData(self, index, value, role=role)
_itemData = QComboBox.itemData
def itemData(self, index, role=Qt.UserRole):
userData = _itemData(self, index, role=role)
if isinstance(userData, userDataWrapper):
userData = userData.data
return userData
def findData(self, value):
for i in range(self.count()):
if self.itemData(i) == value:
return i
return -1
QComboBox.addItem = addItem
QComboBox.insertItem = insertItem
QComboBox.setItemData = setItemData
QComboBox.itemData = itemData
QComboBox.findData = findData | gpl-2.0 |
ganugapav/pipe | setup.py | 4 | 1923 | # -*- coding: utf-8 -*-
import sys
import re
from os import path as p
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
def read(filename, parent=None):
parent = (parent or __file__)
try:
with open(p.join(p.dirname(parent), filename)) as f:
return f.read()
except IOError:
return ''
def parse_requirements(filename, parent=None, dep=False):
parent = (parent or __file__)
filepath = p.join(p.dirname(parent), filename)
content = read(filename, parent)
for line_number, line in enumerate(content.splitlines(), 1):
candidate = line.strip()
if candidate.startswith('-r'):
for item in parse_requirements(candidate[2:].strip(), filepath, dep):
yield item
elif not dep and '#egg=' in candidate:
yield re.sub('.*#egg=(.*)-(.*)', r'\1==\2', candidate)
else:
yield candidate.replace('-e ', '')
# Avoid byte-compiling the shipped template
sys.dont_write_bytecode = True
setup(
name='pipe2py',
version='0.23.0',
description=(
'A project to compile Yahoo! Pipes into Python. '
'The pipe2py package can compile a Yahoo! Pipe into pure Python source'
' code, or it can interpret the pipe on-the-fly. It supports embedded '
'pipes too.'
),
long_description=read('README.rst'),
url='http://ggaughan.github.com/pipe2py/',
license = 'GPL2',
author='Greg Gaughan',
author_email='[email protected]',
packages=find_packages(exclude=['tests']),
package_data={'templates': 'templates/*.txt'},
include_package_data=True,
classifiers=[],
keywords='',
scripts=[p.join('bin', 'compile')],
install_requires=parse_requirements('requirements.txt'),
dependency_links=list(parse_requirements('requirements.txt', dep=True)),
)
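# Illustrative note (editorial addition; the requirement lines below are made up):
# given a requirements.txt containing
#     -r base.txt
#     -e git+https://example.org/foo.git#egg=foo-1.0
#     requests>=2.0
# parse_requirements('requirements.txt') recurses into base.txt, rewrites the
# '#egg=' line to 'foo==1.0' for install_requires, and yields 'requests>=2.0'
# unchanged, while the dep=True pass used for dependency_links keeps the VCS URL.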
| gpl-2.0 |
linked67/p2pool-phicoin | SOAPpy/Config.py | 289 | 7622 | """
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Config.py 1298 2006-11-07 00:54:15Z sanxiyn $'
from version import __version__
import socket
from types import *
from NS import NS
################################################################################
# Configuration class
################################################################################
class SOAPConfig:
__readonly = ('SSLserver', 'SSLclient', 'GSIserver', 'GSIclient')
class SSLconfig:
__slots__ = ('key_file', 'cert_file')
key_file = None
cert_file = None
def __init__(self, config = None, **kw):
d = self.__dict__
if config:
if not isinstance(config, SOAPConfig):
raise AttributeError, \
"initializer must be SOAPConfig instance"
s = config.__dict__
for k, v in s.items():
if k[0] != '_':
d[k] = v
else:
# Setting debug also sets returnFaultInfo,
# dumpHeadersIn, dumpHeadersOut, dumpSOAPIn, and dumpSOAPOut
self.debug = 0
self.dumpFaultInfo = 1
# Setting namespaceStyle sets typesNamespace, typesNamespaceURI,
# schemaNamespace, and schemaNamespaceURI
self.namespaceStyle = '1999'
self.strictNamespaces = 0
self.typed = 1
self.buildWithNamespacePrefix = 1
self.returnAllAttrs = 0
# Strict checking of range for floats and doubles
self.strict_range = 0
# Default encoding for dictionary keys
self.dict_encoding = 'ascii'
# New argument name handling mechanism. See
# README.MethodParameterNaming for details
self.specialArgs = 1
# If unwrap_results=1 and there is only element in the struct,
# SOAPProxy will assume that this element is the result
# and return it rather than the struct containing it.
# Otherwise SOAPproxy will return the struct with all the
# elements as attributes.
self.unwrap_results = 1
# Automatically convert SOAP complex types, and
# (recursively) public contents into the corresponding
# python types. (Private subobjects have names that start
# with '_'.)
#
# Conversions:
# - faultType --> raise python exception
# - arrayType --> array
# - compoundType --> dictionary
#
self.simplify_objects = 0
# Per-class authorization method. If this is set, before
# calling a any class method, the specified authorization
# method will be called. If it returns 1, the method call
# will proceed, otherwise the call will throw with an
# authorization error.
self.authMethod = None
# Globus Support if pyGlobus.io available
try:
from pyGlobus import io;
d['GSIserver'] = 1
d['GSIclient'] = 1
except:
d['GSIserver'] = 0
d['GSIclient'] = 0
# Server SSL support if M2Crypto.SSL available
try:
from M2Crypto import SSL
d['SSLserver'] = 1
except:
d['SSLserver'] = 0
# Client SSL support if socket.ssl available
try:
from socket import ssl
d['SSLclient'] = 1
except:
d['SSLclient'] = 0
# Cert support
if d['SSLclient'] or d['SSLserver']:
d['SSL'] = self.SSLconfig()
for k, v in kw.items():
if k[0] != '_':
setattr(self, k, v)
def __setattr__(self, name, value):
if name in self.__readonly:
raise AttributeError, "readonly configuration setting"
d = self.__dict__
if name in ('typesNamespace', 'typesNamespaceURI',
'schemaNamespace', 'schemaNamespaceURI'):
if name[-3:] == 'URI':
base, uri = name[:-3], 1
else:
base, uri = name, 0
if type(value) == StringType:
if NS.NSMAP.has_key(value):
n = (value, NS.NSMAP[value])
elif NS.NSMAP_R.has_key(value):
n = (NS.NSMAP_R[value], value)
else:
raise AttributeError, "unknown namespace"
elif type(value) in (ListType, TupleType):
if uri:
n = (value[1], value[0])
else:
n = (value[0], value[1])
else:
raise AttributeError, "unknown namespace type"
d[base], d[base + 'URI'] = n
try:
d['namespaceStyle'] = \
NS.STMAP_R[(d['typesNamespace'], d['schemaNamespace'])]
except:
d['namespaceStyle'] = ''
elif name == 'namespaceStyle':
value = str(value)
if not NS.STMAP.has_key(value):
raise AttributeError, "unknown namespace style"
d[name] = value
n = d['typesNamespace'] = NS.STMAP[value][0]
d['typesNamespaceURI'] = NS.NSMAP[n]
n = d['schemaNamespace'] = NS.STMAP[value][1]
d['schemaNamespaceURI'] = NS.NSMAP[n]
elif name == 'debug':
d[name] = \
d['returnFaultInfo'] = \
d['dumpHeadersIn'] = \
d['dumpHeadersOut'] = \
d['dumpSOAPIn'] = \
d['dumpSOAPOut'] = value
else:
d[name] = value
Config = SOAPConfig()
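# Illustrative usage (editorial note; attribute names come from the class above):
#   Config.debug = 1 # also sets returnFaultInfo, dumpHeadersIn/Out and dumpSOAPIn/Out
#   Config.namespaceStyle = '1999' # updates types/schema namespaces and their URIs
#   per_call = SOAPConfig(Config, simplify_objects=1) # copy global config, then override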
| gpl-3.0 |
acuros/heking | jinja2/tests.py | 638 | 3444 | # -*- coding: utf-8 -*-
"""
jinja2.tests
~~~~~~~~~~~~
Jinja test functions. Used with the "is" operator.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
from jinja2.runtime import Undefined
from jinja2._compat import text_type, string_types, mapping_types
number_re = re.compile(r'^-?\d+(\.\d+)?$')
regex_type = type(number_re)
test_callable = callable
def test_odd(value):
"""Return true if the variable is odd."""
return value % 2 == 1
def test_even(value):
"""Return true if the variable is even."""
return value % 2 == 0
def test_divisibleby(value, num):
"""Check if a variable is divisible by a number."""
return value % num == 0
def test_defined(value):
"""Return true if the variable is defined:
.. sourcecode:: jinja
{% if variable is defined %}
value of variable: {{ variable }}
{% else %}
variable is not defined
{% endif %}
See the :func:`default` filter for a simple way to set undefined
variables.
"""
return not isinstance(value, Undefined)
def test_undefined(value):
"""Like :func:`defined` but the other way round."""
return isinstance(value, Undefined)
def test_none(value):
"""Return true if the variable is none."""
return value is None
def test_lower(value):
"""Return true if the variable is lowercased."""
return text_type(value).islower()
def test_upper(value):
"""Return true if the variable is uppercased."""
return text_type(value).isupper()
def test_string(value):
"""Return true if the object is a string."""
return isinstance(value, string_types)
def test_mapping(value):
"""Return true if the object is a mapping (dict etc.).
.. versionadded:: 2.6
"""
return isinstance(value, mapping_types)
def test_number(value):
"""Return true if the variable is a number."""
return isinstance(value, (int, float, complex))
def test_sequence(value):
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
try:
len(value)
value.__getitem__
except:
return False
return True
def test_sameas(value, other):
"""Check if an object points to the same memory address than another
object:
.. sourcecode:: jinja
{% if foo.attribute is sameas false %}
the foo attribute really is the `False` singleton
{% endif %}
"""
return value is other
def test_iterable(value):
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
return True
def test_escaped(value):
"""Check if the value is escaped."""
return hasattr(value, '__html__')
TESTS = {
'odd': test_odd,
'even': test_even,
'divisibleby': test_divisibleby,
'defined': test_defined,
'undefined': test_undefined,
'none': test_none,
'lower': test_lower,
'upper': test_upper,
'string': test_string,
'mapping': test_mapping,
'number': test_number,
'sequence': test_sequence,
'iterable': test_iterable,
'callable': test_callable,
'sameas': test_sameas,
'escaped': test_escaped
}
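# Illustrative usage (editorial note): these callables back the template "is" operator,
# e.g. with the public jinja2 API:
#   from jinja2 import Environment
#   Environment().from_string('{{ 42 is even }} / {{ "abc" is lower }}').render()
#   # -> 'True / True'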
| apache-2.0 |
ekwoodrich/nirha | nirhalib/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.py | 714 | 4741 | # urllib3/contrib/ntlmpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
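# Illustrative usage (editorial note; host, credentials and URLs are placeholders):
#   pool = NTLMConnectionPool('DOMAIN\\user', 'secret', '/protected/auth-url',
#                             host='server.example.com', port=443)
#   response = pool.urlopen('GET', '/some/resource')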
| apache-2.0 |
qtekfun/htcDesire820Kernel | external/chromium_org/tools/deep_memory_profiler/dmprof.py | 93 | 2471 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The Deep Memory Profiler analyzer script.
See http://dev.chromium.org/developers/deep-memory-profiler for details.
"""
import logging
import sys
from lib.exceptions import ParsingException
import subcommands
LOGGER = logging.getLogger('dmprof')
def main():
COMMANDS = {
'buckets': subcommands.BucketsCommand,
'cat': subcommands.CatCommand,
'csv': subcommands.CSVCommand,
'expand': subcommands.ExpandCommand,
'json': subcommands.JSONCommand,
'list': subcommands.ListCommand,
'map': subcommands.MapCommand,
'pprof': subcommands.PProfCommand,
'stacktrace': subcommands.StacktraceCommand,
'upload': subcommands.UploadCommand,
}
if len(sys.argv) < 2 or (not sys.argv[1] in COMMANDS):
sys.stderr.write("""Usage: dmprof <command> [options] [<args>]
Commands:
buckets Dump a bucket list with resolving symbols
cat Categorize memory usage (under development)
csv Classify memory usage in CSV
expand Show all stacktraces contained in the specified component
json Classify memory usage in JSON
list Classify memory usage in simple listing format
map Show history of mapped regions
pprof Format the profile dump so that it can be processed by pprof
stacktrace Convert runtime addresses to symbol names
upload Upload dumped files
Quick Reference:
dmprof buckets <first-dump>
dmprof cat <first-dump>
dmprof csv [-p POLICY] <first-dump>
dmprof expand <dump> <policy> <component> <depth>
dmprof json [-p POLICY] <first-dump>
dmprof list [-p POLICY] <first-dump>
dmprof map <first-dump> <policy>
dmprof pprof [-c COMPONENT] <dump> <policy>
dmprof stacktrace <dump>
dmprof upload [--gsutil path/to/gsutil] <first-dump> <destination-gs-path>
""")
sys.exit(1)
action = sys.argv.pop(1)
LOGGER.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
LOGGER.addHandler(handler)
try:
errorcode = COMMANDS[action]().do(sys.argv)
except ParsingException, e:
errorcode = 1
sys.stderr.write('Exit by parsing error: %s\n' % e)
return errorcode
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 |
isyippee/nova | nova/api/openstack/compute/legacy_v2/servers.py | 4 | 48830 | # Copyright 2010 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
import re
import sys
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import netutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import wsgi
from nova import block_device
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova import objects
from nova import policy
from nova import utils
server_opts = [
cfg.BoolOpt('enable_instance_password',
default=True,
help='Enables returning of the instance password by the'
' relevant server API calls such as create, rebuild'
' or rescue, If the hypervisor does not support'
' password injection then the password returned will'
' not be correct'),
]
CONF = cfg.CONF
CONF.register_opts(server_opts)
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
CREATE_EXCEPTIONS = {
exception.InvalidMetadataSize: exc.HTTPRequestEntityTooLarge,
exception.ImageNotFound: exc.HTTPBadRequest,
exception.FlavorNotFound: exc.HTTPBadRequest,
exception.KeypairNotFound: exc.HTTPBadRequest,
exception.ConfigDriveInvalidValue: exc.HTTPBadRequest,
exception.ImageNotActive: exc.HTTPBadRequest,
exception.FlavorDiskTooSmall: exc.HTTPBadRequest,
exception.FlavorMemoryTooSmall: exc.HTTPBadRequest,
exception.NetworkNotFound: exc.HTTPBadRequest,
exception.PortNotFound: exc.HTTPBadRequest,
exception.FixedIpAlreadyInUse: exc.HTTPBadRequest,
exception.SecurityGroupNotFound: exc.HTTPBadRequest,
exception.InstanceUserDataTooLarge: exc.HTTPBadRequest,
exception.InstanceUserDataMalformed: exc.HTTPBadRequest,
exception.ImageNUMATopologyIncomplete: exc.HTTPBadRequest,
exception.ImageNUMATopologyForbidden: exc.HTTPBadRequest,
exception.ImageNUMATopologyAsymmetric: exc.HTTPBadRequest,
exception.ImageNUMATopologyCPUOutOfRange: exc.HTTPBadRequest,
exception.ImageNUMATopologyCPUDuplicates: exc.HTTPBadRequest,
exception.ImageNUMATopologyCPUsUnassigned: exc.HTTPBadRequest,
exception.ImageNUMATopologyMemoryOutOfRange: exc.HTTPBadRequest,
exception.PortInUse: exc.HTTPConflict,
exception.InstanceExists: exc.HTTPConflict,
exception.NoUniqueMatch: exc.HTTPConflict,
exception.Invalid: exc.HTTPBadRequest,
}
CREATE_EXCEPTIONS_MSGS = {
exception.ImageNotFound: _("Can not find requested image"),
exception.FlavorNotFound: _("Invalid flavorRef provided."),
exception.KeypairNotFound: _("Invalid key_name provided."),
exception.ConfigDriveInvalidValue: _("Invalid config_drive provided."),
}
class Controller(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
_view_builder_class = views_servers.ViewBuilder
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
link = filter(lambda l: l['rel'] == 'self',
robj.obj['server']['links'])
if link:
robj['Location'] = utils.utf8(link[0]['href'])
# Convenience return
return robj
def __init__(self, ext_mgr=None, **kwargs):
super(Controller, self).__init__(**kwargs)
self.compute_api = compute.API()
self.ext_mgr = ext_mgr
def index(self, req):
"""Returns a list of server names and ids for a given user."""
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def detail(self, req):
"""Returns a list of server details for a given user."""
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def _get_servers(self, req, is_detail):
"""Returns a list of servers, based on any search options specified."""
search_opts = {}
search_opts.update(req.GET)
context = req.environ['nova.context']
remove_invalid_options(context, search_opts,
self._get_server_search_options())
# Verify search by 'status' contains a valid status.
# Convert it to filter by vm_state or task_state for compute_api.
search_opts.pop('status', None)
if 'status' in req.GET.keys():
statuses = req.GET.getall('status')
states = common.task_and_vm_state_from_status(statuses)
vm_state, task_state = states
if not vm_state and not task_state:
return {'servers': []}
search_opts['vm_state'] = vm_state
# When we search by vm state, task state will return 'default'.
# So we don't need task_state search_opt.
if 'default' not in task_state:
search_opts['task_state'] = task_state
if 'changes-since' in search_opts:
try:
parsed = timeutils.parse_isotime(search_opts['changes-since'])
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
search_opts['changes-since'] = parsed
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
# to filter out deleted instances by setting the filter ourselves.
# ... Unless 'changes-since' is specified, because 'changes-since'
# should return recently deleted images according to the API spec.
if 'deleted' not in search_opts:
if 'changes-since' not in search_opts:
# No 'changes-since', so we only want non-deleted servers
search_opts['deleted'] = False
else:
# Convert deleted filter value to a valid boolean.
# Return non-deleted servers if an invalid value
# is passed with deleted filter.
search_opts['deleted'] = strutils.bool_from_string(
search_opts['deleted'], default=False)
if search_opts.get("vm_state") == ['deleted']:
if context.is_admin:
search_opts['deleted'] = True
else:
msg = _("Only administrators may list deleted instances")
raise exc.HTTPForbidden(explanation=msg)
all_tenants = common.is_all_tenants(search_opts)
        # We use the boolean from here on out, so remove the entry from
        # search_opts if it's present.
search_opts.pop('all_tenants', None)
elevated = None
if all_tenants:
policy.enforce(context, 'compute:get_all_tenants',
{'project_id': context.project_id,
'user_id': context.user_id})
elevated = context.elevated()
else:
if context.project_id:
search_opts['project_id'] = context.project_id
else:
search_opts['user_id'] = context.user_id
limit, marker = common.get_limit_and_marker(req)
# Sorting by multiple keys and directions is conditionally enabled
sort_keys, sort_dirs = None, None
if self.ext_mgr.is_loaded('os-server-sort-keys'):
sort_keys, sort_dirs = common.get_sort_params(req.params)
expected_attrs = None
if is_detail:
# merge our expected attrs with what the view builder needs for
# showing details
expected_attrs = self._view_builder.get_show_expected_attrs(
expected_attrs)
try:
instance_list = self.compute_api.get_all(elevated or context,
search_opts=search_opts, limit=limit, marker=marker,
want_objects=True, expected_attrs=expected_attrs,
sort_keys=sort_keys, sort_dirs=sort_dirs)
except exception.MarkerNotFound:
msg = _('marker [%s] not found') % marker
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
LOG.debug("Flavor '%s' could not be found", search_opts['flavor'])
instance_list = objects.InstanceList()
if is_detail:
instance_list.fill_faults()
response = self._view_builder.detail(req, instance_list)
else:
response = self._view_builder.index(req, instance_list)
req.cache_db_instances(instance_list)
return response
def _get_server(self, context, req, instance_uuid, is_detail=False):
"""Utility function for looking up an instance by uuid.
:param context: request context for auth
:param req: HTTP request. The instance is cached in this request.
:param instance_uuid: UUID of the server instance to get
:param is_detail: True if you plan on showing the details of the
instance in the response, False otherwise.
"""
expected_attrs = ['flavor']
if is_detail:
expected_attrs = self._view_builder.get_show_expected_attrs(
expected_attrs)
instance = common.get_instance(self.compute_api, context,
instance_uuid,
expected_attrs=expected_attrs)
req.cache_db_instance(instance)
return instance
def _check_string_length(self, value, name, max_length=None):
try:
if isinstance(value, six.string_types):
value = value.strip()
utils.check_string_length(value, name, min_length=1,
max_length=max_length)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
def _validate_server_name(self, value):
self._check_string_length(value, 'Server name', max_length=255)
def _get_injected_files(self, personality):
"""Create a list of injected files from the personality attribute.
At this time, injected_files must be formatted as a list of
(file_path, file_content) pairs for compatibility with the
underlying compute service.
"""
injected_files = []
for item in personality:
try:
path = item['path']
contents = item['contents']
except KeyError as key:
expl = _('Bad personality format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad personality format')
raise exc.HTTPBadRequest(explanation=expl)
if self._decode_base64(contents) is None:
expl = _('Personality content for %s cannot be decoded') % path
raise exc.HTTPBadRequest(explanation=expl)
injected_files.append((path, contents))
return injected_files
def _get_requested_networks(self, requested_networks):
"""Create a list of requested networks from the networks attribute."""
networks = []
network_uuids = []
for network in requested_networks:
request = objects.NetworkRequest()
try:
try:
request.port_id = network.get('port', None)
except ValueError:
msg = _("Bad port format: port uuid is "
"not in proper format "
"(%s)") % network.get('port')
raise exc.HTTPBadRequest(explanation=msg)
if request.port_id:
request.network_id = None
if not utils.is_neutron():
# port parameter is only for neutron v2.0
msg = _("Unknown argument : port")
raise exc.HTTPBadRequest(explanation=msg)
else:
request.network_id = network['uuid']
if (not request.port_id and not
uuidutils.is_uuid_like(request.network_id)):
br_uuid = request.network_id.split('-', 1)[-1]
if not uuidutils.is_uuid_like(br_uuid):
msg = _("Bad networks format: network uuid is "
"not in proper format "
"(%s)") % request.network_id
raise exc.HTTPBadRequest(explanation=msg)
                # The fixed IP address is optional; if it is not provided,
                # one of the available IP addresses from the network will be
                # used.
try:
request.address = network.get('fixed_ip', None)
except ValueError:
msg = (_("Invalid fixed IP address (%s)") %
network.get('fixed_ip'))
raise exc.HTTPBadRequest(explanation=msg)
# duplicate networks are allowed only for neutron v2.0
if (not utils.is_neutron() and request.network_id and
request.network_id in network_uuids):
expl = (_("Duplicate networks"
" (%s) are not allowed") %
request.network_id)
raise exc.HTTPBadRequest(explanation=expl)
network_uuids.append(request.network_id)
networks.append(request)
except KeyError as key:
expl = _('Bad network format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return objects.NetworkRequestList(objects=networks)
# NOTE(vish): Without this regex, b64decode will happily
# ignore illegal bytes in the base64 encoded
# data.
B64_REGEX = re.compile('^(?:[A-Za-z0-9+\/]{4})*'
'(?:[A-Za-z0-9+\/]{2}=='
'|[A-Za-z0-9+\/]{3}=)?$')
def _decode_base64(self, data):
data = re.sub(r'\s', '', data)
if not self.B64_REGEX.match(data):
return None
try:
return base64.b64decode(data)
except TypeError:
return None
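    # Illustrative behaviour (editorial note): without the regex check above,
    # base64.b64decode('ab!cd') silently ignores the illegal '!' and decodes the
    # remaining 'abcd', whereas _decode_base64('ab!cd') returns None so callers
    # such as _get_injected_files can reject the request with an HTTPBadRequest.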
def _validate_access_ipv4(self, address):
if not netutils.is_valid_ipv4(address):
expl = _('accessIPv4 is not proper IPv4 format')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv6(self, address):
if not netutils.is_valid_ipv6(address):
expl = _('accessIPv6 is not proper IPv6 format')
raise exc.HTTPBadRequest(explanation=expl)
def show(self, req, id):
"""Returns server details by server id."""
context = req.environ['nova.context']
instance = self._get_server(context, req, id, is_detail=True)
return self._view_builder.show(req, instance)
def _extract(self, server_dict, ext_name, key):
if self.ext_mgr.is_loaded(ext_name):
return server_dict.get(key)
return None
def _validate_user_data(self, user_data):
if user_data and self._decode_base64(user_data) is None:
expl = _('Userdata content cannot be decoded')
raise exc.HTTPBadRequest(explanation=expl)
return user_data
def _extract_bdm(self, server_dict, image_uuid_specified):
legacy_bdm = True
block_device_mapping_v2 = None
if not self.ext_mgr.is_loaded('os-volumes'):
return legacy_bdm, None
block_device_mapping = server_dict.get('block_device_mapping', [])
if not isinstance(block_device_mapping, list):
msg = _('block_device_mapping must be a list')
raise exc.HTTPBadRequest(explanation=msg)
for bdm in block_device_mapping:
try:
block_device.validate_device_name(bdm.get("device_name"))
block_device.validate_and_default_volume_size(bdm)
except exception.InvalidBDMFormat as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if 'delete_on_termination' in bdm:
bdm['delete_on_termination'] = strutils.bool_from_string(
bdm['delete_on_termination'])
if self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot'):
# Consider the new data format for block device mapping
block_device_mapping_v2 = server_dict.get(
'block_device_mapping_v2', [])
# NOTE (ndipanov): Disable usage of both legacy and new
# block device format in the same request
if block_device_mapping and block_device_mapping_v2:
expl = _('Using different block_device_mapping syntaxes '
'is not allowed in the same request.')
raise exc.HTTPBadRequest(explanation=expl)
if not isinstance(block_device_mapping_v2, list):
msg = _('block_device_mapping_v2 must be a list')
raise exc.HTTPBadRequest(explanation=msg)
# Assume legacy format
legacy_bdm = not bool(block_device_mapping_v2)
try:
block_device_mapping_v2 = [
block_device.BlockDeviceDict.from_api(bdm_dict,
image_uuid_specified)
for bdm_dict in block_device_mapping_v2]
except exception.InvalidBDMFormat as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
bdm = (block_device_mapping or block_device_mapping_v2)
return legacy_bdm, bdm
@staticmethod
def _resolve_exception(matches):
"""We want the most specific exception class."""
while len(matches) > 1:
first = matches[0]
second = matches[1]
if issubclass(first, second):
del matches[1]
else:
del matches[0]
return matches[0]
@staticmethod
def _handle_create_exception(*exc_info):
"""The `CREATE_EXCEPTIONS` dict containing the relationships between
the nova exceptions and the webob exception classes to be raised is
defined at the top of this file.
"""
error = exc_info[1]
err_cls = error.__class__
cls_to_raise = CREATE_EXCEPTIONS.get(err_cls)
if cls_to_raise is None:
# The error is a subclass of one of the dict keys
to_raise = [val for key, val in CREATE_EXCEPTIONS.items()
if isinstance(error, key)]
if len(to_raise) > 1:
cls_to_raise = Controller._resolve_exception(to_raise)
elif not to_raise:
# Not any of the expected exceptions, so re-raise
six.reraise(*exc_info)
else:
cls_to_raise = to_raise[0]
for key, val in CREATE_EXCEPTIONS_MSGS.items():
if isinstance(error, key):
raise cls_to_raise(explanation=CREATE_EXCEPTIONS_MSGS[key])
raise cls_to_raise(explanation=error.format_message())
def _determine_requested_networks(self, server_dict):
requested_networks = None
if (self.ext_mgr.is_loaded('os-networks')
or utils.is_neutron()):
requested_networks = server_dict.get('networks')
if requested_networks is not None:
if not isinstance(requested_networks, list):
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
requested_networks = self._get_requested_networks(
requested_networks)
return requested_networks
@wsgi.response(202)
def create(self, req, body):
"""Creates a new server for a given user."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPUnprocessableEntity()
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
if 'name' not in server_dict:
msg = _("Server name is not defined")
raise exc.HTTPBadRequest(explanation=msg)
name = server_dict['name']
self._validate_server_name(name)
name = name.strip()
image_uuid = self._image_from_req_data(body)
personality = server_dict.get('personality')
config_drive = None
if self.ext_mgr.is_loaded('os-config-drive'):
config_drive = server_dict.get('config_drive')
injected_files = []
if personality:
injected_files = self._get_injected_files(personality)
sg_names = []
if self.ext_mgr.is_loaded('os-security-groups'):
security_groups = server_dict.get('security_groups')
if security_groups is not None:
try:
sg_names = [sg['name'] for sg in security_groups
if sg.get('name')]
except AttributeError:
msg = _("Invalid input for field/attribute %(path)s."
" Value: %(value)s. %(message)s") % {
'path': 'security_groups',
'value': security_groups,
'message': ''
}
raise exc.HTTPBadRequest(explanation=msg)
if not sg_names:
sg_names.append('default')
sg_names = list(set(sg_names))
requested_networks = self._determine_requested_networks(server_dict)
(access_ip_v4, ) = server_dict.get('accessIPv4'),
if access_ip_v4 is not None:
self._validate_access_ipv4(access_ip_v4)
(access_ip_v6, ) = server_dict.get('accessIPv6'),
if access_ip_v6 is not None:
self._validate_access_ipv6(access_ip_v6)
flavor_id = self._flavor_id_from_req_data(body)
# optional openstack extensions:
key_name = self._extract(server_dict, 'os-keypairs', 'key_name')
availability_zone = self._extract(server_dict, 'os-availability-zone',
'availability_zone')
user_data = self._extract(server_dict, 'os-user-data', 'user_data')
self._validate_user_data(user_data)
image_uuid_specified = bool(image_uuid)
legacy_bdm, block_device_mapping = self._extract_bdm(server_dict,
image_uuid_specified)
ret_resv_id = False
# min_count and max_count are optional. If they exist, they may come
# in as strings. Verify that they are valid integers and > 0.
# Also, we want to default 'min_count' to 1, and default
# 'max_count' to be 'min_count'.
min_count = 1
max_count = 1
if self.ext_mgr.is_loaded('os-multiple-create'):
ret_resv_id = server_dict.get('return_reservation_id', False)
min_count = server_dict.get('min_count', 1)
max_count = server_dict.get('max_count', min_count)
try:
min_count = utils.validate_integer(
min_count, "min_count", min_value=1)
max_count = utils.validate_integer(
max_count, "max_count", min_value=1)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if min_count > max_count:
msg = _('min_count must be <= max_count')
raise exc.HTTPBadRequest(explanation=msg)
auto_disk_config = False
if self.ext_mgr.is_loaded('OS-DCF'):
auto_disk_config = server_dict.get('auto_disk_config')
scheduler_hints = {}
if self.ext_mgr.is_loaded('OS-SCH-HNT'):
scheduler_hints = server_dict.get('scheduler_hints', {})
check_server_group_quota = self.ext_mgr.is_loaded(
'os-server-group-quotas')
try:
_get_inst_type = flavors.get_flavor_by_flavor_id
inst_type = _get_inst_type(flavor_id, ctxt=context,
read_deleted="no")
(instances, resv_id) = self.compute_api.create(context,
inst_type,
image_uuid,
display_name=name,
display_description=name,
key_name=key_name,
metadata=server_dict.get('metadata', {}),
access_ip_v4=access_ip_v4,
access_ip_v6=access_ip_v6,
injected_files=injected_files,
admin_password=password,
min_count=min_count,
max_count=max_count,
requested_networks=requested_networks,
security_group=sg_names,
user_data=user_data,
availability_zone=availability_zone,
config_drive=config_drive,
block_device_mapping=block_device_mapping,
auto_disk_config=auto_disk_config,
scheduler_hints=scheduler_hints,
legacy_bdm=legacy_bdm,
check_server_group_quota=check_server_group_quota)
except (exception.QuotaError,
exception.PortLimitExceeded) as error:
raise exc.HTTPForbidden(
explanation=error.format_message(),
headers={'Retry-After': 0})
except messaging.RemoteError as err:
msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = "UnicodeError: %s" % error
raise exc.HTTPBadRequest(explanation=msg)
except Exception:
# The remaining cases can be handled in a standard fashion.
self._handle_create_exception(*sys.exc_info())
# If the caller wanted a reservation_id, return it
if ret_resv_id:
return wsgi.ResponseObject({'reservation_id': resv_id})
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)
def _delete(self, context, req, instance_uuid):
instance = self._get_server(context, req, instance_uuid)
if CONF.reclaim_instance_interval:
try:
self.compute_api.soft_delete(context, instance)
except exception.InstanceInvalidState:
# Note(yufang521247): instance which has never been active
# is not allowed to be soft_deleted. Thus we have to call
# delete() to clean up the instance.
self.compute_api.delete(context, instance)
else:
self.compute_api.delete(context, instance)
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPUnprocessableEntity()
ctxt = req.environ['nova.context']
update_dict = {}
if 'name' in body['server']:
name = body['server']['name']
self._validate_server_name(name)
update_dict['display_name'] = name.strip()
if 'accessIPv4' in body['server']:
access_ipv4 = body['server']['accessIPv4']
if access_ipv4:
self._validate_access_ipv4(access_ipv4)
update_dict['access_ip_v4'] = (
access_ipv4 and access_ipv4.strip() or None)
if 'accessIPv6' in body['server']:
access_ipv6 = body['server']['accessIPv6']
if access_ipv6:
self._validate_access_ipv6(access_ipv6)
update_dict['access_ip_v6'] = (
access_ipv6 and access_ipv6.strip() or None)
if 'auto_disk_config' in body['server']:
auto_disk_config = strutils.bool_from_string(
body['server']['auto_disk_config'])
update_dict['auto_disk_config'] = auto_disk_config
if 'hostId' in body['server']:
msg = _("HostId cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
if 'personality' in body['server']:
msg = _("Personality cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
instance = self._get_server(ctxt, req, id, is_detail=True)
try:
policy.enforce(ctxt, 'compute:update', instance)
instance.update(update_dict)
# Note instance.save can throw a NotFound exception
instance.save()
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
return self._view_builder.show(req, instance)
@wsgi.response(204)
@wsgi.action('confirmResize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirmResize', id)
@wsgi.response(202)
@wsgi.action('revertResize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Flavor used by the instance could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revertResize', id)
return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
if 'reboot' in body and 'type' in body['reboot']:
if not isinstance(body['reboot']['type'], six.string_types):
msg = _("Argument 'type' for reboot must be a string")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
valid_reboot_types = ['HARD', 'SOFT']
reboot_type = body['reboot']['type'].upper()
            if reboot_type not in valid_reboot_types:
msg = _("Argument 'type' for reboot is not HARD or SOFT")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
msg = _("Missing argument 'type' for reboot")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot', id)
return webob.Response(status_int=202)
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
raise exc.HTTPForbidden(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.FlavorNotFound:
msg = _("Unable to locate requested flavor.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameFlavor:
msg = _("Resize requires a flavor change.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeDisk as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resize', instance_id)
except exception.ImageNotAuthorized:
msg = _("You are not authorized to access the image "
"the instance was started with.")
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _("Image that the instance was started "
"with could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except (exception.NoValidHost,
exception.AutoDiskConfigDisabledByImage) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.Invalid:
msg = _("Invalid instance image.")
raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.response(204)
def delete(self, req, id):
"""Destroys a server."""
try:
self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete', id)
def _image_ref_from_req_data(self, data):
try:
return six.text_type(data['server']['imageRef'])
except (TypeError, KeyError):
msg = _("Missing imageRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _image_uuid_from_href(self, image_href):
if not image_href:
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
# If the image href was generated by nova api, strip image_href
# down to an id and use the default glance connection params
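        # (illustrative, not an actual href from this deployment) a value such as
        # "http://glance.example.com/v2/images/<uuid>" reduces to just "<uuid>" below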
image_uuid = image_href.split('/').pop()
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
def _image_from_req_data(self, data):
"""Get image data from the request or raise appropriate
exceptions
If no image is supplied - checks to see if there is
block devices set and proper extesions loaded.
"""
image_ref = data['server'].get('imageRef')
bdm = data['server'].get('block_device_mapping')
bdm_v2 = data['server'].get('block_device_mapping_v2')
if (not image_ref and (
(bdm and self.ext_mgr.is_loaded('os-volumes')) or
(bdm_v2 and
self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot')))):
return ''
else:
image_href = self._image_ref_from_req_data(data)
image_uuid = self._image_uuid_from_href(image_href)
return image_uuid
def _flavor_id_from_req_data(self, data):
try:
flavor_ref = data['server']['flavorRef']
except (TypeError, KeyError):
msg = _("Missing flavorRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
try:
return common.get_id_from_href(flavor_ref)
except ValueError:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.response(202)
@wsgi.action('changePassword')
def _action_change_password(self, req, id, body):
context = req.environ['nova.context']
if (not body.get('changePassword')
or 'adminPass' not in body['changePassword']):
msg = _("No adminPass was specified")
raise exc.HTTPBadRequest(explanation=msg)
password = self._get_server_admin_password(body['changePassword'])
server = self._get_server(context, req, id)
try:
self.compute_api.set_admin_password(context, server, password)
except exception.InstancePasswordSetFailed as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as e:
            common.raise_http_conflict_for_instance_invalid_state(
e, 'changePassword', id)
except NotImplementedError:
msg = _("Unable to set password on instance")
raise exc.HTTPNotImplemented(explanation=msg)
return webob.Response(status_int=202)
def _validate_metadata(self, metadata):
"""Ensure that we can work with the metadata given."""
try:
six.iteritems(metadata)
except AttributeError:
msg = _("Unable to parse metadata key/value pairs.")
LOG.debug(msg)
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.response(202)
@wsgi.action('resize')
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
try:
flavor_ref = str(body["resize"]["flavorRef"])
if not flavor_ref:
msg = _("Resize request has invalid 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
except (KeyError, TypeError):
msg = _("Resize requests require 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
kwargs = {}
if 'auto_disk_config' in body['resize']:
kwargs['auto_disk_config'] = body['resize']['auto_disk_config']
return self._resize(req, id, flavor_ref, **kwargs)
@wsgi.response(202)
@wsgi.action('rebuild')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
body = body['rebuild']
try:
image_href = body["imageRef"]
except (KeyError, TypeError):
msg = _("Could not parse imageRef from request.")
raise exc.HTTPBadRequest(explanation=msg)
image_href = self._image_uuid_from_href(image_href)
password = self._get_server_admin_password(body)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
attr_map = {
'personality': 'files_to_inject',
'name': 'display_name',
'accessIPv4': 'access_ip_v4',
'accessIPv6': 'access_ip_v6',
'metadata': 'metadata',
'auto_disk_config': 'auto_disk_config',
}
kwargs = {}
# take the preserve_ephemeral value into account only when the
# corresponding extension is active
if (self.ext_mgr.is_loaded('os-preserve-ephemeral-rebuild')
and 'preserve_ephemeral' in body):
kwargs['preserve_ephemeral'] = strutils.bool_from_string(
body['preserve_ephemeral'], strict=True)
if 'accessIPv4' in body:
self._validate_access_ipv4(body['accessIPv4'])
if 'accessIPv6' in body:
self._validate_access_ipv6(body['accessIPv6'])
if 'name' in body:
self._validate_server_name(body['name'])
for request_attribute, instance_attribute in attr_map.items():
try:
kwargs[instance_attribute] = body[request_attribute]
except (KeyError, TypeError):
pass
self._validate_metadata(kwargs.get('metadata', {}))
if 'files_to_inject' in kwargs:
personality = kwargs.pop('files_to_inject')
files_to_inject = self._get_injected_files(personality)
else:
files_to_inject = None
try:
self.compute_api.rebuild(context,
instance,
image_href,
password,
files_to_inject=files_to_inject,
**kwargs)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rebuild', id)
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.AutoDiskConfigDisabledByImage) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id, is_detail=True)
view = self._view_builder.show(req, instance)
# Add on the adminPass attribute since the view doesn't do it
# unless instance passwords are disabled
if CONF.enable_instance_password:
view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)
@wsgi.response(202)
@wsgi.action('createImage')
@common.check_snapshots_enabled
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
context = req.environ['nova.context']
entity = body.get("createImage", {})
image_name = entity.get("name")
if not image_name:
msg = _("createImage entity requires name attribute")
raise exc.HTTPBadRequest(explanation=msg)
props = {}
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
try:
props.update(metadata)
except ValueError:
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
instance = self._get_server(context, req, id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
try:
if self.compute_api.is_volume_backed_instance(context, instance,
bdms):
policy.enforce(context,
'compute:snapshot_volume_backed',
{'project_id': context.project_id,
'user_id': context.user_id})
image = self.compute_api.snapshot_volume_backed(
context,
instance,
image_name,
extra_properties=props)
else:
image = self.compute_api.snapshot(context,
instance,
image_name,
extra_properties=props)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createImage', id)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
# build location of newly-created image entity
image_id = str(image['id'])
url_prefix = self._view_builder._update_glance_link_prefix(
req.application_url)
image_ref = os.path.join(url_prefix,
context.project_id,
'images',
image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
try:
password = server['adminPass']
self._validate_admin_password(password)
except KeyError:
password = utils.generate_password()
except ValueError:
raise exc.HTTPBadRequest(explanation=_("Invalid adminPass"))
return password
def _validate_admin_password(self, password):
if not isinstance(password, six.string_types):
raise ValueError()
def _get_server_search_options(self):
"""Return server search options allowed by non-admin."""
return ('reservation_id', 'name', 'status', 'image', 'flavor',
'ip', 'changes-since', 'all_tenants')
def create_resource(ext_mgr):
return wsgi.Resource(Controller(ext_mgr))
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
LOG.debug("Removing options '%s' from query",
", ".join(unknown_options))
for opt in unknown_options:
search_options.pop(opt, None)
| apache-2.0 |
kartikgupta0909/build-mozharness | scripts/sourcetool.py | 11 | 7615 | #!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""sourcetool.py
Port of tools/buildfarm/utils/hgtool.py.
TODO: sourcetool.py currently ignores work_dir completely.
Maybe we should use it instead of dest ?
Maybe I need to rethink work_dir?
"""
import os
import sys
sys.path.insert(1, os.path.dirname(sys.path[0]))
from mozharness.base.config import parse_config_file
from mozharness.base.script import BaseScript
from mozharness.base.vcs.mercurial import MercurialVCS
# These variables allow us to create a sourcetool.py that will [hopefully]
# work with a selection of VCSs.
#
# To avoid needing to specify --vcs VCS, softlink sourcetool.py to
# VCStool.py (where VCS is the VCS you want to use, and is a member of
# VCS_CHOICES).
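#
# For example (hypothetical repo URL and paths, shown only to illustrate the
# softlink trick described above):
#   ln -s sourcetool.py hgtool.py
#   ./hgtool.py http://hg.example.com/some-repo some-dest-dir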
VCS_CHOICES = ['hg', ]
VCS_DEFAULT = None
VCS_REQUIRED_OPTION = "--vcs VCS "
VCS_NAME = os.path.basename(sys.argv[0]).replace('tool.py', '')
if VCS_NAME in VCS_CHOICES:
VCS_DEFAULT = VCS_NAME
VCS_REQUIRED_OPTION = ""
SOURCE_TOOL_USAGE = """Usage:
%%prog [options] %srepo [dest]
%%prog %s--repo REPOSITORY [options]
%%prog [-h|--help]""" % (VCS_REQUIRED_OPTION, VCS_REQUIRED_OPTION)
# SourceTool {{{1
class SourceTool(BaseScript):
# These options were chosen with an eye towards backwards
# compatibility with the existing hgtool.
#
# TODO: get rid of env options, or at least remove HG from the names.
config_options = [[
["--rev", "-r"],
{"action": "store",
"dest": "vcs_revision",
"default": os.environ.get('HG_REV'),
"help": "Specify which revision to update to."
}
], [
["--branch", "-b"],
{"action": "store",
"dest": "vcs_branch",
"default": os.environ.get('HG_BRANCH', 'default'),
"help": "Specify which branch to update to."
}
], [
["--vcs", ],
{"action": "store",
"type": "choice",
"dest": "vcs",
"default": VCS_DEFAULT,
"choices": VCS_CHOICES,
"help": "Specify which VCS to use."
}
], [
["--props-file", "-p"],
{"action": "store",
"dest": "vcs_propsfile",
"default": os.environ.get('PROPERTIES_FILE'),
"help": "build json file containing revision information"
}
], [
# TODO --tbox and --no-tbox should DIAF once we fix bug 630538.
["--tbox", ],
{"action": "store_true",
"dest": "tbox_output",
"default": bool(os.environ.get('PROPERTIES_FILE')),
"help": "Output TinderboxPrint messages."
}
], [
["--no-tbox", ],
{"action": "store_false",
"dest": "tbox_output",
"help": "Don't output TinderboxPrint messages."
}
], [
["--repo", ],
{"action": "store",
"dest": "vcs_repo",
"help": "Specify the VCS repo."
}
], [
["--dest", ],
{"action": "store",
"dest": "vcs_dest",
"help": "Specify the destination directory (optional)"
}
], [
# TODO Are the shared options HG-specific?
# I think there are, or we can create, similar behavior in other
# VCSs.
["--shared-dir", '-s'],
{"action": "store",
"dest": "vcs_shared_dir",
"default": os.environ.get('HG_SHARE_BASE_DIR'),
"help": "clone to a shared directory"
}
], [
["--allow-unshared-local-clones", ],
{"action": "store_true",
"dest": "vcs_allow_unshared_local_clones",
"default": False,
"help": "Allow unshared checkouts if --shared-dir is specified"
}
], [
["--check-outgoing", ],
{"action": "store_true",
"dest": "vcs_strip_outgoing",
"default": False,
"help": "check for and clobber outgoing changesets"
}
]]
def __init__(self, require_config_file=False):
BaseScript.__init__(self, config_options=self.config_options,
all_actions=['source', ],
usage=SOURCE_TOOL_USAGE,
require_config_file=require_config_file)
def _pre_config_lock(self, rw_config):
# This is a workaround for legacy compatibility with the original
# hgtool.py.
#
# Since we need to read the buildbot json props, as well as parse
# additional commandline arguments that aren't specified via
# options, we call this function before locking the config.
#
# rw_config is the BaseConfig object that parsed the options;
# self.config is the soon-to-be-locked runtime configuration.
#
# This is a powerful way to hack the config before locking;
# we need to be careful not to abuse it.
args = rw_config.args
c = self.config
if c.get('vcs') is None:
self.fatal("Must specify --vcs!\n\n%s" %
rw_config.config_parser.format_help())
if c.get('vcs_repo') is None:
if len(args) not in (1, 2):
self.fatal("""Invalid number of arguments!
You need to either specify --repo or specify it after the options:
%s""" % rw_config.config_parser.get_usage())
self.config['vcs_repo'] = args[0]
if len(args) == 2:
self.config['vcs_dest'] = args[1]
elif not self.config.get('vcs_dest'):
self.config['vcs_dest'] = os.path.basename(self.config['vcs_repo'])
# This is a buildbot-specific props file.
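        # Illustrative shape only (keys inferred from the reads just below):
        #   {"sourcestamp": {"revision": "<rev>", "branch": "<branch>"}}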
if self.config.get('vcs_propsfile'):
js = parse_config_file(self.config['vcs_propsfile'])
if self.config.get('vcs_revision') is None:
self.config['vcs_revision'] = js['sourcestamp']['revision']
if self.config.get('vcs_branch') is None:
self.config['vcs_branch'] = js['sourcestamp']['branch']
def source(self):
vcs_obj = None
if self.config['vcs'] == 'hg':
vcs_obj = MercurialVCS(
log_obj=self.log_obj,
#
# Torn between creating a smaller, more flexible config per
# helper object, or passing the read-only master config as
# vcs_obj.config and creating a smaller vcs_obj.vcs_config.
#
# Deciding on the latter for now, while reserving the right
# to change my mind later.
config=self.config,
vcs_config={
'repo': self.config['vcs_repo'],
'dest': self.config['vcs_dest'],
'branch': self.config.get('vcs_branch'),
'revision': self.config.get('vcs_revision'),
'vcs_share_base': self.config.get('vcs_shared_dir'),
'allow_unshared_local_clones': self.config.get('vcs_allow_unshared_local_clones'),
'halt_on_failure': self.config.get('halt_on_failure', True),
}
)
else:
self.fatal("I don't know how to handle vcs '%s'!" % self.config['vcs'])
got_revision = vcs_obj.ensure_repo_and_revision()
self.info("Got revision %s\n" % got_revision)
# __main__ {{{1
if __name__ == '__main__':
source_tool = SourceTool()
source_tool.run_and_exit()
| mpl-2.0 |
feroda/odoo | addons/document/test_cindex.py | 444 | 1553 | #!/usr/bin/python
import sys
import os
import glob
import time
import logging
from optparse import OptionParser
logging.basicConfig(level=logging.DEBUG)
parser = OptionParser()
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
parser.add_option("-C", "--content",
action="store_true", dest="docontent", default=False,
help="Disect content, rather than the file.")
parser.add_option("--delay",
action="store_true", dest="delay", default=False,
help="delay after the operation, to inspect child processes")
(options, args) = parser.parse_args()
import content_index, std_index
from content_index import cntIndex
for fname in args:
try:
if options.docontent:
fp = open(fname,'rb')
content = fp.read()
fp.close()
res = cntIndex.doIndex(content, fname, None, None, True)
else:
res = cntIndex.doIndex(None, fname, None, fname,True)
if options.verbose:
for line in res[:5]:
print line
if options.delay:
time.sleep(30)
except Exception,e:
import traceback
tb_s = reduce(lambda x, y: x+y, traceback.format_exception( sys.exc_type, sys.exc_value, sys.exc_traceback))
except KeyboardInterrupt:
print "Keyboard interrupt"
#eof
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hryamzik/ansible | lib/ansible/plugins/lookup/redis.py | 38 | 3102 | # (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: redis
author:
- Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
- Ansible Core
version_added: "2.5"
short_description: fetch data from Redis
description:
- This lookup returns a list of results from a Redis DB corresponding to a list of items given to it
requirements:
- redis (python library https://github.com/andymccurdy/redis-py/)
options:
_terms:
description: list of keys to query
host:
description: location of Redis host
default: '127.0.0.1'
env:
- name: ANSIBLE_REDIS_HOST
ini:
- section: lookup_redis
key: host
port:
description: port on which Redis is listening on
      default: 6379
type: int
env:
- name: ANSIBLE_REDIS_PORT
ini:
- section: lookup_redis
key: port
socket:
description: path to socket on which to query Redis, this option overrides host and port options when set.
type: path
env:
- name: ANSIBLE_REDIS_SOCKET
ini:
- section: lookup_redis
key: socket
"""
EXAMPLES = """
- name: query redis for somekey (default or configured settings used)
debug: msg="{{ lookup('redis', 'somekey'}}"
- name: query redis for list of keys and non-default host and port
debug: msg="{{ lookup('redis', item, host='myredis.internal.com', port=2121) }}"
loop: '{{list_of_redis_keys}}'
- name: use list directly
debug: msg="{{ lookup('redis', 'key1', 'key2', 'key3') }}"
- name: use list directly with a socket
debug: msg="{{ lookup('redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}"
"""
RETURN = """
_raw:
description: value(s) stored in Redis
"""
import os
HAVE_REDIS = False
try:
import redis
HAVE_REDIS = True
except ImportError:
pass
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
if not HAVE_REDIS:
raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
# get options
self.set_options(direct=kwargs)
# setup connection
host = self.get_option('host')
port = self.get_option('port')
socket = self.get_option('socket')
if socket is None:
conn = redis.Redis(host=host, port=port)
else:
conn = redis.Redis(unix_socket_path=socket)
ret = []
for term in terms:
try:
res = conn.get(term)
if res is None:
res = ""
ret.append(res)
except Exception:
ret.append("") # connection failed or key not found
return ret
| gpl-3.0 |
cloudcopy/seahub | seahub/forms.py | 2 | 7725 | # encoding: utf-8
from django.conf import settings
from django import forms
from django.utils.translation import ugettext_lazy as _
from seaserv import seafserv_threaded_rpc, is_valid_filename
from pysearpc import SearpcError
from seahub.base.accounts import User
from seahub.constants import DEFAULT_USER, GUEST_USER
class AddUserForm(forms.Form):
"""
Form for adding a user.
"""
email = forms.EmailField()
role = forms.ChoiceField(choices=[(DEFAULT_USER, DEFAULT_USER),
(GUEST_USER, GUEST_USER)])
password1 = forms.CharField(widget=forms.PasswordInput())
password2 = forms.CharField(widget=forms.PasswordInput())
def clean_email(self):
email = self.cleaned_data['email']
try:
user = User.objects.get(email=email)
raise forms.ValidationError(_("A user with this email already exists."))
except User.DoesNotExist:
return self.cleaned_data['email']
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError(_("The two passwords didn't match."))
return self.cleaned_data
class RepoCreateForm(forms.Form):
"""
Form for creating repo and org repo.
"""
repo_name = forms.CharField(max_length=settings.MAX_FILE_NAME,
error_messages={
'required': _(u'Name can\'t be empty'),
'max_length': _(u'Name is too long (maximum is 255 characters)')
})
repo_desc = forms.CharField(max_length=100, error_messages={
'required': _(u'Description can\'t be empty'),
'max_length': _(u'Description is too long (maximum is 100 characters)')
})
encryption = forms.CharField(max_length=1)
uuid = forms.CharField(required=False)
magic_str = forms.CharField(required=False)
encrypted_file_key = forms.CharField(required=False)
def clean_repo_name(self):
repo_name = self.cleaned_data['repo_name']
if not is_valid_filename(repo_name):
error_msg = _(u"Name %s is not valid") % repo_name
raise forms.ValidationError(error_msg)
else:
return repo_name
def clean(self):
encryption = self.cleaned_data['encryption']
if int(encryption) == 0:
return self.cleaned_data
uuid = self.cleaned_data['uuid']
magic_str = self.cleaned_data['magic_str']
encrypted_file_key = self.cleaned_data['encrypted_file_key']
if not (uuid and magic_str and encrypted_file_key):
raise forms.ValidationError(_("Argument missing"))
return self.cleaned_data
class SharedRepoCreateForm(RepoCreateForm):
"""
Used for creating group repo and public repo
"""
permission = forms.ChoiceField(choices=(('rw', 'read-write'), ('r', 'read-only')))
class RepoRenameDirentForm(forms.Form):
"""
    Form for renaming a file/dir.
"""
oldname = forms.CharField(error_messages={'required': _("Oldname is required")})
newname = forms.CharField(max_length=settings.MAX_FILE_NAME,
error_messages={
'max_length': _("It's too long."),
'required': _("It's required."),
})
def clean_newname(self):
newname = self.cleaned_data['newname']
try:
if not is_valid_filename(newname):
error_msg = _(u'Name "%s" is not valid') % newname
raise forms.ValidationError(error_msg)
else:
return newname
except SearpcError, e:
raise forms.ValidationError(str(e))
class RepoNewDirentForm(forms.Form):
"""
    Form for creating a new empty dir or a new empty file.
"""
dirent_name = forms.CharField(max_length=settings.MAX_FILE_NAME,
error_messages={
'max_length': _("It's too long."),
'required': _("It's required."),
})
def clean_dirent_name(self):
dirent_name = self.cleaned_data['dirent_name']
try:
if not is_valid_filename(dirent_name):
error_msg = _(u'Name "%s" is not valid') % dirent_name
raise forms.ValidationError(error_msg)
else:
return dirent_name
except SearpcError, e:
raise forms.ValidationError(str(e))
class RepoPassowrdForm(forms.Form):
"""
Form for user to decrypt a repo in repo page.
"""
repo_id = forms.CharField(error_messages={'required': _('Repo id is required')})
username = forms.CharField(error_messages={'required': _('Username is required')})
password = forms.CharField(error_messages={'required': _('Password can\'t be empty')})
def clean(self):
if 'password' in self.cleaned_data:
repo_id = self.cleaned_data['repo_id']
username = self.cleaned_data['username']
password = self.cleaned_data['password']
try:
seafserv_threaded_rpc.set_passwd(repo_id, username, password)
except SearpcError, e:
if e.msg == 'Bad arguments':
raise forms.ValidationError(_(u'Bad url format'))
# elif e.msg == 'Repo is not encrypted':
# return HttpResponseRedirect(reverse('repo',
# args=[self.repo_id]))
elif e.msg == 'Incorrect password':
raise forms.ValidationError(_(u'Wrong password'))
elif e.msg == 'Internal server error':
raise forms.ValidationError(_(u'Internal server error'))
else:
raise forms.ValidationError(_(u'Decrypt library error'))
class SetUserQuotaForm(forms.Form):
"""
Form for setting user quota.
"""
email = forms.CharField(error_messages={'required': _('Email is required')})
space_quota = forms.IntegerField(min_value=0,
error_messages={'required': _('Space quota can\'t be empty'),
'min_value': _('Space quota is too low (minimum value is 0)')})
share_quota = forms.IntegerField(min_value=0, required = False,
error_messages={'min_value': _('Share quota is too low (minimum value is 0)')})
class RepoSettingForm(forms.Form):
"""
Form for saving repo settings.
"""
repo_name = forms.CharField(error_messages={'required': _('Library name is required')})
repo_desc = forms.CharField(error_messages={'required': _('Library description is required')})
days = forms.IntegerField(required=False,
error_messages={'invalid': _('Please enter a number')})
def clean_repo_name(self):
repo_name = self.cleaned_data['repo_name']
if not is_valid_filename(repo_name):
error_msg = _(u"Name %s is not valid") % repo_name
raise forms.ValidationError(error_msg)
else:
return repo_name
class BatchAddUserForm(forms.Form):
"""
Form for importing users from CSV file.
"""
file = forms.FileField()
| apache-2.0 |
n0trax/ansible | lib/ansible/modules/network/aci/aci_epg.py | 22 | 6341 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_epg
short_description: Manage End Point Groups (EPG) on Cisco ACI fabrics (fv:AEPg)
description:
- Manage End Point Groups (EPG) on Cisco ACI fabrics.
- More information from the internal APIC class
I(fv:AEPg) at U(https://developer.cisco.com/media/mim-ref/MO-fvAEPg.html).
author:
- Swetha Chunduri (@schunduri)
- Dag Wieers (@dagwieers)
- Jacob Mcgill (@jmcgill298)
version_added: '2.4'
requirements:
- ACI Fabric 1.0(3f)+
notes:
- The C(tenant) and C(app_profile) used must exist before using this module in your playbook.
The M(aci_tenant) and M(aci_ap) modules can be used for this.
options:
tenant:
description:
- Name of an existing tenant.
aliases: [ tenant_name ]
ap:
description:
- Name of an existing application network profile, that will contain the EPGs.
required: yes
    aliases: [ app_profile, app_profile_name ]
epg:
description:
- Name of the end point group.
required: yes
aliases: [ name, epg_name ]
bd:
description:
- Name of the bridge domain being associated with the EPG.
required: yes
aliases: [ bd_name, bridge_domain ]
priority:
description:
- QoS class.
choices: [ level1, level2, level3, unspecified ]
default: unspecified
intra_epg_isolation:
description:
- Intra EPG Isolation.
choices: [ enforced, unenforced ]
default: unenforced
description:
description:
- Description for the EPG.
aliases: [ descr ]
fwd_control:
description:
- The forwarding control used by the EPG.
- The APIC defaults new EPGs to C(none).
choices: [ none, proxy-arp ]
default: none
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new EPG
aci_epg:
hostname: apic
username: admin
password: SomeSecretPassword
tenant: production
ap: intranet
epg: web_epg
description: Web Intranet EPG
bd: prod_bd
- name: Add a list of EPGs
  aci_epg:
hostname: apic
username: admin
password: SomeSecretPassword
tenant: production
ap: ticketing
epg: "{{ item.epg }}"
description: Ticketing EPG
bd: "{{ item.bd }}"
priority: unspecified
intra_epg_isolation: unenforced
state: present
with_items:
- epg: web
bd: web_bd
- epg: database
bd: database_bd
- name: Remove an EPG
aci_epg:
hostname: apic
username: admin
password: SomeSecretPassword
validate_certs: false
tenant: production
app_profile: intranet
epg: web_epg
state: absent
- name: Query an EPG
aci_epg:
hostname: apic
username: admin
password: SomeSecretPassword
tenant: production
ap: ticketing
epg: web_epg
state: query
- name: Query all EPGs
aci_epg:
hostname: apic
username: admin
password: SomeSecretPassword
state: query
- name: Query all EPGs with a Specific Name
aci_epg:
hostname: apic
username: admin
password: SomeSecretPassword
validate_certs: false
epg: web_epg
state: query
- name: Query all EPGs of an App Profile
aci_epg:
hostname: apic
username: admin
password: SomeSecretPassword
validate_certs: false
ap: ticketing
state: query
'''
RETURN = r'''
#
'''
from ansible.module_utils.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec
argument_spec.update(
epg=dict(type='str', aliases=['name', 'epg_name']),
bd=dict(type='str', aliases=['bd_name', 'bridge_domain']),
ap=dict(type='str', aliases=['app_profile', 'app_profile_name']),
tenant=dict(type='str', aliases=['tenant_name']),
description=dict(type='str', aliases=['descr']),
priority=dict(type='str', choices=['level1', 'level2', 'level3', 'unspecified']),
intra_epg_isolation=dict(choices=['enforced', 'unenforced']),
fwd_control=dict(type='str', choices=['none', 'proxy-arp']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['ap', 'epg', 'tenant']],
['state', 'present', ['ap', 'epg', 'tenant']],
],
)
epg = module.params['epg']
bd = module.params['bd']
description = module.params['description']
priority = module.params['priority']
intra_epg_isolation = module.params['intra_epg_isolation']
fwd_control = module.params['fwd_control']
state = module.params['state']
aci = ACIModule(module)
aci.construct_url(root_class="tenant", subclass_1="ap", subclass_2="epg", child_classes=['fvRsBd'])
aci.get_existing()
if state == 'present':
# Filter out module parameters with null values
aci.payload(
aci_class='fvAEPg',
class_config=dict(
name=epg,
descr=description,
prio=priority,
pcEnfPref=intra_epg_isolation,
fwdCtrl=fwd_control,
),
child_configs=[
dict(fvRsBd=dict(attributes=dict(tnFvBDName=bd))),
],
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class='fvAEPg')
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
module.exit_json(**aci.result)
if __name__ == "__main__":
main()
| gpl-3.0 |
egustafson/sandbox | Python/grpc-basic/client.py | 1 | 1047 | #!/bin/env python
from __future__ import print_function
import logging
import grpc
from demo import demo_pb2
from demo import demo_pb2_grpc
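# Rough shape of the demo.proto service, inferred from the stub calls below
# (message and field names are assumptions, not taken from the actual proto):
#   service DemoService {
#     rpc ListenHeartbeat(HeartbeatRequest) returns (HeartbeatResponse);
#     rpc StreamHeartbeat(HeartbeatRequest) returns (stream HeartbeatResponse);
#   }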
def run():
with grpc.insecure_channel('localhost:50051') as channel:
stub = demo_pb2_grpc.DemoServiceStub(channel)
for ii in range(2) :
## Req / Resp (single objects)
req = demo_pb2.HeartbeatRequest(request_id="req-id-{}".format(ii))
logging.info("--- ListenHeartbeat() --------->")
resp = stub.ListenHeartbeat(req)
logging.info("<-- received: {}".format(resp.note))
## 1-Req / Stream Resp
logging.info("--- StreamHeartbeat() --------->")
req = demo_pb2.HeartbeatRequest(request_id="stream-1")
for resp in stub.StreamHeartbeat(req):
logging.info("<-- received: {}".format(resp.note))
if __name__ == '__main__':
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s", level=logging.DEBUG)
logging.info('starting client')
run()
logging.info('done.')
| apache-2.0 |
jcsp/manila | manila/api/contrib/share_actions.py | 4 | 7415 | # Copyright 2013 NetApp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import six
import webob
from manila.api import extensions
from manila.api.openstack import wsgi
from manila import exception
from manila.i18n import _
from manila import share
authorize = extensions.extension_authorizer('share', 'services')
class ShareActionsController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ShareActionsController, self).__init__(*args, **kwargs)
self.share_api = share.API()
@staticmethod
def _validate_common_name(access):
"""Validate common name passed by user.
'access' is used as the certificate's CN (common name)
to which access is allowed or denied by the backend.
The standard allows for just about any string in the
common name. The meaning of a string depends on its
interpretation and is limited to 64 characters.
"""
if len(access) == 0 or len(access) > 64:
exc_str = _('Invalid CN (common name). Must be 1-64 chars long')
raise webob.exc.HTTPBadRequest(explanation=exc_str)
@staticmethod
def _validate_username(access):
valid_username_re = '[\w\.\-_\`;\'\{\}\[\]\\\\]{4,32}$'
username = access
if not re.match(valid_username_re, username):
exc_str = ('Invalid user or group name. Must be 4-32 characters '
'and consist of alphanumeric characters and '
'special characters ]{.-_\'`;}[\\')
raise webob.exc.HTTPBadRequest(explanation=exc_str)
@staticmethod
def _validate_ip_range(ip_range):
ip_range = ip_range.split('/')
exc_str = ('Supported ip format examples:\n'
'\t10.0.0.2, 10.0.0.0/24')
if len(ip_range) > 2:
raise webob.exc.HTTPBadRequest(explanation=exc_str)
if len(ip_range) == 2:
try:
prefix = int(ip_range[1])
if prefix < 0 or prefix > 32:
raise ValueError()
except ValueError:
msg = 'IP prefix should be in range from 0 to 32'
raise webob.exc.HTTPBadRequest(explanation=msg)
ip_range = ip_range[0].split('.')
if len(ip_range) != 4:
raise webob.exc.HTTPBadRequest(explanation=exc_str)
for item in ip_range:
try:
if 0 <= int(item) <= 255:
continue
raise ValueError()
except ValueError:
raise webob.exc.HTTPBadRequest(explanation=exc_str)
@wsgi.action('os-allow_access')
def _allow_access(self, req, id, body):
"""Add share access rule."""
context = req.environ['manila.context']
access_data = body['os-allow_access']
share = self.share_api.get(context, id)
access_type = access_data['access_type']
access_to = access_data['access_to']
if access_type == 'ip':
self._validate_ip_range(access_to)
elif access_type == 'user':
self._validate_username(access_to)
elif access_type == 'cert':
self._validate_common_name(access_to.strip())
else:
exc_str = _("Only 'ip','user',or'cert' access types "
"are supported.")
raise webob.exc.HTTPBadRequest(explanation=exc_str)
try:
access = self.share_api.allow_access(
context, share, access_type, access_to,
access_data.get('access_level'))
except exception.ShareAccessExists as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
return {'access': access}
@wsgi.action('os-deny_access')
def _deny_access(self, req, id, body):
"""Remove access rule."""
context = req.environ['manila.context']
access_id = body['os-deny_access']['access_id']
try:
access = self.share_api.access_get(context, access_id)
if access.share_id != id:
raise exception.NotFound()
share = self.share_api.get(context, id)
except exception.NotFound as error:
raise webob.exc.HTTPNotFound(explanation=six.text_type(error))
self.share_api.deny_access(context, share, access)
return webob.Response(status_int=202)
@wsgi.action('os-access_list')
def _access_list(self, req, id, body):
"""list access rules."""
context = req.environ['manila.context']
share = self.share_api.get(context, id)
access_list = self.share_api.access_get_all(context, share)
return {'access_list': access_list}
@wsgi.action('os-extend')
def _extend(self, req, id, body):
"""Extend size of share."""
context = req.environ['manila.context']
share, size = self._get_valid_resize_parameters(
context, id, body, 'os-extend')
try:
self.share_api.extend(context, share, size)
except (exception.InvalidInput, exception.InvalidShare) as e:
raise webob.exc.HTTPBadRequest(explanation=six.text_type(e))
except exception.ShareSizeExceedsAvailableQuota as e:
raise webob.exc.HTTPForbidden(explanation=six.text_type(e))
return webob.Response(status_int=202)
@wsgi.action('os-shrink')
def _shrink(self, req, id, body):
"""Shrink size of share."""
context = req.environ['manila.context']
share, size = self._get_valid_resize_parameters(
context, id, body, 'os-shrink')
try:
self.share_api.shrink(context, share, size)
except (exception.InvalidInput, exception.InvalidShare) as e:
raise webob.exc.HTTPBadRequest(explanation=six.text_type(e))
return webob.Response(status_int=202)
def _get_valid_resize_parameters(self, context, id, body, action):
try:
share = self.share_api.get(context, id)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=six.text_type(e))
try:
size = int(body[action]['new_size'])
except (KeyError, ValueError, TypeError):
msg = _("New share size must be specified as an integer.")
raise webob.exc.HTTPBadRequest(explanation=msg)
return share, size
# def create_resource():
# return wsgi.Resource(ShareActionsController())
class Share_actions(extensions.ExtensionDescriptor):
"""Enable share actions."""
name = 'ShareActions'
alias = 'share-actions'
updated = '2012-08-14T00:00:00+00:00'
def get_controller_extensions(self):
controller = ShareActionsController()
extension = extensions.ControllerExtension(self, 'shares',
controller)
return [extension]
| apache-2.0 |
tiagochiavericosta/edx-platform | lms/djangoapps/instructor_analytics/distributions.py | 174 | 5760 | """
Profile Distributions
Aggregate sums for values of fields in students profiles.
For example:
The distribution in a course for gender might look like:
'gender': {
'type': 'EASY_CHOICE',
'data': {
'no_data': 1234,
'm': 5678,
'o': 2134,
'f': 5678
},
'display_names': {
'no_data': 'No Data',
'm': 'Male',
'o': 'Other',
'f': 'Female'
}
"""
from django.db.models import Count
from student.models import CourseEnrollment, UserProfile
# choices with a restricted domain, e.g. level_of_education
_EASY_CHOICE_FEATURES = ('gender', 'level_of_education')
# choices with a larger domain e.g. year_of_birth
_OPEN_CHOICE_FEATURES = ('year_of_birth',)
AVAILABLE_PROFILE_FEATURES = _EASY_CHOICE_FEATURES + _OPEN_CHOICE_FEATURES
DISPLAY_NAMES = {
'gender': 'Gender',
'level_of_education': 'Level of Education',
'year_of_birth': 'Year Of Birth',
}
class ProfileDistribution(object):
"""
Container for profile distribution data
`feature` is the name of the distribution feature
`feature_display_name` is the display name of feature
`data` is a dictionary of the distribution
`type` is either 'EASY_CHOICE' or 'OPEN_CHOICE'
`choices_display_names` is a dict if the distribution is an 'EASY_CHOICE'
"""
class ValidationError(ValueError):
""" Error thrown if validation fails. """
pass
def __init__(self, feature):
self.feature = feature
self.feature_display_name = DISPLAY_NAMES.get(feature, feature)
# to be set later
self.type = None
self.data = None
self.choices_display_names = None
def validate(self):
"""
Validate this profile distribution.
Throws ProfileDistribution.ValidationError
"""
def validation_assert(predicate):
""" Throw a ValidationError if false. """
if not predicate:
raise ProfileDistribution.ValidationError()
validation_assert(isinstance(self.feature, str))
validation_assert(self.feature in DISPLAY_NAMES)
validation_assert(isinstance(self.feature_display_name, str))
validation_assert(self.type in ['EASY_CHOICE', 'OPEN_CHOICE'])
validation_assert(isinstance(self.data, dict))
if self.type == 'EASY_CHOICE':
validation_assert(isinstance(self.choices_display_names, dict))
def profile_distribution(course_id, feature):
"""
Retrieve distribution of students over a given feature.
feature is one of AVAILABLE_PROFILE_FEATURES.
Returns a ProfileDistribution instance.
NOTE: no_data will appear as a key instead of None/null to adhere to the json spec.
data types are EASY_CHOICE or OPEN_CHOICE
"""
if feature not in AVAILABLE_PROFILE_FEATURES:
raise ValueError(
"unsupported feature requested for distribution '{}'".format(
feature)
)
prd = ProfileDistribution(feature)
if feature in _EASY_CHOICE_FEATURES:
prd.type = 'EASY_CHOICE'
if feature == 'gender':
raw_choices = UserProfile.GENDER_CHOICES
elif feature == 'level_of_education':
raw_choices = UserProfile.LEVEL_OF_EDUCATION_CHOICES
# short name and display name (full) of the choices.
choices = [(short, full)
for (short, full) in raw_choices] + [('no_data', 'No Data')]
def get_filter(feature, value):
""" Get the orm filter parameters for a feature. """
return {
'gender': {'user__profile__gender': value},
'level_of_education': {'user__profile__level_of_education': value},
}[feature]
def get_count(feature, value):
""" Get the count of enrolled students matching the feature value. """
return CourseEnrollment.objects.filter(
course_id=course_id,
is_active=True,
**get_filter(feature, value)
).count()
distribution = {}
for (short, full) in choices:
# handle no data case
if short == 'no_data':
distribution['no_data'] = 0
distribution['no_data'] += get_count(feature, None)
distribution['no_data'] += get_count(feature, '')
else:
distribution[short] = get_count(feature, short)
prd.data = distribution
prd.choices_display_names = dict(choices)
elif feature in _OPEN_CHOICE_FEATURES:
prd.type = 'OPEN_CHOICE'
profiles = UserProfile.objects.filter(
user__courseenrollment__course_id=course_id,
user__courseenrollment__is_active=True
)
query_distribution = profiles.values(
feature).annotate(Count(feature)).order_by()
# query_distribution is of the form [{'featureval': 'value1', 'featureval__count': 4},
# {'featureval': 'value2', 'featureval__count': 2}, ...]
distribution = dict((vald[feature], vald[feature + '__count'])
for vald in query_distribution)
# distribution is of the form {'value1': 4, 'value2': 2, ...}
# change none to no_data for valid json key
if None in distribution:
# django does not properly count NULL values when using annotate Count
# so
# distribution['no_data'] = distribution.pop(None)
# would always be 0.
# Correctly count null values
distribution['no_data'] = profiles.filter(
**{feature: None}
).count()
prd.data = distribution
prd.validate()
return prd
| agpl-3.0 |
JoaoVasques/aws-devtool | eb/macosx/python3/lib/aws/requests/packages/chardet/gb2312freq.py | 323 | 36001 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table , from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ratio = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio is about 25% of the Ideal one, still much higher than RDR
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
GB2312_TABLE_SIZE = 3760
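# A minimal sketch of how a table like this can be used (not chardet's actual
# analyser): count how many observed characters fall among the 512 most frequent
# entries, then compare the frequent/rare ratio against
# GB2312_TYPICAL_DISTRIBUTION_RATIO to score how GB2312-like the byte stream is.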
GB2312CharToFreqOrder = ( \
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512
# Everything below is of no interest for detection purposes
5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636,
5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874,
5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278,
3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806,
4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827,
5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512,
5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578,
4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828,
4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105,
4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189,
4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561,
3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226,
6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778,
4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039,
6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404,
4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213,
4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739,
4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328,
5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592,
3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424,
4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270,
3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232,
4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456,
4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121,
6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971,
6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409,
5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519,
4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367,
6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834,
4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460,
5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464,
5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709,
5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906,
6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530,
3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262,
6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920,
4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190,
5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318,
6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538,
6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697,
4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544,
5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016,
4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638,
5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006,
5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071,
4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552,
4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556,
5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432,
4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632,
4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885,
5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336,
4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729,
4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854,
4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332,
5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004,
5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419,
4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293,
3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580,
4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339,
6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341,
5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493,
5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046,
4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904,
6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728,
5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350,
6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233,
4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944,
5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413,
5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700,
3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999,
5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694,
6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571,
4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359,
6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178,
4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421,
4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330,
6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855,
3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587,
6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803,
4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791,
3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304,
3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445,
3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506,
4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856,
2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057,
5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777,
4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369,
5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028,
5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914,
5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175,
4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681,
5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534,
4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912,
5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054,
1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336,
3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666,
4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375,
4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113,
6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614,
4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173,
5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197,
3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271,
5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423,
5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529,
5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921,
3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837,
5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922,
5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187,
3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382,
5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628,
5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683,
5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053,
6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928,
4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662,
6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663,
4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554,
3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191,
4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013,
5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932,
5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055,
5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829,
3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096,
3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660,
6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199,
6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748,
5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402,
6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957,
6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668,
6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763,
6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407,
6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051,
5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429,
6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791,
6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028,
3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305,
3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159,
4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683,
4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372,
3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514,
5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544,
5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472,
5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716,
5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905,
5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327,
4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030,
5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281,
6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224,
5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327,
4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062,
4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354,
6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065,
3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953,
4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681,
4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708,
5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442,
6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387,
6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237,
4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713,
6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547,
5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957,
5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337,
5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074,
5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685,
5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455,
4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722,
5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615,
5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093,
5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989,
5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094,
6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212,
4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967,
5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733,
4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260,
4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864,
6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353,
4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095,
6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287,
3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504,
5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539,
6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750,
6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864,
6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213,
5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573,
6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252,
6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970,
3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703,
5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978,
4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767)
| apache-2.0 |
jean/sentry | src/sentry/rules/conditions/event_attribute.py | 2 | 7365 | """
sentry.rules.conditions.event_attribute
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import json
from collections import OrderedDict
from django import forms
from sentry.rules.conditions.base import EventCondition
class MatchType(object):
EQUAL = 'eq'
NOT_EQUAL = 'ne'
STARTS_WITH = 'sw'
ENDS_WITH = 'ew'
CONTAINS = 'co'
NOT_CONTAINS = 'nc'
IS_SET = 'is'
NOT_SET = 'ns'
MATCH_CHOICES = OrderedDict(
[
(MatchType.EQUAL, 'equals'),
(MatchType.NOT_EQUAL, 'does not equal'),
(MatchType.STARTS_WITH, 'starts with'),
(MatchType.ENDS_WITH, 'ends with'),
(MatchType.CONTAINS, 'contains'),
(MatchType.NOT_CONTAINS, 'does not contain'),
(MatchType.IS_SET, 'is set'),
(MatchType.NOT_SET, 'is not set'),
]
)
ATTR_CHOICES = [
'message',
'platform',
'environment',
'type',
'exception.type',
'exception.value',
'user.id',
'user.email',
'user.username',
'user.ip_address',
'http.method',
'http.url',
'stacktrace.code',
'stacktrace.module',
'stacktrace.filename',
]
class FixedTypeaheadInput(forms.TextInput):
def __init__(self, choices, *args, **kwargs):
super(FixedTypeaheadInput, self).__init__(*args, **kwargs)
self.attrs['data-choices'] = json.dumps(choices)
self.attrs['class'] = self.attrs.get('class', '') + ' typeahead'
class EventAttributeForm(forms.Form):
attribute = forms.CharField(
widget=FixedTypeaheadInput(
attrs={'style': 'width:200px',
'placeholder': 'i.e. exception.type'},
choices=[{
'id': a,
'text': a
} for a in ATTR_CHOICES],
)
)
match = forms.ChoiceField(
MATCH_CHOICES.items(), widget=forms.Select(
attrs={'style': 'width:150px'},
)
)
value = forms.CharField(
widget=forms.TextInput(
attrs={'placeholder': 'value'},
), required=False
)
class EventAttributeCondition(EventCondition):
"""
Attributes are a mapping of <logical-key>.<property>.
For example:
- message
- platform
- exception.{type,value}
- user.{id,ip_address,email,FIELD}
- http.{method,url}
- stacktrace.{code,module,filename}
- extra.{FIELD}
"""
# TODO(dcramer): add support for stacktrace.vars.[name]
form_cls = EventAttributeForm
label = u'An event\'s {attribute} value {match} {value}'
def _get_attribute_values(self, event, attr):
# TODO(dcramer): we should validate attributes (when we can) before
path = attr.split('.')
if path[0] in ('message', 'platform'):
if len(path) != 1:
return []
return [getattr(event, path[0])]
elif path[0] == 'environment':
return [event.get_tag('environment')]
elif path[0] == 'type':
return [event.data['type']]
elif len(path) == 1:
return []
elif path[0] == 'extra':
path.pop(0)
value = event.data['extra']
while path:
bit = path.pop(0)
value = value.get(bit)
if not value:
return []
if isinstance(value, (list, tuple)):
return value
return [value]
elif len(path) != 2:
return []
elif path[0] == 'exception':
if path[1] not in ('type', 'value'):
return []
return [
getattr(e, path[1]) for e in event.interfaces['sentry.interfaces.Exception'].values
]
elif path[0] == 'user':
if path[1] in ('id', 'ip_address', 'email', 'username'):
return [getattr(event.interfaces['sentry.interfaces.User'], path[1])]
return [getattr(event.interfaces['sentry.interfaces.User'].data, path[1])]
elif path[0] == 'http':
if path[1] not in ('url', 'method'):
return []
return [getattr(event.interfaces['sentry.interfaces.Http'], path[1])]
elif path[0] == 'stacktrace':
stacks = event.interfaces.get('sentry.interfaces.Stacktrace')
if stacks:
stacks = [stacks]
else:
stacks = [
e.stacktrace for e in event.interfaces['sentry.interfaces.Exception'].values
if e.stacktrace
]
result = []
for st in stacks:
for frame in st.frames:
if path[1] in ('filename', 'module'):
result.append(getattr(frame, path[1]))
elif path[1] == 'code':
if frame.pre_context:
result.extend(frame.pre_context)
if frame.context_line:
result.append(frame.context_line)
if frame.post_context:
result.extend(frame.post_context)
return result
return []
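    # For example, given attr='exception.type', the helper above walks every
    # exception in the event's exception interface and returns one value per
    # exception (e.g. ['ValueError']); unrecognised or too-short paths fall
    # through to the empty list.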
def render_label(self):
data = {
'attribute': self.data['attribute'],
'value': self.data['value'],
'match': MATCH_CHOICES[self.data['match']],
}
return self.label.format(**data)
def passes(self, event, state, **kwargs):
attr = self.get_option('attribute')
match = self.get_option('match')
value = self.get_option('value')
if not (attr and match and value):
return False
value = value.lower()
attr = attr.lower()
try:
attribute_values = self._get_attribute_values(event, attr)
except KeyError:
attribute_values = []
attribute_values = [v.lower() for v in attribute_values if v is not None]
if match == MatchType.EQUAL:
for a_value in attribute_values:
if a_value == value:
return True
return False
elif match == MatchType.NOT_EQUAL:
for a_value in attribute_values:
if a_value == value:
return False
return True
elif match == MatchType.STARTS_WITH:
for a_value in attribute_values:
if a_value.startswith(value):
return True
return False
elif match == MatchType.ENDS_WITH:
for a_value in attribute_values:
if a_value.endswith(value):
return True
return False
elif match == MatchType.CONTAINS:
for a_value in attribute_values:
if value in a_value:
return True
return False
elif match == MatchType.NOT_CONTAINS:
for a_value in attribute_values:
if value in a_value:
return False
return True
elif match == MatchType.IS_SET:
return bool(attribute_values)
elif match == MatchType.NOT_SET:
return not attribute_values
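# A sketch of a typical configuration for this condition (illustrative values
# only): the saved rule data would look like
#     {'attribute': 'exception.type', 'match': 'eq', 'value': 'ValueError'}
# for which render_label() produces
#     "An event's exception.type value equals ValueError"
# and passes() returns True for any event whose exception interface contains
# a ValueError (the comparison above is case-insensitive).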
| bsd-3-clause |
TheTimmy/spack | var/spack/repos/builtin/packages/namd/package.py | 3 | 5455 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import platform
import shutil
import sys
import os
from spack import *
class Namd(MakefilePackage):
"""NAMDis a parallel molecular dynamics code designed for
high-performance simulation of large biomolecular systems."""
homepage = "http://www.ks.uiuc.edu/Research/namd/"
url = "file://{0}/NAMD_2.12_Source.tar.gz".format(os.getcwd())
version('2.12', '2a1191909b1ab03bf0205971ad4d8ee9')
variant('fftw', default='3', values=('none', '2', '3', 'mkl'),
description='Enable the use of FFTW/FFTW3/MKL FFT')
variant('interface', default='none', values=('none', 'tcl', 'python'),
description='Enables TCL and/or python interface')
depends_on('charm')
depends_on('fftw@:2.99', when="fftw=2")
depends_on('fftw@3:', when="fftw=3")
depends_on('intel-mkl', when="fftw=mkl")
depends_on('tcl', when='interface=tcl')
depends_on('tcl', when='interface=python')
depends_on('python', when='interface=python')
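    # A usage sketch (illustrative spec only): the variants above are selected
    # on the command line, e.g. `spack install namd fftw=3 interface=tcl`
    # builds against FFTW3 with the TCL scripting interface enabled.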
def _copy_arch_file(self, lib):
config_filename = 'arch/{0}.{1}'.format(self.arch, lib)
shutil.copy('arch/Linux-x86_64.{0}'.format(lib),
config_filename)
if lib == 'tcl':
filter_file(r'-ltcl8\.5',
'-ltcl{0}'.format(self.spec['tcl'].version.up_to(2)),
config_filename)
def _append_option(self, opts, lib):
if lib != 'python':
self._copy_arch_file(lib)
spec = self.spec
opts.extend([
'--with-{0}'.format(lib),
'--{0}-prefix'.format(lib), spec[lib].prefix
])
@property
def arch(self):
plat = sys.platform
if plat.startswith("linux"):
plat = "linux"
march = platform.machine()
return '{0}-{1}'.format(plat, march)
@property
def build_directory(self):
return '{0}-spack'.format(self.arch)
def edit(self, spec, prefix):
with working_dir('arch'):
with open('{0}.arch'.format(self.build_directory), 'w') as fh:
                # these options are taken from the default provided
                # configuration files
optims_opts = {
'gcc': '-m64 -O3 -fexpensive-optimizations -ffast-math',
'intel': '-O2 -ip'
}
optim_opts = optims_opts[self.compiler.name] \
if self.compiler.name in optims_opts else ''
fh.write('\n'.join([
'NAMD_ARCH = {0}'.format(self.arch),
'CHARMARCH = ',
'CXX = {0.cxx} {0.cxx11_flag}'.format(
self.compiler),
'CXXOPTS = {0}'.format(optim_opts),
'CC = {0}'.format(self.compiler.cc),
'COPTS = {0}'.format(optim_opts),
''
]))
self._copy_arch_file('base')
opts = ['--charm-base', spec['charm'].prefix]
fftw_version = spec.variants['fftw'].value
if fftw_version == 'none':
opts.append('--without-fftw')
elif fftw_version == 'mkl':
self._append_option(opts, 'mkl')
else:
_fftw = 'fftw{0}'.format('' if fftw_version == '2' else '3')
self._copy_arch_file(_fftw)
opts.extend(['--with-{0}'.format(_fftw),
'--fftw-prefix', spec['fftw'].prefix])
interface_type = spec.variants['interface'].value
if interface_type != 'none':
self._append_option(opts, 'tcl')
if interface_type == 'python':
self._append_option(opts, 'python')
else:
opts.extend([
'--without-tcl',
'--without-python'
])
config = Executable('./config')
config(self.build_directory, *opts)
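    # The edit() step above is roughly equivalent to running NAMD's own
    # configure script by hand, e.g.
    #   ./config linux-x86_64-spack --charm-base <charm prefix> --with-fftw3 ...
    # using the arch files generated into the arch/ directory.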
def install(self, spec, prefix):
with working_dir(self.build_directory):
mkdirp(prefix.bin)
install('namd2', prefix.bin)
# I'm not sure this is a good idea or if an autoload of the charm
# module would not be better.
install('charmrun', prefix.bin)
| lgpl-2.1 |
ArvinPan/opencog | opencog/nlp/anaphora/agents/testingAgent.py | 12 | 1525 |
from __future__ import print_function
from pprint import pprint
from pln.examples.deduction import deduction_agent
from opencog.atomspace import types, AtomSpace, TruthValue
from hobbs import HobbsAgent
from dumpAgent import dumpAgent
from opencog.scheme_wrapper import load_scm,scheme_eval_h, __init__
__author__ = 'Hujie Wang'
'''
This agent is purely for testing purposes; it can be used to test the HobbsAgent in a standalone atomspace environment.
'''
atomspace = AtomSpace()
__init__(atomspace)
data=["opencog/scm/config.scm",
"opencog/scm/core_types.scm",
"spacetime/spacetime_types.scm",
"opencog/nlp/types/nlp_types.scm",
"opencog/dynamics/attention/attention_types.scm",
"opencog/embodiment/embodiment_types.scm",
"opencog/scm/apply.scm",
"opencog/scm/file-utils.scm",
"opencog/scm/persistence.scm",
#"opencog/scm/repl-shell.scm",
"opencog/scm/utilities.scm",
"opencog/scm/av-tv.scm",
"opencog/nlp/scm/type-definitions.scm",
"opencog/nlp/scm/config.scm",
"opencog/nlp/scm/file-utils.scm",
"opencog/nlp/scm/nlp-utils.scm",
"opencog/nlp/scm/disjunct-list.scm",
"opencog/nlp/scm/processing-utils.scm",
"opencog/nlp/anaphora/tests/atomspace.log"
]
#status2 = load_scm(atomspace, "opencog/nlp/anaphora/tests/atomspace.scm")
for item in data:
load_scm(atomspace, item)
#init=initAgent()
#init.run(atomspace)
dump=dumpAgent()
dump.run(atomspace)
hobbsAgent = HobbsAgent()
hobbsAgent.run(atomspace)
| agpl-3.0 |
zhouzhenghui/python-for-android | python-modules/twisted/twisted/python/procutils.py | 61 | 1380 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Utilities for dealing with processes.
"""
import os
def which(name, flags=os.X_OK):
"""Search PATH for executable files with the given name.
On newer versions of MS-Windows, the PATHEXT environment variable will be
set to the list of file extensions for files considered executable. This
    will normally include things like ".EXE". This function will also find files
with the given name ending with any of these extensions.
On MS-Windows the only flag that has any meaning is os.F_OK. Any other
flags will be ignored.
@type name: C{str}
@param name: The name for which to search.
@type flags: C{int}
@param flags: Arguments to L{os.access}.
@rtype: C{list}
    @return: A list of the full paths to files found, in the
order in which they were found.
"""
result = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ.get('PATH', None)
if path is None:
return []
for p in os.environ.get('PATH', '').split(os.pathsep):
p = os.path.join(p, name)
if os.access(p, flags):
result.append(p)
for e in exts:
pext = p + e
if os.access(pext, flags):
result.append(pext)
return result
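# A minimal usage sketch (assumes nothing beyond the standard library): look
# up an executable on the PATH and fall back gracefully when it is missing.
if __name__ == '__main__':
    found = which('python')
    if found:
        print('Found executable at: %s' % found[0])
    else:
        print('No "python" executable found on PATH')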
| apache-2.0 |
SeaFalcon/Musicool_Pr | lib/werkzeug/routing.py | 72 | 61818 | # -*- coding: utf-8 -*-
"""
werkzeug.routing
~~~~~~~~~~~~~~~~
When it comes to combining multiple controller or view functions (however
you want to call them) you need a dispatcher. A simple way would be
applying regular expression tests on the ``PATH_INFO`` and calling
registered callback functions that return the value then.
This module implements a much more powerful system than simple regular
expression matching because it can also convert values in the URLs and
build URLs.
Here is a simple example that creates an URL map for an application with
two subdomains (www and kb) and some URL rules:
>>> m = Map([
... # Static URLs
... Rule('/', endpoint='static/index'),
... Rule('/about', endpoint='static/about'),
... Rule('/help', endpoint='static/help'),
... # Knowledge Base
... Subdomain('kb', [
... Rule('/', endpoint='kb/index'),
... Rule('/browse/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
... ])
... ], default_subdomain='www')
If the application doesn't use subdomains it's perfectly fine to not set
the default subdomain and not use the `Subdomain` rule factory. The endpoint
in the rules can be anything, for example import paths or unique
identifiers. The WSGI application can use those endpoints to get the
handler for that URL. It doesn't have to be a string at all but it's
recommended.
Now it's possible to create a URL adapter for one of the subdomains and
build URLs:
>>> c = m.bind('example.com')
>>> c.build("kb/browse", dict(id=42))
'http://kb.example.com/browse/42/'
>>> c.build("kb/browse", dict())
'http://kb.example.com/browse/'
>>> c.build("kb/browse", dict(id=42, page=3))
'http://kb.example.com/browse/42/3'
>>> c.build("static/about")
'/about'
>>> c.build("static/index", force_external=True)
'http://www.example.com/'
>>> c = m.bind('example.com', subdomain='kb')
>>> c.build("static/about")
'http://www.example.com/about'
The first argument to bind is the server name *without* the subdomain.
Per default it will assume that the script is mounted on the root, but
often that's not the case so you can provide the real mount point as
second argument:
>>> c = m.bind('example.com', '/applications/example')
The third argument can be the subdomain, if not given the default
subdomain is used. For more details about binding have a look at the
documentation of the `MapAdapter`.
And here is how you can match URLs:
>>> c = m.bind('example.com')
>>> c.match("/")
('static/index', {})
>>> c.match("/about")
('static/about', {})
>>> c = m.bind('example.com', '/', 'kb')
>>> c.match("/")
('kb/index', {})
>>> c.match("/browse/42/23")
('kb/browse', {'id': 42, 'page': 23})
If matching fails you get a `NotFound` exception, if the rule thinks
it's a good idea to redirect (for example because the URL was defined
to have a slash at the end but the request was missing that slash) it
will raise a `RequestRedirect` exception. Both are subclasses of the
`HTTPException` so you can use those errors as responses in the
application.
If matching succeeded but the URL rule was incompatible with the given
method (for example there were only rules for `GET` and `HEAD` and the
routing system tried to match a `POST` request) a `MethodNotAllowed`
exception is raised.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import posixpath
from pprint import pformat
from urlparse import urljoin
from werkzeug.urls import url_encode, url_decode, url_quote
from werkzeug.utils import redirect, format_string
from werkzeug.exceptions import HTTPException, NotFound, MethodNotAllowed
from werkzeug._internal import _get_environ
from werkzeug.datastructures import ImmutableDict, MultiDict
_rule_re = re.compile(r'''
(?P<static>[^<]*) # static rule data
<
(?:
(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
(?:\((?P<args>.*?)\))? # converter arguments
\: # variable delimiter
)?
(?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
>
''', re.VERBOSE)
_simple_rule_re = re.compile(r'<([^>]+)>')
_converter_args_re = re.compile(r'''
((?P<name>\w+)\s*=\s*)?
(?P<value>
True|False|
\d+.\d+|
\d+.|
\d+|
\w+|
[urUR]?(?P<stringval>"[^"]*?"|'[^']*')
)\s*,
''', re.VERBOSE|re.UNICODE)
_PYTHON_CONSTANTS = {
'None': None,
'True': True,
'False': False
}
def _pythonize(value):
if value in _PYTHON_CONSTANTS:
return _PYTHON_CONSTANTS[value]
for convert in int, float:
try:
return convert(value)
except ValueError:
pass
if value[:1] == value[-1:] and value[0] in '"\'':
value = value[1:-1]
return unicode(value)
def parse_converter_args(argstr):
argstr += ','
args = []
kwargs = {}
for item in _converter_args_re.finditer(argstr):
value = item.group('stringval')
if value is None:
value = item.group('value')
value = _pythonize(value)
if not item.group('name'):
args.append(value)
else:
name = item.group('name')
kwargs[name] = value
return tuple(args), kwargs
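# For instance, the converter argument string "2, min=1" parses to
# ((2,), {'min': 1}): positional arguments first, keyword arguments second,
# with _pythonize() turning numeric and boolean literals into Python objects.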
def parse_rule(rule):
"""Parse a rule and return it as generator. Each iteration yields tuples
in the form ``(converter, arguments, variable)``. If the converter is
`None` it's a static url part, otherwise it's a dynamic one.
:internal:
"""
pos = 0
end = len(rule)
do_match = _rule_re.match
used_names = set()
while pos < end:
m = do_match(rule, pos)
if m is None:
break
data = m.groupdict()
if data['static']:
yield None, None, data['static']
variable = data['variable']
converter = data['converter'] or 'default'
if variable in used_names:
raise ValueError('variable name %r used twice.' % variable)
used_names.add(variable)
yield converter, data['args'] or None, variable
pos = m.end()
if pos < end:
remaining = rule[pos:]
if '>' in remaining or '<' in remaining:
raise ValueError('malformed url rule: %r' % rule)
yield None, None, remaining
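# A quick sketch of parse_rule() in action (the values shown are what the
# generator yields for the given rule string):
#     >>> list(parse_rule('/browse/<int:id>/'))
#     [(None, None, '/browse/'), ('int', None, 'id'), (None, None, '/')]
# Static segments come through with converter None; dynamic parts carry the
# converter name and its still-unparsed argument string.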
def get_converter(map, name, args):
"""Create a new converter for the given arguments or raise
exception if the converter does not exist.
:internal:
"""
if not name in map.converters:
raise LookupError('the converter %r does not exist' % name)
if args:
args, kwargs = parse_converter_args(args)
else:
args = ()
kwargs = {}
return map.converters[name](map, *args, **kwargs)
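# Tying the helpers together: get_converter(map, 'string', 'length=2') parses
# the argument string and instantiates map.converters['string'](map, length=2),
# i.e. the converter class named in the rule receives the parsed positional
# and keyword arguments.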
class RoutingException(Exception):
"""Special exceptions that require the application to redirect, notifying
about missing urls, etc.
:internal:
"""
class RequestRedirect(HTTPException, RoutingException):
"""Raise if the map requests a redirect. This is for example the case if
    `strict_slashes` is activated and an url that requires a trailing slash
    was requested without one.
The attribute `new_url` contains the absolute destination url.
"""
code = 301
def __init__(self, new_url):
RoutingException.__init__(self, new_url)
self.new_url = new_url
def get_response(self, environ):
return redirect(self.new_url, self.code)
class RequestSlash(RoutingException):
"""Internal exception."""
class RequestAliasRedirect(RoutingException):
"""This rule is an alias and wants to redirect to the canonical URL."""
def __init__(self, matched_values):
self.matched_values = matched_values
class BuildError(RoutingException, LookupError):
"""Raised if the build system cannot find a URL for an endpoint with the
values provided.
"""
def __init__(self, endpoint, values, method):
LookupError.__init__(self, endpoint, values, method)
self.endpoint = endpoint
self.values = values
self.method = method
class ValidationError(ValueError):
"""Validation error. If a rule converter raises this exception the rule
does not match the current URL and the next URL is tried.
"""
class RuleFactory(object):
"""As soon as you have more complex URL setups it's a good idea to use rule
factories to avoid repetitive tasks. Some of them are builtin, others can
be added by subclassing `RuleFactory` and overriding `get_rules`.
"""
def get_rules(self, map):
"""Subclasses of `RuleFactory` have to override this method and return
an iterable of rules."""
raise NotImplementedError()
class Subdomain(RuleFactory):
"""All URLs provided by this factory have the subdomain set to a
specific domain. For example if you want to use the subdomain for
the current language this can be a good setup::
url_map = Map([
Rule('/', endpoint='#select_language'),
Subdomain('<string(length=2):lang_code>', [
Rule('/', endpoint='index'),
Rule('/about', endpoint='about'),
Rule('/help', endpoint='help')
])
])
All the rules except for the ``'#select_language'`` endpoint will now
listen on a two letter long subdomain that holds the language code
for the current request.
"""
def __init__(self, subdomain, rules):
self.subdomain = subdomain
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.subdomain = self.subdomain
yield rule
class Submount(RuleFactory):
"""Like `Subdomain` but prefixes the URL rule with a given string::
url_map = Map([
Rule('/', endpoint='index'),
Submount('/blog', [
Rule('/', endpoint='blog/index'),
Rule('/entry/<entry_slug>', endpoint='blog/show')
])
])
Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
"""
def __init__(self, path, rules):
self.path = path.rstrip('/')
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.rule = self.path + rule.rule
yield rule
class EndpointPrefix(RuleFactory):
"""Prefixes all endpoints (which must be strings for this factory) with
another string. This can be useful for sub applications::
url_map = Map([
Rule('/', endpoint='index'),
EndpointPrefix('blog/', [Submount('/blog', [
Rule('/', endpoint='index'),
Rule('/entry/<entry_slug>', endpoint='show')
])])
])
"""
def __init__(self, prefix, rules):
self.prefix = prefix
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.endpoint = self.prefix + rule.endpoint
yield rule
class RuleTemplate(object):
"""Returns copies of the rules wrapped and expands string templates in
the endpoint, rule, defaults or subdomain sections.
Here a small example for such a rule template::
from werkzeug.routing import Map, Rule, RuleTemplate
resource = RuleTemplate([
Rule('/$name/', endpoint='$name.list'),
Rule('/$name/<int:id>', endpoint='$name.show')
])
url_map = Map([resource(name='user'), resource(name='page')])
When a rule template is called the keyword arguments are used to
replace the placeholders in all the string parameters.
"""
def __init__(self, rules):
self.rules = list(rules)
def __call__(self, *args, **kwargs):
return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
class RuleTemplateFactory(RuleFactory):
"""A factory that fills in template variables into rules. Used by
`RuleTemplate` internally.
:internal:
"""
def __init__(self, rules, context):
self.rules = rules
self.context = context
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
new_defaults = subdomain = None
if rule.defaults:
new_defaults = {}
for key, value in rule.defaults.iteritems():
if isinstance(value, basestring):
value = format_string(value, self.context)
new_defaults[key] = value
if rule.subdomain is not None:
subdomain = format_string(rule.subdomain, self.context)
new_endpoint = rule.endpoint
if isinstance(new_endpoint, basestring):
new_endpoint = format_string(new_endpoint, self.context)
yield Rule(
format_string(rule.rule, self.context),
new_defaults,
subdomain,
rule.methods,
rule.build_only,
new_endpoint,
rule.strict_slashes
)
class Rule(RuleFactory):
"""A Rule represents one URL pattern. There are some options for `Rule`
that change the way it behaves and are passed to the `Rule` constructor.
Note that besides the rule-string all arguments *must* be keyword arguments
in order to not break the application on Werkzeug upgrades.
`string`
Rule strings basically are just normal URL paths with placeholders in
the format ``<converter(arguments):name>`` where the converter and the
arguments are optional. If no converter is defined the `default`
converter is used which means `string` in the normal configuration.
URL rules that end with a slash are branch URLs, others are leaves.
If you have `strict_slashes` enabled (which is the default), all
branch URLs that are matched without a trailing slash will trigger a
redirect to the same URL with the missing slash appended.
The converters are defined on the `Map`.
`endpoint`
The endpoint for this rule. This can be anything. A reference to a
function, a string, a number etc. The preferred way is using a string
because the endpoint is used for URL generation.
`defaults`
An optional dict with defaults for other rules with the same endpoint.
This is a bit tricky but useful if you want to have unique URLs::
url_map = Map([
Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
Rule('/all/page/<int:page>', endpoint='all_entries')
])
If a user now visits ``http://example.com/all/page/1`` he will be
redirected to ``http://example.com/all/``. If `redirect_defaults` is
disabled on the `Map` instance this will only affect the URL
generation.
`subdomain`
The subdomain rule string for this rule. If not specified the rule
only matches for the `default_subdomain` of the map. If the map is
not bound to a subdomain this feature is disabled.
Can be useful if you want to have user profiles on different subdomains
and all subdomains are forwarded to your application::
url_map = Map([
Rule('/', subdomain='<username>', endpoint='user/homepage'),
Rule('/stats', subdomain='<username>', endpoint='user/stats')
])
`methods`
A sequence of http methods this rule applies to. If not specified, all
methods are allowed. For example this can be useful if you want different
endpoints for `POST` and `GET`. If methods are defined and the path
matches but the method matched against is not in this list or in the
list of another rule for that path the error raised is of the type
`MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
list of methods and `HEAD` is not, `HEAD` is added automatically.
.. versionchanged:: 0.6.1
`HEAD` is now automatically added to the methods if `GET` is
present. The reason for this is that existing code often did not
work properly in servers not rewriting `HEAD` to `GET`
automatically and it was not documented how `HEAD` should be
treated. This was considered a bug in Werkzeug because of that.
`strict_slashes`
Override the `Map` setting for `strict_slashes` only for this rule. If
not specified the `Map` setting is used.
`build_only`
Set this to True and the rule will never match but will create a URL
that can be built. This is useful if you have resources on a subdomain
or folder that are not handled by the WSGI application (like static data).
`redirect_to`
If given this must be either a string or callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax::
def foo_with_slug(adapter, id):
# ask the database for the slug for the old id. this of
# course has nothing to do with werkzeug.
return 'foo/' + Foo.get_slug_for_id(id)
url_map = Map([
Rule('/foo/<slug>', endpoint='foo'),
Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
])
When the rule is matched the routing system will raise a
`RequestRedirect` exception with the target for the redirect.
Keep in mind that the URL will be joined against the URL root of the
script so don't use a leading slash on the target URL unless you
really mean root of that domain.
`alias`
If enabled this rule serves as an alias for another rule with the same
endpoint and arguments.
`host`
If provided and the URL map has host matching enabled this can be
used to provide a match rule for the whole host. This also means
that the subdomain feature is disabled.
.. versionadded:: 0.7
The `alias` and `host` parameters were added.
"""
def __init__(self, string, defaults=None, subdomain=None, methods=None,
build_only=False, endpoint=None, strict_slashes=None,
redirect_to=None, alias=False, host=None):
if not string.startswith('/'):
raise ValueError('urls must start with a leading slash')
self.rule = string
self.is_leaf = not string.endswith('/')
self.map = None
self.strict_slashes = strict_slashes
self.subdomain = subdomain
self.host = host
self.defaults = defaults
self.build_only = build_only
self.alias = alias
if methods is None:
self.methods = None
else:
self.methods = set([x.upper() for x in methods])
if 'HEAD' not in self.methods and 'GET' in self.methods:
self.methods.add('HEAD')
self.endpoint = endpoint
self.redirect_to = redirect_to
if defaults:
self.arguments = set(map(str, defaults))
else:
self.arguments = set()
self._trace = self._converters = self._regex = self._weights = None
def empty(self):
"""Return an unbound copy of this rule. This can be useful if you
want to reuse an already bound URL for another map."""
defaults = None
if self.defaults:
defaults = dict(self.defaults)
return Rule(self.rule, defaults, self.subdomain, self.methods,
self.build_only, self.endpoint, self.strict_slashes,
self.redirect_to, self.alias, self.host)
def get_rules(self, map):
yield self
def refresh(self):
"""Rebinds and refreshes the URL. Call this if you modified the
rule in place.
:internal:
"""
self.bind(self.map, rebind=True)
def bind(self, map, rebind=False):
"""Bind the url to a map and create a regular expression based on
the information from the rule itself and the defaults from the map.
:internal:
"""
if self.map is not None and not rebind:
raise RuntimeError('url rule %r already bound to map %r' %
(self, self.map))
self.map = map
if self.strict_slashes is None:
self.strict_slashes = map.strict_slashes
if self.subdomain is None:
self.subdomain = map.default_subdomain
self.compile()
def compile(self):
"""Compiles the regular expression and stores it."""
assert self.map is not None, 'rule not bound'
if self.map.host_matching:
domain_rule = self.host or ''
else:
domain_rule = self.subdomain or ''
self._trace = []
self._converters = {}
self._weights = []
regex_parts = []
def _build_regex(rule):
for converter, arguments, variable in parse_rule(rule):
if converter is None:
regex_parts.append(re.escape(variable))
self._trace.append((False, variable))
for part in variable.split('/'):
if part:
self._weights.append((0, -len(part)))
else:
convobj = get_converter(self.map, converter, arguments)
regex_parts.append('(?P<%s>%s)' % (variable, convobj.regex))
self._converters[variable] = convobj
self._trace.append((True, variable))
self._weights.append((1, convobj.weight))
self.arguments.add(str(variable))
_build_regex(domain_rule)
regex_parts.append('\\|')
self._trace.append((False, '|'))
_build_regex(self.is_leaf and self.rule or self.rule.rstrip('/'))
if not self.is_leaf:
self._trace.append((False, '/'))
if self.build_only:
return
regex = r'^%s%s$' % (
u''.join(regex_parts),
(not self.is_leaf or not self.strict_slashes) and \
'(?<!/)(?P<__suffix__>/?)' or ''
)
self._regex = re.compile(regex, re.UNICODE)
def match(self, path):
"""Check if the rule matches a given path. Path is a string in the
form ``"subdomain|/path(method)"`` and is assembled by the map. If
the map is doing host matching the subdomain part will be the host
instead.
If the rule matches a dict with the converted values is returned,
otherwise the return value is `None`.
:internal:
"""
if not self.build_only:
m = self._regex.search(path)
if m is not None:
groups = m.groupdict()
# we have a folder like part of the url without a trailing
# slash and strict slashes enabled. raise an exception that
# tells the map to redirect to the same url but with a
# trailing slash
if self.strict_slashes and not self.is_leaf and \
not groups.pop('__suffix__'):
raise RequestSlash()
# if we are not in strict slashes mode we have to remove
# a __suffix__
elif not self.strict_slashes:
del groups['__suffix__']
result = {}
for name, value in groups.iteritems():
try:
value = self._converters[name].to_python(value)
except ValidationError:
return
result[str(name)] = value
if self.defaults:
result.update(self.defaults)
if self.alias and self.map.redirect_defaults:
raise RequestAliasRedirect(result)
return result
def build(self, values, append_unknown=True):
"""Assembles the relative url for that rule and the subdomain.
If building doesn't work for some reason, `None` is returned.
:internal:
"""
tmp = []
add = tmp.append
processed = set(self.arguments)
for is_dynamic, data in self._trace:
if is_dynamic:
try:
add(self._converters[data].to_url(values[data]))
except ValidationError:
return
processed.add(data)
else:
add(url_quote(data, self.map.charset, safe='/:|'))
domain_part, url = (u''.join(tmp)).split('|', 1)
if append_unknown:
query_vars = MultiDict(values)
for key in processed:
if key in query_vars:
del query_vars[key]
if query_vars:
url += '?' + url_encode(query_vars, self.map.charset,
sort=self.map.sort_parameters,
key=self.map.sort_key)
return domain_part, url
def provides_defaults_for(self, rule):
"""Check if this rule has defaults for a given rule.
:internal:
"""
return not self.build_only and self.defaults and \
self.endpoint == rule.endpoint and self != rule and \
self.arguments == rule.arguments
def suitable_for(self, values, method=None):
"""Check if the dict of values has enough data for url generation.
:internal:
"""
# if a method was given explicitly and that method is not supported
# by this rule, this rule is not suitable.
if method is not None and self.methods is not None \
and method not in self.methods:
return False
defaults = self.defaults or ()
# all arguments required must be either in the defaults dict or
# the value dictionary otherwise it's not suitable
for key in self.arguments:
if key not in defaults and key not in values:
return False
# in case defaults are given we ensure that either the value was
# skipped or the value is the same as the default value.
if defaults:
for key, value in defaults.iteritems():
if key in values and value != values[key]:
return False
return True
def match_compare_key(self):
"""The match compare key for sorting.
Current implementation:
1. rules without any arguments come first for performance
reasons only as we expect them to match faster and some
common ones usually don't have any arguments (index pages etc.)
2. The more complex rules come first so the second argument is the
negative length of the number of weights.
3. lastly we order by the actual weights.
:internal:
"""
return bool(self.arguments), -len(self._weights), self._weights
def build_compare_key(self):
"""The build compare key for sorting.
:internal:
"""
return self.alias and 1 or 0, -len(self.arguments), \
-len(self.defaults or ())
def __eq__(self, other):
return self.__class__ is other.__class__ and \
self._trace == other._trace
def __ne__(self, other):
return not self.__eq__(other)
def __unicode__(self):
return self.rule
def __str__(self):
charset = self.map is not None and self.map.charset or 'utf-8'
return unicode(self).encode(charset)
def __repr__(self):
if self.map is None:
return '<%s (unbound)>' % self.__class__.__name__
charset = self.map is not None and self.map.charset or 'utf-8'
tmp = []
for is_dynamic, data in self._trace:
if is_dynamic:
tmp.append('<%s>' % data)
else:
tmp.append(data)
return '<%s %r%s -> %s>' % (
self.__class__.__name__,
(u''.join(tmp).encode(charset)).lstrip('|'),
self.methods is not None and ' (%s)' % \
', '.join(self.methods) or '',
self.endpoint
)
class BaseConverter(object):
"""Base class for all converters."""
regex = '[^/]+'
weight = 100
def __init__(self, map):
self.map = map
def to_python(self, value):
return value
def to_url(self, value):
return url_quote(value, self.map.charset)
class UnicodeConverter(BaseConverter):
"""This converter is the default converter and accepts any string but
only one path segment. Thus the string can not include a slash.
This is the default validator.
Example::
Rule('/pages/<page>'),
Rule('/<string(length=2):lang_code>')
:param map: the :class:`Map`.
:param minlength: the minimum length of the string. Must be greater
or equal 1.
:param maxlength: the maximum length of the string.
:param length: the exact length of the string.
"""
def __init__(self, map, minlength=1, maxlength=None, length=None):
BaseConverter.__init__(self, map)
if length is not None:
length = '{%d}' % int(length)
else:
if maxlength is None:
maxlength = ''
else:
maxlength = int(maxlength)
length = '{%s,%s}' % (
int(minlength),
maxlength
)
self.regex = '[^/]' + length
class AnyConverter(BaseConverter):
"""Matches one of the items provided. Items can either be Python
identifiers or strings::
Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')
:param map: the :class:`Map`.
:param items: this function accepts the possible items as positional
arguments.
"""
def __init__(self, map, *items):
BaseConverter.__init__(self, map)
self.regex = '(?:%s)' % '|'.join([re.escape(x) for x in items])
class PathConverter(BaseConverter):
"""Like the default :class:`UnicodeConverter`, but it also matches
slashes. This is useful for wikis and similar applications::
Rule('/<path:wikipage>')
Rule('/<path:wikipage>/edit')
:param map: the :class:`Map`.
"""
regex = '[^/].*?'
weight = 200
class NumberConverter(BaseConverter):
"""Baseclass for `IntegerConverter` and `FloatConverter`.
:internal:
"""
weight = 50
def __init__(self, map, fixed_digits=0, min=None, max=None):
BaseConverter.__init__(self, map)
self.fixed_digits = fixed_digits
self.min = min
self.max = max
def to_python(self, value):
if (self.fixed_digits and len(value) != self.fixed_digits):
raise ValidationError()
value = self.num_convert(value)
if (self.min is not None and value < self.min) or \
(self.max is not None and value > self.max):
raise ValidationError()
return value
def to_url(self, value):
value = self.num_convert(value)
if self.fixed_digits:
value = ('%%0%sd' % self.fixed_digits) % value
return str(value)
class IntegerConverter(NumberConverter):
"""This converter only accepts integer values::
Rule('/page/<int:page>')
This converter does not support negative values.
:param map: the :class:`Map`.
:param fixed_digits: the number of fixed digits in the URL. If you set
this to ``4`` for example, the application will
only match if the url looks like ``/0001/``. The
default is variable length.
:param min: the minimal value.
:param max: the maximal value.
"""
regex = r'\d+'
num_convert = int
class FloatConverter(NumberConverter):
"""This converter only accepts floating point values::
Rule('/probability/<float:probability>')
This converter does not support negative values.
:param map: the :class:`Map`.
:param min: the minimal value.
:param max: the maximal value.
"""
regex = r'\d+\.\d+'
num_convert = float
def __init__(self, map, min=None, max=None):
NumberConverter.__init__(self, map, 0, min, max)
#: the default converter mapping for the map.
DEFAULT_CONVERTERS = {
'default': UnicodeConverter,
'string': UnicodeConverter,
'any': AnyConverter,
'path': PathConverter,
'int': IntegerConverter,
'float': FloatConverter
}
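# Hedged sketch (added for illustration, not part of the original module):
# a custom converter would typically subclass BaseConverter and override
# `regex`, `to_python` and `to_url`, then be registered through the
# `converters` argument of the `Map` class defined below.  The name
# `_FourDigitYearConverter` is hypothetical.
class _FourDigitYearConverter(BaseConverter):
    regex = r'\d{4}'
    def to_python(self, value):
        # convert the matched digits to an int for the view function
        return int(value)
    def to_url(self, value):
        # zero-pad when building URLs so round-tripping is stable
        return '%04d' % int(value)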
class Map(object):
"""The map class stores all the URL rules and some configuration
parameters. Some of the configuration values are only stored on the
`Map` instance since those affect all rules, others are just defaults
and can be overridden for each rule. Note that you have to specify all
arguments besides the `rules` as keyword arguments!
:param rules: sequence of url rules for this map.
:param default_subdomain: The default subdomain for rules without a
subdomain defined.
:param charset: charset of the url. defaults to ``"utf-8"``
:param strict_slashes: Take care of trailing slashes.
:param redirect_defaults: This will redirect to the default rule if it
wasn't visited that way. This helps creating
unique URLs.
:param converters: A dict of converters that adds additional converters
to the list of converters. If you redefine one
converter this will override the original one.
:param sort_parameters: If set to `True` the url parameters are sorted.
See `url_encode` for more details.
:param sort_key: The sort key function for `url_encode`.
:param encoding_errors: the error method to use for decoding
:param host_matching: if set to `True` it enables the host matching
feature and disables the subdomain one. If
enabled the `host` parameter to rules is used
instead of the `subdomain` one.
.. versionadded:: 0.5
`sort_parameters` and `sort_key` were added.
.. versionadded:: 0.7
`encoding_errors` and `host_matching` were added.
"""
#: .. versionadded:: 0.6
#: a dict of default converters to be used.
default_converters = ImmutableDict(DEFAULT_CONVERTERS)
def __init__(self, rules=None, default_subdomain='', charset='utf-8',
strict_slashes=True, redirect_defaults=True,
converters=None, sort_parameters=False, sort_key=None,
encoding_errors='replace', host_matching=False):
self._rules = []
self._rules_by_endpoint = {}
self._remap = True
self.default_subdomain = default_subdomain
self.charset = charset
self.encoding_errors = encoding_errors
self.strict_slashes = strict_slashes
self.redirect_defaults = redirect_defaults
self.host_matching = host_matching
self.converters = self.default_converters.copy()
if converters:
self.converters.update(converters)
self.sort_parameters = sort_parameters
self.sort_key = sort_key
for rulefactory in rules or ():
self.add(rulefactory)
def is_endpoint_expecting(self, endpoint, *arguments):
"""Iterate over all rules and check if the endpoint expects
the arguments provided. This is for example useful if you have
some URLs that expect a language code and others that do not and
you want to wrap the builder a bit so that the current language
code is automatically added if not provided but endpoints expect
it.
:param endpoint: the endpoint to check.
:param arguments: this function accepts one or more arguments
as positional arguments. Each one of them is
checked.
"""
self.update()
arguments = set(arguments)
for rule in self._rules_by_endpoint[endpoint]:
if arguments.issubset(rule.arguments):
return True
return False
def iter_rules(self, endpoint=None):
"""Iterate over all rules or the rules of an endpoint.
:param endpoint: if provided only the rules for that endpoint
are returned.
:return: an iterator
"""
self.update()
if endpoint is not None:
return iter(self._rules_by_endpoint[endpoint])
return iter(self._rules)
def add(self, rulefactory):
"""Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
"""
for rule in rulefactory.get_rules(self):
rule.bind(self)
self._rules.append(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True
def bind(self, server_name, script_name=None, subdomain=None,
url_scheme='http', default_method='GET', path_info=None,
query_args=None):
"""Return a new :class:`MapAdapter` with the details specified to the
call. Note that `script_name` will default to ``'/'`` if not further
specified or `None`. The `server_name` at least is a requirement
because the HTTP RFC requires absolute URLs for redirects and so all
redirect exceptions raised by Werkzeug will contain the full canonical
URL.
If no path_info is passed to :meth:`match` it will use the default path
info passed to bind. While this doesn't really make sense for
manual bind calls, it's useful if you bind a map to a WSGI
environment which already contains the path info.
`subdomain` will default to the `default_subdomain` for this map if
not defined. If there is no `default_subdomain` you cannot use the
subdomain feature.
.. versionadded:: 0.7
`query_args` added
.. versionadded:: 0.8
`query_args` can now also be a string.
"""
server_name = server_name.lower()
if self.host_matching:
if subdomain is not None:
raise RuntimeError('host matching enabled and a '
'subdomain was provided')
elif subdomain is None:
subdomain = self.default_subdomain
if script_name is None:
script_name = '/'
if isinstance(server_name, unicode):
server_name = server_name.encode('idna')
return MapAdapter(self, server_name, script_name, subdomain,
url_scheme, path_info, default_method, query_args)
def bind_to_environ(self, environ, server_name=None, subdomain=None):
"""Like :meth:`bind` but you can pass it an WSGI environment and it
will fetch the information from that dictionary. Note that because of
limitations in the protocol there is no way to get the current
subdomain and real `server_name` from the environment. If you don't
provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
`HTTP_HOST` if provided) as the `server_name`, with the subdomain
feature disabled.
If `subdomain` is `None` but an environment and a server name is
provided it will calculate the current subdomain automatically.
Example: `server_name` is ``'example.com'`` and the `SERVER_NAME`
in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
subdomain will be ``'staging.dev'``.
If the object passed as environ has an environ attribute, the value of
this attribute is used instead. This allows you to pass request
objects. Additionally `PATH_INFO` is added as a default of the
:class:`MapAdapter` so that you don't have to pass the path info to
the match method.
.. versionchanged:: 0.5
previously this method accepted a bogus `calculate_subdomain`
parameter that did not have any effect. It was removed because
of that.
.. versionchanged:: 0.8
This will no longer raise a ValueError when an unexpected server
name was passed.
:param environ: a WSGI environment.
:param server_name: an optional server name hint (see above).
:param subdomain: optionally the current subdomain (see above).
"""
environ = _get_environ(environ)
if server_name is None:
if 'HTTP_HOST' in environ:
server_name = environ['HTTP_HOST']
else:
server_name = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
server_name += ':' + environ['SERVER_PORT']
elif subdomain is None and not self.host_matching:
server_name = server_name.lower()
if 'HTTP_HOST' in environ:
wsgi_server_name = environ.get('HTTP_HOST')
else:
wsgi_server_name = environ.get('SERVER_NAME')
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
wsgi_server_name += ':' + environ['SERVER_PORT']
wsgi_server_name = wsgi_server_name.lower()
cur_server_name = wsgi_server_name.split('.')
real_server_name = server_name.split('.')
offset = -len(real_server_name)
if cur_server_name[offset:] != real_server_name:
# This can happen even with valid configs if the server was
# accessed directly by IP address under some situations.
# Instead of raising an exception like in Werkzeug 0.7 or
# earlier we go by an invalid subdomain which will result
# in a 404 error on matching.
subdomain = '<invalid>'
else:
subdomain = '.'.join(filter(None, cur_server_name[:offset]))
return Map.bind(self, server_name, environ.get('SCRIPT_NAME'),
subdomain, environ['wsgi.url_scheme'],
environ['REQUEST_METHOD'], environ.get('PATH_INFO'),
query_args=environ.get('QUERY_STRING', ''))
def update(self):
"""Called before matching and building to keep the compiled rules
in the correct order after things changed.
"""
if self._remap:
self._rules.sort(key=lambda x: x.match_compare_key())
for rules in self._rules_by_endpoint.itervalues():
rules.sort(key=lambda x: x.build_compare_key())
self._remap = False
def __repr__(self):
rules = self.iter_rules()
return '%s(%s)' % (self.__class__.__name__, pformat(list(rules)))
class MapAdapter(object):
"""Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
the URL matching and building based on runtime information.
"""
def __init__(self, map, server_name, script_name, subdomain,
url_scheme, path_info, default_method, query_args=None):
self.map = map
self.server_name = server_name
if not script_name.endswith('/'):
script_name += '/'
self.script_name = script_name
self.subdomain = subdomain
self.url_scheme = url_scheme
self.path_info = path_info or u''
self.default_method = default_method
self.query_args = query_args
def dispatch(self, view_func, path_info=None, method=None,
catch_http_exceptions=False):
"""Does the complete dispatching process. `view_func` is called with
the endpoint and a dict with the values for the view. It should
look up the view function, call it, and return a response object
or WSGI application. http exceptions are not caught by default
so that applications can display nicer error messages by just
catching them by hand. If you want to stick with the default
error messages you can pass it ``catch_http_exceptions=True`` and
it will catch the http exceptions.
Here a small example for the dispatch usage::
from werkzeug.wrappers import Request, Response
from werkzeug.wsgi import responder
from werkzeug.routing import Map, Rule
def on_index(request):
return Response('Hello from the index')
url_map = Map([Rule('/', endpoint='index')])
views = {'index': on_index}
@responder
def application(environ, start_response):
request = Request(environ)
urls = url_map.bind_to_environ(environ)
return urls.dispatch(lambda e, v: views[e](request, **v),
catch_http_exceptions=True)
Keep in mind that this method might return exception objects, too, so
use :class:`Response.force_type` to get a response object.
:param view_func: a function that is called with the endpoint as
first argument and the value dict as second. Has
to dispatch to the actual view function with this
information. (see above)
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param catch_http_exceptions: set to `True` to catch any of the
werkzeug :class:`HTTPException`\s.
"""
try:
try:
endpoint, args = self.match(path_info, method)
except RequestRedirect, e:
return e
return view_func(endpoint, args)
except HTTPException, e:
if catch_http_exceptions:
return e
raise
def match(self, path_info=None, method=None, return_rule=False,
query_args=None):
"""The usage is simple: you just pass the match method the current
path info as well as the method (which defaults to `GET`). The
following things can then happen:
- you receive a `NotFound` exception that indicates that no URL is
matching. A `NotFound` exception is also a WSGI application you
can call to get a default "page not found" page (it happens to be the
same object as `werkzeug.exceptions.NotFound`)
- you receive a `MethodNotAllowed` exception that indicates that there
is a match for this URL but not for the current request method.
This is useful for RESTful applications.
- you receive a `RequestRedirect` exception with a `new_url`
attribute. This exception is used to notify you about a redirect that
Werkzeug requests from your WSGI application. This is for example the
case if you request ``/foo`` although the correct URL is ``/foo/``.
You can use the `RequestRedirect` instance as response-like object
similar to all other subclasses of `HTTPException`.
- you get a tuple in the form ``(endpoint, arguments)`` if there is
a match (unless `return_rule` is True, in which case you get a tuple
in the form ``(rule, arguments)``)
If the path info is not passed to the match method the default path
info of the map is used (defaults to the root URL if not defined
explicitly).
All of the exceptions raised are subclasses of `HTTPException` so they
can be used as WSGI responses. They will all render generic error or
redirect pages.
Here is a small example for matching:
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.match("/", "GET")
('index', {})
>>> urls.match("/downloads/42")
('downloads/show', {'id': 42})
And here is what happens on redirect and missing URLs:
>>> urls.match("/downloads")
Traceback (most recent call last):
...
RequestRedirect: http://example.com/downloads/
>>> urls.match("/missing")
Traceback (most recent call last):
...
NotFound: 404 Not Found
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param return_rule: return the rule that matched instead of just the
endpoint (defaults to `False`).
:param query_args: optional query arguments that are used for
automatic redirects as string or dictionary. It's
currently not possible to use the query arguments
for URL matching.
.. versionadded:: 0.6
`return_rule` was added.
.. versionadded:: 0.7
`query_args` was added.
.. versionchanged:: 0.8
`query_args` can now also be a string.
"""
self.map.update()
if path_info is None:
path_info = self.path_info
if not isinstance(path_info, unicode):
path_info = path_info.decode(self.map.charset,
self.map.encoding_errors)
if query_args is None:
query_args = self.query_args
method = (method or self.default_method).upper()
path = u'%s|/%s' % (self.map.host_matching and self.server_name or
self.subdomain, path_info.lstrip('/'))
have_match_for = set()
for rule in self.map._rules:
try:
rv = rule.match(path)
except RequestSlash:
raise RequestRedirect(self.make_redirect_url(
path_info + '/', query_args))
except RequestAliasRedirect, e:
raise RequestRedirect(self.make_alias_redirect_url(
path, rule.endpoint, e.matched_values, method, query_args))
if rv is None:
continue
if rule.methods is not None and method not in rule.methods:
have_match_for.update(rule.methods)
continue
if self.map.redirect_defaults:
redirect_url = self.get_default_redirect(rule, method, rv,
query_args)
if redirect_url is not None:
raise RequestRedirect(redirect_url)
if rule.redirect_to is not None:
if isinstance(rule.redirect_to, basestring):
def _handle_match(match):
value = rv[match.group(1)]
return rule._converters[match.group(1)].to_url(value)
redirect_url = _simple_rule_re.sub(_handle_match,
rule.redirect_to)
else:
redirect_url = rule.redirect_to(self, **rv)
raise RequestRedirect(str(urljoin('%s://%s%s%s' % (
self.url_scheme,
self.subdomain and self.subdomain + '.' or '',
self.server_name,
self.script_name
), redirect_url)))
if return_rule:
return rule, rv
else:
return rule.endpoint, rv
if have_match_for:
raise MethodNotAllowed(valid_methods=list(have_match_for))
raise NotFound()
def test(self, path_info=None, method=None):
"""Test if a rule would match. Works like `match` but returns `True`
if the URL matches, or `False` if it does not exist.
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
"""
try:
self.match(path_info, method)
except RequestRedirect:
pass
except HTTPException:
return False
return True
def allowed_methods(self, path_info=None):
"""Returns the valid methods that match for a given path.
.. versionadded:: 0.7
"""
try:
self.match(path_info, method='--')
except MethodNotAllowed, e:
return e.valid_methods
except HTTPException, e:
pass
return []
def get_host(self, domain_part):
"""Figures out the full host name for the given domain part. The
domain part is a subdomain if host matching is disabled, otherwise
it is a full host name.
"""
if self.map.host_matching:
if domain_part is None:
return self.server_name
return domain_part
subdomain = domain_part
if subdomain is None:
subdomain = self.subdomain
return (subdomain and subdomain + '.' or '') + self.server_name
def get_default_redirect(self, rule, method, values, query_args):
"""A helper that returns the URL to redirect to if it finds one.
This is used for default redirecting only.
:internal:
"""
assert self.map.redirect_defaults
for r in self.map._rules_by_endpoint[rule.endpoint]:
# every rule that comes after this one, including ourself
# has a lower priority for the defaults. We order the ones
# with the highest priority up for building.
if r is rule:
break
if r.provides_defaults_for(rule) and \
r.suitable_for(values, method):
values.update(r.defaults)
domain_part, path = r.build(values)
return self.make_redirect_url(
path, query_args, domain_part=domain_part)
def encode_query_args(self, query_args):
if not isinstance(query_args, basestring):
query_args = url_encode(query_args, self.map.charset)
return query_args
def make_redirect_url(self, path_info, query_args=None, domain_part=None):
"""Creates a redirect URL.
:internal:
"""
suffix = ''
if query_args:
suffix = '?' + self.encode_query_args(query_args)
return str('%s://%s/%s%s' % (
self.url_scheme,
self.get_host(domain_part),
posixpath.join(self.script_name[:-1].lstrip('/'),
url_quote(path_info.lstrip('/'), self.map.charset)),
suffix
))
def make_alias_redirect_url(self, path, endpoint, values, method, query_args):
"""Internally called to make an alias redirect URL."""
url = self.build(endpoint, values, method, append_unknown=False,
force_external=True)
if query_args:
url += '?' + self.encode_query_args(query_args)
assert url != path, 'detected invalid alias setting. No canonical ' \
'URL found'
return url
def _partial_build(self, endpoint, values, method, append_unknown):
"""Helper for :meth:`build`. Returns subdomain and path for the
rule that accepts this endpoint, values and method.
:internal:
"""
# in case the method is none, try with the default method first
if method is None:
rv = self._partial_build(endpoint, values, self.default_method,
append_unknown)
if rv is not None:
return rv
# default method did not match or a specific method is passed,
# check all and go with first result.
for rule in self.map._rules_by_endpoint.get(endpoint, ()):
if rule.suitable_for(values, method):
rv = rule.build(values, append_unknown)
if rv is not None:
return rv
def build(self, endpoint, values=None, method=None, force_external=False,
append_unknown=True):
"""Building URLs works pretty much the other way round. Instead of
`match` you call `build` and pass it the endpoint and a dict of
arguments for the placeholders.
The `build` function also accepts an argument called `force_external`
which, if you set it to `True`, will force external URLs. By default
external URLs (including the server name) will only be used if the
target URL is on a different subdomain.
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.build("index", {})
'/'
>>> urls.build("downloads/show", {'id': 42})
'/downloads/42'
>>> urls.build("downloads/show", {'id': 42}, force_external=True)
'http://example.com/downloads/42'
Because URLs cannot contain non ASCII data you will always get
bytestrings back. Non ASCII characters are urlencoded with the
charset defined on the map instance.
Additional values are converted to unicode and appended to the URL as
URL querystring parameters:
>>> urls.build("index", {'q': 'My Searchstring'})
'/?q=My+Searchstring'
If a rule does not exist when building a `BuildError` exception is
raised.
The build method accepts an argument called `method` which allows you
to specify the method you want to have an URL built for if you have
different methods for the same endpoint specified.
.. versionadded:: 0.6
the `append_unknown` parameter was added.
:param endpoint: the endpoint of the URL to build.
:param values: the values for the URL to build. Unhandled values are
appended to the URL as query parameters.
:param method: the HTTP method for the rule if there are different
URLs for different methods on the same endpoint.
:param force_external: enforce full canonical external URLs.
:param append_unknown: unknown parameters are appended to the generated
URL as query string argument. Disable this
if you want the builder to ignore those.
"""
self.map.update()
if values:
if isinstance(values, MultiDict):
valueiter = values.iteritems(multi=True)
else:
valueiter = values.iteritems()
values = dict((k, v) for k, v in valueiter if v is not None)
else:
values = {}
rv = self._partial_build(endpoint, values, method, append_unknown)
if rv is None:
raise BuildError(endpoint, values, method)
domain_part, path = rv
host = self.get_host(domain_part)
# shortcut this.
if not force_external and (
(self.map.host_matching and host == self.server_name) or
(not self.map.host_matching and domain_part == self.subdomain)):
return str(urljoin(self.script_name, './' + path.lstrip('/')))
return str('%s://%s%s/%s' % (
self.url_scheme,
host,
self.script_name[:-1],
path.lstrip('/')
))
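# Hedged usage sketch (added for illustration, not part of Werkzeug itself):
# it exercises matching and building with the classes defined above, and
# only runs when this module is executed directly.
if __name__ == '__main__':
    _demo_map = Map([
        Rule('/', endpoint='index'),
        Rule('/user/<username>', endpoint='user'),
    ])
    _demo_urls = _demo_map.bind('example.com', '/')
    # matching returns (endpoint, converted values)
    assert _demo_urls.match('/user/alice') == ('user', {'username': 'alice'})
    # building is the reverse operation
    assert _demo_urls.build('user', {'username': 'alice'}) == '/user/alice'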
| apache-2.0 |
toomoresuch/pysonengine | eggs/ipython-0.10.1-py2.6.egg/IPython/kernel/core/message_cache.py | 7 | 2531 | # encoding: utf-8
"""Storage for the responses from the interpreter."""
__docformat__ = "restructuredtext en"
#-------------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
class IMessageCache(object):
""" Storage for the response from the interpreter.
"""
def add_message(self, i, message):
""" Add a message dictionary to the cache.
Parameters
----------
i : int
message : dict
"""
def get_message(self, i=None):
""" Get the message from the cache.
Parameters
----------
i : int, optional
The number of the message. If not provided, return the
highest-numbered message.
Returns
-------
message : dict
Raises
------
IndexError if the message does not exist in the cache.
"""
class SimpleMessageCache(object):
""" Simple dictionary-based, in-memory storage of the responses from the
interpreter.
"""
def __init__(self):
self.cache = {}
def add_message(self, i, message):
""" Add a message dictionary to the cache.
Parameters
----------
i : int
message : dict
"""
self.cache[i] = message
def get_message(self, i=None):
""" Get the message from the cache.
Parameters
----------
i : int, optional
The number of the message. If not provided, return the
highest-numbered message.
Returns
-------
message : dict
Raises
------
IndexError if the message does not exist in the cache.
"""
if i is None:
keys = self.cache.keys()
if len(keys) == 0:
raise IndexError("index %r out of range" % i)
else:
i = max(self.cache.keys())
try:
return self.cache[i]
except KeyError:
# IndexError is more appropriate here.
raise IndexError("index %r out of range" % i)
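# Hedged usage sketch (added for illustration, not part of the original
# module): shows the intended add/get round trip of SimpleMessageCache; it
# only runs when this file is executed directly.
if __name__ == '__main__':
    cache = SimpleMessageCache()
    cache.add_message(1, dict(input='1+1', output='2'))
    cache.add_message(2, dict(input='2*3', output='6'))
    assert cache.get_message(1)['output'] == '2'
    # without an index, the highest-numbered message is returned
    assert cache.get_message()['output'] == '6'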
| mit |
chris-chris/tensorflow | tensorflow/contrib/layers/python/layers/utils.py | 71 | 10875 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common util functions used by layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from collections import OrderedDict
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
__all__ = ['collect_named_outputs',
'constant_value',
'static_cond',
'smart_cond',
'get_variable_collections',
'two_element_tuple',
'n_positive_integers',
'last_dimension',
'first_dimension']
NamedOutputs = namedtuple('NamedOutputs', ['name', 'outputs'])
def collect_named_outputs(collections, alias, outputs):
"""Add `Tensor` outputs tagged with alias to collections.
It is useful to collect end-points or tags for summaries. Example of usage:
logits = collect_named_outputs('end_points', 'inception_v3/logits', logits)
assert 'inception_v3/logits' in logits.aliases
Args:
collections: A collection or list of collections. If None skip collection.
alias: String to append to the list of aliases of outputs, for example,
'inception_v3/conv1'.
outputs: Tensor, an output tensor to collect
Returns:
The outputs Tensor to allow inline call.
"""
if collections:
append_tensor_alias(outputs, alias)
ops.add_to_collections(collections, outputs)
return outputs
def append_tensor_alias(tensor, alias):
"""Append an alias to the list of aliases of the tensor.
Args:
tensor: A `Tensor`.
alias: String, to add to the list of aliases of the tensor.
Returns:
The tensor with a new alias appended to its list of aliases.
"""
# Remove ending '/' if present.
if alias[-1] == '/':
alias = alias[:-1]
if hasattr(tensor, 'aliases'):
tensor.aliases.append(alias)
else:
tensor.aliases = [alias]
return tensor
def gather_tensors_aliases(tensors):
"""Given a list of tensors, gather their aliases.
Args:
tensors: A list of `Tensors`.
Returns:
A list of strings with the aliases of all tensors.
"""
aliases = []
for tensor in tensors:
aliases += get_tensor_aliases(tensor)
return aliases
def get_tensor_aliases(tensor):
"""Get a list with the aliases of the input tensor.
If the tensor does not have any alias, it would default to its op.name or
its name.
Args:
tensor: A `Tensor`.
Returns:
A list of strings with the aliases of the tensor.
"""
if hasattr(tensor, 'aliases'):
aliases = tensor.aliases
else:
if tensor.name[-2:] == ':0':
# Use op.name for tensor ending in :0
aliases = [tensor.op.name]
else:
aliases = [tensor.name]
return aliases
def convert_collection_to_dict(collection):
"""Returns an OrderedDict of Tensors with their aliases as keys.
Args:
collection: A collection.
Returns:
An OrderedDict of {alias: tensor}
"""
return OrderedDict((alias, tensor)
for tensor in ops.get_collection(collection)
for alias in get_tensor_aliases(tensor))
def constant_value(value_or_tensor_or_var, dtype=None):
"""Returns value if value_or_tensor_or_var has a constant value.
Args:
value_or_tensor_or_var: A value, a `Tensor` or a `Variable`.
dtype: Optional `tf.dtype`, if set it would check it has the right
dtype.
Returns:
The constant value or None if it is not constant.
Raises:
ValueError: if value_or_tensor_or_var is None or the tensor_variable has the
wrong dtype.
"""
if value_or_tensor_or_var is None:
raise ValueError('value_or_tensor_or_var cannot be None')
value = value_or_tensor_or_var
if isinstance(value_or_tensor_or_var, (ops.Tensor, variables.Variable)):
if dtype and value_or_tensor_or_var.dtype != dtype:
raise ValueError('It has the wrong type %s instead of %s' % (
value_or_tensor_or_var.dtype, dtype))
if isinstance(value_or_tensor_or_var, variables.Variable):
value = None
else:
value = tensor_util.constant_value(value_or_tensor_or_var)
return value
def static_cond(pred, fn1, fn2):
"""Return either fn1() or fn2() based on the boolean value of `pred`.
Same signature as `control_flow_ops.cond()` but requires pred to be a bool.
Args:
pred: A value determining whether to return the result of `fn1` or `fn2`.
fn1: The callable to be performed if pred is true.
fn2: The callable to be performed if pred is false.
Returns:
Tensors returned by the call to either `fn1` or `fn2`.
Raises:
TypeError: if `fn1` or `fn2` is not callable.
"""
if not callable(fn1):
raise TypeError('fn1 must be callable.')
if not callable(fn2):
raise TypeError('fn2 must be callable.')
if pred:
return fn1()
else:
return fn2()
def smart_cond(pred, fn1, fn2, name=None):
"""Return either fn1() or fn2() based on the boolean predicate/value `pred`.
If `pred` is bool or has a constant value it would use `static_cond`,
otherwise it would use `tf.cond`.
Args:
pred: A scalar determining whether to return the result of `fn1` or `fn2`.
fn1: The callable to be performed if pred is true.
fn2: The callable to be performed if pred is false.
name: Optional name prefix when using tf.cond
Returns:
Tensors returned by the call to either `fn1` or `fn2`.
"""
pred_value = constant_value(pred)
if pred_value is not None:
# Use static_cond if pred has a constant value.
return static_cond(pred_value, fn1, fn2)
else:
# Use dynamic cond otherwise.
return control_flow_ops.cond(pred, fn1, fn2, name)
def get_variable_collections(variables_collections, name):
if isinstance(variables_collections, dict):
variable_collections = variables_collections.get(name, None)
else:
variable_collections = variables_collections
return variable_collections
def first_dimension(shape, min_rank=1):
"""Returns the first dimension of shape while checking it has min_rank.
Args:
shape: A `TensorShape`.
min_rank: Integer, minimum rank of shape.
Returns:
The value of the first dimension.
Raises:
ValueError: if inputs don't have at least min_rank dimensions, or if the
first dimension value is not defined.
"""
dims = shape.dims
if dims is None:
raise ValueError('dims of shape must be known but is None')
if len(dims) < min_rank:
raise ValueError('rank of shape must be at least %d not: %d' % (min_rank,
len(dims)))
value = dims[0].value
if value is None:
raise ValueError('first dimension shape must be known but is None')
return value
def last_dimension(shape, min_rank=1):
"""Returns the last dimension of shape while checking it has min_rank.
Args:
shape: A `TensorShape`.
min_rank: Integer, minimum rank of shape.
Returns:
The value of the last dimension.
Raises:
ValueError: if inputs don't have at least min_rank dimensions, or if the
last dimension value is not defined.
"""
dims = shape.dims
if dims is None:
raise ValueError('dims of shape must be known but is None')
if len(dims) < min_rank:
raise ValueError('rank of shape must be at least %d not: %d' % (min_rank,
len(dims)))
value = dims[-1].value
if value is None:
raise ValueError('last dimension shape must be known but is None')
return value
def two_element_tuple(int_or_tuple):
"""Converts `int_or_tuple` to height, width.
Several of the functions that follow accept arguments as either
a tuple of 2 integers or a single integer. A single integer
indicates that the 2 values of the tuple are the same.
This function normalizes the input value by always returning a tuple.
Args:
int_or_tuple: A list of 2 ints, a single int or a `TensorShape`.
Returns:
A tuple with 2 values.
Raises:
ValueError: If `int_or_tuple` it not well formed.
"""
if isinstance(int_or_tuple, (list, tuple)):
if len(int_or_tuple) != 2:
raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple)
return int(int_or_tuple[0]), int(int_or_tuple[1])
if isinstance(int_or_tuple, int):
return int(int_or_tuple), int(int_or_tuple)
if isinstance(int_or_tuple, tensor_shape.TensorShape):
if len(int_or_tuple) == 2:
return int_or_tuple[0], int_or_tuple[1]
raise ValueError('Must be an int, a list with 2 elements or a TensorShape of '
'length 2')
def n_positive_integers(n, value):
"""Converts `value` to a sequence of `n` positive integers.
`value` may be either be a sequence of values convertible to `int`, or a
single value convertible to `int`, in which case the resulting integer is
duplicated `n` times. It may also be a TensorShape of rank `n`.
Args:
n: Length of sequence to return.
value: Either a single value convertible to a positive `int` or an
`n`-element sequence of values convertible to a positive `int`.
Returns:
A tuple of `n` positive integers.
Raises:
TypeError: If `n` is not convertible to an integer.
ValueError: If `n` or `value` are invalid.
"""
n_orig = n
n = int(n)
if n < 1 or n != n_orig:
raise ValueError('n must be a positive integer')
try:
value = int(value)
except (TypeError, ValueError):
sequence_len = len(value)
if sequence_len != n:
raise ValueError(
'Expected sequence of %d positive integers, but received %r' %
(n, value))
try:
values = tuple(int(x) for x in value)
except:
raise ValueError(
'Expected sequence of %d positive integers, but received %r' %
(n, value))
for x in values:
if x < 1:
raise ValueError('expected positive integer, but received %d' % x)
return values
if value < 1:
raise ValueError('expected positive integer, but received %d' % value)
return (value,) * n
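# Hedged usage sketch (added for illustration, not part of the original
# module): it exercises only the pure-Python helpers above, so no
# TensorFlow graph is built, and it runs when the file is executed directly.
if __name__ == '__main__':
  # static_cond evaluates the predicate eagerly, so only one callable runs
  assert static_cond(True, lambda: 'train', lambda: 'eval') == 'train'
  assert two_element_tuple(3) == (3, 3)
  assert two_element_tuple([4, 5]) == (4, 5)
  assert n_positive_integers(3, 2) == (2, 2, 2)
  assert n_positive_integers(2, [7, 9]) == (7, 9)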
| apache-2.0 |
Reat0ide/plugin.video.pelisalacarta | lib/elementtree/SgmlopXMLTreeBuilder.py | 107 | 3209 | #
# ElementTree
# $Id$
#
# A simple XML tree builder, based on the sgmlop library.
#
# Note that this version does not support namespaces. This may be
# changed in future versions.
#
# history:
# 2004-03-28 fl created
#
# Copyright (c) 1999-2004 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
##
# Tools to build element trees from XML, based on the SGMLOP parser.
# <p>
# The current version does not support XML namespaces.
# <p>
# This tree builder requires the <b>sgmlop</b> extension module
# (available from
# <a href='http://effbot.org/downloads'>http://effbot.org/downloads</a>).
##
import ElementTree
##
# ElementTree builder for XML source data, based on the SGMLOP parser.
#
# @see elementtree.ElementTree
class TreeBuilder:
def __init__(self, html=0):
try:
import sgmlop
except ImportError:
raise RuntimeError("sgmlop parser not available")
self.__builder = ElementTree.TreeBuilder()
if html:
import htmlentitydefs
self.entitydefs.update(htmlentitydefs.entitydefs)
self.__parser = sgmlop.XMLParser()
self.__parser.register(self)
##
# Feeds data to the parser.
#
# @param data Encoded data.
def feed(self, data):
self.__parser.feed(data)
##
# Finishes feeding data to the parser.
#
# @return An element structure.
# @defreturn Element
def close(self):
self.__parser.close()
self.__parser = None
return self.__builder.close()
def finish_starttag(self, tag, attrib):
self.__builder.start(tag, attrib)
def finish_endtag(self, tag):
self.__builder.end(tag)
def handle_data(self, data):
self.__builder.data(data)
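# Hedged usage sketch (added for illustration, not part of the original
# module). It requires the sgmlop extension, so it only runs when this
# module is executed directly and the extension is importable.
if __name__ == "__main__":
    builder = TreeBuilder()
    builder.feed("<root><item>hello</item></root>")
    root = builder.close()
    assert root.tag == "root"
    assert root[0].text == "hello"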
| gpl-3.0 |
maohongyuan/kbengine | kbe/res/scripts/common/Lib/os.py | 83 | 33763 | r"""OS routines for NT or Posix depending on what system we're on.
This exports:
- all functions from posix, nt or ce, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix', 'nt' or 'ce'.
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
import stat as st
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Any new dependencies of the os module and/or changes in path separator
# requires updating importlib as well.
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
__all__.append('_exit')
except ImportError:
pass
import posixpath as path
try:
from posix import _have_functions
except ImportError:
pass
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
__all__.append('_exit')
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
try:
from nt import _have_functions
except ImportError:
pass
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
__all__.append('_exit')
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
try:
from ce import _have_functions
except ImportError:
pass
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
if _exists("_have_functions"):
_globals = globals()
def _add(str, fn):
if (fn in _globals) and (str in _have_functions):
_set.add(_globals[fn])
_set = set()
_add("HAVE_FACCESSAT", "access")
_add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_FUTIMESAT", "utime")
_add("HAVE_LINKAT", "link")
_add("HAVE_MKDIRAT", "mkdir")
_add("HAVE_MKFIFOAT", "mkfifo")
_add("HAVE_MKNODAT", "mknod")
_add("HAVE_OPENAT", "open")
_add("HAVE_READLINKAT", "readlink")
_add("HAVE_RENAMEAT", "rename")
_add("HAVE_SYMLINKAT", "symlink")
_add("HAVE_UNLINKAT", "unlink")
_add("HAVE_UNLINKAT", "rmdir")
_add("HAVE_UTIMENSAT", "utime")
supports_dir_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
supports_effective_ids = _set
_set = set()
_add("HAVE_FCHDIR", "chdir")
_add("HAVE_FCHMOD", "chmod")
_add("HAVE_FCHOWN", "chown")
_add("HAVE_FDOPENDIR", "listdir")
_add("HAVE_FEXECVE", "execve")
_set.add(stat) # fstat always works
_add("HAVE_FTRUNCATE", "truncate")
_add("HAVE_FUTIMENS", "utime")
_add("HAVE_FUTIMES", "utime")
_add("HAVE_FPATHCONF", "pathconf")
if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
_add("HAVE_FSTATVFS", "statvfs")
supports_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
# Some platforms don't support lchmod(). Often the function exists
# anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP.
# (No, I don't know why that's a good design.) ./configure will detect
# this and reject it--so HAVE_LCHMOD still won't be defined on such
# platforms. This is Very Helpful.
#
# However, sometimes platforms without a working lchmod() *do* have
# fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,
# OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes
# it behave like lchmod(). So in theory it would be a suitable
# replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s
# flag doesn't work *either*. Sadly ./configure isn't sophisticated
# enough to detect this condition--it only determines whether or not
# fchmodat() minimally works.
#
# Therefore we simply ignore fchmodat() when deciding whether or not
# os.chmod supports follow_symlinks. Just checking lchmod() is
# sufficient. After all--if you have a working fchmodat(), your
# lchmod() almost certainly works too.
#
# _add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_LCHFLAGS", "chflags")
_add("HAVE_LCHMOD", "chmod")
if _exists("lchown"): # mac os x10.3
_add("HAVE_LCHOWN", "chown")
_add("HAVE_LINKAT", "link")
_add("HAVE_LUTIMES", "utime")
_add("HAVE_LSTAT", "stat")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_UTIMENSAT", "utime")
_add("MS_WINDOWS", "stat")
supports_follow_symlinks = _set
del _set
del _have_functions
del _globals
del _add
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works like
mkdir, except that any intermediate path segment (not just the rightmost)
will be created if it does not exist. If the target directory already
exists, raise an OSError if exist_ok is False. Otherwise no exception is
raised. This is recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode, exist_ok)
except FileExistsError:
# be happy if someone already created the path
pass
cdir = curdir
if isinstance(tail, bytes):
cdir = bytes(curdir, 'ASCII')
if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
mkdir(name, mode)
except OSError as e:
if not exist_ok or e.errno != errno.EEXIST or not path.isdir(name):
raise
def removedirs(name):
"""removedirs(name)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except OSError:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except OSError:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune the
search, or to impose a specific order of visiting. Modifying dirnames when
topdown is false is ineffective, since the directories in dirnames have
already been generated by the time dirnames itself is generated. No matter
the value of topdown, the list of subdirectories is retrieved before the
tuples for the directory and its subdirectories are generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an OSError instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
islink, join, isdir = path.islink, path.join, path.isdir
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir is global in this module due
# to earlier import-*.
names = listdir(top)
except OSError as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
yield from walk(new_path, topdown, onerror, followlinks)
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:
def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
"""Directory tree generator.
This behaves exactly like walk(), except that it yields a 4-tuple
dirpath, dirnames, filenames, dirfd
`dirpath`, `dirnames` and `filenames` are identical to walk() output,
and `dirfd` is a file descriptor referring to the directory `dirpath`.
The advantage of fwalk() over walk() is that it's safe against symlink
races (when follow_symlinks is False).
If dir_fd is not None, it should be a file descriptor open to a directory,
and top should be relative; top will then be relative to that directory.
(dir_fd is always supported for fwalk.)
Caution:
Since fwalk() yields file descriptors, those are only valid until the
next iteration step, so you should dup() them if you want to keep them
for a longer period.
Example:
import os
for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
topfd = open(top, O_RDONLY, dir_fd=dir_fd)
try:
if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
path.samestat(orig_st, stat(topfd)))):
yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
finally:
close(topfd)
def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
# Note: This uses O(depth of the directory tree) file descriptors: if
# necessary, it can be adapted to only require O(1) FDs, see issue
# #13734.
names = listdir(topfd)
dirs, nondirs = [], []
for name in names:
try:
# Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
# walk() which reports symlinks to directories as directories.
# We do however check for symlinks before recursing into
# a subdirectory.
if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
dirs.append(name)
else:
nondirs.append(name)
except FileNotFoundError:
try:
# Add dangling symlinks, ignore disappeared files
if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
.st_mode):
nondirs.append(name)
except FileNotFoundError:
continue
if topdown:
yield toppath, dirs, nondirs, topfd
for name in dirs:
try:
orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
dirfd = open(name, O_RDONLY, dir_fd=topfd)
except OSError as err:
if onerror is not None:
onerror(err)
return
try:
if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
dirpath = path.join(toppath, name)
yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
finally:
close(dirfd)
if not topdown:
yield toppath, dirs, nondirs, topfd
__all__.append("fwalk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
exec_func = execve
argrest = (args, env)
else:
exec_func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
exec_func(file, *argrest)
return
last_exc = saved_exc = None
saved_tb = None
path_list = get_exec_path(env)
if name != 'nt':
file = fsencode(file)
path_list = map(fsencode, path_list)
for dir in path_list:
fullname = path.join(dir, file)
try:
exec_func(fullname, *argrest)
except OSError as e:
last_exc = e
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise saved_exc.with_traceback(saved_tb)
raise last_exc.with_traceback(tb)
def get_exec_path(env=None):
"""Returns the sequence of directories that will be searched for the
named executable (similar to a shell) when launching a process.
*env* must be an environment variable dict or None. If *env* is None,
os.environ will be used.
"""
# Use a local import instead of a global import to limit the number of
# modules loaded at startup: the os module is always loaded at startup by
# Python. It may also avoid a bootstrap issue.
import warnings
if env is None:
env = environ
# {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
# BytesWarning when using python -b or python -bb: ignore the warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
try:
path_list = env.get('PATH')
except TypeError:
path_list = None
if supports_bytes_environ:
try:
path_listb = env[b'PATH']
except (KeyError, TypeError):
pass
else:
if path_list is not None:
raise ValueError(
"env cannot contain 'PATH' and b'PATH' keys")
path_list = path_listb
if path_list is not None and isinstance(path_list, bytes):
path_list = fsdecode(path_list)
if path_list is None:
path_list = defpath
return path_list.split(pathsep)
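# Illustrative call (comment only): get_exec_path() splits the PATH value on
# os.pathsep and falls back to os.defpath when PATH is unset.
#
#     os.get_exec_path({"PATH": "/usr/local/bin:/usr/bin"})
#     # -> ['/usr/local/bin', '/usr/bin'] on POSIX (';' is the separator on Windows)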
# Change environ to automatically call putenv() and unsetenv() if they exist.
from _collections_abc import MutableMapping
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
try:
value = self._data[self.encodekey(key)]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
self.unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
def __iter__(self):
for key in self._data:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
try:
_putenv = putenv
except NameError:
_putenv = lambda key, value: None
else:
if "putenv" not in __all__:
__all__.append("putenv")
try:
_unsetenv = unsetenv
except NameError:
_unsetenv = lambda key: _putenv(key, "")
else:
if "unsetenv" not in __all__:
__all__.append("unsetenv")
def _createenviron():
if name == 'nt':
# Where Env Var Names Must Be UPPERCASE
def check_str(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value
encode = check_str
decode = str
def encodekey(key):
return encode(key).upper()
data = {}
for key, value in environ.items():
data[encodekey(key)] = value
else:
# Where Env Var Names Can Be Mixed Case
encoding = sys.getfilesystemencoding()
def encode(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value.encode(encoding, 'surrogateescape')
def decode(value):
return value.decode(encoding, 'surrogateescape')
encodekey = encode
data = environ
return _Environ(data,
encodekey, decode,
encode, decode,
_putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return environ.get(key, default)
supports_bytes_environ = (name != 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
def _check_bytes(value):
if not isinstance(value, bytes):
raise TypeError("bytes expected, not %s" % type(value).__name__)
return value
# bytes environ
environb = _Environ(environ._data,
_check_bytes, bytes,
_check_bytes, bytes,
_putenv, _unsetenv)
del _check_bytes
def getenvb(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are bytes."""
return environb.get(key, default)
__all__.extend(("environb", "getenvb"))
def _fscodec():
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
errors = 'strict'
else:
errors = 'surrogateescape'
def fsencode(filename):
"""
Encode filename to the filesystem encoding with 'surrogateescape' error
handler, return bytes unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
def fsdecode(filename):
"""
Decode filename from the filesystem encoding with 'surrogateescape' error
handler, return str unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, str):
return filename
elif isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
return fsencode, fsdecode
fsencode, fsdecode = _fscodec()
del _fscodec
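# Illustrative round trip (comment only): fsencode()/fsdecode() use the
# filesystem encoding with the 'surrogateescape' handler, so the
# str -> bytes -> str round trip preserves any name.
#
#     os.fsencode("café")           # -> b'caf\xc3\xa9' on a UTF-8 system
#     os.fsdecode(b'caf\xc3\xa9')   # -> 'café'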
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
__all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise OSError("Not stopped, signaled or exited???")
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
__all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"])
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnl", "spawnle"])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnlp", "spawnlpe"])
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
if mode not in ("r", "w"):
raise ValueError("invalid mode %r" % mode)
if buffering == 0 or buffering is None:
raise ValueError("popen() does not support unbuffered streams")
import subprocess, io
if mode == "r":
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
else:
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
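# Illustrative usage (comment only): popen() wraps subprocess.Popen with a
# text stream whose close() reports the exit status, and _wrap_close makes it
# usable as a context manager.
#
#     with os.popen("echo hello") as p:   # runs through the shell
#         data = p.read()                 # -> "hello\n"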
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
if not isinstance(fd, int):
raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
import io
return io.open(fd, *args, **kwargs)
| lgpl-3.0 |
asen6/amartyasenguptadotcom | django/core/handlers/base.py | 55 | 11479 | import sys
from django import http
from django.core import signals
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
from django.utils.log import getLogger
logger = getLogger('django.request')
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.fix_location_header,
http.conditional_content_removal,
http.fix_IE_for_attach,
http.fix_IE_for_vary,
]
def __init__(self):
self._request_middleware = self._view_middleware = self._response_middleware = self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__).
"""
from django.conf import settings
from django.core import exceptions
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
try:
mw_module, mw_classname = middleware_path.rsplit('.', 1)
except ValueError:
raise exceptions.ImproperlyConfigured('%s isn\'t a middleware module' % middleware_path)
try:
mod = import_module(mw_module)
except ImportError, e:
raise exceptions.ImproperlyConfigured('Error importing middleware %s: "%s"' % (mw_module, e))
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured('Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname))
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
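# Illustrative settings fragment (comment only; the class names are examples
# of stock Django middleware) showing what load_middleware() iterates over:
#
#     MIDDLEWARE_CLASSES = (
#         'django.middleware.common.CommonMiddleware',
#         'django.contrib.sessions.middleware.SessionMiddleware',
#     )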
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
from django.core import exceptions, urlresolvers
from django.conf import settings
try:
# Set up the default URL resolver for this thread. This code is outside
# the try/except so we don't get a spurious "unbound local
# variable" exception in the event an exception is raised before
# resolver is set.
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
try:
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if response is None:
if hasattr(request, "urlconf"):
# Reset url resolver with a custom urlconf.
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
callback, callback_args, callback_kwargs = resolver.resolve(
request.path_info)
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if response is None:
try:
response = callback(request, *callback_args, **callback_kwargs)
except Exception, e:
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
# response, use that. Otherwise, reraise the exception.
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
break
if response is None:
raise
# Complain if the view returned None (a common error).
if response is None:
try:
view_name = callback.func_name # If it's a function
except AttributeError:
view_name = callback.__class__.__name__ + '.__call__' # If it's a class
raise ValueError("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name))
# If the response supports deferred rendering, apply template
# response middleware and then render the response
if hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
response = response.render()
except http.Http404, e:
logger.warning('Not Found: %s' % request.path,
extra={
'status_code': 404,
'request': request
})
if settings.DEBUG:
from django.views import debug
response = debug.technical_404_response(request, e)
else:
try:
callback, param_dict = resolver.resolve404()
response = callback(request, **param_dict)
except:
try:
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
except exceptions.PermissionDenied:
logger.warning('Forbidden (Permission denied): %s' % request.path,
extra={
'status_code': 403,
'request': request
})
response = http.HttpResponseForbidden('<h1>Permission denied</h1>')
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except: # Handle everything else, including SuspiciousOperation, etc.
# Get the exception info now, in case another exception is thrown later.
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
# Reset URLconf for this thread on the way out for complete
# isolation of request.urlconf
urlresolvers.set_urlconf(None)
try:
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
except: # Any exception should be gathered and handled
receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
return response
def handle_uncaught_exception(self, request, resolver, exc_info):
"""
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
"""
from django.conf import settings
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
if settings.DEBUG:
from django.views import debug
return debug.technical_500_response(request, *exc_info)
logger.error('Internal Server Error: %s' % request.path,
exc_info=exc_info,
extra={
'status_code': 500,
'request':request
}
)
# If Http500 handler is not installed, re-raise last exception
if resolver.urlconf_module is None:
raise exc_info[1], None, exc_info[2]
# Return an HttpResponse that displays a friendly error message.
callback, param_dict = resolver.resolve500()
return callback(request, **param_dict)
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless DJANGO_USE_POST_REWRITE is set (to
anything).
"""
from django.conf import settings
if settings.FORCE_SCRIPT_NAME is not None:
return force_unicode(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = environ.get('SCRIPT_URL', u'')
if not script_url:
script_url = environ.get('REDIRECT_URL', u'')
if script_url:
return force_unicode(script_url[:-len(environ.get('PATH_INFO', ''))])
return force_unicode(environ.get('SCRIPT_NAME', u''))
| bsd-3-clause |
sivel/ansible-modules-extras | cloud/amazon/ecs_task.py | 23 | 11892 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ecs_task
short_description: run, start or stop a task in ecs
description:
- Creates or deletes instances of task definitions.
version_added: "2.0"
author: Mark Chance(@Java1Guy)
requirements: [ json, boto, botocore, boto3 ]
options:
operation:
description:
- Which task operation to execute
required: True
choices: ['run', 'start', 'stop']
cluster:
description:
- The name of the cluster to run the task on
required: False
task_definition:
description:
- The task definition to start or run
required: False
overrides:
description:
- A dictionary of values to pass to the new instances
required: False
count:
description:
- How many new instances to start
required: False
task:
description:
- The task to stop
required: False
container_instances:
description:
- The list of container instances on which to deploy the task
required: False
started_by:
description:
- A value showing who or what started the task (for informational purposes)
required: False
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple example of run task
- name: Run task
ecs_task:
operation: run
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
count: 1
started_by: ansible_user
register: task_output
# Simple example of start task
- name: Start a task
ecs_task:
operation: start
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
container_instances:
- arn:aws:ecs:us-west-2:172139249013:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8
started_by: ansible_user
register: task_output
- name: Stop a task
ecs_task:
operation: stop
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
'''
RETURN = '''
task:
description: details about the task that was started
returned: success
type: complex
contains:
taskArn:
description: The Amazon Resource Name (ARN) that identifies the task.
returned: always
type: string
clusterArn:
description: The Amazon Resource Name (ARN) of the cluster that hosts the task.
returned: only when details is true
type: string
taskDefinitionArn:
description: The Amazon Resource Name (ARN) of the task definition.
returned: only when details is true
type: string
containerInstanceArn:
description: The Amazon Resource Name (ARN) of the container instance hosting the task.
returned: only when details is true
type: string
overrides:
description: The container overrides set for this task.
returned: only when details is true
type: list of complex
lastStatus:
description: The last recorded status of the task.
returned: only when details is true
type: string
desiredStatus:
description: The desired status of the task.
returned: only when details is true
type: string
containers:
description: The container details.
returned: only when details is true
type: list of complex
startedBy:
description: The user who started the task.
returned: only when details is true
type: string
stoppedReason:
description: The reason why the task was stopped.
returned: only when details is true
type: string
createdAt:
description: The timestamp of when the task was created.
returned: only when details is true
type: string
startedAt:
description: The timestamp of when the task was started.
returned: only when details is true
type: string
stoppedAt:
description: The timestamp of when the task was stopped.
returned: only when details is true
type: string
'''
try:
import boto
import botocore
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
class EcsExecManager:
"""Handles ECS Tasks"""
def __init__(self, module):
self.module = module
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg="Can't authorize connection - %s " % str(e))
def list_tasks(self, cluster_name, service_name, status):
response = self.ecs.list_tasks(
cluster=cluster_name,
family=service_name,
desiredStatus=status
)
if len(response['taskArns'])>0:
for c in response['taskArns']:
if c.endswith(service_name):
return c
return None
def run_task(self, cluster, task_definition, overrides, count, startedBy):
if overrides is None:
overrides = dict()
response = self.ecs.run_task(
cluster=cluster,
taskDefinition=task_definition,
overrides=overrides,
count=count,
startedBy=startedBy)
# include tasks and failures
return response['tasks']
def start_task(self, cluster, task_definition, overrides, container_instances, startedBy):
args = dict()
if cluster:
args['cluster'] = cluster
if task_definition:
args['taskDefinition']=task_definition
if overrides:
args['overrides']=overrides
if container_instances:
args['containerInstances']=container_instances
if startedBy:
args['startedBy']=startedBy
response = self.ecs.start_task(**args)
# include tasks and failures
return response['tasks']
def stop_task(self, cluster, task):
response = self.ecs.stop_task(cluster=cluster, task=task)
return response['task']
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
operation=dict(required=True, choices=['run', 'start', 'stop'] ),
cluster=dict(required=False, type='str' ), # R S P
task_definition=dict(required=False, type='str' ), # R* S*
overrides=dict(required=False, type='dict'), # R S
count=dict(required=False, type='int' ), # R
task=dict(required=False, type='str' ), # P*
container_instances=dict(required=False, type='list'), # S*
started_by=dict(required=False, type='str' ) # R S
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
# Validate Requirements
if not HAS_BOTO:
module.fail_json(msg='boto is required.')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
# Validate Inputs
if module.params['operation'] == 'run':
if not 'task_definition' in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To run a task, a task_definition must be specified")
task_to_list = module.params['task_definition']
status_type = "RUNNING"
if module.params['operation'] == 'start':
if not 'task_definition' in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To start a task, a task_definition must be specified")
if not 'container_instances' in module.params and module.params['container_instances'] is None:
module.fail_json(msg="To start a task, container instances must be specified")
task_to_list = module.params['task']
status_type = "RUNNING"
if module.params['operation'] == 'stop':
if not 'task' in module.params and module.params['task'] is None:
module.fail_json(msg="To stop a task, a task must be specified")
if not 'task_definition' in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To stop a task, a task definition must be specified")
task_to_list = module.params['task_definition']
status_type = "STOPPED"
service_mgr = EcsExecManager(module)
existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)
results = dict(changed=False)
if module.params['operation'] == 'run':
if existing:
# TBD - validate the rest of the details
results['task']=existing
else:
if not module.check_mode:
results['task'] = service_mgr.run_task(
module.params['cluster'],
module.params['task_definition'],
module.params['overrides'],
module.params['count'],
module.params['started_by'])
results['changed'] = True
elif module.params['operation'] == 'start':
if existing:
# TBD - validate the rest of the details
results['task']=existing
else:
if not module.check_mode:
results['task'] = service_mgr.start_task(
module.params['cluster'],
module.params['task_definition'],
module.params['overrides'],
module.params['container_instances'],
module.params['started_by']
)
results['changed'] = True
elif module.params['operation'] == 'stop':
if existing:
results['task']=existing
else:
if not module.check_mode:
# the task is not yet stopped, so stop it and mark changed.
# return info about the stopped task
results['task'] = service_mgr.stop_task(
module.params['cluster'],
module.params['task']
)
results['changed'] = True
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
glwu/python-for-android | python3-alpha/extra_modules/gdata/tlslite/utils/jython_compat.py | 46 | 5352 | """Miscellaneous functions to mask Python/Jython differences."""
import os
import sha
if os.name != "java":
BaseException = Exception
from sets import Set
import array
import math
def createByteArraySequence(seq):
return array.array('B', seq)
def createByteArrayZeros(howMany):
return array.array('B', [0] * howMany)
def concatArrays(a1, a2):
return a1+a2
def bytesToString(bytes):
return bytes.tostring()
def stringToBytes(s):
bytes = createByteArrayZeros(0)
bytes.fromstring(s)
return bytes
def numBits(n):
if n==0:
return 0
return int(math.floor(math.log(n, 2))+1)
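# Worked examples (comment only): numBits(n) is floor(log2(n)) + 1, i.e. the
# bit length of n, with numBits(0) defined as 0.
#
#     numBits(1) == 1, numBits(5) == 3 (0b101), numBits(256) == 9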
class CertChainBase: pass
class SelfTestBase: pass
class ReportFuncBase: pass
#Helper functions for working with sets (from Python 2.3)
def iterSet(set):
return iter(set)
def getListFromSet(set):
return list(set)
#Factory function for getting a SHA1 object
def getSHA1(s):
return sha.sha(s)
import sys
import traceback
def formatExceptionTrace(e):
newStr = "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]))
return newStr
else:
#Jython 2.1 is missing lots of python 2.3 stuff,
#which we have to emulate here:
import java
import jarray
BaseException = java.lang.Exception
def createByteArraySequence(seq):
if isinstance(seq, type("")): #If it's a string, convert
seq = [ord(c) for c in seq]
return jarray.array(seq, 'h') #use short instead of bytes, cause bytes are signed
def createByteArrayZeros(howMany):
return jarray.zeros(howMany, 'h') #use short instead of bytes, cause bytes are signed
def concatArrays(a1, a2):
l = list(a1)+list(a2)
return createByteArraySequence(l)
#WAY TOO SLOW - MUST BE REPLACED------------
def bytesToString(bytes):
return "".join([chr(b) for b in bytes])
def stringToBytes(s):
bytes = createByteArrayZeros(len(s))
for count, c in enumerate(s):
bytes[count] = ord(c)
return bytes
#WAY TOO SLOW - MUST BE REPLACED------------
def numBits(n):
if n==0:
return 0
n= 1 * n; #convert to long, if it isn't already
return n.__tojava__(java.math.BigInteger).bitLength()
#This properly creates static methods for Jython
class staticmethod:
def __init__(self, anycallable): self.__call__ = anycallable
#Properties are not supported for Jython
class property:
def __init__(self, anycallable): pass
#True and False have to be specially defined
#False = 0
#True = 1
class StopIteration(Exception): pass
def enumerate(collection):
return list(zip(list(range(len(collection))), collection))
class Set:
def __init__(self, seq=None):
self.values = {}
if seq:
for e in seq:
self.values[e] = None
def add(self, e):
self.values[e] = None
def discard(self, e):
if e in list(self.values.keys()):
del(self.values[e])
def union(self, s):
ret = Set()
for e in list(self.values.keys()):
ret.values[e] = None
for e in list(s.values.keys()):
ret.values[e] = None
return ret
def issubset(self, other):
for e in list(self.values.keys()):
if e not in list(other.values.keys()):
return False
return True
def __bool__( self):
return len(list(self.values.keys()))
def __contains__(self, e):
return e in list(self.values.keys())
def iterSet(set):
return list(set.values.keys())
def getListFromSet(set):
return list(set.values.keys())
"""
class JCE_SHA1:
def __init__(self, s=None):
self.md = java.security.MessageDigest.getInstance("SHA1")
if s:
self.update(s)
def update(self, s):
self.md.update(s)
def copy(self):
sha1 = JCE_SHA1()
sha1.md = self.md.clone()
return sha1
def digest(self):
digest = self.md.digest()
bytes = jarray.zeros(20, 'h')
for count in xrange(20):
x = digest[count]
if x < 0: x += 256
bytes[count] = x
return bytes
"""
#Factory function for getting a SHA1 object
#The JCE_SHA1 class is way too slow...
#the sha.sha object we use instead is broken in the jython 2.1
#release, and needs to be patched
def getSHA1(s):
#return JCE_SHA1(s)
return sha.sha(s)
#Adjust the string to an array of bytes
def stringToJavaByteArray(s):
bytes = jarray.zeros(len(s), 'b')
for count, c in enumerate(s):
x = ord(c)
if x >= 128: x -= 256
bytes[count] = x
return bytes
import sys
import traceback
def formatExceptionTrace(e):
newStr = "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]))
return newStr
| apache-2.0 |
PhenomanSolutions/formula1 | node_modules/node-gyp/gyp/pylib/gyp/input.py | 578 | 116086 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = [
'executable',
'shared_library',
'loadable_module',
'mac_kernel_extension',
]
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
# If section ends in one of the '=+?!' characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section and section[-1:] in '=+?!':
section = section[:-1]
if section in path_sections:
return True
# Sections matching the regexp '_(dir|file|path)s?$' are also
# considered PathSections. Using manual string matching since that
# is much faster than the regexp and this can be called hundreds of
# thousands of times so micro performance matters.
if "_" in section:
tail = section[-6:]
if tail[-1] == 's':
tail = tail[:-1]
if tail[-5:] in ('_file', '_path'):
return True
return tail[-4:] == '_dir'
return False
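# Worked examples (comment only) of the rules above, assuming the base path
# sections have been merged into path_sections:
#
#     IsPathSection('include_dirs')   # True via the _dir/_file/_path suffix rule
#     IsPathSection('sources!')       # True: '!' is stripped, then 'sources' matches
#     IsPathSection('defines')        # False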
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], [])
def CheckNode(node, keypath):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise GypError("Key '" + key + "' repeated at level " +
repr(len(keypath) + 1) + " with key path '" +
'.'.join(keypath) + "'")
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(c[n + 1], kp)
return dict
elif isinstance(node, List):
c = node.getChildren()
children = []
for index, child in enumerate(c):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
"': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
# Open the build file for read ('r') with universal-newlines mode ('U')
# to make sure platform specific newlines ('\r\n' or '\r') are converted to '\n'
# which otherwise will fail eval()
build_file_contents = open(build_file_path, 'rU').read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
includes, check):
includes_list = []
if includes != None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
if not 'included' in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, None, False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
if type(v) is dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
None, check)
elif type(v) is list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
for item in sublist:
if type(item) is dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
None, check)
elif type(item) is list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if 'toolset' in target and 'toolsets' not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
# Make sure this 'toolsets' definition is only processed once.
if 'toolsets' in target:
del target['toolsets']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = gyp.simple_copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if type(condition) is list:
for condition_dict in condition[1:]:
if type(condition_dict) is dict:
ProcessToolsetsInDict(condition_dict)
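# Illustrative expansion (comment only) of what ProcessToolsetsInDict() does
# when multiple_toolsets is enabled:
#
#     {'targets': [{'target_name': 'a', 'toolsets': ['target', 'host']}]}
#       -> targets: [{'target_name': 'a', 'toolset': 'host'},
#                    {'target_name': 'a', 'toolset': 'target'}]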
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check, load_dependencies):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d.replace('\\', '/')
# The 'target_build_files' key is only set when loading target build files in
# the non-parallel code path, where LoadTargetBuildFile is called
# recursively. In the parallel code path, we don't need to check whether the
# |build_file_path| has already been loaded, because the 'scheduled' set in
# ParallelState guarantees that we never load the same |build_file_path|
# twice.
if 'target_build_files' in data:
if build_file_path in data['target_build_files']:
# Already loaded.
return False
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise GypError(build_file_path + ' must not contain included_files key')
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
if 'targets' not in build_file_data:
raise GypError("Unable to find targets in build file %s" %
build_file_path)
index = 0
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = gyp.simple_copy.deepcopy(
build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(dependency, data, aux_data, variables,
includes, depth, check, load_dependencies)
except Exception, e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
build_file_path, variables,
includes, depth, check,
generator_input_info):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.iteritems():
globals()[key] = value
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(build_file_path, per_process_data,
per_process_aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
# We can safely pop the build_file_data from per_process_data because it
# will never be referenced by this process again, so we don't need to keep
# it in the cache.
build_file_data = per_process_data.pop(build_file_path)
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
build_file_data,
dependencies)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception, e:
print >>sys.stderr, 'Exception:', e
print >>sys.stderr, traceback.format_exc()
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
# The number of parallel calls outstanding; decremented when a response
# was received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data['target_build_files'].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
# Make copies of the build_files argument that we can modify while working.
parallel_state.dependencies = list(build_files)
parallel_state.scheduled = set(build_files)
parallel_state.pending = 0
parallel_state.data = data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
global_flags = {
'path_sections': globals()['path_sections'],
'non_configuration_keys': globals()['non_configuration_keys'],
'multiple_toolsets': globals()['multiple_toolsets']}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args = (global_flags, dependency,
variables, includes, depth, check, generator_input_info),
callback = parallel_state.LoadTargetBuildFileCallback)
except KeyboardInterrupt, e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
parallel_state.pool.close()
parallel_state.pool.join()
parallel_state.pool = None
if parallel_state.error:
sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS= set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
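# Illustrative sketch (not part of gyp): the hypothetical helper below is
# never called; it only records the results suggested by the comment above,
# assuming the function behaves as written.
def _ExampleFindEnclosingBracketGroup():
  # "<(foo <(bar)) blah": the group opens at index 1 and ends at index 13.
  assert FindEnclosingBracketGroup('<(foo <(bar)) blah') == (1, 13)
  # Unbalanced input yields the (-1, -1) sentinel.
  assert FindEnclosingBracketGroup('<(foo') == (-1, -1)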
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if '1' <= string[0] <= '9':
return string.isdigit()
return False
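# Illustrative sketch (not part of gyp): a hypothetical, never-called helper
# showing which strings IsStrCanonicalInt treats as canonical integers.
def _ExampleIsStrCanonicalInt():
  assert IsStrCanonicalInt('0')
  assert IsStrCanonicalInt('-12')
  assert not IsStrCanonicalInt('012')  # str(int('012')) == '12' != '012'
  assert not IsStrCanonicalInt('1.0')  # not an integer literal
  assert not IsStrCanonicalInt(7)      # only str instances qualify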
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
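# Illustrative sketch (not part of gyp): a hypothetical, never-called helper
# showing the named groups that early_variable_re produces for two common
# '<' expansions, assuming the regexps above.
def _ExampleEarlyVariableRe():
  m = early_variable_re.search('<(DEPTH)').groupdict()
  assert m['type'] == '<' and m['content'] == 'DEPTH' and not m['is_array']
  m = early_variable_re.search('<!@(python -c pass)').groupdict()
  assert m['type'] == '<!@'  # command expansion ('!') in list context ('@')
  assert m['content'] == 'python -c pass'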
# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
if type(cmd) is list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
return cmd
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
# Admittedly, this is different than the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
      # the expansion should result in a list. Note that the caller must be
      # prepared to receive a list in return, though not all callers are,
      # because not all are working in list context. Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '' and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
if file_list:
if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths['toplevel']
rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths['qualified_out_dir']
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once. The cache key contains the command to be
# run as well as the directory to run it from, to account for commands
# that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
              raise GypError("Error importing pymod_do_main "
                             "module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
assert replacement != None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
try:
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
except Exception, e:
raise GypError("%s while executing command '%s' in %s" %
(e, contents, build_file))
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d while in %s." %
(contents, p.returncode, build_file))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
                        contents, build_file_dir)
replacement = cached_value
else:
if not contents in variables:
if contents[-1] in ['!', '/']:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if type(replacement) is list:
for item in replacement:
if not contents[-1] == '/' and type(item) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif type(replacement) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
if output == input:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Found only identity matches on %r, avoiding infinite "
"recursion.",
output)
else:
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if type(output) is list:
if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if type(output) is list:
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
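# Illustrative sketch (not part of gyp): a hypothetical, never-called helper
# showing two "early" expansions against a hand-rolled variables dict. The
# build file argument is a dummy; in these code paths it is only used for
# error messages.
def _ExampleExpandVariables():
  variables = {'depth': '..', 'n': '3'}
  assert ExpandVariables('<(depth)/foo', PHASE_EARLY, variables,
                         'x.gyp') == '../foo'
  # Canonical integer strings come back as real ints.
  assert ExpandVariables('<(n)', PHASE_EARLY, variables, 'x.gyp') == 3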
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
"""Returns the dict that should be used or None if the result was
that nothing should be used."""
if type(condition) is not list:
raise GypError(conditions_key + ' must be a list')
if len(condition) < 2:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be at least length 2, not ' + str(len(condition)))
i = 0
result = None
while i < len(condition):
cond_expr = condition[i]
true_dict = condition[i + 1]
if type(true_dict) is not dict:
raise GypError('{} {} must be followed by a dictionary, not {}'.format(
conditions_key, cond_expr, type(true_dict)))
if len(condition) > i + 2 and type(condition[i + 2]) is dict:
false_dict = condition[i + 2]
i = i + 3
if i != len(condition):
raise GypError('{} {} has {} unexpected trailing items'.format(
conditions_key, cond_expr, len(condition) - i))
else:
false_dict = None
i = i + 2
if result == None:
result = EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file)
return result
def EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file):
"""Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise."""
  # Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if type(cond_expr_expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + cond_expr_expanded.__class__.__name__)
try:
if cond_expr_expanded in cached_conditions_asts:
ast_code = cached_conditions_asts[cond_expr_expanded]
else:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
cached_conditions_asts[cond_expr_expanded] = ast_code
if eval(ast_code, {'__builtins__': None}, variables):
return true_dict
return false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
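# Illustrative sketch (not part of gyp): a hypothetical, never-called helper
# showing how a single condition string is evaluated against a variables
# dict; the dicts here do not come from a real build file.
def _ExampleEvalSingleCondition():
  true_dict = {'defines': ['IS_LINUX']}
  false_dict = {'defines': []}
  picked = EvalSingleCondition('OS=="linux"', true_dict, false_dict,
                               PHASE_EARLY, {'OS': 'linux'}, 'x.gyp')
  assert picked is true_dict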
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
merge_dict = EvalCondition(condition, conditions_key, phase, variables,
build_file)
if merge_dict != None:
      # Expand variables and nested conditionals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
for key, value in the_dict.iteritems():
if type(value) in (str, int, list):
variables['_' + key] = value
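# Illustrative sketch (not part of gyp): a hypothetical, never-called helper
# showing how plain values become '_'-prefixed automatic variables.
def _ExampleLoadAutomaticVariablesFromDict():
  variables = {}
  LoadAutomaticVariablesFromDict(variables,
                                 {'target_name': 'foo', 'type': 'none'})
  assert variables == {'_target_name': 'foo', '_type': 'none'}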
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
# Any keys in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
for key, value in the_dict.get('variables', {}).iteritems():
if type(value) not in (str, int, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
      if the_dict_key == 'variables' and variable_name in the_dict:
# If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
# variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
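# Illustrative sketch (not part of gyp): a hypothetical, never-called helper
# showing that a '%' key only supplies a default and never overrides a
# variable that is already set.
def _ExampleLoadVariablesFromVariablesDict():
  variables = {'use_goma': 0}
  LoadVariablesFromVariablesDict(
      variables, {'variables': {'use_goma%': 1, 'arch': 'x64'}}, None)
  assert variables == {'use_goma': 0, 'arch': 'x64'}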
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
if type(expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key)
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or type(value) is str:
continue
if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif type(value) is not int:
raise TypeError('Unknown type ' + value.__class__.__name__ + \
' for ' + key)
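# Illustrative sketch (not part of gyp): a hypothetical, never-called helper
# tying the pieces above together on a tiny hand-written dict. It assumes
# MergeDicts' default behaviour of appending list values (defined later in
# this file).
def _ExampleProcessVariablesAndConditionsInDict():
  d = {'variables': {'use_foo%': 1},
       'conditions': [['use_foo==1', {'defines': ['HAVE_FOO']}]],
       'defines': ['ALWAYS']}
  ProcessVariablesAndConditionsInDict(d, PHASE_EARLY, {}, 'x.gyp')
  assert d['defines'] == ['ALWAYS', 'HAVE_FOO']
  assert 'conditions' not in d  # the section is consumed once evaluated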
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if type(item) is dict:
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif type(item) is list:
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif type(item) is str:
expanded = ExpandVariables(item, phase, variables, build_file)
if type(expanded) in (str, int):
the_list[index] = expanded
elif type(expanded) is list:
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
raise ValueError(
'Variable expansion in this context permits strings and ' + \
'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
              str(index))
elif type(item) is not int:
raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets
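# Illustrative sketch (not part of gyp): a hypothetical, never-called helper
# showing the 'build_file:target_name#toolset' keys produced via
# gyp.common.QualifiedTarget.
def _ExampleBuildTargetsDict():
  data = {'target_build_files': set(['a.gyp']),
          'a.gyp': {'targets': [{'target_name': 'foo', 'toolset': 'target'}]}}
  targets = BuildTargetsDict(data)
  assert targets['a.gyp:foo#target']['target_name'] == 'foo'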
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
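# Illustrative sketch (not part of gyp): a hypothetical, never-called helper
# showing that Unify keeps the first occurrence of each element and Filter
# drops a single value, both preserving order.
def _ExampleUnifyAndFilter():
  assert Unify(['a', 'b', 'a', 'c', 'b']) == ['a', 'b', 'c']
  assert Filter(['a', 'b', 'a', 'c'], 'a') == ['b', 'c']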
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
"""Remove dependencies having the 'link_dependency' attribute from the 'none'
targets."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if target_dict.get('type', None) == 'none':
if targets[t].get('variables', {}).get('link_dependency', 0):
target_dict[dependency_key] = \
Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return list(flat_list)
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
if dependencies == None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref != None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
This method is not terribly useful on its own, it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
if dependencies == None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
    dependencies whose settings a direct dependency has advertised should be
    exported through it (via 'export_dependent_settings').
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependency.DeepDependencies(dependencies)
dependencies.add(dependency.ref)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables, mac kernel extensions and loadable modules are already fully
# and finally linked. Nothing else can be a link dependency of them, there
# can only be dependencies in the sense that a dependent target might run
# an executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module',
'mac_kernel_extension'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
      # this linkable target. Always look at dependencies of the initial
# target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
dependencies = spec.get('dependencies')
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError("Dependency '%s' not found while "
"trying to load target %s" % (dependency, target))
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(targets):
if not root_node.dependents:
# If all targets have dependencies, add the first target as a dependent
# of root_node so that the cycle can be discovered from root_node.
target = targets.keys()[0]
target_node = dependency_nodes[target]
target_node.dependencies.append(root_node)
root_node.dependents.append(target_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
return [dependency_nodes, flat_list]
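# Illustrative sketch (not part of gyp): a hypothetical, never-called helper
# showing that the flat list is topologically sorted, with dependencies
# ahead of their dependents. The target names are made up.
def _ExampleBuildDependencyList():
  lib, app = 'a.gyp:lib#target', 'a.gyp:app#target'
  targets = {lib: {'dependencies': []}, app: {'dependencies': [lib]}}
  dependency_nodes, flat_list = BuildDependencyList(targets)
  assert flat_list.index(lib) < flat_list.index(app)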
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
        raise GypError("Dependency '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
if not root_node.dependents:
# If all files have dependencies, add the first file as a dependent
# of root_node so that the cycle can be discovered from root_node.
file_node = dependency_nodes.values()[0]
file_node.dependencies.append(root_node)
root_node.dependents.append(file_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = \
dependency_nodes[target].DependenciesForLinkSettings(targets)
else:
raise GypError("DoDependentSettings doesn't know how to determine "
'dependencies for ' + key)
for dependency in dependencies:
dependency_dict = targets[dependency]
if not key in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
sort_dependencies):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
# if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
if not 'dependencies' in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (dependency_dict['type'] == 'static_library' and \
not dependency_dict.get('hard_dependency', False)) or \
(dependency_dict['type'] != 'static_library' and \
not dependency in target_dict['dependencies']):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict['dependencies'] = dependencies
else:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = \
dependency_nodes[target].DependenciesToLinkAgainst(targets)
for dependency in link_dependencies:
if dependency == target:
continue
if not 'dependencies' in target_dict:
target_dict['dependencies'] = []
if not dependency in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and 'dependencies' in target_dict:
target_dict['dependencies'] = [dep for dep in reversed(flat_list)
if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
# "/' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1] == '/':
ret += '/'
return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
# The Python documentation recommends that objects which do not support
# hashing set __hash__ to None. Python library objects follow this rule.
is_hashable = lambda val: val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if type(item) in (str, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not (type(item) is str and item.startswith('-')):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
elif type(item) is dict:
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif type(item) is list:
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError(
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__)
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
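# Illustrative example (hypothetical values): appending ['a', '-la'] into
# ['a'] yields ['a', '-la'], because the singleton 'a' is already present
# while '-la' (it begins with '-') is never treated as a singleton.
# Prepending ['a'] into ['b', 'a'] yields ['a', 'b']: the existing
# singleton is removed and re-inserted at the front.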
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.iteritems():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if type(v) in (str, int):
if type(to[k]) not in (str, int):
bad_merge = True
elif type(v) is not type(to[k]):
bad_merge = True
if bad_merge:
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k)
if type(v) in (str, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif type(v) is dict:
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif type(v) is list:
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has ...the to-list will have this action
# this character appended:... applied when receiving the from-list:
# = replace
# + prepend
# ? set, only if to-list does not yet exist
# (none) append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
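# Illustrative example (hypothetical key names, applied one at a time):
# when |to| already holds 'defines': ['B'], merging 'defines+': ['A']
# leaves ['A', 'B'] (prepend), 'defines=': ['A'] leaves ['A'] (replace),
# 'defines?': ['A'] leaves ['B'] (already set), and a plain 'defines'
# key appends, leaving ['B', 'A'].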
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
# Replacing and appending simultaneously makes no sense, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError('Incompatible list policies ' + k + ' and ' +
list_incompatible)
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
elif type(to[list_base]) is not list:
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
' for key ' + list_base + '(' + k + ')')
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
# MergeLists can recurse back into MergeDicts, but only to make copies
# of dicts (with paths fixed); there will be no subsequent dict
# "merging" once a list has been entered, because lists are always
# replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError(
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
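# Merges the named configuration and everything it inherits from (via
# 'inherit_from') into new_configuration_dict, parents first.
# Illustrative example (hypothetical names): if 'Debug' inherits from
# 'Common', 'Common' is merged first and 'Debug' second, so scalar
# settings in 'Debug' override inherited ones, while lists are merged
# according to their key's policy suffix.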
# Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for (i, config) in target_dict['configurations'].iteritems()
if not config.get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
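# Illustrative example (hypothetical names): with configurations
# {'Common': {'abstract': 1}, 'Debug': {}, 'Release': {}} and no explicit
# default, the default becomes 'Debug', the first concrete configuration
# name in sorted order.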
merged_configurations = {}
configs = target_dict['configurations']
for (configuration, old_configuration_dict) in configs.iteritems():
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = {}
for (key, target_val) in target_dict.iteritems():
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
merged_configurations[configuration] = new_configuration_dict
# Put the new configurations back into the target dict as a configuration.
for configuration in merged_configurations.keys():
target_dict['configurations'][configuration] = (
merged_configurations[configuration])
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict['configurations'].keys():
configuration_dict = target_dict['configurations'][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError('%s not allowed in the %s configuration, found in '
'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example, would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
# them into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if type(value) is not list:
raise ValueError(name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation])
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
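# Illustrative example (hypothetical file names): with the_list
# ['a.cc', 'b.cc', 'c.cc'], an exclusion list ['b.cc'] first marks index
# 1 with 0, and a later regex filter ['include', 'b'] flips it back to 1,
# so nothing ends up in the excluded list.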
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError('Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key)
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
raise GypError(name + ' key ' + excluded_key +
' must not be present prior '
' to applying exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if type(item) is dict:
ProcessListFiltersInDict(name, item)
elif type(item) is list:
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = ('executable', 'loadable_module',
'static_library', 'shared_library',
'mac_kernel_extension', 'none')
target_type = target_dict.get('type', None)
if target_type not in VALID_TARGET_TYPES:
raise GypError("Target %s has an invalid target type '%s'. "
"Must be one of %s." %
(target, target_type, '/'.join(VALID_TARGET_TYPES)))
if (target_dict.get('standalone_static_library', 0) and
not target_type == 'static_library'):
raise GypError('Target %s has type %s but standalone_static_library flag is'
' only valid for static_library type.' % (target,
target_type))
def ValidateSourcesInTarget(target, target_dict, build_file,
duplicate_basename_check):
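# Checks static_library targets for compiled sources that share a
# basename. Illustrative example (hypothetical paths): 'a/util.cc' and
# 'b/util.cc' in one static_library would both produce util.o, which
# libtool on Mac cannot handle, so the target is rejected unless
# --no-duplicate-basename-check is passed.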
if not duplicate_basename_check:
return
if target_dict.get('type', None) != 'static_library':
return
sources = target_dict.get('sources', [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
target + error + 'libtool on Mac cannot handle that. Use '
'--no-duplicate-basename-check to disable this validation.')
raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise GypError('rule %s exists in duplicate, target %s' %
(rule_name, target))
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension.startswith('.'):
rule_extension = rule_extension[1:]
if rule_extension in rule_extensions:
raise GypError(('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') %
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name))
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise GypError(
'rule_sources must not exist in input, target %s rule %s' %
(target, rule_name))
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == rule_extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if type(run_as) is not dict:
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if type(action) is not list:
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and type(working_directory) is not str:
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and type(environment) is not dict:
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
'''Validates the inputs to the actions in a target.'''
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise GypError("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', None)
if inputs is None:
raise GypError('Action in target %s has no inputs.' % target_name)
action_command = action.get('action')
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
# Use items instead of iteritems because there's no need to try to look at
# reinserted keys and their associated values.
for k, v in the_dict.items():
if type(v) is int:
v = str(v)
the_dict[k] = v
elif type(v) is dict:
TurnIntIntoStrInDict(v)
elif type(v) is list:
TurnIntIntoStrInList(v)
if type(k) is int:
del the_dict[k]
the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
if type(item) is int:
the_list[index] = str(item)
elif type(item) is dict:
TurnIntIntoStrInDict(item)
elif type(item) is list:
TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
data):
"""Return only the targets that are deep dependencies of |root_targets|."""
qualified_root_targets = []
for target in root_targets:
target = target.strip()
qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
if not qualified_targets:
raise GypError("Could not find target %s" % target)
qualified_root_targets.extend(qualified_targets)
wanted_targets = {}
for target in qualified_root_targets:
wanted_targets[target] = targets[target]
for dependency in dependency_nodes[target].DeepDependencies():
wanted_targets[dependency] = targets[dependency]
wanted_flat_list = [t for t in flat_list if t in wanted_targets]
# Prune unwanted targets from each build_file's data dict.
for build_file in data['target_build_files']:
if not 'targets' in data[build_file]:
continue
new_targets = []
for target in data[build_file]['targets']:
qualified_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if qualified_name in wanted_targets:
new_targets.append(target)
data[build_file]['targets'] = new_targets
return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.
Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
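# Illustrative example (hypothetical paths): 'chrome/a.gyp:base' and
# 'chrome/b.gyp:base' both map to the key 'chrome:base', so they collide
# even though they come from different .gyp files.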
used = {}
for target in targets:
# Separate out 'path/to/file.gyp', 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(':', 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = '.'
# Prepare a key like 'path/to:target_name'.
key = subdir + ':' + name
if key in used:
# Complain if this target is already used.
raise GypError('Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
used[key] = gyp
def SetGeneratorGlobals(generator_input_info):
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
path_sections = set(base_path_sections)
path_sections.update(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
global generator_filelist_paths
generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
circular_check, duplicate_basename_check, parallel, root_targets):
SetGeneratorGlobals(generator_input_info)
# A generator can have other lists (in addition to sources) be processed
# for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
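# Illustrative example (hypothetical files): after loading, data might
# map 'app/app.gyp' to that file's processed dict, included .gypi files
# to theirs, and data['target_build_files'] would contain 'app/app.gyp'
# but not the .gypi includes.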
data = {'target_build_files': set()}
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_files = set(map(os.path.normpath, build_files))
if parallel:
LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info)
else:
aux_data = {}
for build_file in build_files:
try:
LoadTargetBuildFile(build_file, data, aux_data,
variables, includes, depth, check, True)
except Exception, e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
# Remove all dependencies marked as 'link_dependency' from the targets of
# type 'none'.
RemoveLinkDependenciesFromNoneTargets(targets)
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.iteritems():
tmp_dict = {}
for key_base in dependency_sections:
for op in ('', '!', '/'):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]
# Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)
if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
if root_targets:
# Remove, from |targets| and |flat_list|, the targets that are not deep
# dependencies of the targets specified in |root_targets|.
targets, flat_list = PruneUnwantedTargets(
targets, flat_list, dependency_nodes, root_targets, data)
# Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)
# Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii['generator_wants_static_library_dependencies_adjusted']:
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
gii['generator_wants_sorted_dependencies'])
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
ValidateSourcesInTarget(target, target_dict, build_file,
duplicate_basename_check)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
| mit |
caktus/django-timepiece | timepiece/entries/admin.py | 3 | 2057 | from django.contrib import admin
from timepiece.entries.models import (
Activity, ActivityGroup, Entry, Location, ProjectHours)
class ActivityAdmin(admin.ModelAdmin):
model = Activity
list_display = ('code', 'name', 'billable')
list_filter = ('billable',)
class ActivityGroupAdmin(admin.ModelAdmin):
model = ActivityGroup
list_display = ('name',)
list_filter = ('activities',)
filter_horizontal = ('activities',)
class EntryAdmin(admin.ModelAdmin):
model = Entry
list_display = ('user', '_project', 'location', 'project_type',
'activity', 'start_time', 'end_time', 'hours',
'is_closed', 'is_paused')
list_filter = ['activity', 'project__type', 'user', 'project']
search_fields = ['user__first_name', 'user__last_name', 'project__name',
'activity__name', 'comments']
date_hierarchy = 'start_time'
ordering = ('-start_time',)
def project_type(self, entry):
return entry.project.type
def _project(self, obj):
"""Use a proxy to avoid an infinite loop from ordering."""
return obj.__str__()
_project.admin_order_field = 'project__name'
_project.short_description = 'Project'
class LocationAdmin(admin.ModelAdmin):
list_display = ('id', 'name')
class ProjectHoursAdmin(admin.ModelAdmin):
list_display = ('_user', '_project', 'week_start', 'hours', 'published')
def _user(self, obj):
return obj.user.get_name_or_username()
_user.short_description = 'User'
_user.admin_order_field = 'user__last_name'
def _project(self, obj):
"""Use a proxy to avoid an infinite loop from ordering."""
return obj.project.__str__()
_project.admin_order_field = 'project__name'
_project.short_description = 'Project'
admin.site.register(Activity, ActivityAdmin)
admin.site.register(ActivityGroup, ActivityGroupAdmin)
admin.site.register(Entry, EntryAdmin)
admin.site.register(Location, LocationAdmin)
admin.site.register(ProjectHours, ProjectHoursAdmin)
| mit |
rsivapr/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 7 | 7404 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import pylab as pl
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name,
train, test,
coverages, xgrid, ygrid):
"""
create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
points = dict(test=test, train=train)
for label, pts in points.iteritems():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=["bradypus_variegatus_0",
"microryzomys_minutus_0"]):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
pl.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
pl.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
pl.xticks([])
pl.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
pl.contourf(X, Y, Z, levels=levels, cmap=pl.cm.Reds)
pl.colorbar(format='%.2f')
# scatter training/testing points
pl.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
pl.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
pl.legend()
pl.title(species.name)
pl.axis('equal')
# Compute AUC w.r.t. background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
pl.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
pl.show()
| bsd-3-clause |
wuga214/Django-Wuga | env/lib/python2.7/site-packages/django/core/management/commands/loaddata.py | 67 | 13922 | from __future__ import unicode_literals
import glob
import gzip
import os
import warnings
import zipfile
from itertools import product
from django.apps import apps
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.core.management.utils import parse_apps_and_model_labels
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connections, router,
transaction,
)
from django.utils import lru_cache
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.glob import glob_escape
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
class Command(BaseCommand):
help = 'Installs the named fixture(s) in the database.'
missing_args_message = (
"No database fixture specified. Please provide the path of at least "
"one fixture in the command line."
)
def add_arguments(self, parser):
parser.add_argument('args', metavar='fixture', nargs='+', help='Fixture labels.')
parser.add_argument(
'--database', action='store', dest='database', default=DEFAULT_DB_ALIAS,
help='Nominates a specific database to load fixtures into. Defaults to the "default" database.',
)
parser.add_argument(
'--app', action='store', dest='app_label', default=None,
help='Only look for fixtures in the specified app.',
)
parser.add_argument(
'--ignorenonexistent', '-i', action='store_true', dest='ignore', default=False,
help='Ignores entries in the serialized data for fields that do not '
'currently exist on the model.',
)
parser.add_argument(
'-e', '--exclude', dest='exclude', action='append', default=[],
help='An app_label or app_label.ModelName to exclude. Can be used multiple times.',
)
def handle(self, *fixture_labels, **options):
self.ignore = options['ignore']
self.using = options['database']
self.app_label = options['app_label']
self.verbosity = options['verbosity']
self.excluded_models, self.excluded_apps = parse_apps_and_model_labels(options['exclude'])
with transaction.atomic(using=self.using):
self.loaddata(fixture_labels)
# Close the DB connection -- unless we're still in a transaction. This
# is required as a workaround for an edge case in MySQL: if the same
# connection is used to create tables, load data, and query, the query
# can return incorrect results. See Django #7572, MySQL #37735.
if transaction.get_autocommit(self.using):
connections[self.using].close()
def loaddata(self, fixture_labels):
connection = connections[self.using]
# Keep a count of the installed objects and fixtures
self.fixture_count = 0
self.loaded_object_count = 0
self.fixture_object_count = 0
self.models = set()
self.serialization_formats = serializers.get_public_serializer_formats()
# Forcing binary mode may be revisited after dropping Python 2 support (see #22399)
self.compression_formats = {
None: (open, 'rb'),
'gz': (gzip.GzipFile, 'rb'),
'zip': (SingleZipReader, 'r'),
}
if has_bz2:
self.compression_formats['bz2'] = (bz2.BZ2File, 'r')
# Django's test suite repeatedly tries to load initial_data fixtures
# from apps that don't have any fixtures. Because disabling constraint
# checks can be expensive on some database (especially MSSQL), bail
# out early if no fixtures are found.
for fixture_label in fixture_labels:
if self.find_fixtures(fixture_label):
break
else:
return
with connection.constraint_checks_disabled():
for fixture_label in fixture_labels:
self.load_label(fixture_label)
# Since we disabled constraint checks, we must manually check for
# any invalid keys that might have been added
table_names = [model._meta.db_table for model in self.models]
try:
connection.check_constraints(table_names=table_names)
except Exception as e:
e.args = ("Problem installing fixtures: %s" % e,)
raise
# If we found even one object in a fixture, we need to reset the
# database sequences.
if self.loaded_object_count > 0:
sequence_sql = connection.ops.sequence_reset_sql(no_style(), self.models)
if sequence_sql:
if self.verbosity >= 2:
self.stdout.write("Resetting sequences\n")
with connection.cursor() as cursor:
for line in sequence_sql:
cursor.execute(line)
if self.verbosity >= 1:
if self.fixture_object_count == self.loaded_object_count:
self.stdout.write(
"Installed %d object(s) from %d fixture(s)"
% (self.loaded_object_count, self.fixture_count)
)
else:
self.stdout.write(
"Installed %d object(s) (of %d) from %d fixture(s)"
% (self.loaded_object_count, self.fixture_object_count, self.fixture_count)
)
def load_label(self, fixture_label):
"""
Loads fixture files for a given label.
"""
show_progress = self.verbosity >= 3
for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):
_, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))
open_method, mode = self.compression_formats[cmp_fmt]
fixture = open_method(fixture_file, mode)
try:
self.fixture_count += 1
objects_in_fixture = 0
loaded_objects_in_fixture = 0
if self.verbosity >= 2:
self.stdout.write(
"Installing %s fixture '%s' from %s."
% (ser_fmt, fixture_name, humanize(fixture_dir))
)
objects = serializers.deserialize(
ser_fmt, fixture, using=self.using, ignorenonexistent=self.ignore,
)
for obj in objects:
objects_in_fixture += 1
if (obj.object._meta.app_config in self.excluded_apps or
type(obj.object) in self.excluded_models):
continue
if router.allow_migrate_model(self.using, obj.object.__class__):
loaded_objects_in_fixture += 1
self.models.add(obj.object.__class__)
try:
obj.save(using=self.using)
if show_progress:
self.stdout.write(
'\rProcessed %i object(s).' % loaded_objects_in_fixture,
ending=''
)
except (DatabaseError, IntegrityError) as e:
e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
'app_label': obj.object._meta.app_label,
'object_name': obj.object._meta.object_name,
'pk': obj.object.pk,
'error_msg': force_text(e)
},)
raise
if objects and show_progress:
self.stdout.write('') # add a newline after progress indicator
self.loaded_object_count += loaded_objects_in_fixture
self.fixture_object_count += objects_in_fixture
except Exception as e:
if not isinstance(e, CommandError):
e.args = ("Problem installing fixture '%s': %s" % (fixture_file, e),)
raise
finally:
fixture.close()
# Warn if the fixture we loaded contains 0 objects.
if objects_in_fixture == 0:
warnings.warn(
"No fixture data found for '%s'. (File format may be "
"invalid.)" % fixture_name,
RuntimeWarning
)
@lru_cache.lru_cache(maxsize=None)
def find_fixtures(self, fixture_label):
"""
Finds fixture files for a given label.
"""
fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label)
databases = [self.using, None]
cmp_fmts = list(self.compression_formats.keys()) if cmp_fmt is None else [cmp_fmt]
ser_fmts = serializers.get_public_serializer_formats() if ser_fmt is None else [ser_fmt]
if self.verbosity >= 2:
self.stdout.write("Loading '%s' fixtures..." % fixture_name)
if os.path.isabs(fixture_name):
fixture_dirs = [os.path.dirname(fixture_name)]
fixture_name = os.path.basename(fixture_name)
else:
fixture_dirs = self.fixture_dirs
if os.path.sep in os.path.normpath(fixture_name):
fixture_dirs = [os.path.join(dir_, os.path.dirname(fixture_name))
for dir_ in fixture_dirs]
fixture_name = os.path.basename(fixture_name)
suffixes = (
'.'.join(ext for ext in combo if ext)
for combo in product(databases, ser_fmts, cmp_fmts)
)
targets = set('.'.join((fixture_name, suffix)) for suffix in suffixes)
fixture_files = []
for fixture_dir in fixture_dirs:
if self.verbosity >= 2:
self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir))
fixture_files_in_dir = []
path = os.path.join(fixture_dir, fixture_name)
for candidate in glob.iglob(glob_escape(path) + '*'):
if os.path.basename(candidate) in targets:
# Save the fixture_dir and fixture_name for future error messages.
fixture_files_in_dir.append((candidate, fixture_dir, fixture_name))
if self.verbosity >= 2 and not fixture_files_in_dir:
self.stdout.write("No fixture '%s' in %s." %
(fixture_name, humanize(fixture_dir)))
# Check kept for backwards-compatibility; it isn't clear why
# duplicates are only allowed in different directories.
if len(fixture_files_in_dir) > 1:
raise CommandError(
"Multiple fixtures named '%s' in %s. Aborting." %
(fixture_name, humanize(fixture_dir)))
fixture_files.extend(fixture_files_in_dir)
if not fixture_files:
raise CommandError("No fixture named '%s' found." % fixture_name)
return fixture_files
@cached_property
def fixture_dirs(self):
"""
Return a list of fixture directories.
The list contains the 'fixtures' subdirectory of each installed
application, if it exists, the directories in FIXTURE_DIRS, and the
current directory.
"""
dirs = []
fixture_dirs = settings.FIXTURE_DIRS
if len(fixture_dirs) != len(set(fixture_dirs)):
raise ImproperlyConfigured("settings.FIXTURE_DIRS contains duplicates.")
for app_config in apps.get_app_configs():
app_label = app_config.label
app_dir = os.path.join(app_config.path, 'fixtures')
if app_dir in fixture_dirs:
raise ImproperlyConfigured(
"'%s' is a default fixture directory for the '%s' app "
"and cannot be listed in settings.FIXTURE_DIRS." % (app_dir, app_label)
)
if self.app_label and app_label != self.app_label:
continue
if os.path.isdir(app_dir):
dirs.append(app_dir)
dirs.extend(list(fixture_dirs))
dirs.append('')
dirs = [upath(os.path.abspath(os.path.realpath(d))) for d in dirs]
return dirs
def parse_name(self, fixture_name):
"""
Splits a fixture name into name, serialization format, and compression format.
"""
parts = fixture_name.rsplit('.', 2)
if len(parts) > 1 and parts[-1] in self.compression_formats:
cmp_fmt = parts[-1]
parts = parts[:-1]
else:
cmp_fmt = None
if len(parts) > 1:
if parts[-1] in self.serialization_formats:
ser_fmt = parts[-1]
parts = parts[:-1]
else:
raise CommandError(
"Problem installing fixture '%s': %s is not a known "
"serialization format." % (''.join(parts[:-1]), parts[-1]))
else:
ser_fmt = None
name = '.'.join(parts)
return name, ser_fmt, cmp_fmt
class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs):
zipfile.ZipFile.__init__(self, *args, **kwargs)
if len(self.namelist()) != 1:
raise ValueError("Zip-compressed fixtures must contain one file.")
def read(self):
return zipfile.ZipFile.read(self, self.namelist()[0])
def humanize(dirname):
return "'%s'" % dirname if dirname else 'absolute path'
| apache-2.0 |
richardnpaul/FWL-Website | lib/python2.7/site-packages/psycopg2/tests/test_types_basic.py | 30 | 16842 | #!/usr/bin/env python
#
# types_basic.py - tests for basic types conversions
#
# Copyright (C) 2004-2010 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import decimal
import sys
from functools import wraps
import testutils
from testutils import unittest, ConnectingTestCase, decorate_all_tests
import psycopg2
from psycopg2.extensions import b
class TypesBasicTests(ConnectingTestCase):
"""Test that all type conversions are working."""
def execute(self, *args):
curs = self.conn.cursor()
curs.execute(*args)
return curs.fetchone()[0]
def testQuoting(self):
s = "Quote'this\\! ''ok?''"
self.failUnless(self.execute("SELECT %s AS foo", (s,)) == s,
"wrong quoting: " + s)
def testUnicode(self):
s = u"Quote'this\\! ''ok?''"
self.failUnless(self.execute("SELECT %s AS foo", (s,)) == s,
"wrong unicode quoting: " + s)
def testNumber(self):
s = self.execute("SELECT %s AS foo", (1971,))
self.failUnless(s == 1971, "wrong integer quoting: " + str(s))
s = self.execute("SELECT %s AS foo", (1971L,))
self.failUnless(s == 1971L, "wrong integer quoting: " + str(s))
def testBoolean(self):
x = self.execute("SELECT %s as foo", (False,))
self.assert_(x is False)
x = self.execute("SELECT %s as foo", (True,))
self.assert_(x is True)
def testDecimal(self):
s = self.execute("SELECT %s AS foo", (decimal.Decimal("19.10"),))
self.failUnless(s - decimal.Decimal("19.10") == 0,
"wrong decimal quoting: " + str(s))
s = self.execute("SELECT %s AS foo", (decimal.Decimal("NaN"),))
self.failUnless(str(s) == "NaN", "wrong decimal quoting: " + str(s))
self.failUnless(type(s) == decimal.Decimal, "wrong decimal conversion: " + repr(s))
s = self.execute("SELECT %s AS foo", (decimal.Decimal("infinity"),))
self.failUnless(str(s) == "NaN", "wrong decimal quoting: " + str(s))
self.failUnless(type(s) == decimal.Decimal, "wrong decimal conversion: " + repr(s))
s = self.execute("SELECT %s AS foo", (decimal.Decimal("-infinity"),))
self.failUnless(str(s) == "NaN", "wrong decimal quoting: " + str(s))
self.failUnless(type(s) == decimal.Decimal, "wrong decimal conversion: " + repr(s))
def testFloatNan(self):
try:
float("nan")
except ValueError:
return self.skipTest("nan not available on this platform")
s = self.execute("SELECT %s AS foo", (float("nan"),))
self.failUnless(str(s) == "nan", "wrong float quoting: " + str(s))
self.failUnless(type(s) == float, "wrong float conversion: " + repr(s))
def testFloatInf(self):
try:
self.execute("select 'inf'::float")
except psycopg2.DataError:
return self.skipTest("inf::float not available on the server")
except ValueError:
return self.skipTest("inf not available on this platform")
s = self.execute("SELECT %s AS foo", (float("inf"),))
self.failUnless(str(s) == "inf", "wrong float quoting: " + str(s))
self.failUnless(type(s) == float, "wrong float conversion: " + repr(s))
s = self.execute("SELECT %s AS foo", (float("-inf"),))
self.failUnless(str(s) == "-inf", "wrong float quoting: " + str(s))
def testBinary(self):
if sys.version_info[0] < 3:
s = ''.join([chr(x) for x in range(256)])
b = psycopg2.Binary(s)
buf = self.execute("SELECT %s::bytea AS foo", (b,))
self.assertEqual(s, str(buf))
else:
s = bytes(range(256))
b = psycopg2.Binary(s)
buf = self.execute("SELECT %s::bytea AS foo", (b,))
self.assertEqual(s, buf.tobytes())
def testBinaryNone(self):
b = psycopg2.Binary(None)
buf = self.execute("SELECT %s::bytea AS foo", (b,))
self.assertEqual(buf, None)
def testBinaryEmptyString(self):
# test to make sure an empty Binary is converted to an empty string
if sys.version_info[0] < 3:
b = psycopg2.Binary('')
self.assertEqual(str(b), "''::bytea")
else:
b = psycopg2.Binary(bytes([]))
self.assertEqual(str(b), "''::bytea")
def testBinaryRoundTrip(self):
# test to make sure buffers returned by psycopg2 are
# understood by execute:
if sys.version_info[0] < 3:
s = ''.join([chr(x) for x in range(256)])
buf = self.execute("SELECT %s::bytea AS foo", (psycopg2.Binary(s),))
buf2 = self.execute("SELECT %s::bytea AS foo", (buf,))
self.assertEqual(s, str(buf2))
else:
s = bytes(range(256))
buf = self.execute("SELECT %s::bytea AS foo", (psycopg2.Binary(s),))
buf2 = self.execute("SELECT %s::bytea AS foo", (buf,))
self.assertEqual(s, buf2.tobytes())
def testArray(self):
s = self.execute("SELECT %s AS foo", ([[1,2],[3,4]],))
self.failUnlessEqual(s, [[1,2],[3,4]])
s = self.execute("SELECT %s AS foo", (['one', 'two', 'three'],))
self.failUnlessEqual(s, ['one', 'two', 'three'])
def testEmptyArrayRegression(self):
# ticket #42
import datetime
curs = self.conn.cursor()
curs.execute("create table array_test (id integer, col timestamp without time zone[])")
curs.execute("insert into array_test values (%s, %s)", (1, [datetime.date(2011,2,14)]))
curs.execute("select col from array_test where id = 1")
self.assertEqual(curs.fetchone()[0], [datetime.datetime(2011, 2, 14, 0, 0)])
curs.execute("insert into array_test values (%s, %s)", (2, []))
curs.execute("select col from array_test where id = 2")
self.assertEqual(curs.fetchone()[0], [])
def testEmptyArray(self):
s = self.execute("SELECT '{}' AS foo")
self.failUnlessEqual(s, [])
s = self.execute("SELECT '{}'::text[] AS foo")
self.failUnlessEqual(s, [])
s = self.execute("SELECT %s AS foo", ([],))
self.failUnlessEqual(s, [])
s = self.execute("SELECT 1 != ALL(%s)", ([],))
self.failUnlessEqual(s, True)
# but don't break the strings :)
s = self.execute("SELECT '{}'::text AS foo")
self.failUnlessEqual(s, "{}")
def testArrayEscape(self):
ss = ['', '\\', '"', '\\\\', '\\"']
for s in ss:
r = self.execute("SELECT %s AS foo", (s,))
self.failUnlessEqual(s, r)
r = self.execute("SELECT %s AS foo", ([s],))
self.failUnlessEqual([s], r)
r = self.execute("SELECT %s AS foo", (ss,))
self.failUnlessEqual(ss, r)
def testArrayMalformed(self):
curs = self.conn.cursor()
ss = ['', '{', '{}}', '{' * 20 + '}' * 20]
for s in ss:
self.assertRaises(psycopg2.DataError,
psycopg2.extensions.STRINGARRAY, b(s), curs)
@testutils.skip_from_python(3)
def testTypeRoundtripBuffer(self):
o1 = buffer("".join(map(chr, range(256))))
o2 = self.execute("select %s;", (o1,))
self.assertEqual(type(o1), type(o2))
# Test with an empty buffer
o1 = buffer("")
o2 = self.execute("select %s;", (o1,))
self.assertEqual(type(o1), type(o2))
self.assertEqual(str(o1), str(o2))
@testutils.skip_from_python(3)
def testTypeRoundtripBufferArray(self):
o1 = buffer("".join(map(chr, range(256))))
o1 = [o1]
o2 = self.execute("select %s;", (o1,))
self.assertEqual(type(o1[0]), type(o2[0]))
self.assertEqual(str(o1[0]), str(o2[0]))
@testutils.skip_before_python(3)
def testTypeRoundtripBytes(self):
o1 = bytes(range(256))
o2 = self.execute("select %s;", (o1,))
self.assertEqual(memoryview, type(o2))
# Test with an empty buffer
o1 = bytes([])
o2 = self.execute("select %s;", (o1,))
self.assertEqual(memoryview, type(o2))
@testutils.skip_before_python(3)
def testTypeRoundtripBytesArray(self):
o1 = bytes(range(256))
o1 = [o1]
o2 = self.execute("select %s;", (o1,))
self.assertEqual(memoryview, type(o2[0]))
@testutils.skip_before_python(2, 6)
def testAdaptBytearray(self):
o1 = bytearray(range(256))
o2 = self.execute("select %s;", (o1,))
if sys.version_info[0] < 3:
self.assertEqual(buffer, type(o2))
else:
self.assertEqual(memoryview, type(o2))
self.assertEqual(len(o1), len(o2))
for c1, c2 in zip(o1, o2):
self.assertEqual(c1, ord(c2))
# Test with an empty buffer
o1 = bytearray([])
o2 = self.execute("select %s;", (o1,))
self.assertEqual(len(o2), 0)
if sys.version_info[0] < 3:
self.assertEqual(buffer, type(o2))
else:
self.assertEqual(memoryview, type(o2))
@testutils.skip_before_python(2, 7)
def testAdaptMemoryview(self):
o1 = memoryview(bytearray(range(256)))
o2 = self.execute("select %s;", (o1,))
if sys.version_info[0] < 3:
self.assertEqual(buffer, type(o2))
else:
self.assertEqual(memoryview, type(o2))
# Test with an empty buffer
o1 = memoryview(bytearray([]))
o2 = self.execute("select %s;", (o1,))
if sys.version_info[0] < 3:
self.assertEqual(buffer, type(o2))
else:
self.assertEqual(memoryview, type(o2))
def testByteaHexCheckFalsePositive(self):
# the check \x -> x to detect bad bytea decode
# may be fooled if the first char is really an 'x'
o1 = psycopg2.Binary(b('x'))
o2 = self.execute("SELECT %s::bytea AS foo", (o1,))
self.assertEqual(b('x'), o2[0])
def testNegNumber(self):
d1 = self.execute("select -%s;", (decimal.Decimal('-1.0'),))
self.assertEqual(1, d1)
f1 = self.execute("select -%s;", (-1.0,))
self.assertEqual(1, f1)
i1 = self.execute("select -%s;", (-1,))
self.assertEqual(1, i1)
l1 = self.execute("select -%s;", (-1L,))
self.assertEqual(1, l1)
def testGenericArray(self):
a = self.execute("select '{1,2,3}'::int4[]")
self.assertEqual(a, [1,2,3])
a = self.execute("select array['a','b','''']::text[]")
self.assertEqual(a, ['a','b',"'"])
@testutils.skip_before_postgres(8, 2)
def testGenericArrayNull(self):
def caster(s, cur):
if s is None: return "nada"
return int(s) * 2
base = psycopg2.extensions.new_type((23,), "INT4", caster)
array = psycopg2.extensions.new_array_type((1007,), "INT4ARRAY", base)
psycopg2.extensions.register_type(array, self.conn)
a = self.execute("select '{1,2,3}'::int4[]")
self.assertEqual(a, [2,4,6])
a = self.execute("select '{1,2,NULL}'::int4[]")
self.assertEqual(a, [2,4,'nada'])
class AdaptSubclassTest(unittest.TestCase):
def test_adapt_subtype(self):
from psycopg2.extensions import adapt
class Sub(str): pass
s1 = "hel'lo"
s2 = Sub(s1)
self.assertEqual(adapt(s1).getquoted(), adapt(s2).getquoted())
def test_adapt_most_specific(self):
from psycopg2.extensions import adapt, register_adapter, AsIs
class A(object): pass
class B(A): pass
class C(B): pass
register_adapter(A, lambda a: AsIs("a"))
register_adapter(B, lambda b: AsIs("b"))
try:
self.assertEqual(b('b'), adapt(C()).getquoted())
finally:
del psycopg2.extensions.adapters[A, psycopg2.extensions.ISQLQuote]
del psycopg2.extensions.adapters[B, psycopg2.extensions.ISQLQuote]
@testutils.skip_from_python(3)
def test_no_mro_no_joy(self):
from psycopg2.extensions import adapt, register_adapter, AsIs
class A: pass
class B(A): pass
register_adapter(A, lambda a: AsIs("a"))
try:
self.assertRaises(psycopg2.ProgrammingError, adapt, B())
finally:
del psycopg2.extensions.adapters[A, psycopg2.extensions.ISQLQuote]
@testutils.skip_before_python(3)
def test_adapt_subtype_3(self):
from psycopg2.extensions import adapt, register_adapter, AsIs
class A: pass
class B(A): pass
register_adapter(A, lambda a: AsIs("a"))
try:
self.assertEqual(b("a"), adapt(B()).getquoted())
finally:
del psycopg2.extensions.adapters[A, psycopg2.extensions.ISQLQuote]
class ByteaParserTest(unittest.TestCase):
"""Unit test for our bytea format parser."""
def setUp(self):
try:
self._cast = self._import_cast()
except Exception, e:
self._cast = None
self._exc = e
def _import_cast(self):
"""Use ctypes to access the C function.
Raise any sort of error: we just support this where ctypes works as
expected.
"""
import ctypes
lib = ctypes.cdll.LoadLibrary(psycopg2._psycopg.__file__)
cast = lib.typecast_BINARY_cast
cast.argtypes = [ctypes.c_char_p, ctypes.c_size_t, ctypes.py_object]
cast.restype = ctypes.py_object
return cast
def cast(self, buffer):
"""Cast a buffer from the output format"""
l = buffer and len(buffer) or 0
rv = self._cast(buffer, l, None)
if rv is None:
return None
if sys.version_info[0] < 3:
return str(rv)
else:
return rv.tobytes()
def test_null(self):
rv = self.cast(None)
self.assertEqual(rv, None)
def test_blank(self):
rv = self.cast(b(''))
self.assertEqual(rv, b(''))
def test_blank_hex(self):
# Reported as problematic in ticket #48
rv = self.cast(b('\\x'))
self.assertEqual(rv, b(''))
def test_full_hex(self, upper=False):
buf = ''.join(("%02x" % i) for i in range(256))
if upper: buf = buf.upper()
buf = '\\x' + buf
rv = self.cast(b(buf))
if sys.version_info[0] < 3:
self.assertEqual(rv, ''.join(map(chr, range(256))))
else:
self.assertEqual(rv, bytes(range(256)))
def test_full_hex_upper(self):
return self.test_full_hex(upper=True)
def test_full_escaped_octal(self):
buf = ''.join(("\\%03o" % i) for i in range(256))
rv = self.cast(b(buf))
if sys.version_info[0] < 3:
self.assertEqual(rv, ''.join(map(chr, range(256))))
else:
self.assertEqual(rv, bytes(range(256)))
def test_escaped_mixed(self):
import string
buf = ''.join(("\\%03o" % i) for i in range(32))
buf += string.ascii_letters
buf += ''.join('\\' + c for c in string.ascii_letters)
buf += '\\\\'
rv = self.cast(b(buf))
if sys.version_info[0] < 3:
tgt = ''.join(map(chr, range(32))) \
+ string.ascii_letters * 2 + '\\'
else:
tgt = bytes(range(32)) + \
(string.ascii_letters * 2 + '\\').encode('ascii')
self.assertEqual(rv, tgt)
def skip_if_cant_cast(f):
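# Descriptive note (added): decorator applied below via decorate_all_tests;
# it skips the wrapped test when the C-level bytea cast could not be loaded
# through ctypes in setUp (self._cast is None).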
@wraps(f)
def skip_if_cant_cast_(self, *args, **kwargs):
if self._cast is None:
return self.skipTest("can't test bytea parser: %s - %s"
% (self._exc.__class__.__name__, self._exc))
return f(self, *args, **kwargs)
return skip_if_cant_cast_
decorate_all_tests(ByteaParserTest, skip_if_cant_cast)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
enng0227/masterfirefoxos | masterfirefoxos/base/helpers.py | 2 | 1808 | import os
from datetime import datetime
from django.conf import settings
from django.contrib.staticfiles.templatetags.staticfiles import static as static_helper
from django.utils.translation import activate as dj_activate, get_language
from feincms.module.medialibrary.models import MediaFile
from feincms.templatetags.feincms_tags import feincms_render_region
from jingo import register
from jinja2 import Markup
from sorl.thumbnail import get_thumbnail
static = register.function(static_helper)
@register.function
def render_region(feincms_page, region, request):
return Markup(feincms_render_region(None, feincms_page, region, request))
@register.function
def current_year():
return datetime.now().strftime('%Y')
@register.function
def activate(language):
dj_activate(language)
return ''
@register.function
def active_version(request):
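# Descriptive note (added): infer the documentation version being viewed from
# the second segment of the request path by matching it against the slugs in
# settings.VERSIONS_LOCALE_MAP.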
slug = request.path.split('/')[2]
for version, data in settings.VERSIONS_LOCALE_MAP.items():
if data['slug'] == slug:
return version
@register.function
def get_image_url(img, geometry=None, locale=None):
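# Descriptive note (added): return the media file URL, preferring a
# locale-specific variant (same basename, categorised under the active locale)
# and, when a sorl geometry string is given, the URL of a generated thumbnail.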
if not locale:
locale = get_language()
url = img.file.url
basename = os.path.basename(img.file.name).rsplit('.')[0]
query = MediaFile.objects.filter(
file__startswith='medialibrary/' + basename + '.',
categories__title=locale)
if query.exists():
img = query.first()
url = img.file.url
if geometry:
img = get_thumbnail(img.file, geometry, quality=90)
url = img.url
# AWS S3 urls contain AWS_ACCESS_KEY_ID, Expiration and other
# params. We don't need them.
return url.split('?')[0]
@register.function
def include_pontoon(request):
return request.get_host() == getattr(settings, 'LOCALIZATION_HOST', None)
| mpl-2.0 |
wfxiang08/green | green/test/test_suite.py | 2 | 2328 | from __future__ import unicode_literals
from __future__ import print_function
import copy
import unittest
try:
from unittest.mock import MagicMock
except:
from mock import MagicMock
from green.suite import GreenTestSuite
from green.config import default_args
class TestGreenTestSuite(unittest.TestCase):
def test_empty(self):
"""
An empty suite can be instantiated.
"""
GreenTestSuite()
def test_defaultArgs(self):
"""
Passing in default arguments causes attributes to be set.
"""
gts = GreenTestSuite(args=default_args)
self.assertEqual(gts.allow_stdout, default_args.allow_stdout)
def test_shouldStop(self):
"""
When result.shouldStop == True, the suite should exit early.
"""
mock_test = MagicMock()
gts = GreenTestSuite(args=default_args)
gts._tests = (mock_test,)
mock_result = MagicMock()
mock_result.shouldStop = True
gts.run(mock_result)
def test_failedSetup(self):
"""
When class setup fails, we skip to the next test.
"""
mock_test = MagicMock()
mock_test.__iter__.side_effect = TypeError
gts = GreenTestSuite(args=default_args)
gts._tests = (mock_test,)
mock_result = MagicMock()
mock_result._moduleSetUpFailed = True
mock_result.shouldStop = False
gts.run(mock_result)
def test_addTest_testPattern(self):
"""
Setting test_pattern will cause a test to be filtered.
"""
mock_test = MagicMock()
mock_test._testMethodName = 'test_hello'
mock_test2 = MagicMock()
mock_test2._testMethodName = 'test_goodbye'
args = copy.deepcopy(default_args)
args.test_pattern = '_good*'
gts = GreenTestSuite(args=args)
gts.addTest(mock_test)
self.assertEqual(gts._tests, [])
gts.addTest(mock_test2)
self.assertEqual(gts._tests, [mock_test2])
def test_allow_stdout(self):
"""
The allow_stdout setting should not get ignored.
"""
class Object(object):
pass
args = Object()
args.allow_stdout = True
gts = GreenTestSuite(args=args)
self.assertEqual(gts.allow_stdout, True)
| mit |
asnir/airflow | airflow/security/kerberos.py | 22 | 4527 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import subprocess
import sys
import time
import socket
from airflow import configuration
LOG = logging.getLogger(__name__)
NEED_KRB181_WORKAROUND = None
def renew_from_kt():
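# Descriptive note (added): re-initialise the Kerberos credentials cache from
# the configured keytab by shelling out to kinit; exits the process if kinit
# fails.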
# The config is specified in seconds. But we ask for that same amount in
# minutes to give ourselves a large renewal buffer.
renewal_lifetime = "%sm" % configuration.getint('kerberos', 'reinit_frequency')
principal = configuration.get('kerberos', 'principal').replace("_HOST", socket.getfqdn())
cmdv = [configuration.get('kerberos', 'kinit_path'),
"-r", renewal_lifetime,
"-k", # host ticket
"-t", configuration.get('kerberos', 'keytab'), # specify keytab
"-c", configuration.get('kerberos', 'ccache'), # specify credentials cache
principal]
LOG.info("Reinitting kerberos from keytab: " + " ".join(cmdv))
subp = subprocess.Popen(cmdv,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
bufsize=-1,
universal_newlines=True)
subp.wait()
if subp.returncode != 0:
LOG.error("Couldn't reinit from keytab! `kinit' exited with %s.\n%s\n%s" % (
subp.returncode,
"\n".join(subp.stdout.readlines()),
"\n".join(subp.stderr.readlines())))
sys.exit(subp.returncode)
global NEED_KRB181_WORKAROUND
if NEED_KRB181_WORKAROUND is None:
NEED_KRB181_WORKAROUND = detect_conf_var()
if NEED_KRB181_WORKAROUND:
# (From: HUE-640). Kerberos clocks have second-level granularity. Make sure we
# renew the ticket after the initial valid time.
time.sleep(1.5)
perform_krb181_workaround()
def perform_krb181_workaround():
cmdv = [configuration.get('kerberos', 'kinit_path'),
"-c", configuration.get('kerberos', 'ccache'),
"-R"] # Renew ticket_cache
LOG.info("Renewing kerberos ticket to work around kerberos 1.8.1: " +
" ".join(cmdv))
ret = subprocess.call(cmdv)
if ret != 0:
principal = "%s/%s" % (configuration.get('kerberos', 'principal'), socket.getfqdn())
fmt_dict = dict(princ=principal,
ccache=configuration.get('kerberos', 'principal'))
LOG.error("Couldn't renew kerberos ticket in order to work around "
"Kerberos 1.8.1 issue. Please check that the ticket for "
"'%(princ)s' is still renewable:\n"
" $ kinit -f -c %(ccache)s\n"
"If the 'renew until' date is the same as the 'valid starting' "
"date, the ticket cannot be renewed. Please check your KDC "
"configuration, and the ticket renewal policy (maxrenewlife) "
"for the '%(princ)s' and `krbtgt' principals." % fmt_dict)
sys.exit(ret)
def detect_conf_var():
"""Return true if the ticket cache contains "conf" information as is found
in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the
Sun Java Krb5LoginModule in Java6, so we need to take an action to work
around it.
"""
ticket_cache = configuration.get('kerberos', 'ccache')
with open(ticket_cache, 'rb') as f:
# Note: this file is binary, so we check against a bytearray.
return b'X-CACHECONF:' in f.read()
def run():
if configuration.get('kerberos','keytab') is None:
LOG.debug("Keytab renewer not starting, no keytab configured")
sys.exit(0)
while True:
renew_from_kt()
time.sleep(configuration.getint('kerberos', 'reinit_frequency'))
| apache-2.0 |
Netflix-Skunkworks/cloudaux | cloudaux/tests/cloudaux/test_cloudaux.py | 1 | 1193 | """
.. module: cloudaux.tests.cloudaux.test_cloudaux
:platform: Unix
:copyright: (c) 2019 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Patrick Kelley <[email protected]>
"""
from cloudaux import CloudAux
def test_cloudaux():
conn_one = {
"account_number": "111111111111",
"assume_role": "role_one",
"region": "us-east-1",
"session_name": "conn_one"
}
conn_two = {
"account_number": "222222222222",
"assume_role": "role_two",
"region": "us-east-2",
"session_name": "conn_two"
}
ca_one = CloudAux(**conn_one)
ca_two = CloudAux(**conn_two)
assert ca_one.conn_details["account_number"] == "111111111111"
assert ca_one.conn_details["assume_role"] == "role_one"
assert ca_one.conn_details["region"] == "us-east-1"
assert ca_one.conn_details["session_name"] == "conn_one"
assert ca_two.conn_details["account_number"] == "222222222222"
assert ca_two.conn_details["assume_role"] == "role_two"
assert ca_two.conn_details["region"] == "us-east-2"
assert ca_two.conn_details["session_name"] == "conn_two"
| apache-2.0 |
EvanK/ansible | lib/ansible/plugins/lookup/dnstxt.py | 57 | 2685 | # (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: dnstxt
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
version_added: "0.9"
short_description: query a domain(s)'s DNS txt fields
requirements:
- dns/dns.resolver (python library)
description:
- Uses a python library to return the DNS TXT record for a domain.
options:
_terms:
description: domain or list of domains to query TXT records from
required: True
type: list
"""
EXAMPLES = """
- name: show txt entry
debug: msg="{{lookup('dnstxt', ['test.example.com'])}}"
- name: iterate over txt entries
debug: msg="{{item}}"
with_dnstxt:
- 'test.example.com'
- 'other.example.com'
- 'last.example.com'
- name: iterate of a comma delimited DNS TXT entry
debug: msg="{{item}}"
with_dnstxt: "{{lookup('dnstxt', ['test.example.com']).split(',')}}"
"""
RETURN = """
_list:
description:
- values returned by the DNS TXT record.
type: list
"""
HAVE_DNS = False
try:
import dns.resolver
from dns.exception import DNSException
HAVE_DNS = True
except ImportError:
pass
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.plugins.lookup import LookupBase
# ==============================================================
# DNSTXT: DNS TXT records
#
# key=domainname
# TODO: configurable resolver IPs
# --------------------------------------------------------------
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
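# Descriptive note (added): resolve the TXT record for each domain. NXDOMAIN
# yields the literal string 'NXDOMAIN'; timeouts and empty answers yield an
# empty string.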
if HAVE_DNS is False:
raise AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")
ret = []
for term in terms:
domain = term.split()[0]
string = []
try:
answers = dns.resolver.query(domain, 'TXT')
for rdata in answers:
s = rdata.to_text()
string.append(s[1:-1]) # Strip outside quotes on TXT rdata
except dns.resolver.NXDOMAIN:
string = 'NXDOMAIN'
except dns.resolver.Timeout:
string = ''
except dns.resolver.NoAnswer:
string = ''
except DNSException as e:
raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e))
ret.append(''.join(string))
return ret
| gpl-3.0 |
grlee77/pywt | pywt/_cwt.py | 3 | 7713 | from math import floor, ceil
from ._extensions._pywt import (DiscreteContinuousWavelet, ContinuousWavelet,
Wavelet, _check_dtype)
from ._functions import integrate_wavelet, scale2frequency
__all__ = ["cwt"]
import numpy as np
try:
# Prefer scipy.fft (new in SciPy 1.4)
import scipy.fft
fftmodule = scipy.fft
next_fast_len = fftmodule.next_fast_len
except ImportError:
try:
import scipy.fftpack
fftmodule = scipy.fftpack
next_fast_len = fftmodule.next_fast_len
except ImportError:
fftmodule = np.fft
# provide a fallback so scipy is an optional requirement
def next_fast_len(n):
"""Round up size to the nearest power of two.
Given a number of samples `n`, returns the next power of two
following this number to take advantage of FFT speedup.
This fallback is less efficient than `scipy.fftpack.next_fast_len`
"""
return 2**ceil(np.log2(n))
def cwt(data, scales, wavelet, sampling_period=1., method='conv', axis=-1):
"""
cwt(data, scales, wavelet)
One dimensional Continuous Wavelet Transform.
Parameters
----------
data : array_like
Input signal
scales : array_like
The wavelet scales to use. One can use
``f = scale2frequency(wavelet, scale)/sampling_period`` to determine
what physical frequency, ``f``. Here, ``f`` is in hertz when the
``sampling_period`` is given in seconds.
wavelet : Wavelet object or name
Wavelet to use
sampling_period : float
Sampling period for the frequencies output (optional).
The values computed for ``coefs`` are independent of the choice of
``sampling_period`` (i.e. ``scales`` is not scaled by the sampling
period).
method : {'conv', 'fft'}, optional
The method used to compute the CWT. Can be any of:
- ``conv`` uses ``numpy.convolve``.
- ``fft`` uses frequency domain convolution.
- ``auto`` uses automatic selection based on an estimate of the
computational complexity at each scale.
The ``conv`` method complexity is ``O(len(scale) * len(data))``.
The ``fft`` method is ``O(N * log2(N))`` with
``N = len(scale) + len(data) - 1``. It is well suited for large size
signals but slightly slower than ``conv`` on small ones.
axis: int, optional
Axis over which to compute the CWT. If not given, the last axis is
used.
Returns
-------
coefs : array_like
Continuous wavelet transform of the input signal for the given scales
and wavelet. The first axis of ``coefs`` corresponds to the scales.
The remaining axes match the shape of ``data``.
frequencies : array_like
If the unit of the sampling period is seconds and given, then the
frequencies are in hertz. Otherwise, a sampling period of 1 is assumed.
Notes
-----
Size of coefficients arrays depends on the length of the input array and
the length of given scales.
Examples
--------
>>> import pywt
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.arange(512)
>>> y = np.sin(2*np.pi*x/32)
>>> coef, freqs=pywt.cwt(y,np.arange(1,129),'gaus1')
>>> plt.matshow(coef) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
----------
>>> import pywt
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + np.real(np.exp(-7*(t-0.4)**2)*np.exp(1j*2*np.pi*2*(t-0.4)))
>>> widths = np.arange(1, 31)
>>> cwtmatr, freqs = pywt.cwt(sig, widths, 'mexh')
>>> plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max()) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
"""
# accept array_like input; make a copy to ensure a contiguous array
dt = _check_dtype(data)
data = np.asarray(data, dtype=dt)
dt_cplx = np.result_type(dt, np.complex64)
if not isinstance(wavelet, (ContinuousWavelet, Wavelet)):
wavelet = DiscreteContinuousWavelet(wavelet)
if np.isscalar(scales):
scales = np.array([scales])
if not np.isscalar(axis):
raise ValueError("axis must be a scalar.")
dt_out = dt_cplx if wavelet.complex_cwt else dt
out = np.empty((np.size(scales),) + data.shape, dtype=dt_out)
precision = 10
int_psi, x = integrate_wavelet(wavelet, precision=precision)
int_psi = np.conj(int_psi) if wavelet.complex_cwt else int_psi
# convert int_psi, x to the same precision as the data
dt_psi = dt_cplx if int_psi.dtype.kind == 'c' else dt
int_psi = np.asarray(int_psi, dtype=dt_psi)
x = np.asarray(x, dtype=data.real.dtype)
if method == 'fft':
size_scale0 = -1
fft_data = None
elif not method == 'conv':
raise ValueError("method must be 'conv' or 'fft'")
if data.ndim > 1:
# move axis to be transformed last (so it is contiguous)
data = data.swapaxes(-1, axis)
# reshape to (n_batch, data.shape[-1])
data_shape_pre = data.shape
data = data.reshape((-1, data.shape[-1]))
for i, scale in enumerate(scales):
step = x[1] - x[0]
j = np.arange(scale * (x[-1] - x[0]) + 1) / (scale * step)
j = j.astype(int) # floor
if j[-1] >= int_psi.size:
j = np.extract(j < int_psi.size, j)
int_psi_scale = int_psi[j][::-1]
if method == 'conv':
if data.ndim == 1:
conv = np.convolve(data, int_psi_scale)
else:
# batch convolution via loop
conv_shape = list(data.shape)
conv_shape[-1] += int_psi_scale.size - 1
conv_shape = tuple(conv_shape)
conv = np.empty(conv_shape, dtype=dt_out)
for n in range(data.shape[0]):
conv[n, :] = np.convolve(data[n], int_psi_scale)
else:
# The padding is selected for:
# - optimal FFT complexity
# - to be larger than the combined length of the two signals to avoid circular
# convolution
size_scale = next_fast_len(
data.shape[-1] + int_psi_scale.size - 1
)
if size_scale != size_scale0:
# Must recompute fft_data when the padding size changes.
fft_data = fftmodule.fft(data, size_scale, axis=-1)
size_scale0 = size_scale
fft_wav = fftmodule.fft(int_psi_scale, size_scale, axis=-1)
conv = fftmodule.ifft(fft_wav * fft_data, axis=-1)
conv = conv[..., :data.shape[-1] + int_psi_scale.size - 1]
coef = - np.sqrt(scale) * np.diff(conv, axis=-1)
if out.dtype.kind != 'c':
coef = coef.real
# transform axis is always -1 due to the data reshape above
d = (coef.shape[-1] - data.shape[-1]) / 2.
if d > 0:
coef = coef[..., floor(d):-ceil(d)]
elif d < 0:
raise ValueError(
"Selected scale of {} too small.".format(scale))
if data.ndim > 1:
# restore original data shape and axis position
coef = coef.reshape(data_shape_pre)
coef = coef.swapaxes(axis, -1)
out[i, ...] = coef
frequencies = scale2frequency(wavelet, scales, precision)
if np.isscalar(frequencies):
frequencies = np.array([frequencies])
frequencies /= sampling_period
return out, frequencies
| mit |
timokoola/finnkinotxt | docutils/readers/standalone.py | 197 | 2340 | # $Id: standalone.py 4802 2006-11-12 18:02:17Z goodger $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Standalone file Reader for the reStructuredText markup syntax.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import frontend, readers
from docutils.transforms import frontmatter, references, misc
class Reader(readers.Reader):
supported = ('standalone',)
"""Contexts this reader supports."""
document = None
"""A single document tree."""
settings_spec = (
'Standalone Reader',
None,
(('Disable the promotion of a lone top-level section title to '
'document title (and subsequent section title to document '
'subtitle promotion; enabled by default).',
['--no-doc-title'],
{'dest': 'doctitle_xform', 'action': 'store_false', 'default': 1,
'validator': frontend.validate_boolean}),
('Disable the bibliographic field list transform (enabled by '
'default).',
['--no-doc-info'],
{'dest': 'docinfo_xform', 'action': 'store_false', 'default': 1,
'validator': frontend.validate_boolean}),
('Activate the promotion of lone subsection titles to '
'section subtitles (disabled by default).',
['--section-subtitles'],
{'dest': 'sectsubtitle_xform', 'action': 'store_true', 'default': 0,
'validator': frontend.validate_boolean}),
('Deactivate the promotion of lone subsection titles.',
['--no-section-subtitles'],
{'dest': 'sectsubtitle_xform', 'action': 'store_false'}),
))
config_section = 'standalone reader'
config_section_dependencies = ('readers',)
def get_transforms(self):
return readers.Reader.get_transforms(self) + [
references.Substitutions,
references.PropagateTargets,
frontmatter.DocTitle,
frontmatter.SectionSubTitle,
frontmatter.DocInfo,
references.AnonymousHyperlinks,
references.IndirectHyperlinks,
references.Footnotes,
references.ExternalTargets,
references.InternalTargets,
references.DanglingReferences,
misc.Transitions,
]
| apache-2.0 |
Zac-HD/home-assistant | homeassistant/components/switch/__init__.py | 3 | 5364 | """
Component to interface with various switches that can be controlled remotely.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/switch/
"""
import asyncio
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
STATE_ON, SERVICE_TURN_ON, SERVICE_TURN_OFF, SERVICE_TOGGLE,
ATTR_ENTITY_ID)
from homeassistant.components import group
from homeassistant.util.async import run_callback_threadsafe
DOMAIN = 'switch'
SCAN_INTERVAL = timedelta(seconds=30)
GROUP_NAME_ALL_SWITCHES = 'all switches'
ENTITY_ID_ALL_SWITCHES = group.ENTITY_ID_FORMAT.format('all_switches')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
ATTR_TODAY_MWH = "today_mwh"
ATTR_CURRENT_POWER_MWH = "current_power_mwh"
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
PROP_TO_ATTR = {
'current_power_mwh': ATTR_CURRENT_POWER_MWH,
'today_power_mw': ATTR_TODAY_MWH,
}
SWITCH_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
_LOGGER = logging.getLogger(__name__)
def is_on(hass, entity_id=None):
"""Return if the switch is on based on the statemachine.
Async friendly.
"""
entity_id = entity_id or ENTITY_ID_ALL_SWITCHES
return hass.states.is_state(entity_id, STATE_ON)
def turn_on(hass, entity_id=None):
"""Turn all or specified switch on."""
run_callback_threadsafe(
hass.loop, async_turn_on, hass, entity_id).result()
@callback
def async_turn_on(hass, entity_id=None):
"""Turn all or specified switch on."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data))
def turn_off(hass, entity_id=None):
"""Turn all or specified switch off."""
run_callback_threadsafe(
hass.loop, async_turn_off, hass, entity_id).result()
@callback
def async_turn_off(hass, entity_id=None):
"""Turn all or specified switch off."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data))
def toggle(hass, entity_id=None):
"""Toggle all or specified switch."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.services.call(DOMAIN, SERVICE_TOGGLE, data)
@asyncio.coroutine
def async_setup(hass, config):
"""Track states and offer events for switches."""
component = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_SWITCHES)
yield from component.async_setup(config)
@asyncio.coroutine
def async_handle_switch_service(service):
"""Handle calls to the switch services."""
target_switches = component.async_extract_from_service(service)
for switch in target_switches:
if service.service == SERVICE_TURN_ON:
yield from switch.async_turn_on()
elif service.service == SERVICE_TOGGLE:
yield from switch.async_toggle()
else:
yield from switch.async_turn_off()
update_tasks = []
for switch in target_switches:
if not switch.should_poll:
continue
update_coro = hass.loop.create_task(
switch.async_update_ha_state(True))
if hasattr(switch, 'async_update'):
update_tasks.append(update_coro)
else:
yield from update_coro
if update_tasks:
yield from asyncio.wait(update_tasks, loop=hass.loop)
descriptions = yield from hass.loop.run_in_executor(
None, load_yaml_config_file, os.path.join(
os.path.dirname(__file__), 'services.yaml'))
hass.services.async_register(
DOMAIN, SERVICE_TURN_OFF, async_handle_switch_service,
descriptions.get(SERVICE_TURN_OFF), schema=SWITCH_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_TURN_ON, async_handle_switch_service,
descriptions.get(SERVICE_TURN_ON), schema=SWITCH_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_TOGGLE, async_handle_switch_service,
descriptions.get(SERVICE_TOGGLE), schema=SWITCH_SERVICE_SCHEMA)
return True
class SwitchDevice(ToggleEntity):
"""Representation of a switch."""
# pylint: disable=no-self-use
@property
def current_power_mwh(self):
"""Return the current power usage in mWh."""
return None
@property
def today_power_mw(self):
"""Return the today total power usage in mW."""
return None
@property
def is_standby(self):
"""Return true if device is in standby."""
return None
@property
def state_attributes(self):
"""Return the optional state attributes."""
data = {}
for prop, attr in PROP_TO_ATTR.items():
value = getattr(self, prop)
if value:
data[attr] = value
return data
| apache-2.0 |
IV-GII/SocialCookies | ENV1/lib/python2.7/site-packages/django/contrib/admin/templatetags/admin_list.py | 105 | 16417 | from __future__ import unicode_literals
import datetime
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.util import (lookup_field, display_for_field,
display_for_value, label_for_field)
from django.contrib.admin.views.main import (ALL_VAR, EMPTY_CHANGELIST_VALUE,
ORDER_VAR, PAGE_VAR, SEARCH_VAR)
from django.contrib.admin.templatetags.admin_static import static
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import formats
from django.utils.html import escapejs, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.template import Library
from django.template.loader import get_template
from django.template.context import Context
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl,i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{0}</span> ', i+1)
else:
return format_html('<a href="{0}"{1}>{2}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages-1 else ''),
i+1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
# Otherwise, do some fancy ellipsis-based pagination.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
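# Worked example (added, illustrative only): with 50 pages and the current
# 0-based page_num == 24, this builds
# [0, 1, '.', 21, 22, 23, 24, 25, 26, 27, '.', 48, 49],
# which paginator_number() renders 1-based as "1 2 ... 22-28 ... 49 50".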
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_ENDS))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
def result_headers(cl):
"""
Generates the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(field_name, cl.model,
model_admin = cl.model_admin,
return_attr = True
)
if attr:
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
# Not sortable
yield {
"text": text,
"class_attrib": format_html(' class="column-{0}"', field_name),
"sortable": False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable', 'column-{0}'.format(field_name)]
order_type = ''
new_order_type = 'asc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if i in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{0}"', ' '.join(th_classes))
if th_classes else '',
}
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.gif' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{0}" alt="{1}" />', icon_url, field_val)
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
row_class = ''
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
if field_name == 'action_checkbox':
row_class = mark_safe(' class="action-checkbox"')
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = display_for_value(value, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_class = mark_safe(' class="nowrap"')
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = EMPTY_CHANGELIST_VALUE
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_class = mark_safe(' class="nowrap"')
if force_text(result_repr) == '':
result_repr = mark_safe('&nbsp;')
# If list_display_links not defined, add the link tag to the first field
if (first and not cl.list_display_links) or field_name in cl.list_display_links:
table_tag = {True:'th', False:'td'}[first]
first = False
url = cl.url_for_result(result)
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = escapejs(value)
yield format_html('<{0}{1}><a href="{2}"{3}>{4}</a></{5}>',
table_tag,
row_class,
url,
format_html(' onclick="opener.dismissRelatedLookupPopup(window, &#39;{0}&#39;); return false;"', result_id)
if cl.is_popup else '',
result_repr,
table_tag)
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
yield format_html('<td{0}>{1}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{0}</td>', force_text(form[cl.model._meta.pk.name]))
class ResultList(list):
# Wrapper class used to return items in a list_editable
# changelist, annotated with the form object for error
# reporting purposes. Needed to maintain backwards
# compatibility with existing admin templates.
def __init__(self, form, *items):
self.form = form
super(ResultList, self).__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_text(form[cl.model._meta.pk.name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
field = cl.opts.get_field_by_name(field_name)[0]
dates_or_datetimes = 'datetimes' if isinstance(field, models.DateTimeField) else 'dates'
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda d: cl.get_query_string(d, [field_generic])
if not (year_lookup or month_lookup or day_lookup):
# select appropriate start level
date_range = cl.queryset.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
days = getattr(days, dates_or_datetimes)(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.queryset.filter(**{year_field: year_lookup})
months = getattr(months, dates_or_datetimes)(field_name, 'month')
return {
'show': True,
'back': {
'link': link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = getattr(cl.queryset, dates_or_datetimes)(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render(Context({
'title': spec.title,
'choices' : list(spec.choices(cl)),
'spec': spec,
}))
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
| gpl-2.0 |
lokirius/python-for-android | python3-alpha/extra_modules/pyxmpp2/mainloop/wait.py | 46 | 2081 | #
# (C) Copyright 2011 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# pylint: disable-msg=W0201
"""Utility functions to wait until a socket (or object implementing .fileno()
in POSIX) is ready for input or output."""
__docformat__ = "restructuredtext en"
import select
if hasattr(select, "poll"):
def wait_for_read(socket, timeout = None):
"""Wait up to `timeout` seconds until `socket` is ready for reading.
"""
if timeout is not None:
timeout *= 1000
poll = select.poll()
poll.register(socket, select.POLLIN)
events = poll.poll(timeout)
return bool(events)
def wait_for_write(socket, timeout = None):
"""Wait up to `timeout` seconds until `socket` is ready for writing.
"""
if timeout is not None:
timeout *= 1000
poll = select.poll()
poll.register(socket, select.POLLOUT)
events = poll.poll(timeout)
return bool(events)
else:
def wait_for_read(socket, timeout = None):
"""Wait up to `timeout` seconds until `socket` is ready for reading.
"""
readable = select.select([socket], [], [], timeout)[0]
return bool(readable)
def wait_for_write(socket, timeout = None):
"""Wait up to `timeout` seconds until `socket` is ready for writing.
"""
writable = select.select([], [socket], [], timeout)[1]
return bool(writable)
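# Example usage (added, illustrative sketch only; the host name and payload
# below are made up and not part of the original module):
#
#   import socket
#   sock = socket.create_connection(("xmpp.example.org", 5222))
#   if wait_for_write(sock, timeout=5.0):
#       sock.sendall(b"<?xml version='1.0'?>")
#   if wait_for_read(sock, timeout=5.0):
#       data = sock.recv(4096)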
| apache-2.0 |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sympy/plotting/plot_axes.py | 4 | 8243 | from pyglet.gl import *
from pyglet import font
from plot_object import PlotObject
from util import strided_range, billboard_matrix
from util import get_direction_vectors
from util import dot_product, vec_sub, vec_mag
from sympy.core import S
from sympy.core.compatibility import is_sequence
class PlotAxes(PlotObject):
def __init__(self, *args, **kwargs):
# initialize style parameter
style = kwargs.pop('style', '').lower()
# allow alias kwargs to override style kwarg
if kwargs.pop('none', None) is not None: style = 'none'
if kwargs.pop('frame', None) is not None: style = 'frame'
if kwargs.pop('box', None) is not None: style = 'box'
if kwargs.pop('ordinate', None) is not None: style = 'ordinate'
if style in ['', 'ordinate']:
self._render_object = PlotAxesOrdinate(self)
elif style in ['frame', 'box']:
self._render_object = PlotAxesFrame(self)
elif style in ['none']:
self._render_object = None
else: raise ValueError(("Unrecognized axes "
"style %s.") % (style))
# initialize stride parameter
stride = kwargs.pop('stride', 0.25)
try: stride = eval(stride)
except: pass
if is_sequence(stride):
assert len(stride) == 3
self._stride = stride
else:
self._stride = [stride, stride, stride]
self._tick_length = float(kwargs.pop('tick_length', 0.1))
# setup bounding box and ticks
self._origin = [0,0,0]
self.reset_bounding_box()
def flexible_boolean(input, default):
if input in [True, False]:
return input
if input in ['f','F','false','False']: return False
if input in ['t','T','true','True']: return True
return default
# initialize remaining parameters
self.visible = flexible_boolean(kwargs.pop('visible',''), True)
self._overlay = flexible_boolean(kwargs.pop('overlay',''), True)
self._colored = flexible_boolean(kwargs.pop('colored',''), False)
self._label_axes = flexible_boolean(kwargs.pop('label_axes', ''), False)
self._label_ticks = flexible_boolean(kwargs.pop('label_ticks', ''), True)
# setup label font
self.font_face = kwargs.pop('font_face', 'Arial')
self.font_size = kwargs.pop('font_size', 28)
# this is also used to reinit the
# font on window close/reopen
self.reset_resources()
def reset_resources(self):
self.label_font = None
def reset_bounding_box(self):
self._bounding_box = [[None,None], [None,None], [None,None]]
self._axis_ticks = [[],[],[]]
def draw(self):
if self._render_object:
glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT | GL_DEPTH_BUFFER_BIT)
if self._overlay: glDisable(GL_DEPTH_TEST)
self._render_object.draw()
glPopAttrib()
def adjust_bounds(self, child_bounds):
b = self._bounding_box
c = child_bounds
for i in [0,1,2]:
if abs(c[i][0]) is S.Infinity or abs(c[i][1]) is S.Infinity: continue
b[i][0] = [ min([b[i][0], c[i][0]]), c[i][0] ][ b[i][0] is None ]
b[i][1] = [ max([b[i][1], c[i][1]]), c[i][1] ][ b[i][1] is None ]
self._recalculate_axis_ticks(i)
def _recalculate_axis_ticks(self, axis):
b = self._bounding_box
if b[axis][0] is None or b[axis][1] is None:
self._axis_ticks[axis] = []
else:
self._axis_ticks[axis] = strided_range(b[axis][0], b[axis][1], self._stride[axis])
def toggle_visible(self):
self.visible = not self.visible
def toggle_colors(self):
self._colored = not self._colored
class PlotAxesBase(PlotObject):
def __init__(self, parent_axes):
self._p = parent_axes
def draw(self):
color = [ ([0.2,0.1,0.3], [0.2,0.1,0.3], [0.2,0.1,0.3]),
([0.9,0.3,0.5], [0.5,1.0,0.5], [0.3,0.3,0.9]) ][ self._p._colored ]
self.draw_background(color)
self.draw_axis(2, color[2])
self.draw_axis(1, color[1])
self.draw_axis(0, color[0])
def draw_background(self, color):
pass # optional
def draw_axis(self, axis, color):
raise NotImplementedError()
def draw_text(self, text, position, color, scale=1.0):
if len(color) == 3: color = (color[0], color[1], color[2], 1.0)
if self._p.label_font is None:
self._p.label_font = font.load(self._p.font_face,
self._p.font_size,
bold=True, italic=False)
label = font.Text(self._p.label_font, text,
color=color,
valign=font.Text.BASELINE,
halign=font.Text.CENTER)
glPushMatrix()
glTranslatef(*position)
billboard_matrix()
scale_factor = 0.005*scale
glScalef(scale_factor, scale_factor, scale_factor)
glColor4f(0,0,0,0)
label.draw()
glPopMatrix()
def draw_line(self, v, color):
o = self._p._origin
glBegin(GL_LINES)
glColor3f(*color)
glVertex3f(v[0][0] + o[0], v[0][1] + o[1], v[0][2] + o[2])
glVertex3f(v[1][0] + o[0], v[1][1] + o[1], v[1][2] + o[2])
glEnd()
class PlotAxesOrdinate(PlotAxesBase):
def __init__(self, parent_axes):
super(PlotAxesOrdinate, self).__init__(parent_axes)
def draw_axis(self, axis, color):
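# Descriptive note (added): draw a single ordinate-style axis: its tick
# marks, the axis line and, unless the camera is looking almost straight
# down the axis, the tick and axis labels.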
ticks = self._p._axis_ticks[axis]
radius = self._p._tick_length / 2.0
if len(ticks) < 2: return
# calculate the vector for this axis
axis_lines = [[0,0,0], [0,0,0]]
axis_lines[0][axis], axis_lines[1][axis] = ticks[0], ticks[-1]
axis_vector = vec_sub( axis_lines[1], axis_lines[0] )
# calculate angle to the z direction vector
pos_z = get_direction_vectors()[2]
d = abs( dot_product(axis_vector, pos_z) )
d = d / vec_mag(axis_vector)
# don't draw labels if we're looking down the axis
labels_visible = abs(d - 1.0) > 0.02
# draw the ticks and labels
for tick in ticks:
self.draw_tick_line(axis, color, radius, tick, labels_visible)
# draw the axis line and labels
self.draw_axis_line(axis, color, ticks[0], ticks[-1], labels_visible)
def draw_axis_line(self, axis, color, a_min, a_max, labels_visible):
axis_line = [[0,0,0], [0,0,0]]
axis_line[0][axis], axis_line[1][axis] = a_min, a_max
self.draw_line(axis_line, color)
if labels_visible: self.draw_axis_line_labels(axis, color, axis_line)
def draw_axis_line_labels(self, axis, color, axis_line):
if not self._p._label_axes: return
axis_labels = [axis_line[0][::], axis_line[1][::]]
axis_labels[0][axis] -= 0.3
axis_labels[1][axis] += 0.3
a_str = ['X', 'Y', 'Z'][axis]
self.draw_text("-" + a_str, axis_labels[0], color)
self.draw_text("+" + a_str, axis_labels[1], color)
def draw_tick_line(self, axis, color, radius, tick, labels_visible):
tick_axis = {0: 1, 1: 0, 2: 1}[axis]
tick_line = [[0,0,0], [0,0,0]]
tick_line[0][axis] = tick_line[1][axis] = tick
tick_line[0][tick_axis], tick_line[1][tick_axis] = -radius, radius
self.draw_line(tick_line, color)
if labels_visible: self.draw_tick_line_label(axis, color, radius, tick)
def draw_tick_line_label(self, axis, color, radius, tick):
if not self._p._label_axes: return
tick_label_vector = [0,0,0]
tick_label_vector[axis] = tick
tick_label_vector[{0: 1, 1: 0, 2: 1}[axis]] = [-1,1,1][axis]*radius*3.5
self.draw_text(str(tick), tick_label_vector, color, scale=0.5)
class PlotAxesFrame(PlotAxesBase):
def __init__(self, parent_axes):
super(PlotAxesFrame, self).__init__(parent_axes)
def draw_background(self, color):
pass
def draw_axis(self, axis, color):
raise NotImplementedError()
| agpl-3.0 |
uruz/django-rest-framework | tests/test_middleware.py | 79 | 1134 |
from django.conf.urls import url
from django.contrib.auth.models import User
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from rest_framework.views import APIView
urlpatterns = [
url(r'^$', APIView.as_view(authentication_classes=(TokenAuthentication,))),
]
class MyMiddleware(object):
def process_response(self, request, response):
assert hasattr(request, 'user'), '`user` is not set on request'
assert request.user.is_authenticated(), '`user` is not authenticated'
return response
class TestMiddleware(APITestCase):
urls = 'tests.test_middleware'
def test_middleware_can_access_user_when_processing_response(self):
user = User.objects.create_user('john', '[email protected]', 'password')
key = 'abcd1234'
Token.objects.create(key=key, user=user)
with self.settings(
MIDDLEWARE_CLASSES=('tests.test_middleware.MyMiddleware',)
):
auth = 'Token ' + key
self.client.get('/', HTTP_AUTHORIZATION=auth)
| bsd-2-clause |
z0by/django | tests/postgres_tests/test_json.py | 284 | 7890 | import datetime
import unittest
from django.core import exceptions, serializers
from django.db import connection
from django.test import TestCase
from . import PostgreSQLTestCase
from .models import JSONModel
try:
from django.contrib.postgres import forms
from django.contrib.postgres.fields import JSONField
except ImportError:
pass
def skipUnlessPG94(test):
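# Descriptive note (added): skip the decorated test case unless the connected
# PostgreSQL server is at least version 9.4, which introduced the jsonb type
# these tests rely on.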
try:
PG_VERSION = connection.pg_version
except AttributeError:
PG_VERSION = 0
if PG_VERSION < 90400:
return unittest.skip('PostgreSQL >= 9.4 required')(test)
return test
@skipUnlessPG94
class TestSaveLoad(TestCase):
def test_null(self):
instance = JSONModel()
instance.save()
loaded = JSONModel.objects.get()
self.assertEqual(loaded.field, None)
def test_empty_object(self):
instance = JSONModel(field={})
instance.save()
loaded = JSONModel.objects.get()
self.assertEqual(loaded.field, {})
def test_empty_list(self):
instance = JSONModel(field=[])
instance.save()
loaded = JSONModel.objects.get()
self.assertEqual(loaded.field, [])
def test_boolean(self):
instance = JSONModel(field=True)
instance.save()
loaded = JSONModel.objects.get()
self.assertEqual(loaded.field, True)
def test_string(self):
instance = JSONModel(field='why?')
instance.save()
loaded = JSONModel.objects.get()
self.assertEqual(loaded.field, 'why?')
def test_number(self):
instance = JSONModel(field=1)
instance.save()
loaded = JSONModel.objects.get()
self.assertEqual(loaded.field, 1)
def test_realistic_object(self):
obj = {
'a': 'b',
'c': 1,
'd': ['e', {'f': 'g'}],
'h': True,
'i': False,
'j': None,
}
instance = JSONModel(field=obj)
instance.save()
loaded = JSONModel.objects.get()
self.assertEqual(loaded.field, obj)
@skipUnlessPG94
class TestQuerying(TestCase):
@classmethod
def setUpTestData(cls):
cls.objs = [
JSONModel.objects.create(field=None),
JSONModel.objects.create(field=True),
JSONModel.objects.create(field=False),
JSONModel.objects.create(field='yes'),
JSONModel.objects.create(field=7),
JSONModel.objects.create(field=[]),
JSONModel.objects.create(field={}),
JSONModel.objects.create(field={
'a': 'b',
'c': 1,
}),
JSONModel.objects.create(field={
'a': 'b',
'c': 1,
'd': ['e', {'f': 'g'}],
'h': True,
'i': False,
'j': None,
'k': {'l': 'm'},
}),
JSONModel.objects.create(field=[1, [2]]),
JSONModel.objects.create(field={
'k': True,
'l': False,
}),
]
def test_exact(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__exact={}),
[self.objs[6]]
)
def test_exact_complex(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__exact={'a': 'b', 'c': 1}),
[self.objs[7]]
)
def test_isnull(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__isnull=True),
[self.objs[0]]
)
def test_contains(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__contains={'a': 'b'}),
[self.objs[7], self.objs[8]]
)
def test_contained_by(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__contained_by={'a': 'b', 'c': 1, 'h': True}),
[self.objs[6], self.objs[7]]
)
def test_has_key(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__has_key='a'),
[self.objs[7], self.objs[8]]
)
def test_has_keys(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__has_keys=['a', 'c', 'h']),
[self.objs[8]]
)
def test_has_any_keys(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__has_any_keys=['c', 'l']),
[self.objs[7], self.objs[8], self.objs[10]]
)
def test_shallow_list_lookup(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__0=1),
[self.objs[9]]
)
def test_shallow_obj_lookup(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__a='b'),
[self.objs[7], self.objs[8]]
)
def test_deep_lookup_objs(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__k__l='m'),
[self.objs[8]]
)
def test_shallow_lookup_obj_target(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__k={'l': 'm'}),
[self.objs[8]]
)
def test_deep_lookup_array(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__1__0=2),
[self.objs[9]]
)
def test_deep_lookup_mixed(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__d__1__f='g'),
[self.objs[8]]
)
def test_deep_lookup_transform(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__c__gt=1),
[]
)
self.assertSequenceEqual(
JSONModel.objects.filter(field__c__lt=5),
[self.objs[7], self.objs[8]]
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
JSONModel.objects.filter(id__in=JSONModel.objects.filter(field__c=1)),
self.objs[7:9]
)
@skipUnlessPG94
class TestSerialization(TestCase):
test_data = '[{"fields": {"field": {"a": "b"}}, "model": "postgres_tests.jsonmodel", "pk": null}]'
def test_dumping(self):
instance = JSONModel(field={'a': 'b'})
data = serializers.serialize('json', [instance])
self.assertJSONEqual(data, self.test_data)
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, {'a': 'b'})
class TestValidation(PostgreSQLTestCase):
def test_not_serializable(self):
field = JSONField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(datetime.timedelta(days=1), None)
self.assertEqual(cm.exception.code, 'invalid')
self.assertEqual(cm.exception.message % cm.exception.params, "Value must be valid JSON.")
class TestFormField(PostgreSQLTestCase):
def test_valid(self):
field = forms.JSONField()
value = field.clean('{"a": "b"}')
self.assertEqual(value, {'a': 'b'})
def test_valid_empty(self):
field = forms.JSONField(required=False)
value = field.clean('')
self.assertEqual(value, None)
def test_invalid(self):
field = forms.JSONField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('{some badly formed: json}')
self.assertEqual(cm.exception.messages[0], "'{some badly formed: json}' value must be valid JSON.")
def test_formfield(self):
model_field = JSONField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, forms.JSONField)
def test_prepare_value(self):
field = forms.JSONField()
self.assertEqual(field.prepare_value({'a': 'b'}), '{"a": "b"}')
self.assertEqual(field.prepare_value(None), 'null')
| bsd-3-clause |
RudolfCardinal/crate | crate_anon/crateweb/core/constants.py | 1 | 2396 | #!/usr/bin/env python
"""
crate_anon/crateweb/core/constants.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal ([email protected]).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <http://www.gnu.org/licenses/>.
===============================================================================
**Core constants, like field lengths.**
"""
DJANGO_DEFAULT_CONNECTION = 'default' # key to django.db.connections
LEN_ADDRESS = 100
LEN_FIELD_DESCRIPTION = 100
LEN_NAME = 100
LEN_PHONE = 20
LEN_TITLE = 20
MAX_HASH_LENGTH = 128
SCRUBBER_PNG_FILENAME = "scrubber.png"
class SettingsKeys(object):
"""
Keys for the Django ``settings.py`` file -- particularly those that are
optional, for which we use :func:`getattr`.
"""
ARCHIVE_ATTACHMENT_DIR = "ARCHIVE_ATTACHMENT_DIR"
ARCHIVE_CONTEXT = "ARCHIVE_CONTEXT"
ARCHIVE_ROOT_TEMPLATE = "ARCHIVE_ROOT_TEMPLATE"
ARCHIVE_STATIC_DIR = "ARCHIVE_STATIC_DIR"
ARCHIVE_TEMPLATE_CACHE_DIR = "ARCHIVE_TEMPLATE_CACHE_DIR"
ARCHIVE_TEMPLATE_DIR = "ARCHIVE_TEMPLATE_DIR"
CACHE_CONTROL_MAX_AGE_ARCHIVE_ATTACHMENTS = "CACHE_CONTROL_MAX_AGE_ARCHIVE_ATTACHMENTS" # noqa
CACHE_CONTROL_MAX_AGE_ARCHIVE_STATIC = "CACHE_CONTROL_MAX_AGE_ARCHIVE_STATIC" # noqa
CACHE_CONTROL_MAX_AGE_ARCHIVE_TEMPLATES = "CACHE_CONTROL_MAX_AGE_ARCHIVE_TEMPLATES" # noqa
DISABLE_DJANGO_PYODBC_AZURE_CURSOR_FETCHONE_NEXTSET = "DISABLE_DJANGO_PYODBC_AZURE_CURSOR_FETCHONE_NEXTSET" # noqa
NLP_SOURCEDB_MAP = "NLP_SOURCEDB_MAP"
VISZONE_CONTEXT = "VISZONE_CONTEXT"
VISZONE_ROOT_TEMPLATE = "VISZONE_ROOT_TEMPLATE"
VISZONE_STATIC_DIR = "VISZONE_STATIC_DIR"
VISZONE_TEMPLATE_CACHE_DIR = "VISZONE_TEMPLATE_CACHE_DIR"
VISZONE_TEMPLATE_DIR = "VISZONE_TEMPLATE_DIR"
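# A minimal sketch of the optional-setting pattern mentioned in the class
# docstring above (getattr with a default); the key and the None default
# here are illustrative assumptions, not values used by CRATE itself.
def _example_optional_setting():
    from django.conf import settings
    return getattr(settings, SettingsKeys.ARCHIVE_ROOT_TEMPLATE, None)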
| gpl-3.0 |
casanovainformationservices/LazyLibrarian | cherrypy/tutorial/tut10_http_errors.py | 7 | 2705 | """
Tutorial: HTTP errors
HTTPError is used to return an error response to the client.
CherryPy has lots of options regarding how such errors are
logged, displayed, and formatted.
"""
import os
localDir = os.path.dirname(__file__)
curpath = os.path.normpath(os.path.join(os.getcwd(), localDir))
import cherrypy
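# A minimal sketch of the mechanism this tutorial demonstrates: raising
# cherrypy.HTTPError sends the given status (and optional message) back to
# the client. The class and handler names below are illustrative only and
# are not mounted anywhere.
class MinimalHTTPErrorExample(object):
    @cherrypy.expose
    def forbidden(self):
        # The client receives a 403 response, rendered by CherryPy's default
        # error page unless 'error_page.403' is configured (as done below).
        raise cherrypy.HTTPError(403, "You are not allowed here.")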
class HTTPErrorDemo(object):
# Set a custom response for 403 errors.
_cp_config = {'error_page.403':
os.path.join(curpath, "custom_error.html")}
@cherrypy.expose
def index(self):
# display some links that will result in errors
tracebacks = cherrypy.request.show_tracebacks
if tracebacks:
trace = 'off'
else:
trace = 'on'
return """
<html><body>
<p>Toggle tracebacks <a href="toggleTracebacks">%s</a></p>
<p><a href="/doesNotExist">Click me; I'm a broken link!</a></p>
<p>
<a href="/error?code=403">
Use a custom error page from a file.
</a>
</p>
<p>These errors are explicitly raised by the application:</p>
<ul>
<li><a href="/error?code=400">400</a></li>
<li><a href="/error?code=401">401</a></li>
<li><a href="/error?code=402">402</a></li>
<li><a href="/error?code=500">500</a></li>
</ul>
<p><a href="/messageArg">You can also set the response body
when you raise an error.</a></p>
</body></html>
""" % trace
@cherrypy.expose
def toggleTracebacks(self):
# simple function to toggle tracebacks on and off
tracebacks = cherrypy.request.show_tracebacks
cherrypy.config.update({'request.show_tracebacks': not tracebacks})
# redirect back to the index
raise cherrypy.HTTPRedirect('/')
@cherrypy.expose
def error(self, code):
# raise an error based on the get query
raise cherrypy.HTTPError(status=code)
@cherrypy.expose
def messageArg(self):
message = ("If you construct an HTTPError with a 'message' "
"argument, it wil be placed on the error page "
"(underneath the status line by default).")
raise cherrypy.HTTPError(500, message=message)
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(HTTPErrorDemo(), config=tutconf)
| gpl-3.0 |
ibyer/xhtml2pdf | xhtml2pdf/parser.py | 3 | 25074 | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from html5lib import treebuilders, inputstream
from xhtml2pdf.default import TAGS, STRING, INT, BOOL, SIZE, COLOR, FILE
from xhtml2pdf.default import BOX, POS, MUST, FONT
from xhtml2pdf.util import getSize, getBool, toList, getColor, getAlign
from xhtml2pdf.util import getBox, getPos, pisaTempFile
from reportlab.platypus.doctemplate import NextPageTemplate, FrameBreak
from reportlab.platypus.flowables import PageBreak, KeepInFrame
from xhtml2pdf.xhtml2pdf_reportlab import PmlRightPageBreak, PmlLeftPageBreak
from xhtml2pdf.tags import * # TODO: Kill wild import!
from xhtml2pdf.tables import * # TODO: Kill wild import!
from xhtml2pdf.util import * # TODO: Kill wild import!
from xml.dom import Node
import copy
import html5lib
import logging
import re
import types
import xhtml2pdf.w3c.cssDOMElementInterface as cssDOMElementInterface
import xml.dom.minidom
CSSAttrCache={}
log = logging.getLogger("xhtml2pdf")
rxhttpstrip = re.compile("https?://[^/]+(.*)", re.M | re.I)
class AttrContainer(dict):
def __getattr__(self, name):
try:
return dict.__getattr__(self, name)
except:
return self[name]
def pisaGetAttributes(c, tag, attributes):
global TAGS
attrs = {}
if attributes:
for k, v in attributes.items():
try:
attrs[str(k)] = str(v) # XXX no Unicode! Reportlab fails with template names
except:
attrs[k] = v
nattrs = {}
if TAGS.has_key(tag):
block, adef = TAGS[tag]
adef["id"] = STRING
# print block, adef
for k, v in adef.items():
nattrs[k] = None
# print k, v
            # defaults, if present
if type(v) == types.TupleType:
if v[1] == MUST:
if not attrs.has_key(k):
log.warn(c.warning("Attribute '%s' must be set!", k))
nattrs[k] = None
continue
nv = attrs.get(k, v[1])
dfl = v[1]
v = v[0]
else:
nv = attrs.get(k, None)
dfl = None
if nv is not None:
if type(v) == types.ListType:
nv = nv.strip().lower()
if nv not in v:
#~ raise PML_EXCEPTION, "attribute '%s' of wrong value, allowed is one of: %s" % (k, repr(v))
log.warn(c.warning("Attribute '%s' of wrong value, allowed is one of: %s", k, repr(v)))
nv = dfl
elif v == BOOL:
nv = nv.strip().lower()
nv = nv in ("1", "y", "yes", "true", str(k))
elif v == SIZE:
try:
nv = getSize(nv)
except:
log.warn(c.warning("Attribute '%s' expects a size value", k))
elif v == BOX:
nv = getBox(nv, c.pageSize)
elif v == POS:
nv = getPos(nv, c.pageSize)
elif v == INT:
nv = int(nv)
elif v == COLOR:
nv = getColor(nv)
elif v == FILE:
nv = c.getFile(nv)
elif v == FONT:
nv = c.getFontName(nv)
nattrs[k] = nv
#for k in attrs.keys():
# if not nattrs.has_key(k):
# c.warning("attribute '%s' for tag <%s> not supported" % (k, tag))
#else:
# c.warning("tag <%s> is not supported" % tag)
return AttrContainer(nattrs)
attrNames = '''
color
font-family
font-size
font-weight
font-style
text-decoration
line-height
background-color
display
margin-left
margin-right
margin-top
margin-bottom
padding-left
padding-right
padding-top
padding-bottom
border-top-color
border-top-style
border-top-width
border-bottom-color
border-bottom-style
border-bottom-width
border-left-color
border-left-style
border-left-width
border-right-color
border-right-style
border-right-width
text-align
vertical-align
width
height
zoom
page-break-after
page-break-before
list-style-type
list-style-image
white-space
text-indent
-pdf-page-break
-pdf-frame-break
-pdf-next-page
-pdf-keep-with-next
-pdf-outline
-pdf-outline-level
-pdf-outline-open
-pdf-line-spacing
-pdf-keep-in-frame-mode
-pdf-word-wrap
'''.strip().split()
def getCSSAttr(self, cssCascade, attrName, default=NotImplemented):
if attrName in self.cssAttrs:
return self.cssAttrs[attrName]
try:
result = cssCascade.findStyleFor(self.cssElement, attrName, default)
except LookupError:
result = None
# XXX Workaround for inline styles
try:
style = self.cssStyle
except:
style = self.cssStyle = cssCascade.parser.parseInline(self.cssElement.getStyleAttr() or '')[0]
if style.has_key(attrName):
result = style[attrName]
if result == 'inherit':
if hasattr(self.parentNode, 'getCSSAttr'):
result = self.parentNode.getCSSAttr(cssCascade, attrName, default)
elif default is not NotImplemented:
return default
else:
raise LookupError("Could not find inherited CSS attribute value for '%s'" % (attrName,))
if result is not None:
self.cssAttrs[attrName] = result
return result
#TODO: Monkeypatching standard lib should go away.
xml.dom.minidom.Element.getCSSAttr = getCSSAttr
def getCSSAttrCacheKey(node):
_cl = _id = _st = ''
for i in node.attributes.items():
if i[0] == 'class':
_cl = i[1]
elif i[0] == 'id':
_id = i[1]
elif i[0] == 'style':
_st = i[1]
return "%s#%s#%s#%s" % (id(node.parentNode), _cl, _id, _st)
def CSSCollect(node, c):
#node.cssAttrs = {}
#return node.cssAttrs
if c.css:
_key = getCSSAttrCacheKey(node)
if hasattr(node.parentNode, "tagName"):
if node.parentNode.tagName.lower() != "html":
CachedCSSAttr = CSSAttrCache.get(_key, None)
if CachedCSSAttr is not None:
return CachedCSSAttr
node.cssElement = cssDOMElementInterface.CSSDOMElementInterface(node)
node.cssAttrs = {}
# node.cssElement.onCSSParserVisit(c.cssCascade.parser)
cssAttrMap = {}
for cssAttrName in attrNames:
try:
cssAttrMap[cssAttrName] = node.getCSSAttr(c.cssCascade, cssAttrName)
#except LookupError:
# pass
except Exception: # TODO: Kill this catch-all!
log.debug("CSS error '%s'", cssAttrName, exc_info=1)
CSSAttrCache[_key] = node.cssAttrs
return node.cssAttrs
def CSS2Frag(c, kw, isBlock):
# COLORS
if c.cssAttr.has_key("color"):
c.frag.textColor = getColor(c.cssAttr["color"])
if c.cssAttr.has_key("background-color"):
c.frag.backColor = getColor(c.cssAttr["background-color"])
# FONT SIZE, STYLE, WEIGHT
if c.cssAttr.has_key("font-family"):
c.frag.fontName = c.getFontName(c.cssAttr["font-family"])
if c.cssAttr.has_key("font-size"):
# XXX inherit
c.frag.fontSize = max(getSize("".join(c.cssAttr["font-size"]), c.frag.fontSize, c.baseFontSize), 1.0)
if c.cssAttr.has_key("line-height"):
leading = "".join(c.cssAttr["line-height"])
c.frag.leading = getSize(leading, c.frag.fontSize)
c.frag.leadingSource = leading
else:
c.frag.leading = getSize(c.frag.leadingSource, c.frag.fontSize)
if c.cssAttr.has_key("-pdf-line-spacing"):
c.frag.leadingSpace = getSize("".join(c.cssAttr["-pdf-line-spacing"]))
# print "line-spacing", c.cssAttr["-pdf-line-spacing"], c.frag.leading
if c.cssAttr.has_key("font-weight"):
value = c.cssAttr["font-weight"].lower()
if value in ("bold", "bolder", "500", "600", "700", "800", "900"):
c.frag.bold = 1
else:
c.frag.bold = 0
for value in toList(c.cssAttr.get("text-decoration", "")):
if "underline" in value:
c.frag.underline = 1
if "line-through" in value:
c.frag.strike = 1
if "none" in value:
c.frag.underline = 0
c.frag.strike = 0
if c.cssAttr.has_key("font-style"):
value = c.cssAttr["font-style"].lower()
if value in ("italic", "oblique"):
c.frag.italic = 1
else:
c.frag.italic = 0
if c.cssAttr.has_key("white-space"):
# normal | pre | nowrap
c.frag.whiteSpace = str(c.cssAttr["white-space"]).lower()
# ALIGN & VALIGN
if c.cssAttr.has_key("text-align"):
c.frag.alignment = getAlign(c.cssAttr["text-align"])
if c.cssAttr.has_key("vertical-align"):
c.frag.vAlign = c.cssAttr["vertical-align"]
# HEIGHT & WIDTH
if c.cssAttr.has_key("height"):
c.frag.height = "".join(toList(c.cssAttr["height"])) # XXX Relative is not correct!
if c.frag.height in ("auto",):
c.frag.height = None
if c.cssAttr.has_key("width"):
# print c.cssAttr["width"]
c.frag.width = "".join(toList(c.cssAttr["width"])) # XXX Relative is not correct!
if c.frag.width in ("auto",):
c.frag.width = None
# ZOOM
if c.cssAttr.has_key("zoom"):
# print c.cssAttr["width"]
zoom = "".join(toList(c.cssAttr["zoom"])) # XXX Relative is not correct!
if zoom.endswith("%"):
zoom = float(zoom[: - 1]) / 100.0
c.frag.zoom = float(zoom)
# MARGINS & LIST INDENT, STYLE
if isBlock:
if c.cssAttr.has_key("margin-top"):
c.frag.spaceBefore = getSize(c.cssAttr["margin-top"], c.frag.fontSize)
if c.cssAttr.has_key("margin-bottom"):
c.frag.spaceAfter = getSize(c.cssAttr["margin-bottom"], c.frag.fontSize)
if c.cssAttr.has_key("margin-left"):
c.frag.bulletIndent = kw["margin-left"] # For lists
kw["margin-left"] += getSize(c.cssAttr["margin-left"], c.frag.fontSize)
c.frag.leftIndent = kw["margin-left"]
# print "MARGIN LEFT", kw["margin-left"], c.frag.bulletIndent
if c.cssAttr.has_key("margin-right"):
kw["margin-right"] += getSize(c.cssAttr["margin-right"], c.frag.fontSize)
c.frag.rightIndent = kw["margin-right"]
# print c.frag.rightIndent
if c.cssAttr.has_key("text-indent"):
c.frag.firstLineIndent = getSize(c.cssAttr["text-indent"], c.frag.fontSize)
if c.cssAttr.has_key("list-style-type"):
c.frag.listStyleType = str(c.cssAttr["list-style-type"]).lower()
if c.cssAttr.has_key("list-style-image"):
c.frag.listStyleImage = c.getFile(c.cssAttr["list-style-image"])
# PADDINGS
if isBlock:
if c.cssAttr.has_key("padding-top"):
c.frag.paddingTop = getSize(c.cssAttr["padding-top"], c.frag.fontSize)
if c.cssAttr.has_key("padding-bottom"):
c.frag.paddingBottom = getSize(c.cssAttr["padding-bottom"], c.frag.fontSize)
if c.cssAttr.has_key("padding-left"):
c.frag.paddingLeft = getSize(c.cssAttr["padding-left"], c.frag.fontSize)
if c.cssAttr.has_key("padding-right"):
c.frag.paddingRight = getSize(c.cssAttr["padding-right"], c.frag.fontSize)
# BORDERS
if isBlock:
if c.cssAttr.has_key("border-top-width"):
# log.debug(c.cssAttr["border-top-width"])
c.frag.borderTopWidth = getSize(c.cssAttr["border-top-width"], c.frag.fontSize)
if c.cssAttr.has_key("border-bottom-width"):
c.frag.borderBottomWidth = getSize(c.cssAttr["border-bottom-width"], c.frag.fontSize)
if c.cssAttr.has_key("border-left-width"):
c.frag.borderLeftWidth = getSize(c.cssAttr["border-left-width"], c.frag.fontSize)
if c.cssAttr.has_key("border-right-width"):
c.frag.borderRightWidth = getSize(c.cssAttr["border-right-width"], c.frag.fontSize)
if c.cssAttr.has_key("border-top-style"):
c.frag.borderTopStyle = c.cssAttr["border-top-style"]
if c.cssAttr.has_key("border-bottom-style"):
c.frag.borderBottomStyle = c.cssAttr["border-bottom-style"]
if c.cssAttr.has_key("border-left-style"):
c.frag.borderLeftStyle = c.cssAttr["border-left-style"]
if c.cssAttr.has_key("border-right-style"):
c.frag.borderRightStyle = c.cssAttr["border-right-style"]
if c.cssAttr.has_key("border-top-color"):
c.frag.borderTopColor = getColor(c.cssAttr["border-top-color"])
if c.cssAttr.has_key("border-bottom-color"):
c.frag.borderBottomColor = getColor(c.cssAttr["border-bottom-color"])
if c.cssAttr.has_key("border-left-color"):
c.frag.borderLeftColor = getColor(c.cssAttr["border-left-color"])
if c.cssAttr.has_key("border-right-color"):
c.frag.borderRightColor = getColor(c.cssAttr["border-right-color"])
def pisaPreLoop(node, context, collect=False):
"""
Collect all CSS definitions
"""
data = u""
if node.nodeType == Node.TEXT_NODE and collect:
data = node.data
elif node.nodeType == Node.ELEMENT_NODE:
name = node.tagName.lower()
# print name, node.attributes.items()
if name in ("style", "link"):
attr = pisaGetAttributes(context, name, node.attributes)
# print " ", attr
media = [x.strip() for x in attr.media.lower().split(",") if x.strip()]
# print repr(media)
if (attr.get("type", "").lower() in ("", "text/css") and (
not media or
"all" in media or
"print" in media or
"pdf" in media)):
if name == "style":
for node in node.childNodes:
data += pisaPreLoop(node, context, collect=True)
context.addCSS(data)
return u""
#collect = True
if name == "link" and attr.href and attr.rel.lower() == "stylesheet":
# print "CSS LINK", attr
context.addCSS('\n@import "%s" %s;' % (attr.href, ",".join(media)))
# context.addCSS(unicode(file(attr.href, "rb").read(), attr.charset))
#else:
# print node.nodeType
for node in node.childNodes:
result = pisaPreLoop(node, context, collect=collect)
if collect:
data += result
return data
def pisaLoop(node, context, path=[], **kw):
# Initialize KW
if not kw:
kw = {
"margin-top": 0,
"margin-bottom": 0,
"margin-left": 0,
"margin-right": 0,
}
else:
kw = copy.copy(kw)
# indent = len(path) * " " # only used for debug print statements
# TEXT
if node.nodeType == Node.TEXT_NODE:
# print indent, "#", repr(node.data) #, context.frag
context.addFrag(node.data)
# context.text.append(node.value)
# ELEMENT
elif node.nodeType == Node.ELEMENT_NODE:
node.tagName = node.tagName.replace(":", "").lower()
if node.tagName in ("style", "script"):
return
path = copy.copy(path) + [node.tagName]
# Prepare attributes
attr = pisaGetAttributes(context, node.tagName, node.attributes)
# log.debug(indent + "<%s %s>" % (node.tagName, attr) + repr(node.attributes.items())) #, path
# Calculate styles
context.cssAttr = CSSCollect(node, context)
context.node = node
# Block?
PAGE_BREAK = 1
PAGE_BREAK_RIGHT = 2
PAGE_BREAK_LEFT = 3
pageBreakAfter = False
frameBreakAfter = False
display = context.cssAttr.get("display", "inline").lower()
# print indent, node.tagName, display, context.cssAttr.get("background-color", None), attr
isBlock = (display == "block")
if isBlock:
context.addPara()
# Page break by CSS
if context.cssAttr.has_key("-pdf-next-page"):
context.addStory(NextPageTemplate(str(context.cssAttr["-pdf-next-page"])))
if context.cssAttr.has_key("-pdf-page-break"):
if str(context.cssAttr["-pdf-page-break"]).lower() == "before":
context.addStory(PageBreak())
if context.cssAttr.has_key("-pdf-frame-break"):
if str(context.cssAttr["-pdf-frame-break"]).lower() == "before":
context.addStory(FrameBreak())
if str(context.cssAttr["-pdf-frame-break"]).lower() == "after":
frameBreakAfter = True
if context.cssAttr.has_key("page-break-before"):
if str(context.cssAttr["page-break-before"]).lower() == "always":
context.addStory(PageBreak())
if str(context.cssAttr["page-break-before"]).lower() == "right":
context.addStory(PageBreak())
context.addStory(PmlRightPageBreak())
if str(context.cssAttr["page-break-before"]).lower() == "left":
context.addStory(PageBreak())
context.addStory(PmlLeftPageBreak())
if context.cssAttr.has_key("page-break-after"):
if str(context.cssAttr["page-break-after"]).lower() == "always":
pageBreakAfter = PAGE_BREAK
if str(context.cssAttr["page-break-after"]).lower() == "right":
pageBreakAfter = PAGE_BREAK_RIGHT
if str(context.cssAttr["page-break-after"]).lower() == "left":
pageBreakAfter = PAGE_BREAK_LEFT
if display == "none":
# print "none!"
return
# Translate CSS to frags
# Save previous frag styles
context.pushFrag()
# Map styles to Reportlab fragment properties
CSS2Frag(context, kw, isBlock)
# EXTRAS
if context.cssAttr.has_key("-pdf-keep-with-next"):
context.frag.keepWithNext = getBool(context.cssAttr["-pdf-keep-with-next"])
if context.cssAttr.has_key("-pdf-outline"):
context.frag.outline = getBool(context.cssAttr["-pdf-outline"])
if context.cssAttr.has_key("-pdf-outline-level"):
context.frag.outlineLevel = int(context.cssAttr["-pdf-outline-level"])
if context.cssAttr.has_key("-pdf-outline-open"):
context.frag.outlineOpen = getBool(context.cssAttr["-pdf-outline-open"])
if context.cssAttr.has_key("-pdf-word-wrap"):
context.frag.wordWrap = context.cssAttr["-pdf-word-wrap"]
# handle keep-in-frame
keepInFrameMode = None
keepInFrameMaxWidth = 0
keepInFrameMaxHeight = 0
if context.cssAttr.has_key("-pdf-keep-in-frame-mode"):
value = str(context.cssAttr["-pdf-keep-in-frame-mode"]).strip().lower()
if value in ("shrink", "error", "overflow", "truncate"):
keepInFrameMode = value
if context.cssAttr.has_key("-pdf-keep-in-frame-max-width"):
keepInFrameMaxWidth = getSize("".join(context.cssAttr["-pdf-keep-in-frame-max-width"]))
if context.cssAttr.has_key("-pdf-keep-in-frame-max-height"):
keepInFrameMaxHeight = getSize("".join(context.cssAttr["-pdf-keep-in-frame-max-height"]))
# ignore nested keep-in-frames, tables have their own KIF handling
keepInFrame = keepInFrameMode is not None and context.keepInFrameIndex is None
if keepInFrame:
            # keep track of current story index, so we can wrap everything
# added after this point in a KeepInFrame
context.keepInFrameIndex = len(context.story)
# BEGIN tag
klass = globals().get("pisaTag%s" % node.tagName.replace(":", "").upper(), None)
obj = None
# Static block
elementId = attr.get("id", None)
staticFrame = context.frameStatic.get(elementId, None)
if staticFrame:
context.frag.insideStaticFrame += 1
oldStory = context.swapStory()
# Tag specific operations
if klass is not None:
obj = klass(node, attr)
obj.start(context)
# Visit child nodes
context.fragBlock = fragBlock = copy.copy(context.frag)
for nnode in node.childNodes:
pisaLoop(nnode, context, path, **kw)
context.fragBlock = fragBlock
# END tag
if obj:
obj.end(context)
# Block?
if isBlock:
context.addPara()
# XXX Buggy!
# Page break by CSS
if pageBreakAfter:
context.addStory(PageBreak())
if pageBreakAfter == PAGE_BREAK_RIGHT:
context.addStory(PmlRightPageBreak())
if pageBreakAfter == PAGE_BREAK_LEFT:
context.addStory(PmlLeftPageBreak())
if frameBreakAfter:
context.addStory(FrameBreak())
if keepInFrame:
# get all content added after start of -pdf-keep-in-frame and wrap
# it in a KeepInFrame
substory = context.story[context.keepInFrameIndex:]
context.story = context.story[:context.keepInFrameIndex]
context.story.append(
KeepInFrame(
content=substory,
maxWidth=keepInFrameMaxWidth,
maxHeight=keepInFrameMaxHeight))
context.keepInFrameIndex = None
# Static block, END
if staticFrame:
context.addPara()
for frame in staticFrame:
frame.pisaStaticStory = context.story
context.swapStory(oldStory)
context.frag.insideStaticFrame -= 1
# context.debug(1, indent, "</%s>" % (node.tagName))
# Reset frag style
context.pullFrag()
# Unknown or not handled
else:
# context.debug(1, indent, "???", node, node.nodeType, repr(node))
# Loop over children
for node in node.childNodes:
pisaLoop(node, context, path, **kw)
def pisaParser(src, context, default_css="", xhtml=False, encoding=None, xml_output=None):
"""
- Parse HTML and get miniDOM
    - Extract CSS information, add default CSS, parse CSS
- Handle the document DOM itself and build reportlab story
- Return Context object
"""
    global CSSAttrCache
    CSSAttrCache = {}  # reset the module-level CSS attribute cache for this parse
if xhtml:
        #TODO: XHTMLParser doesn't seem to exist...
parser = html5lib.XHTMLParser(tree=treebuilders.getTreeBuilder("dom"))
else:
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
if type(src) in types.StringTypes:
if type(src) is types.UnicodeType:
encoding = "utf8"
src = src.encode(encoding)
src = pisaTempFile(src, capacity=context.capacity)
# Test for the restrictions of html5lib
if encoding:
# Workaround for html5lib<0.11.1
if hasattr(inputstream, "isValidEncoding"):
if encoding.strip().lower() == "utf8":
encoding = "utf-8"
if not inputstream.isValidEncoding(encoding):
log.error("%r is not a valid encoding e.g. 'utf8' is not valid but 'utf-8' is!", encoding)
else:
if inputstream.codecName(encoding) is None:
log.error("%r is not a valid encoding", encoding)
document = parser.parse(
src,
encoding=encoding)
if xml_output:
xml_output.write(document.toprettyxml(encoding="utf8"))
if default_css:
context.addCSS(default_css)
pisaPreLoop(document, context)
#try:
context.parseCSS()
#except:
# context.cssText = DEFAULT_CSS
# context.parseCSS()
# context.debug(9, pprint.pformat(context.css))
pisaLoop(document, context)
return context
# Shortcuts
HTML2PDF = pisaParser
def XHTML2PDF(*a, **kw):
kw["xhtml"] = True
return HTML2PDF(*a, **kw)
XML2PDF = XHTML2PDF
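# A usage sketch for the parser entry point defined above. It assumes that
# pisaContext can be imported from xhtml2pdf.context and that a base path is
# an acceptable first argument; both are assumptions. The usual public API
# for most users remains pisa.CreatePDF.
def _example_parse(html="<p>Hello <b>world</b></p>"):
    from xhtml2pdf.context import pisaContext
    context = pisaContext(".")
    context = pisaParser(html, context)
    return context.story  # ReportLab flowables built from the parsed HTML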
| apache-2.0 |
tudorvio/tempest | tempest/api/image/v2/test_images_tags_negative.py | 17 | 1823 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest_lib import exceptions as lib_exc
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest import test
class ImagesTagsNegativeTest(base.BaseV2ImageTest):
@test.attr(type=['negative'])
@test.idempotent_id('8cd30f82-6f9a-4c6e-8034-c1b51fba43d9')
def test_update_tags_for_non_existing_image(self):
# Update tag with non existing image.
tag = data_utils.rand_name('tag')
non_exist_image = str(uuid.uuid4())
self.assertRaises(lib_exc.NotFound, self.client.add_image_tag,
non_exist_image, tag)
@test.attr(type=['negative'])
@test.idempotent_id('39c023a2-325a-433a-9eea-649bf1414b19')
def test_delete_non_existing_tag(self):
# Delete non existing tag.
body = self.create_image(container_format='bare',
disk_format='raw',
visibility='private'
)
image_id = body['id']
tag = data_utils.rand_name('non-exist-tag')
self.addCleanup(self.client.delete_image, image_id)
self.assertRaises(lib_exc.NotFound, self.client.delete_image_tag,
image_id, tag)
| apache-2.0 |
caisq/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/generator_io.py | 39 | 5651 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow generator of dict with numpy arrays (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Container
from types import FunctionType
from types import GeneratorType
from tensorflow.python.estimator.inputs.queues.feeding_functions import _enqueue_data as enqueue_data
from tensorflow.python.util.deprecation import deprecated
@deprecated(None, 'Please use tf.data.')
def generator_input_fn(x,
target_key=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
pad_value=None):
"""Returns input function that returns dicts of numpy arrays
yielded from a generator.
  It is assumed that every dict of numpy arrays yielded from the generator
represents a single sample. The generator should consume a single epoch of the
data.
This returns a function outputting `features` and `target` based on the dict
of numpy arrays. The dict `features` has the same keys as an element yielded
from x.
Example:
```python
def generator():
for index in range(10):
yield {'height': np.random.randint(32,36),
'age': np.random.randint(18, 80),
'label': np.ones(1)}
with tf.Session() as session:
input_fn = generator_io.generator_input_fn(
generator, target_key="label", batch_size=2, shuffle=False,
num_epochs=1)
```
Args:
x: Generator Function, returns a `Generator` that will yield the data
in `dict` of numpy arrays
target_key: String or Container of Strings, the key or Container of keys of
the numpy arrays in x dictionaries to use as target.
batch_size: Integer, size of batches to return.
num_epochs: Integer, number of epochs to iterate over data. If `None` will
run forever.
shuffle: Boolean, if True shuffles the queue. Avoid shuffle at prediction
time.
queue_capacity: Integer, size of queue to accumulate.
num_threads: Integer, number of threads used for reading and enqueueing.
pad_value: default value for dynamic padding of data samples, if provided.
Returns:
Function, that returns a feature `dict` with `Tensors` and an optional
label `dict` with `Tensors`, or if target_key is `str` label is a `Tensor`
Raises:
TypeError: `x` is not `FunctionType`.
TypeError: `x()` is not `GeneratorType`.
TypeError: `next(x())` is not `dict`.
TypeError: `target_key` is not `str` or `target_key` is not `Container`
of `str`.
KeyError: `target_key` not a key or `target_key[index]` not in next(`x()`).
KeyError: `key` mismatch between dicts emitted from `x()`
"""
if not isinstance(x, FunctionType):
raise TypeError(
'x must be generator function; got {}'.format(type(x).__name__))
generator = x()
if not isinstance(generator, GeneratorType):
raise TypeError(
'x() must be generator; got {}'.format(type(generator).__name__))
data = next(generator)
if not isinstance(data, dict):
raise TypeError('x() must yield dict; got {}'.format(type(data).__name__))
input_keys = sorted(next(x()).keys())
if target_key is not None:
if isinstance(target_key, str):
target_key = [target_key]
elif isinstance(target_key, Container):
for item in target_key:
if not isinstance(item, str):
raise TypeError('target_key must be str or Container of str; got {}'.
format(type(item).__name__))
if item not in input_keys:
raise KeyError(
'target_key not in yielded dict. Expected {} keys; got {}'.format(
input_keys, item))
else:
raise TypeError('target_key must be str or Container of str; got {}'.
format(type(target_key).__name__))
def _generator_input_fn():
"""generator input function."""
queue = enqueue_data(
x,
queue_capacity,
shuffle=shuffle,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs,
pad_value=pad_value)
features = (queue.dequeue_many(batch_size)
if num_epochs is None else queue.dequeue_up_to(batch_size))
if not isinstance(features, list):
features = [features]
features = dict(zip(input_keys, features))
if target_key is not None:
if len(target_key) > 1:
target = {key: features.pop(key) for key in target_key}
else:
target = features.pop(target_key[0])
return features, target
return features
return _generator_input_fn
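# A sketch of how the returned input_fn is typically consumed; `estimator`
# stands in for any tf.contrib.learn estimator and is not defined here.
def _example_fit(estimator, generator):
  input_fn = generator_input_fn(generator, target_key='label', batch_size=32)
  estimator.fit(input_fn=input_fn, steps=100)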
| apache-2.0 |
Hellowlol/plexpy | lib/unidecode/x0c4.py | 253 | 5024 | data = (
'sswals', # 0x00
'sswalt', # 0x01
'sswalp', # 0x02
'sswalh', # 0x03
'sswam', # 0x04
'sswab', # 0x05
'sswabs', # 0x06
'sswas', # 0x07
'sswass', # 0x08
'sswang', # 0x09
'sswaj', # 0x0a
'sswac', # 0x0b
'sswak', # 0x0c
'sswat', # 0x0d
'sswap', # 0x0e
'sswah', # 0x0f
'sswae', # 0x10
'sswaeg', # 0x11
'sswaegg', # 0x12
'sswaegs', # 0x13
'sswaen', # 0x14
'sswaenj', # 0x15
'sswaenh', # 0x16
'sswaed', # 0x17
'sswael', # 0x18
'sswaelg', # 0x19
'sswaelm', # 0x1a
'sswaelb', # 0x1b
'sswaels', # 0x1c
'sswaelt', # 0x1d
'sswaelp', # 0x1e
'sswaelh', # 0x1f
'sswaem', # 0x20
'sswaeb', # 0x21
'sswaebs', # 0x22
'sswaes', # 0x23
'sswaess', # 0x24
'sswaeng', # 0x25
'sswaej', # 0x26
'sswaec', # 0x27
'sswaek', # 0x28
'sswaet', # 0x29
'sswaep', # 0x2a
'sswaeh', # 0x2b
'ssoe', # 0x2c
'ssoeg', # 0x2d
'ssoegg', # 0x2e
'ssoegs', # 0x2f
'ssoen', # 0x30
'ssoenj', # 0x31
'ssoenh', # 0x32
'ssoed', # 0x33
'ssoel', # 0x34
'ssoelg', # 0x35
'ssoelm', # 0x36
'ssoelb', # 0x37
'ssoels', # 0x38
'ssoelt', # 0x39
'ssoelp', # 0x3a
'ssoelh', # 0x3b
'ssoem', # 0x3c
'ssoeb', # 0x3d
'ssoebs', # 0x3e
'ssoes', # 0x3f
'ssoess', # 0x40
'ssoeng', # 0x41
'ssoej', # 0x42
'ssoec', # 0x43
'ssoek', # 0x44
'ssoet', # 0x45
'ssoep', # 0x46
'ssoeh', # 0x47
'ssyo', # 0x48
'ssyog', # 0x49
'ssyogg', # 0x4a
'ssyogs', # 0x4b
'ssyon', # 0x4c
'ssyonj', # 0x4d
'ssyonh', # 0x4e
'ssyod', # 0x4f
'ssyol', # 0x50
'ssyolg', # 0x51
'ssyolm', # 0x52
'ssyolb', # 0x53
'ssyols', # 0x54
'ssyolt', # 0x55
'ssyolp', # 0x56
'ssyolh', # 0x57
'ssyom', # 0x58
'ssyob', # 0x59
'ssyobs', # 0x5a
'ssyos', # 0x5b
'ssyoss', # 0x5c
'ssyong', # 0x5d
'ssyoj', # 0x5e
'ssyoc', # 0x5f
'ssyok', # 0x60
'ssyot', # 0x61
'ssyop', # 0x62
'ssyoh', # 0x63
'ssu', # 0x64
'ssug', # 0x65
'ssugg', # 0x66
'ssugs', # 0x67
'ssun', # 0x68
'ssunj', # 0x69
'ssunh', # 0x6a
'ssud', # 0x6b
'ssul', # 0x6c
'ssulg', # 0x6d
'ssulm', # 0x6e
'ssulb', # 0x6f
'ssuls', # 0x70
'ssult', # 0x71
'ssulp', # 0x72
'ssulh', # 0x73
'ssum', # 0x74
'ssub', # 0x75
'ssubs', # 0x76
'ssus', # 0x77
'ssuss', # 0x78
'ssung', # 0x79
'ssuj', # 0x7a
'ssuc', # 0x7b
'ssuk', # 0x7c
'ssut', # 0x7d
'ssup', # 0x7e
'ssuh', # 0x7f
'ssweo', # 0x80
'ssweog', # 0x81
'ssweogg', # 0x82
'ssweogs', # 0x83
'ssweon', # 0x84
'ssweonj', # 0x85
'ssweonh', # 0x86
'ssweod', # 0x87
'ssweol', # 0x88
'ssweolg', # 0x89
'ssweolm', # 0x8a
'ssweolb', # 0x8b
'ssweols', # 0x8c
'ssweolt', # 0x8d
'ssweolp', # 0x8e
'ssweolh', # 0x8f
'ssweom', # 0x90
'ssweob', # 0x91
'ssweobs', # 0x92
'ssweos', # 0x93
'ssweoss', # 0x94
'ssweong', # 0x95
'ssweoj', # 0x96
'ssweoc', # 0x97
'ssweok', # 0x98
'ssweot', # 0x99
'ssweop', # 0x9a
'ssweoh', # 0x9b
'sswe', # 0x9c
'ssweg', # 0x9d
'sswegg', # 0x9e
'sswegs', # 0x9f
'sswen', # 0xa0
'sswenj', # 0xa1
'sswenh', # 0xa2
'sswed', # 0xa3
'sswel', # 0xa4
'sswelg', # 0xa5
'sswelm', # 0xa6
'sswelb', # 0xa7
'sswels', # 0xa8
'sswelt', # 0xa9
'sswelp', # 0xaa
'sswelh', # 0xab
'sswem', # 0xac
'ssweb', # 0xad
'sswebs', # 0xae
'sswes', # 0xaf
'sswess', # 0xb0
'ssweng', # 0xb1
'sswej', # 0xb2
'sswec', # 0xb3
'sswek', # 0xb4
'sswet', # 0xb5
'sswep', # 0xb6
'ssweh', # 0xb7
'sswi', # 0xb8
'sswig', # 0xb9
'sswigg', # 0xba
'sswigs', # 0xbb
'sswin', # 0xbc
'sswinj', # 0xbd
'sswinh', # 0xbe
'sswid', # 0xbf
'sswil', # 0xc0
'sswilg', # 0xc1
'sswilm', # 0xc2
'sswilb', # 0xc3
'sswils', # 0xc4
'sswilt', # 0xc5
'sswilp', # 0xc6
'sswilh', # 0xc7
'sswim', # 0xc8
'sswib', # 0xc9
'sswibs', # 0xca
'sswis', # 0xcb
'sswiss', # 0xcc
'sswing', # 0xcd
'sswij', # 0xce
'sswic', # 0xcf
'sswik', # 0xd0
'sswit', # 0xd1
'sswip', # 0xd2
'sswih', # 0xd3
'ssyu', # 0xd4
'ssyug', # 0xd5
'ssyugg', # 0xd6
'ssyugs', # 0xd7
'ssyun', # 0xd8
'ssyunj', # 0xd9
'ssyunh', # 0xda
'ssyud', # 0xdb
'ssyul', # 0xdc
'ssyulg', # 0xdd
'ssyulm', # 0xde
'ssyulb', # 0xdf
'ssyuls', # 0xe0
'ssyult', # 0xe1
'ssyulp', # 0xe2
'ssyulh', # 0xe3
'ssyum', # 0xe4
'ssyub', # 0xe5
'ssyubs', # 0xe6
'ssyus', # 0xe7
'ssyuss', # 0xe8
'ssyung', # 0xe9
'ssyuj', # 0xea
'ssyuc', # 0xeb
'ssyuk', # 0xec
'ssyut', # 0xed
'ssyup', # 0xee
'ssyuh', # 0xef
'sseu', # 0xf0
'sseug', # 0xf1
'sseugg', # 0xf2
'sseugs', # 0xf3
'sseun', # 0xf4
'sseunj', # 0xf5
'sseunh', # 0xf6
'sseud', # 0xf7
'sseul', # 0xf8
'sseulg', # 0xf9
'sseulm', # 0xfa
'sseulb', # 0xfb
'sseuls', # 0xfc
'sseult', # 0xfd
'sseulp', # 0xfe
'sseulh', # 0xff
)
| gpl-3.0 |
lepistone/stock-logistics-workflow | __unported__/stock_picking_invoice_link/stock.py | 7 | 4287 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class stock_move(orm.Model):
_inherit = "stock.move"
_columns = {
'invoice_line_id': fields.many2one(
'account.invoice.line', 'Invoice line', readonly=True),
}
class stock_picking(orm.Model):
_inherit = "stock.picking"
def _get_invoice_view_xmlid(self, cr, uid, ids, name, arg, context=None):
res = {}
for pick in self.browse(cr, uid, ids, context=context):
if pick.invoice_id:
if pick.invoice_id.type in ('in_invoice', 'in_refund'):
res[pick.id] = 'account.invoice_supplier_form'
else:
res[pick.id] = 'account.invoice_form'
else:
res[pick.id] = False
return res
_columns = {
'invoice_id': fields.many2one(
'account.invoice', 'Invoice', readonly=True),
'invoice_view_xmlid': fields.function(
_get_invoice_view_xmlid, type='char', string="Invoice View XMLID",
readonly=True),
}
def _invoice_hook(self, cr, uid, picking, invoice_id):
res = super(stock_picking, self)._invoice_hook(
cr, uid, picking, invoice_id)
picking.write({'invoice_id': invoice_id})
return res
def _invoice_line_hook(self, cr, uid, move_line, invoice_line_id):
res = super(stock_picking, self)._invoice_line_hook(
cr, uid, move_line, invoice_line_id)
move_line.write({'invoice_line_id': invoice_line_id})
return res
class stock_picking_out(orm.Model):
_inherit = "stock.picking.out"
def _out_get_invoice_view_xmlid(
self, cr, uid, ids, name, arg, context=None):
return self.pool['stock.picking']._get_invoice_view_xmlid(
cr, uid, ids, name, arg, context=context)
_columns = {
'invoice_id': fields.many2one(
'account.invoice', 'Invoice', readonly=True),
'invoice_view_xmlid': fields.function(
_out_get_invoice_view_xmlid, type='char',
string="Invoice View XMLID", readonly=True),
}
class stock_picking_in(orm.Model):
_inherit = "stock.picking.in"
def _in_get_invoice_view_xmlid(
self, cr, uid, ids, name, arg, context=None):
return self.pool['stock.picking']._get_invoice_view_xmlid(
cr, uid, ids, name, arg, context=context)
_columns = {
'invoice_id': fields.many2one(
'account.invoice', 'Invoice', readonly=True),
'invoice_view_xmlid': fields.function(
_in_get_invoice_view_xmlid, type='char',
string="Invoice View XMLID", readonly=True),
}
class account_invoice(orm.Model):
_inherit = "account.invoice"
_columns = {
'picking_ids': fields.one2many(
'stock.picking', 'invoice_id', 'Related Pickings', readonly=True,
help="Related pickings (only when the invoice has been generated from the picking)."),
}
class account_invoice_line(orm.Model):
_inherit = "account.invoice.line"
_columns = {
'move_line_ids': fields.one2many(
'stock.move', 'invoice_line_id', 'Related Stock Moves',
readonly=True,
help="Related stock moves (only when the invoice has been generated from the picking)."),
}
| agpl-3.0 |