repo_name | path | copies | size | content | license
---|---|---|---|---|---
mindnervestech/mnrp | addons/crm_claim/crm_claim.py | 333 | 10079 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.addons.crm import crm
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
from openerp.tools import html2plaintext
class crm_claim_stage(osv.osv):
""" Model for claim stages. This models the main stages of a claim
management flow. Main CRM objects (leads, opportunities, project
issues, ...) will now use only stages, instead of state and stages.
Stages are for example used to display the kanban view of records.
"""
_name = "crm.claim.stage"
_description = "Claim stages"
_rec_name = 'name'
_order = "sequence"
_columns = {
'name': fields.char('Stage Name', required=True, translate=True),
'sequence': fields.integer('Sequence', help="Used to order stages. Lower is better."),
'section_ids': fields.many2many('crm.case.section', 'section_claim_stage_rel', 'stage_id', 'section_id', string='Sections',
help="Link between stages and sales teams. When set, this limits the stage to the selected sales teams."),
'case_default': fields.boolean('Common to All Teams',
help="If you check this field, this stage will be proposed by default on each sales team. It will not assign this stage to existing teams."),
}
_defaults = {
'sequence': lambda *args: 1,
}
class crm_claim(osv.osv):
""" Crm claim
"""
_name = "crm.claim"
_description = "Claim"
_order = "priority,date desc"
_inherit = ['mail.thread']
def _get_default_section_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
return self.pool.get('crm.lead')._resolve_section_id_from_context(cr, uid, context=context) or False
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
section_id = self._get_default_section_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], section_id, [('sequence', '=', '1')], context=context)
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Claim Subject', required=True),
'active': fields.boolean('Active'),
'action_next': fields.char('Next Action'),
'date_action_next': fields.datetime('Next Action Date'),
'description': fields.text('Description'),
'resolution': fields.text('Resolution'),
'create_date': fields.datetime('Creation Date', readonly=True),
'write_date': fields.datetime('Update Date', readonly=True),
'date_deadline': fields.date('Deadline'),
'date_closed': fields.datetime('Closed', readonly=True),
'date': fields.datetime('Claim Date', select=True),
'ref': fields.reference('Reference', selection=openerp.addons.base.res.res_request.referencable_models),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="[('section_id','=',section_id),\
('object_id.model', '=', 'crm.claim')]"),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'type_action': fields.selection([('correction','Corrective Action'),('prevention','Preventive Action')], 'Action Type'),
'user_id': fields.many2one('res.users', 'Responsible', track_visibility='always'),
'user_fault': fields.char('Trouble Responsible'),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help="Responsible sales team."\
" Define Responsible user and Email account for"\
" mail gateway."),
'company_id': fields.many2one('res.company', 'Company'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'email_cc': fields.text('Watchers Emails', size=252, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'email_from': fields.char('Email', size=128, help="Destination email for email gateway."),
'partner_phone': fields.char('Phone'),
'stage_id': fields.many2one('crm.claim.stage', 'Stage', track_visibility='onchange',
domain="['|', ('section_ids', '=', section_id), ('case_default', '=', True)]"),
'cause': fields.text('Root Cause'),
}
_defaults = {
'user_id': lambda s, cr, uid, c: uid,
'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, c),
'date': fields.datetime.now,
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.case', context=c),
'priority': '1',
'active': lambda *a: 1,
'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c)
}
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- section_id: if set, stages must belong to this section or
be a default case
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for claim in cases:
if claim.section_id:
section_ids.append(claim.section_id.id)
# OR all section_ids and OR with case_default
search_domain = []
if section_ids:
search_domain += [('|')] * len(section_ids)
for section_id in section_ids:
search_domain.append(('section_ids', '=', section_id))
search_domain.append(('case_default', '=', True))
# AND with the domain in parameter
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('crm.claim.stage').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
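    # Illustrative note (not part of the original module): with
    # section_ids = [1, 2], the loop above builds the prefix-notation domain
    #   ['|', '|', ('section_ids', '=', 1), ('section_ids', '=', 2),
    #    ('case_default', '=', True)]
    # i.e. stages linked to either team, OR-ed with stages common to all teams.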
def onchange_partner_id(self, cr, uid, ids, partner_id, email=False, context=None):
"""This function returns value of partner address based on partner
:param email: ignored
"""
if not partner_id:
return {'value': {'email_from': False, 'partner_phone': False}}
address = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
return {'value': {'email_from': address.email, 'partner_phone': address.phone}}
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
if vals.get('section_id') and not context.get('default_section_id'):
context['default_section_id'] = vals.get('section_id')
# context: no_log, because the subtype already handles this
return super(crm_claim, self).create(cr, uid, vals, context=context)
def copy(self, cr, uid, id, default=None, context=None):
claim = self.browse(cr, uid, id, context=context)
default = dict(default or {},
stage_id = self._get_default_stage_id(cr, uid, context=context),
name = _('%s (copy)') % claim.name)
return super(crm_claim, self).copy(cr, uid, id, default, context=context)
# -------------------------------------------------------
# Mail gateway
# -------------------------------------------------------
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
desc = html2plaintext(msg.get('body')) if msg.get('body') else ''
defaults = {
'name': msg.get('subject') or _("No Subject"),
'description': desc,
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'partner_id': msg.get('author_id', False),
}
if msg.get('priority'):
defaults['priority'] = msg.get('priority')
defaults.update(custom_values)
return super(crm_claim, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
class res_partner(osv.osv):
_inherit = 'res.partner'
def _claim_count(self, cr, uid, ids, field_name, arg, context=None):
Claim = self.pool['crm.claim']
return {
partner_id: Claim.search_count(cr, uid, [('partner_id', '=', partner_id)], context=context)
for partner_id in ids
}
_columns = {
'claim_count': fields.function(_claim_count, string='# Claims', type='integer'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
JianfengXu/crosswalk-test-suite | stability/stability-lowresource-android-tests/lowresource/TestApp.py | 3 | 6765 | #!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Li, Hao<[email protected]>
import sys
import commands
import subprocess
reload(sys)
sys.setdefaultencoding('utf-8')
ADB_CMD = "adb"
def doCMD(cmd):
# No need to handle timeouts in this short script; let the calling tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
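def _demo_doCMD():
    # Hedged usage sketch (not part of the original script; the device id
    # "emulator-5554" is made up): list installed packages and report the
    # exit status along with how many lines were captured.
    (return_code, output) = doCMD("%s -s emulator-5554 shell pm list packages" % ADB_CMD)
    print "-->> doCMD returned %s with %d output lines" % (return_code, len(output))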
class TestApp():
device = ""
location = ""
pkgname = ""
activname = ""
def __init__(self, device, location, pkgname, activname):
self.device = device
self.location = location
self.pkgname = pkgname
self.activname = activname
def install(self):
action_status = False
if self.location.endswith(".apk"):
if not self.isInstalled():
cmd = "%s -s %s install %s" % (ADB_CMD, self.device, self.location)
(return_code, output) = doCMD(cmd)
if self.isInstalled():
action_status = True
else:
print "-->> %s fail to install." % self.location
else:
print "-->> %s has been installed." % self.pkgname
else:
print "-->> Invalid apk location: %s " % self.location
return action_status
def uninstall(self):
action_status = False
if self.isInstalled():
cmd = "%s -s %s uninstall %s" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
if not self.isInstalled():
action_status = True
else:
print "-->> %s fail to uninstall." % self.pkgname
else:
print "-->> %s has not been installed." % self.pkgname
return action_status
def launch(self):
action_status = False
if not self.isRunning():
cmd = "%s -s %s shell am start -n %s/.%s" % (ADB_CMD, self.device, self.pkgname, self.activname)
(return_code, output) = doCMD(cmd)
if self.isRunning():
action_status = True
else:
print "-->> %s fail to launch." % self.pkgname
else:
print "-->> %s has been launched." % self.pkgname
return action_status
def switch(self):
action_status = False
# If in Activity, switch to background, otherwise switch to front
if self.isActivity():
# Switch to Home
# keycode
# 3 --> "KEYCODE_HOME"
cmd = "%s -s %s shell input keyevent 3" % (ADB_CMD, self.device)
(return_code, output) = doCMD(cmd)
if not self.isActivity():
action_status = True
else:
print "-->> %s fail to switch to background." % self.pkgname
else:
cmd = "%s -s %s shell am start -n %s/.%s" % (ADB_CMD, self.device, self.pkgname, self.activname)
(return_code, output) = doCMD(cmd)
if self.isActivity():
action_status = True
else:
print "-->> %s fail to switch to front." % self.pkgname
return action_status
def stop(self):
action_status = False
if self.isRunning():
cmd = "%s -s %s shell am force-stop %s" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
if not self.isRunning():
action_status = True
else:
print "-->> %s fail to stop." % self.pkgname
else:
print "-->> %s has been stoped." % self.pkgname
return action_status
def isInstalled(self):
action_status = False
if not self.pkgname == "":
cmd = "%s -s %s shell pm list packages |grep %s" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
for line in output:
if self.pkgname in line:
action_status = True
break
return action_status
def isRunning(self):
action_status = False
if not self.pkgname == "":
cmd = "%s -s %s shell ps |grep %s" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
for line in output:
if self.pkgname in line:
action_status = True
break
return action_status
def isActivity(self):
action_status = False
if not self.pkgname == "":
cmd = "%s -s %s shell dumpsys activity |grep \"%s\"" % (ADB_CMD, self.device, "Recent #0")
(return_code, output) = doCMD(cmd)
for line in output:
if self.pkgname in line:
action_status = True
break
return action_status
| bsd-3-clause |
calfonso/ansible | contrib/inventory/scaleway.py | 20 | 7196 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
External inventory script for Scaleway
====================================
Shamelessly copied from an existing inventory script.
This script generates an inventory that Ansible can understand by making API requests to Scaleway API
Requires the requests library; make sure it is installed before using this script (pip install requests, https://pypi.python.org/pypi/requests).
Before using this script you may want to modify scaleway.ini config file.
This script generates an Ansible hosts file with these host groups:
<hostname>: Defines host itself with Scaleway's hostname as group name.
<tag>: Contains all hosts which has "<tag>" as tag.
<region>: Contains all hosts which are in the "<region>" region.
all: Contains all hosts defined in Scaleway.
'''
# (c) 2017, Paul B. <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import copy
import os
import requests
import six
from six.moves import configparser
import sys
import time
import traceback
try:
import json
except ImportError:
import simplejson as json
EMPTY_GROUP = {
'children': [],
'hosts': []
}
class ScalewayAPI:
REGIONS = ['par1', 'ams1']
def __init__(self, auth_token, region):
self.session = requests.session()
self.session.headers.update({
'User-Agent': 'Ansible Python/%s' % (sys.version.split(' ')[0])
})
self.session.headers.update({
'X-Auth-Token': auth_token.encode('latin1')
})
self.base_url = 'https://cp-%s.scaleway.com' % (region)
def servers(self):
raw = self.session.get('/'.join([self.base_url, 'servers']))
try:
response = raw.json()
return self.get_resource('servers', response, raw)
except ValueError:
return []
def get_resource(self, resource, response, raw):
raw.raise_for_status()
if resource in response:
return response[resource]
else:
raise ValueError(
"Resource %s not found in Scaleway API response" % (resource))
def env_or_param(env_key, param=None, fallback=None):
env_value = os.environ.get(env_key)
if (param, env_value) == (None, None):
return fallback
elif env_value is not None:
return env_value
else:
return param
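def _env_or_param_examples():
    # Hedged, illustrative checks (not part of the upstream script): the
    # environment variable wins over the config parameter, and the fallback
    # is only used when both are missing.
    os.environ['SCALEWAY_REGION'] = 'ams1'
    assert env_or_param('SCALEWAY_REGION', param='par1') == 'ams1'
    del os.environ['SCALEWAY_REGION']
    assert env_or_param('SCALEWAY_REGION', param='par1') == 'par1'
    assert env_or_param('SCALEWAY_REGION', fallback='par1') == 'par1'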
def save_cache(data, config):
''' saves item to cache '''
dpath = config.get('cache', 'cache_dir')
try:
cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'w')
cache.write(json.dumps(data))
cache.close()
except IOError as e:
pass # not really sure what to do here
def get_cache(cache_item, config):
''' returns cached item '''
dpath = config.get('cache', 'cache_dir')
inv = {}
try:
cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'r')
inv = cache.read()
cache.close()
except IOError as e:
pass # not really sure what to do here
return inv
def cache_available(config):
''' checks if we have a 'fresh' cache available for item requested '''
if config.has_option('cache', 'cache_dir'):
dpath = config.get('cache', 'cache_dir')
try:
existing = os.stat(
'/'.join([dpath, 'scaleway_ansible_inventory.json']))
except OSError:
return False
if config.has_option('cache', 'cache_max_age'):
maxage = config.get('cache', 'cache_max_age')
else:
maxage = 60
if (int(time.time()) - int(existing.st_mtime)) <= int(maxage):
return True
return False
def generate_inv_from_api(config):
try:
inventory['all'] = copy.deepcopy(EMPTY_GROUP)
if config.has_option('auth', 'api_token'):
auth_token = config.get('auth', 'api_token')
auth_token = env_or_param('SCALEWAY_TOKEN', param=auth_token)
if auth_token is None:
sys.stderr.write('ERROR: missing authentication token for Scaleway API')
sys.exit(1)
if config.has_option('compute', 'regions'):
regions = config.get('compute', 'regions')
if regions == 'all':
regions = ScalewayAPI.REGIONS
else:
regions = map(str.strip, regions.split(','))
else:
regions = [
env_or_param('SCALEWAY_REGION', fallback='par1')
]
for region in regions:
api = ScalewayAPI(auth_token, region)
for server in api.servers():
hostname = server['hostname']
if config.has_option('defaults', 'public_ip_only') and config.getboolean('defaults', 'public_ip_only'):
ip = server['public_ip']['address']
else:
ip = server['private_ip']
for server_tag in server['tags']:
if server_tag not in inventory:
inventory[server_tag] = copy.deepcopy(EMPTY_GROUP)
inventory[server_tag]['children'].append(hostname)
if region not in inventory:
inventory[region] = copy.deepcopy(EMPTY_GROUP)
inventory[region]['children'].append(hostname)
inventory['all']['children'].append(hostname)
inventory[hostname] = []
inventory[hostname].append(ip)
return inventory
except Exception:
# Return empty hosts output
traceback.print_exc()
return {'all': {'hosts': []}, '_meta': {'hostvars': {}}}
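# Shape sketch (hostname, tag and IP below are made up): a successful run
# returns something like
#   {'all':   {'children': ['web-1'], 'hosts': []},
#    'web':   {'children': ['web-1'], 'hosts': []},   # tag group
#    'par1':  {'children': ['web-1'], 'hosts': []},   # region group
#    'web-1': ['10.1.2.3']}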
def get_inventory(config):
''' Reads the inventory from cache or Scaleway api '''
if cache_available(config):
inv = get_cache('scaleway_ansible_inventory.json', config)
else:
inv = generate_inv_from_api(config)
save_cache(inv, config)
return json.dumps(inv)
if __name__ == '__main__':
inventory = {}
# Read config
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
for configfilename in [os.path.abspath(sys.argv[0]).rsplit('.py')[0] + '.ini', 'scaleway.ini']:
if os.path.exists(configfilename):
config.read(configfilename)
break
if cache_available(config):
inventory = get_cache('scaleway_ansible_inventory.json', config)
else:
inventory = get_inventory(config)
# return to ansible
sys.stdout.write(str(inventory))
sys.stdout.flush()
| gpl-3.0 |
kirca/odoo | addons/account_voucher/report/__init__.py | 378 | 1083 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_voucher_sales_receipt
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lucidbard/NewsBlur | vendor/oauth2client/anyjson.py | 302 | 1044 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module to import a JSON module
Hides all the messy details of exactly where
we get a simplejson module from.
"""
__author__ = '[email protected] (Joe Gregorio)'
try: # pragma: no cover
# Should work for Python2.6 and higher.
import json as simplejson
except ImportError: # pragma: no cover
try:
import simplejson
except ImportError:
# Try to import from django, should work on App Engine
from django.utils import simplejson
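# Hedged sanity check (not part of the original module): whichever backend
# was imported above, the alias exposes the familiar dumps/loads API.
assert simplejson.loads(simplejson.dumps({'ok': True})) == {'ok': True}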
| mit |
yi719/python-driver | cassandra/murmur3.py | 15 | 2387 | from six.moves import range
import struct
def body_and_tail(data):
l = len(data)
nblocks = l // 16
tail = l % 16
if nblocks:
return struct.unpack_from('qq' * nblocks, data), struct.unpack_from('b' * tail, data, -tail), l
else:
return tuple(), struct.unpack_from('b' * tail, data, -tail), l
def rotl64(x, r):
# note: not a general-purpose rotate because it leaves the high-order bits intact;
# that is fine for this use case and avoids wasting cycles
mask = 2 ** r - 1
rotated = (x << r) | ((x >> 64 - r) & mask)
return rotated
def fmix(k):
# masking off the 31s bits that would be leftover after >> 33 a 64-bit number
k ^= (k >> 33) & 0x7fffffff
k *= 0xff51afd7ed558ccd
k ^= (k >> 33) & 0x7fffffff
k *= 0xc4ceb9fe1a85ec53
k ^= (k >> 33) & 0x7fffffff
return k
INT64_MAX = int(2 ** 63 - 1)
INT64_MIN = -INT64_MAX - 1
INT64_OVF_OFFSET = INT64_MAX + 1
INT64_OVF_DIV = 2 * INT64_OVF_OFFSET
def truncate_int64(x):
if not INT64_MIN <= x <= INT64_MAX:
x = (x + INT64_OVF_OFFSET) % INT64_OVF_DIV - INT64_OVF_OFFSET
return x
def _murmur3(data):
h1 = h2 = 0
c1 = -8663945395140668459 # 0x87c37b91114253d5
c2 = 0x4cf5ad432745937f
body, tail, total_len = body_and_tail(data)
# body
for i in range(0, len(body), 2):
k1 = body[i]
k2 = body[i + 1]
k1 *= c1
k1 = rotl64(k1, 31)
k1 *= c2
h1 ^= k1
h1 = rotl64(h1, 27)
h1 += h2
h1 = h1 * 5 + 0x52dce729
k2 *= c2
k2 = rotl64(k2, 33)
k2 *= c1
h2 ^= k2
h2 = rotl64(h2, 31)
h2 += h1
h2 = h2 * 5 + 0x38495ab5
# tail
k1 = k2 = 0
len_tail = len(tail)
if len_tail > 8:
for i in range(len_tail - 1, 7, -1):
k2 ^= tail[i] << (i - 8) * 8
k2 *= c2
k2 = rotl64(k2, 33)
k2 *= c1
h2 ^= k2
if len_tail:
for i in range(min(7, len_tail - 1), -1, -1):
k1 ^= tail[i] << i * 8
k1 *= c1
k1 = rotl64(k1, 31)
k1 *= c2
h1 ^= k1
# finalization
h1 ^= total_len
h2 ^= total_len
h1 += h2
h2 += h1
h1 = fmix(h1)
h2 = fmix(h2)
h1 += h2
return truncate_int64(h1)
try:
from cassandra.cmurmur3 import murmur3
except ImportError:
murmur3 = _murmur3
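if __name__ == '__main__':
    # Hedged demo (not part of the driver; the key is made up): print the
    # signed 64-bit token for a sample partition key, as Cassandra's
    # Murmur3Partitioner would compute it.
    print(murmur3(b'sample-key'))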
| apache-2.0 |
littlstar/chromium.src | chrome/common/extensions/docs/server2/github_file_system_provider.py | 121 | 1611 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from caching_file_system import CachingFileSystem
from empty_dir_file_system import EmptyDirFileSystem
from github_file_system import GithubFileSystem as OldGithubFileSystem
from new_github_file_system import GithubFileSystem as NewGithubFileSystem
class GithubFileSystemProvider(object):
'''Provides GithubFileSystems bound to an owner/repo pair.
'''
def __init__(self, object_store_creator):
self._object_store_creator = object_store_creator
def Create(self, owner, repo):
'''Creates a GithubFileSystem. For legacy reasons this is hacked
such that the apps samples returns the old GithubFileSystem.
|owner| is the owner of the GitHub account, e.g. 'GoogleChrome'.
|repo| is the repository name, e.g. 'devtools-docs'.
'''
if owner == 'GoogleChrome' and repo == 'chrome-app-samples':
# NOTE: The old GitHub file system implementation doesn't support being
# wrapped by a CachingFileSystem. It's also too slow to run on the dev
# server, since every app API page would need to read from it.
return OldGithubFileSystem.CreateChromeAppsSamples(
self._object_store_creator)
return CachingFileSystem(
NewGithubFileSystem.Create(owner, repo, self._object_store_creator),
self._object_store_creator)
@staticmethod
def ForEmpty():
class EmptyImpl(object):
def Create(self, owner, repo):
return EmptyDirFileSystem()
return EmptyImpl()
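# Illustrative wiring sketch (the object_store_creator value is assumed to
# come from the server's setup code):
#   provider = GithubFileSystemProvider(object_store_creator)
#   fs = provider.Create('GoogleChrome', 'devtools-docs')
#   empty_fs = GithubFileSystemProvider.ForEmpty().Create('owner', 'repo')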
| bsd-3-clause |
errordeveloper/fe-devel | Native/ThirdParty/Private/Python/js_beautifier.py | 4 | 22785 | import sys
import os
import exceptions
import glob
fileTypes = ['.js','.kl','.html']
controls = ['case', 'default', 'do', 'else','for', 'if','while','throw', 'switch', 'catch']
keywords = ['break', 'continue', 'finally', 'return',
'try', 'var', 'with', 'delete', 'new', 'typeof', 'instanceof', '#include']
functions = ['function', 'operator']
curly = ['{', '}']
brace = ['(', ')']
bracket = ['[', ']']
allbrackets = []
allbrackets.extend(curly)
allbrackets.extend(brace)
allbrackets.extend(bracket)
quotes = ['"', "'"]
whitespace = [' ', '\n']
comment = ['//', '/*', '*/']
semicolon = [';']
comma = [',','.']
unaoperators = ['++', '--', '>>', '<<']
binoperators = ['===', '!==', '<<=', '>>=', '+=', '-=', '/=', '*=', '%=',
'||', '&&', '>=', '<=', '==', '!=', '^=', '&=', '|=',
'+', '-', '/', '*', '%', '>', '<', ':', '?', '&', '^', '=', '!']
operators = []
operators.extend(unaoperators)
operators.extend(binoperators)
splitters = []
splitters.extend(comment)
splitters.extend(comma)
splitters.extend(semicolon)
splitters.extend(allbrackets)
splitters.extend(quotes)
splitters.extend(whitespace)
splitters.extend(operators)
TYPE_CONTROL = 0
TYPE_KEYWORD = 1
TYPE_FUNCTION = 2
TYPE_CURLY = 4
TYPE_BRACE = 8
TYPE_BRACKET = 16
TYPE_ALL_BRACKETS = TYPE_CURLY | TYPE_BRACE | TYPE_BRACKET
TYPE_QUOTE = 32
TYPE_WHITESPACE = 64
TYPE_COMMENT = 128
TYPE_NO_CODE = TYPE_WHITESPACE | TYPE_COMMENT
TYPE_SEMICOLON = 256
TYPE_COMMA = 512
TYPE_BINOPERATOR = 1024
TYPE_UNAOPERATOR = 2048
TYPE_OPERATOR = TYPE_BINOPERATOR | TYPE_UNAOPERATOR
TYPE_IDENTIFIER = 4096
class token():
string = ''
type = ''
index = -1
def __init__(self,string,type = TYPE_IDENTIFIER,index = 0):
self.string = string
self.type = type
self.index = index
def isTypeOf(self,type):
return (self.type & type) != 0
def tokenize(content):
# first some basic formatting
content = content.replace('\t',' ')
# get all of the words
words = []
while len(content) > 0:
minSplitIndex = len(content)
minSplitter = ''
for i in range(len(splitters)):
split = content.partition(splitters[i])
if len(split[1]) > 0:
if len(split[0]) < minSplitIndex:
minSplitIndex = len(split[0])
minSplitter = splitters[i]
if minSplitIndex == len(content):
words.append(content)
content = ''
else:
split = content.partition(minSplitter)
if len(split[0]) > 0:
words.append(split[0])
words.append(split[1])
content = split[2]
# parse the words to tokens
tokens = []
for word in words:
tokenIdentified = False
if not tokenIdentified:
for i in range(len(controls)):
if(word == controls[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_CONTROL,i))
break
if not tokenIdentified:
for i in range(len(keywords)):
if(word == keywords[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_KEYWORD,i))
break
if not tokenIdentified:
for i in range(len(functions)):
if(word == functions[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_FUNCTION,i))
break
if not tokenIdentified:
for i in range(len(curly)):
if(word == curly[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_CURLY,i))
break
if not tokenIdentified:
for i in range(len(brace)):
if(word == brace[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_BRACE,i))
break
if not tokenIdentified:
for i in range(len(bracket)):
if(word == bracket[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_BRACKET,i))
break
if not tokenIdentified:
for i in range(len(quotes)):
if(word == quotes[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_QUOTE,i))
break
if not tokenIdentified:
for i in range(len(whitespace)):
if(word == whitespace[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_WHITESPACE,i))
break
if not tokenIdentified:
for i in range(len(comment)):
if(word == comment[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_COMMENT,i))
break
if not tokenIdentified:
for i in range(len(semicolon)):
if(word == semicolon[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_SEMICOLON,i))
break
if not tokenIdentified:
for i in range(len(comma)):
if(word == comma[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_COMMA,i))
break
if not tokenIdentified:
for i in range(len(binoperators)):
if(word == binoperators[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_BINOPERATOR,i))
break
if not tokenIdentified:
for i in range(len(unaoperators)):
if(word == unaoperators[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_UNAOPERATOR,i))
break
if not tokenIdentified:
tokenIdentified = True
tokens.append(token(word,TYPE_IDENTIFIER,0))
# now since we know the tokens, let's simply some of them
# simplify the comment tokens into single tokens
newTokens = []
lastToken = False
for i in range(len(tokens)):
if(lastToken):
if(lastToken.index == 0):
if(tokens[i].type == TYPE_WHITESPACE and tokens[i].index == 1):
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
else:
lastToken.string += tokens[i].string;
elif(lastToken.index == 1):
lastToken.string += tokens[i].string;
if(tokens[i].type == TYPE_COMMENT and tokens[i].index == 2):
newTokens.append(lastToken)
lastToken = False
elif(tokens[i].type == TYPE_COMMENT):
lastToken = tokens[i]
else:
newTokens.append(tokens[i])
if(lastToken):
newTokens.append(lastToken)
tokens = newTokens
# simplify the string tokens into single tokens
newTokens = []
lastToken = False
for i in range(len(tokens)):
if(lastToken):
if(tokens[i].type == TYPE_QUOTE):
if(tokens[i].index == lastToken.index):
lastToken.string += "'"
newTokens.append(lastToken)
lastToken = False
else:
lastToken.string += '"'
else:
lastToken.string += tokens[i].string
elif(tokens[i].type == TYPE_QUOTE):
lastToken = tokens[i]
lastToken.string = "'" # prefer singles
else:
newTokens.append(tokens[i])
if(lastToken):
newTokens.append(lastToken)
tokens = newTokens
# simplify the numeric tokens into single tokens
newTokens = []
lastToken = False
for i in range(len(tokens)-1):
if(lastToken):
if(tokens[i].type == TYPE_IDENTIFIER):
if(tokens[i].string == 'e' and lastToken.string.find('e') == -1):
lastToken.string += tokens[i].string;
else:
try:
intvalue = int(tokens[i].string[0:1])
lastToken.string += tokens[i].string;
except Exception:
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
elif(tokens[i].type == TYPE_COMMA and tokens[i].index == 1 and lastToken.string.endswith('e')):
lastToken.string += tokens[i].string;
elif(tokens[i].type == TYPE_BINOPERATOR and tokens[i].string == '-' and tokens[i+1].type == TYPE_IDENTIFIER):
try:
intvalue = int(tokens[i+1].string[0:1])
lastToken.string += tokens[i].string;
except Exception:
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
else:
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
elif(tokens[i].type == TYPE_IDENTIFIER):
try:
intvalue = int(tokens[i].string[0:1])
lastToken = tokens[i]
except Exception:
newTokens.append(tokens[i])
else:
newTokens.append(tokens[i])
if(lastToken):
newTokens.append(lastToken)
newTokens.append(tokens[len(tokens)-1])
tokens = newTokens
# simplify the regex tokens into single tokens
newTokens = []
startIndex = -1
endIndex = -1
string = ''
i = 0
while(i < len(tokens)):
if(startIndex > -1):
tkn = tokens[i];
if(not string.endswith("\\") and (
(tkn.type == TYPE_SEMICOLON) or
(tkn.type == TYPE_BRACE and tkn.index == 1) or
(tkn.type == TYPE_WHITESPACE and tkn == 0)
)):
if(endIndex > -1):
string = ''
for j in range(startIndex,endIndex+1):
string += tokens[j].string
newTokens.append(token(string))
i = endIndex
else:
i = startIndex
newTokens.append(tokens[i])
startIndex = -1
endIndex = -1
string = ''
elif(tkn.type == TYPE_BINOPERATOR and tkn.string == '/'):
endIndex = i
string += tkn.string
else:
string += tkn.string
elif(i > 0 and tokens[i].type == TYPE_BINOPERATOR and tokens[i].string == '/'):
# check if the previous is not an identifier, not an operator
j = i-1
prev = tokens[j]
while(prev.type == TYPE_WHITESPACE and j > 0):
j -= 1
prev = tokens[j]
if((prev.type == TYPE_BINOPERATOR and prev.string == '=') or
(prev.type == TYPE_BRACE and prev.index == 0) or
(prev.type == TYPE_COMMA and prev.index == 0)):
startIndex = i
string = tokens[i].string
else:
newTokens.append(tokens[i])
else:
newTokens.append(tokens[i])
i+=1
tokens = newTokens
# now let's simplify the whitespace tokens into single ones
newTokens = []
lastToken = False
for i in range(len(tokens)):
if(lastToken):
if(lastToken.index == 0):
if(tokens[i].type == TYPE_WHITESPACE):
if(tokens[i].index == 1):
lastToken = tokens[i]
else:
newTokens.append(tokens[i])
lastToken = False
elif(lastToken.index == 1):
if(tokens[i].type == TYPE_WHITESPACE):
if(tokens[i].index == 1):
if(len(lastToken.string) < 2):
lastToken.string += tokens[i].string
else:
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
elif(tokens[i].type == TYPE_WHITESPACE):
lastToken = tokens[i]
else:
newTokens.append(tokens[i])
if(lastToken):
newTokens.append(lastToken)
tokens = newTokens
# now let's switch curly and newline tokens
for i in range(len(tokens)-1):
if(tokens[i].type == TYPE_WHITESPACE):
if(tokens[i+1].type == TYPE_CURLY and tokens[i+1].index == 0):
if(i < len(tokens)-2):
if(tokens[i+2].type == TYPE_WHITESPACE):
tokens.remove(tokens[i+2])
if(i == 0 or tokens[i-1].type != TYPE_COMMENT):
tmp = tokens[i]
tokens[i] = tokens[i+1]
tokens[i+1] = tmp
elif(tokens[i].type == TYPE_CURLY and tokens[i].index == 0):
if(tokens[i+1].type != TYPE_WHITESPACE and not(tokens[i+1].type == TYPE_CURLY and tokens[i+1].index == 1)):
tokens.insert(i+1,token('\n',TYPE_WHITESPACE,1))
elif(tokens[i+1].type == TYPE_CURLY and tokens[i+1].index == 1):
if(tokens[i].type != TYPE_WHITESPACE and not(tokens[i].type == TYPE_CURLY and tokens[i+1].index == 0)):
tokens.insert(i+1,token('\n',TYPE_WHITESPACE,1))
if(i == len(tokens)-2):
break
# now let's switch curly and newline tokens
curlyCount = 0
braceCount = 0
for i in range(len(tokens)-1):
if(tokens[i].type == TYPE_CURLY):
if(tokens[i].index == 0):
curlyCount += 1
else:
curlyCount -= 1
elif(tokens[i].type == TYPE_BRACE):
if(tokens[i].index == 0):
braceCount += 1
else:
braceCount -= 1
#elif(tokens[i].type == TYPE_COMMA and tokens[i].index == 0):
# if(braceCount <= curlyCount):
# tokens.insert(i+1,token('\n',TYPE_WHITESPACE,1))
return tokens
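def _tokenize_demo():
    # Hedged example (not part of the original tool): tokenizing a small
    # snippet yields token objects whose (string, type) pairs drive the
    # re-indentation logic in stringify() below.
    return [(t.string, t.type) for t in tokenize("var x = 1;\n")]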
def stringify(tokens, extension = 'js'):
lines = []
line = []
# loop over all tokens and put them in lines
for i in range(len(tokens)):
if(tokens[i].type == TYPE_WHITESPACE):
if(tokens[i].index == 1):
lines.append(line)
if(len(tokens[i].string) > 1):
lines.append([token('',TYPE_WHITESPACE)])
line = []
continue
line.append(tokens[i])
if(len(line)>0):
lines.append(line)
strings = []
tabs = ''
globalCurlyCount = 0
globalBraceCount = 0
globalBracketCount = 0
globalQuoteCount = 0
entryQuote = 0
history = []
for j in range(len(lines)):
line = lines[j]
curlyCount = 0
braceCount = 0
bracketCount = 0
string = ''
# check if we have a single control line without curly
prevLine = False
if(j > 0):
k = j-1
while(k >= 0):
if(len(lines[k]) > 0 and (len(lines[k]) > 1 or lines[k][0].type != TYPE_WHITESPACE)):
prevLine = lines[k]
break
k -= 1
for i in range(len(line)):
if(line[i].type == TYPE_CURLY):
if(line[i].index == 0):
globalCurlyCount += 1
curlyCount += 1
else:
if(curlyCount == 0):
string = string[2:100000]
globalCurlyCount -= 1
curlyCount -= 1
if(line[i].type == TYPE_BRACE):
if(line[i].index == 0):
globalBraceCount += 1
braceCount += 1
else:
if(braceCount == 0):
string = string[2:100000]
globalBraceCount -= 1
braceCount -= 1
if(line[i].type == TYPE_BRACKET):
if(line[i].index == 0):
globalBracketCount += 1
bracketCount += 1
else:
if(bracketCount == 0):
string = string[2:100000]
globalBracketCount -= 1
bracketCount -= 1
tabCount = curlyCount + braceCount + bracketCount
tabBefore = True
if(prevLine):
if(prevLine[0].type == TYPE_CONTROL and prevLine[0].string != 'case' and prevLine[0].string != 'default'):
lastToken = prevLine[len(prevLine)-1]
if(lastToken.type != TYPE_CURLY or lastToken.index > 0):
string += ' ';
elif(prevLine[len(prevLine)-1].type == TYPE_BINOPERATOR and tabCount <= 0):
tabBefore = False
string += ' ';
if(tabCount < 0 and tabBefore):
for i in range(abs(tabCount)):
tabs = tabs[2:10000]
string += tabs
if(len(line)>1):
firstToken = line[0]
lastToken = line[len(line)-1]
if(firstToken.index == 1 and (firstToken.type == TYPE_CURLY or firstToken.type == TYPE_BRACE or firstToken.type == TYPE_BRACKET) and
lastToken.index == 0 and (lastToken.type == TYPE_CURLY or lastToken.type == TYPE_BRACE or lastToken.type == TYPE_BRACKET)):
string = string[2:10000]
elif(len(line) == 1 and line[0].type == TYPE_CURLY and line[0].index == 0):
string = string[2:10000]
if(tabCount < 0 and not tabBefore):
for i in range(abs(tabCount)):
tabs = tabs[2:10000]
if(tabCount > 0):
for i in range(tabCount):
tabs += ' '
for i in range(0,len(line)):
if(line[i].type == TYPE_BRACE or line[i].type == TYPE_CURLY or line[i].type == TYPE_BRACKET):
if(line[i].index == 0):
history.append(line[i].string)
else:
if(line[i].type == TYPE_CURLY):
if(len(history) > 2 and history[len(history)-1] == 'case'):
tabs = tabs[2:10000]
string = string[2:10000]
history.pop()
if(len(history) > 0):
history.pop()
if(line[i].type == TYPE_COMMENT):
string += line[i].string.strip()
continue
if(line[i].type == TYPE_CURLY):
if(line[i].index == 0 and not string.endswith(' ') and not string.endswith('[') and not string.endswith('(')):
string += ' '+line[i].string
continue
if(line[i].type == TYPE_FUNCTION):
if(line[i+1].type != TYPE_BRACE and (line[i].string == 'function' or extension == 'kl')):
string += line[i].string+' '
continue
if(line[i].type == TYPE_BINOPERATOR):
if(line[i].string == '-'):
if(i==0):
string += line[i].string
continue
if(line[i-1].type != TYPE_IDENTIFIER and line[i-1].index == 0):
string += line[i].string
continue
if(not string.endswith(' ')):
if line[i].string == ":" :
if(len(history) > 0):
if(history[len(history)-1] == '?'):
string += ' '
history.pop()
elif line[i].string == "?":
history.append('?')
string += ' '
elif line[i].string == "!":
if(not string.endswith('(')):
string += ' '
else:
string += ' '
string += line[i].string
if(i < len(line)-1 and line[i].string != '!'):
string += ' '
continue
if(line[i].type == TYPE_COMMA and line[i].index == 0 and i < len(line)-1):
string += line[i].string+' '
continue
if(line[i].type == TYPE_CONTROL):
if(line[i].string == 'case' or line[i].string == 'default'):
if(len(history)>0 and history[len(history)-1] == 'case'):
string = string[2:10000]
else:
history.append('case')
tabs += ' '
if(i < len(line)-1 and (line[i+1].type == TYPE_BRACE or line[i+1].type == TYPE_CONTROL or line[i+1].type == TYPE_COMMENT or line[i+1].type == TYPE_IDENTIFIER)):
string += line[i].string+' '
else:
string += line[i].string
continue
if(line[i].type == TYPE_KEYWORD and (line[i].string == "var" or line[i].string == "#include")):
string += line[i].string+' '
continue
if(line[i].type == TYPE_KEYWORD and line[i].string == "return" and i < len(line)-1 and line[i+1].type != TYPE_SEMICOLON):
string += line[i].string+' '
continue
if(line[i].type == TYPE_IDENTIFIER and len(string) > 0 and not string.endswith(' ') and not string.endswith('.') and not string.endswith('(') and not string.endswith('[') and not string.endswith('{') and not string.endswith('!')):
if(string.endswith('-') and not string[0:len(string)-1].endswith(' ')):
string += line[i].string
else:
string += ' '+line[i].string
continue
if(line[i].type == TYPE_SEMICOLON and i < len(line)-1 and line[i+1].type != TYPE_WHITESPACE):
string += line[i].string + ' '
continue
string += line[i].string
if(len(string.strip())==0):
strings.append('')
else:
strings.append(string)
# now reindent the tabs, based on smallest indent possible
counts = []
for string in strings:
count = 0
while(string[count*2:count*2+1] == ' '):
count += 1
counts.append(count)
def reindent(strings,counts,index):
if(strings[index] == ''):
return
count = counts[index]
while(counts[index+1] == count or strings[index+1] == ''):
index += 1
if(index == len(counts)-1):
return
if(counts[index+1] > count+1):
highIndex = index+1
lowIndex = index+1
# we found a 2 tabbing or higher
# now let's check if the next lower one is also my count
while(counts[lowIndex] >= counts[highIndex] or strings[lowIndex] == ''):
lowIndex += 1
if(lowIndex == len(counts)-1):
break
if(counts[lowIndex] <= count):
# fantastic, we can lower the tabs
diff = count - counts[highIndex] + 1
for i in range(highIndex,lowIndex):
counts[i] += diff
for i in range(len(counts)-1):
reindent(strings,counts,i)
for i in range(len(counts)):
count = 0
while(strings[i][count:count+1] == ' '):
count += 1
newCount = counts[i] * 2
strings[i] = strings[i][(count-newCount):100000]
return '\n'.join(strings)
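def _beautify_snippet_demo():
    # Hedged round-trip sketch (not part of the original script): beautify
    # an in-memory snippet instead of a file on disk.
    return stringify(tokenize("if(a){b();}"), extension='js')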
def parseJSFile(fileName):
# get the content
content = open(fileName).read()
tokens = tokenize(content)
string = stringify(tokens)
if(not string.endswith('\n')):
string += '\n'
open(fileName,'w').write(string)
def parseHTMLFile(fileName):
# get the content
lines = open(fileName).read().replace('\t',' ').replace('\r\n','\n').replace('\r','\n').split('\n')
prejscontent = []
jscontent = []
postjscontent = []
insideJS = 0
for line in lines:
stripped = line.lower().strip()
if(insideJS == 0):
if(stripped.startswith('<')):
stripped = stripped[1:10000].strip()
if(stripped.startswith('script') and stripped.find('src')==-1):
insideJS = 1
prejscontent.append(line)
elif(insideJS == 1):
if(stripped.startswith('<')):
insideJS = 2
postjscontent.append(line)
else:
jscontent.append(line)
else:
postjscontent.append(line)
tokens = tokenize('\n'.join(jscontent))
string = stringify(tokens)
string = '\n'.join(prejscontent) + '\n' + string + '\n' + '\n'.join(postjscontent)
open(fileName,'w').write(string)
def main():
if(not sys.argv or len(sys.argv) == 0):
raise(Exception("No files specified!"))
arguments = []
for arg in sys.argv:
arguments.append(arg)
if(len(arguments) <= 1):
print("Run the tool with all paths to beautify!")
return
files = []
for arg in arguments:
if(arg.find('*') != -1):
matched = glob.glob(arg)
for match in matched:
arguments.append(match)
continue
for ft in fileTypes:
if(arg.lower().endswith(ft)):
if(os.path.exists(arg)):
files.append(arg)
break
else:
raise(Exception("The file '"+arg+' does not exist!'))
# parse each file
for i in range(len(files)):
extension = files[i].lower().rpartition('.')[2]
if(extension == 'js' or extension == 'kl'):
parseJSFile(files[i])
elif(extension == 'html' or extension == 'htm'):
parseHTMLFile(files[i])
else:
raise(Exception("Unsupported file format '"+extension+"'!"))
print(str(i+1)+" of "+str(len(files))+" : beautified '"+files[i]+"' successfully.")
if __name__ == '__main__':
main()
| agpl-3.0 |
krishna-pandey-git/django | django/contrib/gis/shortcuts.py | 388 | 1209 | import zipfile
from io import BytesIO
from django.conf import settings
from django.http import HttpResponse
from django.template import loader
# NumPy supported?
try:
import numpy
except ImportError:
numpy = False
def compress_kml(kml):
"Returns compressed KMZ from the given KML string."
kmz = BytesIO()
zf = zipfile.ZipFile(kmz, 'a', zipfile.ZIP_DEFLATED)
zf.writestr('doc.kml', kml.encode(settings.DEFAULT_CHARSET))
zf.close()
kmz.seek(0)
return kmz.read()
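def _compress_kml_example():
    # Hedged example (not part of Django; assumes Django settings are
    # configured): round-trip a tiny KML document through compress_kml and
    # read it back out of the resulting KMZ archive.
    kmz = zipfile.ZipFile(BytesIO(compress_kml('<kml></kml>')))
    return kmz.read('doc.kml')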
def render_to_kml(*args, **kwargs):
"Renders the response as KML (using the correct MIME type)."
return HttpResponse(loader.render_to_string(*args, **kwargs),
content_type='application/vnd.google-earth.kml+xml')
def render_to_kmz(*args, **kwargs):
"""
Compresses the KML content and returns as KMZ (using the correct
MIME type).
"""
return HttpResponse(compress_kml(loader.render_to_string(*args, **kwargs)),
content_type='application/vnd.google-earth.kmz')
def render_to_text(*args, **kwargs):
"Renders the response using the MIME type for plain text."
return HttpResponse(loader.render_to_string(*args, **kwargs),
content_type='text/plain')
| bsd-3-clause |
boompieman/iim_project | project_python2/lib/python2.7/site-packages/pip/_vendor/retrying.py | 934 | 9972 | ## Copyright 2013-2014 Ray Holder
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import random
from pip._vendor import six
import sys
import time
import traceback
# sys.maxint / 2, since Python 3.2 doesn't have a sys.maxint...
MAX_WAIT = 1073741823
def retry(*dargs, **dkw):
"""
Decorator function that instantiates the Retrying object
@param *dargs: positional arguments passed to Retrying object
@param **dkw: keyword arguments passed to the Retrying object
"""
# support both @retry and @retry() as valid syntax
if len(dargs) == 1 and callable(dargs[0]):
def wrap_simple(f):
@six.wraps(f)
def wrapped_f(*args, **kw):
return Retrying().call(f, *args, **kw)
return wrapped_f
return wrap_simple(dargs[0])
else:
def wrap(f):
@six.wraps(f)
def wrapped_f(*args, **kw):
return Retrying(*dargs, **dkw).call(f, *args, **kw)
return wrapped_f
return wrap
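# Hedged usage sketch (not part of the library; the function name is made
# up): retry a transient operation up to three times, sleeping 50 ms
# between attempts.
#
#   @retry(stop_max_attempt_number=3, wait_fixed=50)
#   def fetch_flaky_resource():
#       ...  # raises on transient failure; the decorator re-invokes it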
class Retrying(object):
def __init__(self,
stop=None, wait=None,
stop_max_attempt_number=None,
stop_max_delay=None,
wait_fixed=None,
wait_random_min=None, wait_random_max=None,
wait_incrementing_start=None, wait_incrementing_increment=None,
wait_exponential_multiplier=None, wait_exponential_max=None,
retry_on_exception=None,
retry_on_result=None,
wrap_exception=False,
stop_func=None,
wait_func=None,
wait_jitter_max=None):
self._stop_max_attempt_number = 5 if stop_max_attempt_number is None else stop_max_attempt_number
self._stop_max_delay = 100 if stop_max_delay is None else stop_max_delay
self._wait_fixed = 1000 if wait_fixed is None else wait_fixed
self._wait_random_min = 0 if wait_random_min is None else wait_random_min
self._wait_random_max = 1000 if wait_random_max is None else wait_random_max
self._wait_incrementing_start = 0 if wait_incrementing_start is None else wait_incrementing_start
self._wait_incrementing_increment = 100 if wait_incrementing_increment is None else wait_incrementing_increment
self._wait_exponential_multiplier = 1 if wait_exponential_multiplier is None else wait_exponential_multiplier
self._wait_exponential_max = MAX_WAIT if wait_exponential_max is None else wait_exponential_max
self._wait_jitter_max = 0 if wait_jitter_max is None else wait_jitter_max
# TODO add chaining of stop behaviors
# stop behavior
stop_funcs = []
if stop_max_attempt_number is not None:
stop_funcs.append(self.stop_after_attempt)
if stop_max_delay is not None:
stop_funcs.append(self.stop_after_delay)
if stop_func is not None:
self.stop = stop_func
elif stop is None:
self.stop = lambda attempts, delay: any(f(attempts, delay) for f in stop_funcs)
else:
self.stop = getattr(self, stop)
# TODO add chaining of wait behaviors
# wait behavior
wait_funcs = [lambda *args, **kwargs: 0]
if wait_fixed is not None:
wait_funcs.append(self.fixed_sleep)
if wait_random_min is not None or wait_random_max is not None:
wait_funcs.append(self.random_sleep)
if wait_incrementing_start is not None or wait_incrementing_increment is not None:
wait_funcs.append(self.incrementing_sleep)
if wait_exponential_multiplier is not None or wait_exponential_max is not None:
wait_funcs.append(self.exponential_sleep)
if wait_func is not None:
self.wait = wait_func
elif wait is None:
self.wait = lambda attempts, delay: max(f(attempts, delay) for f in wait_funcs)
else:
self.wait = getattr(self, wait)
# retry on exception filter
if retry_on_exception is None:
self._retry_on_exception = self.always_reject
else:
self._retry_on_exception = retry_on_exception
# TODO simplify retrying by Exception types
# retry on result filter
if retry_on_result is None:
self._retry_on_result = self.never_reject
else:
self._retry_on_result = retry_on_result
self._wrap_exception = wrap_exception
def stop_after_attempt(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Stop after the previous attempt >= stop_max_attempt_number."""
return previous_attempt_number >= self._stop_max_attempt_number
def stop_after_delay(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Stop after the time from the first attempt >= stop_max_delay."""
return delay_since_first_attempt_ms >= self._stop_max_delay
def no_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Don't sleep at all before retrying."""
return 0
def fixed_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Sleep a fixed amount of time between each retry."""
return self._wait_fixed
def random_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Sleep a random amount of time between wait_random_min and wait_random_max"""
return random.randint(self._wait_random_min, self._wait_random_max)
def incrementing_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
"""
Sleep an incremental amount of time after each attempt, starting at
wait_incrementing_start and incrementing by wait_incrementing_increment
"""
result = self._wait_incrementing_start + (self._wait_incrementing_increment * (previous_attempt_number - 1))
if result < 0:
result = 0
return result
def exponential_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
exp = 2 ** previous_attempt_number
result = self._wait_exponential_multiplier * exp
if result > self._wait_exponential_max:
result = self._wait_exponential_max
if result < 0:
result = 0
return result
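    # Illustrative values (not in the original source): with
    # wait_exponential_multiplier=1000 and wait_exponential_max=10000,
    # attempts 1..5 wait 2000, 4000, 8000, 10000 and 10000 ms respectively.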
def never_reject(self, result):
return False
def always_reject(self, result):
return True
def should_reject(self, attempt):
reject = False
if attempt.has_exception:
reject |= self._retry_on_exception(attempt.value[1])
else:
reject |= self._retry_on_result(attempt.value)
return reject
def call(self, fn, *args, **kwargs):
start_time = int(round(time.time() * 1000))
attempt_number = 1
while True:
try:
attempt = Attempt(fn(*args, **kwargs), attempt_number, False)
except:
tb = sys.exc_info()
attempt = Attempt(tb, attempt_number, True)
if not self.should_reject(attempt):
return attempt.get(self._wrap_exception)
delay_since_first_attempt_ms = int(round(time.time() * 1000)) - start_time
if self.stop(attempt_number, delay_since_first_attempt_ms):
if not self._wrap_exception and attempt.has_exception:
# get() on an attempt with an exception should cause it to be raised, but raise just in case
raise attempt.get()
else:
raise RetryError(attempt)
else:
sleep = self.wait(attempt_number, delay_since_first_attempt_ms)
if self._wait_jitter_max:
jitter = random.random() * self._wait_jitter_max
sleep = sleep + max(0, jitter)
time.sleep(sleep / 1000.0)
attempt_number += 1
class Attempt(object):
"""
An Attempt encapsulates a call to a target function that may end as a
normal return value from the function or an Exception depending on what
occurred during the execution.
"""
def __init__(self, value, attempt_number, has_exception):
self.value = value
self.attempt_number = attempt_number
self.has_exception = has_exception
def get(self, wrap_exception=False):
"""
Return the return value of this Attempt instance or raise an Exception.
If wrap_exception is true, this Attempt is wrapped inside of a
RetryError before being raised.
"""
if self.has_exception:
if wrap_exception:
raise RetryError(self)
else:
six.reraise(self.value[0], self.value[1], self.value[2])
else:
return self.value
def __repr__(self):
if self.has_exception:
return "Attempts: {0}, Error:\n{1}".format(self.attempt_number, "".join(traceback.format_tb(self.value[2])))
else:
return "Attempts: {0}, Value: {1}".format(self.attempt_number, self.value)
class RetryError(Exception):
"""
A RetryError encapsulates the last Attempt instance right before giving up.
"""
def __init__(self, last_attempt):
self.last_attempt = last_attempt
def __str__(self):
return "RetryError[{0}]".format(self.last_attempt)
| gpl-3.0 |
aasiutin/electrum | gui/qt/qrtextedit.py | 12 | 1913 | from electrum.i18n import _
from electrum.plugins import run_hook
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from util import ButtonsTextEdit, MessageBoxMixin
class ShowQRTextEdit(ButtonsTextEdit):
def __init__(self, text=None):
ButtonsTextEdit.__init__(self, text)
self.setReadOnly(1)
self.addButton(":icons/qrcode.png", self.qr_show, _("Show as QR code"))
run_hook('show_text_edit', self)
def qr_show(self):
from qrcodewidget import QRDialog
try:
s = str(self.toPlainText())
except:
s = unicode(self.toPlainText())
QRDialog(s).exec_()
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Show as QR code"), self.qr_show)
m.exec_(e.globalPos())
class ScanQRTextEdit(ButtonsTextEdit, MessageBoxMixin):
def __init__(self, text=""):
ButtonsTextEdit.__init__(self, text)
self.setReadOnly(0)
self.addButton(":icons/file.png", self.file_input, _("Read file"))
self.addButton(":icons/qrcode.png", self.qr_input, _("Read QR code"))
run_hook('scan_text_edit', self)
def file_input(self):
fileName = unicode(QFileDialog.getOpenFileName(self, 'select file'))
if not fileName:
return
with open(fileName, "r") as f:
data = f.read()
self.setText(data)
def qr_input(self):
from electrum import qrscanner, get_config
try:
data = qrscanner.scan_qr(get_config())
except BaseException as e:
self.show_error(str(e))
return ""
if type(data) != str:
return
self.setText(data)
return data
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Read QR code"), self.qr_input)
m.exec_(e.globalPos())
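# Illustrative embedding sketch (the enclosing layout is assumed):
#   edit = ScanQRTextEdit()
#   layout.addWidget(edit)  # user can type, read a file, or scan a QR code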
| mit |
ahupowerdns/pdns | regression-tests.api/test_Servers.py | 13 | 1661 | from test_helper import ApiTestCase, is_auth, is_recursor
class Servers(ApiTestCase):
def test_list_servers(self):
r = self.session.get(self.url("/api/v1/servers"))
self.assert_success_json(r)
lst = r.json()
self.assertEquals(len(lst), 1) # only localhost allowed in there
data = lst[0]
for k in ('id', 'daemon_type', 'url'):
self.assertIn(k, data)
self.assertEquals(data['id'], 'localhost')
def test_servers_localhost(self):
r = self.session.get(self.url("/api/v1/servers/localhost"))
self.assert_success_json(r)
data = r.json()
for k in ('id', 'type', 'version', 'daemon_type', 'url', 'zones_url', 'config_url'):
self.assertIn(k, data)
self.assertEquals(data['id'], 'localhost')
self.assertEquals(data['type'], 'Server')
# or 'recursor' for recursors
if is_auth():
daemon_type = 'authoritative'
elif is_recursor():
daemon_type = 'recursor'
else:
raise RuntimeError('Unknown daemon type')
self.assertEquals(data['daemon_type'], daemon_type)
def test_read_config(self):
r = self.session.get(self.url("/api/v1/servers/localhost/config"))
self.assert_success_json(r)
data = dict([(r['name'], r['value']) for r in r.json()])
self.assertIn('daemon', data)
def test_read_statistics(self):
r = self.session.get(self.url("/api/v1/servers/localhost/statistics"))
self.assert_success_json(r)
data = dict([(r['name'], r['value']) for r in r.json()])
self.assertIn('uptime', data)
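# The same endpoints can be exercised outside the test harness; a rough sketch
# (URL, port and API key below are assumptions, adjust to your pdns webserver
# configuration):
#
#     import requests
#     r = requests.get('http://127.0.0.1:8081/api/v1/servers/localhost/statistics',
#                      headers={'X-API-Key': 'changeme'})
#     stats = dict((e['name'], e['value']) for e in r.json())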
| gpl-2.0 |
Lujeni/ansible | lib/ansible/modules/cloud/vmware/vmware_guest.py | 1 | 150199 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This module is also sponsored by E.T.A.I. (www.etai.fr)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vmware_guest
short_description: Manages virtual machines in vCenter
description: >
This module can be used to create new virtual machines from templates or other virtual machines,
manage power state of virtual machine such as power on, power off, suspend, shutdown, reboot, restart etc.,
modify various virtual machine components like network, disk, customization etc.,
rename a virtual machine and remove a virtual machine with associated components.
version_added: '2.2'
author:
- Loic Blot (@nerzhul) <[email protected]>
- Philippe Dellaert (@pdellaert) <[email protected]>
- Abhijeet Kasurde (@Akasurde) <[email protected]>
requirements:
- python >= 2.6
- PyVmomi
notes:
- Please make sure that the user used for vmware_guest has the correct level of privileges.
- For example, following is the list of minimum privileges required by users to create virtual machines.
- " DataStore > Allocate Space"
- " Virtual Machine > Configuration > Add New Disk"
- " Virtual Machine > Configuration > Add or Remove Device"
- " Virtual Machine > Inventory > Create New"
- " Network > Assign Network"
- " Resource > Assign Virtual Machine to Resource Pool"
- "Module may require additional privileges as well, which may be required for gathering facts - e.g. ESXi configurations."
- Tested on vSphere 5.5, 6.0, 6.5 and 6.7
- Use SCSI disks instead of IDE when you want to expand online disks by specifying a SCSI controller
- Uses SysPrep for Windows VMs (applies when the 'guest_id' parameter matches 'win') with PyVmomi
- "For additional information please visit Ansible VMware community wiki - U(https://github.com/ansible/community/wiki/VMware)."
options:
state:
description:
- Specify the state the virtual machine should be in.
    - 'If C(state) is set to C(present) and virtual machine exists, ensure the virtual machine
      configuration conforms to task arguments.'
- 'If C(state) is set to C(absent) and virtual machine exists, then the specified virtual machine
is removed with its associated components.'
- 'If C(state) is set to one of the following C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
      and virtual machine does not exist, then the virtual machine is deployed with the given parameters.'
- 'If C(state) is set to C(poweredon) and virtual machine exists with powerstate other than powered on,
then the specified virtual machine is powered on.'
- 'If C(state) is set to C(poweredoff) and virtual machine exists with powerstate other than powered off,
then the specified virtual machine is powered off.'
- 'If C(state) is set to C(restarted) and virtual machine exists, then the virtual machine is restarted.'
- 'If C(state) is set to C(suspended) and virtual machine exists, then the virtual machine is set to suspended mode.'
- 'If C(state) is set to C(shutdownguest) and virtual machine exists, then the virtual machine is shutdown.'
- 'If C(state) is set to C(rebootguest) and virtual machine exists, then the virtual machine is rebooted.'
default: present
choices: [ present, absent, poweredon, poweredoff, restarted, suspended, shutdownguest, rebootguest ]
name:
description:
- Name of the virtual machine to work with.
- Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).
- 'If multiple virtual machines with same name exists, then C(folder) is required parameter to
identify uniqueness of the virtual machine.'
    - This parameter is required, if C(state) is set to C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
      and virtual machine does not exist.
- This parameter is case sensitive.
required: yes
name_match:
description:
- If multiple virtual machines matching the name, use the first or last found.
default: 'first'
choices: [ first, last ]
uuid:
description:
- UUID of the virtual machine to manage if known, this is VMware's unique identifier.
- This is required if C(name) is not supplied.
    - If the virtual machine does not exist, then this parameter is ignored.
- Please note that a supplied UUID will be ignored on virtual machine creation, as VMware creates the UUID internally.
use_instance_uuid:
description:
- Whether to use the VMware instance UUID rather than the BIOS UUID.
default: no
type: bool
version_added: '2.8'
template:
description:
- Template or existing virtual machine used to create new virtual machine.
- If this value is not set, virtual machine is created without using a template.
- If the virtual machine already exists, this parameter will be ignored.
- This parameter is case sensitive.
- You can also specify template or VM UUID for identifying source. version_added 2.8. Use C(hw_product_uuid) from M(vmware_guest_facts) as UUID value.
- From version 2.8 onwards, absolute path to virtual machine or template can be used.
aliases: [ 'template_src' ]
is_template:
description:
- Flag the instance as a template.
- This will mark the given virtual machine as template.
default: 'no'
type: bool
version_added: '2.3'
folder:
description:
- Destination folder, absolute path to find an existing guest or create the new guest.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter.
- This parameter is case sensitive.
    - This parameter is required while deploying a new virtual machine. version_added 2.5.
    - 'If multiple machines are found with the same name, this parameter is used to identify
      uniqueness of the virtual machine. version_added 2.5'
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
hardware:
description:
- Manage virtual machine's hardware attributes.
- All parameters case sensitive.
- 'Valid attributes are:'
- ' - C(hotadd_cpu) (boolean): Allow virtual CPUs to be added while the virtual machine is running.'
- ' - C(hotremove_cpu) (boolean): Allow virtual CPUs to be removed while the virtual machine is running.
version_added: 2.5'
- ' - C(hotadd_memory) (boolean): Allow memory to be added while the virtual machine is running.'
- ' - C(memory_mb) (integer): Amount of memory in MB.'
- ' - C(nested_virt) (bool): Enable nested virtualization. version_added: 2.5'
- ' - C(num_cpus) (integer): Number of CPUs.'
- ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket.'
- " C(num_cpus) must be a multiple of C(num_cpu_cores_per_socket).
For example to create a VM with 2 sockets of 4 cores, specify C(num_cpus): 8 and C(num_cpu_cores_per_socket): 4"
- ' - C(scsi) (string): Valid values are C(buslogic), C(lsilogic), C(lsilogicsas) and C(paravirtual) (default).'
- " - C(memory_reservation_lock) (boolean): If set true, memory resource reservation for the virtual machine
will always be equal to the virtual machine's memory size. version_added: 2.5"
- ' - C(max_connections) (integer): Maximum number of active remote display connections for the virtual machines.
version_added: 2.5.'
- ' - C(mem_limit) (integer): The memory utilization of a virtual machine will not exceed this limit. Unit is MB.
version_added: 2.5'
- ' - C(mem_reservation) (integer): The amount of memory resource that is guaranteed available to the virtual
machine. Unit is MB. C(memory_reservation) is alias to this. version_added: 2.5'
- ' - C(cpu_limit) (integer): The CPU utilization of a virtual machine will not exceed this limit. Unit is MHz.
version_added: 2.5'
- ' - C(cpu_reservation) (integer): The amount of CPU resource that is guaranteed available to the virtual machine.
Unit is MHz. version_added: 2.5'
- ' - C(version) (integer): The Virtual machine hardware versions. Default is 10 (ESXi 5.5 and onwards).
If value specified as C(latest), version is set to the most current virtual hardware supported on the host.
C(latest) is added in version 2.10.
Please check VMware documentation for correct virtual machine hardware version.
Incorrect hardware version may lead to failure in deployment. If hardware version is already equal to the given
version then no action is taken. version_added: 2.6'
- ' - C(boot_firmware) (string): Choose which firmware should be used to boot the virtual machine.
Allowed values are "bios" and "efi". version_added: 2.7'
- ' - C(virt_based_security) (bool): Enable Virtualization Based Security feature for Windows 10.
(Support from Virtual machine hardware version 14, Guest OS Windows 10 64 bit, Windows Server 2016)'
guest_id:
description:
- Set the guest ID.
- This parameter is case sensitive.
- 'Examples:'
- " virtual machine with RHEL7 64 bit, will be 'rhel7_64Guest'"
- " virtual machine with CentOS 64 bit, will be 'centos64Guest'"
- " virtual machine with Ubuntu 64 bit, will be 'ubuntu64Guest'"
- This field is required when creating a virtual machine, not required when creating from the template.
- >
Valid values are referenced here:
U(https://code.vmware.com/apis/358/doc/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html)
version_added: '2.3'
disk:
description:
- A list of disks to add.
- This parameter is case sensitive.
- Shrinking disks is not supported.
- Removing existing disks of the virtual machine is not supported.
- 'Valid attributes are:'
- ' - C(size_[tb,gb,mb,kb]) (integer): Disk storage size in specified unit.'
- ' - C(type) (string): Valid values are:'
- ' - C(thin) thin disk'
- ' - C(eagerzeroedthick) eagerzeroedthick disk, added in version 2.5'
- ' Default: C(None) thick disk, no eagerzero.'
- ' - C(datastore) (string): The name of datastore which will be used for the disk. If C(autoselect_datastore) is set to True,
then will select the less used datastore whose name contains this "disk.datastore" string.'
- ' - C(filename) (string): Existing disk image to be used. Filename must already exist on the datastore.'
- ' Specify filename string in C([datastore_name] path/to/file.vmdk) format. Added in version 2.8.'
- ' - C(autoselect_datastore) (bool): select the less used datastore. "disk.datastore" and "disk.autoselect_datastore"
will not be used if C(datastore) is specified outside this C(disk) configuration.'
- ' - C(disk_mode) (string): Type of disk mode. Added in version 2.6'
- ' - Available options are :'
- ' - C(persistent): Changes are immediately and permanently written to the virtual disk. This is default.'
- ' - C(independent_persistent): Same as persistent, but not affected by snapshots.'
- ' - C(independent_nonpersistent): Changes to virtual disk are made to a redo log and discarded at power off, but not affected by snapshots.'
cdrom:
description:
- A CD-ROM configuration for the virtual machine.
- Or a list of CD-ROMs configuration for the virtual machine. Added in version 2.9.
- 'Parameters C(controller_type), C(controller_number), C(unit_number), C(state) are added for a list of CD-ROMs
configuration support.'
- 'Valid attributes are:'
- ' - C(type) (string): The type of CD-ROM, valid options are C(none), C(client) or C(iso). With C(none) the CD-ROM
will be disconnected but present.'
- ' - C(iso_path) (string): The datastore path to the ISO file to use, in the form of C([datastore1] path/to/file.iso).
Required if type is set C(iso).'
- ' - C(controller_type) (string): Default value is C(ide). Only C(ide) controller type for CD-ROM is supported for
now, will add SATA controller type in the future.'
- ' - C(controller_number) (int): For C(ide) controller, valid value is 0 or 1.'
- ' - C(unit_number) (int): For CD-ROM device attach to C(ide) controller, valid value is 0 or 1.
C(controller_number) and C(unit_number) are mandatory attributes.'
- ' - C(state) (string): Valid value is C(present) or C(absent). Default is C(present). If set to C(absent), then
the specified CD-ROM will be removed. For C(ide) controller, hot-add or hot-remove CD-ROM is not supported.'
version_added: '2.5'
resource_pool:
description:
- Use the given resource pool for virtual machine operation.
- This parameter is case sensitive.
- Resource pool should be child of the selected host parent.
version_added: '2.3'
wait_for_ip_address:
description:
- Wait until vCenter detects an IP address for the virtual machine.
- This requires vmware-tools (vmtoolsd) to properly work after creation.
- "vmware-tools needs to be installed on the given virtual machine in order to work with this parameter."
default: 'no'
type: bool
wait_for_ip_address_timeout:
description:
- Define a timeout (in seconds) for the wait_for_ip_address parameter.
default: '300'
type: int
version_added: '2.10'
wait_for_customization_timeout:
description:
- Define a timeout (in seconds) for the wait_for_customization parameter.
- Be careful when setting this value since the time guest customization took may differ among guest OSes.
default: '3600'
type: int
version_added: '2.10'
wait_for_customization:
description:
- Wait until vCenter detects all guest customizations as successfully completed.
- When enabled, the VM will automatically be powered on.
- "If vCenter does not detect guest customization start or succeed, failed events after time
C(wait_for_customization_timeout) parameter specified, warning message will be printed and task result is fail."
default: 'no'
type: bool
version_added: '2.8'
state_change_timeout:
description:
- If the C(state) is set to C(shutdownguest), by default the module will return immediately after sending the shutdown signal.
- If this argument is set to a positive integer, the module will instead wait for the virtual machine to reach the poweredoff state.
- The value sets a timeout in seconds for the module to wait for the state change.
default: 0
version_added: '2.6'
snapshot_src:
description:
- Name of the existing snapshot to use to create a clone of a virtual machine.
- This parameter is case sensitive.
- While creating linked clone using C(linked_clone) parameter, this parameter is required.
version_added: '2.4'
linked_clone:
description:
- Whether to create a linked clone from the snapshot specified.
- If specified, then C(snapshot_src) is required parameter.
default: 'no'
type: bool
version_added: '2.4'
force:
description:
- Ignore warnings and complete the actions.
    - This parameter is useful while removing a virtual machine which is in powered-on state.
- 'This module reflects the VMware vCenter API and UI workflow, as such, in some cases the `force` flag will
be mandatory to perform the action to ensure you are certain the action has to be taken, no matter what the consequence.
      This is specifically the case for removing a powered-on virtual machine when C(state) is set to C(absent).'
default: 'no'
type: bool
delete_from_inventory:
description:
    - Whether to delete the virtual machine from inventory only (unregister), rather than deleting it from disk.
default: False
type: bool
version_added: '2.10'
datacenter:
description:
- Destination datacenter for the deploy operation.
- This parameter is case sensitive.
default: ha-datacenter
cluster:
description:
- The cluster name where the virtual machine will run.
- This is a required parameter, if C(esxi_hostname) is not set.
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
- This parameter is case sensitive.
version_added: '2.3'
esxi_hostname:
description:
- The ESXi hostname where the virtual machine will run.
- This is a required parameter, if C(cluster) is not set.
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
- This parameter is case sensitive.
annotation:
description:
- A note or annotation to include in the virtual machine.
version_added: '2.3'
customvalues:
description:
- Define a list of custom values to set on virtual machine.
- A custom value object takes two fields C(key) and C(value).
- Incorrect key and values will be ignored.
version_added: '2.3'
networks:
description:
- A list of networks (in the order of the NICs).
    - Removing NICs is not allowed while reconfiguring the virtual machine.
- All parameters and VMware object names are case sensitive.
- 'One of the below parameters is required per entry:'
- ' - C(name) (string): Name of the portgroup or distributed virtual portgroup for this interface.
When specifying distributed virtual portgroup make sure given C(esxi_hostname) or C(cluster) is associated with it.'
- ' - C(vlan) (integer): VLAN number for this interface.'
- 'Optional parameters per entry (used for virtual hardware):'
- ' - C(device_type) (string): Virtual network device (one of C(e1000), C(e1000e), C(pcnet32), C(vmxnet2), C(vmxnet3) (default), C(sriov)).'
- ' - C(mac) (string): Customize MAC address.'
- ' - C(dvswitch_name) (string): Name of the distributed vSwitch.
This value is required if multiple distributed portgroups exists with the same name. version_added 2.7'
- ' - C(start_connected) (bool): Indicates that virtual network adapter starts with associated virtual machine powers on. version_added: 2.5'
- 'Optional parameters per entry (used for OS customization):'
- ' - C(type) (string): Type of IP assignment (either C(dhcp) or C(static)). C(dhcp) is default.'
- ' - C(ip) (string): Static IP address (implies C(type: static)).'
- ' - C(netmask) (string): Static netmask required for C(ip).'
- ' - C(gateway) (string): Static gateway.'
- ' - C(dns_servers) (string): DNS servers for this network interface (Windows).'
- ' - C(domain) (string): Domain name for this network interface (Windows).'
- ' - C(wake_on_lan) (bool): Indicates if wake-on-LAN is enabled on this virtual network adapter. version_added: 2.5'
- ' - C(allow_guest_control) (bool): Enables guest control over whether the connectable device is connected. version_added: 2.5'
version_added: '2.3'
customization:
description:
- Parameters for OS customization when cloning from the template or the virtual machine, or apply to the existing virtual machine directly.
- Not all operating systems are supported for customization with respective vCenter version,
please check VMware documentation for respective OS customization.
- For supported customization operating system matrix, (see U(http://partnerweb.vmware.com/programs/guestOS/guest-os-customization-matrix.pdf))
- All parameters and VMware object names are case sensitive.
- Linux based OSes requires Perl package to be installed for OS customizations.
- 'Common parameters (Linux/Windows):'
- ' - C(existing_vm) (bool): If set to C(True), do OS customization on the specified virtual machine directly.
If set to C(False) or not specified, do OS customization when cloning from the template or the virtual machine. version_added: 2.8'
- ' - C(dns_servers) (list): List of DNS servers to configure.'
- ' - C(dns_suffix) (list): List of domain suffixes, also known as DNS search path (default: C(domain) parameter).'
- ' - C(domain) (string): DNS domain name to use.'
    - ' - C(hostname) (string): Computer hostname (default: shortened C(name) parameter). Allowed characters are alphanumeric (uppercase and lowercase)
      and minus, rest of the characters are dropped as per RFC 952.'
- 'Parameters related to Linux customization:'
- ' - C(timezone) (string): Timezone (See List of supported time zones for different vSphere versions in Linux/Unix
systems (2145518) U(https://kb.vmware.com/s/article/2145518)). version_added: 2.9'
- ' - C(hwclockUTC) (bool): Specifies whether the hardware clock is in UTC or local time.
True when the hardware clock is in UTC, False when the hardware clock is in local time. version_added: 2.9'
- 'Parameters related to Windows customization:'
- ' - C(autologon) (bool): Auto logon after virtual machine customization (default: False).'
- ' - C(autologoncount) (int): Number of autologon after reboot (default: 1).'
- ' - C(domainadmin) (string): User used to join in AD domain (mandatory with C(joindomain)).'
- ' - C(domainadminpassword) (string): Password used to join in AD domain (mandatory with C(joindomain)).'
- ' - C(fullname) (string): Server owner name (default: Administrator).'
- ' - C(joindomain) (string): AD domain to join (Not compatible with C(joinworkgroup)).'
- ' - C(joinworkgroup) (string): Workgroup to join (Not compatible with C(joindomain), default: WORKGROUP).'
- ' - C(orgname) (string): Organisation name (default: ACME).'
- ' - C(password) (string): Local administrator password.'
- ' - C(productid) (string): Product ID.'
- ' - C(runonce) (list): List of commands to run at first user logon.'
- ' - C(timezone) (int): Timezone (See U(https://msdn.microsoft.com/en-us/library/ms912391.aspx)).'
version_added: '2.3'
vapp_properties:
description:
- A list of vApp properties
- 'For full list of attributes and types refer to: U(https://github.com/vmware/pyvmomi/blob/master/docs/vim/vApp/PropertyInfo.rst)'
- 'Basic attributes are:'
- ' - C(id) (string): Property id - required.'
- ' - C(value) (string): Property value.'
- ' - C(type) (string): Value type, string type by default.'
- ' - C(operation): C(remove): This attribute is required only when removing properties.'
version_added: '2.6'
customization_spec:
description:
- Unique name identifying the requested customization specification.
- This parameter is case sensitive.
- If set, then overrides C(customization) parameter values.
version_added: '2.6'
datastore:
description:
- Specify datastore or datastore cluster to provision virtual machine.
- 'This parameter takes precedence over "disk.datastore" parameter.'
- 'This parameter can be used to override datastore or datastore cluster setting of the virtual machine when deployed
from the template.'
- Please see example for more usage.
version_added: '2.7'
convert:
description:
- Specify convert disk type while cloning template or virtual machine.
choices: [ thin, thick, eagerzeroedthick ]
version_added: '2.8'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Create a virtual machine on given ESXi hostname
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
folder: /DC1/vm/
name: test_vm_0001
state: poweredon
guest_id: centos64Guest
# This is hostname of particular ESXi server on which user wants VM to be deployed
esxi_hostname: "{{ esxi_hostname }}"
disk:
- size_gb: 10
type: thin
datastore: datastore1
hardware:
memory_mb: 512
num_cpus: 4
scsi: paravirtual
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
ip: 10.10.10.100
netmask: 255.255.255.0
device_type: vmxnet3
wait_for_ip_address: yes
wait_for_ip_address_timeout: 600
delegate_to: localhost
register: deploy_vm
- name: Create a virtual machine from a template
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
folder: /testvms
name: testvm_2
state: poweredon
template: template_el7
disk:
- size_gb: 10
type: thin
datastore: g73_datastore
# Add another disk from an existing VMDK
- filename: "[datastore1] testvms/testvm_2_1/testvm_2_1.vmdk"
hardware:
memory_mb: 512
num_cpus: 6
num_cpu_cores_per_socket: 3
scsi: paravirtual
memory_reservation_lock: True
mem_limit: 8096
mem_reservation: 4096
cpu_limit: 8096
cpu_reservation: 4096
max_connections: 5
hotadd_cpu: True
hotremove_cpu: True
hotadd_memory: False
version: 12 # Hardware version of virtual machine
boot_firmware: "efi"
cdrom:
type: iso
iso_path: "[datastore1] livecd.iso"
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
wait_for_ip_address: yes
delegate_to: localhost
register: deploy
- name: Clone a virtual machine from Windows template and customize
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: datacenter1
cluster: cluster
name: testvm-2
template: template_windows
networks:
- name: VM Network
ip: 192.168.1.100
netmask: 255.255.255.0
gateway: 192.168.1.1
mac: aa:bb:dd:aa:00:14
domain: my_domain
dns_servers:
- 192.168.1.1
- 192.168.1.2
- vlan: 1234
type: dhcp
customization:
autologon: yes
dns_servers:
- 192.168.1.1
- 192.168.1.2
domain: my_domain
password: new_vm_password
runonce:
- powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
delegate_to: localhost
- name: Clone a virtual machine from Linux template and customize
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: "{{ datacenter }}"
state: present
folder: /DC1/vm
template: "{{ template }}"
name: "{{ vm_name }}"
cluster: DC1_C1
networks:
- name: VM Network
ip: 192.168.10.11
netmask: 255.255.255.0
wait_for_ip_address: True
customization:
domain: "{{ guest_domain }}"
dns_servers:
- 8.9.9.9
- 7.8.8.9
dns_suffix:
- example.com
- example2.com
delegate_to: localhost
- name: Rename a virtual machine (requires the virtual machine's uuid)
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
name: new_name
state: present
delegate_to: localhost
- name: Remove a virtual machine by uuid
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
state: absent
delegate_to: localhost
- name: Remove a virtual machine from inventory
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
name: vm_name
delete_from_inventory: True
state: absent
delegate_to: localhost
- name: Manipulate vApp properties
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
name: vm_name
state: present
vapp_properties:
- id: remoteIP
category: Backup
label: Backup server IP
type: str
value: 10.10.10.1
- id: old_property
operation: remove
delegate_to: localhost
- name: Set powerstate of a virtual machine to poweroff by using UUID
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
state: poweredoff
delegate_to: localhost
- name: Deploy a virtual machine in a datastore different from the datastore of the template
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
name: "{{ vm_name }}"
state: present
template: "{{ template_name }}"
    # Here the datastore can be different from the one which holds the template
datastore: "{{ virtual_machine_datastore }}"
hardware:
memory_mb: 512
num_cpus: 2
scsi: paravirtual
delegate_to: localhost
'''
RETURN = r'''
instance:
description: metadata about the new virtual machine
returned: always
type: dict
sample: None
'''
import re
import time
import string
HAS_PYVMOMI = False
try:
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
pass
from random import randint
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.network import is_mac
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.vmware import (find_obj, gather_vm_facts, get_all_objs,
compile_folder_path_for_object, serialize_spec,
vmware_argument_spec, set_vm_power_state, PyVmomi,
find_dvs_by_name, find_dvspg_by_name, wait_for_vm_ip,
wait_for_task, TaskError, quote_obj_name)
def list_or_dict(value):
if isinstance(value, list) or isinstance(value, dict):
return value
else:
raise ValueError("'%s' is not valid, valid type is 'list' or 'dict'." % value)
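# Illustration (comment only): list_or_dict([1, 2]) -> [1, 2] and
# list_or_dict({'a': 1}) -> {'a': 1}, while list_or_dict('x') raises ValueError.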
class PyVmomiDeviceHelper(object):
""" This class is a helper to create easily VMware Objects for PyVmomiHelper """
def __init__(self, module):
self.module = module
self.next_disk_unit_number = 0
self.scsi_device_type = {
'lsilogic': vim.vm.device.VirtualLsiLogicController,
'paravirtual': vim.vm.device.ParaVirtualSCSIController,
'buslogic': vim.vm.device.VirtualBusLogicController,
'lsilogicsas': vim.vm.device.VirtualLsiLogicSASController,
}
def create_scsi_controller(self, scsi_type):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
scsi_device = self.scsi_device_type.get(scsi_type, vim.vm.device.ParaVirtualSCSIController)
scsi_ctl.device = scsi_device()
scsi_ctl.device.busNumber = 0
# While creating a new SCSI controller, temporary key value
# should be unique negative integers
scsi_ctl.device.key = -randint(1000, 9999)
scsi_ctl.device.hotAddRemove = True
scsi_ctl.device.sharedBus = 'noSharing'
scsi_ctl.device.scsiCtlrUnitNumber = 7
return scsi_ctl
def is_scsi_controller(self, device):
return isinstance(device, tuple(self.scsi_device_type.values()))
@staticmethod
def create_ide_controller(bus_number=0):
ide_ctl = vim.vm.device.VirtualDeviceSpec()
ide_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
ide_ctl.device = vim.vm.device.VirtualIDEController()
ide_ctl.device.deviceInfo = vim.Description()
# While creating a new IDE controller, temporary key value
# should be unique negative integers
ide_ctl.device.key = -randint(200, 299)
ide_ctl.device.busNumber = bus_number
return ide_ctl
@staticmethod
def create_cdrom(ide_device, cdrom_type, iso_path=None, unit_number=0):
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
cdrom_spec.device = vim.vm.device.VirtualCdrom()
cdrom_spec.device.controllerKey = ide_device.key
cdrom_spec.device.key = -randint(3000, 3999)
cdrom_spec.device.unitNumber = unit_number
cdrom_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdrom_spec.device.connectable.allowGuestControl = True
cdrom_spec.device.connectable.startConnected = (cdrom_type != "none")
if cdrom_type in ["none", "client"]:
cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
elif cdrom_type == "iso":
cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
return cdrom_spec
@staticmethod
def is_equal_cdrom(vm_obj, cdrom_device, cdrom_type, iso_path):
if cdrom_type == "none":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
cdrom_device.connectable.allowGuestControl and
not cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or not cdrom_device.connectable.connected))
elif cdrom_type == "client":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
cdrom_device.connectable.allowGuestControl and
cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
elif cdrom_type == "iso":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.IsoBackingInfo) and
cdrom_device.backing.fileName == iso_path and
cdrom_device.connectable.allowGuestControl and
cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
@staticmethod
def update_cdrom_config(vm_obj, cdrom_spec, cdrom_device, iso_path=None):
# Updating an existing CD-ROM
if cdrom_spec["type"] in ["client", "none"]:
cdrom_device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
elif cdrom_spec["type"] == "iso" and iso_path is not None:
cdrom_device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
cdrom_device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdrom_device.connectable.allowGuestControl = True
cdrom_device.connectable.startConnected = (cdrom_spec["type"] != "none")
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
cdrom_device.connectable.connected = (cdrom_spec["type"] != "none")
def remove_cdrom(self, cdrom_device):
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
cdrom_spec.device = cdrom_device
return cdrom_spec
def create_scsi_disk(self, scsi_ctl, disk_index=None):
diskspec = vim.vm.device.VirtualDeviceSpec()
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
diskspec.device = vim.vm.device.VirtualDisk()
diskspec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
diskspec.device.controllerKey = scsi_ctl.device.key
        if self.next_disk_unit_number == 7:
            raise AssertionError()
        if disk_index == 7:
            raise AssertionError()
        # Configure the disk unit number.
if disk_index is not None:
diskspec.device.unitNumber = disk_index
self.next_disk_unit_number = disk_index + 1
else:
diskspec.device.unitNumber = self.next_disk_unit_number
self.next_disk_unit_number += 1
# unit number 7 is reserved to SCSI controller, increase next index
if self.next_disk_unit_number == 7:
self.next_disk_unit_number += 1
return diskspec
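    # Note on the bookkeeping above (comment added for illustration): with
    # sequential calls and no explicit disk_index, disks are assigned unit
    # numbers 0..6, then 8, 9, ... since unit number 7 is reserved for the
    # SCSI controller itself.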
def get_device(self, device_type, name):
nic_dict = dict(pcnet32=vim.vm.device.VirtualPCNet32(),
vmxnet2=vim.vm.device.VirtualVmxnet2(),
vmxnet3=vim.vm.device.VirtualVmxnet3(),
e1000=vim.vm.device.VirtualE1000(),
e1000e=vim.vm.device.VirtualE1000e(),
sriov=vim.vm.device.VirtualSriovEthernetCard(),
)
if device_type in nic_dict:
return nic_dict[device_type]
else:
self.module.fail_json(msg='Invalid device_type "%s"'
' for network "%s"' % (device_type, name))
def create_nic(self, device_type, device_label, device_infos):
nic = vim.vm.device.VirtualDeviceSpec()
nic.device = self.get_device(device_type, device_infos['name'])
nic.device.wakeOnLanEnabled = bool(device_infos.get('wake_on_lan', True))
nic.device.deviceInfo = vim.Description()
nic.device.deviceInfo.label = device_label
nic.device.deviceInfo.summary = device_infos['name']
nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
nic.device.connectable.startConnected = bool(device_infos.get('start_connected', True))
nic.device.connectable.allowGuestControl = bool(device_infos.get('allow_guest_control', True))
nic.device.connectable.connected = True
if 'mac' in device_infos and is_mac(device_infos['mac']):
nic.device.addressType = 'manual'
nic.device.macAddress = device_infos['mac']
else:
nic.device.addressType = 'generated'
return nic
def integer_value(self, input_value, name):
"""
Function to return int value for given input, else return error
Args:
input_value: Input value to retrieve int value from
name: Name of the Input value (used to build error message)
Returns: (int) if integer value can be obtained, otherwise will send a error message.
"""
if isinstance(input_value, int):
return input_value
elif isinstance(input_value, str) and input_value.isdigit():
return int(input_value)
else:
self.module.fail_json(msg='"%s" attribute should be an'
' integer value.' % name)
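# A rough sketch of how the device helper is typically driven (illustrative
# only; `module` stands for an AnsibleModule instance and the 10 GB size is
# an arbitrary example):
#
#     helper = PyVmomiDeviceHelper(module)
#     scsi_ctl = helper.create_scsi_controller('paravirtual')
#     disk_spec = helper.create_scsi_disk(scsi_ctl)
#     disk_spec.device.capacityInKB = 10 * 1024 * 1024
#     config_spec = vim.vm.ConfigSpec(deviceChange=[scsi_ctl, disk_spec])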
class PyVmomiCache(object):
""" This class caches references to objects which are requested multiples times but not modified """
def __init__(self, content, dc_name=None):
self.content = content
self.dc_name = dc_name
self.networks = {}
self.clusters = {}
self.esx_hosts = {}
self.parent_datacenters = {}
def find_obj(self, content, types, name, confine_to_datacenter=True):
""" Wrapper around find_obj to set datacenter context """
result = find_obj(content, types, name)
if result and confine_to_datacenter:
if to_text(self.get_parent_datacenter(result).name) != to_text(self.dc_name):
result = None
objects = self.get_all_objs(content, types, confine_to_datacenter=True)
for obj in objects:
if name is None or to_text(obj.name) == to_text(name):
return obj
return result
def get_all_objs(self, content, types, confine_to_datacenter=True):
""" Wrapper around get_all_objs to set datacenter context """
objects = get_all_objs(content, types)
if confine_to_datacenter:
if hasattr(objects, 'items'):
# resource pools come back as a dictionary
# make a copy
for k, v in tuple(objects.items()):
parent_dc = self.get_parent_datacenter(k)
if parent_dc.name != self.dc_name:
del objects[k]
else:
# everything else should be a list
objects = [x for x in objects if self.get_parent_datacenter(x).name == self.dc_name]
return objects
def get_network(self, network):
network = quote_obj_name(network)
if network not in self.networks:
self.networks[network] = self.find_obj(self.content, [vim.Network], network)
return self.networks[network]
def get_cluster(self, cluster):
if cluster not in self.clusters:
self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster)
return self.clusters[cluster]
def get_esx_host(self, host):
if host not in self.esx_hosts:
self.esx_hosts[host] = self.find_obj(self.content, [vim.HostSystem], host)
return self.esx_hosts[host]
def get_parent_datacenter(self, obj):
""" Walk the parent tree to find the objects datacenter """
if isinstance(obj, vim.Datacenter):
return obj
if obj in self.parent_datacenters:
return self.parent_datacenters[obj]
datacenter = None
while True:
if not hasattr(obj, 'parent'):
break
obj = obj.parent
if isinstance(obj, vim.Datacenter):
datacenter = obj
break
self.parent_datacenters[obj] = datacenter
return datacenter
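# Usage sketch for the cache (illustrative; `content` would be the service
# instance content already held by the module):
#
#     cache = PyVmomiCache(content, dc_name='DC1')
#     net = cache.get_network('VM Network')   # queries vCenter once
#     net = cache.get_network('VM Network')   # second call is served from the cache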
class PyVmomiHelper(PyVmomi):
def __init__(self, module):
super(PyVmomiHelper, self).__init__(module)
self.device_helper = PyVmomiDeviceHelper(self.module)
self.configspec = None
self.relospec = None
self.change_detected = False # a change was detected and needs to be applied through reconfiguration
self.change_applied = False # a change was applied meaning at least one task succeeded
self.customspec = None
self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter'])
def gather_facts(self, vm):
return gather_vm_facts(self.content, vm)
def remove_vm(self, vm, delete_from_inventory=False):
# https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
if vm.summary.runtime.powerState.lower() == 'poweredon':
self.module.fail_json(msg="Virtual machine %s found in 'powered on' state, "
"please use 'force' parameter to remove or poweroff VM "
"and try removing VM again." % vm.name)
# Delete VM from Inventory
if delete_from_inventory:
try:
vm.UnregisterVM()
except (vim.fault.TaskInProgress,
vmodl.RuntimeFault) as e:
return {'changed': self.change_applied, 'failed': True, 'msg': e.msg, 'op': 'UnregisterVM'}
self.change_applied = True
return {'changed': self.change_applied, 'failed': False}
# Delete VM from Disk
task = vm.Destroy()
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'destroy'}
else:
return {'changed': self.change_applied, 'failed': False}
def configure_guestid(self, vm_obj, vm_creation=False):
# guest_id is not required when using templates
if self.params['template']:
return
# guest_id is only mandatory on VM creation
if vm_creation and self.params['guest_id'] is None:
self.module.fail_json(msg="guest_id attribute is mandatory for VM creation")
if self.params['guest_id'] and \
(vm_obj is None or self.params['guest_id'].lower() != vm_obj.summary.config.guestId.lower()):
self.change_detected = True
self.configspec.guestId = self.params['guest_id']
def configure_resource_alloc_info(self, vm_obj):
"""
Function to configure resource allocation information about virtual machine
:param vm_obj: VM object in case of reconfigure, None in case of deploy
:return: None
"""
rai_change_detected = False
memory_allocation = vim.ResourceAllocationInfo()
cpu_allocation = vim.ResourceAllocationInfo()
if 'hardware' in self.params:
if 'mem_limit' in self.params['hardware']:
mem_limit = None
try:
mem_limit = int(self.params['hardware'].get('mem_limit'))
except ValueError:
self.module.fail_json(msg="hardware.mem_limit attribute should be an integer value.")
memory_allocation.limit = mem_limit
if vm_obj is None or memory_allocation.limit != vm_obj.config.memoryAllocation.limit:
rai_change_detected = True
if 'mem_reservation' in self.params['hardware'] or 'memory_reservation' in self.params['hardware']:
mem_reservation = self.params['hardware'].get('mem_reservation')
if mem_reservation is None:
mem_reservation = self.params['hardware'].get('memory_reservation')
try:
mem_reservation = int(mem_reservation)
except ValueError:
self.module.fail_json(msg="hardware.mem_reservation or hardware.memory_reservation should be an integer value.")
memory_allocation.reservation = mem_reservation
if vm_obj is None or \
memory_allocation.reservation != vm_obj.config.memoryAllocation.reservation:
rai_change_detected = True
if 'cpu_limit' in self.params['hardware']:
cpu_limit = None
try:
cpu_limit = int(self.params['hardware'].get('cpu_limit'))
except ValueError:
self.module.fail_json(msg="hardware.cpu_limit attribute should be an integer value.")
cpu_allocation.limit = cpu_limit
if vm_obj is None or cpu_allocation.limit != vm_obj.config.cpuAllocation.limit:
rai_change_detected = True
if 'cpu_reservation' in self.params['hardware']:
cpu_reservation = None
try:
cpu_reservation = int(self.params['hardware'].get('cpu_reservation'))
except ValueError:
self.module.fail_json(msg="hardware.cpu_reservation should be an integer value.")
cpu_allocation.reservation = cpu_reservation
if vm_obj is None or \
cpu_allocation.reservation != vm_obj.config.cpuAllocation.reservation:
rai_change_detected = True
if rai_change_detected:
self.configspec.memoryAllocation = memory_allocation
self.configspec.cpuAllocation = cpu_allocation
self.change_detected = True
def configure_cpu_and_memory(self, vm_obj, vm_creation=False):
# set cpu/memory/etc
if 'hardware' in self.params:
if 'num_cpus' in self.params['hardware']:
try:
num_cpus = int(self.params['hardware']['num_cpus'])
except ValueError:
self.module.fail_json(msg="hardware.num_cpus attribute should be an integer value.")
# check VM power state and cpu hot-add/hot-remove state before re-config VM
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
if not vm_obj.config.cpuHotRemoveEnabled and num_cpus < vm_obj.config.hardware.numCPU:
self.module.fail_json(msg="Configured cpu number is less than the cpu number of the VM, "
"cpuHotRemove is not enabled")
if not vm_obj.config.cpuHotAddEnabled and num_cpus > vm_obj.config.hardware.numCPU:
self.module.fail_json(msg="Configured cpu number is more than the cpu number of the VM, "
"cpuHotAdd is not enabled")
if 'num_cpu_cores_per_socket' in self.params['hardware']:
try:
num_cpu_cores_per_socket = int(self.params['hardware']['num_cpu_cores_per_socket'])
except ValueError:
self.module.fail_json(msg="hardware.num_cpu_cores_per_socket attribute "
"should be an integer value.")
if num_cpus % num_cpu_cores_per_socket != 0:
self.module.fail_json(msg="hardware.num_cpus attribute should be a multiple "
"of hardware.num_cpu_cores_per_socket")
self.configspec.numCoresPerSocket = num_cpu_cores_per_socket
if vm_obj is None or self.configspec.numCoresPerSocket != vm_obj.config.hardware.numCoresPerSocket:
self.change_detected = True
self.configspec.numCPUs = num_cpus
if vm_obj is None or self.configspec.numCPUs != vm_obj.config.hardware.numCPU:
self.change_detected = True
            # num_cpus is mandatory for VM creation
elif vm_creation and not self.params['template']:
self.module.fail_json(msg="hardware.num_cpus attribute is mandatory for VM creation")
if 'memory_mb' in self.params['hardware']:
try:
memory_mb = int(self.params['hardware']['memory_mb'])
except ValueError:
self.module.fail_json(msg="Failed to parse hardware.memory_mb value."
" Please refer the documentation and provide"
" correct value.")
# check VM power state and memory hotadd state before re-config VM
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
if vm_obj.config.memoryHotAddEnabled and memory_mb < vm_obj.config.hardware.memoryMB:
self.module.fail_json(msg="Configured memory is less than memory size of the VM, "
"operation is not supported")
elif not vm_obj.config.memoryHotAddEnabled and memory_mb != vm_obj.config.hardware.memoryMB:
self.module.fail_json(msg="memoryHotAdd is not enabled")
self.configspec.memoryMB = memory_mb
if vm_obj is None or self.configspec.memoryMB != vm_obj.config.hardware.memoryMB:
self.change_detected = True
# memory_mb is mandatory for VM creation
elif vm_creation and not self.params['template']:
self.module.fail_json(msg="hardware.memory_mb attribute is mandatory for VM creation")
if 'hotadd_memory' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.memoryHotAddEnabled != bool(self.params['hardware']['hotadd_memory']):
self.module.fail_json(msg="Configure hotadd memory operation is not supported when VM is power on")
self.configspec.memoryHotAddEnabled = bool(self.params['hardware']['hotadd_memory'])
if vm_obj is None or self.configspec.memoryHotAddEnabled != vm_obj.config.memoryHotAddEnabled:
self.change_detected = True
if 'hotadd_cpu' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.cpuHotAddEnabled != bool(self.params['hardware']['hotadd_cpu']):
self.module.fail_json(msg="Configure hotadd cpu operation is not supported when VM is power on")
self.configspec.cpuHotAddEnabled = bool(self.params['hardware']['hotadd_cpu'])
if vm_obj is None or self.configspec.cpuHotAddEnabled != vm_obj.config.cpuHotAddEnabled:
self.change_detected = True
if 'hotremove_cpu' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.cpuHotRemoveEnabled != bool(self.params['hardware']['hotremove_cpu']):
self.module.fail_json(msg="Configure hotremove cpu operation is not supported when VM is power on")
self.configspec.cpuHotRemoveEnabled = bool(self.params['hardware']['hotremove_cpu'])
if vm_obj is None or self.configspec.cpuHotRemoveEnabled != vm_obj.config.cpuHotRemoveEnabled:
self.change_detected = True
if 'memory_reservation_lock' in self.params['hardware']:
self.configspec.memoryReservationLockedToMax = bool(self.params['hardware']['memory_reservation_lock'])
if vm_obj is None or self.configspec.memoryReservationLockedToMax != vm_obj.config.memoryReservationLockedToMax:
self.change_detected = True
if 'boot_firmware' in self.params['hardware']:
# boot firmware re-config can cause boot issue
if vm_obj is not None:
return
boot_firmware = self.params['hardware']['boot_firmware'].lower()
if boot_firmware not in ('bios', 'efi'):
self.module.fail_json(msg="hardware.boot_firmware value is invalid [%s]."
" Need one of ['bios', 'efi']." % boot_firmware)
self.configspec.firmware = boot_firmware
self.change_detected = True
def sanitize_cdrom_params(self):
# cdroms {'ide': [{num: 0, cdrom: []}, {}], 'sata': [{num: 0, cdrom: []}, {}, ...]}
cdroms = {'ide': [], 'sata': []}
expected_cdrom_spec = self.params.get('cdrom')
if expected_cdrom_spec:
for cdrom_spec in expected_cdrom_spec:
cdrom_spec['controller_type'] = cdrom_spec.get('controller_type', 'ide').lower()
if cdrom_spec['controller_type'] not in ['ide', 'sata']:
self.module.fail_json(msg="Invalid cdrom.controller_type: %s, valid value is 'ide' or 'sata'."
% cdrom_spec['controller_type'])
cdrom_spec['state'] = cdrom_spec.get('state', 'present').lower()
if cdrom_spec['state'] not in ['present', 'absent']:
self.module.fail_json(msg="Invalid cdrom.state: %s, valid value is 'present', 'absent'."
% cdrom_spec['state'])
if cdrom_spec['state'] == 'present':
if 'type' in cdrom_spec and cdrom_spec.get('type') not in ['none', 'client', 'iso']:
self.module.fail_json(msg="Invalid cdrom.type: %s, valid value is 'none', 'client' or 'iso'."
% cdrom_spec.get('type'))
if cdrom_spec.get('type') == 'iso' and not cdrom_spec.get('iso_path'):
self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.")
if cdrom_spec['controller_type'] == 'ide' and \
(cdrom_spec.get('controller_number') not in [0, 1] or cdrom_spec.get('unit_number') not in [0, 1]):
self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s, valid"
" values are 0 or 1 for IDE controller." % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number')))
if cdrom_spec['controller_type'] == 'sata' and \
(cdrom_spec.get('controller_number') not in range(0, 4) or cdrom_spec.get('unit_number') not in range(0, 30)):
self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s,"
" valid controller_number value is 0-3, valid unit_number is 0-29"
" for SATA controller." % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number')))
ctl_exist = False
for exist_spec in cdroms.get(cdrom_spec['controller_type']):
if exist_spec['num'] == cdrom_spec['controller_number']:
ctl_exist = True
exist_spec['cdrom'].append(cdrom_spec)
break
if not ctl_exist:
cdroms.get(cdrom_spec['controller_type']).append({'num': cdrom_spec['controller_number'], 'cdrom': [cdrom_spec]})
return cdroms
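    # Shape of the structure returned by sanitize_cdrom_params above
    # (illustrative values):
    #
    #     {'ide': [{'num': 0, 'cdrom': [{'controller_type': 'ide',
    #                                    'controller_number': 0,
    #                                    'unit_number': 0,
    #                                    'type': 'iso',
    #                                    'iso_path': '[datastore1] x.iso',
    #                                    'state': 'present'}]}],
    #      'sata': []}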
def configure_cdrom(self, vm_obj):
# Configure the VM CD-ROM
if self.params.get('cdrom'):
if vm_obj and vm_obj.config.template:
# Changing CD-ROM settings on a template is not supported
return
if isinstance(self.params.get('cdrom'), dict):
self.configure_cdrom_dict(vm_obj)
elif isinstance(self.params.get('cdrom'), list):
self.configure_cdrom_list(vm_obj)
def configure_cdrom_dict(self, vm_obj):
if self.params["cdrom"].get('type') not in ['none', 'client', 'iso']:
self.module.fail_json(msg="cdrom.type is mandatory. Options are 'none', 'client', and 'iso'.")
if self.params["cdrom"]['type'] == 'iso' and not self.params["cdrom"].get('iso_path'):
self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.")
cdrom_spec = None
cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj)
iso_path = self.params["cdrom"].get("iso_path")
if len(cdrom_devices) == 0:
# Creating new CD-ROM
ide_devices = self.get_vm_ide_devices(vm=vm_obj)
if len(ide_devices) == 0:
# Creating new IDE device
ide_ctl = self.device_helper.create_ide_controller()
ide_device = ide_ctl.device
self.change_detected = True
self.configspec.deviceChange.append(ide_ctl)
else:
ide_device = ide_devices[0]
if len(ide_device.device) > 3:
self.module.fail_json(msg="hardware.cdrom specified for a VM or template which already has 4"
" IDE devices of which none are a cdrom")
cdrom_spec = self.device_helper.create_cdrom(ide_device=ide_device, cdrom_type=self.params["cdrom"]["type"],
iso_path=iso_path)
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
cdrom_spec.device.connectable.connected = (self.params["cdrom"]["type"] != "none")
elif not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_devices[0],
cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path):
self.device_helper.update_cdrom_config(vm_obj, self.params["cdrom"], cdrom_devices[0], iso_path=iso_path)
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
cdrom_spec.device = cdrom_devices[0]
if cdrom_spec:
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
def configure_cdrom_list(self, vm_obj):
configured_cdroms = self.sanitize_cdrom_params()
cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj)
# configure IDE CD-ROMs
if configured_cdroms['ide']:
ide_devices = self.get_vm_ide_devices(vm=vm_obj)
for expected_cdrom_spec in configured_cdroms['ide']:
ide_device = None
for device in ide_devices:
if device.busNumber == expected_cdrom_spec['num']:
ide_device = device
break
                # if no matching IDE controller was found, or no IDE controller exists
if not ide_device:
ide_ctl = self.device_helper.create_ide_controller(bus_number=expected_cdrom_spec['num'])
ide_device = ide_ctl.device
self.change_detected = True
self.configspec.deviceChange.append(ide_ctl)
for cdrom in expected_cdrom_spec['cdrom']:
cdrom_device = None
iso_path = cdrom.get('iso_path')
unit_number = cdrom.get('unit_number')
for target_cdrom in cdrom_devices:
if target_cdrom.controllerKey == ide_device.key and target_cdrom.unitNumber == unit_number:
cdrom_device = target_cdrom
break
# create new CD-ROM
if not cdrom_device and cdrom.get('state') != 'absent':
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
                            self.module.fail_json(msg='A CD-ROM attached to an IDE controller does not support hot-add.')
if len(ide_device.device) == 2:
self.module.fail_json(msg='Maximum number of CD-ROMs attached to IDE controller is 2.')
cdrom_spec = self.device_helper.create_cdrom(ide_device=ide_device, cdrom_type=cdrom['type'],
iso_path=iso_path, unit_number=unit_number)
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
# re-configure CD-ROM
elif cdrom_device and cdrom.get('state') != 'absent' and \
not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_device,
cdrom_type=cdrom['type'], iso_path=iso_path):
self.device_helper.update_cdrom_config(vm_obj, cdrom, cdrom_device, iso_path=iso_path)
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
cdrom_spec.device = cdrom_device
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
# delete CD-ROM
elif cdrom_device and cdrom.get('state') == 'absent':
if vm_obj and vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
                            self.module.fail_json(msg='A CD-ROM attached to an IDE controller does not support hot-remove.')
cdrom_spec = self.device_helper.remove_cdrom(cdrom_device)
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
        # configuring SATA CD-ROMs is not supported yet
if configured_cdroms['sata']:
pass
def configure_hardware_params(self, vm_obj):
"""
Function to configure hardware related configuration of virtual machine
Args:
vm_obj: virtual machine object
"""
if 'hardware' in self.params:
if 'max_connections' in self.params['hardware']:
# maxMksConnections == max_connections
self.configspec.maxMksConnections = int(self.params['hardware']['max_connections'])
if vm_obj is None or self.configspec.maxMksConnections != vm_obj.config.maxMksConnections:
self.change_detected = True
if 'nested_virt' in self.params['hardware']:
self.configspec.nestedHVEnabled = bool(self.params['hardware']['nested_virt'])
if vm_obj is None or self.configspec.nestedHVEnabled != bool(vm_obj.config.nestedHVEnabled):
self.change_detected = True
if 'version' in self.params['hardware']:
hw_version_check_failed = False
temp_version = self.params['hardware'].get('version', 10)
if isinstance(temp_version, str) and temp_version.lower() == 'latest':
# Check is to make sure vm_obj is not of type template
if vm_obj and not vm_obj.config.template:
try:
task = vm_obj.UpgradeVM_Task()
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'}
except vim.fault.AlreadyUpgraded:
# Don't fail if VM is already upgraded.
pass
else:
try:
temp_version = int(temp_version)
except ValueError:
hw_version_check_failed = True
if temp_version not in range(3, 16):
hw_version_check_failed = True
if hw_version_check_failed:
self.module.fail_json(msg="Failed to set hardware.version '%s' value as valid"
" values range from 3 (ESX 2.x) to 14 (ESXi 6.5 and greater)." % temp_version)
# Hardware version is denoted as "vmx-10"
version = "vmx-%02d" % temp_version
self.configspec.version = version
if vm_obj is None or self.configspec.version != vm_obj.config.version:
self.change_detected = True
# Check is to make sure vm_obj is not of type template
if vm_obj and not vm_obj.config.template:
# VM exists and we need to update the hardware version
current_version = vm_obj.config.version
# current_version = "vmx-10"
version_digit = int(current_version.split("-", 1)[-1])
if temp_version < version_digit:
self.module.fail_json(msg="Current hardware version '%d' which is greater than the specified"
" version '%d'. Downgrading hardware version is"
" not supported. Please specify version greater"
" than the current version." % (version_digit,
temp_version))
new_version = "vmx-%02d" % temp_version
try:
task = vm_obj.UpgradeVM_Task(new_version)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'}
except vim.fault.AlreadyUpgraded:
# Don't fail if VM is already upgraded.
pass
if 'virt_based_security' in self.params['hardware']:
host_version = self.select_host().summary.config.product.version
if int(host_version.split('.')[0]) < 6 or (int(host_version.split('.')[0]) == 6 and int(host_version.split('.')[1]) < 7):
self.module.fail_json(msg="ESXi version %s not support VBS." % host_version)
guest_ids = ['windows9_64Guest', 'windows9Server64Guest']
if vm_obj is None:
guestid = self.configspec.guestId
else:
guestid = vm_obj.summary.config.guestId
if guestid not in guest_ids:
self.module.fail_json(msg="Guest '%s' not support VBS." % guestid)
if (vm_obj is None and int(self.configspec.version.split('-')[1]) >= 14) or \
(vm_obj and int(vm_obj.config.version.split('-')[1]) >= 14 and (vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOff)):
self.configspec.flags = vim.vm.FlagInfo()
self.configspec.flags.vbsEnabled = bool(self.params['hardware']['virt_based_security'])
if bool(self.params['hardware']['virt_based_security']):
self.configspec.flags.vvtdEnabled = True
self.configspec.nestedHVEnabled = True
if (vm_obj is None and self.configspec.firmware == 'efi') or \
(vm_obj and vm_obj.config.firmware == 'efi'):
self.configspec.bootOptions = vim.vm.BootOptions()
self.configspec.bootOptions.efiSecureBootEnabled = True
else:
self.module.fail_json(msg="Not support VBS when firmware is BIOS.")
if vm_obj is None or self.configspec.flags.vbsEnabled != vm_obj.config.flags.vbsEnabled:
self.change_detected = True
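# Quick sanity sketch of the version handling above (assumed values):
#
#     >>> "vmx-%02d" % 10
#     'vmx-10'
#     >>> int("vmx-10".split("-", 1)[-1])
#     10
#
# i.e. the requested integer is formatted as a zero-padded "vmx-NN" string,
# and the VM's current version is parsed back the same way before comparing.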
def get_device_by_type(self, vm=None, type=None):
device_list = []
if vm is None or type is None:
return device_list
for device in vm.config.hardware.device:
if isinstance(device, type):
device_list.append(device)
return device_list
def get_vm_cdrom_devices(self, vm=None):
return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualCdrom)
def get_vm_ide_devices(self, vm=None):
return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualIDEController)
def get_vm_network_interfaces(self, vm=None):
device_list = []
if vm is None:
return device_list
nw_device_types = (vim.vm.device.VirtualPCNet32, vim.vm.device.VirtualVmxnet2,
vim.vm.device.VirtualVmxnet3, vim.vm.device.VirtualE1000,
vim.vm.device.VirtualE1000e, vim.vm.device.VirtualSriovEthernetCard)
for device in vm.config.hardware.device:
if isinstance(device, nw_device_types):
device_list.append(device)
return device_list
def sanitize_network_params(self):
"""
Sanitize user-provided network params
Returns: A sanitized list of network params, else fails
"""
network_devices = list()
# Clean up user data here
for network in self.params['networks']:
if 'name' not in network and 'vlan' not in network:
self.module.fail_json(msg="Please specify at least a network name or"
" a VLAN name under VM network list.")
if 'name' in network and self.cache.get_network(network['name']) is None:
self.module.fail_json(msg="Network '%(name)s' does not exist." % network)
elif 'vlan' in network:
dvps = self.cache.get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
for dvp in dvps:
if hasattr(dvp.config.defaultPortConfig, 'vlan') and \
isinstance(dvp.config.defaultPortConfig.vlan.vlanId, int) and \
str(dvp.config.defaultPortConfig.vlan.vlanId) == str(network['vlan']):
network['name'] = dvp.config.name
break
if 'dvswitch_name' in network and \
dvp.config.distributedVirtualSwitch.name == network['dvswitch_name'] and \
dvp.config.name == network['vlan']:
network['name'] = dvp.config.name
break
if dvp.config.name == network['vlan']:
network['name'] = dvp.config.name
break
else:
self.module.fail_json(msg="VLAN '%(vlan)s' does not exist." % network)
if 'type' in network:
if network['type'] not in ['dhcp', 'static']:
self.module.fail_json(msg="Network type '%(type)s' is not a valid parameter."
" Valid parameters are ['dhcp', 'static']." % network)
if network['type'] != 'static' and ('ip' in network or 'netmask' in network):
self.module.fail_json(msg='Static IP information provided for network "%(name)s",'
' but "type" is set to "%(type)s".' % network)
else:
# 'type' is an optional parameter; if the user provided an IP or netmask,
# assume network type 'static'
if 'ip' in network or 'netmask' in network:
network['type'] = 'static'
else:
# User wants network type as 'dhcp'
network['type'] = 'dhcp'
if network.get('type') == 'static':
if 'ip' in network and 'netmask' not in network:
self.module.fail_json(msg="'netmask' is required if 'ip' is"
" specified under VM network list.")
if 'ip' not in network and 'netmask' in network:
self.module.fail_json(msg="'ip' is required if 'netmask' is"
" specified under VM network list.")
validate_device_types = ['pcnet32', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e', 'sriov']
if 'device_type' in network and network['device_type'] not in validate_device_types:
self.module.fail_json(msg="Device type specified '%s' is not valid."
" Please specify correct device"
" type from ['%s']." % (network['device_type'],
"', '".join(validate_device_types)))
if 'mac' in network and not is_mac(network['mac']):
self.module.fail_json(msg="Device MAC address '%s' is invalid."
" Please provide correct MAC address." % network['mac'])
network_devices.append(network)
return network_devices
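# Illustrative sketch of input accepted by sanitize_network_params() above;
# all names and addresses are hypothetical:
#
#     networks:
#       - name: VM Network
#         device_type: vmxnet3
#         ip: 192.168.10.11
#         netmask: 255.255.255.0     # implies type: static
#       - vlan: 101
#         dvswitch_name: dvSwitch0   # resolved to the matching portgroup name
#
# Entries carrying ip/netmask are forced to type 'static'; entries without
# either fall back to 'dhcp'.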
def configure_network(self, vm_obj):
# Ignore an empty networks list; this keeps existing networks when deploying a template/cloning a VM
if len(self.params['networks']) == 0:
return
network_devices = self.sanitize_network_params()
# List current devices for clone or idempotency checks
current_net_devices = self.get_vm_network_interfaces(vm=vm_obj)
if len(network_devices) < len(current_net_devices):
self.module.fail_json(msg="Given network device list is lesser than current VM device list (%d < %d). "
"Removing interfaces is not allowed"
% (len(network_devices), len(current_net_devices)))
for key in range(0, len(network_devices)):
nic_change_detected = False
network_name = network_devices[key]['name']
if key < len(current_net_devices) and (vm_obj or self.params['template']):
# We are editing existing network devices; this happens when
# cloning from a VM or template
nic = vim.vm.device.VirtualDeviceSpec()
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
nic.device = current_net_devices[key]
if ('wake_on_lan' in network_devices[key] and
nic.device.wakeOnLanEnabled != network_devices[key].get('wake_on_lan')):
nic.device.wakeOnLanEnabled = network_devices[key].get('wake_on_lan')
nic_change_detected = True
if ('start_connected' in network_devices[key] and
nic.device.connectable.startConnected != network_devices[key].get('start_connected')):
nic.device.connectable.startConnected = network_devices[key].get('start_connected')
nic_change_detected = True
if ('allow_guest_control' in network_devices[key] and
nic.device.connectable.allowGuestControl != network_devices[key].get('allow_guest_control')):
nic.device.connectable.allowGuestControl = network_devices[key].get('allow_guest_control')
nic_change_detected = True
if nic.device.deviceInfo.summary != network_name:
nic.device.deviceInfo.summary = network_name
nic_change_detected = True
if 'device_type' in network_devices[key]:
device = self.device_helper.get_device(network_devices[key]['device_type'], network_name)
device_class = type(device)
if not isinstance(nic.device, device_class):
self.module.fail_json(msg="Changing the device type is not possible when interface is already present. "
"The failing device type is %s" % network_devices[key]['device_type'])
# Changing the MAC address has no effect when editing an existing interface
if 'mac' in network_devices[key] and network_devices[key]['mac'] != current_net_devices[key].macAddress:
    self.module.fail_json(msg="Changing the MAC address has no effect when the interface is already present. "
                              "The failing new MAC address is %s" % network_devices[key]['mac'])
else:
# Default device type is vmxnet3, VMware best practice
device_type = network_devices[key].get('device_type', 'vmxnet3')
nic = self.device_helper.create_nic(device_type,
'Network Adapter %s' % (key + 1),
network_devices[key])
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
nic_change_detected = True
if hasattr(self.cache.get_network(network_name), 'portKeys'):
# VDS switch
pg_obj = None
if 'dvswitch_name' in network_devices[key]:
dvs_name = network_devices[key]['dvswitch_name']
dvs_obj = find_dvs_by_name(self.content, dvs_name)
if dvs_obj is None:
self.module.fail_json(msg="Unable to find distributed virtual switch %s" % dvs_name)
pg_obj = find_dvspg_by_name(dvs_obj, network_name)
if pg_obj is None:
self.module.fail_json(msg="Unable to find distributed port group %s" % network_name)
else:
pg_obj = self.cache.find_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_name)
# TODO: (akasurde) There is no way to find association between resource pool and distributed virtual portgroup
# For now, check if we are able to find distributed virtual switch
if not pg_obj.config.distributedVirtualSwitch:
self.module.fail_json(msg="Failed to find distributed virtual switch which is associated with"
" distributed virtual portgroup '%s'. Make sure hostsystem is associated with"
" the given distributed virtual portgroup. Also, check if user has correct"
" permission to access distributed virtual switch in the given portgroup." % pg_obj.name)
if (nic.device.backing and
(not hasattr(nic.device.backing, 'port') or
(nic.device.backing.port.portgroupKey != pg_obj.key or
nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid))):
nic_change_detected = True
dvs_port_connection = vim.dvs.PortConnection()
dvs_port_connection.portgroupKey = pg_obj.key
# If the user specifies a distributed port group without associating it to the host system on
# which the virtual machine is going to be deployed, then we get an error. We can infer that
# there is no association between the given distributed port group and the host system.
host_system = self.params.get('esxi_hostname')
if host_system and host_system not in [host.config.host.name for host in pg_obj.config.distributedVirtualSwitch.config.host]:
self.module.fail_json(msg="It seems that host system '%s' is not associated with distributed"
" virtual portgroup '%s'. Please make sure host system is associated"
" with given distributed virtual portgroup" % (host_system, pg_obj.name))
dvs_port_connection.switchUuid = pg_obj.config.distributedVirtualSwitch.uuid
nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
nic.device.backing.port = dvs_port_connection
elif isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork):
# NSX-T Logical Switch
nic.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
network_id = self.cache.get_network(network_name).summary.opaqueNetworkId
nic.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch'
nic.device.backing.opaqueNetworkId = network_id
nic.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id
nic_change_detected = True
else:
# vSwitch
if not isinstance(nic.device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):
nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
nic_change_detected = True
net_obj = self.cache.get_network(network_name)
if nic.device.backing.network != net_obj:
nic.device.backing.network = net_obj
nic_change_detected = True
if nic.device.backing.deviceName != network_name:
nic.device.backing.deviceName = network_name
nic_change_detected = True
if nic_change_detected:
# Change to fix the issue found while configuring opaque network
# VMs cloned from a template with opaque network will get disconnected
# Replacing deprecated config parameter with relocation Spec
if isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork):
self.relospec.deviceChange.append(nic)
else:
self.configspec.deviceChange.append(nic)
self.change_detected = True
def configure_vapp_properties(self, vm_obj):
if len(self.params['vapp_properties']) == 0:
return
for x in self.params['vapp_properties']:
if not x.get('id'):
self.module.fail_json(msg="id is required to set vApp property")
new_vmconfig_spec = vim.vApp.VmConfigSpec()
if vm_obj:
# VM exists
# This is primarily for vcsim/integration tests, unset vAppConfig was not seen on my deployments
orig_spec = vm_obj.config.vAppConfig if vm_obj.config.vAppConfig else new_vmconfig_spec
vapp_properties_current = dict((x.id, x) for x in orig_spec.property)
vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
# each property must have a unique key
# init key counter with max value + 1
all_keys = [x.key for x in orig_spec.property]
new_property_index = max(all_keys) + 1 if all_keys else 0
for property_id, property_spec in vapp_properties_to_change.items():
is_property_changed = False
new_vapp_property_spec = vim.vApp.PropertySpec()
if property_id in vapp_properties_current:
if property_spec.get('operation') == 'remove':
new_vapp_property_spec.operation = 'remove'
new_vapp_property_spec.removeKey = vapp_properties_current[property_id].key
is_property_changed = True
else:
# this is 'edit' branch
new_vapp_property_spec.operation = 'edit'
new_vapp_property_spec.info = vapp_properties_current[property_id]
try:
for property_name, property_value in property_spec.items():
if property_name == 'operation':
# operation is not an info object property
# if set to anything other than 'remove' we don't fail
continue
# Updating attributes only if needed
if getattr(new_vapp_property_spec.info, property_name) != property_value:
setattr(new_vapp_property_spec.info, property_name, property_value)
is_property_changed = True
except Exception as e:
msg = "Failed to set vApp property field='%s' and value='%s'. Error: %s" % (property_name, property_value, to_text(e))
self.module.fail_json(msg=msg)
else:
if property_spec.get('operation') == 'remove':
# attempt to delete non-existent property
continue
# this is add new property branch
new_vapp_property_spec.operation = 'add'
property_info = vim.vApp.PropertyInfo()
property_info.classId = property_spec.get('classId')
property_info.instanceId = property_spec.get('instanceId')
property_info.id = property_spec.get('id')
property_info.category = property_spec.get('category')
property_info.label = property_spec.get('label')
property_info.type = property_spec.get('type', 'string')
property_info.userConfigurable = property_spec.get('userConfigurable', True)
property_info.defaultValue = property_spec.get('defaultValue')
property_info.value = property_spec.get('value', '')
property_info.description = property_spec.get('description')
new_vapp_property_spec.info = property_info
new_vapp_property_spec.info.key = new_property_index
new_property_index += 1
is_property_changed = True
if is_property_changed:
new_vmconfig_spec.property.append(new_vapp_property_spec)
else:
# New VM
all_keys = [x.key for x in new_vmconfig_spec.property]
new_property_index = max(all_keys) + 1 if all_keys else 0
vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
is_property_changed = False
for property_id, property_spec in vapp_properties_to_change.items():
new_vapp_property_spec = vim.vApp.PropertySpec()
# this is add new property branch
new_vapp_property_spec.operation = 'add'
property_info = vim.vApp.PropertyInfo()
property_info.classId = property_spec.get('classId')
property_info.instanceId = property_spec.get('instanceId')
property_info.id = property_spec.get('id')
property_info.category = property_spec.get('category')
property_info.label = property_spec.get('label')
property_info.type = property_spec.get('type', 'string')
property_info.userConfigurable = property_spec.get('userConfigurable', True)
property_info.defaultValue = property_spec.get('defaultValue')
property_info.value = property_spec.get('value', '')
property_info.description = property_spec.get('description')
new_vapp_property_spec.info = property_info
new_vapp_property_spec.info.key = new_property_index
new_property_index += 1
is_property_changed = True
if is_property_changed:
new_vmconfig_spec.property.append(new_vapp_property_spec)
if new_vmconfig_spec.property:
self.configspec.vAppConfig = new_vmconfig_spec
self.change_detected = True
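# Illustrative sketch of vapp_properties handled above (hypothetical values):
#
#     vapp_properties:
#       - id: hostname            # 'id' is mandatory for every entry
#         type: string
#         value: example-vm       # existing ids are edited, new ids are added
#       - id: old_prop
#         operation: remove       # removes the property with this id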
def customize_customvalues(self, vm_obj, config_spec):
if len(self.params['customvalues']) == 0:
return
vm_custom_spec = config_spec
vm_custom_spec.extraConfig = []
changed = False
facts = self.gather_facts(vm_obj)
for kv in self.params['customvalues']:
if 'key' not in kv or 'value' not in kv:
self.module.exit_json(msg="customvalues items required both 'key' and 'value' fields.")
# If kv is not kv fetched from facts, change it
if kv['key'] not in facts['customvalues'] or facts['customvalues'][kv['key']] != kv['value']:
option = vim.option.OptionValue()
option.key = kv['key']
option.value = kv['value']
vm_custom_spec.extraConfig.append(option)
changed = True
if changed:
self.change_detected = True
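# Illustrative sketch of the customvalues input consumed above (made-up key):
#
#     customvalues:
#       - key: guestinfo.build
#         value: "1234"
#
# Each entry becomes a vim.option.OptionValue in extraConfig, but only when it
# differs from the value already reported by the gathered facts.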
def customize_vm(self, vm_obj):
# User specified customization specification
custom_spec_name = self.params.get('customization_spec')
if custom_spec_name:
cc_mgr = self.content.customizationSpecManager
if cc_mgr.DoesCustomizationSpecExist(name=custom_spec_name):
temp_spec = cc_mgr.GetCustomizationSpec(name=custom_spec_name)
self.customspec = temp_spec.spec
return
else:
self.module.fail_json(msg="Unable to find customization specification"
" '%s' in given configuration." % custom_spec_name)
# Network settings
adaptermaps = []
for network in self.params['networks']:
guest_map = vim.vm.customization.AdapterMapping()
guest_map.adapter = vim.vm.customization.IPSettings()
if 'ip' in network and 'netmask' in network:
guest_map.adapter.ip = vim.vm.customization.FixedIp()
guest_map.adapter.ip.ipAddress = str(network['ip'])
guest_map.adapter.subnetMask = str(network['netmask'])
elif 'type' in network and network['type'] == 'dhcp':
guest_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()
if 'gateway' in network:
guest_map.adapter.gateway = network['gateway']
# On Windows, DNS domain and DNS servers can be set by network interface
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.IPSettings.html
if 'domain' in network:
guest_map.adapter.dnsDomain = network['domain']
elif 'domain' in self.params['customization']:
guest_map.adapter.dnsDomain = self.params['customization']['domain']
if 'dns_servers' in network:
guest_map.adapter.dnsServerList = network['dns_servers']
elif 'dns_servers' in self.params['customization']:
guest_map.adapter.dnsServerList = self.params['customization']['dns_servers']
adaptermaps.append(guest_map)
# Global DNS settings
globalip = vim.vm.customization.GlobalIPSettings()
if 'dns_servers' in self.params['customization']:
globalip.dnsServerList = self.params['customization']['dns_servers']
# TODO: Maybe list the different domains from the interfaces here by default ?
if 'dns_suffix' in self.params['customization']:
dns_suffix = self.params['customization']['dns_suffix']
if isinstance(dns_suffix, list):
globalip.dnsSuffixList = " ".join(dns_suffix)
else:
globalip.dnsSuffixList = dns_suffix
elif 'domain' in self.params['customization']:
globalip.dnsSuffixList = self.params['customization']['domain']
if self.params['guest_id']:
guest_id = self.params['guest_id']
else:
guest_id = vm_obj.summary.config.guestId
# For windows guest OS, use SysPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.Sysprep.html#field_detail
if 'win' in guest_id:
ident = vim.vm.customization.Sysprep()
ident.userData = vim.vm.customization.UserData()
# Setting hostName, orgName and fullName is mandatory, so we set defaults when missing
ident.userData.computerName = vim.vm.customization.FixedName()
# computer name will be truncated to 15 characters if using VM name
default_name = self.params['name'].replace(' ', '')
punctuation = string.punctuation.replace('-', '')
default_name = ''.join([c for c in default_name if c not in punctuation])
ident.userData.computerName.name = str(self.params['customization'].get('hostname', default_name[0:15]))
ident.userData.fullName = str(self.params['customization'].get('fullname', 'Administrator'))
ident.userData.orgName = str(self.params['customization'].get('orgname', 'ACME'))
if 'productid' in self.params['customization']:
ident.userData.productId = str(self.params['customization']['productid'])
ident.guiUnattended = vim.vm.customization.GuiUnattended()
if 'autologon' in self.params['customization']:
ident.guiUnattended.autoLogon = self.params['customization']['autologon']
ident.guiUnattended.autoLogonCount = self.params['customization'].get('autologoncount', 1)
if 'timezone' in self.params['customization']:
# Check if the timezone value is an int before proceeding.
ident.guiUnattended.timeZone = self.device_helper.integer_value(
self.params['customization']['timezone'],
'customization.timezone')
ident.identification = vim.vm.customization.Identification()
if self.params['customization'].get('password', '') != '':
ident.guiUnattended.password = vim.vm.customization.Password()
ident.guiUnattended.password.value = str(self.params['customization']['password'])
ident.guiUnattended.password.plainText = True
if 'joindomain' in self.params['customization']:
if 'domainadmin' not in self.params['customization'] or 'domainadminpassword' not in self.params['customization']:
self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use "
"joindomain feature")
ident.identification.domainAdmin = str(self.params['customization']['domainadmin'])
ident.identification.joinDomain = str(self.params['customization']['joindomain'])
ident.identification.domainAdminPassword = vim.vm.customization.Password()
ident.identification.domainAdminPassword.value = str(self.params['customization']['domainadminpassword'])
ident.identification.domainAdminPassword.plainText = True
elif 'joinworkgroup' in self.params['customization']:
ident.identification.joinWorkgroup = str(self.params['customization']['joinworkgroup'])
if 'runonce' in self.params['customization']:
ident.guiRunOnce = vim.vm.customization.GuiRunOnce()
ident.guiRunOnce.commandList = self.params['customization']['runonce']
else:
# FIXME: We have no clue whether this non-Windows OS is actually Linux, hence it might fail!
# For Linux guest OS, use LinuxPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.LinuxPrep.html
ident = vim.vm.customization.LinuxPrep()
# TODO: Maybe add domain from interface if missing ?
if 'domain' in self.params['customization']:
ident.domain = str(self.params['customization']['domain'])
ident.hostName = vim.vm.customization.FixedName()
hostname = str(self.params['customization'].get('hostname', self.params['name'].split('.')[0]))
# Remove all characters except alphanumeric and minus which is allowed by RFC 952
valid_hostname = re.sub(r"[^a-zA-Z0-9\-]", "", hostname)
ident.hostName.name = valid_hostname
# List of supported time zones for different vSphere versions in Linux/Unix systems
# https://kb.vmware.com/s/article/2145518
if 'timezone' in self.params['customization']:
ident.timeZone = str(self.params['customization']['timezone'])
if 'hwclockUTC' in self.params['customization']:
ident.hwClockUTC = self.params['customization']['hwclockUTC']
self.customspec = vim.vm.customization.Specification()
self.customspec.nicSettingMap = adaptermaps
self.customspec.globalIPSettings = globalip
self.customspec.identity = ident
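# Illustrative sketch of a customization dict exercised by customize_vm()
# above; every value is hypothetical:
#
#     customization:
#       hostname: example-vm
#       domain: example.local
#       dns_servers: [192.168.10.1]
#       # Windows-only keys include fullname, orgname, password, autologon,
#       # joindomain/joinworkgroup and runonce; non-Windows guests use LinuxPrep.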
def get_vm_scsi_controller(self, vm_obj):
# If vm_obj doesn't exist there is no SCSI controller to find
if vm_obj is None:
return None
for device in vm_obj.config.hardware.device:
if self.device_helper.is_scsi_controller(device):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.device = device
return scsi_ctl
return None
def get_configured_disk_size(self, expected_disk_spec):
# what size is it?
if [x for x in expected_disk_spec.keys() if x.startswith('size_') or x == 'size']:
# size, size_tb, size_gb, size_mb, size_kb
if 'size' in expected_disk_spec:
size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])')
disk_size_m = size_regex.match(expected_disk_spec['size'])
try:
if disk_size_m:
expected = disk_size_m.group(1)
unit = disk_size_m.group(2)
else:
raise ValueError
if re.match(r'\d+\.\d+', expected):
# We found float value in string, let's typecast it
expected = float(expected)
else:
# We found int value in string, let's typecast it
expected = int(expected)
if not expected or not unit:
raise ValueError
except (TypeError, ValueError, NameError):
# Common failure
self.module.fail_json(msg="Failed to parse disk size please review value"
" provided using documentation.")
else:
param = [x for x in expected_disk_spec.keys() if x.startswith('size_')][0]
unit = param.split('_')[-1].lower()
expected = [x[1] for x in expected_disk_spec.items() if x[0].startswith('size_')][0]
expected = int(expected)
disk_units = dict(tb=3, gb=2, mb=1, kb=0)
# lower-case the unit before the membership test so 'GB' and 'gb' both work
unit = unit.lower()
if unit in disk_units:
    return expected * (1024 ** disk_units[unit])
else:
    self.module.fail_json(msg="%s is not a supported unit for disk size."
                              " Supported units are ['%s']." % (unit,
                                                                "', '".join(disk_units.keys())))
# A disk entry is present but no size was specified, fail
self.module.fail_json(
msg="No size, size_kb, size_mb, size_gb or size_tb attribute found into disk configuration")
def add_existing_vmdk(self, vm_obj, expected_disk_spec, diskspec, scsi_ctl):
"""
Adds vmdk file described by expected_disk_spec['filename'], retrieves the file
information and adds the correct spec to self.configspec.deviceChange.
"""
filename = expected_disk_spec['filename']
# If this is a new disk, or the disk file names are different
if (vm_obj and diskspec.device.backing.fileName != filename) or vm_obj is None:
diskspec.device.backing.fileName = filename
diskspec.device.key = -1
self.change_detected = True
self.configspec.deviceChange.append(diskspec)
def configure_disks(self, vm_obj):
# Ignore an empty disk list; this keeps existing disks when deploying a template/cloning a VM
if len(self.params['disk']) == 0:
return
scsi_ctl = self.get_vm_scsi_controller(vm_obj)
# Create scsi controller only if we are deploying a new VM, not a template or reconfiguring
if vm_obj is None or scsi_ctl is None:
scsi_ctl = self.device_helper.create_scsi_controller(self.get_scsi_type())
self.change_detected = True
self.configspec.deviceChange.append(scsi_ctl)
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] \
if vm_obj is not None else None
if disks is not None and self.params.get('disk') and len(self.params.get('disk')) < len(disks):
self.module.fail_json(msg="Provided disks configuration has less disks than "
"the target object (%d vs %d)" % (len(self.params.get('disk')), len(disks)))
disk_index = 0
for expected_disk_spec in self.params.get('disk'):
disk_modified = False
# If we are manipulating an existing object which has disks and disk_index is within range
if vm_obj is not None and disks is not None and disk_index < len(disks):
diskspec = vim.vm.device.VirtualDeviceSpec()
# set the operation to edit so that it knows to keep other settings
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
diskspec.device = disks[disk_index]
else:
diskspec = self.device_helper.create_scsi_disk(scsi_ctl, disk_index)
disk_modified = True
# increment index for next disk search
disk_index += 1
# unit number 7 is reserved for the SCSI controller
if disk_index == 7:
disk_index += 1
if 'disk_mode' in expected_disk_spec:
disk_mode = expected_disk_spec.get('disk_mode', 'persistent').lower()
valid_disk_mode = ['persistent', 'independent_persistent', 'independent_nonpersistent']
if disk_mode not in valid_disk_mode:
self.module.fail_json(msg="disk_mode specified is not valid."
" Should be one of ['%s']" % "', '".join(valid_disk_mode))
if (vm_obj and diskspec.device.backing.diskMode != disk_mode) or (vm_obj is None):
diskspec.device.backing.diskMode = disk_mode
disk_modified = True
else:
diskspec.device.backing.diskMode = "persistent"
# is it thin?
if 'type' in expected_disk_spec:
disk_type = expected_disk_spec.get('type', '').lower()
if disk_type == 'thin':
diskspec.device.backing.thinProvisioned = True
elif disk_type == 'eagerzeroedthick':
diskspec.device.backing.eagerlyScrub = True
if 'filename' in expected_disk_spec and expected_disk_spec['filename'] is not None:
self.add_existing_vmdk(vm_obj, expected_disk_spec, diskspec, scsi_ctl)
continue
elif vm_obj is None or self.params['template']:
# We are creating new VM or from Template
# Only create virtual device if not backed by vmdk in original template
if diskspec.device.backing.fileName == '':
diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
# which datastore?
if expected_disk_spec.get('datastore'):
# TODO: This is already handled by the relocation spec,
# but it needs to eventually be handled for all the
# other disks defined
pass
kb = self.get_configured_disk_size(expected_disk_spec)
# VMware doesn't allow reducing disk sizes
if kb < diskspec.device.capacityInKB:
    self.module.fail_json(
        msg="Given disk size is smaller than the current size (%d < %d). Reducing disks is not allowed." %
            (kb, diskspec.device.capacityInKB))
if kb != diskspec.device.capacityInKB or disk_modified:
diskspec.device.capacityInKB = kb
self.configspec.deviceChange.append(diskspec)
self.change_detected = True
def select_host(self):
hostsystem = self.cache.get_esx_host(self.params['esxi_hostname'])
if not hostsystem:
self.module.fail_json(msg='Failed to find ESX host "%(esxi_hostname)s"' % self.params)
if hostsystem.runtime.connectionState != 'connected' or hostsystem.runtime.inMaintenanceMode:
self.module.fail_json(msg='ESXi host "%(esxi_hostname)s" is in an invalid state or in maintenance mode.' % self.params)
return hostsystem
def autoselect_datastore(self):
datastore = None
datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
if datastores is None or len(datastores) == 0:
self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
datastore_freespace = 0
for ds in datastores:
if not self.is_datastore_valid(datastore_obj=ds):
continue
if ds.summary.freeSpace > datastore_freespace:
datastore = ds
datastore_freespace = ds.summary.freeSpace
return datastore
def get_recommended_datastore(self, datastore_cluster_obj=None):
"""
Function to return Storage DRS recommended datastore from datastore cluster
Args:
datastore_cluster_obj: datastore cluster managed object
Returns: Name of recommended datastore from the given datastore cluster
"""
if datastore_cluster_obj is None:
return None
# Check if Datastore Cluster provided by user is SDRS ready
sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled
if sdrs_status:
# We can get a storage recommendation only if SDRS is enabled on the given datastore cluster
pod_sel_spec = vim.storageDrs.PodSelectionSpec()
pod_sel_spec.storagePod = datastore_cluster_obj
storage_spec = vim.storageDrs.StoragePlacementSpec()
storage_spec.podSelectionSpec = pod_sel_spec
storage_spec.type = 'create'
try:
rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)
rec_action = rec.recommendations[0].action[0]
return rec_action.destination.name
except Exception:
# There is some error so we fall back to general workflow
pass
datastore = None
datastore_freespace = 0
for ds in datastore_cluster_obj.childEntity:
if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace:
# If datastore field is provided, filter destination datastores
if not self.is_datastore_valid(datastore_obj=ds):
continue
datastore = ds
datastore_freespace = ds.summary.freeSpace
if datastore:
return datastore.name
return None
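# For reference, a condensed sketch of the SDRS recommendation call performed
# above (object names assumed; this mirrors the code and adds no behavior):
#
#     pod_sel = vim.storageDrs.PodSelectionSpec(storagePod=datastore_cluster_obj)
#     spec = vim.storageDrs.StoragePlacementSpec(podSelectionSpec=pod_sel, type='create')
#     rec = content.storageResourceManager.RecommendDatastores(storageSpec=spec)
#     name = rec.recommendations[0].action[0].destination.name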
def select_datastore(self, vm_obj=None):
datastore = None
datastore_name = None
if len(self.params['disk']) != 0:
# TODO: really use the datastore for newly created disks
if 'autoselect_datastore' in self.params['disk'][0] and self.params['disk'][0]['autoselect_datastore']:
datastores = []
if self.params['cluster']:
cluster = self.find_cluster_by_name(self.params['cluster'], self.content)
for host in cluster.host:
for mi in host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo:
if mi.volume.type == "VMFS":
datastores.append(self.cache.find_obj(self.content, [vim.Datastore], mi.volume.name))
elif self.params['esxi_hostname']:
host = self.find_hostsystem_by_name(self.params['esxi_hostname'])
for mi in host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo:
if mi.volume.type == "VMFS":
datastores.append(self.cache.find_obj(self.content, [vim.Datastore], mi.volume.name))
else:
datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
datastores = [x for x in datastores if self.cache.get_parent_datacenter(x).name == self.params['datacenter']]
datastore_freespace = 0
for ds in datastores:
if not self.is_datastore_valid(datastore_obj=ds):
continue
if (ds.summary.freeSpace > datastore_freespace) or (ds.summary.freeSpace == datastore_freespace and not datastore):
# If datastore field is provided, filter destination datastores
if 'datastore' in self.params['disk'][0] and \
isinstance(self.params['disk'][0]['datastore'], str) and \
ds.name.find(self.params['disk'][0]['datastore']) < 0:
continue
datastore = ds
datastore_name = datastore.name
datastore_freespace = ds.summary.freeSpace
elif 'datastore' in self.params['disk'][0]:
datastore_name = self.params['disk'][0]['datastore']
# Check if user has provided datastore cluster first
datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
if datastore_cluster:
# If the user specified a datastore cluster, get the recommended datastore
datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
# Check if get_recommended_datastore or user specified datastore exists or not
datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
else:
self.module.fail_json(msg="Either datastore or autoselect_datastore should be provided to select datastore")
if not datastore and self.params['template']:
# use the template's existing DS
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
if disks:
datastore = disks[0].backing.datastore
datastore_name = datastore.name
# validation
if datastore:
dc = self.cache.get_parent_datacenter(datastore)
if dc.name != self.params['datacenter']:
datastore = self.autoselect_datastore()
datastore_name = datastore.name
if not datastore:
if len(self.params['disk']) != 0 or self.params['template'] is None:
self.module.fail_json(msg="Unable to find the datastore with given parameters."
" This could mean, %s is a non-existent virtual machine and module tried to"
" deploy it as new virtual machine with no disk. Please specify disks parameter"
" or specify template to clone from." % self.params['name'])
self.module.fail_json(msg="Failed to find a matching datastore")
return datastore, datastore_name
def obj_has_parent(self, obj, parent):
if obj is None and parent is None:
raise AssertionError()
current_parent = obj
while True:
if current_parent.name == parent.name:
return True
# Check if we have reached till root folder
moid = current_parent._moId
if moid in ['group-d1', 'ha-folder-root']:
return False
current_parent = current_parent.parent
if current_parent is None:
return False
def get_scsi_type(self):
disk_controller_type = "paravirtual"
# set the SCSI controller type from hardware params, if provided
if 'hardware' in self.params:
if 'scsi' in self.params['hardware']:
if self.params['hardware']['scsi'] in ['buslogic', 'paravirtual', 'lsilogic', 'lsilogicsas']:
disk_controller_type = self.params['hardware']['scsi']
else:
self.module.fail_json(msg="hardware.scsi attribute should be 'paravirtual' or 'lsilogic'")
return disk_controller_type
def find_folder(self, searchpath):
""" Walk inventory objects one position of the searchpath at a time """
# split the searchpath so we can iterate through it
paths = [x.replace('/', '') for x in searchpath.split('/')]
paths_total = len(paths) - 1
position = 0
# recursive walk while looking for next element in searchpath
root = self.content.rootFolder
while root and position <= paths_total:
change = False
if hasattr(root, 'childEntity'):
for child in root.childEntity:
if child.name == paths[position]:
root = child
position += 1
change = True
break
elif isinstance(root, vim.Datacenter):
if hasattr(root, 'vmFolder'):
if root.vmFolder.name == paths[position]:
root = root.vmFolder
position += 1
change = True
else:
root = None
if not change:
root = None
return root
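# Example (hypothetical inventory): find_folder('/DC1/vm/prod/web') walks
# rootFolder -> 'DC1' -> its vmFolder ('vm') -> 'prod' -> 'web' and returns
# the last matched object, or None as soon as a component cannot be matched.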
def get_resource_pool(self, cluster=None, host=None, resource_pool=None):
""" Get a resource pool, filter on cluster, esxi_hostname or resource_pool if given """
cluster_name = cluster or self.params.get('cluster', None)
host_name = host or self.params.get('esxi_hostname', None)
resource_pool_name = resource_pool or self.params.get('resource_pool', None)
# get the datacenter object
datacenter = find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
if not datacenter:
self.module.fail_json(msg='Unable to find datacenter "%s"' % self.params['datacenter'])
# if cluster is given, get the cluster object
if cluster_name:
cluster = find_obj(self.content, [vim.ComputeResource], cluster_name, folder=datacenter)
if not cluster:
self.module.fail_json(msg='Unable to find cluster "%s"' % cluster_name)
# if host is given, get the cluster object using the host
elif host_name:
host = find_obj(self.content, [vim.HostSystem], host_name, folder=datacenter)
if not host:
self.module.fail_json(msg='Unable to find host "%s"' % host_name)
cluster = host.parent
else:
cluster = None
# get resource pools limiting search to cluster or datacenter
resource_pool = find_obj(self.content, [vim.ResourcePool], resource_pool_name, folder=cluster or datacenter)
if not resource_pool:
if resource_pool_name:
self.module.fail_json(msg='Unable to find resource_pool "%s"' % resource_pool_name)
else:
self.module.fail_json(msg='Unable to find resource pool, need esxi_hostname, resource_pool, or cluster')
return resource_pool
def deploy_vm(self):
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html
# https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
# FIXME:
# - static IPs
self.folder = self.params.get('folder', None)
if self.folder is None:
self.module.fail_json(msg="Folder is required parameter while deploying new virtual machine")
# Prepend / if it was missing from the folder path, also strip trailing slashes
if not self.folder.startswith('/'):
self.folder = '/%(folder)s' % self.params
self.folder = self.folder.rstrip('/')
datacenter = self.cache.find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
if datacenter is None:
self.module.fail_json(msg='No datacenter named %(datacenter)s was found' % self.params)
dcpath = compile_folder_path_for_object(datacenter)
# Nested folder does not have trailing /
if not dcpath.endswith('/'):
dcpath += '/'
# Check for full path first in case it was already supplied
if (self.folder.startswith(dcpath + self.params['datacenter'] + '/vm') or
self.folder.startswith(dcpath + '/' + self.params['datacenter'] + '/vm')):
fullpath = self.folder
elif self.folder.startswith('/vm/') or self.folder == '/vm':
fullpath = "%s%s%s" % (dcpath, self.params['datacenter'], self.folder)
elif self.folder.startswith('/'):
fullpath = "%s%s/vm%s" % (dcpath, self.params['datacenter'], self.folder)
else:
fullpath = "%s%s/vm/%s" % (dcpath, self.params['datacenter'], self.folder)
f_obj = self.content.searchIndex.FindByInventoryPath(fullpath)
# abort if no strategy was successful
if f_obj is None:
# Add some debugging values in failure.
details = {
'datacenter': datacenter.name,
'datacenter_path': dcpath,
'folder': self.folder,
'full_search_path': fullpath,
}
self.module.fail_json(msg='No folder %s matched in the search path : %s' % (self.folder, fullpath),
details=details)
destfolder = f_obj
if self.params['template']:
vm_obj = self.get_vm_or_template(template_name=self.params['template'])
if vm_obj is None:
self.module.fail_json(msg="Could not find a template named %(template)s" % self.params)
else:
vm_obj = None
# always get a resource_pool
resource_pool = self.get_resource_pool()
# set the destination datastore for VM & disks
if self.params['datastore']:
# Give precedence to datastore value provided by user
# User may want to deploy VM to specific datastore.
datastore_name = self.params['datastore']
# Check if user has provided datastore cluster first
datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
if datastore_cluster:
# If the user specified a datastore cluster, get the recommended datastore
datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
# Check if get_recommended_datastore or user specified datastore exists or not
datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
else:
(datastore, datastore_name) = self.select_datastore(vm_obj)
self.configspec = vim.vm.ConfigSpec()
self.configspec.deviceChange = []
# create the relocation spec
self.relospec = vim.vm.RelocateSpec()
self.relospec.deviceChange = []
self.configure_guestid(vm_obj=vm_obj, vm_creation=True)
self.configure_cpu_and_memory(vm_obj=vm_obj, vm_creation=True)
self.configure_hardware_params(vm_obj=vm_obj)
self.configure_resource_alloc_info(vm_obj=vm_obj)
self.configure_vapp_properties(vm_obj=vm_obj)
self.configure_disks(vm_obj=vm_obj)
self.configure_network(vm_obj=vm_obj)
self.configure_cdrom(vm_obj=vm_obj)
# Find if we need network customizations (find keys in the dictionary that require customizations)
network_changes = False
for nw in self.params['networks']:
for key in nw:
# We don't need customizations for these keys
if key == 'type' and nw['type'] == 'dhcp':
network_changes = True
break
if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected'):
network_changes = True
break
if len(self.params['customization']) > 0 or network_changes or self.params.get('customization_spec') is not None:
self.customize_vm(vm_obj=vm_obj)
clonespec = None
clone_method = None
try:
if self.params['template']:
# Only select specific host when ESXi hostname is provided
if self.params['esxi_hostname']:
self.relospec.host = self.select_host()
self.relospec.datastore = datastore
# Convert disks present in the template if 'convert' is set
if self.params['convert']:
for device in vm_obj.config.hardware.device:
if isinstance(device, vim.vm.device.VirtualDisk):
disk_locator = vim.vm.RelocateSpec.DiskLocator()
disk_locator.diskBackingInfo = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
if self.params['convert'] in ['thin']:
disk_locator.diskBackingInfo.thinProvisioned = True
if self.params['convert'] in ['eagerzeroedthick']:
disk_locator.diskBackingInfo.eagerlyScrub = True
if self.params['convert'] in ['thick']:
disk_locator.diskBackingInfo.diskMode = "persistent"
disk_locator.diskId = device.key
disk_locator.datastore = datastore
self.relospec.disk.append(disk_locator)
# https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
# > pool: For a clone operation from a template to a virtual machine, this argument is required.
self.relospec.pool = resource_pool
linked_clone = self.params.get('linked_clone')
snapshot_src = self.params.get('snapshot_src', None)
if linked_clone:
if snapshot_src is not None:
self.relospec.diskMoveType = vim.vm.RelocateSpec.DiskMoveOptions.createNewChildDiskBacking
else:
self.module.fail_json(msg="Parameter 'linked_src' and 'snapshot_src' are"
" required together for linked clone operation.")
clonespec = vim.vm.CloneSpec(template=self.params['is_template'], location=self.relospec)
if self.customspec:
clonespec.customization = self.customspec
if snapshot_src is not None:
if vm_obj.snapshot is None:
self.module.fail_json(msg="No snapshots present for virtual machine or template [%(template)s]" % self.params)
snapshot = self.get_snapshots_by_name_recursively(snapshots=vm_obj.snapshot.rootSnapshotList,
snapname=snapshot_src)
if len(snapshot) != 1:
self.module.fail_json(msg='virtual machine "%(template)s" does not contain'
' snapshot named "%(snapshot_src)s"' % self.params)
clonespec.snapshot = snapshot[0].snapshot
clonespec.config = self.configspec
clone_method = 'Clone'
try:
task = vm_obj.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
except vim.fault.NoPermission as e:
self.module.fail_json(msg="Failed to clone virtual machine %s to folder %s "
"due to permission issue: %s" % (self.params['name'],
destfolder,
to_native(e.msg)))
self.change_detected = True
else:
# ConfigSpec requires a name for VM creation
self.configspec.name = self.params['name']
self.configspec.files = vim.vm.FileInfo(logDirectory=None,
snapshotDirectory=None,
suspendDirectory=None,
vmPathName="[" + datastore_name + "]")
clone_method = 'CreateVM_Task'
try:
task = destfolder.CreateVM_Task(config=self.configspec, pool=resource_pool)
except vmodl.fault.InvalidRequest as e:
self.module.fail_json(msg="Failed to create virtual machine due to invalid configuration "
"parameter %s" % to_native(e.msg))
except vim.fault.RestrictedVersion as e:
self.module.fail_json(msg="Failed to create virtual machine due to "
"product versioning restrictions: %s" % to_native(e.msg))
self.change_detected = True
self.wait_for_task(task)
except TypeError as e:
self.module.fail_json(msg="TypeError was returned, please ensure to give correct inputs. %s" % to_text(e))
if task.info.state == 'error':
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
# provide these to the user for debugging
clonespec_json = serialize_spec(clonespec)
configspec_json = serialize_spec(self.configspec)
kwargs = {
'changed': self.change_applied,
'failed': True,
'msg': task.info.error.msg,
'clonespec': clonespec_json,
'configspec': configspec_json,
'clone_method': clone_method
}
return kwargs
else:
# set annotation
vm = task.info.result
if self.params['annotation']:
annotation_spec = vim.vm.ConfigSpec()
annotation_spec.annotation = str(self.params['annotation'])
task = vm.ReconfigVM_Task(annotation_spec)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'annotation'}
if self.params['customvalues']:
vm_custom_spec = vim.vm.ConfigSpec()
self.customize_customvalues(vm_obj=vm, config_spec=vm_custom_spec)
task = vm.ReconfigVM_Task(vm_custom_spec)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customvalues'}
if self.params['wait_for_ip_address'] or self.params['wait_for_customization'] or self.params['state'] in ['poweredon', 'restarted']:
set_vm_power_state(self.content, vm, 'poweredon', force=False)
if self.params['wait_for_ip_address']:
wait_for_vm_ip(self.content, vm, self.params['wait_for_ip_address_timeout'])
if self.params['wait_for_customization']:
is_customization_ok = self.wait_for_customization(vm=vm, timeout=self.params['wait_for_customization_timeout'])
if not is_customization_ok:
vm_facts = self.gather_facts(vm)
return {'changed': self.change_applied, 'failed': True, 'instance': vm_facts, 'op': 'customization'}
vm_facts = self.gather_facts(vm)
return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts}
def get_snapshots_by_name_recursively(self, snapshots, snapname):
snap_obj = []
for snapshot in snapshots:
if snapshot.name == snapname:
snap_obj.append(snapshot)
else:
snap_obj = snap_obj + self.get_snapshots_by_name_recursively(snapshot.childSnapshotList, snapname)
return snap_obj
def reconfigure_vm(self):
self.configspec = vim.vm.ConfigSpec()
self.configspec.deviceChange = []
# create the relocation spec
self.relospec = vim.vm.RelocateSpec()
self.relospec.deviceChange = []
self.configure_guestid(vm_obj=self.current_vm_obj)
self.configure_cpu_and_memory(vm_obj=self.current_vm_obj)
self.configure_hardware_params(vm_obj=self.current_vm_obj)
self.configure_disks(vm_obj=self.current_vm_obj)
self.configure_network(vm_obj=self.current_vm_obj)
self.configure_cdrom(vm_obj=self.current_vm_obj)
self.customize_customvalues(vm_obj=self.current_vm_obj, config_spec=self.configspec)
self.configure_resource_alloc_info(vm_obj=self.current_vm_obj)
self.configure_vapp_properties(vm_obj=self.current_vm_obj)
if self.params['annotation'] and self.current_vm_obj.config.annotation != self.params['annotation']:
self.configspec.annotation = str(self.params['annotation'])
self.change_detected = True
if self.params['resource_pool']:
self.relospec.pool = self.get_resource_pool()
if self.relospec.pool != self.current_vm_obj.resourcePool:
task = self.current_vm_obj.RelocateVM_Task(spec=self.relospec)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'relocate'}
# Only send VMware task if we see a modification
if self.change_detected:
task = None
try:
task = self.current_vm_obj.ReconfigVM_Task(spec=self.configspec)
except vim.fault.RestrictedVersion as e:
self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
" product versioning restrictions: %s" % to_native(e.msg))
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'reconfig'}
# Rename VM
if self.params['uuid'] and self.params['name'] and self.params['name'] != self.current_vm_obj.config.name:
task = self.current_vm_obj.Rename_Task(self.params['name'])
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'rename'}
# Mark VM as Template
if self.params['is_template'] and not self.current_vm_obj.config.template:
try:
self.current_vm_obj.MarkAsTemplate()
self.change_applied = True
except vmodl.fault.NotSupported as e:
self.module.fail_json(msg="Failed to mark virtual machine [%s] "
"as template: %s" % (self.params['name'], e.msg))
# Mark Template as VM
elif not self.params['is_template'] and self.current_vm_obj.config.template:
resource_pool = self.get_resource_pool()
kwargs = dict(pool=resource_pool)
if self.params.get('esxi_hostname', None):
host_system_obj = self.select_host()
kwargs.update(host=host_system_obj)
try:
self.current_vm_obj.MarkAsVirtualMachine(**kwargs)
self.change_applied = True
except vim.fault.InvalidState as invalid_state:
self.module.fail_json(msg="Virtual machine is not marked"
" as template : %s" % to_native(invalid_state.msg))
except vim.fault.InvalidDatastore as invalid_ds:
self.module.fail_json(msg="Converting template to virtual machine"
" operation cannot be performed on the"
" target datastores: %s" % to_native(invalid_ds.msg))
except vim.fault.CannotAccessVmComponent as cannot_access:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" as operation unable access virtual machine"
" component: %s" % to_native(cannot_access.msg))
except vmodl.fault.InvalidArgument as invalid_argument:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" due to : %s" % to_native(invalid_argument.msg))
except Exception as generic_exc:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" due to generic error : %s" % to_native(generic_exc))
# Automatically update VMware UUID when converting template to VM.
# This avoids an interactive prompt during VM startup.
uuid_action = [x for x in self.current_vm_obj.config.extraConfig if x.key == "uuid.action"]
if not uuid_action:
uuid_action_opt = vim.option.OptionValue()
uuid_action_opt.key = "uuid.action"
uuid_action_opt.value = "create"
self.configspec.extraConfig.append(uuid_action_opt)
self.change_detected = True
# customize the existing VM after reconfiguration
if 'existing_vm' in self.params['customization'] and self.params['customization']['existing_vm']:
if self.current_vm_obj.config.template:
self.module.fail_json(msg="VM is template, not support guest OS customization.")
if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
self.module.fail_json(msg="VM is not in poweroff state, can not do guest OS customization.")
cus_result = self.customize_exist_vm()
if cus_result['failed']:
return cus_result
vm_facts = self.gather_facts(self.current_vm_obj)
return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts}
def customize_exist_vm(self):
task = None
# Find if we need network customizations (find keys in the dictionary that require customizations)
network_changes = False
for nw in self.params['networks']:
for key in nw:
# We don't need customizations for these keys
if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected'):
network_changes = True
break
if len(self.params['customization']) > 1 or network_changes or self.params.get('customization_spec'):
self.customize_vm(vm_obj=self.current_vm_obj)
try:
task = self.current_vm_obj.CustomizeVM_Task(self.customspec)
except vim.fault.CustomizationFault as e:
self.module.fail_json(msg="Failed to customization virtual machine due to CustomizationFault: %s" % to_native(e.msg))
except vim.fault.RuntimeFault as e:
self.module.fail_json(msg="failed to customization virtual machine due to RuntimeFault: %s" % to_native(e.msg))
except Exception as e:
self.module.fail_json(msg="failed to customization virtual machine due to fault: %s" % to_native(e.msg))
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customize_exist'}
if self.params['wait_for_customization']:
set_vm_power_state(self.content, self.current_vm_obj, 'poweredon', force=False)
is_customization_ok = self.wait_for_customization(vm=self.current_vm_obj, timeout=self.params['wait_for_customization_timeout'])
if not is_customization_ok:
return {'changed': self.change_applied, 'failed': True,
'msg': 'Wait for customization failed due to timeout', 'op': 'wait_for_customize_exist'}
return {'changed': self.change_applied, 'failed': False}
def wait_for_task(self, task, poll_interval=1):
"""
Wait for a VMware task to complete. Terminal states are 'error' and 'success'.
Inputs:
- task: the task to wait for
- poll_interval: polling interval to check the task, in seconds
Modifies:
- self.change_applied
"""
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
# https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py
while task.info.state not in ['error', 'success']:
time.sleep(poll_interval)
self.change_applied = self.change_applied or task.info.state == 'success'
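# Note: with the default poll_interval=1 this wakes once per second until the
# task reaches a terminal state; change_applied latches True as soon as any
# awaited task succeeds and is never reset by a later failure.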
def get_vm_events(self, vm, eventTypeIdList):
byEntity = vim.event.EventFilterSpec.ByEntity(entity=vm, recursion="self")
filterSpec = vim.event.EventFilterSpec(entity=byEntity, eventTypeId=eventTypeIdList)
eventManager = self.content.eventManager
return eventManager.QueryEvent(filterSpec)
def wait_for_customization(self, vm, timeout=3600, sleep=10):
poll = int(timeout // sleep)
thispoll = 0
while thispoll <= poll:
eventStarted = self.get_vm_events(vm, ['CustomizationStartedEvent'])
if len(eventStarted):
thispoll = 0
while thispoll <= poll:
eventsFinishedResult = self.get_vm_events(vm, ['CustomizationSucceeded', 'CustomizationFailed'])
if len(eventsFinishedResult):
if not isinstance(eventsFinishedResult[0], vim.event.CustomizationSucceeded):
self.module.warn("Customization failed with error {%s}:{%s}"
% (eventsFinishedResult[0]._wsdlName, eventsFinishedResult[0].fullFormattedMessage))
return False
else:
return True
else:
time.sleep(sleep)
thispoll += 1
if len(eventsFinishedResult) == 0:
self.module.warn('Waiting for customization result event timed out.')
return False
else:
time.sleep(sleep)
thispoll += 1
if len(eventStarted):
self.module.warn('Waiting for customization result event timed out.')
else:
self.module.warn('Waiting for customization start event timed out.')
return False
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
state=dict(type='str', default='present',
choices=['absent', 'poweredoff', 'poweredon', 'present', 'rebootguest', 'restarted', 'shutdownguest', 'suspended']),
template=dict(type='str', aliases=['template_src']),
is_template=dict(type='bool', default=False),
annotation=dict(type='str', aliases=['notes']),
customvalues=dict(type='list', default=[]),
name=dict(type='str'),
name_match=dict(type='str', choices=['first', 'last'], default='first'),
uuid=dict(type='str'),
use_instance_uuid=dict(type='bool', default=False),
folder=dict(type='str'),
guest_id=dict(type='str'),
disk=dict(type='list', default=[]),
cdrom=dict(type=list_or_dict, default=[]),
hardware=dict(type='dict', default={}),
force=dict(type='bool', default=False),
datacenter=dict(type='str', default='ha-datacenter'),
esxi_hostname=dict(type='str'),
cluster=dict(type='str'),
wait_for_ip_address=dict(type='bool', default=False),
wait_for_ip_address_timeout=dict(type='int', default=300),
state_change_timeout=dict(type='int', default=0),
snapshot_src=dict(type='str'),
linked_clone=dict(type='bool', default=False),
networks=dict(type='list', default=[]),
resource_pool=dict(type='str'),
customization=dict(type='dict', default={}, no_log=True),
customization_spec=dict(type='str', default=None),
wait_for_customization=dict(type='bool', default=False),
wait_for_customization_timeout=dict(type='int', default=3600),
vapp_properties=dict(type='list', default=[]),
datastore=dict(type='str'),
convert=dict(type='str', choices=['thin', 'thick', 'eagerzeroedthick']),
delete_from_inventory=dict(type='bool', default=False),
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['cluster', 'esxi_hostname'],
],
required_one_of=[
['name', 'uuid'],
],
)
result = {'failed': False, 'changed': False}
pyv = PyVmomiHelper(module)
# Check if the VM exists before continuing
vm = pyv.get_vm()
# VM already exists
if vm:
if module.params['state'] == 'absent':
# destroy it
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
current_powerstate=vm.summary.runtime.powerState.lower(),
desired_operation='remove_vm',
)
module.exit_json(**result)
if module.params['force']:
# has to be poweredoff first
set_vm_power_state(pyv.content, vm, 'poweredoff', module.params['force'])
result = pyv.remove_vm(vm, module.params['delete_from_inventory'])
elif module.params['state'] == 'present':
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
desired_operation='reconfigure_vm',
)
module.exit_json(**result)
result = pyv.reconfigure_vm()
elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted', 'suspended', 'shutdownguest', 'rebootguest']:
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
current_powerstate=vm.summary.runtime.powerState.lower(),
desired_operation='set_vm_power_state',
)
module.exit_json(**result)
# set powerstate
tmp_result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout'])
if tmp_result['changed']:
result["changed"] = True
if module.params['state'] in ['poweredon', 'restarted', 'rebootguest'] and module.params['wait_for_ip_address']:
wait_result = wait_for_vm_ip(pyv.content, vm, module.params['wait_for_ip_address_timeout'])
if not wait_result:
module.fail_json(msg='Waiting for IP address timed out')
tmp_result['instance'] = wait_result
if not tmp_result["failed"]:
result["failed"] = False
result['instance'] = tmp_result['instance']
if tmp_result["failed"]:
result["failed"] = True
result["msg"] = tmp_result["msg"]
else:
# This should not happen
raise AssertionError()
# VM doesn't exist
else:
if module.params['state'] in ['poweredon', 'poweredoff', 'present', 'restarted', 'suspended']:
if module.check_mode:
result.update(
changed=True,
desired_operation='deploy_vm',
)
module.exit_json(**result)
result = pyv.deploy_vm()
if result['failed']:
                module.fail_json(msg='Failed to create virtual machine: %s' % result['msg'])
if result['failed']:
module.fail_json(**result)
else:
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
nexiles/odoo | addons/account/report/account_general_journal.py | 381 | 7669 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class journal_print(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context=None):
if context is None:
context = {}
super(journal_print, self).__init__(cr, uid, name, context=context)
self.period_ids = []
self.journal_ids = []
self.localcontext.update( {
'time': time,
'lines': self.lines,
'periods': self.periods,
'sum_debit_period': self._sum_debit_period,
'sum_credit_period': self._sum_credit_period,
'sum_debit': self._sum_debit,
'sum_credit': self._sum_credit,
'get_fiscalyear': self._get_fiscalyear,
'get_account': self._get_account,
'get_start_period': self.get_start_period,
'get_end_period': self.get_end_period,
'get_sortby': self._get_sortby,
'get_filter': self._get_filter,
'get_journal': self._get_journal,
'get_start_date':self._get_start_date,
'get_end_date':self._get_end_date,
'display_currency':self._display_currency,
'get_target_move': self._get_target_move,
})
def set_context(self, objects, data, ids, report_type=None):
obj_move = self.pool.get('account.move.line')
new_ids = ids
self.query_get_clause = ''
self.target_move = data['form'].get('target_move', 'all')
if (data['model'] == 'ir.ui.menu'):
new_ids = 'active_ids' in data['form'] and data['form']['active_ids'] or []
self.query_get_clause = 'AND '
self.query_get_clause += obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {}))
objects = self.pool.get('account.journal.period').browse(self.cr, self.uid, new_ids)
if new_ids:
self.cr.execute('SELECT period_id, journal_id FROM account_journal_period WHERE id IN %s', (tuple(new_ids),))
res = self.cr.fetchall()
self.period_ids, self.journal_ids = zip(*res)
return super(journal_print, self).set_context(objects, data, ids, report_type=report_type)
# returns a list of period objs
def periods(self, journal_period_objs):
dic = {}
def filter_unique(o):
key = o.period_id.id
res = key in dic
if not res:
dic[key] = True
return not res
filtered_objs = filter(filter_unique, journal_period_objs)
return map(lambda x: x.period_id, filtered_objs)
def lines(self, period_id):
if not self.journal_ids:
return []
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
self.cr.execute('SELECT j.code, j.name, l.amount_currency,c.symbol AS currency_code,l.currency_id, '
'SUM(l.debit) AS debit, SUM(l.credit) AS credit '
'FROM account_move_line l '
'LEFT JOIN account_move am ON (l.move_id=am.id) '
'LEFT JOIN account_journal j ON (l.journal_id=j.id) '
'LEFT JOIN res_currency c on (l.currency_id=c.id)'
'WHERE am.state IN %s AND l.period_id=%s AND l.journal_id IN %s ' + self.query_get_clause + ' '
'GROUP BY j.id, j.code, j.name, l.amount_currency, c.symbol, l.currency_id ',
(tuple(move_state), period_id, tuple(self.journal_ids)))
return self.cr.dictfetchall()
def _set_get_account_currency_code(self, account_id):
self.cr.execute("SELECT c.symbol AS code "\
"FROM res_currency c, account_account AS ac "\
"WHERE ac.id = %s AND ac.currency_id = c.id" % (account_id))
result = self.cr.fetchone()
if result:
self.account_currency = result[0]
else:
self.account_currency = False
def _get_account(self, data):
if data['model'] == 'account.journal.period':
return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).company_id.name
return super(journal_print, self)._get_account(data)
def _get_fiscalyear(self, data):
if data['model'] == 'account.journal.period':
return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).fiscalyear_id.name
return super(journal_print, self)._get_fiscalyear(data)
def _display_currency(self, data):
if data['model'] == 'account.journal.period':
return True
return data['form']['amount_currency']
def _sum_debit_period(self, period_id, journal_id=False):
if journal_id:
journals = [journal_id]
else:
journals = self.journal_ids
if not journals:
return 0.0
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
self.cr.execute('SELECT SUM(l.debit) FROM account_move_line l '
'LEFT JOIN account_move am ON (l.move_id=am.id) '
'WHERE am.state IN %s AND l.period_id=%s AND l.journal_id IN %s ' + self.query_get_clause + ' ' \
'AND l.state<>\'draft\'',
(tuple(move_state), period_id, tuple(journals)))
return self.cr.fetchone()[0] or 0.0
def _sum_credit_period(self, period_id, journal_id=None):
if journal_id:
journals = [journal_id]
else:
journals = self.journal_ids
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
if not journals:
return 0.0
self.cr.execute('SELECT SUM(l.credit) FROM account_move_line l '
'LEFT JOIN account_move am ON (l.move_id=am.id) '
'WHERE am.state IN %s AND l.period_id=%s AND l.journal_id IN %s '+ self.query_get_clause + ' ' \
'AND l.state<>\'draft\'',
(tuple(move_state), period_id, tuple(journals)))
return self.cr.fetchone()[0] or 0.0
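# A note on the SQL above (illustrative, not executed): values are bound via
# psycopg2-style %s placeholders, e.g.
#
#     cr.execute('SELECT SUM(l.credit) FROM account_move_line l '
#                'WHERE l.period_id=%s AND l.journal_id IN %s',
#                (period_id, tuple(journal_ids)))
#
# so the driver quotes every value, and only trusted fragments such as
# self.query_get_clause are concatenated into the statement text itself.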
class report_generaljournal(osv.AbstractModel):
_name = 'report.account.report_generaljournal'
_inherit = 'report.abstract_report'
_template = 'account.report_generaljournal'
_wrapped_report_class = journal_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jblackburne/scikit-learn | sklearn/tree/tests/test_tree.py | 7 | 55471 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import struct
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", "mae")
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
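# Arithmetic behind the equivalence checked above: for 0/1 labels with mean p
# in a node, the gini impurity is 1 - p**2 - (1 - p)**2 = 2 * p * (1 - p),
# while the mse criterion uses the variance p * (1 - p). The two differ only
# by a constant factor, so both criteria rank candidate splits identically.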
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_impurity_split=-1.0).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_min_impurity_split():
# test if min_impurity_split creates leaves with impurity
# [0, min_impurity_split) when min_samples_leaf = 1 and
# min_samples_split = 2.
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
min_impurity_split = .5
        # verify that the default min_impurity_split is at most 1e-7, so
        # leaf nodes are grown until they are (almost) pure
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
random_state=0)
assert_less_equal(est.min_impurity_split, 1e-7,
"Failed, min_impurity_split = {0} > 1e-7".format(
est.min_impurity_split))
est.fit(X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_equal(est.tree_.impurity[node], 0.,
"Failed with {0} "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
# verify leaf nodes have impurity [0,min_impurity_split] when using min_impurity_split
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=0)
est.fit(X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_greater_equal(est.tree_.impurity[node], 0,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
assert_less_equal(est.tree_.impurity[node], min_impurity_split,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
def test_pickle():
for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
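    # Arithmetic behind the two thresholds (sketch): class '2' has 100 samples,
    # so its total weight is 51 with per-sample weight .51 but only 50 with .5.
    # At 51 the single best split isolates the heaviest pure group, class '2'
    # (all at X == 200), giving threshold 149.5; at 50 every class weighs the
    # same and the split settles on 49.5, isolating class '0' instead.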
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
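# Arithmetic behind the multiplicative check above: clf1 sees sample weights of
# 100**2 = 10000 for class 1 and 1 elsewhere, while clf2 sees
# class_weight * sample_weight = 100 * 100 = 10000 for class 1 and 1 * 1 = 1
# elsewhere -- the same effective weighting, hence equal importances.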
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_leaf_nodes = k + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = 8 * struct.calcsize("P")
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
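# On a 64-bit build n_bits is 64, so the first request asks for 2**65 leaf
# nodes (more than the address space can index, hence the OverflowError noted
# above) and the second for 2**63 - 1 (the largest signed size), whose node
# array cannot actually be allocated and so raises MemoryError.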
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
    # Due to the numerical instability of MSE and an overly strict test, we
    # limit the maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # n_samples is set equal to n_features to ease simultaneous construction
    # of a CSR and a CSC matrix from the same (data, indices, indptr) arrays
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
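    # Built from identical (data, indices, indptr) arrays, the CSR matrix is
    # the transpose of the CSC one (indptr indexes rows instead of columns);
    # the square shape is what keeps both usable as (n_samples, n_features)
    # inputs here.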
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
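# Arithmetic behind the check above: total weight is 5 * 0.2 = 1.0, so
# min_weight_fraction_leaf=0.4 requires every leaf to hold weight >= 0.4,
# i.e. at least two samples. The only informative split isolates the single
# X == 1 sample (weight 0.2), so no split is allowed and the tree stays at
# depth 0; without the constraint that split is taken and the depth is 1.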
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
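# Why those two rows: with max_depth=1 the fitted tree has exactly three nodes
# (root, left child, right child), so every sample's decision path marks the
# root plus exactly one child -- [1, 1, 0] for a sample routed left and
# [1, 0, 1] for one routed right.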
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
    # Assert that leaf indices are correct
    leaves = est.apply(X)
    leaf_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
    assert_array_almost_equal(leaf_indicator, np.ones(shape=n_samples))
    # Ensure exactly one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
yield (check_no_sparse_y_support, name)
def test_mae():
# check MAE criterion produces correct results
# on small toy dataset
dt_mae = DecisionTreeRegressor(random_state=0, criterion="mae",
max_leaf_nodes=2)
dt_mae.fit([[3],[5],[3],[8],[5]],[6,7,3,4,3])
assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0/3.0])
assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])
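    # Arithmetic behind the unweighted assertions: the root predicts the median
    # of y, 4, so its MAE impurity is (2 + 3 + 1 + 0 + 1) / 5 = 1.4; the chosen
    # split sends X <= 4 left ({6, 3}: median 4.5, MAE 1.5) and X > 4 right
    # ({7, 4, 3}: median 4, MAE 4/3).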
dt_mae.fit([[3],[5],[3],[8],[5]],[6,7,3,4,3], [0.6,0.3,0.1,1.0,0.3])
assert_array_equal(dt_mae.tree_.impurity, [7.0/2.3, 3.0/0.7, 4.0/1.6])
assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0])
| bsd-3-clause |
ChronoMonochrome/android_external_chromium_org | tools/valgrind/valgrind_test.py | 24 | 46017 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs an exe through Valgrind and puts the intermediate files in a
directory.
"""
import datetime
import glob
import logging
import optparse
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import common
import drmemory_analyze
import memcheck_analyze
import tsan_analyze
class BaseTool(object):
"""Abstract class for running Valgrind-, PIN-based and other dynamic
error detector tools.
Always subclass this and implement ToolCommand with framework- and
tool-specific stuff.
"""
def __init__(self):
temp_parent_dir = None
self.log_parent_dir = ""
if common.IsWindows():
# gpu process on Windows Vista+ runs at Low Integrity and can only
# write to certain directories (http://crbug.com/119131)
#
# TODO(bruening): if scripts die in middle and don't clean up temp
# dir, we'll accumulate files in profile dir. should remove
# really old files automatically.
profile = os.getenv("USERPROFILE")
if profile:
self.log_parent_dir = profile + "\\AppData\\LocalLow\\"
if os.path.exists(self.log_parent_dir):
self.log_parent_dir = common.NormalizeWindowsPath(self.log_parent_dir)
temp_parent_dir = self.log_parent_dir
# Generated every time (even when overridden)
self.temp_dir = tempfile.mkdtemp(prefix="vg_logs_", dir=temp_parent_dir)
self.log_dir = self.temp_dir # overridable by --keep_logs
self.option_parser_hooks = []
# TODO(glider): we may not need some of the env vars on some of the
# platforms.
self._env = {
"G_SLICE" : "always-malloc",
"NSS_DISABLE_UNLOAD" : "1",
"NSS_DISABLE_ARENA_FREE_LIST" : "1",
"GTEST_DEATH_TEST_USE_FORK": "1",
}
def ToolName(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def Analyze(self, check_sanity=False):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def RegisterOptionParserHook(self, hook):
# Frameworks and tools can add their own flags to the parser.
self.option_parser_hooks.append(hook)
def CreateOptionParser(self):
# Defines Chromium-specific flags.
self._parser = optparse.OptionParser("usage: %prog [options] <program to "
"test>")
self._parser.disable_interspersed_args()
self._parser.add_option("-t", "--timeout",
dest="timeout", metavar="TIMEOUT", default=10000,
help="timeout in seconds for the run (default 10000)")
self._parser.add_option("", "--build-dir",
help="the location of the compiler output")
self._parser.add_option("", "--source-dir",
help="path to top of source tree for this build"
"(used to normalize source paths in baseline)")
self._parser.add_option("", "--gtest_filter", default="",
help="which test case to run")
self._parser.add_option("", "--gtest_repeat",
help="how many times to run each test")
self._parser.add_option("", "--gtest_print_time", action="store_true",
default=False,
help="show how long each test takes")
self._parser.add_option("", "--ignore_exit_code", action="store_true",
default=False,
help="ignore exit code of the test "
"(e.g. test failures)")
self._parser.add_option("", "--keep_logs", action="store_true",
default=False,
help="store memory tool logs in the <tool>.logs "
"directory instead of /tmp.\nThis can be "
"useful for tool developers/maintainers.\n"
"Please note that the <tool>.logs directory "
"will be clobbered on tool startup.")
# To add framework- or tool-specific flags, please add a hook using
# RegisterOptionParserHook in the corresponding subclass.
# See ValgrindTool and ThreadSanitizerBase for examples.
for hook in self.option_parser_hooks:
hook(self, self._parser)
def ParseArgv(self, args):
self.CreateOptionParser()
# self._tool_flags will store those tool flags which we don't parse
# manually in this script.
self._tool_flags = []
known_args = []
""" We assume that the first argument not starting with "-" is a program
name and all the following flags should be passed to the program.
TODO(timurrrr): customize optparse instead
"""
while len(args) > 0 and args[0][:1] == "-":
arg = args[0]
if (arg == "--"):
break
if self._parser.has_option(arg.split("=")[0]):
known_args += [arg]
else:
self._tool_flags += [arg]
args = args[1:]
if len(args) > 0:
known_args += args
self._options, self._args = self._parser.parse_args(known_args)
self._timeout = int(self._options.timeout)
self._source_dir = self._options.source_dir
if self._options.keep_logs:
# log_parent_dir has trailing slash if non-empty
self.log_dir = self.log_parent_dir + "%s.logs" % self.ToolName()
if os.path.exists(self.log_dir):
shutil.rmtree(self.log_dir)
os.mkdir(self.log_dir)
logging.info("Logs are in " + self.log_dir)
self._ignore_exit_code = self._options.ignore_exit_code
if self._options.gtest_filter != "":
self._args.append("--gtest_filter=%s" % self._options.gtest_filter)
if self._options.gtest_repeat:
self._args.append("--gtest_repeat=%s" % self._options.gtest_repeat)
if self._options.gtest_print_time:
self._args.append("--gtest_print_time")
return True
def Setup(self, args):
return self.ParseArgv(args)
def ToolCommand(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def Cleanup(self):
# You may override it in the tool-specific subclass
pass
def Execute(self):
""" Execute the app to be tested after successful instrumentation.
Full execution command-line provided by subclassers via proc."""
logging.info("starting execution...")
proc = self.ToolCommand()
for var in self._env:
common.PutEnvAndLog(var, self._env[var])
return common.RunSubprocess(proc, self._timeout)
def RunTestsAndAnalyze(self, check_sanity):
exec_retcode = self.Execute()
analyze_retcode = self.Analyze(check_sanity)
if analyze_retcode:
logging.error("Analyze failed.")
logging.info("Search the log for '[ERROR]' to see the error reports.")
return analyze_retcode
if exec_retcode:
if self._ignore_exit_code:
logging.info("Test execution failed, but the exit code is ignored.")
else:
logging.error("Test execution failed.")
return exec_retcode
else:
logging.info("Test execution completed successfully.")
if not analyze_retcode:
logging.info("Analysis completed successfully.")
return 0
def Main(self, args, check_sanity, min_runtime_in_seconds):
"""Call this to run through the whole process: Setup, Execute, Analyze"""
start_time = datetime.datetime.now()
retcode = -1
if self.Setup(args):
retcode = self.RunTestsAndAnalyze(check_sanity)
shutil.rmtree(self.temp_dir, ignore_errors=True)
self.Cleanup()
else:
logging.error("Setup failed")
end_time = datetime.datetime.now()
runtime_in_seconds = (end_time - start_time).seconds
hours = runtime_in_seconds / 3600
seconds = runtime_in_seconds % 3600
minutes = seconds / 60
seconds = seconds % 60
logging.info("elapsed time: %02d:%02d:%02d" % (hours, minutes, seconds))
if (min_runtime_in_seconds > 0 and
runtime_in_seconds < min_runtime_in_seconds):
logging.error("Layout tests finished too quickly. "
"It should have taken at least %d seconds. "
"Something went wrong?" % min_runtime_in_seconds)
retcode = -1
return retcode
def Run(self, args, module, min_runtime_in_seconds=0):
MODULES_TO_SANITY_CHECK = ["base"]
# TODO(timurrrr): this is a temporary workaround for http://crbug.com/47844
if self.ToolName() == "tsan" and common.IsMac():
MODULES_TO_SANITY_CHECK = []
check_sanity = module in MODULES_TO_SANITY_CHECK
return self.Main(args, check_sanity, min_runtime_in_seconds)
class ValgrindTool(BaseTool):
"""Abstract class for running Valgrind tools.
Always subclass this and implement ToolSpecificFlags() and
ExtendOptionParser() for tool-specific stuff.
"""
def __init__(self):
super(ValgrindTool, self).__init__()
self.RegisterOptionParserHook(ValgrindTool.ExtendOptionParser)
def UseXML(self):
# Override if tool prefers nonxml output
return True
def SelfContained(self):
    # Returns true iff the tool is distributed as a self-contained
# .sh script (e.g. ThreadSanitizer)
return False
def ExtendOptionParser(self, parser):
parser.add_option("", "--suppressions", default=[],
action="append",
help="path to a valgrind suppression file")
parser.add_option("", "--indirect", action="store_true",
default=False,
help="set BROWSER_WRAPPER rather than "
"running valgrind directly")
parser.add_option("", "--indirect_webkit_layout", action="store_true",
default=False,
help="set --wrapper rather than running Dr. Memory "
"directly.")
parser.add_option("", "--trace_children", action="store_true",
default=False,
help="also trace child processes")
parser.add_option("", "--num-callers",
dest="num_callers", default=30,
help="number of callers to show in stack traces")
parser.add_option("", "--generate_dsym", action="store_true",
default=False,
help="Generate .dSYM file on Mac if needed. Slow!")
def Setup(self, args):
if not BaseTool.Setup(self, args):
return False
if common.IsMac():
self.PrepareForTestMac()
return True
def PrepareForTestMac(self):
"""Runs dsymutil if needed.
Valgrind for Mac OS X requires that debugging information be in a .dSYM
bundle generated by dsymutil. It is not currently able to chase DWARF
data into .o files like gdb does, so executables without .dSYM bundles or
with the Chromium-specific "fake_dsym" bundles generated by
build/mac/strip_save_dsym won't give source file and line number
information in valgrind.
This function will run dsymutil if the .dSYM bundle is missing or if
it looks like a fake_dsym. A non-fake dsym that already exists is assumed
to be up-to-date.
"""
test_command = self._args[0]
dsym_bundle = self._args[0] + '.dSYM'
dsym_file = os.path.join(dsym_bundle, 'Contents', 'Resources', 'DWARF',
os.path.basename(test_command))
dsym_info_plist = os.path.join(dsym_bundle, 'Contents', 'Info.plist')
needs_dsymutil = True
saved_test_command = None
if os.path.exists(dsym_file) and os.path.exists(dsym_info_plist):
# Look for the special fake_dsym tag in dsym_info_plist.
dsym_info_plist_contents = open(dsym_info_plist).read()
if not re.search('^\s*<key>fake_dsym</key>$', dsym_info_plist_contents,
re.MULTILINE):
# fake_dsym is not set, this is a real .dSYM bundle produced by
# dsymutil. dsymutil does not need to be run again.
needs_dsymutil = False
else:
# fake_dsym is set. dsym_file is a copy of the original test_command
# before it was stripped. Copy it back to test_command so that
# dsymutil has unstripped input to work with. Move the stripped
# test_command out of the way, it will be restored when this is
# done.
saved_test_command = test_command + '.stripped'
os.rename(test_command, saved_test_command)
shutil.copyfile(dsym_file, test_command)
shutil.copymode(saved_test_command, test_command)
if needs_dsymutil:
if self._options.generate_dsym:
# Remove the .dSYM bundle if it exists.
shutil.rmtree(dsym_bundle, True)
dsymutil_command = ['dsymutil', test_command]
# dsymutil is crazy slow. Ideally we'd have a timeout here,
# but common.RunSubprocess' timeout is only checked
# after each line of output; dsymutil is silent
# until the end, and is then killed, which is silly.
common.RunSubprocess(dsymutil_command)
if saved_test_command:
os.rename(saved_test_command, test_command)
else:
logging.info("No real .dSYM for test_command. Line numbers will "
"not be shown. Either tell xcode to generate .dSYM "
"file, or use --generate_dsym option to this tool.")
def ToolCommand(self):
"""Get the valgrind command to run."""
# Note that self._args begins with the exe to be run.
tool_name = self.ToolName()
# Construct the valgrind command.
if self.SelfContained():
proc = ["valgrind-%s.sh" % tool_name]
else:
if 'CHROME_VALGRIND' in os.environ:
path = os.path.join(os.environ['CHROME_VALGRIND'], "bin", "valgrind")
else:
path = "valgrind"
proc = [path, "--tool=%s" % tool_name]
proc += ["--num-callers=%i" % int(self._options.num_callers)]
if self._options.trace_children:
proc += ["--trace-children=yes"]
proc += ["--trace-children-skip='*dbus-daemon*'"]
proc += ["--trace-children-skip='*dbus-launch*'"]
proc += ["--trace-children-skip='*perl*'"]
proc += ["--trace-children-skip='*python*'"]
# This is really Python, but for some reason Valgrind follows it.
proc += ["--trace-children-skip='*lsb_release*'"]
proc += self.ToolSpecificFlags()
proc += self._tool_flags
suppression_count = 0
for suppression_file in self._options.suppressions:
if os.path.exists(suppression_file):
suppression_count += 1
proc += ["--suppressions=%s" % suppression_file]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
logfilename = self.log_dir + ("/%s." % tool_name) + "%p"
if self.UseXML():
proc += ["--xml=yes", "--xml-file=" + logfilename]
else:
proc += ["--log-file=" + logfilename]
# The Valgrind command is constructed.
# Valgrind doesn't play nice with the Chrome sandbox. Empty this env var
# set by runtest.py to disable the sandbox.
if os.environ.get("CHROME_DEVEL_SANDBOX", None):
      logging.info("Removing CHROME_DEVEL_SANDBOX from environment")
os.environ["CHROME_DEVEL_SANDBOX"] = ''
# Handle --indirect_webkit_layout separately.
if self._options.indirect_webkit_layout:
# Need to create the wrapper before modifying |proc|.
wrapper = self.CreateBrowserWrapper(proc, webkit=True)
proc = self._args
proc.append("--wrapper")
proc.append(wrapper)
return proc
if self._options.indirect:
wrapper = self.CreateBrowserWrapper(proc)
os.environ["BROWSER_WRAPPER"] = wrapper
logging.info('export BROWSER_WRAPPER=' + wrapper)
proc = []
proc += self._args
return proc
def ToolSpecificFlags(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def CreateBrowserWrapper(self, proc, webkit=False):
"""The program being run invokes Python or something else that can't stand
to be valgrinded, and also invokes the Chrome browser. In this case, use a
magic wrapper to only valgrind the Chrome browser. Build the wrapper here.
Returns the path to the wrapper. It's up to the caller to use the wrapper
appropriately.
"""
command = " ".join(proc)
# Add the PID of the browser wrapper to the logfile names so we can
# separate log files for different UI tests at the analyze stage.
command = command.replace("%p", "$$.%p")
(fd, indirect_fname) = tempfile.mkstemp(dir=self.log_dir,
prefix="browser_wrapper.",
text=True)
f = os.fdopen(fd, "w")
f.write('#!/bin/bash\n'
'echo "Started Valgrind wrapper for this test, PID=$$" >&2\n')
f.write('DIR=`dirname $0`\n'
'TESTNAME_FILE=$DIR/testcase.$$.name\n\n')
if webkit:
# Webkit layout_tests pass the URL as the first line of stdin.
f.write('tee $TESTNAME_FILE | %s "$@"\n' % command)
else:
# Try to get the test case name by looking at the program arguments.
# i.e. Chromium ui_tests used --test-name arg.
# TODO(timurrrr): This doesn't handle "--test-name Test.Name"
# TODO(timurrrr): ui_tests are dead. Where do we use the non-webkit
# wrapper now? browser_tests? What do they do?
f.write('for arg in $@\ndo\n'
' if [[ "$arg" =~ --test-name=(.*) ]]\n then\n'
' echo ${BASH_REMATCH[1]} >$TESTNAME_FILE\n'
' fi\n'
'done\n\n'
'%s "$@"\n' % command)
f.close()
os.chmod(indirect_fname, stat.S_IRUSR|stat.S_IXUSR)
return indirect_fname
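  # For reference, the non-webkit wrapper generated above looks roughly
  # like this (a sketch reconstructed from the f.write() calls, where
  # <tool command> stands for the expanded |command|):
  #
  #   #!/bin/bash
  #   echo "Started Valgrind wrapper for this test, PID=$$" >&2
  #   DIR=`dirname $0`
  #   TESTNAME_FILE=$DIR/testcase.$$.name
  #   for arg in $@
  #   do
  #     if [[ "$arg" =~ --test-name=(.*) ]]
  #      then
  #       echo ${BASH_REMATCH[1]} >$TESTNAME_FILE
  #     fi
  #   done
  #   <tool command> "$@"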
def CreateAnalyzer(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def GetAnalyzeResults(self, check_sanity=False):
# Glob all the files in the log directory
filenames = glob.glob(self.log_dir + "/" + self.ToolName() + ".*")
# If we have browser wrapper, the logfiles are named as
# "toolname.wrapper_PID.valgrind_PID".
# Let's extract the list of wrapper_PIDs and name it ppids
ppids = set([int(f.split(".")[-2]) \
for f in filenames if re.search("\.[0-9]+\.[0-9]+$", f)])
analyzer = self.CreateAnalyzer()
if len(ppids) == 0:
# Fast path - no browser wrapper was set.
return analyzer.Report(filenames, None, check_sanity)
ret = 0
for ppid in ppids:
testcase_name = None
try:
f = open(self.log_dir + ("/testcase.%d.name" % ppid))
testcase_name = f.read().strip()
f.close()
wk_layout_prefix="third_party/WebKit/LayoutTests/"
wk_prefix_at = testcase_name.rfind(wk_layout_prefix)
if wk_prefix_at != -1:
testcase_name = testcase_name[wk_prefix_at + len(wk_layout_prefix):]
except IOError:
pass
print "====================================================="
print " Below is the report for valgrind wrapper PID=%d." % ppid
if testcase_name:
print " It was used while running the `%s` test." % testcase_name
else:
print " You can find the corresponding test"
print " by searching the above log for 'PID=%d'" % ppid
sys.stdout.flush()
ppid_filenames = [f for f in filenames \
if re.search("\.%d\.[0-9]+$" % ppid, f)]
# check_sanity won't work with browser wrappers
assert check_sanity == False
ret |= analyzer.Report(ppid_filenames, testcase_name)
print "====================================================="
sys.stdout.flush()
if ret != 0:
print ""
print "The Valgrind reports are grouped by test names."
print "Each test has its PID printed in the log when the test was run"
print "and at the beginning of its Valgrind report."
print "Hint: you can search for the reports by Ctrl+F -> `=#`"
sys.stdout.flush()
return ret
# TODO(timurrrr): Split into a separate file.
class Memcheck(ValgrindTool):
"""Memcheck
Dynamic memory error detector for Linux & Mac
http://valgrind.org/info/tools.html#memcheck
"""
def __init__(self):
super(Memcheck, self).__init__()
self.RegisterOptionParserHook(Memcheck.ExtendOptionParser)
def ToolName(self):
return "memcheck"
def ExtendOptionParser(self, parser):
parser.add_option("--leak-check", "--leak_check", type="string",
default="yes", # --leak-check=yes is equivalent of =full
help="perform leak checking at the end of the run")
parser.add_option("", "--show_all_leaks", action="store_true",
default=False,
help="also show less blatant leaks")
parser.add_option("", "--track_origins", action="store_true",
default=False,
help="Show whence uninitialized bytes came. 30% slower.")
def ToolSpecificFlags(self):
ret = ["--gen-suppressions=all", "--demangle=no"]
ret += ["--leak-check=%s" % self._options.leak_check]
if self._options.show_all_leaks:
ret += ["--show-reachable=yes"]
else:
ret += ["--show-possibly-lost=no"]
if self._options.track_origins:
ret += ["--track-origins=yes"]
# TODO(glider): this is a temporary workaround for http://crbug.com/51716
# Let's see whether it helps.
if common.IsMac():
ret += ["--smc-check=all"]
return ret
def CreateAnalyzer(self):
use_gdb = common.IsMac()
return memcheck_analyze.MemcheckAnalyzer(self._source_dir,
self._options.show_all_leaks,
use_gdb=use_gdb)
def Analyze(self, check_sanity=False):
ret = self.GetAnalyzeResults(check_sanity)
if ret != 0:
logging.info("Please see http://dev.chromium.org/developers/how-tos/"
"using-valgrind for the info on Memcheck/Valgrind")
return ret
class PinTool(BaseTool):
"""Abstract class for running PIN tools.
Always subclass this and implement ToolSpecificFlags() and
ExtendOptionParser() for tool-specific stuff.
"""
def PrepareForTest(self):
pass
def ToolSpecificFlags(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def ToolCommand(self):
"""Get the PIN command to run."""
# Construct the PIN command.
pin_cmd = os.getenv("PIN_COMMAND")
if not pin_cmd:
raise RuntimeError, "Please set PIN_COMMAND environment variable " \
"with the path to pin.exe"
proc = pin_cmd.split(" ")
proc += self.ToolSpecificFlags()
# The PIN command is constructed.
# PIN requires -- to separate PIN flags from the executable name.
# self._args begins with the exe to be run.
proc += ["--"]
proc += self._args
return proc
class ThreadSanitizerBase(object):
"""ThreadSanitizer
Dynamic data race detector for Linux, Mac and Windows.
http://code.google.com/p/data-race-test/wiki/ThreadSanitizer
Since TSan works on both Valgrind (Linux, Mac) and PIN (Windows), we need
to have multiple inheritance
"""
INFO_MESSAGE="Please see http://dev.chromium.org/developers/how-tos/" \
"using-valgrind/threadsanitizer for the info on " \
"ThreadSanitizer"
def __init__(self):
super(ThreadSanitizerBase, self).__init__()
self.RegisterOptionParserHook(ThreadSanitizerBase.ExtendOptionParser)
def ToolName(self):
return "tsan"
def UseXML(self):
return False
def SelfContained(self):
return True
def ExtendOptionParser(self, parser):
parser.add_option("", "--hybrid", default="no",
dest="hybrid",
help="Finds more data races, may give false positive "
"reports unless the code is annotated")
parser.add_option("", "--announce-threads", default="yes",
dest="announce_threads",
                      help="Show the stack traces of thread creation")
parser.add_option("", "--free-is-write", default="no",
dest="free_is_write",
help="Treat free()/operator delete as memory write. "
"This helps finding more data races, but (currently) "
"this may give false positive reports on std::string "
"internals, see http://code.google.com/p/data-race-test"
"/issues/detail?id=40")
def EvalBoolFlag(self, flag_value):
if (flag_value in ["1", "true", "yes"]):
return True
elif (flag_value in ["0", "false", "no"]):
return False
raise RuntimeError, "Can't parse flag value (%s)" % flag_value
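  # For reference: EvalBoolFlag("1"), EvalBoolFlag("true") and
  # EvalBoolFlag("yes") return True; "0", "false" and "no" return False;
  # anything else raises RuntimeError.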
def ToolSpecificFlags(self):
ret = []
ignore_files = ["ignores.txt"]
for platform_suffix in common.PlatformNames():
ignore_files.append("ignores_%s.txt" % platform_suffix)
for ignore_file in ignore_files:
fullname = os.path.join(self._source_dir,
"tools", "valgrind", "tsan", ignore_file)
if os.path.exists(fullname):
fullname = common.NormalizeWindowsPath(fullname)
ret += ["--ignore=%s" % fullname]
# This should shorten filepaths for local builds.
ret += ["--file-prefix-to-cut=%s/" % self._source_dir]
# This should shorten filepaths on bots.
ret += ["--file-prefix-to-cut=build/src/"]
ret += ["--file-prefix-to-cut=out/Release/../../"]
# This should shorten filepaths for functions intercepted in TSan.
ret += ["--file-prefix-to-cut=scripts/tsan/tsan/"]
ret += ["--file-prefix-to-cut=src/tsan/tsan/"]
ret += ["--gen-suppressions=true"]
if self.EvalBoolFlag(self._options.hybrid):
ret += ["--hybrid=yes"] # "no" is the default value for TSAN
if self.EvalBoolFlag(self._options.announce_threads):
ret += ["--announce-threads"]
if self.EvalBoolFlag(self._options.free_is_write):
ret += ["--free-is-write=yes"]
else:
ret += ["--free-is-write=no"]
# --show-pc flag is needed for parsing the error logs on Darwin.
if platform_suffix == 'mac':
ret += ["--show-pc=yes"]
ret += ["--show-pid=no"]
boring_callers = common.BoringCallers(mangled=False, use_re_wildcards=False)
# TODO(timurrrr): In fact, we want "starting from .." instead of "below .."
for bc in boring_callers:
ret += ["--cut_stack_below=%s" % bc]
return ret
class ThreadSanitizerPosix(ThreadSanitizerBase, ValgrindTool):
def ToolSpecificFlags(self):
proc = ThreadSanitizerBase.ToolSpecificFlags(self)
# The -v flag is needed for printing the list of used suppressions and
# obtaining addresses for loaded shared libraries on Mac.
proc += ["-v"]
return proc
def CreateAnalyzer(self):
use_gdb = common.IsMac()
return tsan_analyze.TsanAnalyzer(use_gdb)
def Analyze(self, check_sanity=False):
ret = self.GetAnalyzeResults(check_sanity)
if ret != 0:
logging.info(self.INFO_MESSAGE)
return ret
class ThreadSanitizerWindows(ThreadSanitizerBase, PinTool):
def __init__(self):
super(ThreadSanitizerWindows, self).__init__()
self.RegisterOptionParserHook(ThreadSanitizerWindows.ExtendOptionParser)
def ExtendOptionParser(self, parser):
parser.add_option("", "--suppressions", default=[],
action="append",
help="path to TSan suppression file")
def ToolSpecificFlags(self):
add_env = {
"CHROME_ALLOCATOR" : "WINHEAP",
}
for k,v in add_env.iteritems():
logging.info("export %s=%s", k, v)
os.putenv(k, v)
proc = ThreadSanitizerBase.ToolSpecificFlags(self)
# On PIN, ThreadSanitizer has its own suppression mechanism
    # and --log-file flag which work exactly as they do on Valgrind.
suppression_count = 0
for suppression_file in self._options.suppressions:
if os.path.exists(suppression_file):
suppression_count += 1
suppression_file = common.NormalizeWindowsPath(suppression_file)
proc += ["--suppressions=%s" % suppression_file]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
logfilename = self.log_dir + "/tsan.%p"
proc += ["--log-file=" + common.NormalizeWindowsPath(logfilename)]
# TODO(timurrrr): Add flags for Valgrind trace children analog when we
# start running complex tests (e.g. UI) under TSan/Win.
return proc
def Analyze(self, check_sanity=False):
filenames = glob.glob(self.log_dir + "/tsan.*")
analyzer = tsan_analyze.TsanAnalyzer()
ret = analyzer.Report(filenames, None, check_sanity)
if ret != 0:
logging.info(self.INFO_MESSAGE)
return ret
class DrMemory(BaseTool):
"""Dr.Memory
Dynamic memory error detector for Windows.
http://dev.chromium.org/developers/how-tos/using-drmemory
  It is not very mature at the moment; some things might not work properly.
"""
def __init__(self, full_mode, pattern_mode):
super(DrMemory, self).__init__()
self.full_mode = full_mode
self.pattern_mode = pattern_mode
self.RegisterOptionParserHook(DrMemory.ExtendOptionParser)
def ToolName(self):
return "drmemory"
def ExtendOptionParser(self, parser):
parser.add_option("", "--suppressions", default=[],
action="append",
help="path to a drmemory suppression file")
parser.add_option("", "--follow_python", action="store_true",
default=False, dest="follow_python",
help="Monitor python child processes. If off, neither "
"python children nor any children of python children "
"will be monitored.")
parser.add_option("", "--indirect", action="store_true",
default=False,
help="set BROWSER_WRAPPER rather than "
"running Dr. Memory directly on the harness")
parser.add_option("", "--indirect_webkit_layout", action="store_true",
default=False,
help="set --wrapper rather than running valgrind "
"directly.")
parser.add_option("", "--use_debug", action="store_true",
default=False, dest="use_debug",
help="Run Dr. Memory debug build")
parser.add_option("", "--trace_children", action="store_true",
default=True,
help="TODO: default value differs from Valgrind")
def ToolCommand(self):
"""Get the tool command to run."""
# WINHEAP is what Dr. Memory supports as there are issues w/ both
# jemalloc (http://code.google.com/p/drmemory/issues/detail?id=320) and
# tcmalloc (http://code.google.com/p/drmemory/issues/detail?id=314)
add_env = {
"CHROME_ALLOCATOR" : "WINHEAP",
"JSIMD_FORCEMMX" : "1", # http://code.google.com/p/drmemory/issues/detail?id=540
}
for k,v in add_env.iteritems():
logging.info("export %s=%s", k, v)
os.putenv(k, v)
drmem_cmd = os.getenv("DRMEMORY_COMMAND")
if not drmem_cmd:
raise RuntimeError, "Please set DRMEMORY_COMMAND environment variable " \
"with the path to drmemory.exe"
proc = drmem_cmd.split(" ")
# By default, don't run python (this will exclude python's children as well)
# to reduce runtime. We're not really interested in spending time finding
# bugs in the python implementation.
# With file-based config we must update the file every time, and
# it will affect simultaneous drmem uses by this user. While file-based
# config has many advantages, here we may want this-instance-only
# (http://code.google.com/p/drmemory/issues/detail?id=334).
drconfig_cmd = [ proc[0].replace("drmemory.exe", "drconfig.exe") ]
drconfig_cmd += ["-quiet"] # suppress errors about no 64-bit libs
run_drconfig = True
if self._options.follow_python:
logging.info("Following python children")
# -unreg fails if not already registered so query for that first
query_cmd = drconfig_cmd + ["-isreg", "python.exe"]
query_proc = subprocess.Popen(query_cmd, stdout=subprocess.PIPE,
shell=True)
(query_out, query_err) = query_proc.communicate()
if re.search("exe not registered", query_out):
run_drconfig = False # all set
else:
drconfig_cmd += ["-unreg", "python.exe"]
else:
logging.info("Excluding python children")
drconfig_cmd += ["-reg", "python.exe", "-norun"]
if run_drconfig:
drconfig_retcode = common.RunSubprocess(drconfig_cmd, self._timeout)
if drconfig_retcode:
logging.error("Configuring whether to follow python children failed " \
"with %d.", drconfig_retcode)
raise RuntimeError, "Configuring python children failed "
suppression_count = 0
supp_files = self._options.suppressions
if self.full_mode:
supp_files += [s.replace(".txt", "_full.txt") for s in supp_files]
for suppression_file in supp_files:
if os.path.exists(suppression_file):
suppression_count += 1
proc += ["-suppress", common.NormalizeWindowsPath(suppression_file)]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
# Un-comment to dump Dr.Memory events on error
#proc += ["-dr_ops", "-dumpcore_mask", "-dr_ops", "0x8bff"]
# Un-comment and comment next line to debug Dr.Memory
#proc += ["-dr_ops", "-no_hide"]
#proc += ["-dr_ops", "-msgbox_mask", "-dr_ops", "15"]
#Proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "15"]
# Ensure we see messages about Dr. Memory crashing!
proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "12"]
if self._options.use_debug:
proc += ["-debug"]
proc += ["-logdir", common.NormalizeWindowsPath(self.log_dir)]
    symcache_dir = None
    if self.log_parent_dir:
# gpu process on Windows Vista+ runs at Low Integrity and can only
# write to certain directories (http://crbug.com/119131)
symcache_dir = os.path.join(self.log_parent_dir, "drmemory.symcache")
elif self._options.build_dir:
# The other case is only possible with -t cmdline.
# Anyways, if we omit -symcache_dir the -logdir's value is used which
# should be fine.
symcache_dir = os.path.join(self._options.build_dir, "drmemory.symcache")
if symcache_dir:
if not os.path.exists(symcache_dir):
try:
os.mkdir(symcache_dir)
except OSError:
logging.warning("Can't create symcache dir?")
if os.path.exists(symcache_dir):
proc += ["-symcache_dir", common.NormalizeWindowsPath(symcache_dir)]
# Use -no_summary to suppress DrMemory's summary and init-time
# notifications. We generate our own with drmemory_analyze.py.
proc += ["-batch", "-no_summary"]
# Un-comment to disable interleaved output. Will also suppress error
# messages normally printed to stderr.
#proc += ["-quiet", "-no_results_to_stderr"]
proc += ["-callstack_max_frames", "40"]
# disable leak scan for now
proc += ["-no_count_leaks", "-no_leak_scan"]
# make callstacks easier to read
proc += ["-callstack_srcfile_prefix",
"build\\src,chromium\\src,crt_build\\self_x86"]
proc += ["-callstack_modname_hide",
"*drmemory*,chrome.dll"]
boring_callers = common.BoringCallers(mangled=False, use_re_wildcards=False)
# TODO(timurrrr): In fact, we want "starting from .." instead of "below .."
proc += ["-callstack_truncate_below", ",".join(boring_callers)]
if self.pattern_mode:
proc += ["-pattern", "0xf1fd", "-no_count_leaks", "-redzone_size", "0x20"]
elif not self.full_mode:
proc += ["-light"]
proc += self._tool_flags
# DrM i#850/851: The new -callstack_use_top_fp_selectively has bugs.
proc += ["-no_callstack_use_top_fp_selectively"]
# Dr.Memory requires -- to separate tool flags from the executable name.
proc += ["--"]
if self._options.indirect or self._options.indirect_webkit_layout:
# TODO(timurrrr): reuse for TSan on Windows
wrapper_path = os.path.join(self._source_dir,
"tools", "valgrind", "browser_wrapper_win.py")
wrapper = " ".join(["python", wrapper_path] + proc)
self.CreateBrowserWrapper(wrapper)
      logging.info("browser wrapper = " + wrapper)
if self._options.indirect_webkit_layout:
proc = self._args
# Layout tests want forward slashes.
wrapper = wrapper.replace('\\', '/')
proc += ["--wrapper", wrapper]
return proc
else:
proc = []
# Note that self._args begins with the name of the exe to be run.
self._args[0] = common.NormalizeWindowsPath(self._args[0])
proc += self._args
return proc
def CreateBrowserWrapper(self, command):
os.putenv("BROWSER_WRAPPER", command)
def Analyze(self, check_sanity=False):
# Use one analyzer for all the log files to avoid printing duplicate reports
#
# TODO(timurrrr): unify this with Valgrind and other tools when we have
# http://code.google.com/p/drmemory/issues/detail?id=684
analyzer = drmemory_analyze.DrMemoryAnalyzer()
ret = 0
if not self._options.indirect and not self._options.indirect_webkit_layout:
filenames = glob.glob(self.log_dir + "/*/results.txt")
ret = analyzer.Report(filenames, None, check_sanity)
else:
testcases = glob.glob(self.log_dir + "/testcase.*.logs")
# If we have browser wrapper, the per-test logdirs are named as
# "testcase.wrapper_PID.name".
# Let's extract the list of wrapper_PIDs and name it ppids.
# NOTE: ppids may contain '_', i.e. they are not ints!
ppids = set([f.split(".")[-2] for f in testcases])
for ppid in ppids:
testcase_name = None
try:
f = open("%s/testcase.%s.name" % (self.log_dir, ppid))
testcase_name = f.read().strip()
f.close()
except IOError:
pass
print "====================================================="
print " Below is the report for drmemory wrapper PID=%s." % ppid
if testcase_name:
print " It was used while running the `%s` test." % testcase_name
else:
# TODO(timurrrr): hm, the PID line is suppressed on Windows...
print " You can find the corresponding test"
print " by searching the above log for 'PID=%s'" % ppid
sys.stdout.flush()
ppid_filenames = glob.glob("%s/testcase.%s.logs/*/results.txt" %
(self.log_dir, ppid))
ret |= analyzer.Report(ppid_filenames, testcase_name, False)
print "====================================================="
sys.stdout.flush()
logging.info("Please see http://dev.chromium.org/developers/how-tos/"
"using-drmemory for the info on Dr. Memory")
return ret
# RaceVerifier support. See
# http://code.google.com/p/data-race-test/wiki/RaceVerifier for more details.
class ThreadSanitizerRV1Analyzer(tsan_analyze.TsanAnalyzer):
""" TsanAnalyzer that saves race reports to a file. """
TMP_FILE = "rvlog.tmp"
def __init__(self, source_dir, use_gdb):
super(ThreadSanitizerRV1Analyzer, self).__init__(use_gdb)
self.out = open(self.TMP_FILE, "w")
def Report(self, files, testcase, check_sanity=False):
reports = self.GetReports(files)
for report in reports:
print >>self.out, report
if len(reports) > 0:
logging.info("RaceVerifier pass 1 of 2, found %i reports" % len(reports))
return -1
return 0
def CloseOutputFile(self):
self.out.close()
class ThreadSanitizerRV1Mixin(object):
"""RaceVerifier first pass.
Runs ThreadSanitizer as usual, but hides race reports and collects them in a
temporary file"""
def __init__(self):
super(ThreadSanitizerRV1Mixin, self).__init__()
self.RegisterOptionParserHook(ThreadSanitizerRV1Mixin.ExtendOptionParser)
def ExtendOptionParser(self, parser):
parser.set_defaults(hybrid="yes")
def CreateAnalyzer(self):
use_gdb = common.IsMac()
self.analyzer = ThreadSanitizerRV1Analyzer(self._source_dir, use_gdb)
return self.analyzer
def Cleanup(self):
super(ThreadSanitizerRV1Mixin, self).Cleanup()
self.analyzer.CloseOutputFile()
class ThreadSanitizerRV2Mixin(object):
"""RaceVerifier second pass."""
def __init__(self):
super(ThreadSanitizerRV2Mixin, self).__init__()
self.RegisterOptionParserHook(ThreadSanitizerRV2Mixin.ExtendOptionParser)
def ExtendOptionParser(self, parser):
parser.add_option("", "--race-verifier-sleep-ms",
dest="race_verifier_sleep_ms", default=10,
help="duration of RaceVerifier delays")
def ToolSpecificFlags(self):
proc = super(ThreadSanitizerRV2Mixin, self).ToolSpecificFlags()
proc += ['--race-verifier=%s' % ThreadSanitizerRV1Analyzer.TMP_FILE,
'--race-verifier-sleep-ms=%d' %
int(self._options.race_verifier_sleep_ms)]
return proc
def Cleanup(self):
super(ThreadSanitizerRV2Mixin, self).Cleanup()
os.unlink(ThreadSanitizerRV1Analyzer.TMP_FILE)
class ThreadSanitizerRV1Posix(ThreadSanitizerRV1Mixin, ThreadSanitizerPosix):
pass
class ThreadSanitizerRV2Posix(ThreadSanitizerRV2Mixin, ThreadSanitizerPosix):
pass
class ThreadSanitizerRV1Windows(ThreadSanitizerRV1Mixin,
ThreadSanitizerWindows):
pass
class ThreadSanitizerRV2Windows(ThreadSanitizerRV2Mixin,
ThreadSanitizerWindows):
pass
class RaceVerifier(object):
"""Runs tests under RaceVerifier/Valgrind."""
MORE_INFO_URL = "http://code.google.com/p/data-race-test/wiki/RaceVerifier"
def RV1Factory(self):
if common.IsWindows():
return ThreadSanitizerRV1Windows()
else:
return ThreadSanitizerRV1Posix()
def RV2Factory(self):
if common.IsWindows():
return ThreadSanitizerRV2Windows()
else:
return ThreadSanitizerRV2Posix()
def ToolName(self):
return "tsan"
def Main(self, args, check_sanity, min_runtime_in_seconds):
logging.info("Running a TSan + RaceVerifier test. For more information, " +
"see " + self.MORE_INFO_URL)
cmd1 = self.RV1Factory()
ret = cmd1.Main(args, check_sanity, min_runtime_in_seconds)
# Verify race reports, if there are any.
if ret == -1:
logging.info("Starting pass 2 of 2. Running the same binary in " +
"RaceVerifier mode to confirm possible race reports.")
logging.info("For more information, see " + self.MORE_INFO_URL)
cmd2 = self.RV2Factory()
ret = cmd2.Main(args, check_sanity, min_runtime_in_seconds)
else:
logging.info("No reports, skipping RaceVerifier second pass")
logging.info("Please see " + self.MORE_INFO_URL + " for more information " +
"on RaceVerifier")
return ret
def Run(self, args, module, min_runtime_in_seconds=0):
return self.Main(args, False, min_runtime_in_seconds)
class EmbeddedTool(BaseTool):
"""Abstract class for tools embedded directly into the test binary.
"""
# TODO(glider): need to override Execute() and support process chaining here.
def ToolCommand(self):
# In the simplest case just the args of the script.
return self._args
class Asan(EmbeddedTool):
"""AddressSanitizer, a memory error detector.
More information at
http://dev.chromium.org/developers/testing/addresssanitizer
"""
def __init__(self):
super(Asan, self).__init__()
self._timeout = 1200
if common.IsMac():
self._env["DYLD_NO_PIE"] = "1"
def ToolName(self):
return "asan"
def ToolCommand(self):
# TODO(glider): use pipes instead of the ugly wrapper here once they
# are supported.
procs = [os.path.join(self._source_dir, "tools", "valgrind",
"asan", "asan_wrapper.sh")]
procs.extend(self._args)
return procs
  def Analyze(self, unused_check_sanity):
return 0
class ToolFactory:
def Create(self, tool_name):
if tool_name == "memcheck":
return Memcheck()
if tool_name == "tsan":
if common.IsWindows():
return ThreadSanitizerWindows()
else:
return ThreadSanitizerPosix()
if tool_name == "drmemory" or tool_name == "drmemory_light":
# TODO(timurrrr): remove support for "drmemory" when buildbots are
# switched to drmemory_light OR make drmemory==drmemory_full the default
# mode when the tool is mature enough.
return DrMemory(False, False)
if tool_name == "drmemory_full":
return DrMemory(True, False)
if tool_name == "drmemory_pattern":
return DrMemory(False, True)
if tool_name == "tsan_rv":
return RaceVerifier()
if tool_name == "asan":
return Asan()
try:
platform_name = common.PlatformNames()[0]
except common.NotImplementedError:
platform_name = sys.platform + "(Unknown)"
raise RuntimeError, "Unknown tool (tool=%s, platform=%s)" % (tool_name,
platform_name)
def CreateTool(tool):
return ToolFactory().Create(tool)
| bsd-3-clause |
QuantScientist/Deep-Learning-Boot-Camp | Kaggle-PyTorch/PyTorch-Ensembler/nnmodels/simplenet.py | 1 | 3522 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import math
use_gpu = torch.cuda.is_available()
# class SimpleNet(nn.Module):
# def __init__(self, num_classes=1, n_dim=3):
# super(SimpleNet, self).__init__()
# self.conv1 = nn.Conv2d(n_dim, 32, 3, stride=1)
# self.conv2 = nn.Conv2d(32, 32, kernel_size=3)
#
# self.conv3 = nn.Conv2d(32, 64, kernel_size=3)
# self.conv4 = nn.Conv2d(64, 64, kernel_size=3)
# self.dense1 = nn.Linear(179776, out_features=512)
# self.dense1_bn = nn.BatchNorm1d(512)
# self.dense2 = nn.Linear(512, (num_classes))
#
# def forward(self, x):
# x = F.relu(self.conv1(x))
# x = F.relu(F.dropout(F.max_pool2d(self.conv2(x), 2), 0.25))
# x = F.relu(self.conv3(x))
# x = F.relu(F.dropout(F.max_pool2d(self.conv4(x), 2), 0.25))
# x = x.view(x.size(0), -1)
# # print (x.data.shape)
# x = F.relu(self.dense1_bn(self.dense1(x)))
# x = x.view(x.size(0), -1)
# # print (x.data.shape)
# x = self.dense2(x)
#
# return x
dropout = torch.nn.Dropout(p=0.30)
relu = torch.nn.LeakyReLU()
pool = nn.MaxPool2d(2, 2)
class ConvRes(nn.Module):
def __init__(self, insize, outsize):
super(ConvRes, self).__init__()
drate = .3
self.math = nn.Sequential(
nn.BatchNorm2d(insize),
nn.Dropout(drate),
torch.nn.Conv2d(insize, outsize, kernel_size=2, padding=2),
nn.PReLU(),
)
def forward(self, x):
return self.math(x)
class ConvCNN(nn.Module):
def __init__(self, insize, outsize, kernel_size=7, padding=2, pool=2, avg=True):
super(ConvCNN, self).__init__()
self.avg = avg
self.math = torch.nn.Sequential(
torch.nn.Conv2d(insize, outsize, kernel_size=kernel_size, padding=padding),
torch.nn.BatchNorm2d(outsize),
torch.nn.LeakyReLU(),
torch.nn.MaxPool2d(pool, pool),
)
self.avgpool = torch.nn.AvgPool2d(pool, pool)
def forward(self, x):
x = self.math(x)
if self.avg is True:
x = self.avgpool(x)
return x
class SimpleNet(nn.Module):
    def __init__(self, num_classes, n_dim):
        super(SimpleNet, self).__init__()
        self.num_classes = num_classes
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.cnn1 = ConvCNN(n_dim, 32, kernel_size=7, pool=4, avg=False)
        self.cnn2 = ConvCNN(32, 32, kernel_size=5, pool=2, avg=True)
        self.cnn3 = ConvCNN(32, 32, kernel_size=5, pool=2, avg=True)
        self.res1 = ConvRes(32, 64)
self.features = nn.Sequential(
self.cnn1, dropout,
self.cnn2,
self.cnn3,
self.res1,
)
self.classifier = torch.nn.Sequential(
            nn.Linear(1024, num_classes),
)
        self.sig = nn.Sigmoid()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
# print (x.data.shape)
x = self.classifier(x)
        if self.num_classes == 1:
x = self.sig(x)
return x
# return F.log_softmax(x)
def simpleXX_generic(num_classes, imgDim):
    model = SimpleNet(num_classes=num_classes, n_dim=imgDim)
return model
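# A minimal smoke test, added for illustration (not from the original repo).
# With the kernel/pool settings above, a 66x66 input reduces to a 64-channel
# 4x4 feature map, i.e. the 1024 features the classifier expects; the input
# size is an assumption derived from the layer arithmetic, not a documented
# requirement.
if __name__ == '__main__':
    model = simpleXX_generic(num_classes=1, imgDim=3)
    dummy = Variable(torch.randn(2, 3, 66, 66))  # batch of two RGB images
    out = model(dummy)
    print(out.size())  # expected: (2, 1), values in (0, 1) from the sigmoid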
| mit |
showerst/openstates | openstates/ma/bills.py | 1 | 5391 | import re
import time
import itertools
from datetime import datetime
import lxml.html
from billy.scrape.bills import BillScraper, Bill
from .actions import Categorizer
class MABillScraper(BillScraper):
jurisdiction = 'ma'
categorizer = Categorizer()
def __init__(self, *args, **kwargs):
super(MABillScraper, self).__init__(*args, **kwargs)
# forcing these values so that 500s come back as skipped bills
# self.retry_attempts = 0
self.raise_errors = False
def scrape(self, chamber, session):
# for the chamber of the action
chamber_map = {'House': 'lower', 'Senate': 'upper', 'Joint': 'joint',
'Governor': 'executive'}
session_slug = session[:-2]
chamber_slug = 'House' if chamber == 'lower' else 'Senate'
# keep track of how many we've had to skip
skipped = 0
for n in itertools.count(1):
bill_id = '%s%d' % (chamber_slug[0], n)
bill_url = 'http://www.malegislature.gov/Bills/%s/%s/%s' % (
session_slug, chamber_slug, bill_id)
            # let's assume that if 10 bills are missing we're done
if skipped == 10:
break
html = self.get(bill_url, verify=False).text
if 'Unable to find the Bill' in html:
self.warning('skipping %s' % bill_url)
skipped += 1
continue
# sometimes the site breaks, missing vital data
if 'billShortDesc' not in html:
self.warning('truncated page on %s' % bill_url)
time.sleep(1)
html = self.get(bill_url, verify=False).text
if 'billShortDesc' not in html:
self.warning('skipping %s' % bill_url)
skipped += 1
continue
else:
skipped = 0
else:
skipped = 0
doc = lxml.html.fromstring(html)
doc.make_links_absolute('http://www.malegislature.gov/')
title = doc.xpath('//h2/span/text()')[0].strip()
desc = doc.xpath('//p[@class="billShortDesc"]/text()')[0]
            # for resolutions that do not always have a typical title
if (title == ''):
title = doc.xpath('//*[@id="billDetail"]/div[2]/p')[0].text_content().strip()
# create bill
bill = Bill(session, chamber, bill_id, title, summary=desc)
bill.add_source(bill_url)
# actions
for act_row in doc.xpath('//tbody[@class="bgwht"]/tr'):
date = act_row.xpath('./td[@headers="bDate"]/text()')[0]
date = datetime.strptime(date, "%m/%d/%Y")
actor_txt = act_row.xpath('./td[@headers="bBranch"]')[0].text_content().strip()
if actor_txt:
actor = chamber_map[actor_txt]
action = act_row.xpath('./td[@headers="bAction"]')[0].text_content().strip()
                # self.categorizer is the Categorizer defined in actions.py,
                # which builds on billy.scrape.actions.BaseCategorizer
attrs = self.categorizer.categorize(action)
bill.add_action(actor, action, date, **attrs)
# I tried to, as I was finding the sponsors, detect whether a
# sponsor was already known. One has to do this because an author
# is listed in the "Sponsors:" section and then the same person
# will be listed with others in the "Petitioners:" section. We are
# guessing that "Sponsors" are authors and "Petitioners" are
# co-authors. Does this make sense?
sponsors = dict((a.get('href'), a.text) for a in
doc.xpath('//p[@class="billReferral"]/a'))
petitioners = dict((a.get('href'), a.text) for a in
doc.xpath('//div[@id="billSummary"]/p[1]/a'))
if len(sponsors) == 0:
spons = doc.xpath('//p[@class="billReferral"]')[0].text_content()
spons = spons.strip()
spons = spons.split("\n")
cspons = []
for s in spons:
if s and s.strip() != "":
cspons.append(s)
sponsors = dict((s, s) for s in cspons)
# remove sponsors from petitioners
for k in sponsors:
petitioners.pop(k, None)
for sponsor in sponsors.values():
if sponsor == 'NONE':
continue
if sponsor is None:
continue
bill.add_sponsor('primary', sponsor)
for petitioner in petitioners.values():
                if petitioner == 'NONE':
continue
bill.add_sponsor('cosponsor', petitioner)
bill_text_url = doc.xpath(
'//a[contains(@href, "/Document/Bill/{}/")]/@href'.
format(session_slug))
if bill_text_url:
assert bill_text_url[0].endswith('.pdf'), "Handle other mimetypes"
bill.add_version('Current Text', bill_text_url[0],
mimetype='application/pdf')
self.save_bill(bill)
| gpl-3.0 |
peterjoel/servo | tests/wpt/web-platform-tests/tools/third_party/html5lib/html5lib/tests/test_encoding.py | 30 | 4801 | from __future__ import absolute_import, division, unicode_literals
import os
import pytest
from .support import get_data_files, test_dir, errorMessage, TestData as _TestData
from html5lib import HTMLParser, _inputstream
def test_basic_prescan_length():
data = "<title>Caf\u00E9</title><!--a--><meta charset='utf-8'>".encode('utf-8')
pad = 1024 - len(data) + 1
data = data.replace(b"-a-", b"-" + (b"a" * pad) + b"-")
assert len(data) == 1024 # Sanity
stream = _inputstream.HTMLBinaryInputStream(data, useChardet=False)
assert 'utf-8' == stream.charEncoding[0].name
def test_parser_reparse():
data = "<title>Caf\u00E9</title><!--a--><meta charset='utf-8'>".encode('utf-8')
pad = 10240 - len(data) + 1
data = data.replace(b"-a-", b"-" + (b"a" * pad) + b"-")
assert len(data) == 10240 # Sanity
stream = _inputstream.HTMLBinaryInputStream(data, useChardet=False)
assert 'windows-1252' == stream.charEncoding[0].name
p = HTMLParser(namespaceHTMLElements=False)
doc = p.parse(data, useChardet=False)
assert 'utf-8' == p.documentEncoding
assert doc.find(".//title").text == "Caf\u00E9"
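# The pad above pushes the <meta> tag past the 1024-byte prescan window, so
# the stream's first guess is windows-1252; the parser then meets the tag
# during tokenization and reparses, which is why documentEncoding ends up
# utf-8 even though the raw stream guess does not.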
@pytest.mark.parametrize("expected,data,kwargs", [
("utf-16le", b"\xFF\xFE", {"override_encoding": "iso-8859-2"}),
("utf-16be", b"\xFE\xFF", {"override_encoding": "iso-8859-2"}),
("utf-8", b"\xEF\xBB\xBF", {"override_encoding": "iso-8859-2"}),
("iso-8859-2", b"", {"override_encoding": "iso-8859-2", "transport_encoding": "iso-8859-3"}),
("iso-8859-2", b"<meta charset=iso-8859-3>", {"transport_encoding": "iso-8859-2"}),
("iso-8859-2", b"<meta charset=iso-8859-2>", {"same_origin_parent_encoding": "iso-8859-3"}),
("iso-8859-2", b"", {"same_origin_parent_encoding": "iso-8859-2", "likely_encoding": "iso-8859-3"}),
("iso-8859-2", b"", {"same_origin_parent_encoding": "utf-16", "likely_encoding": "iso-8859-2"}),
("iso-8859-2", b"", {"same_origin_parent_encoding": "utf-16be", "likely_encoding": "iso-8859-2"}),
("iso-8859-2", b"", {"same_origin_parent_encoding": "utf-16le", "likely_encoding": "iso-8859-2"}),
("iso-8859-2", b"", {"likely_encoding": "iso-8859-2", "default_encoding": "iso-8859-3"}),
("iso-8859-2", b"", {"default_encoding": "iso-8859-2"}),
("windows-1252", b"", {"default_encoding": "totally-bogus-string"}),
("windows-1252", b"", {}),
])
def test_parser_args(expected, data, kwargs):
stream = _inputstream.HTMLBinaryInputStream(data, useChardet=False, **kwargs)
assert expected == stream.charEncoding[0].name
p = HTMLParser()
p.parse(data, useChardet=False, **kwargs)
assert expected == p.documentEncoding
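# Taken together, the cases above document the precedence the binary input
# stream applies: a BOM wins over everything, then override_encoding, then
# transport_encoding, then a <meta charset>, then same_origin_parent_encoding
# (ignored when it is a UTF-16 variant), then likely_encoding, then
# default_encoding, with windows-1252 as the final fallback.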
@pytest.mark.parametrize("kwargs", [
{"override_encoding": "iso-8859-2"},
{"override_encoding": None},
{"transport_encoding": "iso-8859-2"},
{"transport_encoding": None},
{"same_origin_parent_encoding": "iso-8859-2"},
{"same_origin_parent_encoding": None},
{"likely_encoding": "iso-8859-2"},
{"likely_encoding": None},
{"default_encoding": "iso-8859-2"},
{"default_encoding": None},
{"foo_encoding": "iso-8859-2"},
{"foo_encoding": None},
])
def test_parser_args_raises(kwargs):
with pytest.raises(TypeError) as exc_info:
p = HTMLParser()
p.parse("", useChardet=False, **kwargs)
assert exc_info.value.args[0].startswith("Cannot set an encoding with a unicode input")
def runParserEncodingTest(data, encoding):
p = HTMLParser()
assert p.documentEncoding is None
p.parse(data, useChardet=False)
encoding = encoding.lower().decode("ascii")
assert encoding == p.documentEncoding, errorMessage(data, encoding, p.documentEncoding)
def runPreScanEncodingTest(data, encoding):
stream = _inputstream.HTMLBinaryInputStream(data, useChardet=False)
encoding = encoding.lower().decode("ascii")
# Very crude way to ignore irrelevant tests
if len(data) > stream.numBytesMeta:
return
assert encoding == stream.charEncoding[0].name, errorMessage(data, encoding, stream.charEncoding[0].name)
def test_encoding():
for filename in get_data_files("encoding"):
tests = _TestData(filename, b"data", encoding=None)
for test in tests:
yield (runParserEncodingTest, test[b'data'], test[b'encoding'])
yield (runPreScanEncodingTest, test[b'data'], test[b'encoding'])
# pylint:disable=wrong-import-position
try:
import chardet # noqa
except ImportError:
print("chardet not found, skipping chardet tests")
else:
def test_chardet():
with open(os.path.join(test_dir, "encoding", "chardet", "test_big5.txt"), "rb") as fp:
encoding = _inputstream.HTMLInputStream(fp.read()).charEncoding
assert encoding[0].name == "big5"
# pylint:enable=wrong-import-position
| mpl-2.0 |
Novasoft-India/OperERP-AM-Motors | openerp/tools/misc.py | 16 | 39088 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#.apidoc title: Utilities: tools.misc
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cProfile
import subprocess
import logging
import os
import socket
import sys
import threading
import time
import zipfile
from collections import defaultdict
from datetime import datetime
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# They were moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase)
def find_in_path(name):
try:
return which(name)
except IOError:
return None
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
return None
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
args2 = (prog,) + args
return subprocess.call(args2)
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
pop = subprocess.Popen((prog,) + args, bufsize= -1,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=(os.name=="posix"))
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
pop = subprocess.Popen((prog,) + args, bufsize= -1,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=(os.name=="posix"))
return pop.stdin, pop.stdout
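# Example usage (a sketch, assuming a 'sort' binary is on the PATH):
#   stdin, stdout = exec_command_pipe('sort')
#   stdin.write('b\na\n')
#   stdin.close()
#   print stdout.read()   # -> 'a\nb\n'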
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
"""Open a file from the OpenERP root, using a subdir folder.
Example::
        >>> file_open('hr/report/timesheet.xsl')
>>> file_open('addons/hr/report/timesheet.xsl')
>>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)
@param name name of the file
@param mode file open mode
@param subdir subdirectory
@param pathinfo if True returns tuple (fileobject, filepath)
@return fileobject if pathinfo is False else (fileobject, filepath)
"""
import openerp.modules as addons
adps = addons.module.ad_paths
rtp = os.path.normcase(os.path.abspath(config['root_path']))
basename = name
if os.path.isabs(name):
# It is an absolute path
# Is it below 'addons_path' or 'root_path'?
name = os.path.normcase(os.path.normpath(name))
for root in adps + [rtp]:
root = os.path.normcase(os.path.normpath(root)) + os.sep
if name.startswith(root):
base = root.rstrip(os.sep)
name = name[len(base) + 1:]
break
else:
# It is outside the OpenERP root: skip zipfile lookup.
base, name = os.path.split(name)
return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
elif subdir:
name = os.path.join(subdir, name)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
else:
name2 = name
# First, try to locate in addons_path
if subdir:
for adp in adps:
try:
return _fileopen(name2, mode=mode, basedir=adp,
pathinfo=pathinfo, basename=basename)
except IOError:
pass
# Second, try to locate in root_path
return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
name = os.path.normpath(os.path.join(basedir, path))
if basename is None:
basename = name
# Give higher priority to module directories, which is
# a more common case than zipped modules.
if os.path.isfile(name):
fo = open(name, mode)
if pathinfo:
return fo, name
return fo
# Support for loading modules in zipped form.
# This will not work for zipped modules that are sitting
# outside of known addons paths.
head = os.path.normpath(path)
zipname = False
while os.sep in head:
head, tail = os.path.split(head)
if not tail:
break
if zipname:
zipname = os.path.join(tail, zipname)
else:
zipname = tail
zpath = os.path.join(basedir, head + '.zip')
if zipfile.is_zipfile(zpath):
from cStringIO import StringIO
zfile = zipfile.ZipFile(zpath)
try:
fo = StringIO()
fo.write(zfile.read(os.path.join(
os.path.basename(head), zipname).replace(
os.sep, '/')))
fo.seek(0)
if pathinfo:
return fo, name
return fo
except Exception:
pass
# Not found
if name.endswith('.rml'):
raise IOError('Report %r doesn\'t exist or deleted' % basename)
raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
"""Flatten a list of elements into a uniqu list
Author: Christophe Simonis ([email protected])
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
def isiterable(x):
return hasattr(x, "__iter__")
r = []
for e in list:
if isiterable(e):
map(r.append, flatten(e))
else:
r.append(e)
return r
def reverse_enumerate(l):
"""Like enumerate but in the other sens
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> it.next()
(2, 'c')
>>> it.next()
(1, 'b')
>>> it.next()
(0, 'a')
>>> it.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return izip(xrange(len(l)-1, -1, -1), reversed(l))
#----------------------------------------------------------
# SMS
#----------------------------------------------------------
# text must be latin-1 encoded
def sms_send(user, password, api_id, text, to):
import urllib
url = "http://api.urlsms.com/SendSMS.aspx"
#url = "http://196.7.150.220/http/sendmsg"
params = urllib.urlencode({'UserID': user, 'Password': password, 'SenderID': api_id, 'MsgText': text, 'RecipientMobileNo':to})
urllib.urlopen(url+"?"+params)
# FIXME: Use the logger if there is an error
return True
class UpdateableStr(local):
""" Class that stores an updateable string (used in wizards)
"""
def __init__(self, string=''):
self.string = string
def __str__(self):
return str(self.string)
def __repr__(self):
return str(self.string)
def __nonzero__(self):
return bool(self.string)
class UpdateableDict(local):
"""Stores an updateable dict to use in wizards
"""
def __init__(self, dict=None):
if dict is None:
dict = {}
self.dict = dict
def __str__(self):
return str(self.dict)
def __repr__(self):
return str(self.dict)
def clear(self):
return self.dict.clear()
def keys(self):
return self.dict.keys()
def __setitem__(self, i, y):
self.dict.__setitem__(i, y)
def __getitem__(self, i):
return self.dict.__getitem__(i)
def copy(self):
return self.dict.copy()
def iteritems(self):
return self.dict.iteritems()
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return self.dict.itervalues()
def pop(self, k, d=None):
return self.dict.pop(k, d)
def popitem(self):
return self.dict.popitem()
def setdefault(self, k, d=None):
return self.dict.setdefault(k, d)
def update(self, E, **F):
        return self.dict.update(E, **F)
def values(self):
return self.dict.values()
def get(self, k, d=None):
return self.dict.get(k, d)
def has_key(self, k):
return self.dict.has_key(k)
def items(self):
return self.dict.items()
def __cmp__(self, y):
return self.dict.__cmp__(y)
def __contains__(self, k):
return self.dict.__contains__(k)
def __delitem__(self, y):
return self.dict.__delitem__(y)
def __eq__(self, y):
return self.dict.__eq__(y)
def __ge__(self, y):
return self.dict.__ge__(y)
def __gt__(self, y):
return self.dict.__gt__(y)
def __hash__(self):
return self.dict.__hash__()
def __iter__(self):
return self.dict.__iter__()
def __le__(self, y):
return self.dict.__le__(y)
def __len__(self):
return self.dict.__len__()
def __lt__(self, y):
return self.dict.__lt__(y)
def __ne__(self, y):
return self.dict.__ne__(y)
class currency(float):
""" Deprecate
.. warning::
    Don't use! Use res.currency.round()
"""
def __init__(self, value, accuracy=2, rounding=None):
if rounding is None:
rounding=10**-accuracy
self.rounding=rounding
self.accuracy=accuracy
def __new__(cls, value, accuracy=2, rounding=None):
return float.__new__(cls, round(value, accuracy))
#def __str__(self):
# display_value = int(self*(10**(-self.accuracy))/self.rounding)*self.rounding/(10**(-self.accuracy))
# return str(display_value)
def to_xml(s):
    return s.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')
def get_iso_codes(lang):
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
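# Illustrative behaviour (derived from the code above):
#   get_iso_codes('fr_FR') -> 'fr' (country part matches the language part)
#   get_iso_codes('fr_BE') -> 'fr_BE' (they differ, so the full code is kept)
#   get_iso_codes('sr@latin') -> 'sr@latin' (no underscore, returned unchanged)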
ALL_LANGUAGES = {
'ab_RU': u'Abkhazian / аҧсуа',
'am_ET': u'Amharic / አምሃርኛ',
'ar_SY': u'Arabic / الْعَرَبيّة',
'bg_BG': u'Bulgarian / български език',
'bs_BS': u'Bosnian / bosanski jezik',
'ca_ES': u'Catalan / Català',
'cs_CZ': u'Czech / Čeština',
'da_DK': u'Danish / Dansk',
'de_DE': u'German / Deutsch',
'el_GR': u'Greek / Ελληνικά',
'en_CA': u'English (CA)',
'en_GB': u'English (UK)',
'en_US': u'English (US)',
'es_AR': u'Spanish (AR) / Español (AR)',
'es_BO': u'Spanish (BO) / Español (BO)',
'es_CL': u'Spanish (CL) / Español (CL)',
'es_CO': u'Spanish (CO) / Español (CO)',
'es_CR': u'Spanish (CR) / Español (CR)',
'es_DO': u'Spanish (DO) / Español (DO)',
'es_EC': u'Spanish (EC) / Español (EC)',
'es_ES': u'Spanish / Español',
'es_GT': u'Spanish (GT) / Español (GT)',
'es_HN': u'Spanish (HN) / Español (HN)',
'es_MX': u'Spanish (MX) / Español (MX)',
'es_NI': u'Spanish (NI) / Español (NI)',
'es_PA': u'Spanish (PA) / Español (PA)',
'es_PE': u'Spanish (PE) / Español (PE)',
'es_PR': u'Spanish (PR) / Español (PR)',
'es_PY': u'Spanish (PY) / Español (PY)',
'es_SV': u'Spanish (SV) / Español (SV)',
'es_UY': u'Spanish (UY) / Español (UY)',
'es_VE': u'Spanish (VE) / Español (VE)',
'et_EE': u'Estonian / Eesti keel',
'fa_IR': u'Persian / فارس',
'fi_FI': u'Finnish / Suomi',
'fr_BE': u'French (BE) / Français (BE)',
'fr_CH': u'French (CH) / Français (CH)',
'fr_FR': u'French / Français',
'gl_ES': u'Galician / Galego',
'gu_IN': u'Gujarati / ગુજરાતી',
'he_IL': u'Hebrew / עִבְרִי',
'hi_IN': u'Hindi / हिंदी',
'hr_HR': u'Croatian / hrvatski jezik',
'hu_HU': u'Hungarian / Magyar',
'id_ID': u'Indonesian / Bahasa Indonesia',
'it_IT': u'Italian / Italiano',
'iu_CA': u'Inuktitut / ᐃᓄᒃᑎᑐᑦ',
'ja_JP': u'Japanese / 日本語',
'ko_KP': u'Korean (KP) / 한국어 (KP)',
'ko_KR': u'Korean (KR) / 한국어 (KR)',
'lt_LT': u'Lithuanian / Lietuvių kalba',
'lv_LV': u'Latvian / latviešu valoda',
'ml_IN': u'Malayalam / മലയാളം',
'mn_MN': u'Mongolian / монгол',
'nb_NO': u'Norwegian Bokmål / Norsk bokmål',
'nl_NL': u'Dutch / Nederlands',
'nl_BE': u'Flemish (BE) / Vlaams (BE)',
'oc_FR': u'Occitan (FR, post 1500) / Occitan',
'pl_PL': u'Polish / Język polski',
'pt_BR': u'Portuguese (BR) / Português (BR)',
'pt_PT': u'Portuguese / Português',
'ro_RO': u'Romanian / română',
'ru_RU': u'Russian / русский язык',
'si_LK': u'Sinhalese / සිංහල',
'sl_SI': u'Slovenian / slovenščina',
'sk_SK': u'Slovak / Slovenský jazyk',
'sq_AL': u'Albanian / Shqip',
'sr_RS': u'Serbian (Cyrillic) / српски',
'sr@latin': u'Serbian (Latin) / srpski',
'sv_SE': u'Swedish / svenska',
'te_IN': u'Telugu / తెలుగు',
'tr_TR': u'Turkish / Türkçe',
'vi_VN': u'Vietnamese / Tiếng Việt',
'uk_UA': u'Ukrainian / українська',
'ur_PK': u'Urdu / اردو',
'zh_CN': u'Chinese (CN) / 简体中文',
'zh_HK': u'Chinese (HK)',
'zh_TW': u'Chinese (TW) / 正體字',
'th_TH': u'Thai / ภาษาไทย',
'tlh_TLH': u'Klingon',
}
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, unicode)]
"""
return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1])
def get_user_companies(cr, user):
def _get_company_children(cr, ids):
if not ids:
return []
cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
res = [x[0] for x in cr.fetchall()]
res.extend(_get_company_children(cr, res))
return res
cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
user_comp = cr.fetchone()[0]
if not user_comp:
return []
return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
"""
    Input: an account or invoice number.
    Output: the same number completed with the recursive mod10
    check digit.
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
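# Worked example (illustrative): mod10r('1234') carries the reports 9, 9, 4, 3
# through the codec table and appends (10 - 3) % 10, giving '12347'. Non-digit
# characters are copied through without affecting the check digit.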
def human_size(sz):
"""
Return the size in a human readable format
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb')
if isinstance(sz,basestring):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
return result
return wrapper
__icons_list = ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD',
'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER',
'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE',
'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO',
'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT',
'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE',
'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM',
'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK',
'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK',
'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC',
'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL',
'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD',
'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY',
'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 'STOCK_MEDIA_REWIND',
'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW',
'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES',
'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT',
'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED',
'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT',
'STOCK_SORT_ASCENDING', 'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK',
'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE',
'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100',
'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT',
'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase',
'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner',
'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph',
'terp-check','terp-go-month','terp-go-year','terp-go-today','terp-document-new','terp-camera_test',
'terp-emblem-important','terp-gtk-media-pause','terp-gtk-stop','terp-gnome-cpu-frequency-applet+',
'terp-dialog-close','terp-gtk-jump-to-rtl','terp-gtk-jump-to-ltr','terp-accessories-archiver',
'terp-stock_align_left_24','terp-stock_effects-object-colorize','terp-go-home','terp-gtk-go-back-rtl',
'terp-gtk-go-back-ltr','terp-personal','terp-personal-','terp-personal+','terp-accessories-archiver-minus',
'terp-accessories-archiver+','terp-stock_symbol-selection','terp-call-start','terp-dolar',
'terp-face-plain','terp-folder-blue','terp-folder-green','terp-folder-orange','terp-folder-yellow',
'terp-gdu-smart-failing','terp-go-week','terp-gtk-select-all','terp-locked','terp-mail-forward',
'terp-mail-message-new','terp-mail-replied','terp-rating-rated','terp-stage','terp-stock_format-scientific',
'terp-dolar_ok!','terp-idea','terp-stock_format-default','terp-mail-','terp-mail_delete'
]
def icons(*a, **kw):
global __icons_list
return [(x, x) for x in __icons_list ]
def extract_zip_file(zip_file, outdirectory):
zf = zipfile.ZipFile(zip_file, 'r')
out = outdirectory
for path in zf.namelist():
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
if not tgt.endswith(os.sep):
fp = open(tgt, 'wb')
fp.write(zf.read(path))
fp.close()
zf.close()
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
for binding to an interface, but it could be used as basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', '\0' * nbytes)
#print 'names: ', names
outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tostring()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split('\0', 1)[0]
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
for ifname in [iface for iface in ifaces if iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
# RATIONALE BEHIND TIMESTAMP CALCULATIONS AND TIMEZONE MANAGEMENT:
# The server side never does any timestamp calculation, always
# sends them in a naive (timezone agnostic) format supposed to be
# expressed within the server timezone, and expects the clients to
# provide timestamps in the server timezone as well.
# It stores all timestamps in the database in naive format as well,
# which also expresses the time in the server timezone.
# For this reason the server makes its timezone name available via the
# common/timezone_get() rpc method, which clients need to read
# to know the appropriate time offset to use when reading/writing
# times.
def get_win32_timezone():
"""Attempt to return the "standard name" of the current timezone on a win32 system.
@return the standard name of the current win32 timezone, or False if it cannot be found.
"""
res = False
if sys.platform == "win32":
try:
import _winreg
hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
current_tz_key = _winreg.OpenKey(hklm, r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", 0,_winreg.KEY_ALL_ACCESS)
res = str(_winreg.QueryValueEx(current_tz_key,"StandardName")[0]) # [0] is value, [1] is type code
_winreg.CloseKey(current_tz_key)
_winreg.CloseKey(hklm)
except Exception:
pass
return res
def detect_server_timezone():
"""Attempt to detect the timezone to use on the server side.
Defaults to UTC if no working timezone can be found.
@return the timezone identifier as expected by pytz.timezone.
"""
try:
import pytz
except Exception:
_logger.warning("Python pytz module is not available. "
"Timezone will be set to UTC by default.")
return 'UTC'
# Option 1: the configuration option (did not exist before, so no backwards compatibility issue)
# Option 2: to be backwards compatible with 5.0 or earlier, the value from time.tzname[0], but only if it is known to pytz
# Option 3: the environment variable TZ
sources = [ (config['timezone'], 'OpenERP configuration'),
(time.tzname[0], 'time.tzname'),
(os.environ.get('TZ',False),'TZ environment variable'), ]
# Option 4: OS-specific: /etc/timezone on Unix
if os.path.exists("/etc/timezone"):
        tz_value = False
        f = None
        try:
            f = open("/etc/timezone")
            tz_value = f.read(128).strip()
        except Exception:
            pass
        finally:
            if f:
                f.close()
sources.append((tz_value,"/etc/timezone file"))
# Option 5: timezone info from registry on Win32
if sys.platform == "win32":
# Timezone info is stored in windows registry.
# However this is not likely to work very well as the standard name
# of timezones in windows is rarely something that is known to pytz.
# But that's ok, it is always possible to use a config option to set
# it explicitly.
sources.append((get_win32_timezone(),"Windows Registry"))
for (value,source) in sources:
if value:
try:
tz = pytz.timezone(value)
_logger.info("Using timezone %s obtained from %s.", tz.zone, source)
return value
except pytz.UnknownTimeZoneError:
_logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value)
_logger.warning("No valid timezone could be detected, using default UTC "
"timezone. You can specify it explicitly with option 'timezone' in "
"the server configuration.")
return 'UTC'
def get_server_timezone():
return "UTC"
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
# there is not tzinfo in a datetime value (e.g when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
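# Illustrative use of the map (a sketch, not part of the module): a client-side
# format can be reduced to C89 directives before calling strftime, e.g.
#   fmt = '%T'
#   for posix, std in DATETIME_FORMATS_MAP.items():
#       fmt = fmt.replace(posix, std)   # '%T' -> '%H:%M:%S'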
def server_to_local_timestamp(src_tstamp_str, src_format, dst_format, dst_tz_name,
tz_offset=True, ignore_unparsable_time=True):
"""
Convert a source timestamp string into a destination timestamp string, attempting to apply the
correct offset if both the server and local timezone are recognized, or no
offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ).
WARNING: This method is here to allow formatting dates correctly for inclusion in strings where
the client would not be able to format/offset it correctly. DO NOT use it for returning
date fields directly, these are supposed to be handled by the client!!
@param src_tstamp_str: the str value containing the timestamp in the server timezone.
@param src_format: the format to use when parsing the server timestamp.
@param dst_format: the format to use when formatting the resulting timestamp for the local/client timezone.
@param dst_tz_name: name of the destination timezone (such as the 'tz' value of the client context)
@param ignore_unparsable_time: if True, return False if src_tstamp_str cannot be parsed
using src_format or formatted using dst_format.
@return local/client formatted timestamp, expressed in the local/client timezone if possible
and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined.
"""
if not src_tstamp_str:
return False
res = src_tstamp_str
if src_format and dst_format:
# find out server timezone
server_tz = get_server_timezone()
try:
# dt_value needs to be a datetime.datetime object (so no time.struct_time or mx.DateTime.DateTime here!)
dt_value = datetime.strptime(src_tstamp_str, src_format)
if tz_offset and dst_tz_name:
try:
import pytz
src_tz = pytz.timezone(server_tz)
dst_tz = pytz.timezone(dst_tz_name)
src_dt = src_tz.localize(dt_value, is_dst=True)
dt_value = src_dt.astimezone(dst_tz)
except Exception:
pass
res = dt_value.strftime(dst_format)
except Exception:
# Normal ways to end up here are if strptime or strftime failed
if not ignore_unparsable_time:
return False
return res
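# Illustrative call (assumes pytz is installed; date and timezone are examples):
#   server_to_local_timestamp('2011-01-01 14:00:00',
#                             DEFAULT_SERVER_DATETIME_FORMAT,
#                             DEFAULT_SERVER_DATETIME_FORMAT, 'Europe/Brussels')
# would return '2011-01-01 15:00:00', given the UTC server timezone fixed above.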
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
@param ``piece_maker``: function to build the pieces
from the slices (tuple,list,...)
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
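# Illustrative example: list(split_every(2, range(5))) -> [(0, 1), (2, 3), (4,)];
# pass piece_maker=list to get lists instead of tuples.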
if __name__ == '__main__':
import doctest
doctest.testmod()
class upload_data_thread(threading.Thread):
def __init__(self, email, data, type):
self.args = [('email',email),('type',type),('data',data)]
super(upload_data_thread,self).__init__()
def run(self):
try:
import urllib
args = urllib.urlencode(self.args)
fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
fp.read()
fp.close()
except Exception:
pass
def upload_data(email, data, type='SURVEY'):
a = upload_data_thread(email, data, type)
a.start()
return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
""" Read the values of ``field´´ for the given ``ids´´ and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
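# Illustrative sketch (model and ids are hypothetical): grouping partners by
# company could return {1: [10, 12], 2: [11]}, i.e. company id -> record ids,
# with many2one (id, name) tuples reduced to the bare id by the key handling:
#   get_and_group_by_field(cr, uid, partner_obj, [10, 11, 12], 'company_id')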
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
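# Illustrative example (objects are hypothetical):
#   attrgetter('partner.name')(invoice) == invoice.partner.name
#   attrgetter('partner.name', 'amount')(invoice) -> (invoice.partner.name,
#                                                     invoice.amount)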
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
       or escaping, keeping the original string untouched. The name comes from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
Some examples (notice that there are never quotes surrounding
    the ``active_id`` name):
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
>>> print d
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
"""Temporary suppress the logging.
Can be used as context manager or decorator.
@mute_logger('openerp.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('openerp.foo.bar'):
            do_stuff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
        ``int``, index of the last yielded element in the stream. If the stream
        has ended, the index is one past the end of the stream.
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
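# Illustrative behaviour: for s = CountingStream(iter('ab')), s.next() returns
# 'a' with s.index == 0, then 'b' with s.index == 1; once exhausted, s.index
# stays at 2, one past the last element.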
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-d', '--database', '-u', '--update', '-i', '--init']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
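# Illustrative call (argv values are hypothetical): with sys.argv equal to
# ['openerp-server', '-d', 'prod', '-u', 'all', '--xmlrpc-port=8069'],
# stripped_sys_argv() would return ['openerp-server', '--xmlrpc-port=8069'],
# dropping the database and update flags together with their values.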
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vetu11/piloco | telegram/inlinequeryresultcontact.py | 1 | 3739 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram InlineQueryResultContact"""
from telegram import InlineQueryResult, InlineKeyboardMarkup, InputMessageContent
class InlineQueryResultContact(InlineQueryResult):
"""Represents a contact with a phone number. By default, this contact will be sent by the user.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the contact.
Attributes:
phone_number (str): Contact's phone number.
first_name (str): Contact's first name.
last_name (Optional[str]): Contact's last name.
reply_markup (Optional[:class:`telegram.InlineKeyboardMarkup`]): Inline keyboard attached
to the message.
input_message_content (Optional[:class:`telegram.InputMessageContent`]): Content of the
message to be sent instead of the contact.
thumb_url (Optional[str]): Url of the thumbnail for the result.
thumb_width (Optional[int]): Thumbnail width.
thumb_height (Optional[int]): Thumbnail height.
Args:
id (str):
phone_number (str):
first_name (str):
last_name (Optional[str]):
reply_markup (Optional[:class:`telegram.InlineKeyboardMarkup`]):
input_message_content (Optional[:class:`telegram.InputMessageContent`]):
thumb_url (Optional[str]): Url of the thumbnail for the result.
thumb_width (Optional[int]):
thumb_height (Optional[int]):
**kwargs (dict): Arbitrary keyword arguments.
"""
def __init__(self,
id,
phone_number,
first_name,
last_name=None,
reply_markup=None,
input_message_content=None,
thumb_url=None,
thumb_width=None,
thumb_height=None,
**kwargs):
# Required
super(InlineQueryResultContact, self).__init__('contact', id)
self.phone_number = phone_number
self.first_name = first_name
# Optionals
if last_name:
self.last_name = last_name
if reply_markup:
self.reply_markup = reply_markup
if input_message_content:
self.input_message_content = input_message_content
if thumb_url:
self.thumb_url = thumb_url
if thumb_width:
self.thumb_width = thumb_width
if thumb_height:
self.thumb_height = thumb_height
@staticmethod
def de_json(data, bot):
data = super(InlineQueryResultContact, InlineQueryResultContact).de_json(data, bot)
data['reply_markup'] = InlineKeyboardMarkup.de_json(data.get('reply_markup'), bot)
data['input_message_content'] = InputMessageContent.de_json(
data.get('input_message_content'), bot)
return InlineQueryResultContact(**data)
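# Illustrative construction (all values are placeholders):
#   InlineQueryResultContact(id='1', phone_number='+123456789',
#                            first_name='Alice', thumb_width=48, thumb_height=48)
# Only id, phone_number and first_name are required; the rest stay unset.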
| gpl-3.0 |
ddurst/zamboni | mkt/features/tests/test_utils_.py | 6 | 2321 | from django.test.client import RequestFactory
import mock
from nose.tools import eq_
import mkt.site.tests
from mkt.constants.features import FeatureProfile
from mkt.features.utils import load_feature_profile
class TestLoadFeatureProfile(mkt.site.tests.TestCase):
def setUp(self):
super(TestLoadFeatureProfile, self).setUp()
self.profile = FeatureProfile(apps=True)
self.signature = self.profile.to_signature()
def test_does_nothing_on_desktop(self):
request = RequestFactory().get('/?dev=desktop&pro=%s' % self.signature)
load_feature_profile(request)
eq_(request.feature_profile, None)
def test_does_nothing_without_dev_param(self):
request = RequestFactory().get('/?pro=%s' % self.signature)
load_feature_profile(request)
eq_(request.feature_profile, None)
        request = RequestFactory().get(
            '/?device=mobile&pro=%s' % self.signature)
load_feature_profile(request)
eq_(request.feature_profile, None)
def test_does_nothing_without_profile_signature(self):
request = RequestFactory().get('/?dev=firefoxos')
load_feature_profile(request)
eq_(request.feature_profile, None)
def test_does_nothing_if_invalid_profile_signature_is_passed(self):
request = RequestFactory().get('/?dev=firefoxos&pro=whatever')
load_feature_profile(request)
eq_(request.feature_profile, None)
def test_works(self):
request = RequestFactory().get(
'/?dev=firefoxos&pro=%s' % self.signature)
load_feature_profile(request)
eq_(request.feature_profile.to_list(), self.profile.to_list())
@mock.patch('mkt.features.utils.FeatureProfile.from_signature')
def test_caching_on_request_property(self, from_signature_mock):
fake_feature_profile = object()
from_signature_mock.return_value = fake_feature_profile
request = RequestFactory().get(
'/?dev=firefoxos&pro=%s' % self.signature)
load_feature_profile(request)
eq_(request.feature_profile, fake_feature_profile)
from_signature_mock.return_value = None
load_feature_profile(request)
# Should not be None thanks to the property caching.
eq_(request.feature_profile, fake_feature_profile)
| bsd-3-clause |
RanadeepPolavarapu/kuma | vendor/packages/pygments/lexers/php.py | 72 | 9769 | # -*- coding: utf-8 -*-
"""
pygments.lexers.php
~~~~~~~~~~~~~~~~~~~
Lexers for PHP and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, using, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other
from pygments.util import get_bool_opt, get_list_opt, iteritems
__all__ = ['ZephirLexer', 'PhpLexer']
class ZephirLexer(RegexLexer):
"""
For `Zephir language <http://zephir-lang.com/>`_ source code.
    Zephir is a compiled high-level language aimed
    at the creation of C extensions for PHP.
.. versionadded:: 2.0
"""
name = 'Zephir'
aliases = ['zephir']
filenames = ['*.zep']
zephir_keywords = ['fetch', 'echo', 'isset', 'empty']
zephir_type = ['bit', 'bits', 'string']
flags = re.DOTALL | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|->|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|loop|'
r'require|inline|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'namespace|use|extends|this|fetch|isset|unset|echo|fetch|likely|unlikely|'
r'empty)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|bool|char|class|const|double|enum|export|extends|final|'
r'native|goto|implements|import|int|string|interface|long|ulong|char|uchar|'
r'float|unsigned|private|protected|public|short|static|self|throws|reverse|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|_REQUEST|_COOKIE|_SESSION|'
r'_GET|_POST|_SERVER|this|stdClass|range|count|iterator|'
r'window)\b', Name.Builtin),
(r'[$a-zA-Z_][\w\\]*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class PhpLexer(RegexLexer):
"""
For `PHP <http://www.php.net/>`_ source code.
For PHP embedded in HTML, use the `HtmlPhpLexer`.
Additional options accepted:
`startinline`
If given and ``True`` the lexer starts highlighting with
php code (i.e.: no starting ``<?php`` required). The default
is ``False``.
`funcnamehighlighting`
If given and ``True``, highlight builtin function names
(default: ``True``).
`disabledmodules`
If given, must be a list of module names whose function names
should not be highlighted. By default all modules are highlighted
except the special ``'unknown'`` module that includes functions
that are known to php but are undocumented.
To get a list of allowed modules have a look into the
`_php_builtins` module:
.. sourcecode:: pycon
>>> from pygments.lexers._php_builtins import MODULES
>>> MODULES.keys()
['PHP Options/Info', 'Zip', 'dba', ...]
In fact the names of those modules match the module names from
the php documentation.
"""
name = 'PHP'
aliases = ['php', 'php3', 'php4', 'php5']
filenames = ['*.php', '*.php[345]', '*.inc']
mimetypes = ['text/x-php']
# Note that a backslash is included in the following two patterns
# PHP uses a backslash as a namespace separator
_ident_char = r'[\\\w]|[^\x00-\x7f]'
_ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
_ident_end = r'(?:' + _ident_char + ')*'
_ident_inner = _ident_begin + _ident_end
flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'<\?(php)?', Comment.Preproc, 'php'),
(r'[^<]+', Other),
(r'<', Other)
],
'php': [
(r'\?>', Comment.Preproc, '#pop'),
(r'<<<([\'"]?)(' + _ident_inner + r')\1\n.*?\n\s*\2;?\n', String),
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(r'//.*?\n', Comment.Single),
# put the empty comment here, it is otherwise seen as
# the start of a docstring
(r'/\*\*/', Comment.Multiline),
(r'/\*\*.*?\*/', String.Doc),
(r'/\*.*?\*/', Comment.Multiline),
(r'(->|::)(\s*)(' + _ident_inner + ')',
bygroups(Operator, Text, Name.Attribute)),
(r'[~!%^&*+=|:.<>/@-]+', Operator),
(r'\?', Operator), # don't add to the charclass above!
(r'[\[\]{}();,]+', Punctuation),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
(r'(function)(\s+)(&?)(\s*)',
bygroups(Keyword, Text, Operator, Text), 'functionname'),
(r'(const)(\s+)(' + _ident_inner + ')',
bygroups(Keyword, Text, Name.Constant)),
(r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
r'FALSE|print|for|require|continue|foreach|require_once|'
r'declare|return|default|static|do|switch|die|stdClass|'
r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
r'virtual|endfor|include_once|while|endforeach|global|__FILE__|'
r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
r'implements|public|private|protected|abstract|clone|try|'
r'catch|throw|this|use|namespace|trait|yield|'
r'finally)\b', Keyword),
(r'(true|false|null)\b', Keyword.Constant),
(r'\$\{\$+' + _ident_inner + '\}', Name.Variable),
(r'\$+' + _ident_inner, Name.Variable),
(_ident_inner, Name.Other),
(r'(\d+\.\d*|\d*\.\d+)(e[+-]?[0-9]+)?', Number.Float),
(r'\d+e[+-]?[0-9]+', Number.Float),
(r'0[0-7]+', Number.Oct),
(r'0x[a-f0-9]+', Number.Hex),
(r'\d+', Number.Integer),
(r'0b[01]+', Number.Bin),
(r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
(r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
(r'"', String.Double, 'string'),
],
'classname': [
(_ident_inner, Name.Class, '#pop')
],
'functionname': [
(_ident_inner, Name.Function, '#pop')
],
'string': [
(r'"', String.Double, '#pop'),
(r'[^{$"\\]+', String.Double),
(r'\\([nrt"$\\]|[0-7]{1,3}|x[0-9a-f]{1,2})', String.Escape),
(r'\$' + _ident_inner + '(\[\S+?\]|->' + _ident_inner + ')?',
String.Interpol),
(r'(\{\$\{)(.*?)(\}\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\{)(\$.*?)(\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\$\{)(\S+)(\})',
bygroups(String.Interpol, Name.Variable, String.Interpol)),
(r'[${\\]+', String.Double)
],
}
def __init__(self, **options):
self.funcnamehighlighting = get_bool_opt(
options, 'funcnamehighlighting', True)
self.disabledmodules = get_list_opt(
options, 'disabledmodules', ['unknown'])
self.startinline = get_bool_opt(options, 'startinline', False)
# private option argument for the lexer itself
if '_startinline' in options:
self.startinline = options.pop('_startinline')
# collect activated functions in a set
self._functions = set()
if self.funcnamehighlighting:
from pygments.lexers._php_builtins import MODULES
for key, value in iteritems(MODULES):
if key not in self.disabledmodules:
self._functions.update(value)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
if self.startinline:
stack.append('php')
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Other:
if value in self._functions:
yield index, Name.Builtin, value
continue
yield index, token, value
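    # Illustrative use (a minimal sketch, not part of the lexer itself):
    #   from pygments import highlight
    #   from pygments.formatters import HtmlFormatter
    #   highlight('$x = 1;', PhpLexer(startinline=True), HtmlFormatter())
    # startinline=True highlights a bare snippet without a leading '<?php'.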
def analyse_text(text):
rv = 0.0
if re.search(r'<\?(?!xml)', text):
rv += 0.3
return rv
| mpl-2.0 |
windofthesky/ansible | contrib/inventory/spacewalk.py | 24 | 8569 | #!/bin/env python
"""
Spacewalk external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is dependent upon the spacealk-reports package being installed
on the same machine. It is basically a CSV-to-JSON converter from the
output of "spacewalk-report system-groups-systems|inventory".
Tested with Ansible 1.9.2 and spacewalk 2.3
"""
#
# Author:: Jon Miller <[email protected]>
# Copyright:: Copyright (c) 2013, Jon Miller
#
# Extended for support of multiple organizations and
# adding the "_meta" dictionary to --list output by
# Bernhard Lichtinger <[email protected]> 2015
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import time
from optparse import OptionParser
import subprocess
import ConfigParser
try:
import json
except:
import simplejson as json
base_dir = os.path.dirname(os.path.realpath(__file__))
SW_REPORT = '/usr/bin/spacewalk-report'
CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports")
CACHE_AGE = 300 # 5min
INI_FILE = os.path.join(base_dir, "spacewalk.ini")
# Sanity check
if not os.path.exists(SW_REPORT):
print >> sys.stderr, 'Error: %s is required for operation.' % (SW_REPORT)
sys.exit(1)
# Pre-startup work
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
    os.chmod(CACHE_DIR, 02775)  # octal mode: setgid + rwxrwxr-x
# Helper functions
#------------------------------
def spacewalk_report(name):
"""Yield a dictionary form of each CSV output produced by the specified
spacewalk-report
"""
cache_filename = os.path.join(CACHE_DIR, name)
if not os.path.exists(cache_filename) or \
(time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE:
# Update the cache
fh = open(cache_filename, 'w')
p = subprocess.Popen([SW_REPORT, name], stdout=fh)
p.wait()
fh.close()
lines = open(cache_filename, 'r').readlines()
keys = lines[0].strip().split(',')
# add 'spacewalk_' prefix to the keys
keys = [ 'spacewalk_' + key for key in keys ]
for line in lines[1:]:
values = line.strip().split(',')
if len(keys) == len(values):
yield dict(zip(keys, values))
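# Illustrative iteration (field names follow the CSV headers of the report,
# prefixed with 'spacewalk_'; the report name is an example):
#   for system in spacewalk_report('inventory'):
#       print system['spacewalk_hostname']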
# Options
#------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of servers for Ansible")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
parser.add_option('-H', '--human', dest="human",
default=False, action="store_true",
help="Produce a friendlier version of either server list or host detail")
parser.add_option('-o', '--org', default=None, dest="org_number",
help="Limit to spacewalk organization number")
parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true",
help="Prefix the group name with the organization number")
(options, args) = parser.parse_args()
# read spacewalk.ini if present
#------------------------------
if os.path.exists(INI_FILE):
config = ConfigParser.SafeConfigParser()
config.read(INI_FILE)
if config.has_option('spacewalk' , 'cache_age'):
CACHE_AGE = config.get('spacewalk' , 'cache_age')
if not options.org_number and config.has_option('spacewalk' , 'org_number'):
options.org_number = config.get('spacewalk' , 'org_number')
if not options.prefix_org_name and config.has_option('spacewalk' , 'prefix_org_name'):
options.prefix_org_name = config.getboolean('spacewalk' , 'prefix_org_name')
# Generate dictionary for mapping group_id to org_id
#------------------------------
org_groups = {}
try:
for group in spacewalk_report('system-groups'):
org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id']
except (OSError), e:
print >> sys.stderr, 'Problem executing the command "%s system-groups": %s' % \
(SW_REPORT, str(e))
sys.exit(2)
# List out the known server from Spacewalk
#------------------------------
if options.list:
# to build the "_meta"-Group with hostvars first create dictionary for later use
host_vars = {}
try:
for item in spacewalk_report('inventory'):
host_vars[ item['spacewalk_profile_name'] ] = dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in item.items() )
except (OSError), e:
print >> sys.stderr, 'Problem executing the command "%s inventory": %s' % \
(SW_REPORT, str(e))
sys.exit(2)
groups = {}
meta = { "hostvars" : {} }
try:
for system in spacewalk_report('system-groups-systems'):
# first get org_id of system
org_id = org_groups[ system['spacewalk_group_id'] ]
# shall we add the org_id as prefix to the group name:
if options.prefix_org_name:
prefix = org_id + "-"
group_name = prefix + system['spacewalk_group_name']
else:
group_name = system['spacewalk_group_name']
# if we are limited to one organization:
if options.org_number:
if org_id == options.org_number:
if group_name not in groups:
groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]:
meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ]
# or we list all groups and systems:
else:
if group_name not in groups:
groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]:
meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ]
except (OSError), e:
print >> sys.stderr, 'Problem executing the command "%s system-groups-systems": %s' % \
(SW_REPORT, str(e))
sys.exit(2)
if options.human:
for group, systems in groups.iteritems():
print '[%s]\n%s\n' % (group, '\n'.join(systems))
else:
final = dict( [ (k, list(s)) for k, s in groups.iteritems() ] )
final["_meta"] = meta
print json.dumps( final )
#print json.dumps(groups)
sys.exit(0)
# Return a details information concerning the spacewalk server
#------------------------------
elif options.host:
host_details = {}
try:
for system in spacewalk_report('inventory'):
if system['spacewalk_hostname'] == options.host:
host_details = system
break
except (OSError), e:
print >> sys.stderr, 'Problem executing the command "%s inventory": %s' % \
(SW_REPORT, str(e))
sys.exit(2)
if options.human:
print 'Host: %s' % options.host
for k, v in host_details.iteritems():
print ' %s: %s' % (k, '\n '.join(v.split(';')))
else:
print json.dumps( dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in host_details.items() ) )
sys.exit(0)
else:
parser.print_help()
sys.exit(1)
| gpl-3.0 |
failys/CAIRIS | cairis/test/test_TraceAPI.py | 1 | 4597 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import sys
if (sys.version_info > (3,)):
from urllib.parse import quote
else:
from urllib import quote
from io import StringIO
import os
import jsonpickle
from cairis.core.Trace import Trace
from cairis.test.CairisDaemonTestCase import CairisDaemonTestCase
from cairis.mio.ModelImport import importModelFile
from cairis.tools.JsonConverter import json_deserialize
import os
__author__ = 'Shamal Faily'
class TraceAPITests(CairisDaemonTestCase):
@classmethod
def setUpClass(cls):
importModelFile(os.environ['CAIRIS_SRC'] + '/../examples/exemplars/NeuroGrid/NeuroGrid.xml',1,'test')
def setUp(self):
self.logger = logging.getLogger(__name__)
self.new_tr = Trace(
fObjt = 'requirement',
fName = 'Dataset policy',
tObjt = 'vulnerability',
tName = 'Certificate ubiquity',
lbl = 'supports')
self.new_tr_dict = {
'session_id' : 'test',
'object': self.new_tr
}
def test_get_trace_dimensions(self):
method = 'test_get_trace_dimensions'
url = '/api/traces/dimensions/requirement/is_from/1?session_id=test'
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.get(url)
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
fromDims = jsonpickle.decode(responseData)
self.assertIsNotNone(fromDims, 'No results after deserialization')
self.logger.info('[%s] Traces found: %d', method, len(fromDims))
self.assertEqual(len(fromDims),6)
url = '/api/traces/dimensions/requirement/is_from/0?session_id=test'
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.get(url)
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
toDims = jsonpickle.decode(responseData)
self.assertIsNotNone(toDims, 'No results after deserialization')
self.logger.info('[%s] Traces found: %d', method, len(toDims))
self.assertEqual(len(toDims),2)
def test_get_all(self):
method = 'test_get_traces'
url = '/api/traces/environment/Psychosis?session_id=test'
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.get(url)
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
trs = jsonpickle.decode(responseData)
self.assertIsNotNone(trs, 'No results after deserialization')
self.logger.info('[%s] Traces found: %d', method, len(trs))
self.assertEqual(len(trs),2)
def test_post(self):
method = 'test_post_new'
rv = self.app.post('/api/traces', content_type='application/json', data=jsonpickle.encode(self.new_tr_dict))
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
self.logger.debug('[%s] Response data: %s', method, responseData)
json_resp = json_deserialize(responseData)
self.assertIsNotNone(json_resp, 'No results after deserialization')
ackMsg = json_resp.get('message', None)
self.assertEqual(ackMsg, 'Dataset policy / Certificate ubiquity created')
def test_delete(self):
method = 'test_delete'
rv = self.app.delete('/api/traces/from_type/requirement/from_name/Dataset%20policy/to_type/vulnerability/to_name/Certificate%20ubiquity?session_id=test', content_type='application/json')
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
self.logger.debug('[%s] Response data: %s', method, responseData)
json_resp = json_deserialize(responseData)
self.assertIsNotNone(json_resp, 'No results after deserialization')
ackMsg = json_resp.get('message', None)
self.assertEqual(ackMsg, 'Dataset policy / Certificate ubiquity deleted')
| apache-2.0 |
sunilghai/avahi-llmnr | avahi-python/avahi-discover/SimpleGladeApp.py | 14 | 11794 | """
SimpleGladeApp.py
Module that provides an object oriented abstraction to pygtk and libglade.
Copyright (C) 2004 Sandino Flores Moreno
"""
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import os
import sys
import re
import tokenize
import gtk
import gtk.glade
import weakref
import inspect
__version__ = "1.0"
__author__ = 'Sandino "tigrux" Flores-Moreno'
def bindtextdomain(app_name, locale_dir=None):
"""
Bind the domain represented by app_name to the locale directory locale_dir.
It has the effect of loading translations, enabling applications for different
languages.
app_name:
        a domain to look for translations, typically the name of an application.
locale_dir:
a directory with locales like locale_dir/lang_isocode/LC_MESSAGES/app_name.mo
If omitted or None, then the current binding for app_name is used.
"""
try:
import locale
import gettext
locale.setlocale(locale.LC_ALL, "")
gtk.glade.bindtextdomain(app_name, locale_dir)
gettext.install(app_name, locale_dir, unicode=1)
except (IOError,locale.Error), e:
print "Warning", app_name, e
__builtins__.__dict__["_"] = lambda x : x
class SimpleGladeApp:
def __init__(self, path, root=None, domain=None, **kwargs):
"""
Load a glade file specified by glade_filename, using root as
root widget and domain as the domain for translations.
If it receives extra named arguments (argname=value), then they are used
as attributes of the instance.
path:
path to a glade filename.
If glade_filename cannot be found, then it will be searched in the
same directory of the program (sys.argv[0])
root:
the name of the widget that is the root of the user interface,
usually a window or dialog (a top level widget).
        If None or omitted, the full user interface is loaded.
domain:
A domain to use for loading translations.
        If None or omitted, no translation is loaded.
**kwargs:
a dictionary representing the named extra arguments.
It is useful to set attributes of new instances, for example:
glade_app = SimpleGladeApp("ui.glade", foo="some value", bar="another value")
sets two attributes (foo and bar) to glade_app.
"""
if os.path.isfile(path):
self.glade_path = path
else:
glade_dir = os.path.dirname( sys.argv[0] )
self.glade_path = os.path.join(glade_dir, path)
for key, value in kwargs.items():
try:
setattr(self, key, weakref.proxy(value) )
except TypeError:
setattr(self, key, value)
self.glade = None
self.install_custom_handler(self.custom_handler)
self.glade = self.create_glade(self.glade_path, root, domain)
if root:
self.main_widget = self.get_widget(root)
else:
self.main_widget = None
self.normalize_names()
self.add_callbacks(self)
self.new()
def __repr__(self):
class_name = self.__class__.__name__
if self.main_widget:
root = gtk.Widget.get_name(self.main_widget)
repr = '%s(path="%s", root="%s")' % (class_name, self.glade_path, root)
else:
repr = '%s(path="%s")' % (class_name, self.glade_path)
return repr
def new(self):
"""
Method called when the user interface is loaded and ready to be used.
        At this moment, the widgets are loaded and can be referred to as self.widget_name
"""
pass
def add_callbacks(self, callbacks_proxy):
"""
It uses the methods of callbacks_proxy as callbacks.
The callbacks are specified by using:
Properties window -> Signals tab
in glade-2 (or any other gui designer like gazpacho).
Methods of classes inheriting from SimpleGladeApp are used as
callbacks automatically.
callbacks_proxy:
            an instance whose methods are used as callbacks.
It means it has methods like on_button1_clicked, on_entry1_activate, etc.
"""
self.glade.signal_autoconnect(callbacks_proxy)
def normalize_names(self):
"""
It is internally used to normalize the name of the widgets.
It means a widget named foo:vbox-dialog in glade
        is referred to as self.vbox_dialog in the code.
        It also sets, for each widget, a data entry "prefixes"
        with the list of prefixes the widget has.
"""
for widget in self.get_widgets():
widget_name = gtk.Widget.get_name(widget)
prefixes_name_l = widget_name.split(":")
prefixes = prefixes_name_l[ : -1]
widget_api_name = prefixes_name_l[-1]
widget_api_name = "_".join( re.findall(tokenize.Name, widget_api_name) )
gtk.Widget.set_name(widget, widget_api_name)
if hasattr(self, widget_api_name):
raise AttributeError("instance %s already has an attribute %s" % (self,widget_api_name))
else:
setattr(self, widget_api_name, widget)
if prefixes:
gtk.Widget.set_data(widget, "prefixes", prefixes)
def add_prefix_actions(self, prefix_actions_proxy):
"""
By using a gui designer (glade-2, gazpacho, etc)
widgets can have a prefix in theirs names
like foo:entry1 or foo:label3
It means entry1 and label3 has a prefix action named foo.
Then, prefix_actions_proxy must have a method named prefix_foo which
is called everytime a widget with prefix foo is found, using the found widget
as argument.
prefix_actions_proxy:
An instance with methods as prefix actions.
It means it has methods like prefix_foo, prefix_bar, etc.
"""
prefix_s = "prefix_"
prefix_pos = len(prefix_s)
is_method = lambda t : callable( t[1] )
is_prefix_action = lambda t : t[0].startswith(prefix_s)
drop_prefix = lambda (k,w): (k[prefix_pos:],w)
members_t = inspect.getmembers(prefix_actions_proxy)
methods_t = filter(is_method, members_t)
prefix_actions_t = filter(is_prefix_action, methods_t)
prefix_actions_d = dict( map(drop_prefix, prefix_actions_t) )
for widget in self.get_widgets():
prefixes = gtk.Widget.get_data(widget, "prefixes")
if prefixes:
for prefix in prefixes:
if prefix in prefix_actions_d:
prefix_action = prefix_actions_d[prefix]
prefix_action(widget)
def custom_handler(self,
glade, function_name, widget_name,
str1, str2, int1, int2):
"""
Generic handler for creating custom widgets, internally used to
enable custom widgets (custom widgets of glade).
The custom widgets have a creation function specified in design time.
Those creation functions are always called with str1,str2,int1,int2 as
arguments, that are values specified in design time.
Methods of classes inheriting from SimpleGladeApp are used as
creation functions automatically.
If a custom widget has create_foo as creation function, then the
method named create_foo is called with str1,str2,int1,int2 as arguments.
"""
try:
handler = getattr(self, function_name)
return handler(str1, str2, int1, int2)
except AttributeError:
return None
def gtk_widget_show(self, widget, *args):
"""
Predefined callback.
        The widget is shown.
Equivalent to widget.show()
"""
widget.show()
def gtk_widget_hide(self, widget, *args):
"""
Predefined callback.
The widget is hidden.
Equivalent to widget.hide()
"""
widget.hide()
def gtk_widget_grab_focus(self, widget, *args):
"""
Predefined callback.
The widget grabs the focus.
Equivalent to widget.grab_focus()
"""
widget.grab_focus()
def gtk_widget_destroy(self, widget, *args):
"""
Predefined callback.
The widget is destroyed.
Equivalent to widget.destroy()
"""
widget.destroy()
def gtk_window_activate_default(self, window, *args):
"""
Predefined callback.
The default widget of the window is activated.
Equivalent to window.activate_default()
"""
        window.activate_default()
def gtk_true(self, *args):
"""
Predefined callback.
Equivalent to return True in a callback.
Useful for stopping propagation of signals.
"""
return True
def gtk_false(self, *args):
"""
Predefined callback.
Equivalent to return False in a callback.
"""
return False
def gtk_main_quit(self, *args):
"""
Predefined callback.
Equivalent to self.quit()
"""
self.quit()
def main(self):
"""
Starts the main loop of processing events.
The default implementation calls gtk.main()
        Useful for applications that need a non-GTK main loop.
        For example, applications based on GStreamer need to override
this method with gst.main()
Do not directly call this method in your programs.
Use the method run() instead.
"""
gtk.main()
def quit(self):
"""
Quit processing events.
The default implementation calls gtk.main_quit()
        Useful for applications that need a non-GTK main loop.
        For example, applications based on GStreamer need to override
this method with gst.main_quit()
"""
gtk.main_quit()
def run(self):
"""
Starts the main loop of processing events checking for Control-C.
        The default implementation checks whether Control-C was pressed,
then calls on_keyboard_interrupt().
Use this method for starting programs.
"""
try:
self.main()
except KeyboardInterrupt:
self.on_keyboard_interrupt()
def on_keyboard_interrupt(self):
"""
This method is called by the default implementation of run()
after a program is finished by pressing Control-C.
"""
pass
def install_custom_handler(self, custom_handler):
gtk.glade.set_custom_handler(custom_handler)
def create_glade(self, glade_path, root, domain):
        return gtk.glade.XML(glade_path, root, domain)
def get_widget(self, widget_name):
return self.glade.get_widget(widget_name)
def get_widgets(self):
return self.glade.get_widget_prefix("")
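# --- Usage sketch (illustrative, not part of the original module) --------
# A minimal subclass, assuming the usual SimpleGladeApp(path, root=None,
# domain=None) constructor and a glade file "app.glade" containing a window
# named "main_window" plus a button whose "clicked" signal was bound to
# "on_quit_clicked" in the designer.  File, widget and handler names here
# are assumptions for the example.
#
# class MyApp(SimpleGladeApp):
#     def new(self):
#         # Widgets are now attributes, e.g. self.main_window.
#         self.main_window.set_title("Demo")
#     def on_quit_clicked(self, widget, *args):
#         self.quit()
#
# if __name__ == "__main__":
#     app = MyApp("app.glade", root="main_window")
#     app.run()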
| lgpl-2.1 |
edx/ansible | v2/ansible/utils/path.py | 14 | 1306 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import stat
__all__ = ['is_executable', 'unfrackpath']
def is_executable(path):
'''is the given path executable?'''
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE] or stat.S_IXGRP & os.stat(path)[stat.ST_MODE] or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
def unfrackpath(path):
'''
returns a path that is free of symlinks, environment
variables, relative path traversals and symbols (~)
example:
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
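# Illustrative examples (assuming a POSIX system; the user name is a
# placeholder):
#   is_executable('/bin/ls')  -> truthy when any execute bit is set on it
#   unfrackpath('~/projects/../notes.txt')  -> '/home/<user>/notes.txt'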
| gpl-3.0 |
RiccardoPecora/MP | Lib/distutils/bcppcompiler.py | 59 | 15335 | """distutils.bcppcompiler
Contains BCPPCompiler, an implementation of the abstract CCompiler class
for the Borland C++ compiler.
"""
# This implementation by Lyle Johnson, based on the original msvccompiler.py
# module and using the directions originally published by Gordon Williams.
# XXX looks like there's a LOT of overlap between these two classes:
# someone should sit down and factor out the common code as
# WindowsCCompiler! --GPW
__revision__ = "$Id$"
import os
from distutils.errors import (DistutilsExecError, CompileError, LibError,
LinkError, UnknownFileError)
from distutils.ccompiler import CCompiler, gen_preprocess_options
from distutils.file_util import write_file
from distutils.dep_util import newer
from distutils import log
class BCPPCompiler(CCompiler) :
"""Concrete class that implements an interface to the Borland C/C++
compiler, as defined by the CCompiler abstract class.
"""
compiler_type = 'bcpp'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = _c_extensions + _cpp_extensions
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
# These executables are assumed to all be in the path.
# Borland doesn't seem to use any special registry settings to
# indicate their installation locations.
self.cc = "bcc32.exe"
self.linker = "ilink32.exe"
self.lib = "tlib.exe"
self.preprocess_options = None
self.compile_options = ['/tWM', '/O2', '/q', '/g0']
self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_static = []
self.ldflags_exe = ['/Gn', '/q', '/x']
self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
# -- Worker methods ------------------------------------------------
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
compile_opts = extra_preargs or []
compile_opts.append ('-c')
if debug:
compile_opts.extend (self.compile_options_debug)
else:
compile_opts.extend (self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
# XXX why do the normpath here?
src = os.path.normpath(src)
obj = os.path.normpath(obj)
# XXX _setup_compile() did a mkpath() too but before the normpath.
# Is it possible to skip the normpath?
self.mkpath(os.path.dirname(obj))
if ext == '.res':
# This is already a binary file -- skip it.
continue # the 'for' loop
if ext == '.rc':
# This needs to be compiled to a .res file -- do it now.
try:
self.spawn (["brcc32", "-fo", obj, src])
except DistutilsExecError, msg:
raise CompileError, msg
continue # the 'for' loop
# The next two are both for the real compiler.
if ext in self._c_extensions:
input_opt = ""
elif ext in self._cpp_extensions:
input_opt = "-P"
else:
# Unknown file type -- no extra options. The compiler
# will probably fail, but let it just in case this is a
# file the compiler recognizes even if we don't.
input_opt = ""
output_opt = "-o" + obj
# Compiler command line syntax is: "bcc32 [options] file(s)".
# Note that the source file names must appear at the end of
# the command line.
try:
self.spawn ([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs + [src])
except DistutilsExecError, msg:
raise CompileError, msg
return objects
# compile ()
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
(objects, output_dir) = self._fix_object_args (objects, output_dir)
output_filename = \
self.library_filename (output_libname, output_dir=output_dir)
if self._need_link (objects, output_filename):
lib_args = [output_filename, '/u'] + objects
if debug:
pass # XXX what goes here?
try:
self.spawn ([self.lib] + lib_args)
except DistutilsExecError, msg:
raise LibError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# create_static_lib ()
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# XXX this ignores 'build_temp'! should follow the lead of
# msvccompiler.py
(objects, output_dir) = self._fix_object_args (objects, output_dir)
(libraries, library_dirs, runtime_library_dirs) = \
self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
if runtime_library_dirs:
log.warn("I don't know what to do with 'runtime_library_dirs': %s",
str(runtime_library_dirs))
if output_dir is not None:
output_filename = os.path.join (output_dir, output_filename)
if self._need_link (objects, output_filename):
# Figure out linker args based on type of target.
if target_desc == CCompiler.EXECUTABLE:
startup_obj = 'c0w32'
if debug:
ld_args = self.ldflags_exe_debug[:]
else:
ld_args = self.ldflags_exe[:]
else:
startup_obj = 'c0d32'
if debug:
ld_args = self.ldflags_shared_debug[:]
else:
ld_args = self.ldflags_shared[:]
# Create a temporary exports file for use by the linker
if export_symbols is None:
def_file = ''
else:
head, tail = os.path.split (output_filename)
modname, ext = os.path.splitext (tail)
temp_dir = os.path.dirname(objects[0]) # preserve tree structure
def_file = os.path.join (temp_dir, '%s.def' % modname)
contents = ['EXPORTS']
for sym in (export_symbols or []):
contents.append(' %s=_%s' % (sym, sym))
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# Borland C++ has problems with '/' in paths
objects2 = map(os.path.normpath, objects)
# split objects in .obj and .res files
# Borland C++ needs them at different positions in the command line
objects = [startup_obj]
resources = []
for file in objects2:
(base, ext) = os.path.splitext(os.path.normcase(file))
if ext == '.res':
resources.append(file)
else:
objects.append(file)
for l in library_dirs:
ld_args.append("/L%s" % os.path.normpath(l))
ld_args.append("/L.") # we sometimes use relative paths
# list of object files
ld_args.extend(objects)
# XXX the command-line syntax for Borland C++ is a bit wonky;
# certain filenames are jammed together in one big string, but
# comma-delimited. This doesn't mesh too well with the
# Unix-centric attitude (with a DOS/Windows quoting hack) of
# 'spawn()', so constructing the argument list is a bit
# awkward. Note that doing the obvious thing and jamming all
# the filenames and commas into one argument would be wrong,
# because 'spawn()' would quote any filenames with spaces in
# them. Arghghh!. Apparently it works fine as coded...
# name of dll/exe file
ld_args.extend([',',output_filename])
# no map file and start libraries
ld_args.append(',,')
for lib in libraries:
# see if we find it and if there is a bcpp specific lib
# (xxx_bcpp.lib)
libfile = self.find_library_file(library_dirs, lib, debug)
if libfile is None:
ld_args.append(lib)
# probably a BCPP internal library -- don't warn
else:
# full name which prefers bcpp_xxx.lib over xxx.lib
ld_args.append(libfile)
# some default libraries
ld_args.append ('import32')
ld_args.append ('cw32mt')
# def file for export symbols
ld_args.extend([',',def_file])
# add resource files
ld_args.append(',')
ld_args.extend(resources)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath (os.path.dirname (output_filename))
try:
self.spawn ([self.linker] + ld_args)
except DistutilsExecError, msg:
raise LinkError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# link ()
# -- Miscellaneous methods -----------------------------------------
def find_library_file (self, dirs, lib, debug=0):
# List of effective library names to try, in order of preference:
# xxx_bcpp.lib is better than xxx.lib
# and xxx_d.lib is better than xxx.lib if debug is set
#
# The "_bcpp" suffix is to handle a Python installation for people
# with multiple compilers (primarily Distutils hackers, I suspect
# ;-). The idea is they'd have one static library for each
# compiler they care about, since (almost?) every Windows compiler
# seems to have a different format for static libraries.
if debug:
dlib = (lib + "_d")
try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
else:
try_names = (lib + "_bcpp", lib)
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename(name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res':
# these can go unchanged
obj_names.append (os.path.join (output_dir, base + ext))
elif ext == '.rc':
# these need to be compiled to .res-files
obj_names.append (os.path.join (output_dir, base + '.res'))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def preprocess (self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
(_, macros, include_dirs) = \
self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = ['cpp32.exe'] + pp_opts
if output_file is not None:
pp_args.append('-o' + output_file)
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or the
# source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError, msg:
print msg
raise CompileError, msg
# preprocess()
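# Usage sketch (illustrative; assumes the Borland tools bcc32.exe,
# ilink32.exe and tlib.exe are on PATH):
#
#   from distutils.ccompiler import new_compiler
#   cc = new_compiler(compiler='bcpp')
#   objs = cc.compile(['hello.c'], output_dir='build')
#   cc.link_executable(objs, 'hello', output_dir='build')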
| gpl-3.0 |
hs634/algorithms | python/test.py | 1 | 1435 | #
#   inputs       outputs
#   single       sin
#   simple       sim
#   solution     so
#   a            a
#
class Node:
    def __init__(self, val):
        self.val = val
        self.children = [None] * 26
        self.is_end = False
        self.word_count = 0  # number of words that pass through this node
def get_unique_prefixes(words):
    root = Node(0)
    # Build the trie, counting how many words pass through each node.
    for word in words:
        cur = root
        for ch in word:
            index = ord(ch) - 97
            if cur.children[index] is None:
                cur.children[index] = Node(ch)
            cur = cur.children[index]
            cur.word_count += 1
        cur.is_end = True
    # Extend each word's prefix until it reaches a node that only one word
    # passes through -- that prefix is unique to the word.
    output = []
    for word in words:
        prefix = ''
        cur = root
        for ch in word:
            prefix += ch
            cur = cur.children[ord(ch) - 97]
            if cur.word_count <= 1:
                break
        output.append(prefix)
    return output
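# Complexity note: building the trie and answering every query is linear in
# the total number of characters across all words; each node stores how many
# words pass through it, and a prefix stops growing once that count is 1.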
words = ['single', 'simple', 'solution', 'a']
print get_unique_prefixes(words)
words = ['single', 'simple']
print get_unique_prefixes(words)
words = ['abcd', 'geft', 'aaaa']
print get_unique_prefixes(words)
words = ['abcd', 'abcx']
print get_unique_prefixes(words)
# /usr/bin/python /Users/harsh/giths634/algorithms/python/test.py
# ['sin', 'sim', 'so', 'a']
# ['sin', 'sim']
# ['ab', 'g', 'aa']
# ['abcd', 'abcx']
| mit |
credativ/pulp | server/test/unit/plugins/file/test_distributor.py | 4 | 13411 | from os import readlink
import copy
import csv
import errno
import os
import shutil
import tempfile
import unittest
from mock import Mock, MagicMock, patch
from pulp.common.plugins.distributor_constants import MANIFEST_FILENAME
from pulp.devel.mock_distributor import get_publish_conduit
from pulp.plugins.file.distributor import FileDistributor, FilePublishProgressReport, BUILD_DIRNAME
from pulp.plugins.model import Repository, Unit
DATA_DIR = os.path.realpath("../../../data/")
SAMPLE_RPM = 'pulp-test-package-0.3.1-1.fc11.x86_64.rpm'
SAMPLE_FILE = 'test-override-pulp.conf'
class FileDistributorTest(unittest.TestCase):
"""
Tests the file distributor base class
"""
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.target_dir = os.path.join(self.temp_dir, "target")
self.repo = MagicMock(spec=Repository)
self.repo.id = "foo"
self.repo.working_dir = self.temp_dir
self.unit = Unit('RPM', {'name': SAMPLE_RPM, 'size': 1, 'checksum': 'sum1'}, {},
os.path.join(DATA_DIR, SAMPLE_RPM))
self.publish_conduit = get_publish_conduit(existing_units=[self.unit, ])
def tearDown(self):
shutil.rmtree(self.temp_dir)
def create_distributor_with_mocked_api_calls(self):
distributor = FileDistributor()
distributor.get_hosting_locations = Mock()
distributor.get_hosting_locations.return_value = [self.target_dir, ]
distributor.post_repo_publish = Mock()
return distributor
def test_metadata_not_implemented(self):
self.assertRaises(NotImplementedError, FileDistributor.metadata)
def test_validate_config_not_implemented(self):
distributor = FileDistributor()
self.assertRaises(NotImplementedError, distributor.validate_config, None, None, None)
def test_get_hosting_locations_not_implemented(self):
distributor = FileDistributor()
host_locations = distributor.get_hosting_locations(None, None)
self.assertEquals(0, len(host_locations))
def test_post_repo_publish_not_implemented(self):
distributor = FileDistributor()
# ensure that this doesn't raise an error
distributor.post_repo_publish(None, None)
def test_repo_publish_api_calls(self):
distributor = self.create_distributor_with_mocked_api_calls()
result = distributor.publish_repo(self.repo, self.publish_conduit, {})
self.assertTrue(result.success_flag)
self.assertTrue(distributor.get_hosting_locations.called)
self.assertTrue(distributor.post_repo_publish.called)
# The publish_conduit should have had two set_progress calls. One to start the IN_PROGRESS
# state, and the second to mark it as complete
self.assertEqual(self.publish_conduit.set_progress.call_count, 2)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[0][1][0]['state'],
FilePublishProgressReport.STATE_IN_PROGRESS)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[1][1][0]['state'],
FilePublishProgressReport.STATE_COMPLETE)
def test_repo_publish_files_placed_properly(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
target_file = os.path.join(self.target_dir, SAMPLE_RPM)
# test if the link was created
self.assertTrue(os.path.islink(target_file))
# test if the link points to the correct place
link_target = os.readlink(target_file)
self.assertEquals(link_target, os.path.join(DATA_DIR, SAMPLE_RPM))
def test_repo_publish_metadata_writing(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
with open(os.path.join(self.target_dir, MANIFEST_FILENAME), 'rb') as f:
reader = csv.reader(f)
row = reader.next()
self.assertEquals(row[0], self.unit.unit_key['name'])
self.assertEquals(row[1], self.unit.unit_key['checksum'])
self.assertEquals(row[2], str(self.unit.unit_key['size']))
def test_repo_publish_handles_errors(self):
"""
Make sure that publish() does the right thing with the report when there is an error.
"""
distributor = self.create_distributor_with_mocked_api_calls()
distributor.post_repo_publish.side_effect = Exception('Rawr!')
report = distributor.publish_repo(self.repo, self.publish_conduit, {})
self.assertFalse(report.success_flag)
self.assertEqual(report.summary['state'], FilePublishProgressReport.STATE_FAILED)
self.assertEqual(report.summary['error_message'], 'Rawr!')
self.assertTrue('Rawr!' in report.summary['traceback'])
# The publish_conduit should have had two set_progress calls. One to start the IN_PROGRESS
# state, and the second to mark it as failed
self.assertEqual(self.publish_conduit.set_progress.call_count, 2)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[0][1][0]['state'],
FilePublishProgressReport.STATE_IN_PROGRESS)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[1][1][0]['state'],
FilePublishProgressReport.STATE_FAILED)
def test_republish_after_unit_removal(self):
"""
This test checks for an issue[0] we had where publishing an ISO repository, removing an ISO,
and then republishing would leave that removed ISO's symlink in the repository even though
it had been removed from the manifest. This test asserts that the republished repository no
longer contains the removed ISO.
[0] https://bugzilla.redhat.com/show_bug.cgi?id=970795
        """
# Publish a repository
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
target_file = os.path.join(self.target_dir, SAMPLE_RPM)
# test if the link was created
self.assertTrue(os.path.islink(target_file))
# publish a new repo with a different unit in it
cloned_unit = copy.deepcopy(self.unit)
cloned_unit.unit_key['name'] = 'foo.rpm'
new_conduit = get_publish_conduit(existing_units=[cloned_unit, ])
distributor.publish_repo(self.repo, new_conduit, {})
# Make sure the new rpm is linked
self.assertTrue(os.path.islink(os.path.join(self.target_dir, 'foo.rpm')))
# Ensure the old rpm is no longer included
self.assertFalse(os.path.islink(target_file))
def test_distributor_removed_calls_unpublish(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.unpublish_repo = Mock()
distributor.distributor_removed(self.repo, {})
self.assertTrue(distributor.unpublish_repo.called)
def test_unpublish_repo(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
self.assertTrue(os.path.exists(self.target_dir))
distributor.unpublish_repo(self.repo, {})
self.assertFalse(os.path.exists(self.target_dir))
def test__rmtree_if_exists(self):
"""
Let's just make sure this simple thing doesn't barf.
"""
a_directory = os.path.join(self.temp_dir, 'a_directory')
test_filename = os.path.join(a_directory, 'test.txt')
os.makedirs(a_directory)
with open(test_filename, 'w') as test:
test.write("Please don't barf.")
# This should not cause any problems, and test.txt should still exist
distributor = self.create_distributor_with_mocked_api_calls()
distributor._rmtree_if_exists(os.path.join(self.temp_dir, 'fake_path'))
self.assertTrue(os.path.exists(test_filename))
# Now let's remove a_directory
distributor._rmtree_if_exists(a_directory)
self.assertFalse(os.path.exists(a_directory))
def test__symlink_units(self):
"""
Make sure that the _symlink_units creates all the correct symlinks.
"""
distributor = self.create_distributor_with_mocked_api_calls()
# There's some logic in _symlink_units to handle preexisting files and symlinks, so let's
# create some fakes to see if it does the right thing
build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
os.makedirs(build_dir)
os.symlink('/some/weird/path',
os.path.join(build_dir, self.unit.unit_key['name']))
distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name'], ])
expected_symlink_path = os.path.join(build_dir, self.unit.unit_key['name'])
self.assertTrue(os.path.islink(expected_symlink_path))
expected_symlink_destination = os.path.join(DATA_DIR, self.unit.unit_key['name'])
self.assertEqual(os.path.realpath(expected_symlink_path), expected_symlink_destination)
@patch('os.symlink', side_effect=os.symlink)
def test__symlink_units_existing_correct_link(self, symlink):
"""
Make sure that the _symlink_units handles an existing correct link well.
"""
# There's some logic in _symlink_units to handle preexisting files and symlinks, so let's
# create some fakes to see if it does the right thing
build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
os.makedirs(build_dir)
expected_symlink_destination = os.path.join(DATA_DIR, self.unit.unit_key['name'])
os.symlink(expected_symlink_destination,
os.path.join(build_dir, self.unit.unit_key['name']))
# Now let's reset the Mock so that we can make sure it doesn't get called during _symlink
symlink.reset_mock()
distributor = self.create_distributor_with_mocked_api_calls()
distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name']])
# The call count for symlink should be 0, because the _symlink_units call should have
# noticed that the symlink was already correct and thus should have skipped it
self.assertEqual(symlink.call_count, 0)
expected_symlink_path = os.path.join(build_dir, self.unit.unit_key['name'])
self.assertTrue(os.path.islink(expected_symlink_path))
self.assertEqual(os.path.realpath(expected_symlink_path),
os.path.realpath(expected_symlink_destination))
@patch('os.readlink')
def test__symlink_units_os_error(self, readlink):
"""
Make sure that the _symlink_units handles an OSError correctly, for the case where it
doesn't raise EINVAL. We already have a test that raises EINVAL (test__symlink_units places
an ordinary file there.)
"""
os_error = OSError()
# This would be an unexpected error for reading a symlink!
os_error.errno = errno.ENOSPC
readlink.side_effect = os_error
# There's some logic in _symlink_units to handle preexisting files and symlinks, so let's
# create some fakes to see if it does the right thing
build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
os.makedirs(build_dir)
expected_symlink_destination = os.path.join(DATA_DIR, self.unit.unit_key['name'])
os.symlink(expected_symlink_destination,
os.path.join(build_dir, self.unit.unit_key['name']))
try:
distributor = self.create_distributor_with_mocked_api_calls()
distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name']])
self.fail('An OSError should have been raised, but was not!')
except OSError, e:
self.assertEqual(e.errno, errno.ENOSPC)
@patch('os.readlink')
def test__symlink_units_EINVAL_os_error(self, mock_readlink):
"""
Make sure that the _symlink_units handles an OSError correctly, for the case where it
raises EINVAL. We already have a test that raises EINVAL (test__symlink_units places
an ordinary file there.)
"""
os_error = OSError()
# This would be an unexpected error for reading a symlink!
os_error.errno = errno.EINVAL
mock_readlink.side_effect = os_error
# There's some logic in _symlink_units to handle preexisting files and symlinks, so let's
# create some fakes to see if it does the right thing
build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
os.makedirs(build_dir)
original_link = os.path.join(build_dir, self.unit.unit_key['name'])
old_target = os.path.join(DATA_DIR, SAMPLE_FILE)
os.symlink(old_target, original_link)
distributor = self.create_distributor_with_mocked_api_calls()
distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name']])
# make sure the symlink was deleted
self.assertTrue(os.path.islink(original_link))
created_link = readlink(original_link)
self.assertNotEqual(old_target, created_link)
| gpl-2.0 |
imcsk8/origin | vendor/github.com/ugorji/go/codec/test.py | 1516 | 4019 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
# Ensure all "string" keys are utf strings (else encoded as bytes)
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464.0,
6464646464.0,
False,
True,
u"null",
None,
u"someday",
1328176922000002000,
u"",
-2206187877999998000,
u"bytestring",
270,
u"none",
-2013855847999995777,
#-6795364578871345152,
]
l1 = [
{ "true": True,
"false": False },
{ "true": u"True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": u"1234567890" },
{ True: "true", 138: False, "false": 200 }
]
l = []
l.extend(l0)
l.append(l0)
l.append(1)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
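# Example invocations (Python 2, with the dependencies listed above; paths
# and port numbers are illustrative):
#   python test.py testdata /tmp/golden
#   python test.py rpc-server 9999 10
#   python test.py rpc-client-go-service 9999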
| apache-2.0 |
pacoqueen/ginn | extra/patches/upgrade_db_etiquetas_norma13.py | 1 | 8959 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Creates the fields and updates the products for the new labels of the
standard of July 1, 2013.
"""
import os, sys
sys.path.insert(0, (os.path.join(os.path.abspath(os.path.dirname(__file__)),
"..", "..", "ginn")))
os.chdir(os.path.join(os.path.abspath(os.path.dirname(__file__)), "..", "..",
"ginn"))
from framework import pclases
def alter_tables():
cmd = """echo "ALTER TABLE producto_venta ADD COLUMN anno_certificacion INT DEFAULT NULL; ALTER TABLE producto_venta ADD COLUMN dni TEXT DEFAULT ''; ALTER TABLE producto_venta ADD COLUMN uso TEXT DEFAULT ''; UPDATE producto_venta SET anno_certificacion = NULL; UPDATE producto_venta SET dni = ''; UPDATE producto_venta SET uso = '';" | psql dev_ginn """
os.system(cmd)
cmd = """echo "ALTER TABLE producto_venta ADD COLUMN anno_certificacion INT DEFAULT NULL; ALTER TABLE producto_venta ADD COLUMN dni TEXT DEFAULT ''; ALTER TABLE producto_venta ADD COLUMN uso TEXT DEFAULT ''; UPDATE producto_venta SET anno_certificacion = NULL; UPDATE producto_venta SET dni = ''; UPDATE producto_venta SET uso = '';" | psql ginn """
os.system(cmd)
def update_values_producto(p):
modificado = True
if "GEOTESAN" in p.nombre.upper() and " 10 " in p.descripcion:
p.annoCertificacion = 8
p.dni = "0001 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo y separación"
elif "GEOTESAN" in p.nombre.upper() and " 11 " in p.descripcion:
p.annoCertificacion = 4
p.dni = "0002 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo y separación"
elif "GEOTESAN" in p.nombre.upper() and " 12 " in p.descripcion:
p.annoCertificacion = 4
p.dni = "0003 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo y separación"
elif "GEOTESAN" in p.nombre.upper() and " 13 " in p.descripcion:
p.annoCertificacion = 4
p.dni = "0004 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo y separación"
elif "GEOTESAN" in p.nombre and " 14 " in p.descripcion:
p.annoCertificacion = 8
p.dni = "0005 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo y separación"
elif "GEOTESAN" in p.nombre.upper() and " 120 " in p.descripcion:
p.annoCertificacion = 13
p.dni = "0006 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo y separación"
elif "GEOTESAN" in p.nombre.upper() and " 15 " in p.descripcion:
p.annoCertificacion = 4
p.dni = "0007 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo y separación"
elif "GEOTESAN" in p.nombre.upper() and " 155 " in p.descripcion:
p.annoCertificacion = 11
p.dni = "0008 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo y separación"
elif "GEOTESAN" in p.nombre.upper() and " 17 " in p.descripcion:
p.annoCertificacion = 4
p.dni = "0009 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo y separación"
elif "GEOTESAN" in p.nombre.upper() and " 175 " in p.descripcion:
p.annoCertificacion = 4
p.dni = "0010 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo y separación"
elif "GEOTESAN" in p.nombre.upper() and " 18 " in p.descripcion:
p.annoCertificacion = 8
p.dni = "0011 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo y separación"
elif "GEOTESAN" in p.nombre.upper() and " 21 " in p.descripcion:
p.annoCertificacion = 4
p.dni = "0012 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOTESAN" in p.nombre.upper() and " 23 " in p.descripcion:
p.annoCertificacion = 4
p.dni = "0013 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOTESAN" in p.nombre.upper() and " 235 " in p.descripcion:
p.annoCertificacion = 11
p.dni = "0014 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOTESAN" in p.nombre.upper() and " 25 " in p.descripcion:
p.annoCertificacion = 8
p.dni = "0015 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOTESAN" in p.nombre.upper() and " 30 " in p.descripcion:
p.annoCertificacion = 4
p.dni = "0016 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOTESAN" in p.nombre.upper() and " 305 " in p.descripcion:
p.annoCertificacion = 11
p.dni = "0017 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOTESAN" in p.nombre.upper() and " 35 " in p.descripcion:
p.annoCertificacion = 4
p.dni = "0018 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOTESAN" in p.nombre.upper() and " 40 " in p.descripcion:
p.annoCertificacion = 4
p.dni = "0019 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOTESAN" in p.nombre.upper() and " 46 " in p.descripcion:
p.annoCertificacion = 4
p.dni = "0020 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOTESAN" in p.nombre.upper() and " 58 " in p.descripcion:
p.annoCertificacion = 4
p.dni = "0021 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOTESAN" in p.nombre.upper() and " 69 " in p.descripcion:
p.annoCertificacion = 8
p.dni = "0022 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOTESAN" in p.nombre.upper() and " 70 " in p.descripcion:
p.annoCertificacion = 8
p.dni = "0023 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOTESAN" in p.nombre.upper() and " 80 " in p.descripcion:
p.annoCertificacion = 9
p.dni = "0024 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOTESAN" in p.nombre.upper() and " 85 " in p.descripcion:
p.annoCertificacion = 13
p.dni = "0025 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOTESAN" in p.nombre.upper() and " 200 " in p.descripcion:
p.annoCertificacion = 8
p.dni = "0026 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo y separación"
elif "GEOTESAN" in p.nombre.upper() and " 90 " in p.descripcion:
p.dni = "0027 - GEOTEXTIL - 20130701"
p.uso = "Drenaje, filtración, refuerzo, separación y protección"
elif "GEOCEM" in p.nombre.upper():
if p.es_caja():
p.uso = "Fibra de polipropileno virgen embolsada en papel hidrosoluble para su uso como aditivo del hormigón"
if p.camposEspecificosBala.dtex == 6.7:
p.nombre = "GEOCEM 31 - %d" % p.camposEspecificosBala.corte
p.annoCertificacion = 9
if p.camposEspecificosBala.corte == 6:
p.dni = "0001 – GEOCEM - 20130701"
elif p.camposEspecificosBala.corte == 12:
p.dni = "0002 – GEOCEM - 20130701"
elif p.camposEspecificosBala.corte == 18:
p.dni = "0003 – GEOCEM - 20130701"
elif p.camposEspecificosBala.corte == 24:
p.dni = "0004 – GEOCEM - 20130701"
else:
modificado = False
elif p.camposEspecificosBala.dtex == 4.4:
p.nombre = "GEOCEM 31 - 12"
p.dni = "0005 – GEOCEM - 20130701"
p.annoCertificacion = 13
else:
modificado = False
else:
modificado = False
print p.dni
return modificado
def set_values():
"""
Establece los valores por defecto de acuerdo a la tabla de Jesús.
"""
no_modificados = []
for p in pclases.ProductoVenta.select():
print "\t", p.descripcion, "...",
sys.stdout.flush()
modificado = update_values_producto(p)
if not modificado:
no_modificados.append(p)
print "-"*80
print
print "Productos no modificados:"
print
for p in no_modificados:
print p.nombre, "-", p.descripcion
def main():
print "Altering tables..."
alter_tables()
print "Setting values..."
set_values()
print "Done!"
if __name__ == "__main__":
main()
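# Illustrative invocation (the script resolves and chdir's into the ginn/
# package relative to its own location, so it can be run from anywhere):
#   python extra/patches/upgrade_db_etiquetas_norma13.py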
| gpl-2.0 |
code-for-india/sahana_shelter_worldbank | tests/unit_tests/modules/s3/s3gis/LayerFailures.py | 43 | 3905 |
import unittest
s3gis = local_import("s3.s3gis")
test_utils = local_import("test_utils")
s3gis_tests = load_module("tests.unit_tests.modules.s3.s3gis")
class FailingMethod(object):
def __init__(self, method_spec, method):
self.LayerClass, self.method_name = method_spec
self.method = method
def __enter__(self):
self.method_impl = getattr(self.LayerClass, self.method_name)
setattr(self.LayerClass, self.method_name, self.method)
def __exit__(self, type, value, traceback):
setattr(self.LayerClass, self.method_name, self.method_impl)
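# FailingMethod temporarily monkey-patches a single method on a layer class
# and restores the original implementation on exit, e.g. (illustrative):
#     with FailingMethod((s3gis.GPXLayer, 'as_javascript'), thrower('boom')):
#         ...  # any show_map() call in here hits the failing method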
ExpectSessionWarning = s3gis_tests.ExpectSessionWarning
def check_map_accepts_layer_failure(warning):
# mock logging
with ExpectSessionWarning(session, warning):
test_gis = s3gis.GIS()
test_gis.show_map(
catalogue_layers = True
)
def thrower(exception_message):
def fail(*a, **kw):
raise Exception(exception_message)
return fail
class LayerFailures(unittest.TestCase):
def setUp(test):
current.session.s3.debug = False
def single_record_layer(test, LayerClass):
layer_type_name = LayerClass.__name__
warning = "%s not shown due to error" % layer_type_name
for method_name in ("__init__", "as_javascript"):
with FailingMethod(
(LayerClass, method_name),
thrower(
"Test %s.SubLayer %s failure exception" % (
layer_type_name,
method_name
)
)
):
check_map_accepts_layer_failure(warning)
def multiple_record_layer(test, LayerClass, table, **data):
layer_type_name = LayerClass.__name__
warning = "%s not shown due to error" % layer_type_name
test.single_record_layer(LayerClass)
with s3gis_tests.InsertedRecord(
db,
table,
dict(
data,
name = "Test "+layer_type_name,
enabled = True,
created_on = datetime.datetime.now(),
modified_on = datetime.datetime.now(),
)
):
for method_name in ("__init__", "as_dict"):
with FailingMethod(
(LayerClass.SubLayer, method_name),
thrower(
"Test %s.SubLayer %s failure exception" % (
layer_type_name,
method_name
)
)
):
check_map_accepts_layer_failure(warning)
def test_google_layer_failure(test):
test.single_record_layer(s3gis.GoogleLayer)
def test_yahoo_layer_failure(test):
test.single_record_layer(s3gis.YahooLayer)
def test_bing_layer_failure(test):
test.single_record_layer(s3gis.BingLayer)
def test_GPX_layer_failure(test):
test.multiple_record_layer(s3gis.GPXLayer, db.gis_layer_gpx)
def test_WMS_layer_failure(test):
test.multiple_record_layer(s3gis.WMSLayer, db.gis_layer_wms)
def test_geojson_layer_failure(test):
test.multiple_record_layer(s3gis.GeoJSONLayer, db.gis_layer_geojson)
def test_GeoRSS_layer_failure(test):
test.multiple_record_layer(s3gis.GeoRSSLayer, db.gis_layer_georss)
def test_KML_layer_failure(test):
test.multiple_record_layer(s3gis.KMLLayer, db.gis_layer_kml)
def test_TMS_layer_failure(test):
test.multiple_record_layer(s3gis.TMSLayer, db.gis_layer_tms)
def test_WFS_layer_failure(test):
test.multiple_record_layer(s3gis.WFSLayer, db.gis_layer_wfs)
def test_feature_layer_failure(test):
test.multiple_record_layer(s3gis.FeatureLayer, db.gis_layer_feature,
module = "default"
)
| mit |
embeddedarm/android_external_chromium_org | chrome/common/extensions/docs/server2/caching_file_system.py | 25 | 4645 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
import sys
from file_system import FileSystem, StatInfo, FileNotFoundError
from future import Future
class _AsyncUncachedFuture(object):
def __init__(self,
uncached_read_futures,
stats_for_uncached,
current_results,
file_system,
object_store):
self._uncached_read_futures = uncached_read_futures
self._stats_for_uncached = stats_for_uncached
self._current_results = current_results
self._file_system = file_system
self._object_store = object_store
def Get(self):
new_results = self._uncached_read_futures.Get()
# Update the cached data in the object store. This is a path -> (read,
# version) mapping.
self._object_store.SetMulti(dict(
(path, (new_result, self._stats_for_uncached[path].version))
for path, new_result in new_results.iteritems()))
new_results.update(self._current_results)
return new_results
class CachingFileSystem(FileSystem):
'''FileSystem which implements a caching layer on top of |file_system|. It's
smart, using Stat() to decided whether to skip Read()ing from |file_system|,
and only Stat()ing directories never files.
'''
def __init__(self, file_system, object_store_creator):
self._file_system = file_system
def create_object_store(category, **optargs):
return object_store_creator.Create(
CachingFileSystem,
category='%s/%s' % (file_system.GetIdentity(), category),
**optargs)
self._stat_object_store = create_object_store('stat')
# The read caches can start populated (start_empty=False) because file
# updates are picked up by the stat, so it doesn't need the force-refresh
# which starting empty is designed for. Without this optimisation, cron
# runs are extra slow.
self._read_object_store = create_object_store('read', start_empty=False)
def Refresh(self):
return self._file_system.Refresh()
def Stat(self, path):
'''Stats the directory given, or if a file is given, stats the file's parent
directory to get info about the file.
'''
# Always stat the parent directory, since it will have the stat of the child
# anyway, and this gives us an entire directory's stat info at once.
dir_path, file_path = posixpath.split(path)
if dir_path and not dir_path.endswith('/'):
dir_path += '/'
# ... and we only ever need to cache the dir stat, too.
dir_stat = self._stat_object_store.Get(dir_path).Get()
if dir_stat is None:
dir_stat = self._file_system.Stat(dir_path)
assert dir_stat is not None # should raise a FileNotFoundError
self._stat_object_store.Set(dir_path, dir_stat)
if path == dir_path:
stat_info = dir_stat
else:
file_version = dir_stat.child_versions.get(file_path)
if file_version is None:
raise FileNotFoundError('No stat found for %s in %s (found %s)' %
(path, dir_path, dir_stat.child_versions))
stat_info = StatInfo(file_version)
return stat_info
def Read(self, paths):
'''Reads a list of files. If a file is in memcache and it is not out of
date, it is returned. Otherwise, the file is retrieved from the file system.
'''
read_values = self._read_object_store.GetMulti(paths).Get()
stat_values = self._stat_object_store.GetMulti(paths).Get()
results = {} # maps path to read value
uncached = {} # maps path to stat value
for path in paths:
stat_value = stat_values.get(path)
if stat_value is None:
# TODO(cduvall): do a concurrent Stat with the missing stat values.
try:
stat_value = self.Stat(path)
except:
return Future(exc_info=sys.exc_info())
read_value = read_values.get(path)
if read_value is None:
uncached[path] = stat_value
continue
read_data, read_version = read_value
if stat_value.version != read_version:
uncached[path] = stat_value
continue
results[path] = read_data
if not uncached:
return Future(value=results)
return Future(delegate=_AsyncUncachedFuture(
self._file_system.Read(uncached.keys()),
uncached,
results,
self,
self._read_object_store))
def GetIdentity(self):
return self._file_system.GetIdentity()
def __repr__(self):
return '%s of <%s>' % (type(self).__name__, repr(self._file_system))
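# Usage sketch (hypothetical |fs| and |object_store_creator| instances):
#   cached = CachingFileSystem(fs, object_store_creator)
#   data = cached.Read(['docs/index.html']).Get()
# A later Read of the same path is served from the read cache for as long
# as the parent directory's stat version is unchanged.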
| bsd-3-clause |
saram-kon/beepmusic | midi_to_beep/midi_to_beep.py | 1 | 8039 | #! python
# COPYRIGHT: BALTHASAR SCHLOTMANN 2017
# LICENSED UNDER THE GNU GENERAL PUBLIC LICENSE VERSION 3
import sys
import notedict as nd
filename = sys.argv[1]
# from here: https://github.com/bspaans/python-mingus/blob/master/mingus/midi/midi_file_in.py
def parse_varbyte_as_int( array, i):
"""Read a variable length byte from the file and return thecorresponding integer."""
result = 0
bytes_read = 0
r = 0x80
while r & 0x80:
r = array[i + bytes_read]
bytes_read += 1
if r & 0x80:
result = (result << 7) + (r & 0x7F)
else:
result = (result << 7) + r
return (result, bytes_read)
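# Worked example: the byte pair 0x81 0x48 decodes to (1 << 7) + 0x48 = 200
# with bytes_read == 2; the high bit of each byte flags "more bytes follow".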
def midi_event(HEXCODE, incr, i):
if byteseq[i] >= HEXCODE : i += 1
# i to next delta
return (HEXCODE, i + incr)
f = open(filename,"rb")
byteseq = f.read()
f.close()
HEAD = True
i=0
tempo = list()
note = list()
tracknotes = list()
current = dict()
j = 0
ON = 0
# Check global header
if byteseq[i:i+4] == b'MThd':
if byteseq[i+9] == 3:
print("asynchrnous midi files are not handled by me")
exit()
delta_timer = byteseq[i+12:i+14]
i += 14
print("GLOBAL HEADER")
else:
print("NO GLOBAL HEADER; IS THIS A MIDI FILE??")
exit()
while i<len(byteseq):
# Global header
#print("NEW EVENT: ", hex(i), hex(ON))
if HEAD:
ON = 0
# Begin of track:
if byteseq[i:i+4] == b'MTrk':
# i to next delta
i += 8
HEAD = False
varint, bytesread = parse_varbyte_as_int( byteseq, i )
delta = varint
print("LOCAL HEADER - SET DELTA TO", byteseq[i], hex(i))
tracknotes.append(note)
note = list()
current = dict()
# i to next midi event
i += bytesread
else:
raise ValueError("EXPECTED HEADER: " + str( hex(i) ) )
# If not HEAD = True
else:
if byteseq[i:i+1] == b'\xff':
ON = 0
#print("META-EVENT i:", hex(i))
# i to midi event type byte 2
i += 1
            # if I want to detect more I have to use a dictionary
# strange input without length
if byteseq[i] > 240:
print("ERROR UNKNOWN")
exit()
#tempo change
elif byteseq[i:i+1] == b'\x51':
j += 1
#print("TEMPO CHANGE Nr:",j, hex(i))
tempo.append([delta, int.from_bytes(byteseq[i+2:i+5], byteorder='big', signed=False)])
#end of track
elif byteseq[i:i+1] == b'\x2F':
HEAD = True
NOTEON = False
print(current)
print("END OF TRACK")
i += 2
continue
# i to delta
i += byteseq[i+1] +2
#print("DELTA position", hex(i), hex(byteseq[i]))
# delta is of variable length
varint, bytesread = parse_varbyte_as_int( byteseq, i )
#print("READ", bytesread)
#print("VARINT", varint)
delta += varint
# i to new midi event
i += bytesread
#print("NEW I", hex(i))
else:
if byteseq[i] >= 0xF0 or (0xF0 == ON and 0x80 > byteseq[i]):
#if byteseq[i] >= 0xF0 : i += 1 ????
ON = 0xF0
#move to length
i += 1
#move to next delta
i += byteseq[i] + 1
# Pitch wheel change
elif byteseq[i] >= 0xE0 or (0xE0 == ON and 0x80 > byteseq[i]):
if byteseq[i] >= 0xE0 : i += 1
ON = 0xE0
i += 2
# Channel after touch
elif byteseq[i] >= 0xD0 or (0xD0 == ON and 0x80 > byteseq[i]):
if byteseq[i] >= 0xD0 : i += 1
ON = 0xD0
i += 1
# Program change
elif byteseq[i] >= 0xC0 or (0xC0 == ON and 0x80 > byteseq[i]):
if byteseq[i] >= 0xC0 : i += 1
ON = 0xC0
i += 1
# Control change
elif byteseq[i] >= 0xB0 or (0xB0 == ON and 0x80 > byteseq[i]):
if byteseq[i] >= 0xB0 : i += 1
ON = 0xB0
i += 2
# Key after-touch
elif byteseq[i] >= 0xA0 or (0xA0 == ON and 0x80 > byteseq[i]):
if byteseq[i] >= 0xA0 : i += 1
ON = 0xA0
i += 2
# Note ON
elif byteseq[i] >= 0x90 or (0x90 == ON and 0x80 > byteseq[i]):
if byteseq[i] >= 0x90 : i += 1
#print("NOTE ", hex(i), byteseq[i], hex(ON))
ON = 0x90
# i to note
# check for pseudo OFF
if byteseq[i+1] != 0:
# The note is added to the note array
note.append( [ byteseq[i], delta, delta ])
# the note is saved in a dict together with its position
if byteseq[i] in current:
print(" ERROR NOTE ALREADY IN DICT", hex(i), byteseq[i])
#exit()
current[ byteseq[i] ] = len(note)-1
else:
# set the endtime and remove the dict entry
temp = current.pop( byteseq[i], 9999 )
if temp != 9999:
note[temp][2] = delta
else:
print("PSEUDO OFF: WARNING NOTE NOT IN DICT", hex(i), byteseq[i])
# i to next delta
i += 2
# Note OFF
elif byteseq[i] >= 0x80 or ON == 0x80:
if byteseq[i] >= 0x80 : i += 1
ON = 0x80
# i to note
# set the endtime and remove the dict entry
#print(hex(i))
temp = current.pop( byteseq[i], 9999 )
if temp != 9999:
note[temp][2] = delta
else:
raise ValueError("OFF: WARNING NOTE NOT IN DICT " + str(hex(i)) + str( byteseq[i]))
# i to next delta
i += 2
else:
print(hex(ON))
raise ValueError("BAD INPUT: NOT A VALID MIDI EVENT i: " +str( hex(i)) +str( hex(byteseq[i]) ) )
varint, bytesread = parse_varbyte_as_int( byteseq, i )
delta += varint
# i to new midi event
i += bytesread
tracknotes.append(note)
print("SUCCESS")
import numpy as np
newnotes = list()
for i in range(len(tracknotes)):
newnotes += tracknotes[i]
#print(newnotes)
npnot = np.asarray(newnotes)
ind = np.lexsort((npnot[::-1,0],npnot[:,1]))
npnot = npnot[ind]
#print(list(npnot))
#npnot = np.asarray(tracknotes[2])
# quick and dirty may need change
index = np.asarray([True] * npnot.shape[0])
for i in range(npnot.shape[0]-1):
if npnot[i+1,1] >= npnot[i,1] and npnot[i+1,1] <= npnot[i,2]:
npnot[i , 2] = npnot[i+1,1]
#print("HERE", i)
if npnot[i,2] - npnot[i,1] <= 0:
index[i] = False
#print(npnot)
#print(index)
npnot = npnot[index]
#print(list(npnot))
if len(tempo) == 0:
tempo.append([0,1])
tempo.append([9999999999,9999999999])
tempo = np.asarray(tempo)
tempo[:,1] = tempo[:,1] / np.amin(tempo[:,1])
tempo[-1,1] = 0
j = 0
length = np.zeros( npnot.shape[0])
pause = np.zeros( npnot.shape[0])
for i in range(len(npnot[:,0])):
while npnot[i,1] >= tempo[j,0]:
z = 1
if npnot[i,1] < tempo[j+z,0]:
if npnot[i,2] < tempo[j+z,0]:
length[i] = tempo[j,1] * (npnot[i,2] - npnot[i,1])
#print(tempo[j,1])
#exit while loop
break
else:
length[i] = tempo[j,1] * (tempo[j+z,0] - npnot[i,1])
while npnot[i,2] >= tempo[j+z,0]:
# Maybe use temp variables?
length[i] += tempo[j+z,1] * (min(npnot[i,2],tempo[j+z+1,0]) - max(npnot[i,1],tempo[j+z,0]))
z += 1
#exit while loop
break
else:
j += 1
j = 0
for i in range(len(npnot[:,0])-1):
while npnot[i,2] >= tempo[j,0]:
z = 1
if npnot[i,2] < tempo[j+z,0]:
if npnot[i+1,1] < tempo[j+z,0]:
pause[i] = tempo[j,1] * (npnot[i+1,1] - npnot[i,2])
#print(tempo[j,1])
#exit while loop
break
else:
pause[i] = tempo[j,1] * (tempo[j+z,0] - npnot[i,2])
while npnot[i+1,1] >= tempo[j+z,0]:
# Maybe use temp variables?
pause[i] += tempo[j+z,1] * (min(npnot[i+1,1],tempo[j+z+1,0]) - tempo[j+z,0])
z += 1
#exit while loop
break
else:
j += 1
index = length > 0
npnot = npnot[index]
length = length[index]
pause = pause[index]
pause = pause.clip(min=0)
final = "#! /bin/bash\nbeep"
tfactor = 2
for i in range(npnot.shape[0]-1):
final += " -f " + str(nd.notedict[str(npnot[i][0])]) + " -l " +str(abs(length[i])*tfactor) + " -D " + str( pause[i] * tfactor ) + " -n"
i += 1
final += " -f " + str(nd.notedict[str(npnot[i][0])]) + " -l " +str(abs(length[i])*tfactor) + " -D " + str( pause[i] * tfactor )
#print(filename.replace(".mid",".sh"))
f = open(filename.replace(".mid",".sh"),"w")
f.write(final)
f.close()
print("Number of Tracks : ", len(tracknotes))
print("Number of Notes : ", len(npnot))
print("Characters written: ", len(final))
print("Output file : ", filename.replace(".mid",".sh"))
| gpl-3.0 |
abetkin/django-qfilter | qfilters/exotic_types.py | 1 | 5664 | # -*- coding: utf-8 -*-
from itertools import groupby
from functools import reduce
from . import QuerySetFilter, ValuesDictFilter
from .utils import CallablesList
class QuerysetIterationHook(QuerySetFilter):
def __init__(self, hook_function):
self.hook_function = hook_function
def __and__(self, other):
if isinstance(other, QuerysetIterationHook):
# now __and__ and __or__ are identical
# TODO: add support for lazy operations
return self.__class__(hook_function=CallablesList.from_callables(
[self.hook_function, other.hook_function], None))
return super(QuerysetIterationHook, self).__and__(other)
def __or__(self, other):
if isinstance(other, QuerysetIterationHook):
# now __and__ and __or__ are identical
# TODO: add support for lazy operations
return self.__class__(hook_function=CallablesList.from_callables(
[self.hook_function, other.hook_function], None))
return super(QuerysetIterationHook, self).__or__(other)
def __call__(self, queryset):
class QuerysetWrapper(type(queryset)):
def iterator(this):
for obj in super(QuerysetWrapper, this).iterator():
self.hook_function(obj) #TODO: maybe let it throw exception
yield obj
queryset.__class__ = QuerysetWrapper
return queryset
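# Usage sketch (hypothetical model and hook function):
#   mark_seen = QuerysetIterationHook(lambda obj: setattr(obj, 'seen', True))
#   for article in mark_seen(Article.objects.all()):
#       assert article.seen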
class _Attribute(object):
_empty = True # does not contain other attributes
def __init__(self, name=None, values_dict=None):
self.name = name
self._dict = values_dict
@classmethod
def make_class_from_fields_list(cls, fields_list, parent_field=None):
fields_list = list(filter(None, fields_list))
if not fields_list:
return cls
if parent_field:
result = type('%s_%s' % (parent_field, cls.__name__),
(cls,), {})
else:
transform_dict = {field: field.transform
for field in fields_list if hasattr(field, 'transform')}
class Object(cls):
def __getitem__(self, item):
return self._dict[item]
def __init__(self, name=None, values_dict=None):
class look_for_transforms(dict):
def __getitem__(this, item):
rv = super(look_for_transforms, this).__getitem__(item)
if item not in transform_dict:
return rv
transform = transform_dict[item]
return transform(self, rv)
values_dict = values_dict and look_for_transforms(values_dict)
return super(Object, self).__init__(name, values_dict)
result = Object
head__tail = [field.partition('__') for field in fields_list]
for head, head__tail in groupby(head__tail, key=lambda t: t[0]):
if not parent_field:
parent = head
else:
parent = '__'.join([parent_field, head])
attr_class = cls.make_class_from_fields_list(
(parts[-1] for parts in head__tail),
parent_field=parent)
setattr(result, head, attr_class(parent))
result._empty = False
return result
def get_value(self):
assert self._dict and self.name in self._dict, str(self._dict.items()) + str(self.name)
return self._dict[self.name]
def __get__(self, instance, owner):
if not instance:
return self
self._dict = instance._dict
return self if not self._empty else self.get_value()
class PropertyBasedFilter(ValuesDictFilter):
def __init__(self, filter_func, fields_list=None, properties=None):
super(PropertyBasedFilter, self).__init__(filter_func, fields_list)
if properties:
self.properties = properties
def __mod__(self, other):
if not isinstance(other, ValuesDictFilter):
return NotImplemented
fields_list=self.fields_list + other.fields_list
properties = set(self.properties)
if isinstance(other, PropertyBasedFilter):
properties |= set(other.properties)
return self.__class__(None, fields_list, properties)
__rmod__ = __mod__
def _fetch_objects(self, queryset):
fields_list = ['pk'] + self.fields_list
Object = _Attribute.make_class_from_fields_list(fields_list)
def get_related_model(model, field_name):
return getattr(model, field_name).field.related.parent_model
for property_name in self.properties:
split_name = property_name.split('.')
model_class = reduce(lambda model, field: get_related_model(model, field),
split_name[:-1],
queryset.model)
if not split_name[:-1]:
attribute_class = Object
else:
get_attr = lambda cls, name: getattr(cls, name).__class__
attribute_class = reduce(get_attr, split_name[:-1], Object)
prop = getattr(model_class, split_name[-1])
setattr(attribute_class, split_name[-1], property(prop.fget))
objects = queryset.values(*fields_list)
return [Object(values_dict=dic) for dic in objects]
| mit |
eloquence/unisubs | apps/videos/migrations/0148_set_video_title.py | 5 | 31537 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
class Migration(SchemaMigration):
def forwards(self, orm):
if not db.dry_run:
for video in orm.Video.objects.all():
new_title = self.calc_title_for_video(video).strip() or video.title.strip()
video.title = new_title
video.save()
def calc_title_for_video(self, video):
qs = video.subtitlelanguage_set.filter(is_original=True)[:1]
try:
lang = qs.get()
except ObjectDoesNotExist:
return ''
public_status_values = [
"not__under_moderation",
"approved",
]
try:
latest_version = lang.subtitleversion_set.filter(
moderation_status__in=public_status_values).order_by(
'-version_no')[0]
except (ObjectDoesNotExist, IndexError):
return ''
return latest_version.title.strip()
def backwards(self, orm):
# title was not really used or defined before this migration, so going
# backwards can be a no-op
pass
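# Sketch of the forward migration's effect (titles are illustrative): a
# video whose original-language subtitles have a public latest version
# titled "Intro" ends up with video.title == "Intro"; a video without
# such a version keeps its existing title.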
models = {
'accountlinker.thirdpartyaccount': {
'Meta': {'unique_together': "(('type', 'username'),)", 'object_name': 'ThirdPartyAccount'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'oauth_access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'oauth_refresh_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'can_send_messages': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_partner': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'notify_by_email': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'notify_by_message': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Partner']", 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'object_name': 'Comment'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user', 'status'),)", 'object_name': 'Application'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.partner': {
'Meta': {'object_name': 'Partner'},
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'managed_partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.CustomUser']"}),
'can_request_paid_captions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'teams.project': {
'Meta': {'unique_together': "(('team', 'name'), ('team', 'slug'))", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.team': {
'Meta': {'object_name': 'Team'},
'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}),
'application_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'auth_provider_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header_html_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'last_notification_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'autocrop': True}", 'max_length': '100', 'blank': 'True'}),
'max_tasks_per_member': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'page_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'teams'", 'null': 'True', 'to': "orm['teams.Partner']"}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'projects_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'subtitle_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_assign_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_expiration': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}),
'translate_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'contributor'", 'max_length': '16', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_members'", 'to': "orm['auth.CustomUser']"})
},
'teams.teamvideo': {
'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'completed_languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.SubtitleLanguage']", 'symmetrical': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'partner_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'null': 'True', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'video': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['videos.Video']", 'unique': 'True'})
},
'videos.action': {
'Meta': {'object_name': 'Action'},
'action_type': ('django.db.models.fields.IntegerField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamMember']", 'null': 'True', 'blank': 'True'}),
'new_video_title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']", 'null': 'True', 'blank': 'True'})
},
'videos.subtitle': {
'Meta': {'unique_together': "(('version', 'subtitle_id'),)", 'object_name': 'Subtitle'},
'end_time': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'db_column': "'end_time_ms'"}),
'end_time_seconds': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'end_time'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_of_paragraph': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'start_time': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'db_column': "'start_time_ms'"}),
'start_time_seconds': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'start_time'"}),
'subtitle_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'subtitle_order': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'subtitle_text': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True'})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language', 'standard_language'),)", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'had_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'standard_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'subtitle_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.subtitlemetadata': {
'Meta': {'object_name': 'SubtitleMetadata'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Subtitle']"})
},
'videos.subtitleversion': {
'Meta': {'unique_together': "(('language', 'version_no'),)", 'object_name': 'SubtitleVersion'},
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'forked_from': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']"}),
'moderation_status': ('django.db.models.fields.CharField', [], {'default': "'not__under_moderation'", 'max_length': '32', 'db_index': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'result_of_rollback': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'videos.subtitleversionmetadata': {
'Meta': {'unique_together': "(('key', 'subtitle_version'),)", 'object_name': 'SubtitleVersionMetadata'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'subtitle_version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['videos.SubtitleVersion']"})
},
'videos.usertestresult': {
'Meta': {'object_name': 'UserTestResult'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'get_updates': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task1': ('django.db.models.fields.TextField', [], {}),
'task2': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'task3': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderating'", 'null': 'True', 'to': "orm['teams.Team']"}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'small_thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.videofeed': {
'Meta': {'object_name': 'VideoFeed'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'})
},
'videos.videometadata': {
'Meta': {'object_name': 'VideoMetadata'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.videourl': {
'Meta': {'object_name': 'VideoUrl'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'owner_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'videoid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['videos']
| agpl-3.0 |
jnewland/home-assistant | homeassistant/components/bom/weather.py | 8 | 3320 | """Support for Australian BOM (Bureau of Meteorology) weather service."""
import logging
import voluptuous as vol
from homeassistant.components.weather import PLATFORM_SCHEMA, WeatherEntity
from homeassistant.const import (
CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS)
from homeassistant.helpers import config_validation as cv
# Reuse data and API logic from the sensor implementation
from .sensor import (
CONF_STATION, BOMCurrentData, closest_station, validate_station)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION): validate_station,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the BOM weather platform."""
station = config.get(CONF_STATION) or closest_station(
config.get(CONF_LATITUDE),
config.get(CONF_LONGITUDE),
hass.config.config_dir)
if station is None:
_LOGGER.error("Could not get BOM weather station from lat/lon")
return False
bom_data = BOMCurrentData(station)
try:
bom_data.update()
except ValueError as err:
_LOGGER.error("Received error from BOM_Current: %s", err)
return False
add_entities([BOMWeather(bom_data, config.get(CONF_NAME))], True)
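# A configuration.yaml sketch this platform would accept (the station ID
# is an assumption for illustration; omit `station` to auto-select the
# closest station from latitude/longitude):
#
#     weather:
#       - platform: bom
#         station: IDN60901.94767
#         name: Sydney Observatory Hill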
class BOMWeather(WeatherEntity):
"""Representation of a weather condition."""
def __init__(self, bom_data, stationname=None):
"""Initialise the platform with a data instance and station name."""
self.bom_data = bom_data
self.stationname = stationname or self.bom_data.latest_data.get('name')
def update(self):
"""Update current conditions."""
self.bom_data.update()
@property
def name(self):
"""Return the name of the sensor."""
return 'BOM {}'.format(self.stationname or '(unknown station)')
@property
def condition(self):
"""Return the current condition."""
return self.bom_data.get_reading('weather')
# Now implement the WeatherEntity interface
@property
def temperature(self):
"""Return the platform temperature."""
return self.bom_data.get_reading('air_temp')
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def pressure(self):
"""Return the mean sea-level pressure."""
return self.bom_data.get_reading('press_msl')
@property
def humidity(self):
"""Return the relative humidity."""
return self.bom_data.get_reading('rel_hum')
@property
def wind_speed(self):
"""Return the wind speed."""
return self.bom_data.get_reading('wind_spd_kmh')
@property
def wind_bearing(self):
"""Return the wind bearing."""
directions = ['N', 'NNE', 'NE', 'ENE',
'E', 'ESE', 'SE', 'SSE',
'S', 'SSW', 'SW', 'WSW',
'W', 'WNW', 'NW', 'NNW']
wind = {name: idx * 360 / 16 for idx, name in enumerate(directions)}
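# e.g. 'NNE' maps to 1 * 360 / 16 = 22.5 degrees; an unrecognised
# reading falls through .get() below and yields None.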
return wind.get(self.bom_data.get_reading('wind_dir'))
@property
def attribution(self):
"""Return the attribution."""
return "Data provided by the Australian Bureau of Meteorology"
| apache-2.0 |
theblacklion/diamond-framework | tools/hotshot2calltree.py | 3 | 14127 | #!/usr/bin/env python
# _*_ coding: latin1 _*_
#
# Copyright (c) 2003 by WEB.DE, Karlsruhe
# Author: Jörg Beyer <[email protected]>
#
# hotshot2cachegrind is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
#
# This script transforms the profile output of the hotshot
# python profiler into the input format of kcachegrind.
#
# example usage:
# modify your python script to run this code:
#
# import hotshot
# filename = "pythongrind.prof"
# prof = hotshot.Profile(filename, lineevents=1)
# prof.runcall(run) # assuming that "run" should be called.
# prof.close()
#
# it will run the "run"-method under profiling and write
# the results in a file, called "pythongrind.prof".
#
# then call this script:
# hotshot2cachegrind -o <output> <input>
# or here:
# hotshot2cachegrind cachegrind.out.0 pythongrind.prof
#
# then call kcachegrind:
# kcachegrind cachegrind.out.0
#
# TODO:
# * There are problems with recursive calls (direct and indirect) - the
#   costs are wrong in that case.
#
# * Some functions are displayed with "?" as their name. Possibly these
#   are only the C/C++ extensions.
#
# * Function-name mangling that takes the file names into account is still
#   missing; at the moment all __init__'s and all run's are hard to tell apart :-(
#
version = "$Revision: 910524 $"
progname = "hotshot2cachegrind"
import os, sys
from hotshot import stats,log
import os.path
file_limit=0
what2text = {
log.WHAT_ADD_INFO : "ADD_INFO",
log.WHAT_DEFINE_FUNC : "DEFINE_FUNC",
log.WHAT_DEFINE_FILE : "DEFINE_FILE",
log.WHAT_LINENO : "LINENO",
log.WHAT_EXIT : "EXIT",
log.WHAT_ENTER : "ENTER"}
# a pseudo caller on the caller stack. This represents
# the Python interpreter that executes the given python
# code.
root_caller = ("PythonInterpreter",0,"execute")
class CallStack:
"""A tiny Stack implementation, based on python lists"""
def __init__(self):
self.stack = []
self.recursion_counter = {}
def push(self, elem):
"""put something on the stack"""
self.stack.append(elem)
rc = self.recursion_counter.get(elem, 0)
self.recursion_counter[elem] = rc + 1
def pop(self):
"""get the head element of the stack and remove it from the stack"""
elem = self.stack[-1:][0]
rc = self.recursion_counter.get(elem) - 1
if rc>0:
self.recursion_counter[elem] = rc
else:
del self.recursion_counter[elem]
return self.stack.pop()
def top(self):
"""get the head element of the stack, stack is unchanged."""
return self.stack[-1:][0]
def handleLineCost(self, tdelta):
p, c = self.stack.pop()
self.stack.append( (p,c + tdelta) )
def size(self):
""" return how many elements the stack has"""
return len(self.stack)
def __str__(self):
return "[stack: %s]" % self.stack
def recursion(self, pos):
return self.recursion_counter.get(pos, 0)
#return self.recursion_dict.has_key((entry[0][0], entry[0][2]))
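# Illustrative example (positions are assumptions; a "pos" is a
# (filename, lineno, funcname) tuple as used throughout this script):
#
#     stack = CallStack()
#     stack.push((("a.py", 1, "main"), 0))
#     stack.push((("a.py", 5, "helper"), 10))
#     stack.top()   # -> (("a.py", 5, "helper"), 10), stack unchanged
#     stack.pop()   # -> same element, now removed
#     stack.size()  # -> 1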
def return_from_call(caller_stack, call_dict, cost_now):
"""return from a function call
remove the function from the caller stack,
add the costs to the calling function.
"""
called, cost_at_enter = caller_stack.pop()
caller, caller_cost = caller_stack.top()
#print "return_from_call: %s ruft %s" % (caller, called,)
per_file_dict = call_dict.get(called[0], {})
per_caller_dict = per_file_dict.get(called[2], {})
cost_so_far, call_counter = per_caller_dict.get(caller, (0, 0))
if caller_stack.recursion(called):
per_caller_dict[caller] = (cost_so_far, call_counter + 1)
else:
per_caller_dict[caller] = (cost_so_far + cost_now - cost_at_enter, call_counter + 1)
per_file_dict[called[2]] = per_caller_dict
call_dict[called[0]] = per_file_dict
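# Example (values illustrative): with [(caller, 0), (called, 100)] on the
# stack and cost_now == 130, popping `called` charges 130 - 100 = 30 ticks
# to the (caller -> called) edge in call_dict and increments its call
# counter; for a recursive call only the counter is incremented.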
def updateStatus(filecount):
sys.stdout.write("reading File #%d \r" % filecount)
sys.stdout.flush()
def convertProfFiles(output, inputfilenames):
"""convert all the given input files into one kcachegrind
input file.
"""
call_dict = {}
cost_per_pos = {}
cost_per_function = {}
caller_stack = CallStack()
caller_stack.push((root_caller, 0))
total_cost = 0
filecount = 1
number_of_files = len(inputfilenames)
for inputfilename in inputfilenames:
updateStatus(filecount)
cost, filecount = convertHandleFilename(inputfilename, caller_stack, call_dict, cost_per_pos, cost_per_function, filecount)
total_cost += cost
if (file_limit > 0) and (filecount > file_limit):
break
print
print "total_cost: % d Ticks",total_cost
dumpResults(output, call_dict, total_cost, cost_per_pos, cost_per_function)
def convertHandleFilename(inputfilename, caller_stack, call_dict, cost_per_pos, cost_per_function, filecount):
updateStatus(filecount)
if not ((file_limit > 0) and (filecount > file_limit)):
if os.path.isdir(inputfilename):
cost, filecount = convertProfDir(inputfilename, caller_stack, call_dict, cost_per_pos, cost_per_function, filecount)
elif os.path.isfile(inputfilename):
cost = convertProfFile(inputfilename, caller_stack, call_dict, cost_per_pos, cost_per_function)
filecount += 1
else:
sys.stderr.write("warn: ignoring '%s', is no file and no directory\n" % inputfilename)
cost = 0
return (cost, filecount)
def convertProfDir(start, caller_stack, call_dict, cost_per_pos, cost_per_function, filecount):
cost = 0
filenames = os.listdir(start)
for f in filenames:
if (file_limit > 0) and (filecount > file_limit):
break
full = os.path.join(start, f)
c, filecount = convertHandleFilename(full, caller_stack, call_dict, cost_per_pos, cost_per_function, filecount)
cost += c
return (cost, filecount)
def handleCostPerPos(cost_per_pos, pos, current_cost):
"""
the cost per source position are managed in a dict in a dict.
the cost are handled per file and there per function.
so, the per-file-dict contains some per-function-dicts
which sum up the cost per line (in this function and in
this file).
"""
filename = pos[0]
lineno = pos[1]
funcname = pos[2]
file_dict = cost_per_pos.get(filename, {})
func_dict = file_dict.get(funcname, {})
func_dict.setdefault(lineno, 0)
func_dict[lineno] += current_cost
file_dict[funcname] = func_dict
cost_per_pos[filename] = file_dict
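# For example, two LINENO events at ("mod.py", 12, "run") costing 3 and 4
# ticks leave cost_per_pos as {"mod.py": {"run": {12: 7}}}.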
def convertProfFile(inputfilename, caller_stack, call_dict, cost_per_pos, cost_per_function):
"""convert a single input file into one kcachegrind
data.
this is the most expensive function in this python source :-)
"""
total_cost = 0
try:
logreader = log.LogReader(inputfilename)
current_cost = 0
hc = handleCostPerPos # shortcut
for item in logreader:
what, pos ,tdelta = item
(file, lineno, func) = pos
#line = "%s %s %d %s %d" % (what2text[what], file, lineno, func, tdelta)
#print line
# most common cases first
if what == log.WHAT_LINENO:
# add the current cost to the current function
hc(cost_per_pos, pos, tdelta)
total_cost += tdelta
elif what == log.WHAT_ENTER:
caller_stack.push((pos, total_cost))
hc(cost_per_pos, pos, tdelta)
total_cost += tdelta
elif what == log.WHAT_EXIT:
hc(cost_per_pos, pos, tdelta)
total_cost += tdelta
return_from_call(caller_stack, call_dict, total_cost)
else:
assert 0, "duh: %d" % what
# I have no idea why the stack is sometimes not empty - we
# have to rewind the stack to get 100% for the root_caller.
while caller_stack.size() > 1:
return_from_call(caller_stack, call_dict, total_cost)
except IOError:
print "could not open inputfile '%s', ignore this." % inputfilename
except EOFError, m:
print "EOF: %s" % (m,)
return total_cost
def pretty_name(file, function):
#pfile = os.path.splitext(os.path.basename(file)) [0]
#return "%s_[%s]" % (function, file)
return "%s" % function
#return "%s::%s" % (file, function)
#return "%s_%s" % (pfile, function)
class TagWriter:
def __init__(self, output):
self.output = output
self.last_values = {}
def clearTag(self, tag):
if self.last_values.has_key(tag):
del self.last_values[ tag ]
def clear(self):
self.last_values = {}
def write(self, tag, value):
self.output.write("%s=%s\n" % (tag, value))
#if (not self.last_values.has_key(tag)) or self.last_values[tag] != value:
# self.last_values[ tag ] = value
# self.output.write("%s=%s\n" % (tag, value))
def dumpResults(output, call_dict, total_cost, cost_per_pos, cost_per_function):
"""write the collected results in the format kcachegrind
could read.
"""
# the intro
output.write("events: Tick\n")
output.write("summary: %d\n" % total_cost)
output.write("cmd: your python script\n")
output.write("\n")
tagwriter = TagWriter(output)
# now the costs per line
for file in cost_per_pos.keys():
func_dict = cost_per_pos[file]
for func in func_dict.keys():
line_dict = func_dict[func]
tagwriter.write("ob", file)
tagwriter.write("fn", func)# pretty_name(file, func)) ; output.write("# ^--- 2\n")
tagwriter.write("fl", file)
for line in line_dict:
output.write("%d %d\n" %( line, line_dict[line] ))
output.write("\n\n")
# now the function calls. For each caller all the called
# functions and their costs are written.
for file in call_dict.keys():
per_file_dict = call_dict[file]
#print "file %s -> %s" % (file, per_file_dict)
for called_x in per_file_dict.keys():
#print "called_x:",called_x
per_caller_dict = per_file_dict[called_x]
#print "called_x %s wird gerufen von: %s" % (called_x, per_caller_dict)
for caller_x in per_caller_dict.keys():
tagwriter.write("ob", caller_x[0])
tagwriter.write("fn", caller_x[2])# pretty_name(caller_x[2], caller_x[0])) ; output.write("# ^--- 1\n")
tagwriter.write("fl", caller_x[0])
tagwriter.write("cob", file)
tagwriter.write("cfn", called_x) #pretty_name(file, called_x))
tagwriter.write("cfl", file)
cost, count = per_caller_dict[caller_x]
#print "called_x:",called_x
output.write("calls=%d\n%d %d\n" % (count, caller_x[1], cost))
tagwriter.clear()
#tagwriter.clearTag("cob")
# Is it a bug in kcachegrind that the "cob=xxx" line has
# to be rewritten after a calls entry with a cost line?
#assert cost <= total_cost, "caller_x: %s, per_caller_dict: %s " % (caller_x, per_caller_dict, )
#output.write("calls=%d\n%d %d\n" % (count, caller_x[1], cost))
output.write("\n")
def run_without_optparse():
"""parse the options without optparse, use sys.argv"""
if len(sys.argv) < 4 or sys.argv[1] != "-o" :
print "usage: hotshot2cachegrind -o outputfile in1 [in2 [in3 [...]]]"
return
outputfilename = sys.argv[2]
try:
output = file(outputfilename, "w")
args = sys.argv[3:]
convertProfFiles(output, args)
output.close()
except IOError:
print "could not open '%s' for writing." % outputfilename
def run_with_optparse():
"""parse the options with optparse"""
global file_limit
versiontext = "%s version: %s" % ( progname, version.split()[1], )
parser = OptionParser(version=versiontext)
parser.add_option("-o", "--output",
action="store", type="string", dest="outputfilename",
help="write output into FILE")
parser.add_option("--file-limit",
action="store", dest="file_limit", default=0,
help="stop after given number of input files")
output = sys.stdout
close_output = 0
(options, args) = parser.parse_args()
file_limit = int(options.file_limit)
try:
if options.outputfilename and options.outputfilename != "-":
output = file(options.outputfilename, "w")
close_output = 1
except IOError:
print "could not open '%s' for writing." % options.outputfilename
if output:
convertProfFiles(output, args)
if close_output:
output.close()
def profile_myself():
import hotshot
filename = "self.prof"
if not os.path.exists(filename):
prof = hotshot.Profile(filename, lineevents=1)
prof.runcall(run)
prof.close()
else:
print "not profiling myself, since '%s' exists, running normal" % filename
run()
# check if optparse is available.
try:
from optparse import OptionParser
run = run_with_optparse
except ImportError:
run = run_without_optparse
if __name__ == "__main__":
try:
run()
#profile_myself()
except KeyboardInterrupt:
sys.exit(1)
| mit |
Andrey-Pavlov/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py | 118 | 37734 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models.test_configuration import *
from webkitpy.layout_tests.models.test_expectations import *
from webkitpy.layout_tests.models.test_configuration import *
try:
from collections import OrderedDict
except ImportError:
# Needed for Python < 2.7
from webkitpy.thirdparty.ordered_dict import OrderedDict
class Base(unittest.TestCase):
# Note that all of these tests are written assuming the configuration
# being tested is Windows XP, Release build.
def __init__(self, testFunc):
host = MockHost()
self._port = host.port_factory.get('test-win-xp', None)
self._exp = None
unittest.TestCase.__init__(self, testFunc)
def get_test(self, test_name):
# FIXME: Remove this routine and just reference test names directly.
return test_name
def get_basic_tests(self):
return [self.get_test('failures/expected/text.html'),
self.get_test('failures/expected/image_checksum.html'),
self.get_test('failures/expected/crash.html'),
self.get_test('failures/expected/missing_text.html'),
self.get_test('failures/expected/image.html'),
self.get_test('passes/text.html')]
def get_basic_expectations(self):
return """
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/crash.html [ WontFix ]
Bug(test) failures/expected/missing_image.html [ Rebaseline Missing ]
Bug(test) failures/expected/image_checksum.html [ WontFix ]
Bug(test) failures/expected/image.html [ WontFix Mac ]
"""
def parse_exp(self, expectations, overrides=None, is_lint_mode=False):
expectations_dict = OrderedDict()
expectations_dict['expectations'] = expectations
if overrides:
expectations_dict['overrides'] = overrides
self._port.expectations_dict = lambda: expectations_dict
expectations_to_lint = expectations_dict if is_lint_mode else None
self._exp = TestExpectations(self._port, self.get_basic_tests(), expectations_to_lint=expectations_to_lint)
def assert_exp(self, test, result):
self.assertEqual(self._exp.get_expectations(self.get_test(test)),
set([result]))
def assert_bad_expectations(self, expectations, overrides=None):
self.assertRaises(ParseError, self.parse_exp, expectations, is_lint_mode=True, overrides=overrides)
class BasicTests(Base):
def test_basic(self):
self.parse_exp(self.get_basic_expectations())
self.assert_exp('failures/expected/text.html', FAIL)
self.assert_exp('failures/expected/image_checksum.html', PASS)
self.assert_exp('passes/text.html', PASS)
self.assert_exp('failures/expected/image.html', PASS)
class MiscTests(Base):
def test_multiple_results(self):
self.parse_exp('Bug(x) failures/expected/text.html [ Crash Failure ]')
self.assertEqual(self._exp.get_expectations(
self.get_test('failures/expected/text.html')),
set([FAIL, CRASH]))
def test_result_was_expected(self):
# test basics
self.assertEqual(TestExpectations.result_was_expected(PASS, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), True)
self.assertEqual(TestExpectations.result_was_expected(FAIL, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), False)
# test handling of SKIPped tests and results
self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False, test_is_skipped=True), True)
self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False, test_is_skipped=False), False)
# test handling of MISSING results and the REBASELINE modifier
self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=True, test_is_skipped=False), True)
self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), False)
def test_remove_pixel_failures(self):
self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS])), set([PASS]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([IMAGE])), set([PASS]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS, IMAGE, CRASH])), set([PASS, CRASH]))
def test_suffixes_for_expectations(self):
self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL])), set(['txt', 'png', 'wav']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set([IMAGE])), set(['png']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL, IMAGE, CRASH])), set(['txt', 'png', 'wav']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set()), set())
def test_category_expectations(self):
# This test checks unknown tests are not present in the
# expectations and that known test part of a test category is
# present in the expectations.
exp_str = 'Bug(x) failures/expected [ WontFix ]'
self.parse_exp(exp_str)
test_name = 'failures/expected/unknown-test.html'
unknown_test = self.get_test(test_name)
self.assertRaises(KeyError, self._exp.get_expectations,
unknown_test)
self.assert_exp('failures/expected/crash.html', PASS)
def test_get_modifiers(self):
self.parse_exp(self.get_basic_expectations())
self.assertEqual(self._exp.get_modifiers(
self.get_test('passes/text.html')), [])
def test_get_expectations_string(self):
self.parse_exp(self.get_basic_expectations())
self.assertEqual(self._exp.get_expectations_string(
self.get_test('failures/expected/text.html')),
'FAIL')
def test_expectation_to_string(self):
# Normal cases are handled by other tests.
self.parse_exp(self.get_basic_expectations())
self.assertRaises(ValueError, self._exp.expectation_to_string,
-1)
def test_get_test_set(self):
# Handle some corner cases for this routine not covered by other tests.
self.parse_exp(self.get_basic_expectations())
s = self._exp.get_test_set(WONTFIX)
self.assertEqual(s,
set([self.get_test('failures/expected/crash.html'),
self.get_test('failures/expected/image_checksum.html')]))
def test_parse_warning(self):
try:
filesystem = self._port.host.filesystem
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'disabled-test.html-disabled'), 'content')
self.get_test('disabled-test.html-disabled'),
self.parse_exp("[ FOO ] failures/expected/text.html [ Failure ]\n"
"Bug(rniwa) non-existent-test.html [ Failure ]\n"
"Bug(rniwa) disabled-test.html-disabled [ ImageOnlyFailure ]", is_lint_mode=True)
self.assertFalse(True, "ParseError wasn't raised")
except ParseError, e:
warnings = ("expectations:1 Unrecognized modifier 'foo' failures/expected/text.html\n"
"expectations:2 Path does not exist. non-existent-test.html")
self.assertEqual(str(e), warnings)
def test_parse_warnings_are_logged_if_not_in_lint_mode(self):
oc = OutputCapture()
try:
oc.capture_output()
self.parse_exp('-- this should be a syntax error', is_lint_mode=False)
finally:
_, _, logs = oc.restore_output()
self.assertNotEquals(logs, '')
def test_error_on_different_platform(self):
# parse_exp uses a Windows port. Assert errors on Mac show up in lint mode.
self.assertRaises(ParseError, self.parse_exp,
'Bug(test) [ Mac ] failures/expected/text.html [ Failure ]\nBug(test) [ Mac ] failures/expected/text.html [ Failure ]',
is_lint_mode=True)
def test_error_on_different_build_type(self):
# parse_exp uses a Release port. Assert errors on DEBUG show up in lint mode.
self.assertRaises(ParseError, self.parse_exp,
'Bug(test) [ Debug ] failures/expected/text.html [ Failure ]\nBug(test) [ Debug ] failures/expected/text.html [ Failure ]',
is_lint_mode=True)
def test_overrides(self):
self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
"Bug(override) failures/expected/text.html [ ImageOnlyFailure ]")
self.assert_exp('failures/expected/text.html', IMAGE)
def test_overrides__directory(self):
self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
"Bug(override) failures/expected [ Crash ]")
self.assert_exp('failures/expected/text.html', CRASH)
self.assert_exp('failures/expected/image.html', CRASH)
def test_overrides__duplicate(self):
self.assert_bad_expectations("Bug(exp) failures/expected/text.html [ Failure ]",
"Bug(override) failures/expected/text.html [ ImageOnlyFailure ]\n"
"Bug(override) failures/expected/text.html [ Crash ]\n")
def test_pixel_tests_flag(self):
def match(test, result, pixel_tests_enabled):
return self._exp.matches_an_expected_result(
self.get_test(test), result, pixel_tests_enabled)
self.parse_exp(self.get_basic_expectations())
self.assertTrue(match('failures/expected/text.html', FAIL, True))
self.assertTrue(match('failures/expected/text.html', FAIL, False))
self.assertFalse(match('failures/expected/text.html', CRASH, True))
self.assertFalse(match('failures/expected/text.html', CRASH, False))
self.assertTrue(match('failures/expected/image_checksum.html', PASS,
True))
self.assertTrue(match('failures/expected/image_checksum.html', PASS,
False))
self.assertTrue(match('failures/expected/crash.html', PASS, False))
self.assertTrue(match('passes/text.html', PASS, False))
def test_more_specific_override_resets_skip(self):
self.parse_exp("Bug(x) failures/expected [ Skip ]\n"
"Bug(x) failures/expected/text.html [ ImageOnlyFailure ]\n")
self.assert_exp('failures/expected/text.html', IMAGE)
self.assertFalse(self._port._filesystem.join(self._port.layout_tests_dir(),
'failures/expected/text.html') in
self._exp.get_tests_with_result_type(SKIP))
class SkippedTests(Base):
def check(self, expectations, overrides, skips, lint=False):
port = MockHost().port_factory.get('qt')
port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), 'failures/expected/text.html'), 'foo')
expectations_dict = OrderedDict()
expectations_dict['expectations'] = expectations
if overrides:
expectations_dict['overrides'] = overrides
port.expectations_dict = lambda: expectations_dict
port.skipped_layout_tests = lambda tests: set(skips)
expectations_to_lint = expectations_dict if lint else None
exp = TestExpectations(port, ['failures/expected/text.html'], expectations_to_lint=expectations_to_lint)
# Check that the expectation is for BUG_DUMMY SKIP : ... [ Pass ]
self.assertEqual(exp.get_modifiers('failures/expected/text.html'),
[TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER])
self.assertEqual(exp.get_expectations('failures/expected/text.html'), set([PASS]))
def test_skipped_tests_work(self):
self.check(expectations='', overrides=None, skips=['failures/expected/text.html'])
def test_duplicate_skipped_test_fails_lint(self):
self.assertRaises(ParseError, self.check, expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None, skips=['failures/expected/text.html'], lint=True)
def test_skipped_file_overrides_expectations(self):
self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
skips=['failures/expected/text.html'])
def test_skipped_dir_overrides_expectations(self):
self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
skips=['failures/expected'])
def test_skipped_file_overrides_overrides(self):
self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
skips=['failures/expected/text.html'])
def test_skipped_dir_overrides_overrides(self):
self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
skips=['failures/expected'])
def test_skipped_entry_dont_exist(self):
port = MockHost().port_factory.get('qt')
expectations_dict = OrderedDict()
expectations_dict['expectations'] = ''
port.expectations_dict = lambda: expectations_dict
port.skipped_layout_tests = lambda tests: set(['foo/bar/baz.html'])
capture = OutputCapture()
capture.capture_output()
exp = TestExpectations(port)
_, _, logs = capture.restore_output()
self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs)
class ExpectationSyntaxTests(Base):
def test_unrecognized_expectation(self):
self.assert_bad_expectations('Bug(test) failures/expected/text.html [ Unknown ]')
def test_macro(self):
exp_str = 'Bug(test) [ Win ] failures/expected/text.html [ Failure ]'
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
def assert_tokenize_exp(self, line, bugs=None, modifiers=None, expectations=None, warnings=None, comment=None, name='foo.html'):
bugs = bugs or []
modifiers = modifiers or []
expectations = expectations or []
warnings = warnings or []
filename = 'TestExpectations'
line_number = 1
expectation_line = TestExpectationParser._tokenize_line(filename, line, line_number)
self.assertEqual(expectation_line.warnings, warnings)
self.assertEqual(expectation_line.name, name)
self.assertEqual(expectation_line.filename, filename)
self.assertEqual(expectation_line.line_number, line_number)
if not warnings:
self.assertEqual(expectation_line.modifiers, modifiers)
self.assertEqual(expectation_line.expectations, expectations)
def test_bare_name(self):
self.assert_tokenize_exp('foo.html', modifiers=['SKIP'], expectations=['PASS'])
def test_bare_name_and_bugs(self):
self.assert_tokenize_exp('webkit.org/b/12345 foo.html', modifiers=['BUGWK12345', 'SKIP'], expectations=['PASS'])
self.assert_tokenize_exp('Bug(dpranke) foo.html', modifiers=['BUGDPRANKE', 'SKIP'], expectations=['PASS'])
self.assert_tokenize_exp('webkit.org/b/12345 webkit.org/b/34567 foo.html', modifiers=['BUGWK12345', 'BUGWK34567', 'SKIP'], expectations=['PASS'])
def test_comments(self):
self.assert_tokenize_exp("# comment", name=None, comment="# comment")
self.assert_tokenize_exp("foo.html # comment", comment="# comment", expectations=['PASS'], modifiers=['SKIP'])
def test_config_modifiers(self):
self.assert_tokenize_exp('[ Mac ] foo.html', modifiers=['MAC', 'SKIP'], expectations=['PASS'])
self.assert_tokenize_exp('[ Mac Vista ] foo.html', modifiers=['MAC', 'VISTA', 'SKIP'], expectations=['PASS'])
self.assert_tokenize_exp('[ Mac ] foo.html [ Failure ] ', modifiers=['MAC'], expectations=['FAIL'])
def test_unknown_config(self):
self.assert_tokenize_exp('[ Foo ] foo.html ', modifiers=['Foo', 'SKIP'], expectations=['PASS'])
def test_unknown_expectation(self):
self.assert_tokenize_exp('foo.html [ Audio ]', warnings=['Unrecognized expectation "Audio"'])
def test_skip(self):
self.assert_tokenize_exp('foo.html [ Skip ]', modifiers=['SKIP'], expectations=['PASS'])
def test_slow(self):
self.assert_tokenize_exp('foo.html [ Slow ]', modifiers=['SLOW'], expectations=['PASS'])
def test_wontfix(self):
self.assert_tokenize_exp('foo.html [ WontFix ]', modifiers=['WONTFIX', 'SKIP'], expectations=['PASS'])
self.assert_tokenize_exp('foo.html [ WontFix ImageOnlyFailure ]', modifiers=['WONTFIX'], expectations=['IMAGE'])
self.assert_tokenize_exp('foo.html [ WontFix Pass Failure ]', modifiers=['WONTFIX'], expectations=['PASS', 'FAIL'])
def test_blank_line(self):
self.assert_tokenize_exp('', name=None)
def test_warnings(self):
self.assert_tokenize_exp('[ Mac ]', warnings=['Did not find a test name.'], name=None)
self.assert_tokenize_exp('[ [', warnings=['unexpected "["'], name=None)
self.assert_tokenize_exp('webkit.org/b/12345 ]', warnings=['unexpected "]"'], name=None)
self.assert_tokenize_exp('foo.html webkit.org/b/12345 ]', warnings=['"webkit.org/b/12345" is not at the start of the line.'])
class SemanticTests(Base):
def test_bug_format(self):
self.assertRaises(ParseError, self.parse_exp, 'BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
def test_bad_bugid(self):
try:
self.parse_exp('BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
self.fail('should have raised an error about a bad bug identifier')
        except ParseError as exp:
self.assertEqual(len(exp.warnings), 1)
def test_missing_bugid(self):
self.parse_exp('failures/expected/text.html [ Failure ]')
self.assertFalse(self._exp.has_warnings())
self._port.warn_if_bug_missing_in_test_expectations = lambda: True
self.parse_exp('failures/expected/text.html [ Failure ]')
line = self._exp._model.get_expectation_line('failures/expected/text.html')
self.assertFalse(line.is_invalid())
self.assertEqual(line.warnings, ['Test lacks BUG modifier.'])
def test_skip_and_wontfix(self):
# Skip is not allowed to have other expectations as well, because those
        # expectations won't be exercised and may become stale.
self.parse_exp('failures/expected/text.html [ Failure Skip ]')
self.assertTrue(self._exp.has_warnings())
self.parse_exp('failures/expected/text.html [ Crash WontFix ]')
self.assertFalse(self._exp.has_warnings())
self.parse_exp('failures/expected/text.html [ Pass WontFix ]')
self.assertFalse(self._exp.has_warnings())
def test_slow_and_timeout(self):
# A test cannot be SLOW and expected to TIMEOUT.
self.assertRaises(ParseError, self.parse_exp,
'Bug(test) failures/expected/timeout.html [ Slow Timeout ]', is_lint_mode=True)
def test_rebaseline(self):
# Can't lint a file w/ 'REBASELINE' in it.
self.assertRaises(ParseError, self.parse_exp,
'Bug(test) failures/expected/text.html [ Failure Rebaseline ]',
is_lint_mode=True)
def test_duplicates(self):
self.assertRaises(ParseError, self.parse_exp, """
Bug(exp) failures/expected/text.html [ Failure ]
Bug(exp) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)
self.assertRaises(ParseError, self.parse_exp,
self.get_basic_expectations(), overrides="""
Bug(override) failures/expected/text.html [ Failure ]
Bug(override) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)
def test_missing_file(self):
self.parse_exp('Bug(test) missing_file.html [ Failure ]')
        self.assertTrue(self._exp.has_warnings())
class PrecedenceTests(Base):
def test_file_over_directory(self):
# This tests handling precedence of specific lines over directories
# and tests expectations covering entire directories.
exp_str = """
Bug(x) failures/expected/text.html [ Failure ]
Bug(y) failures/expected [ WontFix ]
"""
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
self.assert_exp('failures/expected/crash.html', PASS)
exp_str = """
Bug(x) failures/expected [ WontFix ]
Bug(y) failures/expected/text.html [ Failure ]
"""
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
self.assert_exp('failures/expected/crash.html', PASS)
def test_ambiguous(self):
self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
"Bug(test) [ Win ] passes/text.html [ Failure ]\n")
def test_more_modifiers(self):
self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
"Bug(test) [ Win Release ] passes/text.html [ Failure ]\n")
def test_order_in_file(self):
self.assert_bad_expectations("Bug(test) [ Win Release ] : passes/text.html [ Failure ]\n"
"Bug(test) [ Release ] : passes/text.html [ Pass ]\n")
def test_macro_overrides(self):
self.assert_bad_expectations("Bug(test) [ Win ] passes/text.html [ Pass ]\n"
"Bug(test) [ XP ] passes/text.html [ Failure ]\n")
class RemoveConfigurationsTest(Base):
def test_remove(self):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {"expectations": """Bug(x) [ Linux Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port, self.get_basic_tests())
actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
self.assertEqual("""Bug(x) [ Linux Vista Win7 Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_line(self):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-vista', None).test_configuration())
actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())
self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
class RebaseliningTest(Base):
"""Test rebaselining-specific functionality."""
def assertRemove(self, input_expectations, input_overrides, tests, expected_expectations, expected_overrides):
self.parse_exp(input_expectations, is_lint_mode=False, overrides=input_overrides)
actual_expectations = self._exp.remove_rebaselined_tests(tests, 'expectations')
self.assertEqual(expected_expectations, actual_expectations)
actual_overrides = self._exp.remove_rebaselined_tests(tests, 'overrides')
self.assertEqual(expected_overrides, actual_overrides)
def test_remove(self):
self.assertRemove('Bug(x) failures/expected/text.html [ Failure Rebaseline ]\n'
'Bug(y) failures/expected/image.html [ ImageOnlyFailure Rebaseline ]\n'
'Bug(z) failures/expected/crash.html [ Crash ]\n',
'Bug(x0) failures/expected/image.html [ Crash ]\n',
['failures/expected/text.html'],
'Bug(y) failures/expected/image.html [ ImageOnlyFailure Rebaseline ]\n'
'Bug(z) failures/expected/crash.html [ Crash ]\n',
'Bug(x0) failures/expected/image.html [ Crash ]\n')
# Ensure that we don't modify unrelated lines, even if we could rewrite them.
# i.e., the second line doesn't get rewritten to "Bug(y) failures/expected/skip.html"
self.assertRemove('Bug(x) failures/expected/text.html [ Failure Rebaseline ]\n'
'Bug(Y) failures/expected/image.html [ Skip ]\n'
'Bug(z) failures/expected/crash.html\n',
'',
['failures/expected/text.html'],
'Bug(Y) failures/expected/image.html [ Skip ]\n'
'Bug(z) failures/expected/crash.html\n',
'')
def test_get_rebaselining_failures(self):
# Make sure we find a test as needing a rebaseline even if it is not marked as a failure.
self.parse_exp('Bug(x) failures/expected/text.html [ Rebaseline ]\n')
self.assertEqual(len(self._exp.get_rebaselining_failures()), 1)
self.parse_exp(self.get_basic_expectations())
self.assertEqual(len(self._exp.get_rebaselining_failures()), 0)
class TestExpectationSerializationTests(unittest.TestCase):
def __init__(self, testFunc):
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
self._converter = TestConfigurationConverter(test_port.all_test_configurations(), test_port.configuration_specifier_macros())
unittest.TestCase.__init__(self, testFunc)
def _tokenize(self, line):
return TestExpectationParser._tokenize_line('path', line, 0)
def assert_round_trip(self, in_string, expected_string=None):
expectation = self._tokenize(in_string)
if expected_string is None:
expected_string = in_string
self.assertEqual(expected_string, expectation.to_string(self._converter))
def assert_list_round_trip(self, in_string, expected_string=None):
host = MockHost()
parser = TestExpectationParser(host.port_factory.get('test-win-xp', None), [], allow_rebaseline_modifier=False)
expectations = parser.parse('path', in_string)
if expected_string is None:
expected_string = in_string
self.assertEqual(expected_string, TestExpectations.list_to_string(expectations, self._converter))
def test_unparsed_to_string(self):
expectation = TestExpectationLine()
self.assertEqual(expectation.to_string(self._converter), '')
expectation.comment = ' Qux.'
self.assertEqual(expectation.to_string(self._converter), '# Qux.')
expectation.name = 'bar'
self.assertEqual(expectation.to_string(self._converter), 'bar # Qux.')
expectation.modifiers = ['foo']
# FIXME: case should be preserved here but we can't until we drop the old syntax.
self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar # Qux.')
expectation.expectations = ['bAz']
self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ ] # Qux.')
expectation.expectations = ['bAz1', 'baZ2']
self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ1 BAZ2 ] # Qux.')
expectation.modifiers = ['foo1', 'foO2']
self.assertEqual(expectation.to_string(self._converter), '[ FOO1 FOO2 ] bar [ BAZ1 BAZ2 ] # Qux.')
expectation.warnings.append('Oh the horror.')
self.assertEqual(expectation.to_string(self._converter), '')
expectation.original_string = 'Yes it is!'
self.assertEqual(expectation.to_string(self._converter), 'Yes it is!')
def test_unparsed_list_to_string(self):
expectation = TestExpectationLine()
expectation.comment = 'Qux.'
expectation.name = 'bar'
expectation.modifiers = ['foo']
expectation.expectations = ['bAz1', 'baZ2']
# FIXME: case should be preserved here but we can't until we drop the old syntax.
self.assertEqual(TestExpectations.list_to_string([expectation]), '[ FOO ] bar [ BAZ1 BAZ2 ] #Qux.')
def test_parsed_to_string(self):
expectation_line = TestExpectationLine()
expectation_line.parsed_bug_modifiers = ['BUGX']
expectation_line.name = 'test/name/for/realz.html'
expectation_line.parsed_expectations = set([IMAGE])
self.assertEqual(expectation_line.to_string(self._converter), None)
expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release')])
self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP Release ] test/name/for/realz.html [ ImageOnlyFailure ]')
expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')])
self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP ] test/name/for/realz.html [ ImageOnlyFailure ]')
def test_serialize_parsed_expectations(self):
expectation_line = TestExpectationLine()
expectation_line.parsed_expectations = set([])
parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), '')
expectation_line.parsed_expectations = set([FAIL])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'fail')
expectation_line.parsed_expectations = set([PASS, IMAGE])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass image')
expectation_line.parsed_expectations = set([FAIL, PASS])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass fail')
def test_serialize_parsed_modifier_string(self):
expectation_line = TestExpectationLine()
expectation_line.parsed_bug_modifiers = ['garden-o-matic']
expectation_line.parsed_modifiers = ['for', 'the']
self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, []), 'garden-o-matic for the')
self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic for the win')
expectation_line.parsed_bug_modifiers = []
expectation_line.parsed_modifiers = []
self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, []), '')
self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'win')
expectation_line.parsed_bug_modifiers = ['garden-o-matic', 'total', 'is']
self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic is total win')
expectation_line.parsed_bug_modifiers = []
expectation_line.parsed_modifiers = ['garden-o-matic', 'total', 'is']
self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic is total win')
def test_format_line(self):
self.assertEqual(TestExpectationLine._format_line(['MODIFIERS'], 'name', ['EXPECTATIONS'], 'comment'), '[ MODIFIERS ] name [ EXPECTATIONS ] #comment')
self.assertEqual(TestExpectationLine._format_line(['MODIFIERS'], 'name', ['EXPECTATIONS'], None), '[ MODIFIERS ] name [ EXPECTATIONS ]')
def test_string_roundtrip(self):
self.assert_round_trip('')
self.assert_round_trip('FOO')
self.assert_round_trip('[')
self.assert_round_trip('FOO [')
self.assert_round_trip('FOO ] bar')
self.assert_round_trip(' FOO [')
self.assert_round_trip(' [ FOO ] ')
self.assert_round_trip('[ FOO ] bar [ BAZ ]')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ')
self.assert_round_trip('[ FOO ] ] ] bar BAZ')
self.assert_round_trip('[ FOO ] ] ] bar [ BAZ ]')
self.assert_round_trip('FOO ] ] bar ==== BAZ')
self.assert_round_trip('=')
self.assert_round_trip('#')
self.assert_round_trip('# ')
self.assert_round_trip('# Foo')
self.assert_round_trip('# Foo')
self.assert_round_trip('# Foo :')
self.assert_round_trip('# Foo : =')
def test_list_roundtrip(self):
self.assert_list_round_trip('')
self.assert_list_round_trip('\n')
self.assert_list_round_trip('\n\n')
self.assert_list_round_trip('bar')
self.assert_list_round_trip('bar\n# Qux.')
self.assert_list_round_trip('bar\n# Qux.\n')
def test_reconstitute_only_these(self):
lines = []
reconstitute_only_these = []
def add_line(matching_configurations, reconstitute):
expectation_line = TestExpectationLine()
expectation_line.original_string = "Nay"
expectation_line.parsed_bug_modifiers = ['BUGX']
expectation_line.name = 'Yay'
expectation_line.parsed_expectations = set([IMAGE])
expectation_line.matching_configurations = matching_configurations
lines.append(expectation_line)
if reconstitute:
reconstitute_only_these.append(expectation_line)
add_line(set([TestConfiguration('xp', 'x86', 'release')]), True)
add_line(set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')]), False)
serialized = TestExpectations.list_to_string(lines, self._converter)
self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nBug(x) [ XP ] Yay [ ImageOnlyFailure ]")
serialized = TestExpectations.list_to_string(lines, self._converter, reconstitute_only_these=reconstitute_only_these)
self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nNay")
def disabled_test_string_whitespace_stripping(self):
# FIXME: Re-enable this test once we rework the code to no longer support the old syntax.
self.assert_round_trip('\n', '')
self.assert_round_trip(' [ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
self.assert_round_trip('[ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
| bsd-3-clause |
schmidtc/pysal | pysal/weights/Wsets.py | 7 | 18482 | """
Set-like manipulation of weights matrices.
"""
__author__ = "Sergio J. Rey <[email protected]>, Charles Schmidt <[email protected]>, David Folch <[email protected]>, Dani Arribas-Bel <[email protected]>"
import pysal
import copy
from scipy.sparse import isspmatrix_csr
from numpy import ones
__all__ = ['w_union', 'w_intersection', 'w_difference',
'w_symmetric_difference', 'w_subset', 'w_clip']
def w_union(w1, w2, silent_island_warning=False):
"""
Returns a binary weights object, w, that includes all neighbor pairs that
exist in either w1 or w2.
Parameters
----------
w1 : W
object
w2 : W
object
silent_island_warning : boolean
Switch to turn off (default on) print statements
for every observation with islands
Returns
-------
w : W
object
Notes
-----
ID comparisons are performed using ==, therefore the integer ID 2 is
equivalent to the float ID 2.0. Returns a matrix with all the unique IDs
from w1 and w2.
Examples
--------
Construct rook weights matrices for two regions, one is 4x4 (16 areas)
and the other is 6x4 (24 areas). A union of these two weights matrices
results in the new weights matrix matching the larger one.
>>> import pysal
>>> w1 = pysal.lat2W(4,4)
>>> w2 = pysal.lat2W(6,4)
>>> w = pysal.weights.w_union(w1, w2)
>>> w1[0] == w[0]
True
>>> w1.neighbors[15]
[11, 14]
>>> w2.neighbors[15]
[11, 14, 19]
>>> w.neighbors[15]
[19, 11, 14]
>>>
"""
neighbors = dict(w1.neighbors.items())
for i in w2.neighbors:
if i in neighbors:
add_neigh = set(neighbors[i]).union(set(w2.neighbors[i]))
neighbors[i] = list(add_neigh)
else:
neighbors[i] = copy.copy(w2.neighbors[i])
return pysal.W(neighbors, silent_island_warning=silent_island_warning)
def w_intersection(w1, w2, w_shape='w1', silent_island_warning=False):
"""
Returns a binary weights object, w, that includes only
those neighbor pairs that exist in both w1 and w2.
Parameters
----------
w1 : W
object
w2 : W
object
w_shape : string
Defines the shape of the returned weights matrix. 'w1' returns a
matrix with the same IDs as w1; 'all' returns a matrix with all
the unique IDs from w1 and w2; and 'min' returns a matrix with
only the IDs occurring in both w1 and w2.
silent_island_warning : boolean
Switch to turn off (default on) print statements
for every observation with islands
Returns
-------
w : W
object
Notes
-----
ID comparisons are performed using ==, therefore the integer ID 2 is
equivalent to the float ID 2.0.
Examples
--------
Construct rook weights matrices for two regions, one is 4x4 (16 areas)
and the other is 6x4 (24 areas). An intersection of these two weights
matrices results in the new weights matrix matching the smaller one.
>>> import pysal
>>> w1 = pysal.lat2W(4,4)
>>> w2 = pysal.lat2W(6,4)
>>> w = pysal.weights.w_intersection(w1, w2)
>>> w1[0] == w[0]
True
>>> w1.neighbors[15]
[11, 14]
>>> w2.neighbors[15]
[11, 14, 19]
>>> w.neighbors[15]
[11, 14]
>>>
"""
if w_shape == 'w1':
neigh_keys = w1.neighbors.keys()
elif w_shape == 'all':
neigh_keys = set(w1.neighbors.keys()).union(set(w2.neighbors.keys()))
elif w_shape == 'min':
neigh_keys = set(w1.neighbors.keys(
)).intersection(set(w2.neighbors.keys()))
else:
raise Exception("invalid string passed to w_shape")
neighbors = {}
for i in neigh_keys:
if i in w1.neighbors and i in w2.neighbors:
add_neigh = set(w1.neighbors[i]).intersection(set(w2.neighbors[i]))
neighbors[i] = list(add_neigh)
else:
neighbors[i] = []
return pysal.W(neighbors, silent_island_warning=silent_island_warning)
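# Editorial sketch (not part of the original module; w_min is a hypothetical
# name): the w_shape keyword controls which IDs survive. With the 4x4 and 6x4
# lattices from the doctest above, 'min' keeps only the IDs present in both
# objects:
#
#     w_min = pysal.weights.w_intersection(w1, w2, w_shape='min')
#     sorted(w_min.neighbors.keys()) == range(16)  # True under these inputs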
def w_difference(w1, w2, w_shape='w1', constrained=True, silent_island_warning=False):
"""
Returns a binary weights object, w, that includes only neighbor pairs
in w1 that are not in w2. The w_shape and constrained parameters
determine which pairs in w1 that are not in w2 are returned.
Parameters
----------
w1 : W
object
w2 : W
object
w_shape : string
Defines the shape of the returned weights matrix. 'w1' returns a
matrix with the same IDs as w1; 'all' returns a matrix with all
the unique IDs from w1 and w2; and 'min' returns a matrix with
the IDs occurring in w1 and not in w2.
constrained : boolean
If False then the full set of neighbor pairs in w1 that are
not in w2 are returned. If True then those pairs that would
not be possible if w_shape='min' are dropped. Ignored if
w_shape is set to 'min'.
silent_island_warning : boolean
Switch to turn off (default on) print statements
for every observation with islands
Returns
-------
w : W
object
Notes
-----
ID comparisons are performed using ==, therefore the integer ID 2 is
equivalent to the float ID 2.0.
Examples
--------
Construct rook (w2) and queen (w1) weights matrices for two 4x4 regions
(16 areas). A queen matrix has all the joins a rook matrix does plus joins
between areas that share a corner. The new matrix formed by the difference
of rook from queen contains only join at corners (typically called a
bishop matrix). Note that the difference of queen from rook would result
in a weights matrix with no joins.
>>> import pysal
>>> w1 = pysal.lat2W(4,4,rook=False)
>>> w2 = pysal.lat2W(4,4,rook=True)
>>> w = pysal.weights.w_difference(w1, w2, constrained=False)
>>> w1[0] == w[0]
False
>>> w1.neighbors[15]
[10, 11, 14]
>>> w2.neighbors[15]
[11, 14]
>>> w.neighbors[15]
[10]
>>>
"""
if w_shape == 'w1':
neigh_keys = w1.neighbors.keys()
elif w_shape == 'all':
neigh_keys = set(w1.neighbors.keys()).union(set(w2.neighbors.keys()))
elif w_shape == 'min':
neigh_keys = set(
w1.neighbors.keys()).difference(set(w2.neighbors.keys()))
if not neigh_keys:
raise Exception("returned an empty weights matrix")
else:
raise Exception("invalid string passed to w_shape")
neighbors = {}
for i in neigh_keys:
if i in w1.neighbors:
if i in w2.neighbors:
add_neigh = set(w1.neighbors[i]
).difference(set(w2.neighbors[i]))
neighbors[i] = list(add_neigh)
else:
neighbors[i] = copy.copy(w1.neighbors[i])
else:
neighbors[i] = []
if constrained or w_shape == 'min':
constrained_keys = set(
w1.neighbors.keys()).difference(set(w2.neighbors.keys()))
island_keys = set(neighbors.keys()).difference(constrained_keys)
for i in island_keys:
neighbors[i] = []
for i in constrained_keys:
neighbors[i] = list(
set(neighbors[i]).intersection(constrained_keys))
return pysal.W(neighbors, silent_island_warning=silent_island_warning)
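# Editorial sketch (not part of the original module): w_shape='min' keeps only
# IDs that occur in w1 but not in w2, so differencing an object with itself
# leaves no keys and, per the guard above, raises an Exception:
#
#     pysal.weights.w_difference(w1, w1, w_shape='min')  # raises Exception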
def w_symmetric_difference(w1, w2, w_shape='all', constrained=True, silent_island_warning=False):
"""
Returns a binary weights object, w, that includes only neighbor pairs
that are not shared by w1 and w2. The w_shape and constrained parameters
determine which pairs that are not shared by w1 and w2 are returned.
Parameters
----------
w1 : W
object
w2 : W
object
w_shape : string
Defines the shape of the returned weights matrix. 'all' returns a
matrix with all the unique IDs from w1 and w2; and 'min' returns
a matrix with the IDs not shared by w1 and w2.
constrained : boolean
If False then the full set of neighbor pairs that are not
shared by w1 and w2 are returned. If True then those pairs
that would not be possible if w_shape='min' are dropped.
Ignored if w_shape is set to 'min'.
silent_island_warning : boolean
Switch to turn off (default on) print statements
for every observation with islands
Returns
-------
w : W
object
Notes
-----
ID comparisons are performed using ==, therefore the integer ID 2 is
equivalent to the float ID 2.0.
Examples
--------
Construct queen weights matrix for a 4x4 (16 areas) region (w1) and a rook
matrix for a 6x4 (24 areas) region (w2). The symmetric difference of these
two matrices (with w_shape set to 'all' and constrained set to False)
contains the corner joins in the overlap area, all the joins in the
non-overlap area.
>>> import pysal
>>> w1 = pysal.lat2W(4,4,rook=False)
>>> w2 = pysal.lat2W(6,4,rook=True)
>>> w = pysal.weights.w_symmetric_difference(w1, w2, constrained=False)
>>> w1[0] == w[0]
False
>>> w1.neighbors[15]
[10, 11, 14]
>>> w2.neighbors[15]
[11, 14, 19]
>>> w.neighbors[15]
[10, 19]
>>>
"""
if w_shape == 'all':
neigh_keys = set(w1.neighbors.keys()).union(set(w2.neighbors.keys()))
elif w_shape == 'min':
neigh_keys = set(w1.neighbors.keys(
)).symmetric_difference(set(w2.neighbors.keys()))
else:
raise Exception("invalid string passed to w_shape")
neighbors = {}
for i in neigh_keys:
if i in w1.neighbors:
if i in w2.neighbors:
add_neigh = set(w1.neighbors[i]).symmetric_difference(
set(w2.neighbors[i]))
neighbors[i] = list(add_neigh)
else:
neighbors[i] = copy.copy(w1.neighbors[i])
elif i in w2.neighbors:
neighbors[i] = copy.copy(w2.neighbors[i])
else:
neighbors[i] = []
if constrained or w_shape == 'min':
constrained_keys = set(
w1.neighbors.keys()).difference(set(w2.neighbors.keys()))
island_keys = set(neighbors.keys()).difference(constrained_keys)
for i in island_keys:
neighbors[i] = []
for i in constrained_keys:
neighbors[i] = list(
set(neighbors[i]).intersection(constrained_keys))
return pysal.W(neighbors, silent_island_warning=silent_island_warning)
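# Editorial sketch (not part of the original module; w_min is a hypothetical
# name): with w_shape='min' only the IDs not shared by w1 and w2 remain; for
# the 4x4/6x4 lattices in the doctest above these are the eight extra cells
# of the larger region:
#
#     w_min = pysal.weights.w_symmetric_difference(w1, w2, w_shape='min')
#     sorted(w_min.neighbors.keys()) == range(16, 24)  # True under these inputs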
def w_subset(w1, ids, silent_island_warning=False):
"""
Returns a binary weights object, w, that includes only those
observations in ids.
Parameters
----------
w1 : W
object
ids : list
A list containing the IDs to be include in the returned weights
object.
silent_island_warning : boolean
Switch to turn off (default on) print statements
for every observation with islands
Returns
-------
w : W
object
Examples
--------
Construct a rook weights matrix for a 6x4 region (24 areas). By default
PySAL assigns integer IDs to the areas in a region. By passing in a list
of integers from 0 to 15, the first 16 areas are extracted from the
previous weights matrix, and only those joins relevant to the new region
are retained.
>>> import pysal
>>> w1 = pysal.lat2W(6,4)
>>> ids = range(16)
>>> w = pysal.weights.w_subset(w1, ids)
>>> w1[0] == w[0]
True
>>> w1.neighbors[15]
[11, 14, 19]
>>> w.neighbors[15]
[11, 14]
>>>
"""
neighbors = {}
ids_set = set(ids)
for i in ids:
if i in w1.neighbors:
neigh_add = ids_set.intersection(set(w1.neighbors[i]))
neighbors[i] = list(neigh_add)
else:
neighbors[i] = []
return pysal.W(neighbors, id_order=ids, silent_island_warning=silent_island_warning)
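# Editorial sketch (not part of the original module): the subset keeps the
# supplied ids as its id_order, which makes it straightforward to align the
# result with an attribute vector:
#
#     w = pysal.weights.w_subset(w1, ids)
#     w.id_order == list(ids)  # True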
def w_clip(w1, w2, outSP=True, silent_island_warning=False):
'''
Clip a continuous W object (w1) with a different W object (w2) so only cells where
w2 has a non-zero value remain with non-zero values in w1.
Checks on w1 and w2 are performed to make sure they conform to the
appropriate format and, if not, they are converted.
Parameters
----------
w1 : W
pysal.W, scipy.sparse.csr.csr_matrix
Potentially continuous weights matrix to be clipped. The clipped
matrix wc will have at most the same elements as w1.
w2 : W
pysal.W, scipy.sparse.csr.csr_matrix
Weights matrix to use as shell to clip w1. Automatically
converted to binary format. Only non-zero elements in w2 will be
kept non-zero in wc. NOTE: assumed to be of the same shape as w1
outSP : boolean
        If True (default), return a sparse version of the clipped W; if
        False, return a pysal.W object of the clipped matrix
silent_island_warning : boolean
Switch to turn off (default on) print statements
for every observation with islands
Returns
-------
wc : W
pysal.W, scipy.sparse.csr.csr_matrix
        Clipped W object (sparse if outSP=True). It inherits
``id_order`` from w1.
Examples
--------
>>> import pysal as ps
First create a W object from a lattice using queen contiguity and
row-standardize it (note that these weights will stay when we clip the
    object, but they will not necessarily represent a row-standardization
anymore):
>>> w1 = ps.lat2W(3, 2, rook=False)
>>> w1.transform = 'R'
We will clip that geography assuming observations 0, 2, 3 and 4 belong to
one group and 1, 5 belong to another group and we don't want both groups
to interact with each other in our weights (i.e. w_ij = 0 if i and j in
different groups). For that, we use the following method:
>>> w2 = ps.block_weights(['r1', 'r2', 'r1', 'r1', 'r1', 'r2'])
To illustrate that w2 will only be considered as binary even when the
object passed is not, we can row-standardize it
>>> w2.transform = 'R'
The clipped object ``wc`` will contain only the spatial queen
relationships that occur within one group ('r1' or 'r2') but will have
gotten rid of those that happen across groups
>>> wcs = ps.weights.Wsets.w_clip(w1, w2, outSP=True)
This will create a sparse object (recommended when n is large).
>>> wcs.sparse.toarray()
array([[ 0. , 0. , 0.33333333, 0.33333333, 0. ,
0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. ],
[ 0.2 , 0. , 0. , 0.2 , 0.2 ,
0. ],
[ 0.2 , 0. , 0.2 , 0. , 0.2 ,
0. ],
[ 0. , 0. , 0.33333333, 0.33333333, 0. ,
0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. ]])
If we wanted an original W object, we can control that with the argument
``outSP``:
>>> wc = ps.weights.Wsets.w_clip(w1, w2, outSP=False)
WARNING: there are 2 disconnected observations
Island ids: [1, 5]
>>> wc.full()[0]
array([[ 0. , 0. , 0.33333333, 0.33333333, 0. ,
0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. ],
[ 0.2 , 0. , 0. , 0.2 , 0.2 ,
0. ],
[ 0.2 , 0. , 0.2 , 0. , 0.2 ,
0. ],
[ 0. , 0. , 0.33333333, 0.33333333, 0. ,
0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. ]])
You can check they are actually the same:
>>> wcs.sparse.toarray() == wc.full()[0]
array([[ True, True, True, True, True, True],
[ True, True, True, True, True, True],
[ True, True, True, True, True, True],
[ True, True, True, True, True, True],
[ True, True, True, True, True, True],
[ True, True, True, True, True, True]], dtype=bool)
'''
if not w1.id_order:
w1.id_order = None
id_order = w1.id_order
if not isspmatrix_csr(w1):
w1 = w1.sparse
if not isspmatrix_csr(w2):
w2 = w2.sparse
w2.data = ones(w2.data.shape)
wc = w1.multiply(w2)
wc = pysal.weights.WSP(wc, id_order=id_order)
if not outSP:
wc = pysal.weights.WSP2W(wc, silent_island_warning=silent_island_warning)
return wc
| bsd-3-clause |
deandunbar/html2bwml | venv/lib/python2.7/site-packages/django/core/files/move.py | 103 | 3164 | """
Move a file in the safest way possible::
>>> from django.core.files.move import file_move_safe
>>> file_move_safe("/tmp/old_file", "/tmp/new_file")
"""
import os
from django.core.files import locks
try:
from shutil import copystat
except ImportError:
import stat
def copystat(src, dst):
"""Copy all stat info (mode bits, atime and mtime) from src to dst"""
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
if hasattr(os, 'utime'):
os.utime(dst, (st.st_atime, st.st_mtime))
if hasattr(os, 'chmod'):
os.chmod(dst, mode)
__all__ = ['file_move_safe']
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def file_move_safe(old_file_name, new_file_name, chunk_size=1024 * 64, allow_overwrite=False):
"""
Moves a file from one location to another in the safest way possible.
First, tries ``os.rename``, which is simple but will break across filesystems.
If that fails, streams manually from one file to another in pure Python.
If the destination file exists and ``allow_overwrite`` is ``False``, this
function will throw an ``IOError``.
"""
# There's no reason to move if we don't have to.
if _samefile(old_file_name, new_file_name):
return
try:
# If the destination file exists and allow_overwrite is False then raise an IOError
if not allow_overwrite and os.access(new_file_name, os.F_OK):
raise IOError("Destination file %s exists and allow_overwrite is False" % new_file_name)
os.rename(old_file_name, new_file_name)
return
except OSError:
# This will happen with os.rename if moving to another filesystem
# or when moving opened files on certain operating systems
pass
# first open the old file, so that it won't go away
with open(old_file_name, 'rb') as old_file:
# now open the new file, not forgetting allow_overwrite
fd = os.open(new_file_name, (os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0) |
(os.O_EXCL if not allow_overwrite else 0)))
try:
locks.lock(fd, locks.LOCK_EX)
current_chunk = None
while current_chunk != b'':
current_chunk = old_file.read(chunk_size)
os.write(fd, current_chunk)
finally:
locks.unlock(fd)
os.close(fd)
copystat(old_file_name, new_file_name)
try:
os.remove(old_file_name)
except OSError as e:
# Certain operating systems (Cygwin and Windows)
# fail when deleting opened files, ignore it. (For the
# systems where this happens, temporary files will be auto-deleted
# on close anyway.)
if getattr(e, 'winerror', 0) != 32 and getattr(e, 'errno', 0) != 13:
raise
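# Editorial usage sketch (not part of the original module; paths are
# hypothetical): a move that must not clobber an existing destination raises
# IOError unless allow_overwrite is set.
#
#     from django.core.files.move import file_move_safe
#     file_move_safe('/tmp/report.old', '/tmp/report.new', allow_overwrite=True)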
| mit |
haad/ansible | test/units/modules/network/f5/test_bigip_iapp_service.py | 28 | 12006 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.bigip_iapp_service import Parameters
from library.bigip_iapp_service import ModuleManager
from library.bigip_iapp_service import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_iapp_service import Parameters
from ansible.modules.network.f5.bigip_iapp_service import ModuleManager
from ansible.modules.network.f5.bigip_iapp_service import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters_keys(self):
args = load_fixture('create_iapp_service_parameters_f5_http.json')
p = Parameters(params=args)
# Assert the top-level keys
assert p.name == 'http_example'
assert p.partition == 'Common'
assert p.template == '/Common/f5.http'
assert p.deviceGroup == 'none'
assert p.inheritedTrafficGroup == 'true'
assert p.inheritedDevicegroup == 'true'
assert p.traffic_group == '/Common/traffic-group-local-only'
def test_module_parameters_lists(self):
args = load_fixture('create_iapp_service_parameters_f5_http.json')
p = Parameters(params=args)
assert 'lists' in p._values
assert p.lists[0]['name'] == 'irules__irules'
assert p.lists[0]['encrypted'] == 'no'
assert len(p.lists[0]['value']) == 1
assert p.lists[0]['value'][0] == '/Common/lgyft'
assert p.lists[1]['name'] == 'net__client_vlan'
assert p.lists[1]['encrypted'] == 'no'
assert len(p.lists[1]['value']) == 1
assert p.lists[1]['value'][0] == '/Common/net2'
def test_module_parameters_tables(self):
args = load_fixture('create_iapp_service_parameters_f5_http.json')
p = Parameters(params=args)
assert 'tables' in p._values
assert 'columnNames' in p.tables[0]
assert len(p.tables[0]['columnNames']) == 1
assert p.tables[0]['columnNames'][0] == 'name'
assert 'name' in p.tables[0]
assert p.tables[0]['name'] == 'pool__hosts'
assert 'rows' in p.tables[0]
assert len(p.tables[0]['rows']) == 1
assert 'row' in p.tables[0]['rows'][0]
assert len(p.tables[0]['rows'][0]['row']) == 1
assert p.tables[0]['rows'][0]['row'][0] == 'demo.example.com'
assert len(p.tables[1]['rows']) == 2
assert 'row' in p.tables[0]['rows'][0]
assert len(p.tables[1]['rows'][0]['row']) == 2
assert p.tables[1]['rows'][0]['row'][0] == '10.1.1.1'
assert p.tables[1]['rows'][0]['row'][1] == '0'
assert p.tables[1]['rows'][1]['row'][0] == '10.1.1.2'
assert p.tables[1]['rows'][1]['row'][1] == '0'
def test_module_parameters_variables(self):
args = load_fixture('create_iapp_service_parameters_f5_http.json')
p = Parameters(params=args)
assert 'variables' in p._values
assert len(p.variables) == 34
# Assert one configuration value
assert 'name' in p.variables[0]
assert 'value' in p.variables[0]
assert p.variables[0]['name'] == 'afm__dos_security_profile'
assert p.variables[0]['value'] == '/#do_not_use#'
# Assert a second configuration value
assert 'name' in p.variables[1]
assert 'value' in p.variables[1]
assert p.variables[1]['name'] == 'afm__policy'
assert p.variables[1]['value'] == '/#do_not_use#'
def test_module_strict_updates_from_top_level(self):
# Assumes the user did not provide any parameters
args = dict(
strict_updates=True
)
p = Parameters(params=args)
assert p.strict_updates == 'enabled'
args = dict(
strict_updates=False
)
p = Parameters(params=args)
assert p.strict_updates == 'disabled'
def test_module_strict_updates_override_from_top_level(self):
args = dict(
strict_updates=True,
parameters=dict(
strictUpdates='disabled'
)
)
p = Parameters(params=args)
assert p.strict_updates == 'enabled'
args = dict(
strict_updates=False,
parameters=dict(
strictUpdates='enabled'
)
)
p = Parameters(params=args)
assert p.strict_updates == 'disabled'
def test_module_strict_updates_only_parameters(self):
args = dict(
parameters=dict(
strictUpdates='disabled'
)
)
p = Parameters(params=args)
assert p.strict_updates == 'disabled'
args = dict(
parameters=dict(
strictUpdates='enabled'
)
)
p = Parameters(params=args)
assert p.strict_updates == 'enabled'
def test_api_strict_updates_from_top_level(self):
args = dict(
strictUpdates='enabled'
)
p = Parameters(params=args)
assert p.strict_updates == 'enabled'
args = dict(
strictUpdates='disabled'
)
p = Parameters(params=args)
assert p.strict_updates == 'disabled'
def test_api_parameters_variables(self):
args = dict(
variables=[
dict(
name="client__http_compression",
encrypted="no",
value="/#create_new#"
)
]
)
p = Parameters(params=args)
assert p.variables[0]['name'] == 'client__http_compression'
def test_api_parameters_tables(self):
args = dict(
tables=[
{
"name": "pool__members",
"columnNames": [
"addr",
"port",
"connection_limit"
],
"rows": [
{
"row": [
"12.12.12.12",
"80",
"0"
]
},
{
"row": [
"13.13.13.13",
"443",
10
]
}
]
}
]
)
p = Parameters(params=args)
assert p.tables[0]['name'] == 'pool__members'
assert p.tables[0]['columnNames'] == ['addr', 'port', 'connection_limit']
assert len(p.tables[0]['rows']) == 2
assert 'row' in p.tables[0]['rows'][0]
assert 'row' in p.tables[0]['rows'][1]
assert p.tables[0]['rows'][0]['row'] == ['12.12.12.12', '80', '0']
assert p.tables[0]['rows'][1]['row'] == ['13.13.13.13', '443', '10']
def test_api_parameters_device_group(self):
args = dict(
deviceGroup='none'
)
p = Parameters(params=args)
assert p.deviceGroup == 'none'
def test_api_parameters_inherited_traffic_group(self):
args = dict(
inheritedTrafficGroup='true'
)
p = Parameters(params=args)
assert p.inheritedTrafficGroup == 'true'
def test_api_parameters_inherited_devicegroup(self):
args = dict(
inheritedDevicegroup='true'
)
p = Parameters(params=args)
assert p.inheritedDevicegroup == 'true'
def test_api_parameters_traffic_group(self):
args = dict(
trafficGroup='/Common/traffic-group-local-only'
)
p = Parameters(params=args)
assert p.traffic_group == '/Common/traffic-group-local-only'
def test_module_template_same_partition(self):
args = dict(
template='foo',
partition='bar'
)
p = Parameters(params=args)
assert p.template == '/bar/foo'
def test_module_template_same_partition_full_path(self):
args = dict(
template='/bar/foo',
partition='bar'
)
p = Parameters(params=args)
assert p.template == '/bar/foo'
def test_module_template_different_partition_full_path(self):
args = dict(
template='/Common/foo',
partition='bar'
)
p = Parameters(params=args)
assert p.template == '/Common/foo'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_service(self, *args):
parameters = load_fixture('create_iapp_service_parameters_f5_http.json')
set_module_args(dict(
name='foo',
template='f5.http',
parameters=parameters,
state='present',
password='passsword',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
def test_update_agent_status_traps(self, *args):
parameters = load_fixture('update_iapp_service_parameters_f5_http.json')
set_module_args(dict(
name='foo',
template='f5.http',
parameters=parameters,
state='present',
password='passsword',
server='localhost',
user='admin'
))
# Configure the parameters that would be returned by querying the
# remote device
parameters = load_fixture('create_iapp_service_parameters_f5_http.json')
current = Parameters(parameters)
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=True)
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
| gpl-3.0 |
valkjsaaa/sl4a | python/src/Lib/encodings/cp863.py | 593 | 34508 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP863.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp863',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
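# Editorial usage sketch (not part of the generated module): once the
# encodings package registers this codec, it is reachable through the normal
# str/unicode APIs (Python 2 semantics, matching this file):
#
#     u'\xe9'.encode('cp863')  # -> '\x82' (LATIN SMALL LETTER E WITH ACUTE)
#     '\x82'.decode('cp863')   # -> u'\xe9'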
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00b6, # PILCROW SIGN
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x2017, # DOUBLE LOW LINE
0x008e: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x008f: 0x00a7, # SECTION SIGN
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0092: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x0095: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00a4, # CURRENCY SIGN
0x0099: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x009e: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00a6, # BROKEN BAR
0x00a1: 0x00b4, # ACUTE ACCENT
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00a8, # DIAERESIS
0x00a5: 0x00b8, # CEDILLA
0x00a6: 0x00b3, # SUPERSCRIPT THREE
0x00a7: 0x00af, # MACRON
0x00a8: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xc2' # 0x0084 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xb6' # 0x0086 -> PILCROW SIGN
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u2017' # 0x008d -> DOUBLE LOW LINE
u'\xc0' # 0x008e -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xa7' # 0x008f -> SECTION SIGN
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xc8' # 0x0091 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xca' # 0x0092 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xcb' # 0x0094 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcf' # 0x0095 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xa4' # 0x0098 -> CURRENCY SIGN
u'\xd4' # 0x0099 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xa2' # 0x009b -> CENT SIGN
u'\xa3' # 0x009c -> POUND SIGN
u'\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xdb' # 0x009e -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xa6' # 0x00a0 -> BROKEN BAR
u'\xb4' # 0x00a1 -> ACUTE ACCENT
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xa8' # 0x00a4 -> DIAERESIS
u'\xb8' # 0x00a5 -> CEDILLA
u'\xb3' # 0x00a6 -> SUPERSCRIPT THREE
u'\xaf' # 0x00a7 -> MACRON
u'\xce' # 0x00a8 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xbe' # 0x00ad -> VULGAR FRACTION THREE QUARTERS
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x0098, # CURRENCY SIGN
0x00a6: 0x00a0, # BROKEN BAR
0x00a7: 0x008f, # SECTION SIGN
0x00a8: 0x00a4, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00af: 0x00a7, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00a6, # SUPERSCRIPT THREE
0x00b4: 0x00a1, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x0086, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00a5, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00ad, # VULGAR FRACTION THREE QUARTERS
0x00c0: 0x008e, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c2: 0x0084, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x0091, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x0092, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x0094, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00ce: 0x00a8, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x0095, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d4: 0x0099, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE
0x00db: 0x009e, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x2017: 0x008d, # DOUBLE LOW LINE
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
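# Usage note (an assumption about this module's head, which is not shown
# here): generated codec tables like these are normally consumed through
# the codecs charmap helpers, e.g.
#
#     codecs.charmap_decode('\x82', 'strict', decoding_table)
#     # -> (u'\xe9', 1), matching the 0x0082 row above
#
# encoding_map supplies the inverse byte-value lookup for charmap_encode.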
| apache-2.0 |
dlew/joda-time-android | utils/resource_extractor.py | 9 | 2399 | #!/usr/bin/env python
"""
Extracts selected resources from resources directories.
Useful for grabbing translations from AOSP.
Point it at the base files in the library (in /res/values) and it
will find all the alternate versions in the other resource directories.
"""
import os
import shutil
from optparse import OptionParser
import xml.etree.ElementTree as ET
ET.register_namespace('android', "http://schemas.android.com/apk/res/android")
ET.register_namespace('xliff', "urn:oasis:names:tc:xliff:document:1.2")
ANDROID_XML_DECLARATION = '<?xml version="1.0" encoding="UTF-8"?>'
def extract_names(path):
names = {}
tree = ET.parse(path)
for child in tree.getroot().iter():
if 'name' not in child.attrib:
continue
names[child.attrib['name']] = None
return names
def extract(names, res_dir, out_dir):
# Clear the current output directory
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
os.makedirs(out_dir)
for root, dirs, files in os.walk(res_dir):
for file in files:
path = os.path.join(root, file)
fileName, fileExtension = os.path.splitext(path)
if fileExtension == ".xml":
xml = extract_file(names, path)
if xml is not None:
val_dir = os.path.join(out_dir, os.path.relpath(root, res_dir))
if not os.path.exists(val_dir):
os.makedirs(val_dir)
xml.write(os.path.join(val_dir, file),
encoding = 'utf-8',
xml_declaration = ANDROID_XML_DECLARATION,
method = 'xml')
def extract_file(names, path):
tree = ET.parse(path)
root = tree.getroot()
if root.tag != "resources":
return
to_remove = []
found = False
for child in tree.iter():
# Only look at second-level nodes
if child not in root:
continue
if 'name' not in child.attrib or child.attrib['name'] not in names:
to_remove.append(child)
else:
found = True
for child in to_remove:
root.remove(child)
if found:
return tree
if __name__ == "__main__":
usage = "usage: %prog [options] baseResFile1 baseResFile2 ..."
parser = OptionParser(usage=usage)
parser.add_option('-r', '--res', action="store", help="Resources directory location (/res/)", default="res/")
parser.add_option('-o', '--out', action="store", help="Output directory", default="out/")
options, args = parser.parse_args()
names = {}
for arg in args:
names = dict(names.items() + extract_names(arg).items())
extract(names, options.res, options.out)
| apache-2.0 |
frankvdp/django | tests/filtered_relation/tests.py | 43 | 16742 | from django.db import connection, transaction
from django.db.models import Case, Count, F, FilteredRelation, Q, When
from django.test import TestCase
from django.test.testcases import skipUnlessDBFeature
from .models import Author, Book, Borrower, Editor, RentalSession, Reservation
class FilteredRelationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.author1 = Author.objects.create(name='Alice')
cls.author2 = Author.objects.create(name='Jane')
cls.editor_a = Editor.objects.create(name='a')
cls.editor_b = Editor.objects.create(name='b')
cls.book1 = Book.objects.create(
title='Poem by Alice',
editor=cls.editor_a,
author=cls.author1,
)
cls.book1.generic_author.set([cls.author2])
cls.book2 = Book.objects.create(
title='The book by Jane A',
editor=cls.editor_b,
author=cls.author2,
)
cls.book3 = Book.objects.create(
title='The book by Jane B',
editor=cls.editor_b,
author=cls.author2,
)
cls.book4 = Book.objects.create(
title='The book by Alice',
editor=cls.editor_a,
author=cls.author1,
)
cls.author1.favorite_books.add(cls.book2)
cls.author1.favorite_books.add(cls.book3)
def test_select_related(self):
qs = Author.objects.annotate(
book_join=FilteredRelation('book'),
).select_related('book_join__editor').order_by('pk', 'book_join__pk')
with self.assertNumQueries(1):
self.assertQuerysetEqual(qs, [
(self.author1, self.book1, self.editor_a, self.author1),
(self.author1, self.book4, self.editor_a, self.author1),
(self.author2, self.book2, self.editor_b, self.author2),
(self.author2, self.book3, self.editor_b, self.author2),
], lambda x: (x, x.book_join, x.book_join.editor, x.book_join.author))
def test_select_related_foreign_key(self):
qs = Book.objects.annotate(
author_join=FilteredRelation('author'),
).select_related('author_join').order_by('pk')
with self.assertNumQueries(1):
self.assertQuerysetEqual(qs, [
(self.book1, self.author1),
(self.book2, self.author2),
(self.book3, self.author2),
(self.book4, self.author1),
], lambda x: (x, x.author_join))
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_select_related_foreign_key_for_update_of(self):
with transaction.atomic():
qs = Book.objects.annotate(
author_join=FilteredRelation('author'),
).select_related('author_join').select_for_update(of=('self',)).order_by('pk')
with self.assertNumQueries(1):
self.assertQuerysetEqual(qs, [
(self.book1, self.author1),
(self.book2, self.author2),
(self.book3, self.author2),
(self.book4, self.author1),
], lambda x: (x, x.author_join))
def test_without_join(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')),
),
[self.author1, self.author2]
)
def test_with_join(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')),
).filter(book_alice__isnull=False),
[self.author1]
)
def test_with_join_and_complex_condition(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
'book', condition=Q(
Q(book__title__iexact='poem by alice') |
Q(book__state=Book.RENTED)
),
),
).filter(book_alice__isnull=False),
[self.author1]
)
def test_internal_queryset_alias_mapping(self):
queryset = Author.objects.annotate(
book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')),
).filter(book_alice__isnull=False)
self.assertIn(
'INNER JOIN {} book_alice ON'.format(connection.ops.quote_name('filtered_relation_book')),
str(queryset.query)
)
def test_with_multiple_filter(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_editor_a=FilteredRelation(
'book',
condition=Q(book__title__icontains='book', book__editor_id=self.editor_a.pk),
),
).filter(book_editor_a__isnull=False),
[self.author1]
)
def test_multiple_times(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_title_alice=FilteredRelation('book', condition=Q(book__title__icontains='alice')),
).filter(book_title_alice__isnull=False).filter(book_title_alice__isnull=False).distinct(),
[self.author1]
)
def test_exclude_relation_with_join(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation('book', condition=~Q(book__title__icontains='alice')),
).filter(book_alice__isnull=False).distinct(),
[self.author2]
)
def test_with_m2m(self):
qs = Author.objects.annotate(
favorite_books_written_by_jane=FilteredRelation(
'favorite_books', condition=Q(favorite_books__in=[self.book2]),
),
).filter(favorite_books_written_by_jane__isnull=False)
self.assertSequenceEqual(qs, [self.author1])
def test_with_m2m_deep(self):
qs = Author.objects.annotate(
favorite_books_written_by_jane=FilteredRelation(
'favorite_books', condition=Q(favorite_books__author=self.author2),
),
).filter(favorite_books_written_by_jane__title='The book by Jane B')
self.assertSequenceEqual(qs, [self.author1])
def test_with_m2m_multijoin(self):
qs = Author.objects.annotate(
favorite_books_written_by_jane=FilteredRelation(
'favorite_books', condition=Q(favorite_books__author=self.author2),
)
).filter(favorite_books_written_by_jane__editor__name='b').distinct()
self.assertSequenceEqual(qs, [self.author1])
def test_values_list(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')),
).filter(book_alice__isnull=False).values_list('book_alice__title', flat=True),
['Poem by Alice']
)
def test_values(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')),
).filter(book_alice__isnull=False).values(),
[{'id': self.author1.pk, 'name': 'Alice', 'content_type_id': None, 'object_id': None}]
)
def test_extra(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')),
).filter(book_alice__isnull=False).extra(where=['1 = 1']),
[self.author1]
)
@skipUnlessDBFeature('supports_select_union')
def test_union(self):
qs1 = Author.objects.annotate(
book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')),
).filter(book_alice__isnull=False)
qs2 = Author.objects.annotate(
book_jane=FilteredRelation('book', condition=Q(book__title__iexact='the book by jane a')),
).filter(book_jane__isnull=False)
self.assertSequenceEqual(qs1.union(qs2), [self.author1, self.author2])
@skipUnlessDBFeature('supports_select_intersection')
def test_intersection(self):
qs1 = Author.objects.annotate(
book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')),
).filter(book_alice__isnull=False)
qs2 = Author.objects.annotate(
book_jane=FilteredRelation('book', condition=Q(book__title__iexact='the book by jane a')),
).filter(book_jane__isnull=False)
self.assertSequenceEqual(qs1.intersection(qs2), [])
@skipUnlessDBFeature('supports_select_difference')
def test_difference(self):
qs1 = Author.objects.annotate(
book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')),
).filter(book_alice__isnull=False)
qs2 = Author.objects.annotate(
book_jane=FilteredRelation('book', condition=Q(book__title__iexact='the book by jane a')),
).filter(book_jane__isnull=False)
self.assertSequenceEqual(qs1.difference(qs2), [self.author1])
def test_select_for_update(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_jane=FilteredRelation('book', condition=Q(book__title__iexact='the book by jane a')),
).filter(book_jane__isnull=False).select_for_update(),
[self.author2]
)
def test_defer(self):
# One query for the list and one query for the deferred title.
with self.assertNumQueries(2):
self.assertQuerysetEqual(
Author.objects.annotate(
book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')),
).filter(book_alice__isnull=False).select_related('book_alice').defer('book_alice__title'),
['Poem by Alice'], lambda author: author.book_alice.title
)
def test_only_not_supported(self):
msg = 'only() is not supported with FilteredRelation.'
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(
book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')),
).filter(book_alice__isnull=False).select_related('book_alice').only('book_alice__state')
def test_as_subquery(self):
inner_qs = Author.objects.annotate(
book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')),
).filter(book_alice__isnull=False)
qs = Author.objects.filter(id__in=inner_qs)
self.assertSequenceEqual(qs, [self.author1])
def test_with_foreign_key_error(self):
msg = (
"FilteredRelation's condition doesn't support nested relations "
"(got 'author__favorite_books__author')."
)
with self.assertRaisesMessage(ValueError, msg):
list(Book.objects.annotate(
alice_favorite_books=FilteredRelation(
'author__favorite_books',
condition=Q(author__favorite_books__author=self.author1),
)
))
def test_with_foreign_key_on_condition_error(self):
msg = (
"FilteredRelation's condition doesn't support nested relations "
"(got 'book__editor__name__icontains')."
)
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.annotate(
book_edited_by_b=FilteredRelation('book', condition=Q(book__editor__name__icontains='b')),
))
def test_with_empty_relation_name_error(self):
with self.assertRaisesMessage(ValueError, 'relation_name cannot be empty.'):
FilteredRelation('', condition=Q(blank=''))
def test_with_condition_as_expression_error(self):
msg = 'condition argument must be a Q() instance.'
expression = Case(
When(book__title__iexact='poem by alice', then=True), default=False,
)
with self.assertRaisesMessage(ValueError, msg):
FilteredRelation('book', condition=expression)
def test_with_prefetch_related(self):
msg = 'prefetch_related() is not supported with FilteredRelation.'
qs = Author.objects.annotate(
book_title_contains_b=FilteredRelation('book', condition=Q(book__title__icontains='b')),
).filter(
book_title_contains_b__isnull=False,
)
with self.assertRaisesMessage(ValueError, msg):
qs.prefetch_related('book_title_contains_b')
with self.assertRaisesMessage(ValueError, msg):
qs.prefetch_related('book_title_contains_b__editor')
def test_with_generic_foreign_key(self):
self.assertSequenceEqual(
Book.objects.annotate(
generic_authored_book=FilteredRelation(
'generic_author',
condition=Q(generic_author__isnull=False)
),
).filter(generic_authored_book__isnull=False),
[self.book1]
)
class FilteredRelationAggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.author1 = Author.objects.create(name='Alice')
cls.editor_a = Editor.objects.create(name='a')
cls.book1 = Book.objects.create(
title='Poem by Alice',
editor=cls.editor_a,
author=cls.author1,
)
cls.borrower1 = Borrower.objects.create(name='Jenny')
cls.borrower2 = Borrower.objects.create(name='Kevin')
# borrower 1 reserves, rents, and returns book1.
Reservation.objects.create(
borrower=cls.borrower1,
book=cls.book1,
state=Reservation.STOPPED,
)
RentalSession.objects.create(
borrower=cls.borrower1,
book=cls.book1,
state=RentalSession.STOPPED,
)
# borrower2 reserves, rents, and returns book1.
Reservation.objects.create(
borrower=cls.borrower2,
book=cls.book1,
state=Reservation.STOPPED,
)
RentalSession.objects.create(
borrower=cls.borrower2,
book=cls.book1,
state=RentalSession.STOPPED,
)
def test_aggregate(self):
"""
filtered_relation() not only improves performance but also creates
correct results when aggregating with multiple LEFT JOINs.
Books can be reserved then rented by a borrower. Each reservation and
rental session are recorded with Reservation and RentalSession models.
Every time a reservation or a rental session is over, their state is
changed to 'stopped'.
Goal: Count number of books that are either currently reserved or
rented by borrower1 or available.
"""
qs = Book.objects.annotate(
is_reserved_or_rented_by=Case(
When(reservation__state=Reservation.NEW, then=F('reservation__borrower__pk')),
When(rental_session__state=RentalSession.NEW, then=F('rental_session__borrower__pk')),
default=None,
)
).filter(
Q(is_reserved_or_rented_by=self.borrower1.pk) | Q(state=Book.AVAILABLE)
).distinct()
self.assertEqual(qs.count(), 1)
# Although count() is 1, annotating the same queryset with Count('pk')
# returns 4 because the duplicated LEFT JOIN rows inflate the aggregate.
self.assertSequenceEqual(qs.annotate(total=Count('pk')).values('total'), [{'total': 4}])
# With FilteredRelation, the result is as expected (1).
qs = Book.objects.annotate(
active_reservations=FilteredRelation(
'reservation', condition=Q(
reservation__state=Reservation.NEW,
reservation__borrower=self.borrower1,
)
),
).annotate(
active_rental_sessions=FilteredRelation(
'rental_session', condition=Q(
rental_session__state=RentalSession.NEW,
rental_session__borrower=self.borrower1,
)
),
).filter(
(Q(active_reservations__isnull=False) | Q(active_rental_sessions__isnull=False)) |
Q(state=Book.AVAILABLE)
).distinct()
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs.annotate(total=Count('pk')).values('total'), [{'total': 1}])
| bsd-3-clause |
anbangr/trusted-nova | nova/api/openstack/compute/contrib/flavormanage.py | 8 | 3129 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
import webob
from nova.api.openstack.compute import flavors as flavors_api
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import instance_types
from nova import exception
from nova import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'flavormanage')
class FlavorManageController(wsgi.Controller):
"""
The Flavor Lifecycle API controller for the OpenStack API.
"""
_view_builder_class = flavors_view.ViewBuilder
def __init__(self):
super(FlavorManageController, self).__init__()
@wsgi.action("delete")
def _delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
flavor = instance_types.get_instance_type_by_flavor_id(id)
except exception.NotFound, e:
raise webob.exc.HTTPNotFound(explanation=str(e))
instance_types.destroy(flavor['name'])
return webob.Response(status_int=202)
@wsgi.action("create")
@wsgi.serializers(xml=flavors_api.FlavorTemplate)
def _create(self, req, body):
context = req.environ['nova.context']
authorize(context)
vals = body['flavor']
name = vals['name']
flavorid = vals['id']
memory_mb = vals.get('ram')
vcpus = vals.get('vcpus')
root_gb = vals.get('disk')
ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral')
swap = vals.get('swap')
rxtx_factor = vals.get('rxtx_factor')
try:
flavor = instance_types.create(name, memory_mb, vcpus,
root_gb, ephemeral_gb, flavorid,
swap, rxtx_factor)
except exception.InstanceTypeExists as err:
raise webob.exc.HTTPConflict(explanation=str(err))
return self._view_builder.show(req, flavor)
class Flavormanage(extensions.ExtensionDescriptor):
"""
Flavor create/delete API support
"""
name = "FlavorManage"
alias = "os-flavor-manage"
namespace = ("http://docs.openstack.org/compute/ext/"
"flavor_manage/api/v1.1")
updated = "2012-01-19T00:00:00+00:00"
def get_controller_extensions(self):
controller = FlavorManageController()
extension = extensions.ControllerExtension(self, 'flavors', controller)
return [extension]
| apache-2.0 |
andaag/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison of the decision boundaries generated on the iris dataset
by Label Propagation and by SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
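# The label -1 marks a point as unlabeled for the semi-supervised
# estimators, so y_30/y_50 hide roughly 30%/50% of the true labels while
# the feature matrix X stays fully observed.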
# We create the LabelSpreading and SVM estimators and fit our data. We do
# not scale the data since we want to plot the points in the original
# feature space.
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
yanchen036/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/wishart_test.py | 15 | 15481 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Wishart."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import linalg
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
distributions = distributions_lib
def make_pd(start, n):
"""Deterministically create a positive definite matrix."""
x = np.tril(linalg.circulant(np.arange(start, start + n)))
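# x is lower triangular with the strictly positive diagonal
# start, ..., start + n - 1, hence nonsingular, so x.dot(x.T) below is
# symmetric positive definite.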
return np.dot(x, x.T)
def chol(x):
"""Compute Cholesky factorization."""
return linalg.cholesky(x).T
def wishart_var(df, x):
"""Compute Wishart variance for numpy scale matrix."""
x = np.sqrt(df) * np.asarray(x)
d = np.expand_dims(np.diag(x), -1)
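# Elementwise this is Var(W_ij) = df * (s_ij**2 + s_ii * s_jj) for scale
# entries s_ij -- the standard Wishart variance identity -- with the df
# factor absorbed via the sqrt(df) rescaling above.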
return x**2 + np.dot(d, d.T)
class WishartCholeskyTest(test.TestCase):
def testEntropy(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
# sp.stats.wishart(df=4, scale=make_pd(1., 2)).entropy()
self.assertAllClose(6.301387092430769, w.entropy().eval())
w = distributions.WishartCholesky(df=1, scale=[[1.]])
# sp.stats.wishart(df=1,scale=1).entropy()
self.assertAllClose(0.78375711047393404, w.entropy().eval())
def testMeanLogDetAndLogNormalizingConstant(self):
with self.test_session():
def entropy_alt(w):
return (
w.log_normalization()
- 0.5 * (w.df - w.dimension - 1.) * w.mean_log_det()
+ 0.5 * w.df * w.dimension).eval()
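# entropy_alt reassembles the standard Wishart entropy decomposition
# (an identity, restated here for clarity): with p = dimension,
# H = log B(scale, df) - (df - p - 1)/2 * E[log det W] + df * p / 2,
# which should agree with the closed-form entropy().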
w = distributions.WishartCholesky(df=4,
scale=chol(make_pd(1., 2)))
self.assertAllClose(w.entropy().eval(), entropy_alt(w))
w = distributions.WishartCholesky(df=5, scale=[[1.]])
self.assertAllClose(w.entropy().eval(), entropy_alt(w))
def testMean(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual(df * scale, w.mean().eval())
def testMode(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual((df - 2. - 1.) * scale, w.mode().eval())
def testStd(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual(chol(wishart_var(df, scale)), w.stddev().eval())
def testVariance(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual(wishart_var(df, scale), w.variance().eval())
def testSample(self):
with self.test_session():
scale = make_pd(1., 2)
df = 4
chol_w = distributions.WishartCholesky(
df, chol(scale), cholesky_input_output_matrices=False)
x = chol_w.sample(1, seed=42).eval()
chol_x = [chol(x[0])]
full_w = distributions.WishartFull(
df, scale, cholesky_input_output_matrices=False)
self.assertAllClose(x, full_w.sample(1, seed=42).eval())
chol_w_chol = distributions.WishartCholesky(
df, chol(scale), cholesky_input_output_matrices=True)
self.assertAllClose(chol_x, chol_w_chol.sample(1, seed=42).eval())
eigen_values = array_ops.matrix_diag_part(
chol_w_chol.sample(
1000, seed=42))
np.testing.assert_array_less(0., eigen_values.eval())
full_w_chol = distributions.WishartFull(
df, scale, cholesky_input_output_matrices=True)
self.assertAllClose(chol_x, full_w_chol.sample(1, seed=42).eval())
eigen_values = array_ops.matrix_diag_part(
full_w_chol.sample(
1000, seed=42))
np.testing.assert_array_less(0., eigen_values.eval())
# Check first and second moments.
df = 4.
chol_w = distributions.WishartCholesky(
df=df,
scale=chol(make_pd(1., 3)),
cholesky_input_output_matrices=False)
x = chol_w.sample(10000, seed=42)
self.assertAllEqual((10000, 3, 3), x.get_shape())
moment1_estimate = math_ops.reduce_mean(x, reduction_indices=[0]).eval()
self.assertAllClose(chol_w.mean().eval(), moment1_estimate, rtol=0.05)
# The variance estimate uses the squares rather than outer products
# because Wishart.variance() is the diagonal of the Wishart covariance
# matrix.
variance_estimate = (math_ops.reduce_mean(
math_ops.square(x), reduction_indices=[0]) -
math_ops.square(moment1_estimate)).eval()
self.assertAllClose(
chol_w.variance().eval(), variance_estimate, rtol=0.05)
# Test that sampling with the same seed twice gives the same results.
def testSampleMultipleTimes(self):
with self.test_session():
df = 4.
n_val = 100
random_seed.set_random_seed(654321)
chol_w1 = distributions.WishartCholesky(
df=df,
scale=chol(make_pd(1., 3)),
cholesky_input_output_matrices=False,
name="wishart1")
samples1 = chol_w1.sample(n_val, seed=123456).eval()
random_seed.set_random_seed(654321)
chol_w2 = distributions.WishartCholesky(
df=df,
scale=chol(make_pd(1., 3)),
cholesky_input_output_matrices=False,
name="wishart2")
samples2 = chol_w2.sample(n_val, seed=123456).eval()
self.assertAllClose(samples1, samples2)
def testProb(self):
with self.test_session():
# Generate some positive definite (pd) matrices and their Cholesky
# factorizations.
x = np.array(
[make_pd(1., 2), make_pd(2., 2), make_pd(3., 2), make_pd(4., 2)])
chol_x = np.array([chol(x[0]), chol(x[1]), chol(x[2]), chol(x[3])])
# Since Wishart wasn't added to SciPy until 0.16, we'll spot check some
# pdfs with hard-coded results from upstream SciPy.
log_prob_df_seq = np.array([
# math.log(stats.wishart.pdf(x[0], df=2+0, scale=x[0]))
-3.5310242469692907,
# math.log(stats.wishart.pdf(x[1], df=2+1, scale=x[1]))
-7.689907330328961,
# math.log(stats.wishart.pdf(x[2], df=2+2, scale=x[2]))
-10.815845159537895,
# math.log(stats.wishart.pdf(x[3], df=2+3, scale=x[3]))
-13.640549882916691,
])
# This test checks that batches don't interfere with correctness.
w = distributions.WishartCholesky(
df=[2, 3, 4, 5],
scale=chol_x,
cholesky_input_output_matrices=True)
self.assertAllClose(log_prob_df_seq, w.log_prob(chol_x).eval())
# Now we test various constructions of Wishart with different sample
# shape.
log_prob = np.array([
# math.log(stats.wishart.pdf(x[0], df=4, scale=x[0]))
-4.224171427529236,
# math.log(stats.wishart.pdf(x[1], df=4, scale=x[0]))
-6.3378770664093453,
# math.log(stats.wishart.pdf(x[2], df=4, scale=x[0]))
-12.026946850193017,
# math.log(stats.wishart.pdf(x[3], df=4, scale=x[0]))
-20.951582705289454,
])
for w in (
distributions.WishartCholesky(
df=4,
scale=chol_x[0],
cholesky_input_output_matrices=False),
distributions.WishartFull(
df=4,
scale=x[0],
cholesky_input_output_matrices=False)):
self.assertAllEqual((2, 2), w.event_shape_tensor().eval())
self.assertEqual(2, w.dimension.eval())
self.assertAllClose(log_prob[0], w.log_prob(x[0]).eval())
self.assertAllClose(log_prob[0:2], w.log_prob(x[0:2]).eval())
self.assertAllClose(
np.reshape(log_prob, (2, 2)),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).eval())
self.assertAllClose(
np.reshape(np.exp(log_prob), (2, 2)),
w.prob(np.reshape(x, (2, 2, 2, 2))).eval())
self.assertAllEqual((2, 2),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape())
for w in (
distributions.WishartCholesky(
df=4,
scale=chol_x[0],
cholesky_input_output_matrices=True),
distributions.WishartFull(
df=4,
scale=x[0],
cholesky_input_output_matrices=True)):
self.assertAllEqual((2, 2), w.event_shape_tensor().eval())
self.assertEqual(2, w.dimension.eval())
self.assertAllClose(log_prob[0], w.log_prob(chol_x[0]).eval())
self.assertAllClose(log_prob[0:2], w.log_prob(chol_x[0:2]).eval())
self.assertAllClose(
np.reshape(log_prob, (2, 2)),
w.log_prob(np.reshape(chol_x, (2, 2, 2, 2))).eval())
self.assertAllClose(
np.reshape(np.exp(log_prob), (2, 2)),
w.prob(np.reshape(chol_x, (2, 2, 2, 2))).eval())
self.assertAllEqual((2, 2),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape())
def testBatchShape(self):
with self.test_session() as sess:
scale = make_pd(1., 2)
chol_scale = chol(scale)
w = distributions.WishartCholesky(df=4, scale=chol_scale)
self.assertAllEqual([], w.batch_shape)
self.assertAllEqual([], w.batch_shape_tensor().eval())
w = distributions.WishartCholesky(
df=[4., 4], scale=np.array([chol_scale, chol_scale]))
self.assertAllEqual([2], w.batch_shape)
self.assertAllEqual([2], w.batch_shape_tensor().eval())
scale_deferred = array_ops.placeholder(dtypes.float32)
w = distributions.WishartCholesky(df=4, scale=scale_deferred)
self.assertAllEqual(
[], sess.run(w.batch_shape_tensor(),
feed_dict={scale_deferred: chol_scale}))
self.assertAllEqual(
[2],
sess.run(w.batch_shape_tensor(),
feed_dict={scale_deferred: [chol_scale, chol_scale]}))
def testEventShape(self):
with self.test_session() as sess:
scale = make_pd(1., 2)
chol_scale = chol(scale)
w = distributions.WishartCholesky(df=4, scale=chol_scale)
self.assertAllEqual([2, 2], w.event_shape)
self.assertAllEqual([2, 2], w.event_shape_tensor().eval())
w = distributions.WishartCholesky(
df=[4., 4], scale=np.array([chol_scale, chol_scale]))
self.assertAllEqual([2, 2], w.event_shape)
self.assertAllEqual([2, 2], w.event_shape_tensor().eval())
scale_deferred = array_ops.placeholder(dtypes.float32)
w = distributions.WishartCholesky(df=4, scale=scale_deferred)
self.assertAllEqual(
[2, 2],
sess.run(w.event_shape_tensor(),
feed_dict={scale_deferred: chol_scale}))
self.assertAllEqual(
[2, 2],
sess.run(w.event_shape_tensor(),
feed_dict={scale_deferred: [chol_scale, chol_scale]}))
def testValidateArgs(self):
with self.test_session() as sess:
df_deferred = array_ops.placeholder(dtypes.float32)
chol_scale_deferred = array_ops.placeholder(dtypes.float32)
x = make_pd(1., 3)
chol_scale = chol(x)
# Check expensive, deferred assertions.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"cannot be less than"):
chol_w = distributions.WishartCholesky(
df=df_deferred,
scale=chol_scale_deferred,
validate_args=True)
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 2.,
chol_scale_deferred: chol_scale})
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Cholesky decomposition was not successful"):
chol_w = distributions.WishartFull(
df=df_deferred, scale=chol_scale_deferred)
# np.ones((3, 3)) is not positive definite.
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={
df_deferred: 4.,
chol_scale_deferred: np.ones(
(3, 3), dtype=np.float32)
})
with self.assertRaisesOpError("scale must be square"):
chol_w = distributions.WishartCholesky(
df=4.,
scale=np.array([[2., 3., 4.], [1., 2., 3.]], dtype=np.float32),
validate_args=True)
sess.run(chol_w.scale().eval())
# Ensure no assertions.
chol_w = distributions.WishartCholesky(
df=df_deferred,
scale=chol_scale_deferred,
validate_args=False)
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 4,
chol_scale_deferred: chol_scale})
# Bogus log_prob, but since we have no checks running... c'est la vie.
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 4,
chol_scale_deferred: np.ones((3, 3))})
def testStaticAsserts(self):
with self.test_session():
x = make_pd(1., 3)
chol_scale = chol(x)
# Still has these assertions because they're resolvable at graph
# construction time.
with self.assertRaisesRegexp(ValueError, "cannot be less than"):
distributions.WishartCholesky(
df=2, scale=chol_scale, validate_args=False)
with self.assertRaisesRegexp(TypeError, "Argument tril must have dtype"):
distributions.WishartCholesky(
df=4.,
scale=np.asarray(
chol_scale, dtype=np.int32),
validate_args=False)
def testSampleBroadcasts(self):
dims = 2
batch_shape = [2, 3]
sample_shape = [2, 1]
scale = np.float32([
[[1., 0.5],
[0.5, 1.]],
[[0.5, 0.25],
[0.25, 0.75]],
])
scale = np.reshape(np.concatenate([scale, scale, scale], axis=0),
batch_shape + [dims, dims])
wishart = distributions.WishartFull(df=5, scale=scale)
x = wishart.sample(sample_shape, seed=42)
with self.test_session() as sess:
x_ = sess.run(x)
expected_shape = sample_shape + batch_shape + [dims, dims]
self.assertAllEqual(expected_shape, x.shape)
self.assertAllEqual(expected_shape, x_.shape)
if __name__ == "__main__":
test.main()
| apache-2.0 |
bzennn/blog_flask | python/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/codingstatemachine.py | 2931 | 2318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart
from .compat import wrap_ord
class CodingStateMachine:
def __init__(self, sm):
self._mModel = sm
self._mCurrentBytePos = 0
self._mCurrentCharLen = 0
self.reset()
def reset(self):
self._mCurrentState = eStart
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
# PY3K: aBuf is a byte stream, so c is an int, not a byte
byteCls = self._mModel['classTable'][wrap_ord(c)]
if self._mCurrentState == eStart:
self._mCurrentBytePos = 0
self._mCurrentCharLen = self._mModel['charLenTable'][byteCls]
# from byte's class and stateTable, we get its next state
curr_state = (self._mCurrentState * self._mModel['classFactor']
+ byteCls)
self._mCurrentState = self._mModel['stateTable'][curr_state]
self._mCurrentBytePos += 1
return self._mCurrentState
def get_current_charlen(self):
return self._mCurrentCharLen
def get_coding_state_machine(self):
return self._mModel['name']
| gpl-3.0 |
levkar/odoo-addons | stock_remit/wizard/stock_print_remit.py | 1 | 2063 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class stock_print_remit(osv.osv_memory):
_name = 'stock.print_remit'
_description = "Print Remit"
_columns = {
'remit_number': fields.char('Remit Number'),
}
def default_get(self, cr, uid, fields, context=None):
res = super(stock_print_remit, self).default_get(
cr, uid, fields, context=context)
if 'active_id' not in context:
return res
picking_obj = self.pool.get('stock.picking')
picking_id = context['active_id']
picking = picking_obj.browse(cr, uid, picking_id, context=context)
if isinstance(picking, list):
picking = picking[0]
print 'picking', picking
if not picking.remit_number:
picking_obj.set_remit_number(
cr, uid, picking_id, context=context)
picking = picking_obj.browse(cr, uid, picking_id, context=context)
res['remit_number'] = picking.remit_number
return res
def recompute_sequence_number(self, cr, uid, ids, context=None):
if 'active_id' not in context:
return False
picking_obj = self.pool.get('stock.picking')
picking_id = context['active_id']
picking = picking_obj.browse(cr, uid, picking_id, context=context)
if isinstance(picking, list):
picking = picking[0]
picking_obj.set_remit_number(
cr, uid, picking_id, context=context)
picking = picking_obj.browse(cr, uid, picking_id, context=context)
vals = {'remit_number': picking.remit_number}
return {'value': vals}
def print_stock_picking(self, cr, uid, ids, context=None):
if context is None:
context = {}
picking_obj = self.pool['stock.picking']
if 'active_id' not in context:
return False
picking_id = context['active_id']
context['from_wizard'] = True
return picking_obj.do_print_picking(
cr, uid, picking_id, context=context)
| agpl-3.0 |
arupiot/deskcontrol | deskcontrol/config.py | 1 | 4175 | import os
NAME_AUTHORITY = os.environ.get("NAME_AUTHORITY", "eightfitzroy.arupiot.com")
DEVICE_NAME = os.environ.get("DEVICE_NAME", "TST-1")
HOST = os.environ.get("BRICKD_HOST", "localhost")
PORT = int(os.environ.get("BRICKD_PORT", "4223"))
SHORT_IDENT = os.environ.get("SHORT_IDENT", "test")
MODULES = []
MENU_MODULES = []
MQTT_CONFIG = {}
ZMQ_CONFIG = {}
GCLOUD_CONFIG = {}
INFLUX_AUTH = {}
ENVAR_MODULES = {
"ENABLE_MODULE_MENU": ("MenuModule", "navigation", "Navigation"),
"ENABLE_MODULE_INPUT": ("InputModule", "inputs", "Inputs"),
"ENABLE_MODULE_SLEEP": ("SleepModule", "sleep", "Sleep"),
"ENABLE_MODULE_RFID": ("RFIDModule", "rfid", "RFID"),
"ENABLE_MODULE_INFLUX": ("InfluxModule", "influx", "InfluxDB"),
"ENABLE_MODULE_HTTP_PUSH": ("HttpPushModule", "httppush", "HTTPpush"),
"ENABLE_MODULE_TF_SCREEN": ("TFScreen", "tfscreen", "TF Screen"),
"ENABLE_MODULE_KIVY_SCREEN": ("KivyScreen", "kivyscreen", "Kivy Screen"),
"ENABLE_MODULE_GOOGLE_IOT": ("GoogleIoTModule", "googleiot", "GoogleIoT"),
"ENABLE_MODULE_MQTT": ("MQTTModule", "mqtt_module", "MQTT"),
"ENABLE_MODULE_ZMQ": ("ZMQModule", "zmq_module", "ZMQ"),
"ENABLE_MODULE_KILN": ("KilnModule", "kiln", "Kiln"),
"ENABLE_MODULE_PICKLE": ("PickleModule", "pickle", "Local Storage"),
}
for envar in ENVAR_MODULES:
if os.environ.get(envar):
MODULES.append(ENVAR_MODULES[envar])
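# e.g. exporting ENABLE_MODULE_MQTT=1 (any non-empty value satisfies
# os.environ.get) appends ("MQTTModule", "mqtt_module", "MQTT") to MODULES.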
ENVAR_MENU_MODULES = {
"ENABLE_MENU_SENSOR": ("SensorModule", "sensors", "Sensors"),
"ENABLE_MENU_LIGHTING": ("LightingModule", "lighting", "Lighting"),
"ENABLE_MENU_DC_POWER": ("DCPowerModule", "dcpower", "Power"),
"ENABLE_MENU_AC_POWER": ("ACPowerModule", "acpower", "Power"), # Works on RPi only
"ENABLE_MENU_NETWORK": ("NetworkModule", "network", "Network")
}
for envar in ENVAR_MENU_MODULES:
if os.environ.get(envar):
MODULES.append(ENVAR_MENU_MODULES[envar])
if os.environ.get("ENABLE_MODULE_INFLUX"):
INFLUX_AUTH = {
"host": os.environ.get("INFLUXDB_HOST", "127.0.0.1"),
"port": int(os.environ.get("INFLUXDB_PORT", "8086")),
"user": os.environ.get("INFLUXDB_USER", "admin"),
"pass": os.environ.get("INFLUXDB_PASS", "admin"),
"db": os.environ.get("INFLUXDB_DB", "iotdesks"),
"ssl": bool(os.environ.get("INFLUXDB_HOST"))
}
if os.environ.get("ENABLE_MODULE_GOOGLE_IOT"):
GCLOUD_CONFIG = {
"project_id": os.environ.get("GCLOUD_PROJECT_ID", "digital-building-0000000000000"),
"cloud_region": os.environ.get("GCLOUD_REGION", "europe-west1"),
"registry_id": os.environ.get("GCLOUD_REGISTRY_ID", "iotdesks"),
"device_id": os.environ.get("GCLOUD_DEVICE_ID", "XXXX"),
"private_key_file": os.environ.get("GCLOUD_PRIVATE_KEY_FILE", "keys/rsa_private.pem"),
"algorithm": os.environ.get("GCLOUD_ALGORITHM", "RS256"),
"ca_certs": os.environ.get("GCLOUD_CA_CERTS", "keys/google.pem"),
"mqtt_bridge_hostname": os.environ.get("GCLOUD_MQTT_HOST", "mqtt.googleapis.com"),
"mqtt_bridge_port": int(os.environ.get("GCLOUD_MQTT_PORT", "8883")),
}
if os.environ.get("ENABLE_MODULE_MQTT"):
MQTT_CONFIG = {
"mqtt_username": os.environ.get("MQTT_USERNAME"),
"mqtt_password": os.environ.get("MQTT_PASSWORD"),
"mqtt_client_id": os.environ.get("MQTT_CLIENT_ID", "test"),
"mqtt_broker_host": os.environ.get("MQTT_BROKER_HOST"),
"mqtt_broker_port": int(os.environ.get("MQTT_BROKER_PORT", "8883")),
"mqtt_publish_topic": os.environ.get("MQTT_PUBLISH_TOPIC", "/ishiki/test/events"),
"mqtt_subscribe_topic": os.environ.get("MQTT_SUBSCRIBE_TOPIC", "/ishiki/test/commands"),
}
if os.environ.get("ENABLE_MODULE_ZMQ"):
ZMQ_CONFIG = {
"zmq_port": os.environ.get("ZMQ_PORT"),
"zmq_topic": os.environ.get("ZMQ_TOPIC"),
}
SCHEMA_POST_URL = ""
PICKLEDB = "deskcontrol.db"
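# Allow an optional config_local.py to override any UPPERCASE setting defined above; a missing file is silently ignored.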
try:
config_module = __import__('config_local',
globals(), locals())
for setting in dir(config_module):
if setting == setting.upper():
locals()[setting] = getattr(config_module, setting)
except Exception:
pass
| mit |
Robpol86/coveralls-multi-ci | tests/test_git_stats_detached.py | 1 | 2466 | import subprocess
from coveralls_multi_ci import git_stats
def test_master(repo_dir, hashes):
hex_sha = hashes['master']
assert 0 == subprocess.check_call(['git', 'checkout', '-qf', hex_sha], cwd=repo_dir)
actual = git_stats(repo_dir)
expected = dict(
branch='master',
remotes=[dict(name='origin', url='http://localhost/git.git'), ],
head=dict(
id=hex_sha,
author_name='MrsAuthor',
author_email='[email protected]',
committer_name='MrCommit',
committer_email='[email protected]',
message='Committing empty file.'
)
)
assert expected == actual
def test_feature_branch(repo_dir, hashes):
hex_sha = hashes['feature']
assert 0 == subprocess.check_call(['git', 'checkout', '-qf', hex_sha], cwd=repo_dir)
actual = git_stats(repo_dir)
expected = dict(
branch='feature',
remotes=[dict(name='origin', url='http://localhost/git.git'), ],
head=dict(
id=hex_sha,
author_name='MrCommit',
author_email='[email protected]',
committer_name='MrCommit',
committer_email='[email protected]',
message='Wrote to file.'
)
)
assert expected == actual
def test_tag_annotated(repo_dir, hashes):
hex_sha = hashes['tag_annotated']
assert 0 == subprocess.check_call(['git', 'checkout', '-qf', hex_sha], cwd=repo_dir)
actual = git_stats(repo_dir)
expected = dict(
branch='v1.0',
remotes=[dict(name='origin', url='http://localhost/git.git'), ],
head=dict(
id=hex_sha,
author_name='MrCommit',
author_email='[email protected]',
committer_name='MrCommit',
committer_email='[email protected]',
message='Wrote to file2.'
)
)
assert expected == actual
def test_tag_light(repo_dir, hashes):
hex_sha = hashes['tag_light']
assert 0 == subprocess.check_call(['git', 'checkout', '-qf', hex_sha], cwd=repo_dir)
actual = git_stats(repo_dir)
expected = dict(
branch='v1.0l',
remotes=[dict(name='origin', url='http://localhost/git.git'), ],
head=dict(
id=hex_sha,
author_name='MrCommit',
author_email='[email protected]',
committer_name='MrCommit',
committer_email='[email protected]',
message='Wrote to file3.'
)
)
assert expected == actual
| mit |
BT-ojossen/l10n-switzerland | l10n_ch_bank/migrations/8.0.9.0.0/pre-migration.py | 10 | 1402 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
The banks were created in the l10n_ch module because they used
the wrong namespace (i.e. ``l10n_ch.bank_0``). The records are now created
in the correct module, but we have to fix the existing records.
"""
def migrate(cr, version):
if not version:
return
query = ("UPDATE ir_model_data "
"SET module = 'l10n_ch_bank' "
"WHERE module = 'l10n_ch' "
"AND model = 'res.bank' ")
cr.execute(query)
| agpl-3.0 |
romankagan/DDBWorkbench | plugins/hg4idea/testData/bin/hgext/largefiles/basestore.py | 92 | 7659 | # Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''base class for store implementations and store-related utility code'''
import re
from mercurial import util, node, hg
from mercurial.i18n import _
import lfutil
class StoreError(Exception):
'''Raised when there is a problem getting files from or putting
files to a central store.'''
def __init__(self, filename, hash, url, detail):
self.filename = filename
self.hash = hash
self.url = url
self.detail = detail
def longmessage(self):
return (_("error getting id %s from url %s for file %s: %s\n") %
(self.hash, self.url, self.filename, self.detail))
def __str__(self):
return "%s: %s" % (self.url, self.detail)
class basestore(object):
def __init__(self, ui, repo, url):
self.ui = ui
self.repo = repo
self.url = url
def put(self, source, hash):
'''Put source file into the store so it can be retrieved by hash.'''
raise NotImplementedError('abstract method')
def exists(self, hashes):
'''Check to see if the store contains the given hashes. Given an
iterable of hashes it returns a mapping from hash to bool.'''
raise NotImplementedError('abstract method')
def get(self, files):
'''Get the specified largefiles from the store and write to local
files under repo.root. files is a list of (filename, hash)
tuples. Return (success, missing), lists of files successfully
downloaded and those not found in the store. success is a list
of (filename, hash) tuples; missing is a list of filenames that
we could not get. (The detailed error message will already have
been presented to the user, so missing is just supplied as a
summary.)'''
success = []
missing = []
ui = self.ui
util.makedirs(lfutil.storepath(self.repo, ''))
at = 0
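# Query the store once for all hashes up front instead of once per file.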
available = self.exists(set(hash for (_filename, hash) in files))
for filename, hash in files:
ui.progress(_('getting largefiles'), at, unit='lfile',
total=len(files))
at += 1
ui.note(_('getting %s:%s\n') % (filename, hash))
if not available.get(hash):
ui.warn(_('%s: largefile %s not available from %s\n')
% (filename, hash, self.url))
missing.append(filename)
continue
storefilename = lfutil.storepath(self.repo, hash)
tmpfile = util.atomictempfile(storefilename + '.tmp',
createmode=self.repo.store.createmode)
try:
hhash = self._getfile(tmpfile, filename, hash)
except StoreError, err:
ui.warn(err.longmessage())
hhash = ""
tmpfile.close()
if hhash != hash:
if hhash != "":
ui.warn(_('%s: data corruption (expected %s, got %s)\n')
% (filename, hash, hhash))
util.unlink(storefilename + '.tmp')
missing.append(filename)
continue
util.rename(storefilename + '.tmp', storefilename)
lfutil.linktousercache(self.repo, hash)
success.append((filename, hhash))
ui.progress(_('getting largefiles'), None)
return (success, missing)
def verify(self, revs, contents=False):
'''Verify the existence (and, optionally, contents) of every big
file revision referenced by every changeset in revs.
Return 0 if all is well, non-zero on any errors.'''
failed = False
self.ui.status(_('searching %d changesets for largefiles\n') %
len(revs))
verified = set() # set of (filename, filenode) tuples
for rev in revs:
cctx = self.repo[rev]
cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))
for standin in cctx:
if self._verifyfile(cctx, cset, contents, standin, verified):
failed = True
numrevs = len(verified)
numlfiles = len(set([fname for (fname, fnode) in verified]))
if contents:
self.ui.status(
_('verified contents of %d revisions of %d largefiles\n')
% (numrevs, numlfiles))
else:
self.ui.status(
_('verified existence of %d revisions of %d largefiles\n')
% (numrevs, numlfiles))
return int(failed)
def _getfile(self, tmpfile, filename, hash):
'''Fetch one revision of one file from the store and write it
to tmpfile. Compute the hash of the file on-the-fly as it
downloads and return the hash. Close tmpfile. Raise
StoreError if unable to download the file (e.g. it does not
exist in the store).'''
raise NotImplementedError('abstract method')
def _verifyfile(self, cctx, cset, contents, standin, verified):
'''Perform the actual verification of a file in the store.
'cset' is only used in warnings.
'contents' controls verification of content hash.
'standin' is the standin path of the largefile to verify.
'verified' is maintained as a set of already verified files.
Returns _true_ if it is a standin and any problems are found!
'''
raise NotImplementedError('abstract method')
import localstore, wirestore
_storeprovider = {
'file': [localstore.localstore],
'http': [wirestore.wirestore],
'https': [wirestore.wirestore],
'ssh': [wirestore.wirestore],
}
_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
# During clone this function is passed the src's ui object
# but it needs the dest's ui object so it can read out of
# the config file. Use repo.ui instead.
def _openstore(repo, remote=None, put=False):
ui = repo.ui
if not remote:
lfpullsource = getattr(repo, 'lfpullsource', None)
if lfpullsource:
path = ui.expandpath(lfpullsource)
else:
path = ui.expandpath('default-push', 'default')
# ui.expandpath() leaves 'default-push' and 'default' alone if
# they cannot be expanded: fallback to the empty string,
# meaning the current directory.
if path == 'default-push' or path == 'default':
path = ''
remote = repo
else:
path, _branches = hg.parseurl(path)
remote = hg.peer(repo, {}, path)
# The path could be a scheme so use Mercurial's normal functionality
# to resolve the scheme to a repository and use its path
path = util.safehasattr(remote, 'url') and remote.url() or remote.path
match = _scheme_re.match(path)
if not match: # regular filesystem path
scheme = 'file'
else:
scheme = match.group(1)
try:
storeproviders = _storeprovider[scheme]
except KeyError:
raise util.Abort(_('unsupported URL scheme %r') % scheme)
for classobj in storeproviders:
try:
return classobj(ui, repo, remote)
except lfutil.storeprotonotcapable:
pass
raise util.Abort(_('%s does not appear to be a largefile store') % path)
| apache-2.0 |
zenodo/invenio | invenio/modules/deposit/types/simplerecord.py | 1 | 3085 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from flask_login import current_user
from flask import render_template
from invenio.modules.deposit.models import DepositionType, Deposition
from invenio.modules.formatter import format_record
from invenio.modules.deposit.tasks import render_form, \
create_recid, \
prepare_sip, \
finalize_record_sip, \
upload_record_sip, \
prefill_draft, \
process_sip_metadata, \
hold_for_approval
class SimpleRecordDeposition(DepositionType):
"""Simple record submission - no support for editing nor REST API."""
workflow = [
# Pre-fill draft with values passed in from request
prefill_draft(draft_id='default'),
# Render form and wait for user to submit
render_form(draft_id='default'),
# Create the submission information package by merging form data
# from all drafts (in this case only one draft exists).
prepare_sip(),
# Process metadata to match your JSONAlchemy record model. This will
# call process_sip_metadata() on your subclass.
process_sip_metadata(),
# Reserve a new record id, so that we can provide proper feedback to
# user before the record has been uploaded.
create_recid(),
# Generate MARC based on metadata dictionary.
finalize_record_sip(is_dump=False),
# Hold the deposition for admin approval
hold_for_approval(),
# Seal the SIP and write MARCXML file and call bibupload on it
upload_record_sip(),
]
hold_for_upload = False
@classmethod
def render_completed(cls, d):
"""Page to render when deposition was successfully completed."""
ctx = dict(
deposition=d,
deposition_type=(
None if d.type.is_default() else d.type.get_identifier()
),
uuid=d.id,
my_depositions=list(Deposition.get_depositions(
current_user, type=d.type
)),
sip=d.get_latest_sip(),
format_record=format_record,
)
return render_template('deposit/completed.html', **ctx)
@classmethod
def process_sip_metadata(cls, deposition, metadata):
"""Implement this method in your subclass to process metadata prior to MARC generation."""
pass
| gpl-2.0 |
takeflight/cookiecutter | tests/replay/test_load.py | 8 | 1855 | # -*- coding: utf-8 -*-
"""
test_load
-----------
"""
import json
import os
import pytest
from cookiecutter import replay
@pytest.fixture
def template_name():
"""Fixture to return a valid template_name."""
return 'cookiedozer_load'
@pytest.fixture
def replay_file(replay_test_dir, template_name):
"""Fixture to return a actual file name of the dump."""
file_name = '{}.json'.format(template_name)
return os.path.join(replay_test_dir, file_name)
def test_type_error_if_no_template_name():
"""Test that replay.load raises if the tempate_name is not a valid str."""
with pytest.raises(TypeError):
replay.load(None)
def test_value_error_if_key_missing_in_context(mocker):
"""Test that replay.load raises if the loaded context does not contain
'cookiecutter'.
"""
with pytest.raises(ValueError):
replay.load('invalid_replay')
def test_io_error_if_no_replay_file(mocker, mock_user_config):
"""Test that replay.load raises if it cannot find a replay file."""
with pytest.raises(IOError):
replay.load('no_replay')
def test_run_json_load(mocker, mock_user_config, template_name,
context, replay_test_dir, replay_file):
"""Test that replay.load runs json.load under the hood and that the context
is correctly loaded from the file in replay_dir.
"""
spy_get_replay_file = mocker.spy(replay, 'get_file_name')
mock_json_load = mocker.patch('json.load', side_effect=json.load)
loaded_context = replay.load(template_name)
assert mock_user_config.call_count == 1
spy_get_replay_file.assert_called_once_with(replay_test_dir, template_name)
assert mock_json_load.call_count == 1
(infile_handler,), kwargs = mock_json_load.call_args
assert infile_handler.name == replay_file
assert loaded_context == context
| bsd-3-clause |
gonboy/python-for-android | src/buildlib/jinja2.egg/jinja2/exceptions.py | 17 | 4424 | # -*- coding: utf-8 -*-
"""
jinja2.exceptions
~~~~~~~~~~~~~~~~~
Jinja exceptions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import imap, text_type, PY2, implements_to_string
class TemplateError(Exception):
"""Baseclass for all template errors."""
if PY2:
def __init__(self, message=None):
if message is not None:
message = text_type(message).encode('utf-8')
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message.decode('utf-8', 'replace')
def __unicode__(self):
return self.message or ''
else:
def __init__(self, message=None):
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist."""
# looks weird, but removes the warning descriptor that just
# bogusly warns us about message being deprecated
message = None
def __init__(self, name, message=None):
IOError.__init__(self)
if message is None:
message = name
self.message = message
self.name = name
self.templates = [name]
def __str__(self):
return self.message
class TemplatesNotFound(TemplateNotFound):
"""Like :class:`TemplateNotFound` but raised if multiple templates
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
.. versionadded:: 2.2
"""
def __init__(self, names=(), message=None):
if message is None:
message = 'none of the templates given were found: ' + \
', '.join(imap(text_type, names))
TemplateNotFound.__init__(self, names and names[-1] or None, message)
self.templates = list(names)
@implements_to_string
class TemplateSyntaxError(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(self, message, lineno, name=None, filename=None):
TemplateError.__init__(self, message)
self.lineno = lineno
self.name = name
self.filename = filename
self.source = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __str__(self):
# for translated errors we only return the message
if self.translated:
return self.message
# otherwise attach some stuff
location = 'line %d' % self.lineno
name = self.filename or self.name
if name:
location = 'File "%s", %s' % (name, location)
lines = [self.message, ' ' + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
line = None
if line:
lines.append(' ' + line.strip())
return '\n'.join(lines)
class TemplateAssertionError(TemplateSyntaxError):
"""Like a template syntax error, but covers cases where something in the
template caused an error at compile time that wasn't necessarily caused
by a syntax error. However it's a direct subclass of
:exc:`TemplateSyntaxError` and has the same attributes.
"""
class TemplateRuntimeError(TemplateError):
"""A generic runtime error in the template engine. Under some situations
Jinja may raise this exception.
"""
class UndefinedError(TemplateRuntimeError):
"""Raised if a template tries to operate on :class:`Undefined`."""
class SecurityError(TemplateRuntimeError):
"""Raised if a template tries to do something insecure if the
sandbox is enabled.
"""
class FilterArgumentError(TemplateRuntimeError):
"""This error is raised if a filter was called with inappropriate
arguments
"""
| mit |
uclouvain/osis | ddd/logic/application/use_case/read/get_attributions_about_to_expire_service.py | 1 | 3140 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from typing import List
from ddd.logic.application.commands import GetAttributionsAboutToExpireCommand
from ddd.logic.application.domain.builder.applicant_identity_builder import ApplicantIdentityBuilder
from ddd.logic.application.domain.service.attribution_about_to_expire_renew import AttributionAboutToExpireRenew
from ddd.logic.application.domain.service.i_learning_unit_service import ILearningUnitService
from ddd.logic.application.dtos import AttributionAboutToExpireDTO
from ddd.logic.application.repository.i_applicant_respository import IApplicantRepository
from ddd.logic.application.repository.i_application_calendar_repository import IApplicationCalendarRepository
from ddd.logic.application.repository.i_application_repository import IApplicationRepository
from ddd.logic.application.repository.i_vacant_course_repository import IVacantCourseRepository
def get_attributions_about_to_expire(
cmd: GetAttributionsAboutToExpireCommand,
application_repository: IApplicationRepository,
application_calendar_repository: IApplicationCalendarRepository,
applicant_repository: IApplicantRepository,
vacant_course_repository: IVacantCourseRepository,
learning_unit_service: ILearningUnitService,
) -> List[AttributionAboutToExpireDTO]:
# Given
application_calendar = application_calendar_repository.get_current_application_calendar()
applicant_id = ApplicantIdentityBuilder.build_from_global_id(global_id=cmd.global_id)
applicant = applicant_repository.get(applicant_id)
all_existing_applications = application_repository.search(global_id=cmd.global_id)
return AttributionAboutToExpireRenew.get_list_with_renewal_availability(
application_calendar,
applicant,
all_existing_applications,
vacant_course_repository,
learning_unit_service
)
| agpl-3.0 |
sebrandon1/nova | nova/tests/unit/api/openstack/compute/test_flavorextradata.py | 1 | 3374 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nova import test
from nova.tests.unit.api.openstack import fakes
class FlavorExtraDataTestV21(test.NoDBTestCase):
base_url = '/v2/fake/flavors'
def setUp(self):
super(FlavorExtraDataTestV21, self).setUp()
fakes.stub_out_flavor_get_all(self)
fakes.stub_out_flavor_get_by_flavor_id(self)
@property
def app(self):
return fakes.wsgi_app_v21(init_only=('flavors'))
def _verify_flavor_response(self, flavor, expected):
for key in expected:
self.assertEqual(flavor[key], expected[key])
def test_show(self):
expected = {
'flavor': {
'id': fakes.FLAVORS['1'].flavorid,
'name': fakes.FLAVORS['1'].name,
'ram': fakes.FLAVORS['1'].memory_mb,
'vcpus': fakes.FLAVORS['1'].vcpus,
'disk': fakes.FLAVORS['1'].root_gb,
'OS-FLV-EXT-DATA:ephemeral': fakes.FLAVORS['1'].ephemeral_gb,
}
}
url = self.base_url + '/1'
req = fakes.HTTPRequest.blank(url)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
body = jsonutils.loads(res.body)
self._verify_flavor_response(body['flavor'], expected['flavor'])
def test_detail(self):
expected = [
{
'id': fakes.FLAVORS['1'].flavorid,
'name': fakes.FLAVORS['1'].name,
'ram': fakes.FLAVORS['1'].memory_mb,
'vcpus': fakes.FLAVORS['1'].vcpus,
'disk': fakes.FLAVORS['1'].root_gb,
'OS-FLV-EXT-DATA:ephemeral': fakes.FLAVORS['1'].ephemeral_gb,
'rxtx_factor': fakes.FLAVORS['1'].rxtx_factor or u'',
'os-flavor-access:is_public': fakes.FLAVORS['1'].is_public,
},
{
'id': fakes.FLAVORS['2'].flavorid,
'name': fakes.FLAVORS['2'].name,
'ram': fakes.FLAVORS['2'].memory_mb,
'vcpus': fakes.FLAVORS['2'].vcpus,
'disk': fakes.FLAVORS['2'].root_gb,
'OS-FLV-EXT-DATA:ephemeral': fakes.FLAVORS['2'].ephemeral_gb,
'rxtx_factor': fakes.FLAVORS['2'].rxtx_factor or u'',
'os-flavor-access:is_public': fakes.FLAVORS['2'].is_public,
},
]
url = self.base_url + '/detail'
req = fakes.HTTPRequest.blank(url)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
body = jsonutils.loads(res.body)
for i, flavor in enumerate(body['flavors']):
self._verify_flavor_response(flavor, expected[i])
| apache-2.0 |
jashank/rust | src/etc/unicode.py | 15 | 24287 | #!/usr/bin/env python
#
# Copyright 2011-2013 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# This script uses the following Unicode tables:
# - DerivedCoreProperties.txt
# - DerivedNormalizationProps.txt
# - EastAsianWidth.txt
# - auxiliary/GraphemeBreakProperty.txt
# - PropList.txt
# - ReadMe.txt
# - Scripts.txt
# - UnicodeData.txt
#
# Since this should not require frequent updates, we just store this
# out-of-line and check the unicode.rs file into git.
import fileinput, re, os, sys, operator
preamble = '''// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// NOTE: The following code was generated by "src/etc/unicode.py", do not edit directly
#![allow(missing_docs, non_upper_case_globals, non_snake_case)]
'''
# Mapping taken from Table 12 from:
# http://www.unicode.org/reports/tr44/#General_Category_Values
expanded_categories = {
'Lu': ['LC', 'L'], 'Ll': ['LC', 'L'], 'Lt': ['LC', 'L'],
'Lm': ['L'], 'Lo': ['L'],
'Mn': ['M'], 'Mc': ['M'], 'Me': ['M'],
'Nd': ['N'], 'Nl': ['N'], 'No': ['N'],
'Pc': ['P'], 'Pd': ['P'], 'Ps': ['P'], 'Pe': ['P'],
'Pi': ['P'], 'Pf': ['P'], 'Po': ['P'],
'Sm': ['S'], 'Sc': ['S'], 'Sk': ['S'], 'So': ['S'],
'Zs': ['Z'], 'Zl': ['Z'], 'Zp': ['Z'],
'Cc': ['C'], 'Cf': ['C'], 'Cs': ['C'], 'Co': ['C'], 'Cn': ['C'],
}
# these are the surrogate codepoints, which are not valid rust characters
surrogate_codepoints = (0xd800, 0xdfff)
def fetch(f):
if not os.path.exists(os.path.basename(f)):
os.system("curl -O http://www.unicode.org/Public/UNIDATA/%s"
% f)
if not os.path.exists(os.path.basename(f)):
sys.stderr.write("cannot load %s" % f)
exit(1)
def is_surrogate(n):
return surrogate_codepoints[0] <= n <= surrogate_codepoints[1]
def load_unicode_data(f):
fetch(f)
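# UnicodeData.txt encodes some large blocks as paired '<..., First>'/'<..., Last>' entries;
# the range bookkeeping below expands them into individual codepoints.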
gencats = {}
to_lower = {}
to_upper = {}
to_title = {}
combines = {}
canon_decomp = {}
compat_decomp = {}
udict = {}
range_start = -1
for line in fileinput.input(f):
data = line.split(';')
if len(data) != 15:
continue
cp = int(data[0], 16)
if is_surrogate(cp):
continue
if range_start >= 0:
for i in xrange(range_start, cp):
udict[i] = data
range_start = -1
if data[1].endswith(", First>"):
range_start = cp
continue
udict[cp] = data
for code in udict:
[code_org, name, gencat, combine, bidi,
decomp, deci, digit, num, mirror,
old, iso, upcase, lowcase, titlecase] = udict[code]
# generate char to char direct common and simple conversions
# uppercase to lowercase
if lowcase != "" and code_org != lowcase:
to_lower[code] = (int(lowcase, 16), 0, 0)
# lowercase to uppercase
if upcase != "" and code_org != upcase:
to_upper[code] = (int(upcase, 16), 0, 0)
# title case
if titlecase.strip() != "" and code_org != titlecase:
to_title[code] = (int(titlecase, 16), 0, 0)
# store decomposition, if given
if decomp != "":
if decomp.startswith('<'):
seq = []
for i in decomp.split()[1:]:
seq.append(int(i, 16))
compat_decomp[code] = seq
else:
seq = []
for i in decomp.split():
seq.append(int(i, 16))
canon_decomp[code] = seq
# place letter in categories as appropriate
for cat in [gencat, "Assigned"] + expanded_categories.get(gencat, []):
if cat not in gencats:
gencats[cat] = []
gencats[cat].append(code)
# record combining class, if any
if combine != "0":
if combine not in combines:
combines[combine] = []
combines[combine].append(code)
# generate Not_Assigned from Assigned
gencats["Cn"] = gen_unassigned(gencats["Assigned"])
# Assigned is not a real category
del(gencats["Assigned"])
# Other contains Not_Assigned
gencats["C"].extend(gencats["Cn"])
gencats = group_cats(gencats)
combines = to_combines(group_cats(combines))
return (canon_decomp, compat_decomp, gencats, combines, to_upper, to_lower, to_title)
def load_special_casing(f, to_upper, to_lower, to_title):
fetch(f)
for line in fileinput.input(f):
data = line.split('#')[0].split(';')
if len(data) == 5:
code, lower, title, upper, _comment = data
elif len(data) == 6:
code, lower, title, upper, condition, _comment = data
if condition.strip(): # Only keep unconditional mappings
continue
else:
continue
code = code.strip()
lower = lower.strip()
title = title.strip()
upper = upper.strip()
key = int(code, 16)
for (map_, values) in [(to_lower, lower), (to_upper, upper), (to_title, title)]:
if values != code:
values = [int(i, 16) for i in values.split()]
for _ in range(len(values), 3):
values.append(0)
assert len(values) == 3
map_[key] = values
def group_cats(cats):
cats_out = {}
for cat in cats:
cats_out[cat] = group_cat(cats[cat])
return cats_out
def group_cat(cat):
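# Collapse a sorted collection of codepoints into a list of inclusive (lo, hi) ranges.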
cat_out = []
letters = sorted(set(cat))
cur_start = letters.pop(0)
cur_end = cur_start
for letter in letters:
assert letter > cur_end, \
"cur_end: %s, letter: %s" % (hex(cur_end), hex(letter))
if letter == cur_end + 1:
cur_end = letter
else:
cat_out.append((cur_start, cur_end))
cur_start = cur_end = letter
cat_out.append((cur_start, cur_end))
return cat_out
def ungroup_cat(cat):
cat_out = []
for (lo, hi) in cat:
while lo <= hi:
cat_out.append(lo)
lo += 1
return cat_out
def gen_unassigned(assigned):
assigned = set(assigned)
return ([i for i in range(0, 0xd800) if i not in assigned] +
[i for i in range(0xe000, 0x110000) if i not in assigned])
def to_combines(combs):
combs_out = []
for comb in combs:
for (lo, hi) in combs[comb]:
combs_out.append((lo, hi, comb))
combs_out.sort(key=lambda comb: comb[0])
return combs_out
def format_table_content(f, content, indent):
line = " "*indent
first = True
for chunk in content.split(","):
if len(line) + len(chunk) < 98:
if first:
line += chunk
else:
line += ", " + chunk
first = False
else:
f.write(line + ",\n")
line = " "*indent + chunk
f.write(line)
def load_properties(f, interestingprops):
fetch(f)
props = {}
re1 = re.compile("^ *([0-9A-F]+) *; *(\w+)")
re2 = re.compile("^ *([0-9A-F]+)\.\.([0-9A-F]+) *; *(\w+)")
for line in fileinput.input(os.path.basename(f)):
prop = None
d_lo = 0
d_hi = 0
m = re1.match(line)
if m:
d_lo = m.group(1)
d_hi = m.group(1)
prop = m.group(2)
else:
m = re2.match(line)
if m:
d_lo = m.group(1)
d_hi = m.group(2)
prop = m.group(3)
else:
continue
if interestingprops and prop not in interestingprops:
continue
d_lo = int(d_lo, 16)
d_hi = int(d_hi, 16)
if prop not in props:
props[prop] = []
props[prop].append((d_lo, d_hi))
# optimize if possible
for prop in props:
props[prop] = group_cat(ungroup_cat(props[prop]))
return props
# load all widths of want_widths, except those in except_cats
def load_east_asian_width(want_widths, except_cats):
f = "EastAsianWidth.txt"
fetch(f)
widths = {}
re1 = re.compile("^([0-9A-F]+);(\w+) +# (\w+)")
re2 = re.compile("^([0-9A-F]+)\.\.([0-9A-F]+);(\w+) +# (\w+)")
for line in fileinput.input(f):
width = None
d_lo = 0
d_hi = 0
cat = None
m = re1.match(line)
if m:
d_lo = m.group(1)
d_hi = m.group(1)
width = m.group(2)
cat = m.group(3)
else:
m = re2.match(line)
if m:
d_lo = m.group(1)
d_hi = m.group(2)
width = m.group(3)
cat = m.group(4)
else:
continue
if cat in except_cats or width not in want_widths:
continue
d_lo = int(d_lo, 16)
d_hi = int(d_hi, 16)
if width not in widths:
widths[width] = []
widths[width].append((d_lo, d_hi))
return widths
def escape_char(c):
return "'\\u{%x}'" % c if c != 0 else "'\\0'"
def emit_bsearch_range_table(f):
f.write("""
fn bsearch_range_table(c: char, r: &'static [(char,char)]) -> bool {
use core::cmp::Ordering::{Equal, Less, Greater};
use core::slice::SliceExt;
r.binary_search_by(|&(lo,hi)| {
if lo <= c && c <= hi { Equal }
else if hi < c { Less }
else { Greater }
}).is_ok()
}\n
""")
def emit_table(f, name, t_data, t_type = "&'static [(char, char)]", is_pub=True,
pfun=lambda x: "(%s,%s)" % (escape_char(x[0]), escape_char(x[1]))):
pub_string = ""
if is_pub:
pub_string = "pub "
f.write(" %sconst %s: %s = &[\n" % (pub_string, name, t_type))
data = ""
first = True
for dat in t_data:
if not first:
data += ","
first = False
data += pfun(dat)
format_table_content(f, data, 8)
f.write("\n ];\n\n")
def emit_property_module(f, mod, tbl, emit):
f.write("pub mod %s {\n" % mod)
for cat in sorted(emit):
emit_table(f, "%s_table" % cat, tbl[cat])
f.write(" pub fn %s(c: char) -> bool {\n" % cat)
f.write(" super::bsearch_range_table(c, %s_table)\n" % cat)
f.write(" }\n\n")
f.write("}\n\n")
def emit_conversions_module(f, to_upper, to_lower, to_title):
f.write("pub mod conversions {")
f.write("""
use core::cmp::Ordering::{Equal, Less, Greater};
use core::slice::SliceExt;
use core::option::Option;
use core::option::Option::{Some, None};
use core::result::Result::{Ok, Err};
pub fn to_lower(c: char) -> [char; 3] {
match bsearch_case_table(c, to_lowercase_table) {
None => [c, '\\0', '\\0'],
Some(index) => to_lowercase_table[index].1
}
}
pub fn to_upper(c: char) -> [char; 3] {
match bsearch_case_table(c, to_uppercase_table) {
None => [c, '\\0', '\\0'],
Some(index) => to_uppercase_table[index].1
}
}
fn bsearch_case_table(c: char, table: &'static [(char, [char; 3])]) -> Option<usize> {
match table.binary_search_by(|&(key, _)| {
if c == key { Equal }
else if key < c { Less }
else { Greater }
}) {
Ok(i) => Some(i),
Err(_) => None,
}
}
""")
t_type = "&'static [(char, [char; 3])]"
pfun = lambda x: "(%s,[%s,%s,%s])" % (
escape_char(x[0]), escape_char(x[1][0]), escape_char(x[1][1]), escape_char(x[1][2]))
emit_table(f, "to_lowercase_table",
sorted(to_lower.iteritems(), key=operator.itemgetter(0)),
is_pub=False, t_type = t_type, pfun=pfun)
emit_table(f, "to_uppercase_table",
sorted(to_upper.iteritems(), key=operator.itemgetter(0)),
is_pub=False, t_type = t_type, pfun=pfun)
f.write("}\n\n")
def emit_grapheme_module(f, grapheme_table, grapheme_cats):
f.write("""pub mod grapheme {
use core::slice::SliceExt;
pub use self::GraphemeCat::*;
use core::result::Result::{Ok, Err};
#[allow(non_camel_case_types)]
#[derive(Clone, Copy)]
pub enum GraphemeCat {
""")
for cat in grapheme_cats + ["Any"]:
f.write(" GC_" + cat + ",\n")
f.write(""" }
fn bsearch_range_value_table(c: char, r: &'static [(char, char, GraphemeCat)]) -> GraphemeCat {
use core::cmp::Ordering::{Equal, Less, Greater};
match r.binary_search_by(|&(lo, hi, _)| {
if lo <= c && c <= hi { Equal }
else if hi < c { Less }
else { Greater }
}) {
Ok(idx) => {
let (_, _, cat) = r[idx];
cat
}
Err(_) => GC_Any
}
}
pub fn grapheme_category(c: char) -> GraphemeCat {
bsearch_range_value_table(c, grapheme_cat_table)
}
""")
emit_table(f, "grapheme_cat_table", grapheme_table, "&'static [(char, char, GraphemeCat)]",
pfun=lambda x: "(%s,%s,GC_%s)" % (escape_char(x[0]), escape_char(x[1]), x[2]),
is_pub=False)
f.write("}\n")
def emit_charwidth_module(f, width_table):
f.write("pub mod charwidth {\n")
f.write(" use core::option::Option;\n")
f.write(" use core::option::Option::{Some, None};\n")
f.write(" use core::slice::SliceExt;\n")
f.write(" use core::result::Result::{Ok, Err};\n")
f.write("""
fn bsearch_range_value_table(c: char, is_cjk: bool, r: &'static [(char, char, u8, u8)]) -> u8 {
use core::cmp::Ordering::{Equal, Less, Greater};
match r.binary_search_by(|&(lo, hi, _, _)| {
if lo <= c && c <= hi { Equal }
else if hi < c { Less }
else { Greater }
}) {
Ok(idx) => {
let (_, _, r_ncjk, r_cjk) = r[idx];
if is_cjk { r_cjk } else { r_ncjk }
}
Err(_) => 1
}
}
""")
f.write("""
pub fn width(c: char, is_cjk: bool) -> Option<usize> {
match c as usize {
_c @ 0 => Some(0), // null is zero width
cu if cu < 0x20 => None, // control sequences have no width
cu if cu < 0x7F => Some(1), // ASCII
cu if cu < 0xA0 => None, // more control sequences
_ => Some(bsearch_range_value_table(c, is_cjk, charwidth_table) as usize)
}
}
""")
f.write(" // character width table. Based on Markus Kuhn's free wcwidth() implementation,\n")
f.write(" // http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c\n")
emit_table(f, "charwidth_table", width_table, "&'static [(char, char, u8, u8)]", is_pub=False,
pfun=lambda x: "(%s,%s,%s,%s)" % (escape_char(x[0]), escape_char(x[1]), x[2], x[3]))
f.write("}\n\n")
def emit_norm_module(f, canon, compat, combine, norm_props):
canon_keys = canon.keys()
canon_keys.sort()
compat_keys = compat.keys()
compat_keys.sort()
canon_comp = {}
comp_exclusions = norm_props["Full_Composition_Exclusion"]
for char in canon_keys:
if True in map(lambda (lo, hi): lo <= char <= hi, comp_exclusions):
continue
decomp = canon[char]
if len(decomp) == 2:
if not canon_comp.has_key(decomp[0]):
canon_comp[decomp[0]] = []
canon_comp[decomp[0]].append( (decomp[1], char) )
canon_comp_keys = canon_comp.keys()
canon_comp_keys.sort()
f.write("pub mod normalization {\n")
def mkdata_fun(table):
def f(char):
data = "(%s,&[" % escape_char(char)
first = True
for d in table[char]:
if not first:
data += ","
first = False
data += escape_char(d)
data += "])"
return data
return f
f.write(" // Canonical decompositions\n")
emit_table(f, "canonical_table", canon_keys, "&'static [(char, &'static [char])]",
pfun=mkdata_fun(canon))
f.write(" // Compatibility decompositions\n")
emit_table(f, "compatibility_table", compat_keys, "&'static [(char, &'static [char])]",
pfun=mkdata_fun(compat))
def comp_pfun(char):
data = "(%s,&[" % escape_char(char)
canon_comp[char].sort(lambda x, y: x[0] - y[0])
first = True
for pair in canon_comp[char]:
if not first:
data += ","
first = False
data += "(%s,%s)" % (escape_char(pair[0]), escape_char(pair[1]))
data += "])"
return data
f.write(" // Canonical compositions\n")
emit_table(f, "composition_table", canon_comp_keys,
"&'static [(char, &'static [(char, char)])]", pfun=comp_pfun)
f.write("""
fn bsearch_range_value_table(c: char, r: &'static [(char, char, u8)]) -> u8 {
use core::cmp::Ordering::{Equal, Less, Greater};
use core::slice::SliceExt;
use core::result::Result::{Ok, Err};
match r.binary_search_by(|&(lo, hi, _)| {
if lo <= c && c <= hi { Equal }
else if hi < c { Less }
else { Greater }
}) {
Ok(idx) => {
let (_, _, result) = r[idx];
result
}
Err(_) => 0
}
}\n
""")
emit_table(f, "combining_class_table", combine, "&'static [(char, char, u8)]", is_pub=False,
pfun=lambda x: "(%s,%s,%s)" % (escape_char(x[0]), escape_char(x[1]), x[2]))
f.write(""" #[deprecated(reason = "use the crates.io `unicode-normalization` lib instead",
since = "1.0.0")]
#[unstable(feature = "unicode",
reason = "this functionality will be moved to crates.io")]
pub fn canonical_combining_class(c: char) -> u8 {
bsearch_range_value_table(c, combining_class_table)
}
}
""")
def remove_from_wtable(wtable, val):
wtable_out = []
while wtable:
if wtable[0][1] < val:
wtable_out.append(wtable.pop(0))
elif wtable[0][0] > val:
break
else:
(wt_lo, wt_hi, width, width_cjk) = wtable.pop(0)
if wt_lo == wt_hi == val:
continue
elif wt_lo == val:
wtable_out.append((wt_lo+1, wt_hi, width, width_cjk))
elif wt_hi == val:
wtable_out.append((wt_lo, wt_hi-1, width, width_cjk))
else:
wtable_out.append((wt_lo, val-1, width, width_cjk))
wtable_out.append((val+1, wt_hi, width, width_cjk))
if wtable:
wtable_out.extend(wtable)
return wtable_out
def optimize_width_table(wtable):
wtable_out = []
w_this = wtable.pop(0)
while wtable:
if w_this[1] == wtable[0][0] - 1 and w_this[2:3] == wtable[0][2:3]:
w_tmp = wtable.pop(0)
w_this = (w_this[0], w_tmp[1], w_tmp[2], w_tmp[3])
else:
wtable_out.append(w_this)
w_this = wtable.pop(0)
wtable_out.append(w_this)
return wtable_out
if __name__ == "__main__":
r = "tables.rs"
if os.path.exists(r):
os.remove(r)
with open(r, "w") as rf:
# write the file's preamble
rf.write(preamble)
# download and parse all the data
fetch("ReadMe.txt")
with open("ReadMe.txt") as readme:
pattern = "for Version (\d+)\.(\d+)\.(\d+) of the Unicode"
unicode_version = re.search(pattern, readme.read()).groups()
rf.write("""
/// The version of [Unicode](http://www.unicode.org/)
/// that the unicode parts of `CharExt` and `UnicodeStrPrelude` traits are based on.
pub const UNICODE_VERSION: (u64, u64, u64) = (%s, %s, %s);
""" % unicode_version)
(canon_decomp, compat_decomp, gencats, combines,
to_upper, to_lower, to_title) = load_unicode_data("UnicodeData.txt")
load_special_casing("SpecialCasing.txt", to_upper, to_lower, to_title)
want_derived = ["XID_Start", "XID_Continue", "Alphabetic", "Lowercase", "Uppercase",
"Cased", "Case_Ignorable"]
derived = load_properties("DerivedCoreProperties.txt", want_derived)
scripts = load_properties("Scripts.txt", [])
props = load_properties("PropList.txt",
["White_Space", "Join_Control", "Noncharacter_Code_Point"])
norm_props = load_properties("DerivedNormalizationProps.txt",
["Full_Composition_Exclusion"])
# bsearch_range_table is used in all the property modules below
emit_bsearch_range_table(rf)
# category tables
for (name, cat, pfuns) in ("general_category", gencats, ["N", "Cc"]), \
("derived_property", derived, want_derived), \
("property", props, ["White_Space"]):
emit_property_module(rf, name, cat, pfuns)
# normalizations and conversions module
emit_norm_module(rf, canon_decomp, compat_decomp, combines, norm_props)
emit_conversions_module(rf, to_upper, to_lower, to_title)
### character width module
width_table = []
for zwcat in ["Me", "Mn", "Cf"]:
width_table.extend(map(lambda (lo, hi): (lo, hi, 0, 0), gencats[zwcat]))
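# U+1160..U+11FF (Hangul Jamo medial vowels and final consonants) are zero width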
width_table.append((4448, 4607, 0, 0))
# get widths, except those that are explicitly marked zero-width above
ea_widths = load_east_asian_width(["W", "F", "A"], ["Me", "Mn", "Cf"])
# these are doublewidth
for dwcat in ["W", "F"]:
width_table.extend(map(lambda (lo, hi): (lo, hi, 2, 2), ea_widths[dwcat]))
width_table.extend(map(lambda (lo, hi): (lo, hi, 1, 2), ea_widths["A"]))
width_table.sort(key=lambda w: w[0])
# soft hyphen is not zero width in preformatted text; it's used to indicate
# a hyphen inserted to facilitate a linebreak.
width_table = remove_from_wtable(width_table, 173)
# optimize the width table by collapsing adjacent entities when possible
width_table = optimize_width_table(width_table)
emit_charwidth_module(rf, width_table)
### grapheme cluster module
# from http://www.unicode.org/reports/tr29/#Grapheme_Cluster_Break_Property_Values
grapheme_cats = load_properties("auxiliary/GraphemeBreakProperty.txt", [])
# Control
# Note 1:
# This category also includes Cs (surrogate codepoints), but Rust's `char`s are
# Unicode Scalar Values only, and surrogates are thus invalid `char`s.
# Thus, we have to remove Cs from the Control category
# Note 2:
# 0x0a and 0x0d (CR and LF) are not in the Control category for Graphemes.
# However, the Graphemes iterator treats these as a special case, so they
# should be included in grapheme_cats["Control"] for our implementation.
grapheme_cats["Control"] = group_cat(list(
(set(ungroup_cat(grapheme_cats["Control"]))
| set(ungroup_cat(grapheme_cats["CR"]))
| set(ungroup_cat(grapheme_cats["LF"])))
- set(ungroup_cat([surrogate_codepoints]))))
del(grapheme_cats["CR"])
del(grapheme_cats["LF"])
grapheme_table = []
for cat in grapheme_cats:
grapheme_table.extend([(x, y, cat) for (x, y) in grapheme_cats[cat]])
grapheme_table.sort(key=lambda w: w[0])
emit_grapheme_module(rf, grapheme_table, grapheme_cats.keys())
| apache-2.0 |
riolet/rioauth | provider/pages/login_github.py | 1 | 4700 | import web
import oauthlib.oauth2.rfc6749
import constants
import common
import base
import logging
import pprint
from models import oauth_consumer
class Login(base.Page):
def __init__(self):
base.Page.__init__(self, "Riolet Login")
self.redirect_uri = unicode(constants.config.get('github', 'redirect_uri'))
self.scope = unicode(constants.config.get('github', 'request_scope'))
self.oauth = oauth_consumer.Authorization(
session=common.session,
authorization_url=constants.config.get('github', 'authorization_url'),
token_url=constants.config.get('github', 'token_url'),
client_id=constants.config.get('github', 'client_id'),
client_secret=constants.config.get('github', 'client_secret'),
default_redirect_uri=constants.config.get('github', 'redirect_uri'),
default_scope_requested=constants.config.get('github', 'request_scope'))
def get_token(self):
authorization_response = self.uri
try:
# redirect_uri must match between get_auth_code and get_token.
# scope must match between get_auth_code and get_token
token = self.oauth.fetch_token(authorization_response, redirect_uri=self.redirect_uri, scope=self.scope)
except oauthlib.oauth2.rfc6749.errors.AccessDeniedError:
print("Access was denied. Reason unknown.")
return False
except oauthlib.oauth2.rfc6749.errors.InvalidGrantError:
print("Access was denied. Error: Invalid Grant.")
return False
print("\n\nToken acquired!")
pprint.pprint(token)
print("")
return True
def get_auth_code(self):
print("redirect_uri is {0}".format(self.redirect_uri))
# redirect_uri must match between get_auth_code and get_token.
# scope must match between get_auth_code and get_token
authorization_url = self.oauth.get_auth_url(redirect_uri=self.redirect_uri, scope=self.scope)
print("redirecting to {0}".format(authorization_url))
self.redirect(authorization_url)
def login(self):
public_emails = self.oauth.request(constants.config.get('github', 'resource_url'))
# Public emails should retrieve a list of dicts of emails addresses:
# [{u'email': u'[email protected]',
# u'primary': True,
# u'verified': True,
# u'visibility': u'public'}]
if len(public_emails) == 0:
return False
email = public_emails[0]['email']
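# Prefer the address flagged as primary; otherwise fall back to the first one.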
for em in public_emails:
if em['primary'] is True:
email = em['email']
break
user = common.users.get_by_email(email)
if user is None:
# create user for that email!
# random password. Nobody should know it, ever. Login is done through GitHub.
# If user wants to choose password, they will reset it anyway.
user_id = common.users.add(email, common.generate_salt(32), email)
user = common.users.get_by_id(user_id)
self.user = user
return True
def GET(self):
if 'state' in self.data and 'code' in self.data:
print("state and code found. Assuming to be at fetch_token step.")
if self.get_token():
print("get_token returned True. setting logged_in to True")
success = self.login()
if not success:
print("should render page with errors: {}".format(self.errors))
self.redirect('/login')
common.session['logged_in'] = True
common.session['user_id'] = self.user['id']
destination = '/'
if 'login_redirect' in common.session:
destination = common.session['login_redirect']
self.redirect(destination, absolute=True)
else:
print("get_token returned False. setting logged_in to False")
common.session['logged_in'] = False
self.redirect('/login')
elif 'error' in self.data:
print("Error response.\n\t{0}".format(self.data['error']))
if 'error_description' in self.data:
print("\t{0}".format(self.data['error_description']))
return common.render.message(error=['Error logging in via GitHub.', 'Error: {}'.format(self.data['error_description'])], buttons=[('Login page', '/logout')])
else:
print("begin authentication process.")
self.get_auth_code()
# this code should be unreachable.
self.redirect('/login')
| gpl-3.0 |
sagark123/coala | tests/results/result_actions/ApplyPatchActionTest.py | 16 | 6917 | import unittest
import os
from os.path import isfile
from coala_utils.ContextManagers import make_temp
from coalib.results.Diff import Diff
from coalib.results.Result import Result
from coalib.results.result_actions.ApplyPatchAction import ApplyPatchAction
from coalib.settings.Section import Section
class ApplyPatchActionTest(unittest.TestCase):
def test_apply(self):
uut = ApplyPatchAction()
with make_temp() as f_a, make_temp() as f_b, make_temp() as f_c:
file_dict = {
f_a: ['1\n', '2\n', '3\n'],
f_b: ['1\n', '2\n', '3\n'],
f_c: ['1\n', '2\n', '3\n']
}
expected_file_dict = {
f_a: ['1\n', '3_changed\n'],
f_b: ['1\n', '2\n', '3_changed\n'],
f_c: ['1\n', '2\n', '3\n']
}
file_diff_dict = {}
diff = Diff(file_dict[f_a])
diff.delete_line(2)
uut.apply_from_section(Result('origin', 'msg', diffs={f_a: diff}),
file_dict,
file_diff_dict,
Section('t'))
diff = Diff(file_dict[f_a])
diff.change_line(3, '3\n', '3_changed\n')
uut.apply_from_section(Result('origin', 'msg', diffs={f_a: diff}),
file_dict,
file_diff_dict,
Section('t'))
diff = Diff(file_dict[f_b])
diff.change_line(3, '3\n', '3_changed\n')
uut.apply(Result('origin', 'msg', diffs={f_b: diff}),
file_dict,
file_diff_dict)
for filename in file_diff_dict:
file_dict[filename] = file_diff_dict[filename].modified
self.assertEqual(file_dict, expected_file_dict)
with open(f_a) as fa:
self.assertEqual(file_dict[f_a], fa.readlines())
with open(f_b) as fb:
self.assertEqual(file_dict[f_b], fb.readlines())
with open(f_c) as fc:
# File c was never written, so the temp file on disk is still empty
self.assertEqual([], fc.readlines())
def test_apply_orig_option(self):
uut = ApplyPatchAction()
with make_temp() as f_a, make_temp() as f_b:
file_dict = {
f_a: ['1\n', '2\n', '3\n'],
f_b: ['1\n', '2\n', '3\n']
}
expected_file_dict = {
f_a: ['1\n', '2\n', '3_changed\n'],
f_b: ['1\n', '2\n', '3_changed\n']
}
file_diff_dict = {}
diff = Diff(file_dict[f_a])
diff.change_line(3, '3\n', '3_changed\n')
uut.apply(Result('origin', 'msg', diffs={f_a: diff}),
file_dict,
file_diff_dict,
no_orig=True)
diff = Diff(file_dict[f_b])
diff.change_line(3, '3\n', '3_changed\n')
uut.apply(Result('origin', 'msg', diffs={f_b: diff}),
file_dict,
file_diff_dict,
no_orig=False)
self.assertFalse(isfile(f_a+'.orig'))
self.assertTrue(isfile(f_b+'.orig'))
for filename in file_diff_dict:
file_dict[filename] = file_diff_dict[filename].modified
self.assertEqual(file_dict, expected_file_dict)
def test_apply_rename(self):
uut = ApplyPatchAction()
with make_temp() as f_a:
file_dict = {f_a: ['1\n', '2\n', '3\n']}
expected_file_dict = {f_a+'.renamed':
['1\n', '2_changed\n', '3_changed\n']}
file_diff_dict = {}
diff = Diff(file_dict[f_a], rename=f_a+'.renamed')
diff.change_line(3, '3\n', '3_changed\n')
uut.apply(Result('origin', 'msg', diffs={f_a: diff}),
file_dict,
file_diff_dict)
self.assertTrue(isfile(f_a+'.orig'))
self.assertTrue(isfile(f_a+'.renamed'))
self.assertFalse(isfile(f_a))
diff = Diff(file_dict[f_a])
diff.change_line(2, '2\n', '2_changed\n')
uut.apply(Result('origin', 'msg', diffs={f_a: diff}),
file_dict,
file_diff_dict)
self.assertFalse(isfile(f_a+'.renamed.orig'))
file_dict = {f_a+'.renamed': open(f_a+'.renamed').readlines()}
self.assertEqual(file_dict, expected_file_dict)
# Recreate file so that context manager make_temp() can delete it
open(f_a, 'w').close()
def test_apply_delete(self):
uut = ApplyPatchAction()
with make_temp() as f_a:
file_dict = {f_a: ['1\n', '2\n', '3\n']}
file_diff_dict = {}
diff = Diff(file_dict[f_a], delete=True)
uut.apply(Result('origin', 'msg', diffs={f_a: diff}),
file_dict,
file_diff_dict)
self.assertFalse(isfile(f_a))
self.assertTrue(isfile(f_a+'.orig'))
os.remove(f_a+'.orig')
diff = Diff(file_dict[f_a])
diff.change_line(3, '3\n', '3_changed\n')
uut.apply(Result('origin', 'msg', diffs={f_a: diff}),
file_dict,
file_diff_dict)
self.assertFalse(isfile(f_a+'.orig'))
# Recreate file so that context manager make_temp() can delete it
open(f_a, 'w').close()
def test_is_applicable(self):
diff = Diff(['1\n', '2\n', '3\n'])
diff.delete_line(2)
patch_result = Result('', '', diffs={'f': diff})
self.assertTrue(
ApplyPatchAction.is_applicable(patch_result, {}, {}))
def test_is_applicable_conflict(self):
diff = Diff(['1\n', '2\n', '3\n'])
diff.add_lines(2, ['a line'])
conflict_result = Result('', '', diffs={'f': diff})
# Applying the same diff twice will result in a conflict
self.assertIn(
'Two or more patches conflict with each other: ',
ApplyPatchAction.is_applicable(conflict_result, {}, {'f': diff})
)
def test_is_applicable_empty_patch(self):
diff = Diff([], rename='new_name')
result = Result('', '', diffs={'f': diff})
# Two identical renames do not result in any further change
self.assertEqual(
ApplyPatchAction.is_applicable(result, {}, {'f': diff}),
'The given patches do not change anything anymore.'
)
def test_is_applicable_without_patch(self):
result = Result('', '')
self.assertEqual(
ApplyPatchAction.is_applicable(result, {}, {}),
'This result has no patch attached.'
)
| agpl-3.0 |
selam/retopy | run.py | 1 | 4143 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Timu EREN
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from retopy.server import RetopyServer
from retopy.server import Application
from retopy.command import CommandHandler
from retopy.command import parameter, authenticated
from retopy.command import CommandError
from tornado.ioloop import IOLoop
class MyCounterHandler(CommandHandler):
"""
Counter methods
"""
_COUNTERS = {}
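# Class-level store shared by all handler instances; counters live in process memory and are not persisted.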
@staticmethod
def _check_and_create_counter(key):
if not key in MyCounterHandler._COUNTERS:
MyCounterHandler._COUNTERS[key] = 0
@parameter(name="key")
def increment(self):
"""
Increments the number stored at key by one. If the key does not exist,
it is set to 0 before performing the operation.
"""
key = self.get_argument("key")
MyCounterHandler._check_and_create_counter(key)
MyCounterHandler._COUNTERS[key] += 1
self.write(MyCounterHandler._COUNTERS[key])
@parameter(name="key")
def decrement(self):
"""
Decrements the number stored at key by one. If the key does not exist,
it is set to 0 before performing the operation.
"""
key = self.get_argument("key")
MyCounterHandler._check_and_create_counter(key)
MyCounterHandler._COUNTERS[key] -= 1
self.write(MyCounterHandler._COUNTERS[key])
@parameter("key")
def get(self):
"""
Get the value of key. If the key does not exist error is returned
"""
key = self.get_argument("key")
if key not in MyCounterHandler._COUNTERS:
raise CommandError("%s Not found" % (key,))
self.write(MyCounterHandler._COUNTERS.get(key))
@parameter(name="key")
@parameter(name="value", type=int)
def set(self):
"""
Set key to hold the integer value. If key already holds a value, it is overwritten.
"""
key = self.get_argument("key")
MyCounterHandler._check_and_create_counter(key)
MyCounterHandler._COUNTERS[key] = self.get_argument("value")
self.write("OK")
@parameter(name="key")
def rem(self):
"""
Removes the specified keys. A key is ignored if it does not exist.
"""
key = self.get_argument("key")
try:
del MyCounterHandler._COUNTERS[key]
except KeyError:
pass
self.write("+OK")
class MyPingHandler(CommandHandler):
@parameter()
def ping(self):
"""Returns PONG. This command is often used to test if a connection is still alive, or to measure latency."""
self.write("+PONG")
class MyLoginHandler(CommandHandler):
@parameter()
@authenticated
def auth_test(self):
self.write("authorized to run this command")
@parameter(name="username")
@parameter(name="password")
def auth(self):
username = self.get_argument("username")
password = self.get_argument("password")
if username != u"myusername" or password != u"mypass":
raise CommandError("Wrong username or password")
self.command.user = username
self.write("+OK")
class MyApplication(Application):
def __init__(self):
handlers = [
(MyCounterHandler,),
(MyPingHandler,),
(MyLoginHandler,)
]
settings = {
"default_handlers": True
}
Application.__init__(self, handlers, **settings)
s = RetopyServer(MyApplication())
s.listen(8000)
IOLoop.instance().start()
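# Hedged usage sketch (not part of the original file): retopy appears to
# implement a Redis-like line protocol over TCP, so a session against this
# server might look roughly like the following, using any plain TCP client:
#
#   $ telnet localhost 8000
#   auth myusername mypass   -> +OK
#   set counter 5            -> OK
#   increment counter        -> 6
#   get counter              -> 6
#   ping                     -> +PONG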
| apache-2.0 |
Jeebeevee/DouweBot_JJ15 | plugins_org/twitter.py | 10 | 3194 | import random
import re
from time import strptime, strftime
from urllib import quote
from util import hook, http
@hook.api_key('twitter')
@hook.command
def twitter(inp, api_key=None):
".twitter <user>/<user> <n>/<id>/#<search>/#<search> <n> -- " \
"get <user>'s last/<n>th tweet/get tweet <id>/do <search>/get <n>th <search> result"
if not isinstance(api_key, dict) or any(key not in api_key for key in
('consumer', 'consumer_secret', 'access', 'access_secret')):
return "error: api keys not set"
getting_id = False
doing_search = False
index_specified = False
if re.match(r'^\d+$', inp):
getting_id = True
request_url = "https://api.twitter.com/1.1/statuses/show.json?id=%s" % inp
else:
try:
            inp, index = re.split(r'\s+', inp, 1)
index = int(index)
index_specified = True
except ValueError:
index = 0
if index < 0:
index = 0
if index >= 20:
return 'error: only supports up to the 20th tweet'
if re.match(r'^#', inp):
doing_search = True
request_url = "https://api.twitter.com/1.1/search/tweets.json?q=%s" % quote(inp)
else:
request_url = "https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=%s" % inp
try:
tweet = http.get_json(request_url, oauth=True, oauth_keys=api_key)
except http.HTTPError, e:
errors = {400: 'bad request (ratelimited?)',
401: 'unauthorized',
403: 'forbidden',
404: 'invalid user/id',
500: 'twitter is broken',
502: 'twitter is down ("getting upgraded")',
503: 'twitter is overloaded (lol, RoR)',
410: 'twitter shut off api v1.'}
if e.code == 404:
return 'error: invalid ' + ['username', 'tweet id'][getting_id]
if e.code in errors:
return 'error: ' + errors[e.code]
return 'error: unknown %s' % e.code
if doing_search:
try:
tweet = tweet["statuses"]
if not index_specified:
index = random.randint(0, len(tweet) - 1)
except KeyError:
return 'error: no results'
if not getting_id:
try:
tweet = tweet[index]
except IndexError:
return 'error: not that many tweets found'
if 'retweeted_status' in tweet:
rt = tweet["retweeted_status"]
rt_text = http.unescape(rt["text"]).replace('\n', ' ')
text = "RT @%s %s" % (rt["user"]["screen_name"], rt_text)
else:
text = http.unescape(tweet["text"]).replace('\n', ' ')
screen_name = tweet["user"]["screen_name"]
time = tweet["created_at"]
time = strftime('%Y-%m-%d %H:%M:%S',
strptime(time, '%a %b %d %H:%M:%S +0000 %Y'))
return "%s \x02%s\x02: %s" % (time, screen_name, text)
@hook.api_key('twitter')
@hook.regex(r'https?://twitter.com/(#!/)?([_0-9a-zA-Z]+)/status/(\d+)')
def show_tweet(match, api_key=None):
return twitter(match.group(3), api_key)
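# Hedged usage sketch (not part of the original plugin): in a skybot-style
# IRC session these hooks would be invoked roughly like this:
#
#   .twitter jack          -> @jack's latest tweet
#   .twitter jack 3        -> @jack's 4th-most-recent tweet (0-based index)
#   .twitter #python 2     -> 3rd result of a #python search
#   .twitter 20            -> the tweet with status id 20
#
# The regex hook additionally expands pasted links such as
# https://twitter.com/jack/status/20 into the same output.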
| unlicense |
EqAfrica/machinekit | src/hal/user_comps/hal_storage.py | 8 | 3146 | #!/usr/bin/python
# encoding: utf-8
"""
Storage.py
Created by Alexander Rössler on 2015-01-03.
"""
import time
import sys
import os
import argparse
import ConfigParser
import hal
class Pin:
def __init__(self):
self.halPin = 0
self.halName = ''
self.section = ''
self.name = ''
self.lastValue = 0.0
def savePins(cfg, filename, pins):
for pin in pins:
cfg.set(pin.section, pin.name, str(pin.halPin.value))
    with open(filename, 'w') as f:
        cfg.write(f)
def readPins(cfg, filename, pins):
cfg.read(filename)
for pin in pins:
pin.lastValue = float(cfg.get(pin.section, pin.name))
pin.halPin.value = pin.lastValue
parser = argparse.ArgumentParser(description='HAL component to store and load values')
parser.add_argument('-n', '--name', help='HAL component name', required=True)
parser.add_argument('-f', '--file', help='Filename to store values', required=True)
parser.add_argument('-x', '--on_exit', help='Save on exit', action='store_true')
parser.add_argument('-a', '--autosave', help='Automatically save on value change', action='store_true')
parser.add_argument('-l', '--autoload', help='Automatically load the file values', action='store_true')
parser.add_argument('-i', '--interval', help='Update interval', default=1.00)
args = parser.parse_args()
updateInterval = float(args.interval)
autosave = args.autosave
autoload = args.autoload
saveOnExit = args.on_exit
filename = args.file
loaded = False
# Create pins
pins = []
if not os.path.isfile(filename):
    sys.stderr.write('Error: File does not exist.\n')
sys.exit(1)
cfg = ConfigParser.ConfigParser()
cfg.read(filename)
h = hal.component(args.name)
for section in cfg.sections():
for item in cfg.items(section):
pin = Pin()
pin.section = section
pin.name = item[0]
pin.halName = section.lower() + '.' + item[0].lower()
pin.halPin = h.newpin(pin.halName, hal.HAL_FLOAT, hal.HAL_IO)
pins.append(pin)
halReadTriggerPin = h.newpin("read-trigger", hal.HAL_BIT, hal.HAL_IN)
halWriteTriggerPin = h.newpin("write-trigger", hal.HAL_BIT, hal.HAL_IN)
h.ready()
if autoload:
readPins(cfg, filename, pins)
loaded = True
lastReadTrigger = 0
lastWriteTrigger = 0
try:
while (True):
if lastReadTrigger ^ halReadTriggerPin.value:
lastReadTrigger = halReadTriggerPin.value
readPins(cfg, filename, pins)
loaded = True
if lastWriteTrigger ^ halWriteTriggerPin.value:
lastWriteTrigger = halWriteTriggerPin.value
savePins(cfg, filename, pins)
if autosave and loaded:
for pin in pins:
if pin.halPin.value != pin.lastValue:
pin.lastValue = pin.halPin.value
savePins(cfg, filename, pins)
time.sleep(updateInterval)
except KeyboardInterrupt:
if saveOnExit:
savePins(cfg, filename, pins)
    print("exiting HAL component " + args.name)
h.exit()
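# Hedged example (not part of the original component): the storage file is a
# plain ConfigParser INI file, and one HAL_FLOAT pin is created per
# (section, option) pair, named "<section>.<option>" in lowercase. The
# section and option names below are hypothetical:
#
#   [offsets]
#   x = 12.5
#   y = -3.0
#
# which would expose pins "offsets.x" and "offsets.y". Launched roughly as:
#   halcmd loadusr hal_storage -n storage -f offsets.ini -a -l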
| lgpl-2.1 |
rockyzhang/zhangyanhit-python-for-android-mips | python3-alpha/python3-src/Lib/encodings/euc_kr.py | 816 | 1027 | #
# euc_kr.py: Python Unicode Codec for EUC_KR
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_kr, codecs
import _multibytecodec as mbc
codec = _codecs_kr.getcodec('euc_kr')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='euc_kr',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
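# Usage sketch (standard codecs API; the byte values shown are an assumption
# based on the EUC-KR mapping for U+D55C):
#
#   >>> '\ud55c'.encode('euc_kr')
#   b'\xc7\xd1'
#   >>> b'\xc7\xd1'.decode('euc_kr')
#   '한'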
| apache-2.0 |
ujenmr/ansible | lib/ansible/modules/cloud/vmware/vmware_host_ssl_facts.py | 56 | 4683 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_ssl_facts
short_description: Gather SSL facts about an ESXi host system
description:
- This module can be used to gather facts about the SSL thumbprint information of an ESXi host.
version_added: 2.7
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
    - SSL thumbprint information about all ESXi host systems in the given cluster will be reported.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- SSL thumbprint information of this ESXi host system will be reported.
- If C(cluster_name) is not given, this parameter is required.
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather SSL thumbprint information about all ESXi Hosts in given Cluster
vmware_host_ssl_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
register: all_host_ssl_facts
- name: Get SSL Thumbprint info about "{{ esxi_hostname }}"
vmware_host_ssl_facts:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
esxi_hostname: '{{ esxi_hostname }}'
register: ssl_facts
- set_fact:
ssl_thumbprint: "{{ ssl_facts['host_ssl_facts'][esxi_hostname]['ssl_thumbprints'][0] }}"
- debug:
msg: "{{ ssl_thumbprint }}"
- name: Add ESXi Host to vCenter
vmware_host:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: '{{ datacenter_name }}'
cluster_name: '{{ cluster_name }}'
esxi_hostname: '{{ esxi_hostname }}'
esxi_username: '{{ esxi_username }}'
esxi_password: '{{ esxi_password }}'
esxi_ssl_thumbprint: '{{ ssl_thumbprint }}'
state: present
'''
RETURN = r'''
host_ssl_facts:
description:
    - A dict with the hostname as key and a dict of SSL thumbprint related facts as value.
returned: facts
type: dict
sample:
{
"10.76.33.215": {
"owner_tag": "",
"principal": "vpxuser",
"ssl_thumbprints": [
"E3:E8:A9:20:8D:32:AE:59:C6:8D:A5:91:B0:20:EF:00:A2:7C:27:EE",
"F1:AC:DA:6E:D8:1E:37:36:4A:5C:07:E5:04:0B:87:C8:75:FB:42:01"
]
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class VMwareHostSslManager(PyVmomi):
def __init__(self, module):
super(VMwareHostSslManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
self.hosts_facts = {}
def gather_ssl_facts(self):
for host in self.hosts:
self.hosts_facts[host.name] = dict(principal='',
owner_tag='',
ssl_thumbprints=[])
host_ssl_info_mgr = host.config.sslThumbprintInfo
if host_ssl_info_mgr:
self.hosts_facts[host.name]['principal'] = host_ssl_info_mgr.principal
self.hosts_facts[host.name]['owner_tag'] = host_ssl_info_mgr.ownerTag
self.hosts_facts[host.name]['ssl_thumbprints'] = [i for i in host_ssl_info_mgr.sslThumbprints]
self.module.exit_json(changed=False, host_ssl_facts=self.hosts_facts)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str'),
esxi_hostname=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
],
supports_check_mode=True,
)
vmware_host_accept_config = VMwareHostSslManager(module)
vmware_host_accept_config.gather_ssl_facts()
if __name__ == "__main__":
main()
| gpl-3.0 |
Sonicbids/django | django/contrib/gis/geos/linestring.py | 9 | 5808 | from django.contrib.gis.geos.base import numpy
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos import prototypes as capi
from django.utils.six.moves import range
class LineString(GEOSGeometry):
_init_func = capi.create_linestring
_minlength = 2
#### Python 'magic' routines ####
def __init__(self, *args, **kwargs):
"""
Initializes on the given sequence -- may take lists, tuples, NumPy arrays
of X,Y pairs, or Point objects. If Point objects are used, ownership is
_not_ transferred to the LineString object.
Examples:
ls = LineString((1, 1), (2, 2))
ls = LineString([(1, 1), (2, 2)])
ls = LineString(array([(1, 1), (2, 2)]))
ls = LineString(Point(1, 1), Point(2, 2))
"""
# If only one argument provided, set the coords array appropriately
if len(args) == 1:
coords = args[0]
else:
coords = args
if isinstance(coords, (tuple, list)):
# Getting the number of coords and the number of dimensions -- which
# must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).
ncoords = len(coords)
if coords:
ndim = len(coords[0])
else:
raise TypeError('Cannot initialize on empty sequence.')
self._checkdim(ndim)
            # Iterate through each coordinate and verify its type and dimension
for i in range(1, ncoords):
if not isinstance(coords[i], (tuple, list, Point)):
raise TypeError('each coordinate should be a sequence (list or tuple)')
if len(coords[i]) != ndim:
raise TypeError('Dimension mismatch.')
numpy_coords = False
elif numpy and isinstance(coords, numpy.ndarray):
shape = coords.shape # Using numpy's shape.
if len(shape) != 2:
raise TypeError('Too many dimensions.')
self._checkdim(shape[1])
ncoords = shape[0]
ndim = shape[1]
numpy_coords = True
else:
raise TypeError('Invalid initialization input for LineStrings.')
# Creating a coordinate sequence object because it is easier to
# set the points using GEOSCoordSeq.__setitem__().
cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim == 3))
for i in range(ncoords):
if numpy_coords:
cs[i] = coords[i, :]
elif isinstance(coords[i], Point):
cs[i] = coords[i].tuple
else:
cs[i] = coords[i]
# If SRID was passed in with the keyword arguments
srid = kwargs.get('srid', None)
# Calling the base geometry initialization with the returned pointer
# from the function.
super(LineString, self).__init__(self._init_func(cs.ptr), srid=srid)
def __iter__(self):
"Allows iteration over this LineString."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Returns the number of points in this LineString."
return len(self._cs)
def _get_single_external(self, index):
return self._cs[index]
_get_single_internal = _get_single_external
def _set_list(self, length, items):
ndim = self._cs.dims
hasz = self._cs.hasz # I don't understand why these are different
# create a new coordinate sequence and populate accordingly
cs = GEOSCoordSeq(capi.create_cs(length, ndim), z=hasz)
for i, c in enumerate(items):
cs[i] = c
ptr = self._init_func(cs.ptr)
if ptr:
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(self.srid)
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._checkindex(index)
self._cs[index] = value
def _checkdim(self, dim):
if dim not in (2, 3):
raise TypeError('Dimension mismatch.')
#### Sequence Properties ####
@property
def tuple(self):
"Returns a tuple version of the geometry from the coordinate sequence."
return self._cs.tuple
coords = tuple
def _listarr(self, func):
"""
Internal routine that returns a sequence (list) corresponding with
the given function. Will return a numpy array if possible.
"""
lst = [func(i) for i in range(len(self))]
if numpy:
return numpy.array(lst) # ARRRR!
else:
return lst
@property
def array(self):
"Returns a numpy array for the LineString."
return self._listarr(self._cs.__getitem__)
@property
def merged(self):
"Returns the line merge of this LineString."
return self._topology(capi.geos_linemerge(self.ptr))
@property
def x(self):
"Returns a list or numpy array of the X variable."
return self._listarr(self._cs.getX)
@property
def y(self):
"Returns a list or numpy array of the Y variable."
return self._listarr(self._cs.getY)
@property
def z(self):
"Returns a list or numpy array of the Z variable."
if not self.hasz:
return None
else:
return self._listarr(self._cs.getZ)
# LinearRings are LineStrings used within Polygons.
class LinearRing(LineString):
    _minlength = 4
_init_func = capi.create_linearring
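# Hedged usage sketch (not in the original module): a LinearRing is a closed
# LineString, so the first and last coordinates must coincide and at least
# four points are required, e.g.:
#
#   ring = LinearRing((0, 0), (0, 1), (1, 1), (0, 0))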
| bsd-3-clause |
ToonTownInfiniteRepo/ToontownInfinite | toontown/hood/GZHood.py | 2 | 2614 | from pandac.PandaModules import *
import ToonHood
from toontown.safezone import GZSafeZoneLoader
from toontown.toonbase.ToontownGlobals import *
from toontown.racing import DistributedVehicle
import SkyUtil
class GZHood(ToonHood.ToonHood):
def __init__(self, parentFSM, doneEvent, dnaStore, hoodId):
ToonHood.ToonHood.__init__(self, parentFSM, doneEvent, dnaStore, hoodId)
self.id = GolfZone
self.safeZoneLoaderClass = GZSafeZoneLoader.GZSafeZoneLoader
self.storageDNAFile = 'phase_6/dna/storage_GZ.dna'
self.holidayStorageDNADict = {HALLOWEEN_PROPS: ['phase_6/dna/halloween_props_storage_GZ.dna'],
SPOOKY_PROPS: ['phase_6/dna/halloween_props_storage_GZ.dna']}
self.skyFile = 'phase_3.5/models/props/TT_sky'
self.spookySkyFile = 'phase_3.5/models/props/BR_sky'
self.titleColor = (1.0, 0.5, 0.4, 1.0)
def load(self):
ToonHood.ToonHood.load(self)
self.parentFSM.getStateNamed('GZHood').addChild(self.fsm)
def unload(self):
self.parentFSM.getStateNamed('GZHood').removeChild(self.fsm)
ToonHood.ToonHood.unload(self)
def enter(self, *args):
ToonHood.ToonHood.enter(self, *args)
base.localAvatar.chatMgr.chatInputSpeedChat.addGolfMenu()
base.camLens.setNearFar(SpeedwayCameraNear, SpeedwayCameraFar)
def exit(self):
base.camLens.setNearFar(DefaultCameraNear, DefaultCameraFar)
base.localAvatar.chatMgr.chatInputSpeedChat.removeGolfMenu()
ToonHood.ToonHood.exit(self)
def skyTrack(self, task):
return SkyUtil.cloudSkyTrack(task)
def startSky(self):
if not self.sky.getTag('sky') == 'Regular':
self.endSpookySky()
SkyUtil.startCloudSky(self)
def startSpookySky(self):
if hasattr(self, 'sky') and self.sky:
self.stopSky()
self.sky = loader.loadModel(self.spookySkyFile)
self.sky.setTag('sky', 'Halloween')
self.sky.setScale(1.0)
self.sky.setDepthTest(0)
self.sky.setDepthWrite(0)
self.sky.setColor(0.5, 0.5, 0.5, 1)
self.sky.setBin('background', 100)
self.sky.setFogOff()
self.sky.reparentTo(camera)
self.sky.setTransparency(TransparencyAttrib.MDual, 1)
fadeIn = self.sky.colorScaleInterval(1.5, Vec4(1, 1, 1, 1), startColorScale=Vec4(1, 1, 1, 0.25), blendType='easeInOut')
fadeIn.start()
self.sky.setZ(0.0)
self.sky.setHpr(0.0, 0.0, 0.0)
ce = CompassEffect.make(NodePath(), CompassEffect.PRot | CompassEffect.PZ)
self.sky.node().setEffect(ce)
| mit |
Pythonify/awesome | venv/lib/python2.7/site-packages/pip/_vendor/ipaddress.py | 339 | 80176 | # Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
from __future__ import unicode_literals
import itertools
import struct
__version__ = '1.0.17'
# Compatibility functions
_compat_int_types = (int,)
try:
_compat_int_types = (int, long)
except NameError:
pass
try:
_compat_str = unicode
except NameError:
_compat_str = str
assert bytes != str
if b'\0'[0] == 0: # Python 3 semantics
def _compat_bytes_to_byte_vals(byt):
return byt
else:
def _compat_bytes_to_byte_vals(byt):
return [struct.unpack(b'!B', b)[0] for b in byt]
try:
_compat_int_from_byte_vals = int.from_bytes
except AttributeError:
def _compat_int_from_byte_vals(bytvals, endianess):
assert endianess == 'big'
res = 0
for bv in bytvals:
assert isinstance(bv, _compat_int_types)
res = (res << 8) + bv
return res
def _compat_to_bytes(intval, length, endianess):
assert isinstance(intval, _compat_int_types)
assert endianess == 'big'
if length == 4:
if intval < 0 or intval >= 2 ** 32:
raise struct.error("integer out of range for 'I' format code")
return struct.pack(b'!I', intval)
elif length == 16:
if intval < 0 or intval >= 2 ** 128:
raise struct.error("integer out of range for 'QQ' format code")
return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
else:
raise NotImplementedError()
if hasattr(int, 'bit_length'):
# Not int.bit_length , since that won't work in 2.7 where long exists
def _compat_bit_length(i):
return i.bit_length()
else:
def _compat_bit_length(i):
for res in itertools.count():
if i >> res == 0:
return res
def _compat_range(start, end, step=1):
assert step > 0
i = start
while i < end:
yield i
i += step
class _TotalOrderingMixin(object):
__slots__ = ()
# Helper that derives the other comparison operations from
# __lt__ and __eq__
# We avoid functools.total_ordering because it doesn't handle
# NotImplemented correctly yet (http://bugs.python.org/issue10042)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __lt__(self, other):
raise NotImplementedError
def __le__(self, other):
less = self.__lt__(other)
if less is NotImplemented or not less:
return self.__eq__(other)
return less
def __gt__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not (less or equal)
def __ge__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
return not less
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def ip_address(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the *address* passed isn't either a v4 or a v6
address
"""
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
'%r does not appear to be an IPv4 or IPv6 address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?' % address)
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
address)
def ip_network(address, strict=True):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP network. Either IPv4 or
IPv6 networks may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if the network has host bits set.
"""
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
'%r does not appear to be an IPv4 or IPv6 network. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?' % address)
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address)
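# Hedged doctest-style sketch (not in the original source; note that this
# backport expects text, so unicode literals matter on Python 2):
#
#   >>> ip_network(u'192.0.2.0/24')
#   IPv4Network('192.0.2.0/24')
#   >>> ip_network(u'192.0.2.1/24')   # host bits set and strict=True
#   Traceback (most recent call last):
#     ...
#   ValueError: 192.0.2.1/24 has host bits set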
def ip_interface(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Interface or IPv6Interface object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
Notes:
The IPv?Interface classes describe an Address on a particular
Network, so they're basically a combination of both the Address
and Network classes.
"""
try:
return IPv4Interface(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Interface(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
address)
def v4_int_to_packed(address):
"""Represent an address as 4 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv4 IP address.
Returns:
The integer address packed as 4 bytes in network (big-endian) order.
Raises:
ValueError: If the integer is negative or too large to be an
IPv4 IP address.
"""
try:
return _compat_to_bytes(address, 4, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv4")
def v6_int_to_packed(address):
"""Represent an address as 16 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv6 IP address.
Returns:
The integer address packed as 16 bytes in network (big-endian) order.
"""
try:
return _compat_to_bytes(address, 16, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv6")
def _split_optional_netmask(address):
"""Helper to split the netmask and raise AddressValueError if needed"""
addr = _compat_str(address).split('/')
if len(addr) > 2:
raise AddressValueError("Only one '/' permitted in %r" % address)
return addr
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it)
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last
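# Worked example (comment added for clarity): for the sorted, deduplicated
# input [192.0.2.1, 192.0.2.2, 192.0.2.5] this yields the ranges
# (192.0.2.1, 192.0.2.2) and (192.0.2.5, 192.0.2.5).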
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
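    # Worked example (comment added for clarity): for number == 20
    # (0b10100), ~number & (number - 1) == 0b00011, whose bit_length of 2
    # equals the number of trailing zero bits.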
return min(bits, _compat_bit_length(~number & (number - 1)))
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> list(summarize_address_range(IPv4Address('192.0.2.0'),
... IPv4Address('192.0.2.130')))
... #doctest: +NORMALIZE_WHITESPACE
[IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
IPv4Network('192.0.2.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
An iterator of the summarized IPv(4|6) network objects.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version of the first address is not 4 or 6.
"""
if (not (isinstance(first, _BaseAddress) and
isinstance(last, _BaseAddress))):
raise TypeError('first and last must be IP addresses, not networks')
if first.version != last.version:
raise TypeError("%s and %s are not of the same version" % (
first, last))
if first > last:
raise ValueError('last IP address must be greater than first')
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError('unknown IP version')
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
_compat_bit_length(last_int - first_int + 1) - 1)
net = ip((first_int, ip_bits - nbits))
yield net
first_int += 1 << nbits
if first_int - 1 == ip._ALL_ONES:
break
def _collapse_addresses_internal(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('192.0.2.0/26')
ip2 = IPv4Network('192.0.2.64/26')
ip3 = IPv4Network('192.0.2.128/26')
ip4 = IPv4Network('192.0.2.192/26')
_collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
[IPv4Network('192.0.2.0/24')]
This shouldn't be called directly; it is called via
collapse_addresses([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
# First merge
to_merge = list(addresses)
subnets = {}
while to_merge:
net = to_merge.pop()
supernet = net.supernet()
existing = subnets.get(supernet)
if existing is None:
subnets[supernet] = net
elif existing != net:
# Merge consecutive subnets
del subnets[supernet]
to_merge.append(supernet)
# Then iterate over resulting networks, skipping subsumed subnets
last = None
for net in sorted(subnets.values()):
if last is not None:
# Since they are sorted,
# last.network_address <= net.network_address is a given.
if last.broadcast_address >= net.broadcast_address:
continue
yield net
last = net
def collapse_addresses(addresses):
"""Collapse a list of IP objects.
Example:
collapse_addresses([IPv4Network('192.0.2.0/25'),
IPv4Network('192.0.2.128/25')]) ->
[IPv4Network('192.0.2.0/24')]
Args:
addresses: An iterator of IPv4Network or IPv6Network objects.
Returns:
An iterator of the collapsed IPv(4|6)Network objects.
Raises:
TypeError: If passed a list of mixed version objects.
"""
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseAddress):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
try:
ips.append(ip.ip)
except AttributeError:
ips.append(ip.network_address)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, nets[-1]))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
# find consecutive address ranges in the sorted sequence and summarize them
if ips:
for first, last in _find_address_range(ips):
addrs.extend(summarize_address_range(first, last))
return _collapse_addresses_internal(addrs + nets)
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddress sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNetwork):
return obj._get_networks_key()
elif isinstance(obj, _BaseAddress):
return obj._get_address_key()
return NotImplemented
class _IPAddressBase(_TotalOrderingMixin):
"""The mother class."""
__slots__ = ()
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return _compat_str(self)
@property
def reverse_pointer(self):
"""The name of the reverse DNS pointer for the IP address, e.g.:
>>> ipaddress.ip_address("127.0.0.1").reverse_pointer
'1.0.0.127.in-addr.arpa'
>>> ipaddress.ip_address("2001:db8::1").reverse_pointer
'1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
"""
return self._reverse_pointer()
@property
def version(self):
msg = '%200s has no version specified' % (type(self),)
raise NotImplementedError(msg)
def _check_int_address(self, address):
if address < 0:
msg = "%d (< 0) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._version))
if address > self._ALL_ONES:
msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._max_prefixlen,
self._version))
def _check_packed_address(self, address, expected_len):
address_len = len(address)
if address_len != expected_len:
msg = (
'%r (len %d != %d) is not permitted as an IPv%d address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?'
)
raise AddressValueError(msg % (address, address_len,
expected_len, self._version))
@classmethod
def _ip_int_from_prefix(cls, prefixlen):
"""Turn the prefix length into a bitwise netmask
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
@classmethod
def _prefix_from_ip_int(cls, ip_int):
"""Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones
"""
trailing_zeroes = _count_righthand_zero_bits(ip_int,
cls._max_prefixlen)
prefixlen = cls._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = cls._max_prefixlen // 8
details = _compat_to_bytes(ip_int, byteslen, 'big')
msg = 'Netmask pattern %r mixes zeroes & ones'
raise ValueError(msg % details)
return prefixlen
@classmethod
def _report_invalid_netmask(cls, netmask_str):
msg = '%r is not a valid netmask' % netmask_str
raise NetmaskValueError(msg)
@classmethod
def _prefix_from_prefix_string(cls, prefixlen_str):
"""Return prefix length from a numeric string
Args:
prefixlen_str: The string to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask
"""
# int allows a leading +/- as well as surrounding whitespace,
# so we ensure that isn't the case
if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
cls._report_invalid_netmask(prefixlen_str)
try:
prefixlen = int(prefixlen_str)
except ValueError:
cls._report_invalid_netmask(prefixlen_str)
if not (0 <= prefixlen <= cls._max_prefixlen):
cls._report_invalid_netmask(prefixlen_str)
return prefixlen
@classmethod
def _prefix_from_ip_string(cls, ip_str):
"""Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask
"""
# Parse the netmask/hostmask like an IP address.
try:
ip_int = cls._ip_int_from_string(ip_str)
except AddressValueError:
cls._report_invalid_netmask(ip_str)
# Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
# Note that the two ambiguous cases (all-ones and all-zeroes) are
# treated as netmasks.
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
pass
# Invert the bits, and try matching a /0+1+/ hostmask instead.
ip_int ^= cls._ALL_ONES
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
cls._report_invalid_netmask(ip_str)
def __reduce__(self):
return self.__class__, (_compat_str(self),)
class _BaseAddress(_IPAddressBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
__slots__ = ()
def __int__(self):
return self._ip
def __eq__(self, other):
try:
return (self._ip == other._ip and
self._version == other._version)
except AttributeError:
return NotImplemented
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseAddress):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self._ip != other._ip:
return self._ip < other._ip
return False
# Shorthand for Integer addition and subtraction. This is not
# meant to ever support addition/subtraction of addresses.
def __add__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) + other)
def __sub__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) - other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return _compat_str(self._string_from_ip_int(self._ip))
def __hash__(self):
return hash(hex(int(self._ip)))
def _get_address_key(self):
return (self._version, self)
def __reduce__(self):
return self.__class__, (self._ip,)
class _BaseNetwork(_IPAddressBase):
"""A generic IP network object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return '%s/%d' % (self.network_address, self.prefixlen)
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast):
yield self._address_class(x)
def __iter__(self):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network, broadcast + 1):
yield self._address_class(x)
def __getitem__(self, n):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
if n >= 0:
if network + n > broadcast:
raise IndexError('address out of range')
return self._address_class(network + n)
else:
n += 1
if broadcast + n < network:
raise IndexError('address out of range')
return self._address_class(broadcast + n)
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseNetwork):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self.network_address != other.network_address:
return self.network_address < other.network_address
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __eq__(self, other):
try:
return (self._version == other._version and
self.network_address == other.network_address and
int(self.netmask) == int(other.netmask))
except AttributeError:
return NotImplemented
def __hash__(self):
return hash(int(self.network_address) ^ int(self.netmask))
def __contains__(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if isinstance(other, _BaseNetwork):
return False
# dealing with another address
else:
# address
return (int(self.network_address) <= int(other._ip) <=
int(self.broadcast_address))
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network_address in other or (
self.broadcast_address in other or (
other.network_address in self or (
other.broadcast_address in self)))
@property
def broadcast_address(self):
x = self._cache.get('broadcast_address')
if x is None:
x = self._address_class(int(self.network_address) |
int(self.hostmask))
self._cache['broadcast_address'] = x
return x
@property
def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
self._cache['hostmask'] = x
return x
@property
def with_prefixlen(self):
return '%s/%d' % (self.network_address, self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self.network_address, self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self.network_address, self.hostmask)
@property
def num_addresses(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast_address) - int(self.network_address) + 1
@property
def _address_class(self):
# Returning bare address objects (rather than interfaces) allows for
# more consistent behaviour across the network address, broadcast
# address and individual host addresses.
msg = '%200s has no associated address class' % (type(self),)
raise NotImplementedError(msg)
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = ip_network('192.0.2.0/28')
addr2 = ip_network('192.0.2.1/32')
list(addr1.address_exclude(addr2)) =
[IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
or IPv6:
addr1 = ip_network('2001:db8::1/32')
addr2 = ip_network('2001:db8::1/128')
list(addr1.address_exclude(addr2)) =
[ip_network('2001:db8::1/128'),
ip_network('2001:db8::2/127'),
ip_network('2001:db8::4/126'),
ip_network('2001:db8::8/125'),
...
ip_network('2001:db8:8000::/33')]
Args:
other: An IPv4Network or IPv6Network object of the same type.
Returns:
An iterator of the IPv(4|6)Network objects which is self
minus other.
Raises:
TypeError: If self and other are of differing address
versions, or if other is not a network object.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
self, other))
if not isinstance(other, _BaseNetwork):
raise TypeError("%s is not a network object" % other)
if not other.subnet_of(self):
raise ValueError('%s not contained in %s' % (other, self))
if other == self:
return
# Make sure we're comparing the network of other.
other = other.__class__('%s/%s' % (other.network_address,
other.prefixlen))
s1, s2 = self.subnets()
while s1 != other and s2 != other:
if other.subnet_of(s1):
yield s2
s1, s2 = s1.subnets()
elif other.subnet_of(s2):
yield s1
s1, s2 = s2.subnets()
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
if s1 == other:
yield s2
elif s2 == other:
yield s1
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
IPv6Network('2001:db8::1000/124') <
IPv6Network('2001:db8::2000/124')
0 if self == other
eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
IPv6Network('2001:db8::1000/124') ==
IPv6Network('2001:db8::1000/124')
1 if self > other
eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
IPv6Network('2001:db8::2000/124') >
IPv6Network('2001:db8::1000/124')
Raises:
TypeError if the IP versions are different.
"""
# does this need to raise a ValueError?
if self._version != other._version:
raise TypeError('%s and %s are not of the same type' % (
self, other))
# self._version == other._version below here:
if self.network_address < other.network_address:
return -1
if self.network_address > other.network_address:
return 1
# self.network_address == other.network_address below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
return 0
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network_address, self.netmask)
def subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), yield an iterator with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if new_prefixlen > self._max_prefixlen:
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, self))
start = int(self.network_address)
end = int(self.broadcast_address) + 1
step = (int(self.hostmask) + 1) >> prefixlen_diff
for new_addr in _compat_range(start, end, step):
current = self.__class__((new_addr, new_prefixlen))
yield current
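    # e.g. (illustrative) list(IPv4Network(u'192.0.2.0/24').subnets(new_prefix=26))
    # yields the four /26 networks 192.0.2.0/26 through 192.0.2.192/26.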
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
An IPv4 network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
a negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError('new prefix must be shorter')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = self._prefixlen - new_prefix
new_prefixlen = self.prefixlen - prefixlen_diff
if new_prefixlen < 0:
raise ValueError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
(self.prefixlen, prefixlen_diff))
return self.__class__((
int(self.network_address) & (int(self.netmask) << prefixlen_diff),
new_prefixlen
))
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return (self.network_address.is_multicast and
self.broadcast_address.is_multicast)
def subnet_of(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if (hasattr(other, 'network_address') and
hasattr(other, 'broadcast_address')):
return (other.network_address <= self.network_address and
other.broadcast_address >= self.broadcast_address)
# dealing with another address
else:
raise TypeError('Unable to test subnet containment with element '
'of type %s' % type(other))
def supernet_of(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if (hasattr(other, 'network_address') and
hasattr(other, 'broadcast_address')):
return (other.network_address >= self.network_address and
other.broadcast_address <= self.broadcast_address)
# dealing with another address
else:
raise TypeError('Unable to test subnet containment with element '
'of type %s' % type(other))
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (self.network_address.is_reserved and
self.broadcast_address.is_reserved)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return (self.network_address.is_link_local and
self.broadcast_address.is_link_local)
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return (self.network_address.is_private and
self.broadcast_address.is_private)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return (self.network_address.is_unspecified and
self.broadcast_address.is_unspecified)
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return (self.network_address.is_loopback and
self.broadcast_address.is_loopback)
class _BaseV4(object):
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 4
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2 ** IPV4LENGTH) - 1
_DECIMAL_DIGITS = frozenset('0123456789')
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
_max_prefixlen = IPV4LENGTH
# There are only a handful of valid v4 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
def _explode_shorthand_ip_string(self):
return _compat_str(self)
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
- a string representing the prefix netmask (e.g. "255.255.255.0")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
try:
# Check for a netmask in prefix length form
prefixlen = cls._prefix_from_prefix_string(arg)
except NetmaskValueError:
# Check for a netmask or hostmask in dotted-quad form.
# This may raise NetmaskValueError.
prefixlen = cls._prefix_from_ip_string(arg)
netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
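    # e.g. (illustrative) _make_netmask(24) and _make_netmask('255.255.255.0')
    # both return (IPv4Address('255.255.255.0'), 24).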
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
return _compat_int_from_byte_vals(
map(cls._parse_octet, octets), 'big')
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_octet(cls, octet_str):
"""Convert a decimal octet into an integer.
Args:
octet_str: A string, the number to parse.
Returns:
The octet as an integer.
Raises:
ValueError: if the octet isn't strictly a decimal from [0..255].
"""
if not octet_str:
raise ValueError("Empty octet not permitted")
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._DECIMAL_DIGITS.issuperset(octet_str):
msg = "Only decimal digits permitted in %r"
raise ValueError(msg % octet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(octet_str) > 3:
msg = "At most 3 characters permitted in %r"
raise ValueError(msg % octet_str)
# Convert to integer (we know digits are legal)
octet_int = int(octet_str, 10)
# Any octets that look like they *might* be written in octal,
# and which don't look exactly the same in both octal and
# decimal are rejected as ambiguous
if octet_int > 7 and octet_str[0] == '0':
msg = "Ambiguous (octal/decimal) value in %r not permitted"
raise ValueError(msg % octet_str)
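        # e.g. '08' and '011' are rejected as ambiguous here, while '0' and
        # '07' (equal in both octal and decimal) are accepted.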
if octet_int > 255:
raise ValueError("Octet %d (> 255) not permitted" % octet_int)
return octet_int
@classmethod
def _string_from_ip_int(cls, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
if isinstance(b, bytes)
else b)
for b in _compat_to_bytes(ip_int, 4, 'big'))
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv4 address.
This implements the method described in RFC1035 3.5.
"""
reverse_octets = _compat_str(self).split('.')[::-1]
return '.'.join(reverse_octets) + '.in-addr.arpa'
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv4Address(_BaseV4, _BaseAddress):
"""Represent and manipulate single IPv4 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv4Address('192.0.2.1') == IPv4Address(3221225985).
or, more generally
IPv4Address(int(IPv4Address('192.0.2.1'))) ==
IPv4Address('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 4)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v4_int_to_packed(self._ip)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
return self in self._constants._reserved_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
return (
self not in self._constants._public_network and
not self.is_private)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
return self in self._constants._multicast_network
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
return self == self._constants._unspecified_address
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
return self in self._constants._loopback_network
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
return self in self._constants._linklocal_network
class IPv4Interface(IPv4Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv4Address.__init__(self, address)
self.network = IPv4Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv4Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv4Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv4Address.__init__(self, addr[0])
self.network = IPv4Network(address, strict=False)
self._prefixlen = self.network._prefixlen
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv4Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv4Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv4Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
class IPv4Network(_BaseV4, _BaseNetwork):
"""This class represents and manipulates 32-bit IPv4 network + addresses..
Attributes: [examples for IPv4Network('192.0.2.0/27')]
.network_address: IPv4Address('192.0.2.0')
.hostmask: IPv4Address('0.0.0.31')
        .broadcast_address: IPv4Address('192.0.2.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# Class to use when creating address objects
_address_class = IPv4Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.0.2.0/24'
'192.0.2.0/255.255.255.0'
'192.0.0.2/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.0.2.1'
'192.0.2.1/255.255.255.255'
'192.0.2.1/32'
are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.0.2.1') == IPv4Network(3221225985)
or, more generally
IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
IPv4Interface('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict is True and a network address is not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Constructing from a packed address or integer
if isinstance(address, (_compat_int_types, bytes)):
self.network_address = IPv4Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen)
# fixme: address/network test here.
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
# We weren't given an address[1]
arg = self._max_prefixlen
self.network_address = IPv4Address(address[0])
self.netmask, self._prefixlen = self._make_netmask(arg)
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv4Address(packed &
int(self.netmask))
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv4Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
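    # Editor's note -- an illustrative sketch of the mask forms described in
    # the docstring above (not part of the original source):
    #   >>> IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/255.255.255.0')
    #   True
    #   >>> IPv4Network('192.0.2.0/0.0.0.255') == IPv4Network('192.0.2.0/24')
    #   True
    #   >>> IPv4Network('192.0.2.1/24', strict=False)   # host bits masked off
    #   IPv4Network('192.0.2.0/24')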
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry.
"""
return (not (self.network_address in IPv4Network('100.64.0.0/10') and
self.broadcast_address in IPv4Network('100.64.0.0/10')) and
not self.is_private)
class _IPv4Constants(object):
_linklocal_network = IPv4Network('169.254.0.0/16')
_loopback_network = IPv4Network('127.0.0.0/8')
_multicast_network = IPv4Network('224.0.0.0/4')
_public_network = IPv4Network('100.64.0.0/10')
_private_networks = [
IPv4Network('0.0.0.0/8'),
IPv4Network('10.0.0.0/8'),
IPv4Network('127.0.0.0/8'),
IPv4Network('169.254.0.0/16'),
IPv4Network('172.16.0.0/12'),
IPv4Network('192.0.0.0/29'),
IPv4Network('192.0.0.170/31'),
IPv4Network('192.0.2.0/24'),
IPv4Network('192.168.0.0/16'),
IPv4Network('198.18.0.0/15'),
IPv4Network('198.51.100.0/24'),
IPv4Network('203.0.113.0/24'),
IPv4Network('240.0.0.0/4'),
IPv4Network('255.255.255.255/32'),
]
_reserved_network = IPv4Network('240.0.0.0/4')
_unspecified_address = IPv4Address('0.0.0.0')
IPv4Address._constants = _IPv4Constants
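# Editor's note -- quick classification examples using the constants wired up
# above (an illustrative sketch, not part of the original source):
#   >>> IPv4Address('10.0.0.1').is_private
#   True
#   >>> IPv4Address('8.8.8.8').is_global
#   True
#   >>> IPv4Address('100.64.0.1').is_global    # shared address space, RFC 6598
#   False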
class _BaseV6(object):
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 6
_ALL_ONES = (2 ** IPV6LENGTH) - 1
_HEXTET_COUNT = 8
_HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
_max_prefixlen = IPV6LENGTH
# There are only a bunch of valid v6 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
        - a string representing the prefix length (e.g. "64"); note that
          dotted-quad netmask strings are an IPv4-only form and are not
          accepted by this IPv6 variant
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
prefixlen = cls._prefix_from_prefix_string(arg)
netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
An int, the IPv6 address
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
parts = ip_str.split(':')
# An IPv6 address needs at least 2 colons (3 parts).
_min_parts = 3
if len(parts) < _min_parts:
msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
raise AddressValueError(msg)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if '.' in parts[-1]:
try:
ipv4_int = IPv4Address(parts.pop())._ip
except AddressValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
parts.append('%x' % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
# The extra colon comes from using the "::" notation for a single
# leading or trailing zero part.
_max_parts = cls._HEXTET_COUNT + 1
if len(parts) > _max_parts:
msg = "At most %d colons permitted in %r" % (
_max_parts - 1, ip_str)
raise AddressValueError(msg)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
skip_index = None
for i in _compat_range(1, len(parts) - 1):
if not parts[i]:
if skip_index is not None:
# Can't have more than one '::'
msg = "At most one '::' permitted in %r" % ip_str
raise AddressValueError(msg)
skip_index = i
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
msg = "Expected at most %d other parts with '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
else:
# Otherwise, allocate the entire address to parts_hi. The
# endpoints could still be empty, but _parse_hextet() will check
# for that.
if len(parts) != cls._HEXTET_COUNT:
msg = "Exactly %d parts expected without '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
if not parts[0]:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0
for i in range(parts_hi):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in range(-parts_lo, 0):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
return ip_int
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_hextet(cls, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from
[0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._HEX_DIGITS.issuperset(hextet_str):
raise ValueError("Only hex digits permitted in %r" % hextet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(hextet_str) > 4:
msg = "At most 4 characters permitted in %r"
raise ValueError(msg % hextet_str)
# Length check means we can skip checking the integer value
return int(hextet_str, 16)
@classmethod
def _compress_hextets(cls, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index, hextet in enumerate(hextets):
if hextet == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
return hextets
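    # Editor's note -- the effect of the compression above, observable through
    # str() (an illustrative sketch, not part of the original source):
    #   >>> str(IPv6Address('2001:0db8:0000:0000:0000:0000:0000:0001'))
    #   '2001:db8::1'
    #   >>> str(IPv6Address('0:0:0:0:0:0:0:0'))
    #   '::'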
@classmethod
def _string_from_ip_int(cls, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if ip_int is None:
ip_int = int(cls._ip)
if ip_int > cls._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
hextets = cls._compress_hextets(hextets)
return ':'.join(hextets)
def _explode_shorthand_ip_string(self):
"""Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if isinstance(self, IPv6Network):
ip_str = _compat_str(self.network_address)
elif isinstance(self, IPv6Interface):
ip_str = _compat_str(self.ip)
else:
ip_str = _compat_str(self)
ip_int = self._ip_int_from_string(ip_str)
hex_str = '%032x' % ip_int
parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
if isinstance(self, (_BaseNetwork, IPv6Interface)):
return '%s/%d' % (':'.join(parts), self._prefixlen)
return ':'.join(parts)
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv6 address.
This implements the method described in RFC3596 2.5.
"""
reverse_chars = self.exploded[::-1].replace(':', '')
return '.'.join(reverse_chars) + '.ip6.arpa'
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv6Address(_BaseV6, _BaseAddress):
"""Represent and manipulate single IPv6 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""Instantiate a new IPv6 address object.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:db8::') ==
IPv6Address(42540766411282592856903984951653826560)
or, more generally
IPv6Address(int(IPv6Address('2001:db8::'))) ==
IPv6Address('2001:db8::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 16)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v6_int_to_packed(self._ip)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return self in self._constants._multicast_network
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return any(self in x for x in self._constants._reserved_networks)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return self in self._constants._linklocal_network
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return self in self._constants._sitelocal_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv6-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, true if the address is not reserved per
iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self._ip == 0
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self._ip == 1
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
if (self._ip >> 32) != 0xFFFF:
return None
return IPv4Address(self._ip & 0xFFFFFFFF)
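    # Editor's note -- illustrative sketch (not part of the original source):
    #   >>> IPv6Address('::ffff:192.168.1.1').ipv4_mapped
    #   IPv4Address('192.168.1.1')
    #   >>> IPv6Address('2001:db8::1').ipv4_mapped is None
    #   True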
@property
def teredo(self):
"""Tuple of embedded teredo IPs.
Returns:
Tuple of the (server, client) IPs or None if the address
doesn't appear to be a teredo address (doesn't start with
2001::/32)
"""
if (self._ip >> 96) != 0x20010000:
return None
return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
IPv4Address(~self._ip & 0xFFFFFFFF))
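    # Editor's note -- illustrative sketch using the well-known Teredo example
    # address from RFC 4380 (not part of the original source):
    #   >>> IPv6Address('2001:0:4136:e378:8000:63bf:3fff:fdd2').teredo
    #   (IPv4Address('65.54.227.120'), IPv4Address('192.0.2.45'))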
@property
def sixtofour(self):
"""Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address.
"""
if (self._ip >> 112) != 0x2002:
return None
return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
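    # Editor's note -- illustrative sketch (not part of the original source):
    #   >>> IPv6Address('2002:ac1d:2d64::1').sixtofour
    #   IPv4Address('172.29.45.100')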
class IPv6Interface(IPv6Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv6Address.__init__(self, address)
self.network = IPv6Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv6Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv6Address.__init__(self, addr[0])
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self._prefixlen = self.network._prefixlen
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv6Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv6Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv6Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
@property
def is_unspecified(self):
return self._ip == 0 and self.network.is_unspecified
@property
def is_loopback(self):
return self._ip == 1 and self.network.is_loopback
class IPv6Network(_BaseV6, _BaseNetwork):
"""This class represents and manipulates 128-bit IPv6 networks.
    Attributes: [examples for IPv6Network('2001:db8::1000/124')]
.network_address: IPv6Address('2001:db8::1000')
.hostmask: IPv6Address('::f')
.broadcast_address: IPv6Address('2001:db8::100f')
.netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
.prefixlen: 124
"""
# Class to use when creating address objects
_address_class = IPv6Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the
IP and prefix/netmask.
'2001:db8::/128'
'2001:db8:0000:0000:0000:0000:0000:0000/128'
'2001:db8::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnetmask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:db8::') ==
IPv6Network(42540766411282592856903984951653826560)
or, more generally
IPv6Network(int(IPv6Network('2001:db8::'))) ==
IPv6Network('2001:db8::')
strict: A boolean. If true, ensure that we have been passed
A true network address, eg, 2001:db8::1000/124 and not an
IP address on a network, eg, 2001:db8::1/124.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Efficient constructor from integer or packed address
if isinstance(address, (bytes, _compat_int_types)):
self.network_address = IPv6Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen)
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
self.network_address = IPv6Address(address[0])
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv6Address(packed &
int(self.netmask))
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv6Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the
Subnet-Router anycast address.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast + 1):
yield self._address_class(x)
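    # Editor's note -- unlike __iter__, the generator above starts at
    # network + 1, skipping the Subnet-Router anycast address (a sketch,
    # not part of the original source):
    #   >>> list(IPv6Network('2001:db8::/126').hosts())
    #   [IPv6Address('2001:db8::1'), IPv6Address('2001:db8::2'), IPv6Address('2001:db8::3')]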
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return (self.network_address.is_site_local and
self.broadcast_address.is_site_local)
class _IPv6Constants(object):
_linklocal_network = IPv6Network('fe80::/10')
_multicast_network = IPv6Network('ff00::/8')
_private_networks = [
IPv6Network('::1/128'),
IPv6Network('::/128'),
IPv6Network('::ffff:0:0/96'),
IPv6Network('100::/64'),
IPv6Network('2001::/23'),
IPv6Network('2001:2::/48'),
IPv6Network('2001:db8::/32'),
IPv6Network('2001:10::/28'),
IPv6Network('fc00::/7'),
IPv6Network('fe80::/10'),
]
_reserved_networks = [
IPv6Network('::/8'), IPv6Network('100::/8'),
IPv6Network('200::/7'), IPv6Network('400::/6'),
IPv6Network('800::/5'), IPv6Network('1000::/4'),
IPv6Network('4000::/3'), IPv6Network('6000::/3'),
IPv6Network('8000::/3'), IPv6Network('A000::/3'),
IPv6Network('C000::/3'), IPv6Network('E000::/4'),
IPv6Network('F000::/5'), IPv6Network('F800::/6'),
IPv6Network('FE00::/9'),
]
_sitelocal_network = IPv6Network('fec0::/10')
IPv6Address._constants = _IPv6Constants
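# Editor's note -- classification examples using the constants wired up above
# (an illustrative sketch, not part of the original source):
#   >>> IPv6Address('fd00::1').is_private       # unique local, fc00::/7
#   True
#   >>> IPv6Address('fe80::1').is_link_local
#   True
#   >>> IPv6Address('2001:4860:4860::8888').is_global
#   True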
| gpl-3.0 |
jianghuaw/nova | nova/tests/unit/notifications/objects/test_flavor.py | 2 | 7601 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from nova import context
from nova.notifications.objects import flavor as flavor_notification
from nova import objects
from nova.objects import fields
from nova import test
from nova.tests.unit.objects.test_flavor import fake_flavor
PROJECTS_SENTINEL = object()
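# Editor's note -- the sentinel lets callers distinguish "argument not passed"
# from an explicit None; a minimal sketch of the pattern (not part of the
# original source):
#   def verify(expected_projects=PROJECTS_SENTINEL):
#       if expected_projects is PROJECTS_SENTINEL:
#           pass  # caller supplied nothing; fall back to the object's value
#       elif expected_projects is None:
#           pass  # caller explicitly expects no projects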
class TestFlavorNotification(test.TestCase):
def setUp(self):
self.ctxt = context.get_admin_context()
super(TestFlavorNotification, self).setUp()
@mock.patch('nova.notifications.objects.flavor.FlavorNotification')
def _verify_notification(self, flavor_obj, flavor, action,
mock_notification, project_id=None,
expected_projects=PROJECTS_SENTINEL):
notification = mock_notification
if action == "CREATE":
flavor_obj.create()
elif action == "DELETE":
flavor_obj.destroy()
elif action == "ADD_ACCESS":
action = "UPDATE"
flavor_obj.add_access(project_id)
elif action == "REMOVE_ACCESS":
action = "UPDATE"
flavor_obj.remove_access(project_id)
else:
flavor_obj.save()
self.assertTrue(notification.called)
event_type = notification.call_args[1]['event_type']
priority = notification.call_args[1]['priority']
publisher = notification.call_args[1]['publisher']
payload = notification.call_args[1]['payload']
self.assertEqual("fake-mini", publisher.host)
self.assertEqual("nova-api", publisher.binary)
self.assertEqual(fields.NotificationPriority.INFO, priority)
self.assertEqual('flavor', event_type.object)
self.assertEqual(getattr(fields.NotificationAction, action),
event_type.action)
notification.return_value.emit.assert_called_once_with(self.ctxt)
schema = flavor_notification.FlavorPayload.SCHEMA
for field in schema:
if field == 'projects' and expected_projects != PROJECTS_SENTINEL:
self.assertEqual(expected_projects, getattr(payload, field))
elif field in flavor_obj:
self.assertEqual(flavor_obj[field], getattr(payload, field))
else:
self.fail('Missing check for field %s in flavor_obj.' % field)
@mock.patch('nova.objects.Flavor._flavor_create')
def test_flavor_create_with_notification(self, mock_create):
flavor = copy.deepcopy(fake_flavor)
flavor_obj = objects.Flavor(context=self.ctxt)
flavor_obj.extra_specs = flavor['extra_specs']
flavorid = '1'
flavor['flavorid'] = flavorid
flavor['id'] = flavorid
mock_create.return_value = flavor
self._verify_notification(flavor_obj, flavor, 'CREATE')
@mock.patch('nova.objects.Flavor._flavor_extra_specs_del')
def test_flavor_update_with_notification(self, mock_delete):
flavor = copy.deepcopy(fake_flavor)
flavorid = '1'
flavor['flavorid'] = flavorid
flavor['id'] = flavorid
flavor_obj = objects.Flavor(context=self.ctxt, **flavor)
flavor_obj.obj_reset_changes()
del flavor_obj.extra_specs['foo']
del flavor['extra_specs']['foo']
self._verify_notification(flavor_obj, flavor, "UPDATE")
projects = ['project-1', 'project-2']
flavor_obj.projects = projects
flavor['projects'] = projects
self._verify_notification(flavor_obj, flavor, "UPDATE")
@mock.patch('nova.objects.Flavor._add_access')
@mock.patch('nova.objects.Flavor._remove_access')
def test_flavor_access_with_notification(self, mock_remove_access,
mock_add_access):
flavor = copy.deepcopy(fake_flavor)
flavorid = '1'
flavor['flavorid'] = flavorid
flavor['id'] = flavorid
flavor_obj = objects.Flavor(context=self.ctxt, **flavor)
flavor_obj.obj_reset_changes()
self._verify_notification(flavor_obj, flavor, "ADD_ACCESS",
project_id="project1")
self._verify_notification(flavor_obj, flavor, "REMOVE_ACCESS",
project_id="project1")
@mock.patch('nova.objects.Flavor._flavor_destroy')
def test_flavor_destroy_with_notification(self, mock_destroy):
flavor = copy.deepcopy(fake_flavor)
flavorid = '1'
flavor['flavorid'] = flavorid
flavor['id'] = flavorid
mock_destroy.return_value = flavor
flavor_obj = objects.Flavor(context=self.ctxt, **flavor)
flavor_obj.obj_reset_changes()
self.assertNotIn('projects', flavor_obj)
# We specifically expect there to not be any projects as we don't want
# to try and lazy-load them from the main database and end up with [].
self._verify_notification(flavor_obj, flavor, "DELETE",
expected_projects=None)
@mock.patch('nova.objects.Flavor._flavor_destroy')
def test_flavor_destroy_with_notification_and_projects(self, mock_destroy):
"""Tests the flavor-delete notification with flavor.projects loaded."""
flavor = copy.deepcopy(fake_flavor)
flavorid = '1'
flavor['flavorid'] = flavorid
flavor['id'] = flavorid
mock_destroy.return_value = flavor
flavor_obj = objects.Flavor(
context=self.ctxt, projects=['foo'], **flavor)
flavor_obj.obj_reset_changes()
self.assertIn('projects', flavor_obj)
self.assertEqual(['foo'], flavor_obj.projects)
# Since projects is loaded we shouldn't try to lazy-load it.
self._verify_notification(flavor_obj, flavor, "DELETE")
def test_obj_make_compatible(self):
flavor = copy.deepcopy(fake_flavor)
flavorid = '1'
flavor['flavorid'] = flavorid
flavor['id'] = flavorid
flavor_obj = objects.Flavor(context=self.ctxt, **flavor)
flavor_payload = flavor_notification.FlavorPayload(flavor_obj)
primitive = flavor_payload.obj_to_primitive()
self.assertIn('name', primitive['nova_object.data'])
self.assertIn('swap', primitive['nova_object.data'])
self.assertIn('rxtx_factor', primitive['nova_object.data'])
self.assertIn('vcpu_weight', primitive['nova_object.data'])
self.assertIn('disabled', primitive['nova_object.data'])
self.assertIn('is_public', primitive['nova_object.data'])
flavor_payload.obj_make_compatible(primitive['nova_object.data'],
'1.0')
self.assertNotIn('name', primitive['nova_object.data'])
self.assertNotIn('swap', primitive['nova_object.data'])
self.assertNotIn('rxtx_factor', primitive['nova_object.data'])
self.assertNotIn('vcpu_weight', primitive['nova_object.data'])
self.assertNotIn('disabled', primitive['nova_object.data'])
self.assertNotIn('is_public', primitive['nova_object.data'])
| apache-2.0 |
serviceagility/boto | boto/sdb/domain.py | 153 | 14351 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import print_function
"""
Represents an SDB Domain
"""
from boto.sdb.queryresultset import SelectResultSet
from boto.compat import six
class Domain(object):
def __init__(self, connection=None, name=None):
self.connection = connection
self.name = name
self._metadata = None
def __repr__(self):
return 'Domain:%s' % self.name
def __iter__(self):
return iter(self.select("SELECT * FROM `%s`" % self.name))
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'DomainName':
self.name = value
else:
setattr(self, name, value)
def get_metadata(self):
if not self._metadata:
self._metadata = self.connection.domain_metadata(self)
return self._metadata
def put_attributes(self, item_name, attributes,
replace=True, expected_value=None):
"""
Store attributes for a given item.
:type item_name: string
:param item_name: The name of the item whose attributes are being stored.
        :type attributes: dict or dict-like object
        :param attributes: The name/value pairs to store as attributes
:type expected_value: list
:param expected_value: If supplied, this is a list or tuple consisting
of a single attribute name and expected value. The list can be
of the form:
* ['name', 'value']
In which case the call will first verify that the attribute
"name" of this item has a value of "value". If it does, the delete
will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
which will simply check for the existence (True) or non-existence
(False) of the attribute.
:type replace: bool
:param replace: Whether the attribute values passed in will replace
existing values or will be added as addition values.
Defaults to True.
:rtype: bool
:return: True if successful
"""
return self.connection.put_attributes(self, item_name, attributes,
replace, expected_value)
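    # Editor's note -- illustrative usage, assuming ``domain`` is an existing
    # Domain instance (a sketch, not part of the original source):
    #   domain.put_attributes('item1', {'color': 'blue', 'size': 'L'})
    #   # Conditional put: only succeeds while 'color' is still 'blue'.
    #   domain.put_attributes('item1', {'color': 'red'},
    #                         expected_value=['color', 'blue'])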
def batch_put_attributes(self, items, replace=True):
"""
Store attributes for multiple items.
:type items: dict or dict-like object
:param items: A dictionary-like object. The keys of the dictionary are
the item names and the values are themselves dictionaries
of attribute names/values, exactly the same as the
attribute_names parameter of the scalar put_attributes
call.
:type replace: bool
:param replace: Whether the attribute values passed in will replace
existing values or will be added as addition values.
Defaults to True.
:rtype: bool
:return: True if successful
"""
return self.connection.batch_put_attributes(self, items, replace)
def get_attributes(self, item_name, attribute_name=None,
consistent_read=False, item=None):
"""
Retrieve attributes for a given item.
:type item_name: string
:param item_name: The name of the item whose attributes are being retrieved.
        :type attribute_name: string or list of strings
        :param attribute_name: An attribute name or list of attribute names. This
parameter is optional. If not supplied, all attributes
will be retrieved for the item.
:rtype: :class:`boto.sdb.item.Item`
:return: An Item mapping type containing the requested attribute name/values
"""
return self.connection.get_attributes(self, item_name, attribute_name,
consistent_read, item)
def delete_attributes(self, item_name, attributes=None,
expected_values=None):
"""
Delete attributes from a given item.
:type item_name: string
:param item_name: The name of the item whose attributes are being deleted.
:type attributes: dict, list or :class:`boto.sdb.item.Item`
:param attributes: Either a list containing attribute names which will cause
all values associated with that attribute name to be deleted or
a dict or Item containing the attribute names and keys and list
of values to delete as the value. If no value is supplied,
all attribute name/values for the item will be deleted.
        :type expected_values: list
        :param expected_values: If supplied, this is a list or tuple consisting
of a single attribute name and expected value. The list can be of
the form:
* ['name', 'value']
In which case the call will first verify that the attribute "name"
of this item has a value of "value". If it does, the delete
will proceed, otherwise a ConditionalCheckFailed error will be
returned. The list can also be of the form:
* ['name', True|False]
which will simply check for the existence (True) or
non-existence (False) of the attribute.
:rtype: bool
:return: True if successful
"""
return self.connection.delete_attributes(self, item_name, attributes,
expected_values)
def batch_delete_attributes(self, items):
"""
Delete multiple items in this domain.
:type items: dict or dict-like object
:param items: A dictionary-like object. The keys of the dictionary are
the item names and the values are either:
* dictionaries of attribute names/values, exactly the
same as the attribute_names parameter of the scalar
put_attributes call. The attribute name/value pairs
will only be deleted if they match the name/value
pairs passed in.
* None which means that all attributes associated
with the item should be deleted.
:rtype: bool
:return: True if successful
"""
return self.connection.batch_delete_attributes(self, items)
def select(self, query='', next_token=None, consistent_read=False, max_items=None):
"""
Returns a set of Attributes for item names within domain_name that match the query.
The query must be expressed in using the SELECT style syntax rather than the
original SimpleDB query language.
:type query: string
:param query: The SimpleDB query to be performed.
:rtype: iter
:return: An iterator containing the results. This is actually a generator
function that will iterate across all search results, not just the
first page.
"""
return SelectResultSet(self, query, max_items=max_items, next_token=next_token,
consistent_read=consistent_read)
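    # Editor's note -- illustrative usage (a sketch, not part of the original
    # source); the result set transparently follows next_token across pages:
    #   query = "select * from `%s` where color = 'blue'" % domain.name
    #   for item in domain.select(query, consistent_read=True):
    #       print(item.name, item)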
def get_item(self, item_name, consistent_read=False):
"""
Retrieves an item from the domain, along with all of its attributes.
:param string item_name: The name of the item to retrieve.
:rtype: :class:`boto.sdb.item.Item` or ``None``
:keyword bool consistent_read: When set to true, ensures that the most
recent data is returned.
:return: The requested item, or ``None`` if there was no match found
"""
item = self.get_attributes(item_name, consistent_read=consistent_read)
if item:
item.domain = self
return item
else:
return None
def new_item(self, item_name):
return self.connection.item_cls(self, item_name)
def delete_item(self, item):
self.delete_attributes(item.name)
def to_xml(self, f=None):
"""Get this domain as an XML DOM Document
:param f: Optional File to dump directly to
:type f: File or Stream
:return: File object where the XML has been dumped to
:rtype: file
"""
if not f:
from tempfile import TemporaryFile
f = TemporaryFile()
print('<?xml version="1.0" encoding="UTF-8"?>', file=f)
print('<Domain id="%s">' % self.name, file=f)
for item in self:
print('\t<Item id="%s">' % item.name, file=f)
for k in item:
print('\t\t<attribute id="%s">' % k, file=f)
values = item[k]
if not isinstance(values, list):
values = [values]
for value in values:
print('\t\t\t<value><![CDATA[', end=' ', file=f)
if isinstance(value, six.text_type):
value = value.encode('utf-8', 'replace')
else:
value = six.text_type(value, errors='replace').encode('utf-8', 'replace')
f.write(value)
print(']]></value>', file=f)
print('\t\t</attribute>', file=f)
print('\t</Item>', file=f)
print('</Domain>', file=f)
f.flush()
f.seek(0)
return f
def from_xml(self, doc):
"""Load this domain based on an XML document"""
import xml.sax
handler = DomainDumpParser(self)
xml.sax.parse(doc, handler)
return handler
def delete(self):
"""
Delete this domain, and all items under it
"""
return self.connection.delete_domain(self)
class DomainMetaData(object):
def __init__(self, domain=None):
self.domain = domain
self.item_count = None
self.item_names_size = None
self.attr_name_count = None
self.attr_names_size = None
self.attr_value_count = None
self.attr_values_size = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'ItemCount':
self.item_count = int(value)
elif name == 'ItemNamesSizeBytes':
self.item_names_size = int(value)
elif name == 'AttributeNameCount':
self.attr_name_count = int(value)
elif name == 'AttributeNamesSizeBytes':
self.attr_names_size = int(value)
elif name == 'AttributeValueCount':
self.attr_value_count = int(value)
elif name == 'AttributeValuesSizeBytes':
self.attr_values_size = int(value)
elif name == 'Timestamp':
self.timestamp = value
else:
setattr(self, name, value)
import sys
from xml.sax.handler import ContentHandler
class DomainDumpParser(ContentHandler):
"""
SAX parser for a domain that has been dumped
"""
def __init__(self, domain):
self.uploader = UploaderThread(domain)
self.item_id = None
self.attrs = {}
self.attribute = None
self.value = ""
self.domain = domain
def startElement(self, name, attrs):
if name == "Item":
self.item_id = attrs['id']
self.attrs = {}
elif name == "attribute":
self.attribute = attrs['id']
elif name == "value":
self.value = ""
def characters(self, ch):
self.value += ch
def endElement(self, name):
if name == "value":
if self.value and self.attribute:
value = self.value.strip()
attr_name = self.attribute.strip()
if attr_name in self.attrs:
self.attrs[attr_name].append(value)
else:
self.attrs[attr_name] = [value]
elif name == "Item":
self.uploader.items[self.item_id] = self.attrs
# Every 20 items we spawn off the uploader
if len(self.uploader.items) >= 20:
self.uploader.start()
self.uploader = UploaderThread(self.domain)
elif name == "Domain":
# If we're done, spawn off our last Uploader Thread
self.uploader.start()
from threading import Thread
class UploaderThread(Thread):
"""Uploader Thread"""
def __init__(self, domain):
self.db = domain
self.items = {}
super(UploaderThread, self).__init__()
def run(self):
try:
self.db.batch_put_attributes(self.items)
except:
print("Exception using batch put, trying regular put instead")
for item_name in self.items:
self.db.put_attributes(item_name, self.items[item_name])
print(".", end=' ')
sys.stdout.flush()
| mit |
bplancher/odoo | openerp/service/report.py | 56 | 5118 | # -*- coding: utf-8 -*-
import base64
import logging
import sys
import threading
import openerp
import openerp.report
from openerp import tools
from openerp.exceptions import UserError
import security
_logger = logging.getLogger(__name__)
# TODO: set a maximum report number per user to avoid DOS attacks
#
# Report state: False while the report is still being generated,
# flipped to True once rendering has finished (successfully or not).
self_reports = {}
self_id = 0
self_id_protect = threading.Semaphore()
def dispatch(method, params):
(db, uid, passwd ) = params[0:3]
threading.current_thread().uid = uid
params = params[3:]
if method not in ['report', 'report_get', 'render_report']:
raise KeyError("Method not supported %s" % method)
security.check(db,uid,passwd)
openerp.modules.registry.RegistryManager.check_registry_signaling(db)
fn = globals()['exp_' + method]
res = fn(db, uid, *params)
openerp.modules.registry.RegistryManager.signal_caches_change(db)
return res
def exp_render_report(db, uid, object, ids, datas=None, context=None):
if not datas:
datas={}
if not context:
context={}
self_id_protect.acquire()
global self_id
self_id += 1
id = self_id
self_id_protect.release()
self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
cr = openerp.registry(db).cursor()
try:
result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
if not result:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
self_reports[id]['result'] = result
self_reports[id]['format'] = format
self_reports[id]['state'] = True
except Exception, exception:
_logger.exception('Exception: %s\n', exception)
if hasattr(exception, 'name') and hasattr(exception, 'value'):
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
else:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
self_reports[id]['state'] = True
cr.commit()
cr.close()
return _check_report(id)
def exp_report(db, uid, object, ids, datas=None, context=None):
if not datas:
datas={}
if not context:
context={}
self_id_protect.acquire()
global self_id
self_id += 1
id = self_id
self_id_protect.release()
self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
def go(id, uid, ids, datas, context):
with openerp.api.Environment.manage():
cr = openerp.registry(db).cursor()
try:
result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
if not result:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
self_reports[id]['result'] = result
self_reports[id]['format'] = format
self_reports[id]['state'] = True
except Exception, exception:
_logger.exception('Exception: %s\n', exception)
if hasattr(exception, 'name') and hasattr(exception, 'value'):
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
else:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
self_reports[id]['state'] = True
cr.commit()
cr.close()
return True
threading.Thread(target=go, args=(id, uid, ids, datas, context)).start()
return id
def _check_report(report_id):
result = self_reports[report_id]
exc = result['exception']
if exc:
raise UserError('%s: %s' % (exc.message, exc.traceback))
res = {'state': result['state']}
if res['state']:
if tools.config['reportgz']:
import zlib
res2 = zlib.compress(result['result'])
res['code'] = 'zlib'
else:
#CHECKME: why is this needed???
if isinstance(result['result'], unicode):
res2 = result['result'].encode('latin1', 'replace')
else:
res2 = result['result']
if res2:
res['result'] = base64.encodestring(res2)
res['format'] = result['format']
del self_reports[report_id]
return res
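# Editor's note -- a client-side sketch for consuming the payload built above,
# assuming the 'state'/'code'/'result' keys set by _check_report (not part of
# the original source):
#   import base64, zlib
#   res = exp_report_get(db, uid, report_id)
#   if res['state']:
#       raw = base64.decodestring(res['result'])
#       if res.get('code') == 'zlib':
#           raw = zlib.decompress(raw)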
def exp_report_get(db, uid, report_id):
if report_id in self_reports:
if self_reports[report_id]['uid'] == uid:
return _check_report(report_id)
else:
raise Exception, 'AccessDenied'
else:
raise Exception, 'ReportNotFound'
| agpl-3.0 |
Lilykos/invenio | invenio/legacy/bibrank/adminlib.py | 13 | 43633 | # This file is part of Invenio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio BibRank Administrator Interface."""
__revision__ = "$Id$"
import os
import ConfigParser
from invenio.config import \
CFG_SITE_LANG, \
CFG_SITE_URL
from invenio.base.helpers import utf8ifier
import invenio.modules.access.engine as acce
from invenio.base.i18n import language_list_long
from invenio.legacy.dbquery import run_sql, wash_table_column_name
from invenio.modules.ranker.registry import configuration
def getnavtrail(previous=''):
navtrail = """<a class="navtrail" href="%s/help/admin">Admin Area</a> """ % (
CFG_SITE_URL,)
navtrail = navtrail + previous
return navtrail
def check_user(req, role, adminarea=2, authorized=0):
(auth_code, auth_message) = is_adminuser(req, role)
if not authorized and auth_code != 0:
return ("false", auth_message)
return ("", auth_message)
def is_adminuser(req, role):
"""check if user is a registered administrator. """
return acce.acc_authorize_action(req, role)
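# Editor's note -- an illustrative guard using the helpers above; the role
# name 'cfgbibrank' is hypothetical (a sketch, not part of the original
# source):
#   (auth, auth_message) = check_user(req, 'cfgbibrank')
#   if auth:
#       return auth_message   # "false": not authorized, show the message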
def perform_index(ln=CFG_SITE_LANG):
"""create the bibrank main area menu page."""
header = ['Code', 'Translations', 'Collections', 'Rank method']
rnk_list = get_def_name('', "rnkMETHOD")
actions = []
for (rnkID, name) in rnk_list:
actions.append([name])
for col in [(('Modify', 'modifytranslations'),),
(('Modify', 'modifycollection'),),
(('Show Details', 'showrankdetails'),
('Modify', 'modifyrank'),
('Delete', 'deleterank'))]:
actions[-1].append('<a href="%s/admin/bibrank/bibrankadmin.py/%s?rnkID=%s&ln=%s">%s</a>' %
(CFG_SITE_URL, col[0][1], rnkID, ln, col[0][0]))
for (str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/bibrank/bibrankadmin.py/%s?rnkID=%s&ln=%s">%s</a>' % (
CFG_SITE_URL, function, rnkID, ln, str)
output = """
<a href="%s/admin/bibrank/bibrankadmin.py/addrankarea?ln=%s">Add new rank method</a><br /><br />
""" % (CFG_SITE_URL, ln)
output += tupletotable(header=header, tuple=actions)
return addadminbox("""Overview of rank methods <small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mi">?</a>]</small>""" % CFG_SITE_URL, datalist=[output, ''])
def perform_modifycollection(rnkID='', ln=CFG_SITE_LANG, func='', colID='', confirm=0):
"""Modify which collections the rank method is visible to"""
output = ""
subtitle = ""
if rnkID:
rnkNAME = get_def_name(rnkID, "rnkMETHOD")[0][1]
if func in ["0", 0] and confirm in ["1", 1]:
finresult = attach_col_rnk(rnkID, colID)
elif func in ["1", 1] and confirm in ["1", 1]:
finresult = detach_col_rnk(rnkID, colID)
if colID:
colNAME = get_def_name(colID, "collection")[0][1]
subtitle = """Step 1 - Select collection to enable/disable rank method '%s' for""" % rnkNAME
output = """
<dl>
<dt>The rank method is currently enabled for these collections:</dt>
<dd>
"""
col_list = get_rnk_col(rnkID, ln)
if not col_list:
output += """No collections"""
else:
for (id, name) in col_list:
output += """%s, """ % name
output += """</dd>
</dl>
"""
col_list = get_def_name('', "collection")
col_rnk = dict(get_rnk_col(rnkID))
col_list = filter(lambda x: x[0] not in col_rnk, col_list)
if col_list:
text = """
<span class="adminlabel">Enable for:</span>
<select name="colID" class="admin_w200">
<option value="">- select collection -</option>
"""
for (id, name) in col_list:
text += """<option value="%s" %s>%s</option>""" % (id, (func in ["0", 0] and confirm in [
"0", 0] and colID and int(colID) == int(id)) and 'selected="selected"' or '', name)
text += """</select>"""
output += createhiddenform(action="modifycollection",
text=text,
button="Enable",
rnkID=rnkID,
ln=ln,
func=0,
confirm=1)
if confirm in ["0", 0] and func in ["0", 0] and colID:
subtitle = "Step 2 - Confirm to enable rank method for the chosen collection"
text = "<b><p>Please confirm to enable rank method '%s' for the collection '%s'</p></b>" % (
rnkNAME, colNAME)
output += createhiddenform(action="modifycollection",
text=text,
button="Confirm",
rnkID=rnkID,
ln=ln,
colID=colID,
func=0,
confirm=1)
elif confirm in ["1", 1] and func in ["0", 0] and colID:
subtitle = "Step 3 - Result"
output += write_outcome(finresult)
elif confirm not in ["0", 0] and func in ["0", 0]:
output += """<b><span class="info">Please select a collection.</span></b>"""
col_list = get_rnk_col(rnkID, ln)
if col_list:
text = """
<span class="adminlabel">Disable for:</span>
<select name="colID" class="admin_w200">
<option value="">- select collection -</option>
"""
for (id, name) in col_list:
text += """<option value="%s" %s>%s</option>""" % (id, (func in ["1", 1] and confirm in [
"0", 0] and colID and int(colID) == int(id)) and 'selected="selected"' or '', name)
text += """</select>"""
output += createhiddenform(action="modifycollection",
text=text,
button="Disable",
rnkID=rnkID,
ln=ln,
func=1,
confirm=1)
if confirm in ["1", 1] and func in ["1", 1] and colID:
subtitle = "Step 3 - Result"
output += write_outcome(finresult)
elif confirm not in ["0", 0] and func in ["1", 1]:
output += """<b><span class="info">Please select a collection.</span></b>"""
body = [output]
return addadminbox(subtitle + """ <small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mc">?</a>]</small>""" % CFG_SITE_URL, body)
def perform_modifytranslations(rnkID, ln, sel_type, trans, confirm, callback='yes'):
"""Modify the translations of a rank method"""
output = ''
subtitle = ''
langs = get_languages()
langs.sort()
if confirm in ["2", 2] and rnkID:
finresult = modify_translations(
rnkID, langs, sel_type, trans, "rnkMETHOD")
rnk_name = get_def_name(rnkID, "rnkMETHOD")[0][1]
rnk_dict = dict(
get_i8n_name('', ln, get_rnk_nametypes()[0][0], "rnkMETHOD"))
if rnkID and int(rnkID) in rnk_dict:
rnkID = int(rnkID)
subtitle = """<a name="3">3. Modify translations for rank method '%s'</a>""" % rnk_name
if type(trans) is str:
trans = [trans]
if sel_type == '':
sel_type = get_rnk_nametypes()[0][0]
header = ['Language', 'Translation']
actions = []
text = """
<span class="adminlabel">Name type</span>
<select name="sel_type" class="admin_w200">
"""
types = get_rnk_nametypes()
if len(types) > 1:
for (key, value) in types:
text += """<option value="%s" %s>%s""" % (
key, key == sel_type and 'selected="selected"' or '', value)
trans_names = get_name(rnkID, ln, key, "rnkMETHOD")
if trans_names and trans_names[0][0]:
text += ": %s" % trans_names[0][0]
text += "</option>"
text += """</select>"""
output += createhiddenform(action="modifytranslations",
text=text,
button="Select",
rnkID=rnkID,
ln=ln,
confirm=0)
if confirm in [-1, "-1", 0, "0"]:
trans = []
for key, value in langs:
try:
trans_names = get_name(rnkID, key, sel_type, "rnkMETHOD")
trans.append(trans_names[0][0])
except StandardError as e:
trans.append('')
for nr in range(0, len(langs)):
actions.append(["%s" % (langs[nr][1],)])
actions[-1].append(
'<input type="text" name="trans" size="30" value="%s"/>' % trans[nr])
text = tupletotable(header=header, tuple=actions)
output += createhiddenform(action="modifytranslations",
text=text,
button="Modify",
rnkID=rnkID,
sel_type=sel_type,
ln=ln,
confirm=2)
if sel_type and len(trans) and confirm in ["2", 2]:
output += write_outcome(finresult)
body = [output]
return addadminbox(subtitle + """ <small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mt">?</a>]</small>""" % CFG_SITE_URL, body)
def perform_addrankarea(rnkcode='', ln=CFG_SITE_LANG, template='', confirm=-1):
"""form to add a new rank method with these values:"""
subtitle = 'Step 1 - Create new rank method'
output = """
<dl>
<dt>BibRank code:</dt>
<dd>A unique code that identifies a rank method, is used when running the bibrank daemon and used to name the configuration file for the method.
<br />The template files includes the necessary parameters for the chosen rank method, and only needs to be edited with the correct tags and paths.
<br />For more information, please go to the <a title="See guide" href="%s/help/admin/bibrank-admin-guide">BibRank guide</a> and read the section about adding a rank method</dd>
</dl>
""" % CFG_SITE_URL
text = """
<span class="adminlabel">BibRank code</span>
<input class="admin_wvar" type="text" name="rnkcode" value="%s" />
""" % (rnkcode)
text += """<br />
<span class="adminlabel">Cfg template</span>
<select name="template" class="admin_w200">
<option value="">No template</option>
"""
templates = get_templates()
for templ in templates:
text += """<option value="%s" %s>%s</option>""" % (
templ, template == templ and 'selected="selected"' or '', templ[9:len(templ) - 4])
text += """</select>"""
output += createhiddenform(action="addrankarea",
text=text,
button="Add rank method",
ln=ln,
confirm=1)
if rnkcode:
if confirm in ["0", 0]:
subtitle = 'Step 2 - Confirm addition of rank method'
text = """<b>Add rank method with BibRank code: '%s'.</b>""" % (
rnkcode)
if template:
text += """<br /><b>Using configuration template: '%s'.</b>""" % (
template)
else:
text += """<br /><b>Create empty configuration file.</b>"""
output += createhiddenform(action="addrankarea",
text=text,
rnkcode=rnkcode,
button="Confirm",
template=template,
confirm=1)
elif confirm in ["1", 1]:
rnkID = add_rnk(rnkcode)
subtitle = "Step 3 - Result"
if rnkID[0] == 1:
rnkID = rnkID[1]
text = """<b><span class="info">Added new rank method with BibRank code '%s'</span></b>""" % rnkcode
try:
if template:
infile = open(configuration.get(template, ''), 'r')
indata = infile.readlines()
infile.close()
else:
indata = ()
file = open(
configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), 'w')
for line in indata:
file.write(line)
file.close()
if template:
text += """<b><span class="info"><br />Configuration file created using '%s' as template.</span></b>""" % template
else:
text += """<b><span class="info"><br />Empty configuration file created.</span></b>"""
except StandardError as e:
text += """<b><span class="info"><br />Sorry, could not create configuration file: '%s.cfg', either because it already exists, or not enough rights to create file. <br />Please create the file in the path given.</span></b>""" % (
configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), )
else:
text = """<b><span class="info">Sorry, could not add rank method, rank method with the same BibRank code probably exists.</span></b>"""
output += text
elif not rnkcode and confirm not in [-1, "-1"]:
output += """<b><span class="info">Sorry, could not add rank method, not enough data submitted.</span></b>"""
body = [output]
return addadminbox(subtitle + """ <small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#ar">?</a>]</small>""" % CFG_SITE_URL, body)
def perform_modifyrank(rnkID, rnkcode='', ln=CFG_SITE_LANG, template='', cfgfile='', confirm=0):
"""form to modify a rank method
rnkID - id of the rank method
"""
if not rnkID:
return "No ranking method selected."
if not get_rnk_code(rnkID):
return "Ranking method %s does not seem to exist." % str(rnkID)
subtitle = 'Step 1 - Please modify the wanted values below'
if not rnkcode:
oldcode = get_rnk_code(rnkID)[0]
else:
oldcode = rnkcode
output = """
<dl>
<dd>When changing the BibRank code of a rank method, you must also change any scheduled tasks using the old value.
<br />For more information, please go to the <a title="See guide" href="%s/help/admin/bibrank-admin-guide">BibRank guide</a> and read the section about modifying a rank method's BibRank code.</dd>
</dl>
""" % CFG_SITE_URL
text = """
<span class="adminlabel">BibRank code</span>
<input class="admin_wvar" type="text" name="rnkcode" value="%s" />
<br />
""" % (oldcode)
try:
text += """<span class="adminlabel">Cfg file</span>"""
textarea = ""
if cfgfile:
textarea += cfgfile
else:
file = open(
configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''))
for line in file.readlines():
textarea += line
text += """<textarea class="admin_wvar" name="cfgfile" rows="15" cols="70">""" + \
textarea + """</textarea>"""
except StandardError as e:
text += """<b><span class="info">Cannot load file, either it does not exist, or not enough rights to read it: '%s.cfg'<br />Please create the file in the path given.</span></b>""" % (
configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), )
output += createhiddenform(action="modifyrank",
text=text,
rnkID=rnkID,
button="Modify",
confirm=1)
if rnkcode and confirm in ["1", 1] and get_rnk_code(rnkID)[0][0] != rnkcode:
oldcode = get_rnk_code(rnkID)[0][0]
result = modify_rnk(rnkID, rnkcode)
subtitle = "Step 3 - Result"
if result:
text = """<b><span class="info">Rank method modified.</span></b>"""
try:
file = open(configuration.get(oldcode + '.cfg', ''), 'r')
file2 = open(configuration.get(rnkcode + '.cfg', ''), 'w')
lines = file.readlines()
for line in lines:
file2.write(line)
file.close()
file2.close()
os.remove(configuration.get(oldcode + '.cfg', ''))
except StandardError as e:
text = """<b><span class="info">Sorry, could not change name of cfg file, must be done manually: '%s.cfg'</span></b>""" % (
configuration.get(oldcode + '.cfg', ''), )
else:
text = """<b><span class="info">Sorry, could not modify rank method.</span></b>"""
output += text
if cfgfile and confirm in ["1", 1]:
try:
file = open(
configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), 'w')
file.write(cfgfile)
file.close()
text = """<b><span class="info"><br />Configuration file modified: '%s/bibrank/%s.cfg'</span></b>""" % (
configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), )
except StandardError as e:
text = """<b><span class="info"><br />Sorry, could not modify configuration file, please check for rights to do so: '%s.cfg'<br />Please modify the file manually.</span></b>""" % (
configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), )
output += text
finoutput = addadminbox(
subtitle + """ <small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mr">?</a>]</small>""" % CFG_SITE_URL, [output])
output = ""
text = """
<span class="adminlabel">Select</span>
<select name="template" class="admin_w200">
<option value="">- select template -</option>
"""
templates = get_templates()
for templ in templates:
text += """<option value="%s" %s>%s</option>""" % (
templ, template == templ and 'selected="selected"' or '', templ[9:len(templ) - 4])
text += """</select><br />"""
output += createhiddenform(action="modifyrank",
text=text,
rnkID=rnkID,
button="Show template",
confirm=0)
try:
if template:
textarea = ""
text = """<span class="adminlabel">Content:</span>"""
file = open(configuration.get(template, ''), 'r')
lines = file.readlines()
for line in lines:
textarea += line
file.close()
text += """<textarea class="admin_wvar" readonly="true" rows="15" cols="70">""" + \
textarea + """</textarea>"""
output += text
except StandardError as e:
output += """Cannot load file, either it does not exist, or not enough rights to read it: '%s'""" % (
configuration.get(template, ''), )
finoutput += addadminbox("View templates", [output])
return finoutput
def perform_deleterank(rnkID, ln=CFG_SITE_LANG, confirm=0):
"""form to delete a rank method
"""
subtitle = ''
output = """
<span class="warning">
<dl>
<dt><strong>WARNING:</strong></dt>
<dd><strong>When deleting a rank method, you also delete all data related to it, such as translations, the collections
it was attached to, and the data necessary to rank the search results. Any scheduled tasks using the deleted rank method will also stop working.
<br /><br />For more information, please go to the <a title="See guide" href="%s/help/admin/bibrank-admin-guide">BibRank guide</a> and read the section regarding deleting a rank method.</strong></dd>
</dl>
</span>
""" % CFG_SITE_URL
if rnkID:
if confirm in ["0", 0]:
rnkNAME = get_def_name(rnkID, "rnkMETHOD")[0][1]
subtitle = 'Step 1 - Confirm deletion'
text = """Delete rank method '%s'.""" % (rnkNAME)
output += createhiddenform(action="deleterank",
text=text,
button="Confirm",
rnkID=rnkID,
confirm=1)
elif confirm in ["1", 1]:
try:
rnkNAME = get_def_name(rnkID, "rnkMETHOD")[0][1]
rnkcode = get_rnk_code(rnkID)[0][0]
table = ""
try:
config = ConfigParser.ConfigParser()
config.readfp(
open(configuration.get(rnkcode + ".cfg"), 'r'))
table = config.get(
config.get('rank_method', "function"), "table")
except Exception:
pass
result = delete_rnk(rnkID, table)
subtitle = "Step 2 - Result"
if result:
text = """<b><span class="info">Rank method deleted</span></b>"""
try:
os.remove(configuration.get(rnkcode + ".cfg"))
text += """<br /><b><span class="info">Configuration file deleted: '%s.cfg'.</span></b>""" % (
configuration.get(rnkcode + ".cfg"), )
except StandardError as e:
text += """<br /><b><span class="info">Sorry, could not delete configuration file: '%s/bibrank/%s.cfg'.</span><br />Please delete the file manually.</span></b>""" % (
configuration.get(rnkcode + ".cfg"), )
else:
text = """<b><span class="info">Sorry, could not delete rank method</span></b>"""
except StandardError as e:
text = """<b><span class="info">Sorry, could not delete rank method, most likely already deleted</span></b>"""
output = text
body = [output]
return addadminbox(subtitle + """ <small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#dr">?</a>]</small>""" % CFG_SITE_URL, body)
def perform_showrankdetails(rnkID, ln=CFG_SITE_LANG):
"""Returns details about the rank method given by rnkID"""
if not rnkID:
return "No ranking method selected."
if not get_rnk_code(rnkID):
return "Ranking method %s does not seem to exist." % str(rnkID)
subtitle = """Overview <a href="%s/admin/bibrank/bibrankadmin.py/modifyrank?rnkID=%s&ln=%s">[Modify]</a>""" % (
CFG_SITE_URL, rnkID, ln)
text = """
BibRank code: %s<br />
Last updated by BibRank:
""" % (get_rnk_code(rnkID)[0][0])
if get_rnk(rnkID)[0][2]:
text += "%s<br />" % get_rnk(rnkID)[0][2]
else:
text += "Not yet run.<br />"
output = addadminbox(subtitle, [text])
subtitle = """Rank method statistics"""
text = ""
try:
text = "Not yet implemented"
except StandardError as e:
text = "BibRank not yet run, cannot show statistics for method"
output += addadminbox(subtitle, [text])
subtitle = """Attached to collections <a href="%s/admin/bibrank/bibrankadmin.py/modifycollection?rnkID=%s&ln=%s">[Modify]</a>""" % (
CFG_SITE_URL, rnkID, ln)
text = ""
col = get_rnk_col(rnkID, ln)
for key, value in col:
text += "%s<br />" % value
if not col:
text += "No collections"
output += addadminbox(subtitle, [text])
subtitle = """Translations <a href="%s/admin/bibrank/bibrankadmin.py/modifytranslations?rnkID=%s&ln=%s">[Modify]</a>""" % (
CFG_SITE_URL, rnkID, ln)
prev_lang = ''
trans = get_translations(rnkID)
types = get_rnk_nametypes()
    types = dict(types)
text = ""
languages = dict(get_languages())
if trans:
for lang, type, name in trans:
if lang and lang in languages and type and name:
if prev_lang != lang:
prev_lang = lang
text += """%s: <br />""" % (languages[lang])
if type in types:
text += """<span style="margin-left: 10px">'%s'</span><span class="note">(%s)</span><br />""" % (
name, types[type])
else:
text = """No translations exists"""
output += addadminbox(subtitle, [text])
subtitle = """Configuration file: '%s/bibrank/%s.cfg' <a href="%s/admin/bibrank/bibrankadmin.py/modifyrank?rnkID=%s&ln=%s">[Modify]</a>""" % (
CFG_ETCDIR, get_rnk_code(rnkID)[0][0], CFG_SITE_URL, rnkID, ln)
text = ""
try:
file = open(configuration.get(get_rnk_code(rnkID)[0][0] + ".cfg", ''))
text += """<pre>"""
for line in file.readlines():
text += line
text += """</pre>"""
except StandardError as e:
text = """Cannot load file, either it does not exist, or not enough rights to read it."""
output += addadminbox(subtitle, [text])
return output
def compare_on_val(second, first):
    """Comparison function for sort(): orders (id, value) tuples by value."""
    return cmp(second[1], first[1])
def get_rnk_code(rnkID):
"""Returns the name from rnkMETHOD based on argument
rnkID - id from rnkMETHOD"""
try:
res = run_sql("SELECT name FROM rnkMETHOD where id=%s" % (rnkID))
return res
except StandardError as e:
return ()
def get_rnk(rnkID=''):
"""Return one or all rank methods
rnkID - return the rank method given, or all if not given"""
try:
if rnkID:
res = run_sql(
"SELECT id,name,DATE_FORMAT(last_updated, '%%Y-%%m-%%d %%H:%%i:%%s') from rnkMETHOD WHERE id=%s" % rnkID)
else:
res = run_sql(
"SELECT id,name,DATE_FORMAT(last_updated, '%%Y-%%m-%%d %%H:%%i:%%s') from rnkMETHOD")
return res
except StandardError as e:
return ()
def get_translations(rnkID):
"""Returns the translations in rnkMETHODNAME for a rankmethod
rnkID - the id of the rankmethod from rnkMETHOD """
try:
res = run_sql(
"SELECT ln, type, value FROM rnkMETHODNAME where id_rnkMETHOD=%s ORDER BY ln,type" % (rnkID))
return res
except StandardError as e:
return ()
def get_rnk_nametypes():
"""Return a list of the various translationnames for the rank methods"""
type = []
type.append(('ln', 'Long name'))
#type.append(('sn', 'Short name'))
return type
def get_col_nametypes():
"""Return a list of the various translationnames for the rank methods"""
type = []
type.append(('ln', 'Long name'))
return type
def get_rnk_col(rnkID, ln=CFG_SITE_LANG):
""" Returns a list of the collections the given rank method is attached to
rnkID - id from rnkMETHOD"""
try:
res1 = dict(run_sql(
"SELECT id_collection, '' FROM collection_rnkMETHOD WHERE id_rnkMETHOD=%s" % rnkID))
res2 = get_def_name('', "collection")
result = filter(lambda x: x[0] in res1, res2)
return result
except StandardError as e:
return ()
def get_templates():
"""Read CFG_ETCDIR/bibrank and returns a list of all files with 'template' """
templates = []
files = configuration.itervalues()
for file in files:
if str.find(file, "template_") != -1:
templates.append(file)
return templates
def attach_col_rnk(rnkID, colID):
"""attach rank method to collection
rnkID - id from rnkMETHOD table
colID - id of collection, as in collection table """
try:
res = run_sql(
"INSERT INTO collection_rnkMETHOD(id_collection, id_rnkMETHOD) values (%s,%s)" % (colID, rnkID))
return (1, "")
except StandardError as e:
return (0, e)
def detach_col_rnk(rnkID, colID):
"""detach rank method from collection
rnkID - id from rnkMETHOD table
colID - id of collection, as in collection table """
try:
res = run_sql(
"DELETE FROM collection_rnkMETHOD WHERE id_collection=%s AND id_rnkMETHOD=%s" % (colID, rnkID))
return (1, "")
except StandardError as e:
return (0, e)
def delete_rnk(rnkID, table=""):
"""Deletes all data for the given rank method
rnkID - delete all data in the tables associated with ranking and this id """
try:
res = run_sql("DELETE FROM rnkMETHOD WHERE id=%s" % rnkID)
res = run_sql(
"DELETE FROM rnkMETHODNAME WHERE id_rnkMETHOD=%s" % rnkID)
res = run_sql(
"DELETE FROM collection_rnkMETHOD WHERE id_rnkMETHOD=%s" % rnkID)
res = run_sql(
"DELETE FROM rnkMETHODDATA WHERE id_rnkMETHOD=%s" % rnkID)
if table:
res = run_sql("truncate %s" % table)
res = run_sql("truncate %sR" % table[:-1])
return (1, "")
except StandardError as e:
return (0, e)
def modify_rnk(rnkID, rnkcode):
"""change the code for the rank method given
rnkID - change in rnkMETHOD where id is like this
rnkcode - new value for field 'name' in rnkMETHOD """
try:
res = run_sql(
"UPDATE rnkMETHOD set name=%s WHERE id=%s", (rnkcode, rnkID))
return (1, "")
except StandardError as e:
return (0, e)
def add_rnk(rnkcode):
"""Adds a new rank method to rnkMETHOD
rnkcode - the "code" for the rank method, to be used by bibrank daemon """
try:
res = run_sql("INSERT INTO rnkMETHOD (name) VALUES (%s)", (rnkcode,))
res = run_sql("SELECT id FROM rnkMETHOD WHERE name=%s", (rnkcode,))
if res:
return (1, res[0][0])
else:
raise StandardError
except StandardError as e:
return (0, e)
def addadminbox(header='', datalist=[], cls="admin_wvar"):
"""used to create table around main data on a page, row based.
header - header on top of the table
datalist - list of the data to be added row by row
    cls - which CSS class to use to format the look of the table."""
if len(datalist) == 1:
per = '100'
else:
per = '75'
output = '<table class="%s" ' % (cls, ) + 'width="95%">\n'
output += """
<thead>
<tr>
<th class="adminheaderleft" colspan="%s">%s</th>
</tr>
</thead>
<tbody>
""" % (len(datalist), header)
output += ' <tr>\n'
output += """
<td style="vertical-align: top; margin-top: 5px; width: %s;">
%s
</td>
""" % (per + '%', datalist[0])
if len(datalist) > 1:
output += """
<td style="vertical-align: top; margin-top: 5px; width: %s;">
%s
</td>
""" % ('25%', datalist[1])
output += ' </tr>\n'
output += """
</tbody>
</table>
"""
return output
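# Usage sketch for addadminbox (illustrative values only): wrap one or two HTML
# fragments in the standard admin table; with two entries the columns get a
# 75%/25% split.
#
#   box = addadminbox('Overview', ['<p>main column</p>', '<p>side column</p>'])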
def tupletotable(header=[], tuple=[], start='', end='', extracolumn='', highlight_rows_p=False, alternate_row_colors_p=False):
"""create html table for a tuple.
header - optional header for the columns
tuple - create table of this
    start - text to be added at the beginning, most likely the beginning of a form
    end - text to be added at the end, most likely the end of a form.
extracolumn - mainly used to put in a button.
highlight_rows_p - if the cursor hovering a row should highlight the full row or not
alternate_row_colors_p - if alternate background colours should be used for the rows
"""
# study first row in tuple for alignment
align = []
try:
firstrow = tuple[0]
if type(firstrow) in [int, long]:
align = ['admintdright']
elif type(firstrow) in [str, dict]:
align = ['admintdleft']
else:
for item in firstrow:
                if type(item) in [int, long]:
align.append('admintdright')
else:
align.append('admintdleft')
except IndexError:
firstrow = []
tblstr = ''
for h in header + ['']:
tblstr += ' <th class="adminheader">%s</th>\n' % (h, )
if tblstr:
tblstr = ' <tr>\n%s\n </tr>\n' % (tblstr, )
tblstr = start + '<table class="admin_wvar_nomargin">\n' + tblstr
# extra column
try:
extra = '<tr class="%s">' % (
highlight_rows_p and 'admin_row_highlight' or '')
if type(firstrow) not in [int, long, str, dict]:
# for data in firstrow: extra += '<td class="%s">%s</td>\n' % ('admintd', data)
for i in range(len(firstrow)):
extra += '<td class="{0}">{1}</td>\n'.format(
align[i], firstrow[i])
else:
extra += ' <td class="%s">%s</td>\n' % (align[0], firstrow)
extra += '<td class="extracolumn" rowspan="%s" style="vertical-align: top;">\n%s\n</td>\n</tr>\n' % (
len(tuple), extracolumn)
except IndexError:
extra = ''
tblstr += extra
# for i in range(1, len(tuple)):
j = 0
for row in tuple[1:]:
j += 1
tblstr += ' <tr class="%s %s">\n' % (highlight_rows_p and 'admin_row_highlight' or '',
(j % 2 and alternate_row_colors_p) and 'admin_row_color' or '')
# row = tuple[i]
if type(row) not in [int, long, str, dict]:
# for data in row: tblstr += '<td class="admintd">%s</td>\n' % (data,)
for i in range(len(row)):
tblstr += '<td class="{0}">{1}</td>\n'.format(align[i], utf8ifier(row[i]))
else:
tblstr += ' <td class="%s">%s</td>\n' % (align[0], row)
tblstr += ' </tr> \n'
tblstr += '</table> \n '
tblstr += end
return tblstr
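# Usage sketch for tupletotable (illustrative values only): integer cells are
# right-aligned and string cells left-aligned, and the extra column spans all
# rows, which is handy for a submit button.
#
#   rows = [(1, 'single'), (2, 'double')]
#   html = tupletotable(header=['id', 'name'], tuple=rows,
#                       start='<form action="modifyrank" method="post">',
#                       end='</form>',
#                       extracolumn='<input type="submit" value="Go" />')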
def tupletotable_onlyselected(header=[], tuple=[], selected=[], start='', end='', extracolumn=''):
"""create html table for a tuple.
header - optional header for the columns
tuple - create table of this
selected - indexes of selected rows in the tuple
    start - put this at the beginning
    end - put this at the end
extracolumn - mainly used to put in a button"""
tuple2 = []
for index in selected:
tuple2.append(tuple[int(index) - 1])
return tupletotable(header=header,
tuple=tuple2,
start=start,
end=end,
extracolumn=extracolumn)
def addcheckboxes(datalist=[], name='authids', startindex=1, checked=[]):
"""adds checkboxes in front of the listdata.
datalist - add checkboxes in front of this list
name - name of all the checkboxes, values will be associated with this name
startindex - usually 1 because of the header
checked - values of checkboxes to be pre-checked """
    if not isinstance(checked, list):
checked = [checked]
for row in datalist:
        # always render the checkbox; any filtering of special rows is done elsewhere
if 1 or row[0] not in [-1, "-1", 0, "0"]:
chkstr = str(startindex) in checked and 'checked="checked"' or ''
row.insert(
0, '<input type="checkbox" name="%s" value="%s" %s />' % (name, startindex, chkstr))
else:
row.insert(0, '')
startindex += 1
return datalist
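# Usage sketch for addcheckboxes (illustrative values only): rows must be
# lists, since the checkbox cell is prepended in place with insert().
#
#   data = [['apple', 10], ['pear', 5]]
#   data = addcheckboxes(datalist=data, name='ids', startindex=1, checked=['1'])
#   # data[0][0] is now the pre-checked box: value="1" checked="checked"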
def createhiddenform(action="", text="", button="confirm", cnfrm='', **hidden):
"""create select with hidden values and submit button
action - name of the action to perform on submit
text - additional text, can also be used to add non hidden input
button - value/caption on the submit button
cnfrm - if given, must check checkbox to confirm
**hidden - dictionary with name=value pairs for hidden input """
output = '<form action="%s" method="post">\n' % (action, )
output += '<table>\n<tr><td style="vertical-align: top">'
# output += text.decode('utf-8')
output += text
if cnfrm:
output += ' <input type="checkbox" name="confirm" value="1"/>'
for key in hidden.keys():
if type(hidden[key]) is list:
for value in hidden[key]:
output += ' <input type="hidden" name="%s" value="%s"/>\n' % (
key, value)
else:
output += ' <input type="hidden" name="%s" value="%s"/>\n' % (
key, hidden[key])
output += '</td><td style="vertical-align: bottom">'
output += ' <input class="btn btn-default" type="submit" value="%s"/>\n' % (
button, )
output += '</td></tr></table>'
output += '</form>\n'
return output
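# Usage sketch (mirrors the calls earlier in this module; the rnkID value is
# illustrative): every keyword in **hidden becomes a hidden <input>, and a
# list value yields one hidden input per element.
#
#   frm = createhiddenform(action="deleterank",
#                          text="Delete rank method?",
#                          button="Confirm",
#                          rnkID=5,
#                          confirm=1)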
def get_languages():
languages = []
for (lang, lang_namelong) in language_list_long():
languages.append((lang, lang_namelong))
languages.sort()
return languages
def get_def_name(ID, table):
"""Returns a list of the names, either with the name in the current language, the default language, or just the name from the given table
ln - a language supported by Invenio
type - the type of value wanted, like 'ln', 'sn'"""
name = "name"
if table[-1:].isupper():
name = "NAME"
try:
if ID:
res = run_sql("SELECT id,name FROM %s where id=%s" % (table, ID))
else:
res = run_sql("SELECT id,name FROM %s" % table)
res = list(res)
res.sort(compare_on_val)
return res
except StandardError as e:
return []
def get_i8n_name(ID, ln, rtype, table):
"""Returns a list of the names, either with the name in the current language, the default language, or just the name from the given table
ln - a language supported by Invenio
type - the type of value wanted, like 'ln', 'sn'"""
name = "name"
if table[-1:].isupper():
name = "NAME"
try:
res = ""
if ID:
res = run_sql("SELECT id_%s,value FROM %s%s where type='%s' and ln='%s' and id_%s=%s" % (
table, table, name, rtype, ln, table, ID))
else:
res = run_sql("SELECT id_%s,value FROM %s%s where type='%s' and ln='%s'" % (
table, table, name, rtype, ln))
if ln != CFG_SITE_LANG:
if ID:
res1 = run_sql("SELECT id_%s,value FROM %s%s WHERE ln='%s' and type='%s' and id_%s=%s" % (
table, table, name, CFG_SITE_LANG, rtype, table, ID))
else:
res1 = run_sql("SELECT id_%s,value FROM %s%s WHERE ln='%s' and type='%s'" % (
table, table, name, CFG_SITE_LANG, rtype))
res2 = dict(res)
result = filter(lambda x: x[0] not in res2, res1)
            res = list(res) + result  # run_sql returns a tuple, filter() a list
if ID:
res1 = run_sql("SELECT id,name FROM %s where id=%s" % (table, ID))
else:
res1 = run_sql("SELECT id,name FROM %s" % table)
res2 = dict(res)
result = filter(lambda x: x[0] not in res2, res1)
        res = list(res) + result
res = list(res)
res.sort(compare_on_val)
return res
    except StandardError as e:
        raise
def get_name(ID, ln, rtype, table, id_column=None):
"""Returns the value from the table name based on arguments
ID - id
ln - a language supported by Invenio
    rtype - the type of value wanted, like 'ln', 'sn'
table - tablename
id_column - name of the column with identifier. If None, expect column to be named 'id_%s' % table
"""
name = "name"
if table[-1:].isupper():
name = "NAME"
if id_column:
id_column = wash_table_column_name(id_column)
try:
res = run_sql("SELECT value FROM %s%s WHERE type='%s' and ln='%s' and %s=%s" % (
table, name, rtype, ln, (id_column or 'id_%s' % wash_table_column_name(table)), ID))
return res
except StandardError as e:
return ()
def modify_translations(ID, langs, sel_type, trans, table, id_column=None):
"""add or modify translations in tables given by table
    ID - the id of the row to add or modify translations for
sel_type - the name type
langs - the languages
trans - the translations, in same order as in langs
table - the table
id_column - name of the column with identifier. If None, expect column to be named 'id_%s' % table
"""
name = "name"
if table[-1:].isupper():
name = "NAME"
id_column = id_column or 'id_%s' % table
if id_column:
id_column = wash_table_column_name(id_column)
try:
for nr in range(0, len(langs)):
res = run_sql("SELECT value FROM %s%s WHERE %s=%%s AND type=%%s AND ln=%%s" % (table, name, id_column),
(ID, sel_type, langs[nr][0]))
if res:
if trans[nr]:
res = run_sql("UPDATE %s%s SET value=%%s WHERE %s=%%s AND type=%%s AND ln=%%s" % (table, name, id_column),
(trans[nr], ID, sel_type, langs[nr][0]))
else:
res = run_sql("DELETE FROM %s%s WHERE %s=%%s AND type=%%s AND ln=%%s" % (table, name, id_column),
(ID, sel_type, langs[nr][0]))
else:
if trans[nr]:
res = run_sql("INSERT INTO %s%s (%s, type, ln, value) VALUES (%%s,%%s,%%s,%%s)" % (table, name, id_column),
(ID, sel_type, langs[nr][0], trans[nr]))
return (1, "")
except StandardError as e:
return (0, e)
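# Usage sketch (illustrative values): set the English long name of rank method
# 5; with table='rnkMETHOD' the rows go to rnkMETHODNAME through the default
# id column 'id_rnkMETHOD'.
#
#   modify_translations(5, [('en', 'English')], 'ln', ['Word similarity'],
#                       'rnkMETHOD')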
def write_outcome(res):
"""
Write the outcome of an update of some settings.
Parameter 'res' is a tuple (int, str), where 'int' is 0 when there
is an error to display, and 1 when everything went fine. 'str' is
a message displayed when there is an error.
"""
if res and res[0] == 1:
return """<b><span class="info">Operation successfully completed.</span></b>"""
elif res:
return """<b><span class="info">Operation failed. Reason:</span></b><br />%s""" % res[1]
| gpl-2.0 |
abdullah2891/remo | remo/profiles/helpers.py | 3 | 2078 | import urlparse
from datetime import timedelta
from django.utils import timezone
from django.conf import settings
from funfactory.helpers import urlparams
from jingo import register
from libravatar import libravatar_url
from remo.profiles.models import FunctionalArea, UserAvatar
from remo.reports.utils import get_last_report
INACTIVE_HIGH = timedelta(weeks=8)
INACTIVE_LOW = timedelta(weeks=4)
@register.filter
def get_avatar_url(user, size=50):
"""Get a url pointing to user's avatar.
    The libravatar network is used for avatars. Return the cached value if
    it was refreshed less than 7 days ago. Optional argument
size can be provided to set the avatar size.
"""
if not user:
return None
default_img_url = reduce(lambda u, x: urlparse.urljoin(u, x),
[settings.SITE_URL,
settings.STATIC_URL,
'base/img/remo/remo_avatar.png'])
user_avatar, created = UserAvatar.objects.get_or_create(user=user)
now = timezone.now()
if (user_avatar.last_update < now - timedelta(days=7)) or created:
user_avatar.avatar_url = libravatar_url(email=user.email, https=True)
user_avatar.save()
avatar_url = urlparams(user_avatar.avatar_url, default=default_img_url)
if size != -1:
avatar_url = urlparams(avatar_url, size=size)
return avatar_url
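# Template usage sketch (jingo exposes this as a Jinja2 filter; the variable
# names are illustrative):
#
#   <img src="{{ user|get_avatar_url(80) }}" alt="avatar" />
#
# Passing size=-1 returns the URL without a size parameter.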
@register.filter
def get_functional_area(name):
"""Return the Functional Area object given the name."""
try:
return FunctionalArea.objects.get(name=name)
except FunctionalArea.DoesNotExist:
return None
@register.filter
def get_activity_level(user):
"""Return user's inactivity level."""
last_report = get_last_report(user)
if not last_report:
return ''
today = timezone.now().date()
inactivity_period = today - last_report.report_date
if inactivity_period > INACTIVE_LOW:
if inactivity_period > INACTIVE_HIGH:
return 'inactive-high'
return 'inactive-low'
return ''
| bsd-3-clause |
TridevGuha/django | tests/utils_tests/test_regex_helper.py | 448 | 1784 | from __future__ import unicode_literals
import unittest
from django.utils import regex_helper
class NormalizeTests(unittest.TestCase):
def test_empty(self):
pattern = r""
expected = [('', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_escape(self):
pattern = r"\\\^\$\.\|\?\*\+\(\)\["
expected = [('\\^$.|?*+()[', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_positional(self):
pattern = r"(.*)-(.+)"
expected = [('%(_0)s-%(_1)s', ['_0', '_1'])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_ignored(self):
pattern = r"(?i)(?L)(?m)(?s)(?u)(?#)"
expected = [('', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_noncapturing(self):
pattern = r"(?:non-capturing)"
expected = [('non-capturing', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_named(self):
pattern = r"(?P<first_group_name>.*)-(?P<second_group_name>.*)"
expected = [('%(first_group_name)s-%(second_group_name)s',
['first_group_name', 'second_group_name'])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_backreference(self):
pattern = r"(?P<first_group_name>.*)-(?P=first_group_name)"
expected = [('%(first_group_name)s-%(first_group_name)s',
['first_group_name'])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
| bsd-3-clause |
DevinDewitt/pyqt5 | examples/webkit/fancybrowser/fancybrowser.py | 3 | 7927 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited
## Copyright (C) 2010 Hans-Peter Jansen <[email protected]>.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
###########################################################################
from PyQt5.QtCore import QFile, QIODevice, Qt, QTextStream, QUrl
from PyQt5.QtWidgets import (QAction, QApplication, QLineEdit, QMainWindow,
QSizePolicy, QStyle, QTextEdit)
from PyQt5.QtNetwork import QNetworkProxyFactory, QNetworkRequest
from PyQt5.QtWebKitWidgets import QWebPage, QWebView
import jquery_rc
class MainWindow(QMainWindow):
def __init__(self, url):
super(MainWindow, self).__init__()
self.progress = 0
fd = QFile(":/jquery.min.js")
if fd.open(QIODevice.ReadOnly | QFile.Text):
self.jQuery = QTextStream(fd).readAll()
fd.close()
else:
self.jQuery = ''
QNetworkProxyFactory.setUseSystemConfiguration(True)
self.view = QWebView(self)
self.view.load(url)
self.view.loadFinished.connect(self.adjustLocation)
self.view.titleChanged.connect(self.adjustTitle)
self.view.loadProgress.connect(self.setProgress)
self.view.loadFinished.connect(self.finishLoading)
self.locationEdit = QLineEdit(self)
self.locationEdit.setSizePolicy(QSizePolicy.Expanding,
self.locationEdit.sizePolicy().verticalPolicy())
self.locationEdit.returnPressed.connect(self.changeLocation)
toolBar = self.addToolBar("Navigation")
toolBar.addAction(self.view.pageAction(QWebPage.Back))
toolBar.addAction(self.view.pageAction(QWebPage.Forward))
toolBar.addAction(self.view.pageAction(QWebPage.Reload))
toolBar.addAction(self.view.pageAction(QWebPage.Stop))
toolBar.addWidget(self.locationEdit)
viewMenu = self.menuBar().addMenu("&View")
viewSourceAction = QAction("Page Source", self)
viewSourceAction.triggered.connect(self.viewSource)
viewMenu.addAction(viewSourceAction)
effectMenu = self.menuBar().addMenu("&Effect")
effectMenu.addAction("Highlight all links", self.highlightAllLinks)
self.rotateAction = QAction(
self.style().standardIcon(QStyle.SP_FileDialogDetailedView),
"Turn images upside down", self, checkable=True,
toggled=self.rotateImages)
effectMenu.addAction(self.rotateAction)
toolsMenu = self.menuBar().addMenu("&Tools")
toolsMenu.addAction("Remove GIF images", self.removeGifImages)
toolsMenu.addAction("Remove all inline frames",
self.removeInlineFrames)
toolsMenu.addAction("Remove all object elements",
self.removeObjectElements)
toolsMenu.addAction("Remove all embedded elements",
self.removeEmbeddedElements)
self.setCentralWidget(self.view)
def viewSource(self):
accessManager = self.view.page().networkAccessManager()
request = QNetworkRequest(self.view.url())
reply = accessManager.get(request)
reply.finished.connect(self.slotSourceDownloaded)
def slotSourceDownloaded(self):
reply = self.sender()
self.textEdit = QTextEdit()
self.textEdit.setAttribute(Qt.WA_DeleteOnClose)
self.textEdit.show()
self.textEdit.setPlainText(QTextStream(reply).readAll())
self.textEdit.resize(600, 400)
reply.deleteLater()
def adjustLocation(self):
self.locationEdit.setText(self.view.url().toString())
def changeLocation(self):
url = QUrl.fromUserInput(self.locationEdit.text())
self.view.load(url)
self.view.setFocus()
def adjustTitle(self):
if 0 < self.progress < 100:
self.setWindowTitle("%s (%s%%)" % (self.view.title(), self.progress))
else:
self.setWindowTitle(self.view.title())
def setProgress(self, p):
self.progress = p
self.adjustTitle()
def finishLoading(self):
self.progress = 100
self.adjustTitle()
self.view.page().mainFrame().evaluateJavaScript(self.jQuery)
self.rotateImages(self.rotateAction.isChecked())
def highlightAllLinks(self):
code = """$('a').each(
function () {
$(this).css('background-color', 'yellow')
}
)"""
self.view.page().mainFrame().evaluateJavaScript(code)
def rotateImages(self, invert):
if invert:
code = """
$('img').each(
function () {
$(this).css('-webkit-transition', '-webkit-transform 2s');
$(this).css('-webkit-transform', 'rotate(180deg)')
}
)"""
else:
code = """
$('img').each(
function () {
$(this).css('-webkit-transition', '-webkit-transform 2s');
$(this).css('-webkit-transform', 'rotate(0deg)')
}
)"""
self.view.page().mainFrame().evaluateJavaScript(code)
def removeGifImages(self):
code = "$('[src*=gif]').remove()"
self.view.page().mainFrame().evaluateJavaScript(code)
def removeInlineFrames(self):
code = "$('iframe').remove()"
self.view.page().mainFrame().evaluateJavaScript(code)
def removeObjectElements(self):
code = "$('object').remove()"
self.view.page().mainFrame().evaluateJavaScript(code)
def removeEmbeddedElements(self):
code = "$('embed').remove()"
self.view.page().mainFrame().evaluateJavaScript(code)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
if len(sys.argv) > 1:
url = QUrl(sys.argv[1])
else:
url = QUrl('http://www.google.com/ncr')
browser = MainWindow(url)
browser.show()
sys.exit(app.exec_())
| gpl-3.0 |
nexusriot/cinder | cinder/openstack/common/scheduler/filters/availability_zone_filter.py | 26 | 1256 | # Copyright (c) 2011-2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.openstack.common.scheduler import filters
class AvailabilityZoneFilter(filters.BaseHostFilter):
"""Filters Hosts by availability zone."""
# Availability zones do not change within a request
run_filter_once_per_request = True
def host_passes(self, host_state, filter_properties):
spec = filter_properties.get('request_spec', {})
props = spec.get('resource_properties', {})
availability_zone = props.get('availability_zone')
if availability_zone:
return availability_zone == host_state.service['availability_zone']
return True
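# Shape sketch of the inputs checked above (illustrative values):
#
#   filter_properties = {
#       'request_spec': {'resource_properties': {'availability_zone': 'nova'}},
#   }
#   # A host passes only if host_state.service['availability_zone'] == 'nova';
#   # when no zone is requested, every host passes.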
| apache-2.0 |
KohlsTechnology/ansible | lib/ansible/playbook/play.py | 11 | 12526 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.errors import AnsibleParserError, AnsibleAssertionError
from ansible.module_utils.six import string_types
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.block import Block
from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
from ansible.vars.manager import preprocess_vars
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['Play']
class Play(Base, Taggable, Become):
"""
A play is a language feature that represents a list of roles and/or
task/handler blocks to execute on a given set of hosts.
Usage:
Play.load(datastructure) -> Play
Play.something(...)
"""
# =================================================================================
_hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True)
# Facts
_fact_path = FieldAttribute(isa='string', default=None)
_gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True)
_gather_subset = FieldAttribute(isa='barelist', default=None, always_post_validate=True)
_gather_timeout = FieldAttribute(isa='int', default=None, always_post_validate=True)
# Variable Attributes
_vars_files = FieldAttribute(isa='list', default=[], priority=99)
_vars_prompt = FieldAttribute(isa='list', default=[], always_post_validate=True)
_vault_password = FieldAttribute(isa='string', always_post_validate=True)
# Role Attributes
_roles = FieldAttribute(isa='list', default=[], priority=90)
# Block (Task) Lists Attributes
_handlers = FieldAttribute(isa='list', default=[])
_pre_tasks = FieldAttribute(isa='list', default=[])
_post_tasks = FieldAttribute(isa='list', default=[])
_tasks = FieldAttribute(isa='list', default=[])
# Flag/Setting Attributes
_force_handlers = FieldAttribute(isa='bool', always_post_validate=True)
_max_fail_percentage = FieldAttribute(isa='percent', always_post_validate=True)
_serial = FieldAttribute(isa='list', default=[], always_post_validate=True)
_strategy = FieldAttribute(isa='string', default=C.DEFAULT_STRATEGY, always_post_validate=True)
_order = FieldAttribute(isa='string', always_post_validate=True)
# =================================================================================
def __init__(self):
super(Play, self).__init__()
self._included_conditional = None
self._included_path = None
self._removed_hosts = []
self.ROLE_CACHE = {}
def __repr__(self):
return self.get_name()
def get_name(self):
''' return the name of the Play '''
return self._attributes.get('name')
@staticmethod
def load(data, variable_manager=None, loader=None, vars=None):
if ('name' not in data or data['name'] is None) and 'hosts' in data:
if isinstance(data['hosts'], list):
data['name'] = ','.join(data['hosts'])
else:
data['name'] = data['hosts']
p = Play()
if vars:
p.vars = vars.copy()
return p.load_data(data, variable_manager=variable_manager, loader=loader)
def preprocess_data(self, ds):
'''
        Adjusts play datastructure to clean up old/legacy items
'''
if not isinstance(ds, dict):
raise AnsibleAssertionError('while preprocessing data (%s), ds should be a dict but was a %s' % (ds, type(ds)))
# The use of 'user' in the Play datastructure was deprecated to
# line up with the same change for Tasks, due to the fact that
# 'user' conflicted with the user module.
if 'user' in ds:
# this should never happen, but error out with a helpful message
# to the user if it does...
if 'remote_user' in ds:
raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. "
"The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds)
ds['remote_user'] = ds['user']
del ds['user']
return super(Play, self).preprocess_data(ds)
def _load_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading tasks", obj=self._ds, orig_exc=e)
def _load_pre_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading pre_tasks", obj=self._ds, orig_exc=e)
def _load_post_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading post_tasks", obj=self._ds, orig_exc=e)
def _load_handlers(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed handlers/blocks.
Bare handlers outside of a block are given an implicit block.
'''
try:
return self._extend_value(
self.handlers,
load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader),
prepend=True
)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading handlers", obj=self._ds, orig_exc=e)
def _load_roles(self, attr, ds):
'''
Loads and returns a list of RoleInclude objects from the datastructure
list of role definitions and creates the Role from those objects
'''
if ds is None:
ds = []
try:
role_includes = load_list_of_roles(ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed role declaration was encountered.", obj=self._ds, orig_exc=e)
roles = []
for ri in role_includes:
roles.append(Role.load(ri, play=self))
return roles
def _load_vars_prompt(self, attr, ds):
new_ds = preprocess_vars(ds)
vars_prompts = []
if new_ds is not None:
for prompt_data in new_ds:
if 'name' not in prompt_data:
display.deprecated("Using the 'short form' for vars_prompt has been deprecated", version="2.7")
for vname, prompt in prompt_data.items():
vars_prompts.append(dict(
name=vname,
prompt=prompt,
default=None,
private=None,
confirm=None,
encrypt=None,
salt_size=None,
salt=None,
))
else:
vars_prompts.append(prompt_data)
return vars_prompts
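    # Expansion sketch for the deprecated short form handled above
    # (illustrative values): an entry like {'vname': 'Enter a value:'} becomes
    # {'name': 'vname', 'prompt': 'Enter a value:', 'default': None,
    #  'private': None, 'confirm': None, 'encrypt': None, 'salt_size': None,
    #  'salt': None}.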
def _compile_roles(self):
'''
Handles the role compilation step, returning a flat list of tasks
with the lowest level dependencies first. For example, if a role R
has a dependency D1, which also has a dependency D2, the tasks from
D2 are merged first, followed by D1, and lastly by the tasks from
        the parent role R itself. This is done for all roles in the Play.
'''
block_list = []
if len(self.roles) > 0:
for r in self.roles:
block_list.extend(r.compile(play=self))
return block_list
def compile_roles_handlers(self):
'''
Handles the role handler compilation step, returning a flat list of Handlers
This is done for all roles in the Play.
'''
block_list = []
if len(self.roles) > 0:
for r in self.roles:
block_list.extend(r.get_handler_blocks(play=self))
return block_list
def compile(self):
'''
Compiles and returns the task list for this play, compiled from the
roles (which are themselves compiled recursively) and/or the list of
tasks specified in the play.
'''
# create a block containing a single flush handlers meta
# task, so we can be sure to run handlers at certain points
# of the playbook execution
flush_block = Block.load(
data={'meta': 'flush_handlers'},
play=self,
variable_manager=self._variable_manager,
loader=self._loader
)
block_list = []
block_list.extend(self.pre_tasks)
block_list.append(flush_block)
block_list.extend(self._compile_roles())
block_list.extend(self.tasks)
block_list.append(flush_block)
block_list.extend(self.post_tasks)
block_list.append(flush_block)
return block_list
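    # Order produced by compile() above: pre_tasks, flush handlers, compiled
    # role tasks, play tasks, flush handlers, post_tasks, flush handlers.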
def get_vars(self):
return self.vars.copy()
def get_vars_files(self):
if self.vars_files is None:
return []
return self.vars_files
def get_handlers(self):
return self.handlers[:]
def get_roles(self):
return self.roles[:]
def get_tasks(self):
tasklist = []
for task in self.pre_tasks + self.tasks + self.post_tasks:
if isinstance(task, Block):
tasklist.append(task.block + task.rescue + task.always)
else:
tasklist.append(task)
return tasklist
def serialize(self):
data = super(Play, self).serialize()
roles = []
for role in self.get_roles():
roles.append(role.serialize())
data['roles'] = roles
data['included_path'] = self._included_path
return data
def deserialize(self, data):
super(Play, self).deserialize(data)
self._included_path = data.get('included_path', None)
if 'roles' in data:
role_data = data.get('roles', [])
roles = []
for role in role_data:
r = Role()
r.deserialize(role)
roles.append(r)
setattr(self, 'roles', roles)
del data['roles']
def copy(self):
new_me = super(Play, self).copy()
new_me.ROLE_CACHE = self.ROLE_CACHE.copy()
new_me._included_conditional = self._included_conditional
new_me._included_path = self._included_path
return new_me
| gpl-3.0 |
Plain-Andy-legacy/android_external_chromium_org | build/android/pylib/device/adb_wrapper_test.py | 36 | 2716 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for the AdbWrapper class."""
import os
import tempfile
import time
import unittest
from pylib.device import adb_wrapper
from pylib.device import device_errors
class TestAdbWrapper(unittest.TestCase):
def setUp(self):
devices = adb_wrapper.AdbWrapper.GetDevices()
assert devices, 'A device must be attached'
self._adb = devices[0]
self._adb.WaitForDevice()
@staticmethod
def _MakeTempFile(contents):
"""Make a temporary file with the given contents.
Args:
contents: string to write to the temporary file.
Returns:
The absolute path to the file.
"""
fi, path = tempfile.mkstemp()
with os.fdopen(fi, 'wb') as f:
f.write(contents)
return path
def testShell(self):
output = self._adb.Shell('echo test', expect_rc=0)
self.assertEqual(output.strip(), 'test')
output = self._adb.Shell('echo test')
self.assertEqual(output.strip(), 'test')
self.assertRaises(device_errors.AdbCommandFailedError, self._adb.Shell,
'echo test', expect_rc=1)
def testPushPull(self):
path = self._MakeTempFile('foo')
device_path = '/data/local/tmp/testfile.txt'
local_tmpdir = os.path.dirname(path)
self._adb.Push(path, device_path)
self.assertEqual(self._adb.Shell('cat %s' % device_path), 'foo')
self._adb.Pull(device_path, local_tmpdir)
with open(os.path.join(local_tmpdir, 'testfile.txt'), 'r') as f:
self.assertEqual(f.read(), 'foo')
def testInstall(self):
path = self._MakeTempFile('foo')
self.assertRaises(device_errors.AdbCommandFailedError, self._adb.Install,
path)
def testForward(self):
self.assertRaises(device_errors.AdbCommandFailedError, self._adb.Forward,
0, 0)
def testUninstall(self):
self.assertRaises(device_errors.AdbCommandFailedError, self._adb.Uninstall,
'some.nonexistant.package')
def testRebootWaitForDevice(self):
self._adb.Reboot()
print 'waiting for device to reboot...'
while self._adb.GetState() == 'device':
time.sleep(1)
self._adb.WaitForDevice()
self.assertEqual(self._adb.GetState(), 'device')
print 'waiting for package manager...'
while 'package:' not in self._adb.Shell('pm path android'):
time.sleep(1)
def testRootRemount(self):
self._adb.Root()
while True:
try:
self._adb.Shell('start')
break
except device_errors.AdbCommandFailedError:
time.sleep(1)
self._adb.Remount()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
trademob/boto | boto/ec2/autoscale/request.py | 152 | 1549 | # Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Request(object):
def __init__(self, connection=None):
self.connection = connection
self.request_id = ''
def __repr__(self):
return 'Request:%s' % self.request_id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'RequestId':
self.request_id = value
else:
setattr(self, name, value)
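# Parsing sketch (illustrative XML): boto's SAX-style response parser calls
# endElement for each closing tag, so <RequestId>abc-123</RequestId> sets
# request.request_id to 'abc-123', and any other tag becomes an attribute of
# the Request instance.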
| mit |
extremewaysback/django | tests/forms_tests/tests/test_formsets.py | 128 | 57919 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.forms import (
CharField, DateField, FileField, Form, IntegerField, SplitDateTimeField,
ValidationError, formsets,
)
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.test import SimpleTestCase
from django.utils.encoding import force_text
class Choice(Form):
choice = CharField()
votes = IntegerField()
# FormSet allows us to use multiple instances of the same form on one page. For now,
# the best way to create a FormSet is by using the formset_factory function.
ChoiceFormSet = formset_factory(Choice)
class FavoriteDrinkForm(Form):
name = CharField()
class BaseFavoriteDrinksFormSet(BaseFormSet):
def clean(self):
seen_drinks = []
for drink in self.cleaned_data:
if drink['name'] in seen_drinks:
raise ValidationError('You may only specify a drink once.')
seen_drinks.append(drink['name'])
class EmptyFsetWontValidate(BaseFormSet):
def clean(self):
raise ValidationError("Clean method called")
# Let's define a FormSet that takes a list of favorite drinks, but raises an
# error if there are any duplicates. Used in ``test_clean_hook``,
# ``test_regression_6926`` & ``test_regression_12878``.
FavoriteDrinksFormSet = formset_factory(FavoriteDrinkForm,
formset=BaseFavoriteDrinksFormSet, extra=3)
# Used in ``test_formset_splitdatetimefield``.
class SplitDateTimeForm(Form):
when = SplitDateTimeField(initial=datetime.datetime.now)
SplitDateTimeFormSet = formset_factory(SplitDateTimeForm)
class CustomKwargForm(Form):
def __init__(self, *args, **kwargs):
self.custom_kwarg = kwargs.pop('custom_kwarg')
super(CustomKwargForm, self).__init__(*args, **kwargs)
class FormsFormsetTestCase(SimpleTestCase):
def make_choiceformset(self, formset_data=None, formset_class=ChoiceFormSet,
total_forms=None, initial_forms=0, max_num_forms=0, min_num_forms=0, **kwargs):
"""
Make a ChoiceFormset from the given formset_data.
The data should be given as a list of (choice, votes) tuples.
"""
kwargs.setdefault('prefix', 'choices')
kwargs.setdefault('auto_id', False)
if formset_data is None:
return formset_class(**kwargs)
if total_forms is None:
total_forms = len(formset_data)
def prefixed(*args):
args = (kwargs['prefix'],) + args
return '-'.join(args)
data = {
prefixed('TOTAL_FORMS'): str(total_forms),
prefixed('INITIAL_FORMS'): str(initial_forms),
prefixed('MAX_NUM_FORMS'): str(max_num_forms),
prefixed('MIN_NUM_FORMS'): str(min_num_forms),
}
for i, (choice, votes) in enumerate(formset_data):
data[prefixed(str(i), 'choice')] = choice
data[prefixed(str(i), 'votes')] = votes
return formset_class(data, **kwargs)
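    # Data shape built above for formset_data=[('Calexico', '100')] with the
    # default 'choices' prefix (illustrative):
    #   {'choices-TOTAL_FORMS': '1', 'choices-INITIAL_FORMS': '0',
    #    'choices-MAX_NUM_FORMS': '0', 'choices-MIN_NUM_FORMS': '0',
    #    'choices-0-choice': 'Calexico', 'choices-0-votes': '100'}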
def test_basic_formset(self):
# A FormSet constructor takes the same arguments as Form. Let's create a FormSet
# for adding data. By default, it displays 1 blank form. It can display more,
# but we'll look at how to do so later.
formset = self.make_choiceformset()
self.assertHTMLEqual(
str(formset),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" />
<input type="hidden" name="choices-INITIAL_FORMS" value="0" />
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0" />
<input type="hidden" name="choices-MAX_NUM_FORMS" value="1000" />
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice" /></td></tr>
<tr><th>Votes:</th><td><input type="number" name="choices-0-votes" /></td></tr>"""
)
# We treat FormSet pretty much like we would treat a normal Form. FormSet has an
# is_valid method, and a cleaned_data or errors attribute depending on whether all
# the forms passed validation. However, unlike a Form instance, cleaned_data and
# errors will be a list of dicts rather than just a single dict.
formset = self.make_choiceformset([('Calexico', '100')])
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}])
# If a FormSet was not passed any data, its is_valid and has_changed
# methods should return False.
formset = self.make_choiceformset()
self.assertFalse(formset.is_valid())
self.assertFalse(formset.has_changed())
def test_form_kwargs_formset(self):
"""
Test that custom kwargs set on the formset instance are passed to the
underlying forms.
"""
FormSet = formset_factory(CustomKwargForm, extra=2)
formset = FormSet(form_kwargs={'custom_kwarg': 1})
for form in formset:
self.assertTrue(hasattr(form, 'custom_kwarg'))
self.assertEqual(form.custom_kwarg, 1)
def test_form_kwargs_formset_dynamic(self):
"""
Test that form kwargs can be passed dynamically in a formset.
"""
class DynamicBaseFormSet(BaseFormSet):
def get_form_kwargs(self, index):
return {'custom_kwarg': index}
DynamicFormSet = formset_factory(CustomKwargForm, formset=DynamicBaseFormSet, extra=2)
formset = DynamicFormSet(form_kwargs={'custom_kwarg': 'ignored'})
for i, form in enumerate(formset):
self.assertTrue(hasattr(form, 'custom_kwarg'))
self.assertEqual(form.custom_kwarg, i)
def test_form_kwargs_empty_form(self):
FormSet = formset_factory(CustomKwargForm)
formset = FormSet(form_kwargs={'custom_kwarg': 1})
self.assertTrue(hasattr(formset.empty_form, 'custom_kwarg'))
self.assertEqual(formset.empty_form.custom_kwarg, 1)
def test_formset_validation(self):
        # FormSet instances can also have an errors attribute if validation failed for
# any of the forms.
formset = self.make_choiceformset([('Calexico', '')])
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'votes': ['This field is required.']}])
def test_formset_has_changed(self):
        # A FormSet instance's has_changed method will be True if any data is
        # passed to its forms, even if the formset didn't validate
blank_formset = self.make_choiceformset([('', '')])
self.assertFalse(blank_formset.has_changed())
# invalid formset test
invalid_formset = self.make_choiceformset([('Calexico', '')])
self.assertFalse(invalid_formset.is_valid())
self.assertTrue(invalid_formset.has_changed())
# valid formset test
valid_formset = self.make_choiceformset([('Calexico', '100')])
self.assertTrue(valid_formset.is_valid())
self.assertTrue(valid_formset.has_changed())
def test_formset_initial_data(self):
# We can also prefill a FormSet with existing data by providing an ``initial``
# argument to the constructor. ``initial`` should be a list of dicts. By default,
# an extra blank form is included.
initial = [{'choice': 'Calexico', 'votes': 100}]
formset = self.make_choiceformset(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>"""
)
# Let's simulate what would happen if we submitted this form.
formset = self.make_choiceformset([('Calexico', '100'), ('', '')], initial_forms=1)
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}])
def test_second_form_partially_filled(self):
# But the second form was blank! Shouldn't we get some errors? No. If we display
# a form as blank, it's ok for it to be submitted as blank. If we fill out even
# one of the fields of a blank form though, it will be validated. We may want to
# required that at least x number of forms are completed, but we'll show how to
# handle that later.
formset = self.make_choiceformset([('Calexico', '100'), ('The Decemberists', '')], initial_forms=1)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}])
def test_delete_prefilled_data(self):
# If we delete data that was pre-filled, we should get an error. Simply removing
# data from form fields isn't the proper way to delete it. We'll see how to
# handle that case later.
formset = self.make_choiceformset([('', ''), ('', '')], initial_forms=1)
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.errors,
[{'votes': ['This field is required.'], 'choice': ['This field is required.']}, {}]
)
def test_displaying_more_than_one_blank_form(self):
# Displaying more than 1 blank form ###########################################
        # We can also display more than 1 empty form at a time. To do so, pass an
# extra argument to formset_factory.
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" /></li>
<li>Votes: <input type="number" name="choices-0-votes" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>"""
)
# Since we displayed every form as blank, we will also accept them back as blank.
# This may seem a little strange, but later we will show how to require a minimum
# number of forms to be completed.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': '',
'choices-0-votes': '',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{}, {}, {}])
def test_min_num_displaying_more_than_one_blank_form(self):
# We can also display more than 1 empty form by passing the min_num argument
# to formset_factory. It will (essentially) increment the extra argument
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=1)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
# Min_num forms are required; extra forms can be empty.
self.assertFalse(formset.forms[0].empty_permitted)
self.assertTrue(formset.forms[1].empty_permitted)
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" /></li>
<li>Votes: <input type="number" name="choices-0-votes" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>"""
)
def test_min_num_displaying_more_than_one_blank_form_with_zero_extra(self):
# We can also display more than 1 empty form by passing the min_num argument
ChoiceFormSet = formset_factory(Choice, extra=0, min_num=3)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" /></li>
<li>Votes: <input type="number" name="choices-0-votes" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>"""
)
def test_single_form_completed(self):
# We can just fill out one of the forms.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}, {}])
def test_formset_validate_max_flag(self):
# If validate_max is set and max_num is less than TOTAL_FORMS in the
# data, then validation fails. MAX_NUM_FORMS in the data is
# irrelevant here (it's output as a hint for the client but its
# value in the returned data is not checked)
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit 1 or fewer forms.'])
def test_formset_validate_min_flag(self):
# If validate_min is set and min_num is more than TOTAL_FORMS in the
# data, then validation fails. MIN_NUM_FORMS in the data is
# irrelevant here (it's output as a hint for the client but its
# value in the returned data is not checked)
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=3, validate_min=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit 3 or more forms.'])
def test_second_form_partially_filled_2(self):
# And once again, if we try to partially complete a form, validation will fail.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': 'The Decemberists',
'choices-1-votes': '', # missing value
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}, {}])
def test_more_initial_data(self):
# The extra argument also works when the formset is pre-filled with initial
# data.
initial = [{'choice': 'Calexico', 'votes': 100}]
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="number" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>
<li>Choice: <input type="text" name="choices-3-choice" /></li>
<li>Votes: <input type="number" name="choices-3-votes" /></li>"""
)
# Make sure retrieving an empty form works, and it shows up in the form list
self.assertTrue(formset.empty_form.empty_permitted)
self.assertHTMLEqual(
formset.empty_form.as_ul(),
"""<li>Choice: <input type="text" name="choices-__prefix__-choice" /></li>
<li>Votes: <input type="number" name="choices-__prefix__-votes" /></li>"""
)
def test_formset_with_deletion(self):
# FormSets with deletion ######################################################
# We can easily add deletion ability to a FormSet with an argument to
# formset_factory. This will add a boolean field to each form instance. When
# that boolean field is True, the form will be in formset.deleted_forms
ChoiceFormSet = formset_factory(Choice, can_delete=True)
initial = [{'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900" /></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li>"""
)
# To delete something, we just need to set that form's special delete field to
# 'on'. Let's go ahead and delete Fergie.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-DELETE': 'on',
'choices-2-choice': '',
'choices-2-votes': '',
'choices-2-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.forms],
[
{'votes': 100, 'DELETE': False, 'choice': 'Calexico'},
{'votes': 900, 'DELETE': True, 'choice': 'Fergie'},
{},
]
)
self.assertEqual(
[form.cleaned_data for form in formset.deleted_forms],
[{'votes': 900, 'DELETE': True, 'choice': 'Fergie'}]
)
# If we fill a form with something and then we check the can_delete checkbox for
# that form, that form's errors should not make the entire formset invalid since
# it's going to be deleted.
class CheckForm(Form):
field = IntegerField(min_value=100)
data = {
'check-TOTAL_FORMS': '3', # the number of forms rendered
'check-INITIAL_FORMS': '2', # the number of forms with initial data
'check-MIN_NUM_FORMS': '0', # min number of forms
'check-MAX_NUM_FORMS': '0', # max number of forms
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
'check-2-field': '',
'check-2-DELETE': '',
}
CheckFormSet = formset_factory(CheckForm, can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
# If we remove the deletion flag now we will have our validation back.
data['check-1-DELETE'] = ''
formset = CheckFormSet(data, prefix='check')
self.assertFalse(formset.is_valid())
# Should be able to get deleted_forms from a valid formset even if a
# deleted form would have been invalid.
class Person(Form):
name = CharField()
PeopleForm = formset_factory(
form=Person,
can_delete=True)
p = PeopleForm(
{'form-0-name': '', 'form-0-DELETE': 'on', # no name!
'form-TOTAL_FORMS': 1, 'form-INITIAL_FORMS': 1,
'form-MIN_NUM_FORMS': 0, 'form-MAX_NUM_FORMS': 1})
self.assertTrue(p.is_valid())
self.assertEqual(len(p.deleted_forms), 1)
def test_formsets_with_ordering(self):
# FormSets with ordering ######################################################
# We can also add ordering ability to a FormSet with an argument to
# formset_factory. This will add an integer field to each form instance. When
# form validation succeeds, [form.cleaned_data for form in formset.forms] will have the data in the correct
# order specified by the ordering fields. If a number is duplicated in the set
# of ordering fields, for instance form 0 and form 3 are both marked as 1, then
# the form index is used as a secondary ordering criterion. To put
# something at the front of the list, you'd need to set its order to 0.
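# (A sketch of that documented behaviour -- not Django's actual
# implementation: a stable sort such as
# sorted(forms, key=lambda f: f.cleaned_data['ORDER'])
# leaves forms with equal ORDER values in form-index order, because
# Python's sort is stable.)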
ChoiceFormSet = formset_factory(Choice, can_order=True)
initial = [{'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Order: <input type="number" name="choices-0-ORDER" value="1" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900" /></li>
<li>Order: <input type="number" name="choices-1-ORDER" value="2" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="number" name="choices-2-votes" /></li>
<li>Order: <input type="number" name="choices-2-ORDER" /></li>"""
)
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 500, 'ORDER': 0, 'choice': 'The Decemberists'},
{'votes': 100, 'ORDER': 1, 'choice': 'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': 'Fergie'},
])
def test_empty_ordered_fields(self):
# Ordering fields are allowed to be left blank, and if they *are* left blank,
# they will be sorted below everything else.
data = {
'choices-TOTAL_FORMS': '4', # the number of forms rendered
'choices-INITIAL_FORMS': '3', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '',
'choices-3-choice': 'Basia Bulat',
'choices-3-votes': '50',
'choices-3-ORDER': '',
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 100, 'ORDER': 1, 'choice': 'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': 'Fergie'},
{'votes': 500, 'ORDER': None, 'choice': 'The Decemberists'},
{'votes': 50, 'ORDER': None, 'choice': 'Basia Bulat'},
])
def test_ordering_blank_fieldsets(self):
# Ordering should work with blank fieldsets.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [])
def test_formset_with_ordering_and_deletion(self):
# FormSets with ordering + deletion ###########################################
# Let's try throwing ordering and deletion into the same form.
ChoiceFormSet = formset_factory(Choice, can_order=True, can_delete=True)
initial = [
{'choice': 'Calexico', 'votes': 100},
{'choice': 'Fergie', 'votes': 900},
{'choice': 'The Decemberists', 'votes': 500},
]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertHTMLEqual(
'\n'.join(form_output),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>
<li>Order: <input type="number" name="choices-0-ORDER" value="1" /></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900" /></li>
<li>Order: <input type="number" name="choices-1-ORDER" value="2" /></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li>
<li>Choice: <input type="text" name="choices-2-choice" value="The Decemberists" /></li>
<li>Votes: <input type="number" name="choices-2-votes" value="500" /></li>
<li>Order: <input type="number" name="choices-2-ORDER" value="3" /></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li>
<li>Choice: <input type="text" name="choices-3-choice" /></li>
<li>Votes: <input type="number" name="choices-3-votes" /></li>
<li>Order: <input type="number" name="choices-3-ORDER" /></li>
<li>Delete: <input type="checkbox" name="choices-3-DELETE" /></li>"""
)
# Let's delete Fergie, and put The Decemberists ahead of Calexico.
data = {
'choices-TOTAL_FORMS': '4', # the number of forms rendered
'choices-INITIAL_FORMS': '3', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-1-DELETE': 'on',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
'choices-2-DELETE': '',
'choices-3-choice': '',
'choices-3-votes': '',
'choices-3-ORDER': '',
'choices-3-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 500, 'DELETE': False, 'ORDER': 0, 'choice': 'The Decemberists'},
{'votes': 100, 'DELETE': False, 'ORDER': 1, 'choice': 'Calexico'},
])
self.assertEqual(
[form.cleaned_data for form in formset.deleted_forms],
[{'votes': 900, 'DELETE': True, 'ORDER': 2, 'choice': 'Fergie'}]
)
def test_invalid_deleted_form_with_ordering(self):
# Should be able to get ordered forms from a valid formset even if a
# deleted form would have been invalid.
class Person(Form):
name = CharField()
PeopleForm = formset_factory(form=Person, can_delete=True, can_order=True)
p = PeopleForm({
'form-0-name': '',
'form-0-DELETE': 'on', # no name!
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MIN_NUM_FORMS': 0,
'form-MAX_NUM_FORMS': 1
})
self.assertTrue(p.is_valid())
self.assertEqual(p.ordered_forms, [])
def test_clean_hook(self):
# FormSet clean hook ##########################################################
# FormSets have a hook for doing extra validation that shouldn't be tied to any
# particular form. It follows the same pattern as the clean hook on Forms.
# We start out with some duplicate data.
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MIN_NUM_FORMS': '0', # min number of forms
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
# Any errors raised by formset.clean() are available via the
# formset.non_form_errors() method.
for error in formset.non_form_errors():
self.assertEqual(str(error), 'You may only specify a drink once.')
# Make sure we didn't break the valid case.
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MIN_NUM_FORMS': '0', # min number of forms
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Bloody Mary',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertTrue(formset.is_valid())
self.assertEqual(formset.non_form_errors(), [])
def test_limiting_max_forms(self):
# Limiting the maximum number of forms ########################################
# Base case for max_num.
# When not passed, max_num will take a high default value, leaving the
# number of forms only controlled by the value of the extra parameter.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th>
<td><input type="text" name="form-2-name" id="id_form-2-name" /></td></tr>"""
)
# If max_num is 0 then no form is rendered at all.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), "")
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=5, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th><td>
<input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>"""
)
# Ensure that max_num has no effect when extra is less than max_num.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>"""
)
def test_max_num_with_initial_data(self):
# max_num with initial data
# When not passed, max_num will take a high default value, leaving the
# number of forms only controlled by the value of the initial and extra
# parameters.
initial = [
{'name': 'Fernet and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" value="Fernet and Coke" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>"""
)
def test_max_num_zero(self):
# If max_num is 0 then no form is rendered at all, regardless of extra,
# unless initial data is present. (This changed in the patch for bug
# 20084 -- previously max_num=0 trumped initial data)
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), "")
# test that initial trumps max_num
initial = [
{'name': 'Fernet and Coke'},
{'name': 'Bloody Mary'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input id="id_form-0-name" name="form-0-name" type="text" value="Fernet and Coke" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary" /></td></tr>"""
)
def test_more_initial_than_max_num(self):
# More initial forms than max_num now results in all initial forms
# being displayed (but no extra forms). This behavior was changed
# from max_num taking precedence in the patch for #20084
initial = [
{'name': 'Gin Tonic'},
{'name': 'Bloody Mary'},
{'name': 'Jack and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input id="id_form-0-name" name="form-0-name" type="text" value="Gin Tonic" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary" /></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th>
<td><input id="id_form-2-name" name="form-2-name" type="text" value="Jack and Coke" /></td></tr>"""
)
# One form from initial and extra=3 with max_num=2 should result in the one
# initial form and one extra.
initial = [
{'name': 'Gin Tonic'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertHTMLEqual(
'\n'.join(form_output),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" value="Gin Tonic" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>"""
)
def test_regression_6926(self):
# Regression test for #6926 ##################################################
# Make sure the management form has the correct prefix.
formset = FavoriteDrinksFormSet()
self.assertEqual(formset.management_form.prefix, 'form')
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MIN_NUM_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
}
formset = FavoriteDrinksFormSet(data=data)
self.assertEqual(formset.management_form.prefix, 'form')
formset = FavoriteDrinksFormSet(initial={})
self.assertEqual(formset.management_form.prefix, 'form')
def test_regression_12878(self):
# Regression test for #12878 #################################################
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MIN_NUM_FORMS': '0', # min number of forms
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['You may only specify a drink once.'])
def test_formset_iteration(self):
# Regression tests for #16455 -- formset instances are iterable
ChoiceFormset = formset_factory(Choice, extra=3)
formset = ChoiceFormset()
# confirm iterated formset yields formset.forms
forms = list(formset)
self.assertEqual(forms, formset.forms)
self.assertEqual(len(formset), len(forms))
# confirm indexing of formset
self.assertEqual(formset[0], forms[0])
try:
formset[3]
self.fail('Requesting an invalid formset index should raise an exception')
except IndexError:
pass
# Formsets can override the default iteration order
class BaseReverseFormSet(BaseFormSet):
def __iter__(self):
return reversed(self.forms)
def __getitem__(self, idx):
return super(BaseReverseFormSet, self).__getitem__(len(self) - idx - 1)
ReverseChoiceFormset = formset_factory(Choice, BaseReverseFormSet, extra=3)
reverse_formset = ReverseChoiceFormset()
# confirm that __iter__ modifies rendering order
# compare forms from "reverse" formset with forms from original formset
self.assertEqual(str(reverse_formset[0]), str(forms[-1]))
self.assertEqual(str(reverse_formset[1]), str(forms[-2]))
self.assertEqual(len(reverse_formset), len(forms))
def test_formset_nonzero(self):
"""
Formsets with no forms should still evaluate as true.
Regression test for #15722
"""
ChoiceFormset = formset_factory(Choice, extra=0)
formset = ChoiceFormset()
self.assertEqual(len(formset.forms), 0)
self.assertTrue(formset)
def test_formset_splitdatetimefield(self):
"""
Formset should also work with SplitDateTimeField(initial=datetime.datetime.now).
Regression test for #18709.
"""
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-0-when_0': '1904-06-16',
'form-0-when_1': '15:51:33',
}
formset = SplitDateTimeFormSet(data)
self.assertTrue(formset.is_valid())
def test_formset_error_class(self):
# Regression tests for #16479 -- formset forms use ErrorList instead of the supplied error_class
class CustomErrorList(ErrorList):
pass
formset = FavoriteDrinksFormSet(error_class=CustomErrorList)
self.assertEqual(formset.forms[0].error_class, CustomErrorList)
def test_formset_calls_forms_is_valid(self):
# Regression tests for #18574 -- make sure formsets call
# is_valid() on each form.
class AnotherChoice(Choice):
def is_valid(self):
self.is_valid_called = True
return super(AnotherChoice, self).is_valid()
AnotherChoiceFormSet = formset_factory(AnotherChoice)
data = {
'choices-TOTAL_FORMS': '1', # number of forms rendered
'choices-INITIAL_FORMS': '0', # number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
formset = AnotherChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertTrue(all(form.is_valid_called for form in formset.forms))
def test_hard_limit_on_instantiated_forms(self):
"""A formset has a hard limit on the number of forms instantiated."""
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 2
ChoiceFormSet = formset_factory(Choice, max_num=1)
# someone fiddles with the mgmt form data...
formset = ChoiceFormSet(
{
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '4',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
'choices-2-choice': 'Two',
'choices-2-votes': '2',
'choices-3-choice': 'Three',
'choices-3-votes': '3',
},
prefix='choices',
)
# But we still only instantiate 3 forms
self.assertEqual(len(formset.forms), 3)
# and the formset isn't valid
self.assertFalse(formset.is_valid())
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_increase_hard_limit(self):
"""Can increase the built-in forms limit via a higher max_num."""
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 3
# for this form, we want a limit of 4
ChoiceFormSet = formset_factory(Choice, max_num=4)
formset = ChoiceFormSet(
{
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '4',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
'choices-2-choice': 'Two',
'choices-2-votes': '2',
'choices-3-choice': 'Three',
'choices-3-votes': '3',
},
prefix='choices',
)
# Four forms are instantiated and no exception is raised
self.assertEqual(len(formset.forms), 4)
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_non_form_errors_run_full_clean(self):
# Regression test for #11160
# If non_form_errors() is called without calling is_valid() first,
# it should ensure that full_clean() is called.
class BaseCustomFormSet(BaseFormSet):
def clean(self):
raise ValidationError("This is a non-form error")
ChoiceFormSet = formset_factory(Choice, formset=BaseCustomFormSet)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertIsInstance(formset.non_form_errors(), ErrorList)
self.assertEqual(list(formset.non_form_errors()),
['This is a non-form error'])
def test_validate_max_ignores_forms_marked_for_deletion(self):
class CheckForm(Form):
field = IntegerField()
data = {
'check-TOTAL_FORMS': '2',
'check-INITIAL_FORMS': '0',
'check-MAX_NUM_FORMS': '1',
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
}
CheckFormSet = formset_factory(CheckForm, max_num=1, validate_max=True,
can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
def test_formset_total_error_count(self):
"""A valid formset should have 0 total errors."""
data = [ # formset_data, expected error count
([('Calexico', '100')], 0),
([('Calexico', '')], 1),
([('', 'invalid')], 2),
([('Calexico', '100'), ('Calexico', '')], 1),
([('Calexico', ''), ('Calexico', '')], 2),
]
for formset_data, expected_error_count in data:
formset = self.make_choiceformset(formset_data)
self.assertEqual(formset.total_error_count(), expected_error_count)
def test_formset_total_error_count_with_non_form_errors(self):
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.total_error_count(), 1)
data['choices-1-votes'] = ''
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.total_error_count(), 2)
def test_html_safe(self):
formset = self.make_choiceformset()
self.assertTrue(hasattr(formset, '__html__'))
self.assertEqual(force_text(formset), formset.__html__())
data = {
'choices-TOTAL_FORMS': '1', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
class Choice(Form):
choice = CharField()
votes = IntegerField()
ChoiceFormSet = formset_factory(Choice)
class FormsetAsFooTests(SimpleTestCase):
def test_as_table(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(
formset.as_table(),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" />
<input type="hidden" name="choices-INITIAL_FORMS" value="0" />
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0" />
<input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice" value="Calexico" /></td></tr>
<tr><th>Votes:</th><td><input type="number" name="choices-0-votes" value="100" /></td></tr>"""
)
def test_as_p(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(
formset.as_p(),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" />
<input type="hidden" name="choices-INITIAL_FORMS" value="0" />
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0" />
<input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<p>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></p>
<p>Votes: <input type="number" name="choices-0-votes" value="100" /></p>"""
)
def test_as_ul(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertHTMLEqual(
formset.as_ul(),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" />
<input type="hidden" name="choices-INITIAL_FORMS" value="0" />
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0" />
<input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100" /></li>"""
)
# Regression test for #11418 #################################################
class ArticleForm(Form):
title = CharField()
pub_date = DateField()
ArticleFormSet = formset_factory(ArticleForm)
class TestIsBoundBehavior(SimpleTestCase):
def test_no_data_raises_validation_error(self):
with self.assertRaises(ValidationError):
ArticleFormSet({}).is_valid()
def test_with_management_data_attrs_work_fine(self):
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
}
formset = ArticleFormSet(data)
self.assertEqual(0, formset.initial_form_count())
self.assertEqual(1, formset.total_form_count())
self.assertTrue(formset.is_bound)
self.assertTrue(formset.forms[0].is_bound)
self.assertTrue(formset.is_valid())
self.assertTrue(formset.forms[0].is_valid())
self.assertEqual([{}], formset.cleaned_data)
def test_form_errors_are_caught_by_formset(self):
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-0-title': 'Test',
'form-0-pub_date': '1904-06-16',
'form-1-title': 'Test',
'form-1-pub_date': '', # <-- this date is missing but required
}
formset = ArticleFormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual([{}, {'pub_date': ['This field is required.']}], formset.errors)
def test_empty_forms_are_unbound(self):
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-0-title': 'Test',
'form-0-pub_date': '1904-06-16',
}
unbound_formset = ArticleFormSet()
bound_formset = ArticleFormSet(data)
empty_forms = []
empty_forms.append(unbound_formset.empty_form)
empty_forms.append(bound_formset.empty_form)
# Empty forms should be unbound
self.assertFalse(empty_forms[0].is_bound)
self.assertFalse(empty_forms[1].is_bound)
# The empty forms should be equal.
self.assertHTMLEqual(empty_forms[0].as_p(), empty_forms[1].as_p())
class TestEmptyFormSet(SimpleTestCase):
def test_empty_formset_is_valid(self):
"""Test that an empty formset still calls clean()"""
EmptyFsetWontValidateFormset = formset_factory(FavoriteDrinkForm, extra=0, formset=EmptyFsetWontValidate)
formset = EmptyFsetWontValidateFormset(
data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '0'},
prefix="form",
)
formset2 = EmptyFsetWontValidateFormset(
data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '1', 'form-0-name': 'bah'},
prefix="form",
)
self.assertFalse(formset.is_valid())
self.assertFalse(formset2.is_valid())
def test_empty_formset_media(self):
"""Make sure media is available on empty formset, refs #19545"""
class MediaForm(Form):
class Media:
js = ('some-file.js',)
self.assertIn('some-file.js', str(formset_factory(MediaForm, extra=0)().media))
def test_empty_formset_is_multipart(self):
"""Make sure `is_multipart()` works with empty formset, refs #19545"""
class FileForm(Form):
file = FileField()
self.assertTrue(formset_factory(FileForm, extra=0)().is_multipart())
| bsd-3-clause |
mcepl/youtube-dl | test/test_iqiyi_sdk_interpreter.py | 16 | 1103 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL
from youtube_dl.extractor import IqiyiIE
class IqiyiIEWithCredentials(IqiyiIE):
def _get_login_info(self):
return 'foo', 'bar'
class WarningLogger(object):
def __init__(self):
self.messages = []
def warning(self, msg):
self.messages.append(msg)
def debug(self, msg):
pass
def error(self, msg):
pass
class TestIqiyiSDKInterpreter(unittest.TestCase):
def test_iqiyi_sdk_interpreter(self):
'''
Test the functionality of IqiyiSDKInterpreter by trying to log in.
If `sign` is incorrect, the /validate call returns an HTTP 556 error.
'''
logger = WarningLogger()
ie = IqiyiIEWithCredentials(FakeYDL({'logger': logger}))
ie._login()
self.assertTrue('unable to log in:' in logger.messages[0])
if __name__ == '__main__':
unittest.main()
| unlicense |
seiji56/rmaze-2016 | logic_code/last_ver/phys/movement.py | 1 | 6199 | import herkulex
import time
import thread
import RPi.GPIO as gpio
import sensory as sn
herkulex.connect("/dev/ttyAMA0", 115200)
FR = herkulex.servo(0xfd)
FL = herkulex.servo(16)
BR = herkulex.servo(10)
BL = herkulex.servo(20)
DROP = herkulex.servo(50)
ALL = herkulex.servo(0xfe)
ALL.torque_on()
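# FR/FL/BR/BL are the four wheel servos and DROP drives the rescue-kit
# dispenser; the IDs come from this robot's wiring, and 0xfe is the Herkulex
# broadcast address, so ALL talks to every servo at once.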
def align(tout = 1, dst_sth = 400):
start = time.time()
while time.time() - start < tout:
vpow = sn.vertcorr(dst_sth)
setPow([-vpow, vpow, -vpow, vpow], sn.latcorr(dst_sth),
sn.angcorr(dst_sth))
stop()
def stop():
ALL.set_servo_speed(1, 0x06)
ALL.set_led(0x06)
def setPow(pots, latcorr = 0, angcorr = 0):
if pots[0] + latcorr + angcorr != 0:
FR.set_servo_speed(pots[0] + latcorr + angcorr, 0x06)
if pots[1] + latcorr + angcorr != 0:
FL.set_servo_speed(pots[1] + latcorr + angcorr, 0x06)
if pots[2] - latcorr + angcorr != 0:
BR.set_servo_speed(pots[2] - latcorr + angcorr, 0x06)
if pots[3] - latcorr + angcorr != 0:
BL.set_servo_speed(pots[3] - latcorr + angcorr, 0x06)
ALL.set_led(0x06)
hasvictim = -1
readmlx = True
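# mlxvchk runs on a background thread while the robot walks: it polls the
# MLX thermal sensor via sensory.hasvictim() and latches the side a victim
# was seen on into the module-level hasvictim flag (reset to -1 before each
# walk); clearing readmlx stops the loop.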
def mlxvchk():
global hasvictim
while readmlx:
vic = sn.hasvictim()
if vic >= 0:
hasvictim = vic
walkpow = 1000
walktime = 1.54
sensordfront = 0
sensordback = 0
walkcalib = -50
expft = .5
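# walkf drives forward for move[1] cells of walktime seconds each, with
# optional early stops on wall-state changes (scstop), a front wall, or a
# colour tile, and triggers drop() if the thermal thread flagged a victim.
# Illustrative call, assuming move = (action_code, cell_count):
# walkf((0, 2)) walks two cells with the default thresholds.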
def walkf(move, dst_sth = 400, col_sth = 500, scstop = False, scuse = False,
old = True, corr = False):
global readmlx
global hasvictim
start = time.time()
basepow = [-walkpow, walkpow + walkcalib,
-walkpow, walkpow + walkcalib]
mlxthread = 0
if move[1] == 1:
readmlx = True
hasvictim = -1
mlxthread = thread.start_new_thread(mlxvchk, ())  # start_new_thread lives in the thread module imported above
if sn.shouldAlign(dst_sth):
align(.5)
if corr:
setPow(basepow, sn.latcorr(dst_sth), sn.angcorr(dst_sth))
setPow(basepow, 0, 0)
if not old:
if scstop and scuse:
wallstate = [sn.wl(dst_sth), sn.wr(dst_sth)] #l, r
tmpws = [sn.wl(dst_sth), sn.wr(dst_sth)]
t_curr = 0
while t_curr < move[1] and sn.wl(dst_sth) == wallstate[0] and not sn.wf(
dst_sth) and sn.wr(dst_sth) == wallstate[1] and not sn.color(col_sth
):
t_start = time.time()
while time.time() - t_start < walktime:
if corr:
setPow(basepow, sn.latcorr(dst_sth), sn.angcorr(dst_sth))
setPow(basepow, 0, 0)
sm = 0
cnt = 0
if sn.wlf() != tmpws[0]:
sm += time.time() - expft
cnt += 1
tmpws[0] = sn.wlf()
if sn.wrf() != tmpws[1]:
sm += time.time() - expft
cnt += 1
tmpws[1] = sn.wrf()
if cnt > 0:
t_start = sm/cnt
t_curr += 1
elif scstop and not scuse:
wallstate = [sn.wl(dst_sth), sn.wr(dst_sth)] #l, r
t_curr = 0
while t_curr < move[1] and sn.wl(dst_sth) == wallstate[0] and not sn.wf(
dst_sth) and sn.wr(dst_sth) == wallstate[1] and not sn.color(col_sth
):
t_start = time.time()
while time.time() - t_start < walktime:
if corr:
setPow(basepow, sn.latcorr(dst_sth), sn.angcorr(dst_sth))
setPow(basepow, 0, 0)
t_curr += 1
elif not scstop and scuse:
tmpws = [sn.wl(dst_sth), sn.wr(dst_sth)]
t_curr = 0
while t_curr < move[1] and not sn.wf(dst_sth) and not sn.color(col_sth):
t_start = time.time()
while time.time() - t_start < walktime:
if corr:
setPow(basepow, sn.latcorr(dst_sth), sn.angcorr(dst_sth))
setPow(basepow, 0, 0)
sm = 0
cnt = 0
if sn.wlf() != tmpws[0]:
sm += time.time() - expft
cnt += 1
tmpws[0] = sn.wlf()
if sn.wrf() != tmpws[1]:
sm += time.time() - expft
cnt += 1
tmpws[1] = sn.wrf()
if cnt > 0:
t_start = sm/cnt
t_curr += 1
else:
t_curr = 0
while t_curr < move[1] and not sn.wf(dst_sth) and not sn.color(col_sth):
t_start = time.time()
while time.time() - t_start < walktime:
if corr:
setPow(basepow, sn.latcorr(dst_sth), sn.angcorr(dst_sth))
setPow(basepow, 0, 0)
t_curr += 1
else:
time.sleep(walktime*move[1])
stop()
readmlx = False
if hasvictim >= 0:
act = drop(hasvictim)
move = [act, move]
return move
rotpow = 1000
rottime = 1 # per 90 dg
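# turnr/turnl rotate in place, open loop: rotpow is the wheel speed and
# rottime the seconds per 90 degrees, so move[1] counts quarter turns.
# Illustrative call (the action code is an assumption): turnr((2, 1))
# performs a single 90-degree right turn.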
def turnr(move, dst_sth = 400):
basepow = [rotpow for i in range(4)]
setPow(basepow, 0, 0)
time.sleep(rottime*move[1])
stop()
return move
def turnl(move, dst_sth = 400):
basepow = [-rotpow for i in range(4)]
setPow(basepow, 0, 0)
time.sleep(rottime*move[1])
stop()
return move
def upramp(dst_sth = 400):
basepow = [-walkpow, walkpow + walkcalib,
-walkpow, walkpow + walkcalib]
while sn.isramp():
setPow(basepow, sn.latcorr(dst_sth), sn.angcorr(dst_sth))
walkf((0, .3))
def downramp():
upramp()
gpio.setmode(gpio.BCM)
gpio.setup(20, gpio.OUT)
gpio.output(20, gpio.LOW)
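# drop(side) signals a found victim: it blinks the indicator LED on GPIO 20
# five times, turns to face the victim (ret uses the same (action, count)
# move encoding -- an assumption here), then swings the DROP servo out and
# back to release a rescue kit.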
def drop(side):
ret = None
if side == 0:
ret = (1, 2)
elif side == 1:
ret = (1, 1)
elif side == 3:
ret = (3, 1)
for i in range(5):
gpio.output(20, gpio.HIGH)
time.sleep(.5)
gpio.output(20, gpio.LOW)
time.sleep(.5)
apply(ret)
DROP.set_servo_angle(0, 1, 0x08)
time.sleep(1)
DROP.set_servo_angle(-95, 1, 0x08)
time.sleep(1)
DROP.set_servo_speed(1, 0x06)
ALL.set_led(0x06)
return ret
| gpl-3.0 |
inspyration/odoo | addons/event/__openerp__.py | 5 | 1447 | # -*- coding: utf-8 -*-
{
'name': 'Events Organisation',
'version': '0.1',
'website': 'https://www.odoo.com/page/events',
'category': 'Tools',
'summary': 'Trainings, Conferences, Meetings, Exhibitions, Registrations',
'description': """
Organization and management of Events.
======================================
The event module allows you to efficiently organise events and all related tasks: planning, registration tracking,
attendance, etc.
Key Features
------------
* Manage your Events and Registrations
* Use emails to automatically confirm and send acknowledgements for any event registration
""",
'author': 'OpenERP SA',
'depends': ['base_setup', 'board', 'email_template', 'marketing'],
'data': [
'security/event_security.xml',
'security/ir.model.access.csv',
'wizard/event_confirm_view.xml',
'report/report_event_registration_view.xml',
'event_view.xml',
'event_data.xml',
'res_config_view.xml',
'email_template.xml',
'views/event.xml',
'event_report.xml',
'views/report_registrationbadge.xml',
],
'demo': [
'event_demo.xml',
],
'installable': True,
'auto_install': False,
'images': ['images/1_event_type_list.jpeg', 'images/2_events.jpeg', 'images/3_registrations.jpeg', 'images/events_kanban.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
buckket/twtxt | twtxt/helper.py | 1 | 5887 | """
twtxt.helper
~~~~~~~~~~~~
This module implements various helper for use in twtxt.
:copyright: (c) 2016-2017 by buckket.
:license: MIT, see LICENSE for more details.
"""
import shlex
import subprocess
import sys
import textwrap
import click
import pkg_resources
from twtxt.mentions import format_mentions
from twtxt.parser import parse_iso8601
def style_timeline(tweets, porcelain=False):
if porcelain:
return "\n".join(style_tweet(tweet, porcelain) for tweet in tweets)
else:
return "\n{0}\n".format("\n\n".join(filter(None, (style_tweet(tweet, porcelain) for tweet in tweets))))
def style_tweet(tweet, porcelain=False):
conf = click.get_current_context().obj["conf"]
limit = conf.character_limit
if porcelain:
return "{nick}\t{url}\t{tweet}".format(
nick=tweet.source.nick,
url=tweet.source.url,
tweet=str(tweet))
else:
if sys.stdout.isatty() and not tweet.text.isprintable():
return None
styled_text = format_mentions(tweet.text)
len_styling = len(styled_text) - len(click.unstyle(styled_text))
final_text = textwrap.shorten(styled_text, limit + len_styling) if limit else styled_text
timestamp = tweet.absolute_datetime if conf.use_abs_time else tweet.relative_datetime
return "➤ {nick} ({time}):\n{tweet}".format(
nick=click.style(tweet.source.nick, bold=True),
tweet=final_text,
time=click.style(timestamp, dim=True))
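# Rendered example (illustrative values):
# "➤ buckket (2 hours ago):\nHello, twtxt!"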
def style_source(source, porcelain=False):
if porcelain:
return "{nick}\t{url}".format(
nick=source.nick,
url=source.url)
else:
return "➤ {nick} @ {url}".format(
nick=click.style(source.nick, bold=True),
url=source.url)
def style_source_with_status(source, status, porcelain=False):
if porcelain:
return "{nick}\t{url}\t{status}\t{content_length}\t{last_modified}".format(
nick=source.nick,
url=source.url,
status=status.status_code,
content_length=status.content_length,
last_modified=status.last_modified)
else:
if status.status_code == 200:
scolor, smessage = "green", str(status.status_code)
elif status:
scolor, smessage = "red", str(status.status_code)
else:
scolor, smessage = "red", "ERROR"
return "➤ {nick} @ {url} [{content_length}, {last_modified}] ({status})".format(
nick=click.style(source.nick, bold=True, fg=scolor),
url=source.url,
status=click.style(smessage, fg=scolor),
content_length=status.natural_content_length,
last_modified=status.natural_last_modified)
def validate_created_at(ctx, param, value):
if value:
try:
return parse_iso8601(value)
except (ValueError, OverflowError) as e:
raise click.BadParameter("{0}.".format(e))
def validate_text(ctx, param, value):
conf = click.get_current_context().obj["conf"]
if isinstance(value, tuple):
value = " ".join(value)
if not value and not sys.stdin.isatty():
value = click.get_text_stream("stdin").read()
if value:
value = value.strip()
if conf.character_warning and len(value) > conf.character_warning:
click.confirm("✂ Warning: Tweet is longer than {0} characters. Are you sure?".format(
conf.character_warning), abort=True)
return value
else:
raise click.BadArgumentUsage("Text can’t be empty.")
def validate_config_key(ctx, param, value):
"""Validate a configuration key according to `section.item`."""
if not value:
return value
try:
section, item = value.split(".", 1)
except ValueError:
raise click.BadArgumentUsage("Given key does not contain a section name.")
else:
return section, item
def run_pre_tweet_hook(hook, options):
try:
command = shlex.split(hook.format(**options))
except KeyError:
click.echo("✗ Invalid variables in pre_tweet_hook.")
raise click.Abort
try:
subprocess.check_output(command, shell=True, universal_newlines=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
click.echo("✗ pre_tweet_hook returned {}.".format(e.returncode))
if e.output:
click.echo(e.output)
raise click.Abort
def run_post_tweet_hook(hook, options):
try:
command = shlex.split(hook.format(**options))
except KeyError:
click.echo("✗ Invalid variables in post_tweet_hook.")
return
try:
subprocess.check_output(command, shell=True, universal_newlines=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
click.echo("✗ post_tweet_hook returned {}.".format(e.returncode))
if e.output:
click.echo(e.output)
def sort_and_truncate_tweets(tweets, direction, limit):
if direction == "descending":
return sorted(tweets, reverse=True)[:limit]
elif direction == "ascending":
if limit < len(tweets):
return sorted(tweets)[len(tweets) - limit:]
else:
return sorted(tweets)
else:
return []
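# Illustrative use: sort_and_truncate_tweets(tweets, "descending", 20)
# returns the 20 newest tweets, newest first, while "ascending" keeps the
# newest 20 but in oldest-to-newest order.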
def generate_user_agent():
try:
version = pkg_resources.require("twtxt")[0].version
except pkg_resources.DistributionNotFound:
version = "unknown"
conf = click.get_current_context().obj["conf"]
if conf.disclose_identity and conf.nick and conf.twturl:
user_agent = "twtxt/{version} (+{url}; @{nick})".format(
version=version, url=conf.twturl, nick=conf.nick)
else:
user_agent = "twtxt/{version}".format(version=version)
return {"User-Agent": user_agent}
| mit |
ghsr/android_kernel_samsung_i9152 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
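# zoom converts between trace time and pixels: at the default zoom of 0.5,
# us_to_px(1000) == 0.5, and px_to_us is its inverse.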
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r, g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
fabian4/trove | trove/limits/views.py | 7 | 1896 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_utils import timeutils
class LimitView(object):
def __init__(self, rate_limit):
self.rate_limit = rate_limit
def data(self):
get_utc = datetime.datetime.utcfromtimestamp
next_avail = get_utc(self.rate_limit.get("resetTime", 0))
return {"limit": {
"nextAvailable": timeutils.isotime(at=next_avail),
"remaining": self.rate_limit.get("remaining", 0),
"unit": self.rate_limit.get("unit", ""),
"value": self.rate_limit.get("value", ""),
"verb": self.rate_limit.get("verb", ""),
"uri": self.rate_limit.get("URI", ""),
"regex": self.rate_limit.get("regex", "")
}
}
class LimitViews(object):
def __init__(self, abs_limits, rate_limits):
self.abs_limits = abs_limits
self.rate_limits = rate_limits
def data(self):
data = []
abs_view = dict()
abs_view["verb"] = "ABSOLUTE"
for resource_name, abs_limit in self.abs_limits.items():
abs_view["max_" + resource_name] = abs_limit
data.append(abs_view)
for l in self.rate_limits:
data.append(LimitView(l).data()["limit"])
return {"limits": data}
| apache-2.0 |
berendkleinhaneveld/VTK | ThirdParty/Twisted/twisted/web/test/test_wsgi.py | 33 | 54989 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.wsgi}.
"""
__metaclass__ = type
from sys import exc_info
from urllib import quote
from thread import get_ident
import StringIO, cStringIO, tempfile
from zope.interface.verify import verifyObject
from twisted.python.log import addObserver, removeObserver, err
from twisted.python.failure import Failure
from twisted.python.threadpool import ThreadPool
from twisted.internet.defer import Deferred, gatherResults
from twisted.internet import reactor
from twisted.internet.error import ConnectionLost
from twisted.trial.unittest import TestCase
from twisted.web import http
from twisted.web.resource import IResource, Resource
from twisted.web.server import Request, Site, version
from twisted.web.wsgi import WSGIResource
from twisted.web.test.test_web import DummyChannel
class SynchronousThreadPool:
"""
A single-threaded implementation of part of the L{ThreadPool} interface.
This implementation calls functions synchronously rather than running
them in a thread pool. It is used to make the tests which are not
directly for thread-related behavior deterministic.
"""
def callInThread(self, f, *a, **kw):
"""
Call C{f(*a, **kw)} in this thread rather than scheduling it to be
called in a thread.
"""
try:
f(*a, **kw)
except:
# callInThread doesn't let exceptions propagate to the caller.
# None is always returned and any exception raised gets logged
# later on.
err(None, "Callable passed to SynchronousThreadPool.callInThread failed")
class SynchronousReactorThreads:
"""
A single-threaded implementation of part of the L{IReactorThreads}
interface. This implementation assumes that it will only be invoked
from the reactor thread, so it calls functions synchronously rather than
trying to schedule them to run in the reactor thread. It is used in
conjunction with L{SynchronousThreadPool} to make the tests which are
not directly for thread-related behavior deterministic.
"""
def callFromThread(self, f, *a, **kw):
"""
Call C{f(*a, **kw)} in this thread which should also be the reactor
thread.
"""
f(*a, **kw)
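# Note (added): these two fakes are used together -- WSGIResource is handed a
# SynchronousReactorThreads and a SynchronousThreadPool below, so work that
# production code would bounce through callInThread/callFromThread runs
# inline, making the non-threading tests deterministic.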
class WSGIResourceTests(TestCase):
def setUp(self):
"""
Create a L{WSGIResource} with synchronous threading objects and a no-op
application object. This is useful for testing certain things about
the resource implementation which are unrelated to WSGI.
"""
self.resource = WSGIResource(
SynchronousReactorThreads(), SynchronousThreadPool(),
lambda environ, startResponse: None)
def test_interfaces(self):
"""
L{WSGIResource} implements L{IResource} and stops resource traversal.
"""
verifyObject(IResource, self.resource)
self.assertTrue(self.resource.isLeaf)
def test_unsupported(self):
"""
A L{WSGIResource} cannot have L{IResource} children. Its
C{getChildWithDefault} and C{putChild} methods raise L{RuntimeError}.
"""
self.assertRaises(
RuntimeError,
self.resource.getChildWithDefault,
"foo", Request(DummyChannel(), False))
self.assertRaises(
RuntimeError,
self.resource.putChild,
"foo", Resource())
class WSGITestsMixin:
"""
@ivar channelFactory: A no-argument callable which will be invoked to
create a new HTTP channel to associate with request objects.
"""
channelFactory = DummyChannel
def setUp(self):
self.threadpool = SynchronousThreadPool()
self.reactor = SynchronousReactorThreads()
def lowLevelRender(
self, requestFactory, applicationFactory, channelFactory, method,
version, resourceSegments, requestSegments, query=None, headers=[],
body=None, safe=''):
"""
@param method: A C{str} giving the request method to use.
@param version: A C{str} like C{'1.1'} giving the request version.
@param resourceSegments: A C{list} of unencoded path segments which
specifies the location in the resource hierarchy at which the
L{WSGIResource} will be placed, eg C{['']} for I{/}, C{['foo',
'bar', '']} for I{/foo/bar/}, etc.
@param requestSegments: A C{list} of unencoded path segments giving the
request URI.
@param query: A C{list} of two-tuples of C{str} giving unencoded query
argument keys and values.
@param headers: A C{list} of two-tuples of C{str} giving request header
names and corresponding values.
@param safe: A C{str} giving the bytes which are to be considered
I{safe} for inclusion in the request URI and not quoted.
@return: A L{Deferred} which will be called back with a two-tuple of
the arguments passed which would be passed to the WSGI application
object for this configuration and request (ie, the environment and
start_response callable).
"""
root = WSGIResource(
self.reactor, self.threadpool, applicationFactory())
resourceSegments.reverse()
for seg in resourceSegments:
tmp = Resource()
tmp.putChild(seg, root)
root = tmp
channel = channelFactory()
channel.site = Site(root)
request = requestFactory(channel, False)
for k, v in headers:
request.requestHeaders.addRawHeader(k, v)
request.gotLength(0)
if body:
request.content.write(body)
request.content.seek(0)
uri = '/' + '/'.join([quote(seg, safe) for seg in requestSegments])
if query is not None:
uri += '?' + '&'.join(['='.join([quote(k, safe), quote(v, safe)])
for (k, v) in query])
request.requestReceived(method, uri, 'HTTP/' + version)
return request
def render(self, *a, **kw):
result = Deferred()
def applicationFactory():
def application(*args):
environ, startResponse = args
result.callback(args)
startResponse('200 OK', [])
return iter(())
return application
self.lowLevelRender(
Request, applicationFactory, self.channelFactory, *a, **kw)
return result
def requestFactoryFactory(self, requestClass=Request):
d = Deferred()
def requestFactory(*a, **kw):
request = requestClass(*a, **kw)
# If notifyFinish is called after lowLevelRender returns, it won't
# do the right thing, because the request will have already
# finished. One might argue that this is a bug in
# Request.notifyFinish.
request.notifyFinish().chainDeferred(d)
return request
return d, requestFactory
def getContentFromResponse(self, response):
return response.split('\r\n\r\n', 1)[1]
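# Note (added): because the thread pool above is synchronous, lowLevelRender
# drives the whole request/response cycle before it returns, so the Deferred
# handed back by render() has typically already fired; the tests rely on that
# determinism rather than on a running reactor.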
class EnvironTests(WSGITestsMixin, TestCase):
"""
Tests for the values in the C{environ} C{dict} passed to the application
object by L{twisted.web.wsgi.WSGIResource}.
"""
def environKeyEqual(self, key, value):
def assertEnvironKeyEqual((environ, startResponse)):
self.assertEqual(environ[key], value)
return assertEnvironKeyEqual
def test_environIsDict(self):
"""
L{WSGIResource} calls the application object with an C{environ}
parameter which is exactly of type C{dict}.
"""
d = self.render('GET', '1.1', [], [''])
def cbRendered((environ, startResponse)):
self.assertIdentical(type(environ), dict)
d.addCallback(cbRendered)
return d
def test_requestMethod(self):
"""
The C{'REQUEST_METHOD'} key of the C{environ} C{dict} passed to the
application contains the HTTP method in the request (RFC 3875, section
4.1.12).
"""
get = self.render('GET', '1.1', [], [''])
get.addCallback(self.environKeyEqual('REQUEST_METHOD', 'GET'))
# Also make sure a different request method shows up as a different
# value in the environ dict.
post = self.render('POST', '1.1', [], [''])
post.addCallback(self.environKeyEqual('REQUEST_METHOD', 'POST'))
return gatherResults([get, post])
def test_scriptName(self):
"""
The C{'SCRIPT_NAME'} key of the C{environ} C{dict} passed to the
application contains the I{abs_path} (RFC 2396, section 3) to this
resource (RFC 3875, section 4.1.13).
"""
root = self.render('GET', '1.1', [], [''])
root.addCallback(self.environKeyEqual('SCRIPT_NAME', ''))
emptyChild = self.render('GET', '1.1', [''], [''])
emptyChild.addCallback(self.environKeyEqual('SCRIPT_NAME', '/'))
leaf = self.render('GET', '1.1', ['foo'], ['foo'])
leaf.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo'))
container = self.render('GET', '1.1', ['foo', ''], ['foo', ''])
container.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo/'))
internal = self.render('GET', '1.1', ['foo'], ['foo', 'bar'])
internal.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo'))
unencoded = self.render(
'GET', '1.1', ['foo', '/', 'bar\xff'], ['foo', '/', 'bar\xff'])
# The RFC says "(not URL-encoded)", even though that makes
# interpretation of SCRIPT_NAME ambiguous.
unencoded.addCallback(
self.environKeyEqual('SCRIPT_NAME', '/foo///bar\xff'))
return gatherResults([
root, emptyChild, leaf, container, internal, unencoded])
def test_pathInfo(self):
"""
The C{'PATH_INFO'} key of the C{environ} C{dict} passed to the
application contains the suffix of the request URI path which is not
included in the value for the C{'SCRIPT_NAME'} key (RFC 3875, section
4.1.5).
"""
assertKeyEmpty = self.environKeyEqual('PATH_INFO', '')
root = self.render('GET', '1.1', [], [''])
root.addCallback(self.environKeyEqual('PATH_INFO', '/'))
emptyChild = self.render('GET', '1.1', [''], [''])
emptyChild.addCallback(assertKeyEmpty)
leaf = self.render('GET', '1.1', ['foo'], ['foo'])
leaf.addCallback(assertKeyEmpty)
container = self.render('GET', '1.1', ['foo', ''], ['foo', ''])
container.addCallback(assertKeyEmpty)
internalLeaf = self.render('GET', '1.1', ['foo'], ['foo', 'bar'])
internalLeaf.addCallback(self.environKeyEqual('PATH_INFO', '/bar'))
internalContainer = self.render('GET', '1.1', ['foo'], ['foo', ''])
internalContainer.addCallback(self.environKeyEqual('PATH_INFO', '/'))
unencoded = self.render('GET', '1.1', [], ['foo', '/', 'bar\xff'])
unencoded.addCallback(
self.environKeyEqual('PATH_INFO', '/foo///bar\xff'))
return gatherResults([
root, leaf, container, internalLeaf,
internalContainer, unencoded])
def test_queryString(self):
"""
The C{'QUERY_STRING'} key of the C{environ} C{dict} passed to the
application contains the portion of the request URI after the first
I{?} (RFC 3875, section 4.1.7).
"""
missing = self.render('GET', '1.1', [], [''], None)
missing.addCallback(self.environKeyEqual('QUERY_STRING', ''))
empty = self.render('GET', '1.1', [], [''], [])
empty.addCallback(self.environKeyEqual('QUERY_STRING', ''))
present = self.render('GET', '1.1', [], [''], [('foo', 'bar')])
present.addCallback(self.environKeyEqual('QUERY_STRING', 'foo=bar'))
unencoded = self.render('GET', '1.1', [], [''], [('/', '/')])
unencoded.addCallback(self.environKeyEqual('QUERY_STRING', '%2F=%2F'))
# "?" is reserved in the <searchpart> portion of a URL. However, it
# seems to be a common mistake of clients to forget to quote it. So,
# make sure we handle that invalid case.
doubleQuestion = self.render(
'GET', '1.1', [], [''], [('foo', '?bar')], safe='?')
doubleQuestion.addCallback(
self.environKeyEqual('QUERY_STRING', 'foo=?bar'))
return gatherResults([
missing, empty, present, unencoded, doubleQuestion])
def test_contentType(self):
"""
The C{'CONTENT_TYPE'} key of the C{environ} C{dict} passed to the
application contains the value of the I{Content-Type} request header
(RFC 3875, section 4.1.3).
"""
missing = self.render('GET', '1.1', [], [''])
missing.addCallback(self.environKeyEqual('CONTENT_TYPE', ''))
present = self.render(
'GET', '1.1', [], [''], None, [('content-type', 'x-foo/bar')])
present.addCallback(self.environKeyEqual('CONTENT_TYPE', 'x-foo/bar'))
return gatherResults([missing, present])
def test_contentLength(self):
"""
The C{'CONTENT_LENGTH'} key of the C{environ} C{dict} passed to the
application contains the value of the I{Content-Length} request header
(RFC 3875, section 4.1.2).
"""
missing = self.render('GET', '1.1', [], [''])
missing.addCallback(self.environKeyEqual('CONTENT_LENGTH', ''))
present = self.render(
'GET', '1.1', [], [''], None, [('content-length', '1234')])
present.addCallback(self.environKeyEqual('CONTENT_LENGTH', '1234'))
return gatherResults([missing, present])
def test_serverName(self):
"""
The C{'SERVER_NAME'} key of the C{environ} C{dict} passed to the
application contains the best determination of the server hostname
possible, using either the value of the I{Host} header in the request
or the address the server is listening on if that header is not
present (RFC 3875, section 4.1.14).
"""
missing = self.render('GET', '1.1', [], [''])
# 10.0.0.1 value comes from a bit far away -
# twisted.test.test_web.DummyChannel.transport.getHost().host
missing.addCallback(self.environKeyEqual('SERVER_NAME', '10.0.0.1'))
present = self.render(
'GET', '1.1', [], [''], None, [('host', 'example.org')])
present.addCallback(self.environKeyEqual('SERVER_NAME', 'example.org'))
return gatherResults([missing, present])
def test_serverPort(self):
"""
The C{'SERVER_PORT'} key of the C{environ} C{dict} passed to the
application contains the port number of the server which received the
request (RFC 3875, section 4.1.15).
"""
portNumber = 12354
def makeChannel():
channel = DummyChannel()
channel.transport = DummyChannel.TCP()
channel.transport.port = portNumber
return channel
self.channelFactory = makeChannel
d = self.render('GET', '1.1', [], [''])
d.addCallback(self.environKeyEqual('SERVER_PORT', str(portNumber)))
return d
def test_serverProtocol(self):
"""
The C{'SERVER_PROTOCOL'} key of the C{environ} C{dict} passed to the
application contains the HTTP version number received in the request
(RFC 3875, section 4.1.16).
"""
old = self.render('GET', '1.0', [], [''])
old.addCallback(self.environKeyEqual('SERVER_PROTOCOL', 'HTTP/1.0'))
new = self.render('GET', '1.1', [], [''])
new.addCallback(self.environKeyEqual('SERVER_PROTOCOL', 'HTTP/1.1'))
return gatherResults([old, new])
def test_remoteAddr(self):
"""
The C{'REMOTE_ADDR'} key of the C{environ} C{dict} passed to the
application contains the address of the client making the request.
"""
d = self.render('GET', '1.1', [], [''])
d.addCallback(self.environKeyEqual('REMOTE_ADDR', '192.168.1.1'))
return d
def test_headers(self):
"""
HTTP request headers are copied into the C{environ} C{dict} passed to
the application with a C{HTTP_} prefix added to their names.
"""
singleValue = self.render(
'GET', '1.1', [], [''], None, [('foo', 'bar'), ('baz', 'quux')])
def cbRendered((environ, startResponse)):
self.assertEqual(environ['HTTP_FOO'], 'bar')
self.assertEqual(environ['HTTP_BAZ'], 'quux')
singleValue.addCallback(cbRendered)
multiValue = self.render(
'GET', '1.1', [], [''], None, [('foo', 'bar'), ('foo', 'baz')])
multiValue.addCallback(self.environKeyEqual('HTTP_FOO', 'bar,baz'))
withHyphen = self.render(
'GET', '1.1', [], [''], None, [('foo-bar', 'baz')])
withHyphen.addCallback(self.environKeyEqual('HTTP_FOO_BAR', 'baz'))
multiLine = self.render(
'GET', '1.1', [], [''], None, [('foo', 'bar\n\tbaz')])
multiLine.addCallback(self.environKeyEqual('HTTP_FOO', 'bar \tbaz'))
return gatherResults([singleValue, multiValue, withHyphen, multiLine])
def test_wsgiVersion(self):
"""
The C{'wsgi.version'} key of the C{environ} C{dict} passed to the
application has the value C{(1, 0)} indicating that this is a WSGI 1.0
container.
"""
versionDeferred = self.render('GET', '1.1', [], [''])
versionDeferred.addCallback(self.environKeyEqual('wsgi.version', (1, 0)))
return versionDeferred
def test_wsgiRunOnce(self):
"""
The C{'wsgi.run_once'} key of the C{environ} C{dict} passed to the
application is set to C{False}.
"""
once = self.render('GET', '1.1', [], [''])
once.addCallback(self.environKeyEqual('wsgi.run_once', False))
return once
def test_wsgiMultithread(self):
"""
The C{'wsgi.multithread'} key of the C{environ} C{dict} passed to the
application is set to C{True}.
"""
thread = self.render('GET', '1.1', [], [''])
thread.addCallback(self.environKeyEqual('wsgi.multithread', True))
return thread
def test_wsgiMultiprocess(self):
"""
The C{'wsgi.multiprocess'} key of the C{environ} C{dict} passed to the
application is set to C{False}.
"""
process = self.render('GET', '1.1', [], [''])
process.addCallback(self.environKeyEqual('wsgi.multiprocess', False))
return process
def test_wsgiURLScheme(self):
"""
The C{'wsgi.url_scheme'} key of the C{environ} C{dict} passed to the
application has the request URL scheme.
"""
# XXX Does this need to be different if the request is for an absolute
# URL?
def channelFactory():
channel = DummyChannel()
channel.transport = DummyChannel.SSL()
return channel
self.channelFactory = DummyChannel
httpDeferred = self.render('GET', '1.1', [], [''])
httpDeferred.addCallback(self.environKeyEqual('wsgi.url_scheme', 'http'))
self.channelFactory = channelFactory
httpsDeferred = self.render('GET', '1.1', [], [''])
httpsDeferred.addCallback(self.environKeyEqual('wsgi.url_scheme', 'https'))
return gatherResults([httpDeferred, httpsDeferred])
def test_wsgiErrors(self):
"""
The C{'wsgi.errors'} key of the C{environ} C{dict} passed to the
application is a file-like object (as defined in the U{Input and Errors
Streams<http://www.python.org/dev/peps/pep-0333/#input-and-error-streams>}
section of PEP 333) which converts bytes written to it into events for
the logging system.
"""
events = []
addObserver(events.append)
self.addCleanup(removeObserver, events.append)
errors = self.render('GET', '1.1', [], [''])
def cbErrors((environ, startApplication)):
errors = environ['wsgi.errors']
errors.write('some message\n')
errors.writelines(['another\nmessage\n'])
errors.flush()
self.assertEqual(events[0]['message'], ('some message\n',))
self.assertEqual(events[0]['system'], 'wsgi')
self.assertTrue(events[0]['isError'])
self.assertEqual(events[1]['message'], ('another\nmessage\n',))
self.assertEqual(events[1]['system'], 'wsgi')
self.assertTrue(events[1]['isError'])
self.assertEqual(len(events), 2)
errors.addCallback(cbErrors)
return errors
class InputStreamTestMixin(WSGITestsMixin):
"""
A mixin for L{TestCase} subclasses which defines a number of tests against
L{_InputStream}. The subclass is expected to create a file-like object to
be wrapped by an L{_InputStream} under test.
"""
def getFileType(self):
raise NotImplementedError(
"%s.getFile must be implemented" % (self.__class__.__name__,))
def _renderAndReturnReaderResult(self, reader, content):
contentType = self.getFileType()
class CustomizedRequest(Request):
def gotLength(self, length):
# Always allocate a file of the specified type, instead of
# using the base behavior of selecting one depending on the
# length.
self.content = contentType()
def appFactoryFactory(reader):
result = Deferred()
def applicationFactory():
def application(*args):
environ, startResponse = args
result.callback(reader(environ['wsgi.input']))
startResponse('200 OK', [])
return iter(())
return application
return result, applicationFactory
d, appFactory = appFactoryFactory(reader)
self.lowLevelRender(
CustomizedRequest, appFactory, DummyChannel,
'PUT', '1.1', [], [''], None, [],
content)
return d
def test_readAll(self):
"""
Calling L{_InputStream.read} with no arguments returns the entire input
stream.
"""
bytes = "some bytes are here"
d = self._renderAndReturnReaderResult(lambda input: input.read(), bytes)
d.addCallback(self.assertEqual, bytes)
return d
def test_readSome(self):
"""
Calling L{_InputStream.read} with an integer returns that many bytes
from the input stream, as long as it is less than or equal to the total
number of bytes available.
"""
bytes = "hello, world."
d = self._renderAndReturnReaderResult(lambda input: input.read(3), bytes)
d.addCallback(self.assertEqual, "hel")
return d
def test_readMoreThan(self):
"""
Calling L{_InputStream.read} with an integer that is greater than the
total number of bytes in the input stream returns all bytes in the
input stream.
"""
bytes = "some bytes are here"
d = self._renderAndReturnReaderResult(
lambda input: input.read(len(bytes) + 3), bytes)
d.addCallback(self.assertEqual, bytes)
return d
def test_readTwice(self):
"""
Calling L{_InputStream.read} a second time returns bytes starting from
the position after the last byte returned by the previous read.
"""
bytes = "some bytes, hello"
def read(input):
input.read(3)
return input.read()
d = self._renderAndReturnReaderResult(read, bytes)
d.addCallback(self.assertEqual, bytes[3:])
return d
def test_readNone(self):
"""
Calling L{_InputStream.read} with C{None} as an argument returns all
bytes in the input stream.
"""
bytes = "the entire stream"
d = self._renderAndReturnReaderResult(
lambda input: input.read(None), bytes)
d.addCallback(self.assertEqual, bytes)
return d
def test_readNegative(self):
"""
Calling L{_InputStream.read} with a negative integer as an argument
returns all bytes in the input stream.
"""
bytes = "all of the input"
d = self._renderAndReturnReaderResult(
lambda input: input.read(-1), bytes)
d.addCallback(self.assertEqual, bytes)
return d
def test_readline(self):
"""
Calling L{_InputStream.readline} with no argument returns one line from
the input stream.
"""
bytes = "hello\nworld"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(), bytes)
d.addCallback(self.assertEqual, "hello\n")
return d
def test_readlineSome(self):
"""
Calling L{_InputStream.readline} with an integer returns at most that
many bytes, even if it is not enough to make up a complete line.
COMPATIBILITY NOTE: the size argument is excluded from the WSGI
specification, but is provided here anyhow, because useful libraries
such as python stdlib's cgi.py assume their input file-like-object
supports readline with a size argument. If you use it, be aware your
application may not be portable to other conformant WSGI servers.
"""
bytes = "goodbye\nworld"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(3), bytes)
d.addCallback(self.assertEqual, "goo")
return d
def test_readlineMoreThan(self):
"""
Calling L{_InputStream.readline} with an integer which is greater than
the number of bytes in the next line returns only the next line.
"""
bytes = "some lines\nof text"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(20), bytes)
d.addCallback(self.assertEqual, "some lines\n")
return d
def test_readlineTwice(self):
"""
Calling L{_InputStream.readline} a second time returns the line
following the line returned by the first call.
"""
bytes = "first line\nsecond line\nlast line"
def readline(input):
input.readline()
return input.readline()
d = self._renderAndReturnReaderResult(readline, bytes)
d.addCallback(self.assertEqual, "second line\n")
return d
def test_readlineNone(self):
"""
Calling L{_InputStream.readline} with C{None} as an argument returns
one line from the input stream.
"""
bytes = "this is one line\nthis is another line"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(None), bytes)
d.addCallback(self.assertEqual, "this is one line\n")
return d
def test_readlineNegative(self):
"""
Calling L{_InputStream.readline} with a negative integer as an argument
returns one line from the input stream.
"""
bytes = "input stream line one\nline two"
d = self._renderAndReturnReaderResult(
lambda input: input.readline(-1), bytes)
d.addCallback(self.assertEqual, "input stream line one\n")
return d
def test_readlines(self):
"""
Calling L{_InputStream.readlines} with no arguments returns a list of
all lines from the input stream.
"""
bytes = "alice\nbob\ncarol"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(), bytes)
d.addCallback(self.assertEqual, ["alice\n", "bob\n", "carol"])
return d
def test_readlinesSome(self):
"""
Calling L{_InputStream.readlines} with an integer as an argument
returns a list of lines from the input stream with the argument serving
as an approximate bound on the total number of bytes to read.
"""
bytes = "123\n456\n789\n0"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(5), bytes)
def cbLines(lines):
# Make sure we got enough lines to make 5 bytes. Anything beyond
# that is fine too.
self.assertEqual(lines[:2], ["123\n", "456\n"])
d.addCallback(cbLines)
return d
def test_readlinesMoreThan(self):
"""
Calling L{_InputStream.readlines} with an integer which is greater than
the total number of bytes in the input stream returns a list of all
lines from the input.
"""
bytes = "one potato\ntwo potato\nthree potato"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(100), bytes)
d.addCallback(
self.assertEqual,
["one potato\n", "two potato\n", "three potato"])
return d
def test_readlinesAfterRead(self):
"""
Calling L{_InputStream.readlines} after a call to L{_InputStream.read}
returns lines starting at the byte after the last byte returned by the
C{read} call.
"""
bytes = "hello\nworld\nfoo"
def readlines(input):
input.read(7)
return input.readlines()
d = self._renderAndReturnReaderResult(readlines, bytes)
d.addCallback(self.assertEqual, ["orld\n", "foo"])
return d
def test_readlinesNone(self):
"""
Calling L{_InputStream.readlines} with C{None} as an argument returns
all lines from the input.
"""
bytes = "one fish\ntwo fish\n"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(None), bytes)
d.addCallback(self.assertEqual, ["one fish\n", "two fish\n"])
return d
def test_readlinesNegative(self):
"""
Calling L{_InputStream.readlines} with a negative integer as an
argument returns a list of all lines from the input.
"""
bytes = "red fish\nblue fish\n"
d = self._renderAndReturnReaderResult(
lambda input: input.readlines(-1), bytes)
d.addCallback(self.assertEqual, ["red fish\n", "blue fish\n"])
return d
def test_iterable(self):
"""
Iterating over L{_InputStream} produces lines from the input stream.
"""
bytes = "green eggs\nand ham\n"
d = self._renderAndReturnReaderResult(lambda input: list(input), bytes)
d.addCallback(self.assertEqual, ["green eggs\n", "and ham\n"])
return d
def test_iterableAfterRead(self):
"""
Iterating over L{_InputStream} after calling L{_InputStream.read}
produces lines from the input stream starting from the first byte after
the last byte returned by the C{read} call.
"""
bytes = "green eggs\nand ham\n"
def iterate(input):
input.read(3)
return list(input)
d = self._renderAndReturnReaderResult(iterate, bytes)
d.addCallback(self.assertEqual, ["en eggs\n", "and ham\n"])
return d
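# Note (added): the three TestCases below reuse every test in
# InputStreamTestMixin and override only getFileType(), so identical
# read/readline/readlines semantics are asserted against StringIO.StringIO,
# cStringIO.StringIO and tempfile.TemporaryFile alike.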
class InputStreamStringIOTests(InputStreamTestMixin, TestCase):
"""
Tests for L{_InputStream} when it is wrapped around a L{StringIO.StringIO}.
"""
def getFileType(self):
return StringIO.StringIO
class InputStreamCStringIOTests(InputStreamTestMixin, TestCase):
"""
Tests for L{_InputStream} when it is wrapped around a
L{cStringIO.StringIO}.
"""
def getFileType(self):
return cStringIO.StringIO
class InputStreamTemporaryFileTests(InputStreamTestMixin, TestCase):
"""
Tests for L{_InputStream} when it is wrapped around a L{tempfile.TemporaryFile}.
"""
def getFileType(self):
return tempfile.TemporaryFile
class StartResponseTests(WSGITestsMixin, TestCase):
"""
Tests for the I{start_response} parameter passed to the application object
by L{WSGIResource}.
"""
def test_status(self):
"""
The response status passed to the I{start_response} callable is written
as the status of the response to the request.
"""
channel = DummyChannel()
def applicationFactory():
def application(environ, startResponse):
startResponse('107 Strange message', [])
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertTrue(
channel.transport.written.getvalue().startswith(
'HTTP/1.1 107 Strange message'))
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def _headersTest(self, appHeaders, expectedHeaders):
"""
Verify that if the response headers given by C{appHeaders} are passed
to the I{start_response} callable, then the response header lines given
by C{expectedHeaders} plus I{Server} and I{Date} header lines are
included in the response.
"""
# Make the Date header value deterministic
self.patch(http, 'datetimeToString', lambda: 'Tuesday')
channel = DummyChannel()
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', appHeaders)
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
response = channel.transport.written.getvalue()
headers, rest = response.split('\r\n\r\n', 1)
headerLines = headers.split('\r\n')[1:]
headerLines.sort()
allExpectedHeaders = expectedHeaders + [
'Date: Tuesday',
'Server: ' + version,
'Transfer-Encoding: chunked']
allExpectedHeaders.sort()
self.assertEqual(headerLines, allExpectedHeaders)
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_headers(self):
"""
The headers passed to the I{start_response} callable are included in
the response as are the required I{Date} and I{Server} headers and the
necessary connection (hop to hop) header I{Transfer-Encoding}.
"""
return self._headersTest(
[('foo', 'bar'), ('baz', 'quux')],
['Baz: quux', 'Foo: bar'])
def test_applicationProvidedContentType(self):
"""
If I{Content-Type} is included in the headers passed to the
I{start_response} callable, one I{Content-Type} header is included in
the response.
"""
return self._headersTest(
[('content-type', 'monkeys are great')],
['Content-Type: monkeys are great'])
def test_applicationProvidedServerAndDate(self):
"""
If either I{Server} or I{Date} is included in the headers passed to the
I{start_response} callable, they are disregarded.
"""
return self._headersTest(
[('server', 'foo'), ('Server', 'foo'),
('date', 'bar'), ('dATE', 'bar')],
[])
def test_delayedUntilReturn(self):
"""
Nothing is written in response to a request when the I{start_response}
callable is invoked. If the iterator returned by the application
object produces only empty strings, the response is written after the
last element is produced.
"""
channel = DummyChannel()
intermediateValues = []
def record():
intermediateValues.append(channel.transport.written.getvalue())
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [('foo', 'bar'), ('baz', 'quux')])
yield ''
record()
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertEqual(intermediateValues, [''])
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_delayedUntilContent(self):
"""
Nothing is written in response to a request when the I{start_response}
callable is invoked. Once a non-empty string has been produced by the
iterator returned by the application object, the response status and
headers are written.
"""
channel = DummyChannel()
intermediateValues = []
def record():
intermediateValues.append(channel.transport.written.getvalue())
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [('foo', 'bar')])
yield ''
record()
yield 'foo'
record()
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertFalse(intermediateValues[0])
self.assertTrue(intermediateValues[1])
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_content(self):
"""
Content produced by the iterator returned by the application object is
written to the request as it is produced.
"""
channel = DummyChannel()
intermediateValues = []
def record():
intermediateValues.append(channel.transport.written.getvalue())
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [('content-length', '6')])
yield 'foo'
record()
yield 'bar'
record()
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertEqual(
self.getContentFromResponse(intermediateValues[0]),
'foo')
self.assertEqual(
self.getContentFromResponse(intermediateValues[1]),
'foobar')
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_multipleStartResponse(self):
"""
If the I{start_response} callable is invoked multiple times before any
data for the response body is produced, the values from the last call
are used.
"""
channel = DummyChannel()
def applicationFactory():
def application(environ, startResponse):
startResponse('100 Foo', [])
startResponse('200 Bar', [])
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertTrue(
channel.transport.written.getvalue().startswith(
'HTTP/1.1 200 Bar\r\n'))
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_startResponseWithException(self):
"""
If the I{start_response} callable is invoked with a third positional
argument before the status and headers have been written to the
response, the status and headers become the newly supplied values.
"""
channel = DummyChannel()
def applicationFactory():
def application(environ, startResponse):
startResponse('100 Foo', [], (Exception, Exception("foo"), None))
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertTrue(
channel.transport.written.getvalue().startswith(
'HTTP/1.1 100 Foo\r\n'))
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_startResponseWithExceptionTooLate(self):
"""
If the I{start_response} callable is invoked with a third positional
argument after the status and headers have been written to the
response, the supplied I{exc_info} values are re-raised to the
application.
"""
channel = DummyChannel()
class SomeException(Exception):
pass
try:
raise SomeException()
except:
excInfo = exc_info()
reraised = []
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [])
yield 'foo'
try:
startResponse('500 ERR', [], excInfo)
except:
reraised.append(exc_info())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertTrue(
channel.transport.written.getvalue().startswith(
'HTTP/1.1 200 OK\r\n'))
self.assertEqual(reraised[0][0], excInfo[0])
self.assertEqual(reraised[0][1], excInfo[1])
self.assertEqual(reraised[0][2].tb_next, excInfo[2])
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_write(self):
"""
I{start_response} returns the I{write} callable which can be used to
write bytes to the response body without buffering.
"""
channel = DummyChannel()
intermediateValues = []
def record():
intermediateValues.append(channel.transport.written.getvalue())
def applicationFactory():
def application(environ, startResponse):
write = startResponse('100 Foo', [('content-length', '6')])
write('foo')
record()
write('bar')
record()
return iter(())
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertEqual(
self.getContentFromResponse(intermediateValues[0]),
'foo')
self.assertEqual(
self.getContentFromResponse(intermediateValues[1]),
'foobar')
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
class ApplicationTests(WSGITestsMixin, TestCase):
"""
Tests for things which are done to the application object and the iterator
it returns.
"""
def enableThreads(self):
self.reactor = reactor
self.threadpool = ThreadPool()
self.threadpool.start()
self.addCleanup(self.threadpool.stop)
def test_close(self):
"""
If the application object returns an iterator which also has a I{close}
method, that method is called after iteration is complete.
"""
channel = DummyChannel()
class Result:
def __init__(self):
self.open = True
def __iter__(self):
for i in range(3):
if self.open:
yield str(i)
def close(self):
self.open = False
result = Result()
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [('content-length', '3')])
return result
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertEqual(
self.getContentFromResponse(
channel.transport.written.getvalue()),
'012')
self.assertFalse(result.open)
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''])
return d
def test_applicationCalledInThread(self):
"""
The application object is invoked and iterated in a thread which is not
the reactor thread.
"""
self.enableThreads()
invoked = []
def applicationFactory():
def application(environ, startResponse):
def result():
for i in range(3):
invoked.append(get_ident())
yield str(i)
invoked.append(get_ident())
startResponse('200 OK', [('content-length', '3')])
return result()
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
self.assertNotIn(get_ident(), invoked)
self.assertEqual(len(set(invoked)), 1)
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory,
DummyChannel, 'GET', '1.1', [], [''])
return d
def test_writeCalledFromThread(self):
"""
The I{write} callable returned by I{start_response} calls the request's
C{write} method in the reactor thread.
"""
self.enableThreads()
invoked = []
class ThreadVerifier(Request):
def write(self, bytes):
invoked.append(get_ident())
return Request.write(self, bytes)
def applicationFactory():
def application(environ, startResponse):
write = startResponse('200 OK', [])
write('foo')
return iter(())
return application
d, requestFactory = self.requestFactoryFactory(ThreadVerifier)
def cbRendered(ignored):
self.assertEqual(set(invoked), set([get_ident()]))
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory, DummyChannel,
'GET', '1.1', [], [''])
return d
def test_iteratedValuesWrittenFromThread(self):
"""
Strings produced by the iterator returned by the application object are
written to the request in the reactor thread.
"""
self.enableThreads()
invoked = []
class ThreadVerifier(Request):
def write(self, bytes):
invoked.append(get_ident())
return Request.write(self, bytes)
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [])
yield 'foo'
return application
d, requestFactory = self.requestFactoryFactory(ThreadVerifier)
def cbRendered(ignored):
self.assertEqual(set(invoked), set([get_ident()]))
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory, DummyChannel,
'GET', '1.1', [], [''])
return d
def test_statusWrittenFromThread(self):
"""
The response status is set on the request object in the reactor thread.
"""
self.enableThreads()
invoked = []
class ThreadVerifier(Request):
def setResponseCode(self, code, message):
invoked.append(get_ident())
return Request.setResponseCode(self, code, message)
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [])
return iter(())
return application
d, requestFactory = self.requestFactoryFactory(ThreadVerifier)
def cbRendered(ignored):
self.assertEqual(set(invoked), set([get_ident()]))
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory, DummyChannel,
'GET', '1.1', [], [''])
return d
def test_connectionClosedDuringIteration(self):
"""
If the request connection is lost while the application object is being
iterated, iteration is stopped.
"""
class UnreliableConnection(Request):
"""
This is a request which pretends its connection is lost immediately
after the first write is done to it.
"""
def write(self, bytes):
self.connectionLost(Failure(ConnectionLost("No more connection")))
self.badIter = False
def appIter():
yield "foo"
self.badIter = True
raise Exception("Should not have gotten here")
def applicationFactory():
def application(environ, startResponse):
startResponse('200 OK', [])
return appIter()
return application
d, requestFactory = self.requestFactoryFactory(UnreliableConnection)
def cbRendered(ignored):
self.assertFalse(self.badIter, "Should not have resumed iteration")
d.addCallback(cbRendered)
self.lowLevelRender(
requestFactory, applicationFactory, DummyChannel,
'GET', '1.1', [], [''])
return self.assertFailure(d, ConnectionLost)
def _internalServerErrorTest(self, application):
channel = DummyChannel()
def applicationFactory():
return application
d, requestFactory = self.requestFactoryFactory()
def cbRendered(ignored):
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
self.assertTrue(
channel.transport.written.getvalue().startswith(
'HTTP/1.1 500 Internal Server Error'))
d.addCallback(cbRendered)
request = self.lowLevelRender(
requestFactory, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
return d
def test_applicationExceptionBeforeStartResponse(self):
"""
If the application raises an exception before calling I{start_response}
then the response status is I{500} and the exception is logged.
"""
def application(environ, startResponse):
raise RuntimeError("This application had some error.")
return self._internalServerErrorTest(application)
def test_applicationExceptionAfterStartResponse(self):
"""
If the application calls I{start_response} but then raises an exception
before any data is written to the response then the response status is
I{500} and the exception is logged.
"""
def application(environ, startResponse):
startResponse('200 OK', [])
raise RuntimeError("This application had some error.")
return self._internalServerErrorTest(application)
def _connectionClosedTest(self, application, responseContent):
channel = DummyChannel()
def applicationFactory():
return application
d, requestFactory = self.requestFactoryFactory()
# Capture the request so we can disconnect it later on.
requests = []
def requestFactoryWrapper(*a, **kw):
requests.append(requestFactory(*a, **kw))
return requests[-1]
def ebRendered(ignored):
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
response = channel.transport.written.getvalue()
self.assertTrue(response.startswith('HTTP/1.1 200 OK'))
# Chunked transfer-encoding makes this a little messy.
self.assertIn(responseContent, response)
d.addErrback(ebRendered)
request = self.lowLevelRender(
requestFactoryWrapper, applicationFactory,
lambda: channel, 'GET', '1.1', [], [''], None, [])
# By now the connection should be closed.
self.assertTrue(channel.transport.disconnected)
# Give it a little push to go the rest of the way.
requests[0].connectionLost(Failure(ConnectionLost("All gone")))
return d
def test_applicationExceptionAfterWrite(self):
"""
If the application raises an exception after the response status has
already been sent then the connection is closed and the exception is
logged.
"""
responseContent = (
'Some bytes, triggering the server to start sending the response')
def application(environ, startResponse):
startResponse('200 OK', [])
yield responseContent
raise RuntimeError("This application had some error.")
return self._connectionClosedTest(application, responseContent)
def test_applicationCloseException(self):
"""
If the application returns a closeable iterator and the C{close} method
raises an exception when called then the connection is still closed and
the exception is logged.
"""
responseContent = 'foo'
class Application(object):
def __init__(self, environ, startResponse):
startResponse('200 OK', [])
def __iter__(self):
yield responseContent
def close(self):
raise RuntimeError("This application had some error.")
return self._connectionClosedTest(Application, responseContent)
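# Illustrative sketch (added; not part of the original tests): the minimal
# production-style mount of a WSGI application under Twisted, using the same
# WSGIResource/Site API exercised above. The port number is arbitrary.
#
#     from twisted.internet import reactor
#     from twisted.web.server import Site
#     from twisted.web.wsgi import WSGIResource
#
#     def application(environ, start_response):
#         start_response('200 OK', [('Content-Type', 'text/plain')])
#         return ['hello\n']
#
#     resource = WSGIResource(reactor, reactor.getThreadPool(), application)
#     reactor.listenTCP(8080, Site(resource))
#     reactor.run()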
| bsd-3-clause |
aurofable/medhack-server | venv/lib/python2.7/encodings/punycode.py | 586 | 6813 | # -*- coding: iso-8859-1 -*-
""" Codec for the Punicode encoding, as specified in RFC 3492
Written by Martin v. Löwis.
"""
import codecs
##################### Encoding #####################################
def segregate(str):
"""3.1 Basic code point segregation"""
base = []
extended = {}
for c in str:
if ord(c) < 128:
base.append(c)
else:
extended[c] = 1
extended = extended.keys()
extended.sort()
return "".join(base).encode("ascii"),extended
def selective_len(str, max):
"""Return the length of str, considering only characters below max."""
res = 0
for c in str:
if ord(c) < max:
res += 1
return res
def selective_find(str, char, index, pos):
"""Return a pair (index, pos), indicating the next occurrence of
char in str. index is the position of the character considering
only ordinals up to and including char, and pos is the position in
the full string. index/pos is the starting position in the full
string."""
l = len(str)
while 1:
pos += 1
if pos == l:
return (-1, -1)
c = str[pos]
if c == char:
return index+1, pos
elif c < char:
index += 1
def insertion_unsort(str, extended):
"""3.2 Insertion unsort coding"""
oldchar = 0x80
result = []
oldindex = -1
for c in extended:
index = pos = -1
char = ord(c)
curlen = selective_len(str, char)
delta = (curlen+1) * (char - oldchar)
while 1:
index,pos = selective_find(str,c,index,pos)
if index == -1:
break
delta += index - oldindex
result.append(delta-1)
oldindex = index
delta = 0
oldchar = char
return result
def T(j, bias):
# Punycode parameters: tmin = 1, tmax = 26, base = 36
res = 36 * (j + 1) - bias
if res < 1: return 1
if res > 26: return 26
return res
digits = "abcdefghijklmnopqrstuvwxyz0123456789"
def generate_generalized_integer(N, bias):
"""3.3 Generalized variable-length integers"""
result = []
j = 0
while 1:
t = T(j, bias)
if N < t:
result.append(digits[N])
return result
result.append(digits[t + ((N - t) % (36 - t))])
N = (N - t) // (36 - t)
j += 1
def adapt(delta, first, numchars):
if first:
delta //= 700
else:
delta //= 2
delta += delta // numchars
# ((base - tmin) * tmax) // 2 == 455
divisions = 0
while delta > 455:
delta = delta // 35 # base - tmin
divisions += 36
bias = divisions + (36 * delta // (delta + 38))
return bias
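# Note (added): adapt() above is the RFC 3492 section 6.1 bias adaptation:
# the first delta is damped by 700 and later deltas are halved, the while
# loop divides by base - tmin == 35 until delta <= 455
# (== ((base - tmin) * tmax) // 2), and the final expression applies the
# skew == 38 correction.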
def generate_integers(baselen, deltas):
"""3.4 Bias adaptation"""
# Punycode parameters: initial bias = 72, damp = 700, skew = 38
result = []
bias = 72
for points, delta in enumerate(deltas):
s = generate_generalized_integer(delta, bias)
result.extend(s)
bias = adapt(delta, points==0, baselen+points+1)
return "".join(result)
def punycode_encode(text):
base, extended = segregate(text)
base = base.encode("ascii")
deltas = insertion_unsort(text, extended)
extended = generate_integers(len(base), deltas)
if base:
return base + "-" + extended
return extended
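# Worked example (added for illustration): for u"b\xfccher", segregate()
# returns the base string "bcher" plus the extended set [u'\xfc'];
# insertion_unsort() turns the single insertion point into a delta and
# generate_integers() renders it as "kva", so punycode_encode() yields
# "bcher-kva" -- consistent with the well-known IDNA form xn--bcher-kva.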
##################### Decoding #####################################
def decode_generalized_number(extended, extpos, bias, errors):
"""3.3 Generalized variable-length integers"""
result = 0
w = 1
j = 0
while 1:
try:
char = ord(extended[extpos])
except IndexError:
if errors == "strict":
raise UnicodeError, "incomplete punicode string"
return extpos + 1, None
extpos += 1
if 0x41 <= char <= 0x5A: # A-Z
digit = char - 0x41
elif 0x30 <= char <= 0x39:
digit = char - 22 # ord('0') == 0x30; 0x30 - 26 == 22, so '0'..'9' -> 26..35
elif errors == "strict":
raise UnicodeError("Invalid extended code point '%s'"
% extended[extpos])
else:
return extpos, None
t = T(j, bias)
result += digit * w
if digit < t:
return extpos, result
w = w * (36 - t)
j += 1
def insertion_sort(base, extended, errors):
"""3.2 Insertion unsort coding"""
char = 0x80
pos = -1
bias = 72
extpos = 0
while extpos < len(extended):
newpos, delta = decode_generalized_number(extended, extpos,
bias, errors)
if delta is None:
# There was an error in decoding. We can't continue because
# synchronization is lost.
return base
pos += delta+1
char += pos // (len(base) + 1)
if char > 0x10FFFF:
if errors == "strict":
raise UnicodeError, ("Invalid character U+%x" % char)
char = ord('?')
pos = pos % (len(base) + 1)
base = base[:pos] + unichr(char) + base[pos:]
bias = adapt(delta, (extpos == 0), len(base))
extpos = newpos
return base
def punycode_decode(text, errors):
pos = text.rfind("-")
if pos == -1:
base = ""
extended = text
else:
base = text[:pos]
extended = text[pos+1:]
base = unicode(base, "ascii", errors)
extended = extended.upper()
return insertion_sort(base, extended, errors)
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
res = punycode_encode(input)
return res, len(input)
def decode(self,input,errors='strict'):
if errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling "+errors
res = punycode_decode(input, errors)
return res, len(input)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return punycode_encode(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
if self.errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling "+self.errors
return punycode_decode(input, self.errors)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='punycode',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
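# Illustrative usage (added; not part of the stdlib module). Once the
# encodings package picks this module up, the codec is reachable by name:
#
#     >>> u"b\xfccher".encode("punycode")
#     'bcher-kva'
#     >>> "bcher-kva".decode("punycode")
#     u'b\xfccher'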
| mit |
ryneeverett/cartridge | cartridge/shop/translation.py | 5 | 1121 | from modeltranslation.translator import translator, TranslationOptions
from mezzanine.conf import settings
from mezzanine.core.translation import (TranslatedDisplayable,
TranslatedRichText)
from cartridge.shop.models import (Category, Product, ProductOption,
ProductImage, ProductVariation)
class TranslatedProduct(TranslatedDisplayable, TranslatedRichText):
fields = ()
class TranslatedProductImage(TranslationOptions):
fields = ('description',)
class TranslatedProductOption(TranslationOptions):
fields = ('name',)
class TranslatedProductVariation(TranslationOptions):
fields = tuple(('option%s' % opt[0] for opt in
settings.SHOP_OPTION_TYPE_CHOICES))
class TranslatedCategory(TranslatedRichText):
fields = ()
translator.register(Product, TranslatedProduct)
translator.register(ProductImage, TranslatedProductImage)
translator.register(ProductOption, TranslatedProductOption)
translator.register(ProductVariation, TranslatedProductVariation)
translator.register(Category, TranslatedCategory)
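# Note (added): with django-modeltranslation, each register() call above
# grows per-language database columns for the listed fields (e.g. name_en,
# name_fr for ProductOption.name, depending on the project's LANGUAGES
# setting); TranslatedProductVariation derives its field tuple from
# SHOP_OPTION_TYPE_CHOICES so every configured option column is translated.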
| bsd-2-clause |
FBRTMaka/ooi-ui-services | ooiservices/tests/test_routes.py | 2 | 7359 | #!/usr/bin/env python
'''
Specific testing of routes.
'''
__author__ = 'Edna Donoughe'
import unittest
import json
from base64 import b64encode
from flask import url_for
from ooiservices.app import create_app, db
from ooiservices.app.models import PlatformDeployment, InstrumentDeployment, Stream, StreamParameter
from ooiservices.app.models import Organization, User, UserScope
import flask.ext.whooshalchemy as whooshalchemy
import datetime as dt
app = create_app('TESTING_CONFIG')
app.config['WHOOSH_BASE'] = 'ooiservices/whoosh_index'
whooshalchemy.whoosh_index(app, PlatformDeployment)
'''
These tests are in addition to the normal testing performed by coverage; each of
these tests is meant to validate model logic outside of db management.
'''
class UserTestCase(unittest.TestCase):
def setUp(self):
self.app = app
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
test_username = 'admin'
test_password = 'test'
Organization.insert_org()
User.insert_user(username=test_username, password=test_password)
self.client = self.app.test_client(use_cookies=False)
UserScope.insert_scopes()
admin = User.query.filter_by(user_name='admin').first()
scope = UserScope.query.filter_by(scope_name='user_admin').first()
admin.scopes.append(scope)
db.session.add(admin)
db.session.commit()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def get_api_headers(self, username, password):
return {
'Authorization': 'Basic ' + b64encode(
(username + ':' + password).encode('utf-8')).decode('utf-8'),
'Accept': 'application/json',
'Content-Type': 'application/json'
}
#Test [GET] /platform_deployments - 'main.get_platform_deployments'
def test_route_get_platform_deployments(self):
#Create a sample data set.
platform_ref = PlatformDeployment(reference_designator='CE01ISSM')
db.session.add(platform_ref)
db.session.commit()
platform_ref2 = PlatformDeployment(reference_designator='GS05MOAS-PG002')
db.session.add(platform_ref2)
db.session.commit()
content_type = 'application/json'
response = self.client.get(url_for('main.get_platform_deployments'), content_type = content_type)
all_data = response.data
expected_data = json.loads(response.data)
self.assertTrue(response.status_code == 200)
response = self.client.get(url_for('main.get_platform_deployment', id='CE01ISSM'), content_type=content_type)
self.assertTrue(response.status_code == 200)
response = self.client.get('/platform_deployments?array_id=3')
self.assertTrue(response.status_code == 200)
response = self.client.get('/platform_deployments?array_id=999')
self.assertTrue(response.status_code == 200)
data = json.loads(response.data)
no_data = {'platform_deployments': []}
self.assertTrue(data == no_data)
response = self.client.get('/platform_deployments?ref_id="GS05MOAS-PG002"')
self.assertTrue(response.status_code == 200)
# Search for a nonexistent platform; all platforms are returned
response = self.client.get('/platform_deployments?ref_id="badthing"')
self.assertTrue(response.status_code == 200)
data = json.loads(response.data)
self.assertTrue(data == expected_data)
response = self.client.get('/platform_deployments?search="CE01ISSM"')
self.assertTrue(response.status_code == 200)
# Test [GET] /parameters - 'main.get_parameters'
def test_get_parameters(self):
'''
parameter(id=preferred_timestamp): {
"data_type": null,
"id": 1,
"long_name": null,
"parameter_name": "preferred_timestamp",
"short_name": null,
"standard_name": null,
"units": null
}
'''
content_type = 'application/json'
#Create a sample data set
parameter_name = StreamParameter(stream_parameter_name='preferred_timestamp')
db.session.add(parameter_name)
db.session.commit()
# Get all parameters
response = self.client.get(url_for('main.get_parameters'), content_type=content_type)
self.assertTrue(response.status_code == 200)
# Get parameter
response = self.client.get(url_for('main.get_parameter', id='preferred_timestamp'), content_type=content_type)
self.assertTrue(response.status_code == 200)
def test_organization(self):
content_type = 'application/json'
# get all organizations
response = self.client.get('/organization', content_type=content_type)
self.assertEquals(response.status_code, 200)
data = json.loads(response.data)
expectation = {u'organizations':[{u'id':1, u'organization_long_name' : None, u'organization_name' : u'RPS ASA', u'image_url':None}]}
self.assertEquals(data, expectation)
# Get organization by id
response = self.client.get('/organization/1', content_type=content_type)
self.assertEquals(response.status_code, 200)
data = json.loads(response.data)
self.assertEquals(data, expectation['organizations'][0])
# Get non-existant organization (bad id value); expect failure
response = self.client.get('/organization/999', content_type=content_type)
self.assertEquals(response.status_code, 204)
# # Test [GET] /display_name - 'main.get_display_name'
# def test_get_display_name(self):
#
# content_type = 'application/json'
#
# # Create a sample data set.
# platform_ref = VocabNames(reference_designator='CE01ISSM', level_one='Endurance', level_two='OR Inshore Surface Mooring')
# db.session.add(platform_ref)
# db.session.commit()
#
# platform_ref2 = VocabNames(reference_designator='CE01ISSM-MFC31', level_one='Endurance', level_two='OR Inshore Surface Mooring',
# level_three='Multi-Function Node')
# db.session.add(platform_ref2)
# db.session.commit()
# response = self.client.get(url_for('main.get_display_name', reference_designator='CE01ISSM-MFC31'), content_type=content_type)
# self.assertEquals(response.status_code, 200)
#
# response = self.client.get(url_for('main.get_display_name'), content_type=content_type)
# self.assertEquals(response.status_code, 204)
#
# response = self.client.get(url_for('main.get_display_name', reference_designator='GS03FLMA-RIXXX'), content_type=content_type)
# self.assertEquals(response.status_code, 204)
#
# response = self.client.get(url_for('main.get_display_name', reference_designator='GS03FLMA-RIXXX-BAD'), content_type=content_type)
# self.assertEquals(response.status_code, 204)
| apache-2.0 |
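# A minimal, self-contained sketch of the Flask test pattern used above
# (test client + JSON payload comparison). The app and route here are
# hypothetical stand-ins, not the project's real endpoints.
import json
import unittest

from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/platform_deployments')
def platform_deployments():
    # Stand-in for the real endpoint exercised by the tests above.
    return jsonify({'platform_deployments': []})

class PlatformDeploymentsTestCase(unittest.TestCase):
    def setUp(self):
        self.client = app.test_client()

    def test_empty_list(self):
        response = self.client.get('/platform_deployments')
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.data)
        self.assertEqual(data, {'platform_deployments': []})

if __name__ == '__main__':
    unittest.main()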
devinbalkind/eden | models/00_db.py | 4 | 6534 | # -*- coding: utf-8 -*-
"""
Import Modules
Configure the Database
Instantiate Classes
"""
if settings.get_L10n_languages_readonly():
# Make the Language files read-only for improved performance
T.is_writable = False
get_vars = request.get_vars
# Are we running in debug mode?
request_debug = get_vars.get("debug", None)
s3.debug = request_debug or settings.get_base_debug()
if request_debug:
# Also override log level:
settings.log.level = "debug"
if s3.debug:
# Reload all modules every request
# Doesn't catch s3cfg or s3/*
from gluon.custom_import import track_changes
track_changes(True)
import datetime
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
########################
# Database Configuration
########################
migrate = settings.get_base_migrate()
fake_migrate = settings.get_base_fake_migrate()
if migrate:
check_reserved = ["mysql", "postgres"]
else:
check_reserved = None
(db_string, pool_size) = settings.get_database_string()
if db_string.find("sqlite") != -1:
db = DAL(db_string,
check_reserved=check_reserved,
migrate_enabled = migrate,
fake_migrate_all = fake_migrate,
lazy_tables = not migrate)
# on SQLite 3.6.19+ this enables foreign key support (included in Python 2.7+)
# db.executesql("PRAGMA foreign_keys=ON")
else:
try:
if db_string.find("mysql") != -1:
# Use MySQLdb where available (pymysql has given broken pipes)
# - done automatically now, no need to add this manually
#try:
# import MySQLdb
# from gluon.dal import MySQLAdapter
# MySQLAdapter.driver = MySQLdb
#except ImportError:
# # Fallback to pymysql
# pass
if check_reserved:
check_reserved = ["postgres"]
db = DAL(db_string,
check_reserved = check_reserved,
pool_size = pool_size,
migrate_enabled = migrate,
lazy_tables = not migrate)
else:
# PostgreSQL
if check_reserved:
check_reserved = ["mysql"]
db = DAL(db_string,
check_reserved = check_reserved,
pool_size = pool_size,
migrate_enabled = migrate,
lazy_tables = not migrate)
except:
db_type = db_string.split(":", 1)[0]
db_location = db_string.split("@", 1)[1]
raise(HTTP(503, "Cannot connect to %s Database: %s" % (db_type, db_location)))
current.db = db
db.set_folder("upload")
# Sessions Storage
if settings.get_base_session_memcache():
# Store sessions in Memcache
from gluon.contrib.memcache import MemcacheClient
cache.memcache = MemcacheClient(request,
[settings.get_base_session_memcache()])
from gluon.contrib.memdb import MEMDB
session.connect(request, response, db=MEMDB(cache.memcache))
####################################################################
# Instantiate Classes from Modules #
# - store instances in current to be accessible from other modules #
####################################################################
from gluon.tools import Mail
mail = Mail()
current.mail = mail
from gluon.storage import Messages
messages = Messages(T)
current.messages = messages
ERROR = Messages(T)
current.ERROR = ERROR
# Import the S3 Framework
if update_check_needed:
# Reload the Field definitions
reload(s3base.s3fields)
else:
import s3 as s3base
# Set up logger (before any module attempts to use it!)
import s3log
s3log.S3Log.setup()
# AAA
current.auth = auth = s3base.AuthS3()
# Use session for persistent per-user variables
# - beware of a user having multiple tabs open!
# - don't save callables or class instances as these can't be pickled
if not session.s3:
session.s3 = Storage()
# Use username instead of email address for logins
# - would probably require further customisation
# to get this fully-working within Eden as it's not a tested configuration
#auth.settings.login_userfield = "username"
auth.settings.hmac_key = settings.get_auth_hmac_key()
auth.define_tables(migrate=migrate, fake_migrate=fake_migrate)
current.audit = audit = s3base.S3Audit(migrate=migrate, fake_migrate=fake_migrate)
# Shortcuts for models/controllers/views
s3_has_role = auth.s3_has_role
s3_has_permission = auth.s3_has_permission
s3_logged_in_person = auth.s3_logged_in_person
# CRUD
s3.crud = Storage()
# S3 Custom Validators and Widgets, imported here into the global
# namespace in order to access them without the s3base namespace prefix
s3_action_buttons = s3base.S3CRUD.action_buttons
s3_fullname = s3base.s3_fullname
S3ResourceHeader = s3base.S3ResourceHeader
from s3.s3navigation import s3_rheader_tabs
from s3.s3validators import *
from s3.s3widgets import *
from s3.s3data import *
# GIS Module
gis = s3base.GIS()
current.gis = gis
# s3_request
s3_request = s3base.s3_request
# Field Selectors
FS = s3base.FS
# S3XML
s3xml = s3base.S3XML()
current.xml = s3xml
# Messaging
msg = s3base.S3Msg()
current.msg = msg
# Sync
sync = s3base.S3Sync()
current.sync = sync
# -----------------------------------------------------------------------------
def s3_clear_session():
# CRUD last opened records (rcvars)
s3base.s3_remove_last_record_id()
# Session-owned records
if "owned_records" in session:
del session["owned_records"]
if "s3" in session:
s3 = session.s3
opts = ["hrm", "report_options", "utc_offset", "deduplicate"]
for o in opts:
if o in s3:
del s3[o]
# -----------------------------------------------------------------------------
def s3_auth_on_login(form):
"""
Actions to be performed upon successful login
Do not redirect from here!
"""
s3_clear_session()
# -----------------------------------------------------------------------------
def s3_auth_on_logout(user):
"""
Actions to be performed after logout
Do not redirect from here!
"""
s3_clear_session()
# END =========================================================================
| mit |
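# A minimal standalone sketch of the DAL configuration pattern above, using
# pydal directly (pip install pydal). The sqlite URI and table are
# illustrative; Eden derives migrate/lazy_tables from deployment settings.
from pydal import DAL, Field

migrate = True  # first run creates tables; production sets this False
db = DAL('sqlite://storage.db',
         migrate_enabled=migrate,
         lazy_tables=not migrate)  # lazy tables skip definition cost when not migrating
db.define_table('person', Field('name'))
db.person.insert(name='example')
db.commit()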
playingaround2017/test123 | gamera/pixmaps/img2img.py | 2 | 2515 | #
# This file has been taken from wxpython (see the file
# wx/tools/img2img.py in the wxpython source distribution)
#
# Copyright (c) 1998 Julian Smart, Robert Roebling et al
#
# This program may be freely used, copied and distributed under
# the terms of the wxWindows Library Licence, Version 3. See
# the file "copyright" of the wxpython distribution from
# http://wxpython.org/ for details.
#
"""
Common routines for the image converter utilities.
"""
import sys, os, glob, getopt, string
import wx
if wx.Platform == "__WXGTK__":
# some bitmap related things need to have a wxApp initialized...
app = wx.PySimpleApp()
wx.InitAllImageHandlers()
def convert(file, maskClr, outputDir, outputName, outType, outExt):
if string.lower(os.path.splitext(file)[1]) == ".ico":
icon = wx.Icon(file, wx.BITMAP_TYPE_ICO)
img = wx.BitmapFromIcon(icon)
else:
img = wx.Bitmap(file, wx.BITMAP_TYPE_ANY)
if not img.Ok():
return 0, file + " failed to load!"
else:
if maskClr:
om = img.GetMask()
mask = wx.MaskColour(img, maskClr)
img.SetMask(mask)
if om is not None:
om.Destroy()
if outputName:
newname = outputName
else:
newname = os.path.join(outputDir,
os.path.basename(os.path.splitext(file)[0]) + outExt)
if img.SaveFile(newname, outType):
return 1, file + " converted to " + newname
else:
img = wx.ImageFromBitmap(img)
if img.SaveFile(newname, outType):
return 1, "ok"
else:
return 0, file + " failed to save!"
def main(args, outType, outExt, doc):
if not args or ("-h" in args):
print doc
return
outputDir = ""
maskClr = None
outputName = None
try:
opts, fileArgs = getopt.getopt(args, "m:n:o:")
except getopt.GetoptError:
        print doc
return
for opt, val in opts:
if opt == "-m":
maskClr = val
elif opt == "-n":
outputName = val
elif opt == "-o":
outputDir = val
if not fileArgs:
print doc
return
for arg in fileArgs:
for file in glob.glob(arg):
if not os.path.isfile(file):
continue
ok, msg = convert(file, maskClr, outputDir, outputName,
outType, outExt)
print msg
| gpl-2.0 |
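# How wxPython's wrapper scripts drive this module (a sketch modeled on
# img2png.py; the docstring doubles as the -h usage text passed in as `doc`).
"""img2png.py -- convert several image formats to PNG.

Usage: img2png.py [-o output_dir] [-m #rrggbb] [-n outputname] imagefiles...
"""
import sys
import wx
import img2img

def main():
    # PNG output type and extension; img2img handles option parsing and globbing.
    img2img.main(sys.argv[1:], wx.BITMAP_TYPE_PNG, ".png", __doc__)

if __name__ == '__main__':
    main()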
topiaruss/django-filer | filer/migrations/0012_renaming_folderpermissions.py | 49 | 10617 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_column('filer_folderpermission', 'can_edit_new', 'can_edit')
db.rename_column('filer_folderpermission', 'can_read_new', 'can_read')
db.rename_column('filer_folderpermission', 'can_add_children_new', 'can_add_children')
def backwards(self, orm):
db.rename_column('filer_folderpermission', 'can_edit', 'can_edit_new')
db.rename_column('filer_folderpermission', 'can_read', 'can_read_new')
db.rename_column('filer_folderpermission', 'can_add_children', 'can_add_children_new')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.clipboard': {
'Meta': {'object_name': 'Clipboard'},
'files': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'in_clipboards'", 'symmetrical': 'False', 'through': "orm['filer.ClipboardItem']", 'to': "orm['filer.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'filer_clipboards'", 'to': "orm['auth.User']"})
},
'filer.clipboarditem': {
'Meta': {'object_name': 'ClipboardItem'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Clipboard']"}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folderpermission': {
'Meta': {'object_name': 'FolderPermission'},
'can_add_children': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'can_edit': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'can_read': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'everybody': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Folder']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_folder_permissions'", 'null': 'True', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_folder_permissions'", 'null': 'True', 'to': "orm['auth.User']"})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['filer']
| bsd-3-clause |
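# A minimal sketch of the South rename pattern the migration above follows:
# rename in forwards(), undo in backwards(), and keep the frozen `models`
# dict in step with the new column names. App/table/field names here are
# hypothetical.
from south.db import db
from south.v2 import SchemaMigration

class Migration(SchemaMigration):
    def forwards(self, orm):
        db.rename_column('app_mymodel', 'old_name', 'new_name')

    def backwards(self, orm):
        db.rename_column('app_mymodel', 'new_name', 'old_name')

    models = {
        'app.mymodel': {
            'Meta': {'object_name': 'MyModel'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
        }
    }
    complete_apps = ['app']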
ogenstad/ansible | lib/ansible/modules/cloud/google/gcp_healthcheck.py | 48 | 15302 | #!/usr/bin/python
# Copyright 2017 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_healthcheck
version_added: "2.4"
short_description: Create, Update or Destroy a Healthcheck.
description:
- Create, Update or Destroy a Healthcheck. Currently only HTTP and
HTTPS Healthchecks are supported. Healthchecks are used to monitor
individual instances, managed instance groups and/or backend
    services. Healthchecks are reusable.
- Visit
U(https://cloud.google.com/compute/docs/load-balancing/health-checks)
for an overview of Healthchecks on GCP.
- See
U(https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks) for
API details on HTTP Healthchecks.
- See
U(https://cloud.google.com/compute/docs/reference/latest/httpsHealthChecks)
    for more details on the HTTPS Healthcheck API.
requirements:
- "python >= 2.6"
- "google-api-python-client >= 1.6.2"
- "google-auth >= 0.9.0"
- "google-auth-httplib2 >= 0.0.2"
notes:
- Only supports HTTP and HTTPS Healthchecks currently.
author:
- "Tom Melendez (@supertom) <[email protected]>"
options:
check_interval:
description:
- How often (in seconds) to send a health check.
default: 5
healthcheck_name:
description:
- Name of the Healthcheck.
required: true
healthcheck_type:
description:
- Type of Healthcheck.
required: true
choices: ["HTTP", "HTTPS"]
host_header:
description:
- The value of the host header in the health check request. If left
empty, the public IP on behalf of which this health
check is performed will be used.
    required: false
default: ""
port:
description:
- The TCP port number for the health check request. The default value is
443 for HTTPS and 80 for HTTP.
request_path:
description:
- The request path of the HTTPS health check request.
required: false
default: "/"
state:
description: State of the Healthcheck.
    required: false
    default: present
choices: ["present", "absent"]
timeout:
description:
- How long (in seconds) to wait for a response before claiming
failure. It is invalid for timeout
to have a greater value than check_interval.
default: 5
unhealthy_threshold:
description:
- A so-far healthy instance will be marked unhealthy after this
many consecutive failures.
default: 2
healthy_threshold:
description:
- A so-far unhealthy instance will be marked healthy after this
many consecutive successes.
default: 2
service_account_email:
description:
- service account email
service_account_permissions:
version_added: "2.0"
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
credentials_file:
description:
- Path to the JSON file associated with the service account email
project_id:
description:
- Your GCP project ID
'''
EXAMPLES = '''
- name: Create Minimum HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: my-healthcheck
healthcheck_type: HTTP
state: present
- name: Create HTTP HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: my-healthcheck
healthcheck_type: HTTP
    host_header: my-host
request_path: /hc
check_interval: 10
timeout: 30
    unhealthy_threshold: 2
    healthy_threshold: 1
state: present
- name: Create HTTPS HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: "{{ https_healthcheck }}"
healthcheck_type: HTTPS
host_header: my-host
request_path: /hc
check_interval: 5
timeout: 5
unhealthy_threshold: 2
healthy_threshold: 1
state: present
'''
RETURN = '''
state:
description: state of the Healthcheck
returned: Always.
type: str
sample: present
healthcheck_name:
description: Name of the Healthcheck
returned: Always
type: str
sample: my-url-map
healthcheck_type:
description: Type of the Healthcheck
returned: Always
type: str
sample: HTTP
healthcheck:
description: GCP Healthcheck dictionary
returned: Always. Refer to GCP documentation for detailed field descriptions.
type: dict
sample: { "name": "my-hc", "port": 443, "requestPath": "/foo" }
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import get_google_api_client, GCPUtils
USER_AGENT_PRODUCT = 'ansible-healthcheck'
USER_AGENT_VERSION = '0.0.1'
def _validate_healthcheck_params(params):
"""
Validate healthcheck params.
    Simple validation has already been performed by AnsibleModule.
:param params: Ansible dictionary containing configuration.
:type params: ``dict``
    :return: (True, '') on success; raises ValueError otherwise
    :rtype: ``tuple``
"""
if params['timeout'] > params['check_interval']:
raise ValueError("timeout (%s) is greater than check_interval (%s)" % (
params['timeout'], params['check_interval']))
return (True, '')
def _build_healthcheck_dict(params):
"""
Reformat services in Ansible Params for GCP.
:param params: Params from AnsibleModule object
:type params: ``dict``
:return: dictionary suitable for submission to GCP
HealthCheck (HTTP/HTTPS) API.
:rtype ``dict``
"""
gcp_dict = GCPUtils.params_to_gcp_dict(params, 'healthcheck_name')
if 'timeout' in gcp_dict:
gcp_dict['timeoutSec'] = gcp_dict['timeout']
del gcp_dict['timeout']
if 'checkInterval' in gcp_dict:
gcp_dict['checkIntervalSec'] = gcp_dict['checkInterval']
del gcp_dict['checkInterval']
if 'hostHeader' in gcp_dict:
gcp_dict['host'] = gcp_dict['hostHeader']
del gcp_dict['hostHeader']
if 'healthcheckType' in gcp_dict:
del gcp_dict['healthcheckType']
return gcp_dict
def _get_req_resource(client, resource_type):
if resource_type == 'HTTPS':
return (client.httpsHealthChecks(), 'httpsHealthCheck')
else:
return (client.httpHealthChecks(), 'httpHealthCheck')
def get_healthcheck(client, name, project_id=None, resource_type='HTTP'):
"""
Get a Healthcheck from GCP.
    :param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
    :param name: Name of the Healthcheck.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: A dict resp from the respective GCP 'get' request.
:rtype: ``dict``
"""
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name}
req = resource.get(**args)
return GCPUtils.execute_api_client_req(req, raise_404=False)
except:
raise
def create_healthcheck(client, params, project_id, resource_type='HTTP'):
"""
Create a new Healthcheck.
    :param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param params: Dictionary of arguments from AnsibleModule.
:type params: ``dict``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
gcp_dict = _build_healthcheck_dict(params)
try:
resource, _ = _get_req_resource(client, resource_type)
args = {'project': project_id, 'body': gcp_dict}
req = resource.insert(**args)
return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
if not return_data:
return_data = get_healthcheck(client,
name=params['healthcheck_name'],
project_id=project_id)
return (True, return_data)
except:
raise
def delete_healthcheck(client, name, project_id, resource_type='HTTP'):
"""
Delete a Healthcheck.
    :param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
    :param name: Name of the Healthcheck.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name}
req = resource.delete(**args)
return_data = GCPUtils.execute_api_client_req(req, client)
return (True, return_data)
except:
raise
def update_healthcheck(client, healthcheck, params, name, project_id,
resource_type='HTTP'):
"""
Update a Healthcheck.
If the healthcheck has not changed, the update will not occur.
    :param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
    :param healthcheck: Existing healthcheck (as returned by GCP) to compare against.
:type healthcheck: ``dict``
:param params: Dictionary of arguments from AnsibleModule.
:type params: ``dict``
    :param name: Name of the Healthcheck.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
gcp_dict = _build_healthcheck_dict(params)
ans = GCPUtils.are_params_equal(healthcheck, gcp_dict)
if ans:
return (False, 'no update necessary')
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name, 'body': gcp_dict}
req = resource.update(**args)
return_data = GCPUtils.execute_api_client_req(
req, client=client, raw=False)
return (True, return_data)
except:
raise
def main():
module = AnsibleModule(argument_spec=dict(
healthcheck_name=dict(required=True),
healthcheck_type=dict(required=True,
choices=['HTTP', 'HTTPS']),
request_path=dict(required=False, default='/'),
check_interval=dict(required=False, type='int', default=5),
healthy_threshold=dict(required=False, type='int', default=2),
unhealthy_threshold=dict(required=False, type='int', default=2),
host_header=dict(required=False, type='str', default=''),
timeout=dict(required=False, type='int', default=5),
port=dict(required=False, type='int'),
state=dict(choices=['absent', 'present'], default='present'),
service_account_email=dict(),
service_account_permissions=dict(type='list'),
credentials_file=dict(),
project_id=dict(), ), )
client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
user_agent_version=USER_AGENT_VERSION)
params = {}
params['healthcheck_name'] = module.params.get('healthcheck_name')
params['healthcheck_type'] = module.params.get('healthcheck_type')
params['request_path'] = module.params.get('request_path')
params['check_interval'] = module.params.get('check_interval')
params['healthy_threshold'] = module.params.get('healthy_threshold')
params['unhealthy_threshold'] = module.params.get('unhealthy_threshold')
params['host_header'] = module.params.get('host_header')
params['timeout'] = module.params.get('timeout')
params['port'] = module.params.get('port', None)
params['state'] = module.params.get('state')
if not params['port']:
params['port'] = 80
if params['healthcheck_type'] == 'HTTPS':
params['port'] = 443
try:
_validate_healthcheck_params(params)
except Exception as e:
module.fail_json(msg=e.message, changed=False)
changed = False
json_output = {'state': params['state']}
healthcheck = get_healthcheck(client,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
if not healthcheck:
if params['state'] == 'absent':
# Doesn't exist in GCE, and state==absent.
changed = False
module.fail_json(
msg="Cannot delete unknown healthcheck: %s" %
(params['healthcheck_name']))
else:
# Create
changed, json_output['healthcheck'] = create_healthcheck(client,
params=params,
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
elif params['state'] == 'absent':
# Delete
changed, json_output['healthcheck'] = delete_healthcheck(client,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
else:
changed, json_output['healthcheck'] = update_healthcheck(client,
healthcheck=healthcheck,
params=params,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
json_output['changed'] = changed
json_output.update(params)
module.exit_json(**json_output)
if __name__ == '__main__':
main()
| gpl-3.0 |
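# A standalone sketch of the reshaping _build_healthcheck_dict performs above:
# snake_case Ansible params become the camelCase keys the GCE API expects,
# with a few explicit renames. This reimplements the idea for illustration;
# the module itself uses GCPUtils.params_to_gcp_dict plus the renames shown
# in its body.
def to_gcp_dict(params, name_key='healthcheck_name'):
    renames = {'timeout': 'timeoutSec',
               'check_interval': 'checkIntervalSec',
               'host_header': 'host'}
    drop = {'state', 'healthcheck_type'}
    gcp = {}
    for key, value in params.items():
        if key in drop or value is None:
            continue
        if key == name_key:
            gcp['name'] = value
            continue
        key = renames.get(key, key)
        parts = key.split('_')
        gcp[parts[0] + ''.join(p.title() for p in parts[1:])] = value
    return gcp

print(to_gcp_dict({'healthcheck_name': 'my-hc', 'healthcheck_type': 'HTTP',
                   'request_path': '/hc', 'check_interval': 5, 'timeout': 5,
                   'port': 80, 'state': 'present'}))
# -> {'name': 'my-hc', 'requestPath': '/hc', 'checkIntervalSec': 5,
#    'timeoutSec': 5, 'port': 80}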
goodwinnk/intellij-community | python/lib/Lib/site-packages/django/contrib/admin/views/template.py | 88 | 3255 | from django import template, forms
from django.contrib.admin.views.decorators import staff_member_required
from django.template import loader
from django.shortcuts import render_to_response
from django.contrib.sites.models import Site
from django.conf import settings
from django.utils.importlib import import_module
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
def template_validator(request):
"""
Displays the template validator form, which finds and displays template
syntax errors.
"""
# get a dict of {site_id : settings_module} for the validator
settings_modules = {}
for mod in settings.ADMIN_FOR:
settings_module = import_module(mod)
settings_modules[settings_module.SITE_ID] = settings_module
site_list = Site.objects.in_bulk(settings_modules.keys()).values()
if request.POST:
form = TemplateValidatorForm(settings_modules, site_list,
data=request.POST)
if form.is_valid():
messages.info(request, 'The template is valid.')
else:
form = TemplateValidatorForm(settings_modules, site_list)
return render_to_response('admin/template_validator.html', {
'title': 'Template validator',
'form': form,
}, context_instance=template.RequestContext(request))
template_validator = staff_member_required(template_validator)
class TemplateValidatorForm(forms.Form):
site = forms.ChoiceField(_('site'))
template = forms.CharField(
_('template'), widget=forms.Textarea({'rows': 25, 'cols': 80}))
def __init__(self, settings_modules, site_list, *args, **kwargs):
self.settings_modules = settings_modules
super(TemplateValidatorForm, self).__init__(*args, **kwargs)
self.fields['site'].choices = [(s.id, s.name) for s in site_list]
def clean_template(self):
# Get the settings module. If the site isn't set, we don't raise an
# error since the site field will.
try:
site_id = int(self.cleaned_data.get('site', None))
except (ValueError, TypeError):
return
settings_module = self.settings_modules.get(site_id, None)
if settings_module is None:
return
# So that inheritance works in the site's context, register a new
# function for "extends" that uses the site's TEMPLATE_DIRS instead.
def new_do_extends(parser, token):
node = loader.do_extends(parser, token)
node.template_dirs = settings_module.TEMPLATE_DIRS
return node
register = template.Library()
register.tag('extends', new_do_extends)
template.builtins.append(register)
# Now validate the template using the new TEMPLATE_DIRS, making sure to
# reset the extends function in any case.
error = None
template_string = self.cleaned_data['template']
try:
tmpl = loader.get_template_from_string(template_string)
tmpl.render(template.Context({}))
except template.TemplateSyntaxError, e:
error = e
template.builtins.remove(register)
if error:
            raise forms.ValidationError(error.args)
| apache-2.0 |
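# The validation trick above, reduced to a standalone sketch: compile a
# template string and render it against an empty context, catching syntax
# errors. This uses the Engine API of more recent Django versions; the view
# above targets the older loader-based API.
from django.conf import settings
from django.template import Context, Engine, TemplateSyntaxError

if not settings.configured:
    settings.configure()  # minimal setup so this runs outside a project

def validate_template(template_string):
    try:
        tmpl = Engine().from_string(template_string)  # syntax errors raise here
        tmpl.render(Context({}))
    except TemplateSyntaxError as e:
        return False, str(e)
    return True, ''

print(validate_template('{% if x %}ok{% endif %}'))  # (True, '')
print(validate_template('{% if x %}ok'))             # (False, "Unclosed tag ...")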
percipient/django-two-factor-auth | two_factor/forms.py | 4 | 5418 | from binascii import unhexlify
from time import time
from django import forms
from django.forms import ModelForm, Form
from django.utils.translation import ugettext_lazy as _
from django_otp.forms import OTPAuthenticationFormMixin
from django_otp.oath import totp
from django_otp.plugins.otp_totp.models import TOTPDevice
from two_factor.utils import totp_digits
try:
from otp_yubikey.models import RemoteYubikeyDevice, YubikeyDevice
except ImportError:
RemoteYubikeyDevice = YubikeyDevice = None
from .models import (PhoneDevice, get_available_phone_methods,
get_available_methods)
class MethodForm(forms.Form):
method = forms.ChoiceField(label=_("Method"),
initial='generator',
widget=forms.RadioSelect)
def __init__(self, **kwargs):
super(MethodForm, self).__init__(**kwargs)
self.fields['method'].choices = get_available_methods()
class PhoneNumberMethodForm(ModelForm):
method = forms.ChoiceField(widget=forms.RadioSelect, label=_('Method'))
class Meta:
model = PhoneDevice
fields = 'number', 'method',
def __init__(self, **kwargs):
super(PhoneNumberMethodForm, self).__init__(**kwargs)
self.fields['method'].choices = get_available_phone_methods()
class PhoneNumberForm(ModelForm):
class Meta:
model = PhoneDevice
fields = 'number',
class DeviceValidationForm(forms.Form):
token = forms.IntegerField(label=_("Token"), min_value=1, max_value=int('9' * totp_digits()))
error_messages = {
'invalid_token': _('Entered token is not valid.'),
}
def __init__(self, device, **args):
super(DeviceValidationForm, self).__init__(**args)
self.device = device
def clean_token(self):
token = self.cleaned_data['token']
if not self.device.verify_token(token):
raise forms.ValidationError(self.error_messages['invalid_token'])
return token
class YubiKeyDeviceForm(DeviceValidationForm):
token = forms.CharField(label=_("YubiKey"))
error_messages = {
'invalid_token': _("The YubiKey could not be verified."),
}
def clean_token(self):
self.device.public_id = self.cleaned_data['token'][:-32]
return super(YubiKeyDeviceForm, self).clean_token()
class TOTPDeviceForm(forms.Form):
token = forms.IntegerField(label=_("Token"), min_value=0, max_value=int('9' * totp_digits()))
error_messages = {
'invalid_token': _('Entered token is not valid.'),
}
def __init__(self, key, user, metadata=None, **kwargs):
super(TOTPDeviceForm, self).__init__(**kwargs)
self.key = key
self.tolerance = 1
self.t0 = 0
self.step = 30
self.drift = 0
self.digits = totp_digits()
self.user = user
self.metadata = metadata or {}
@property
def bin_key(self):
"""
The secret key as a binary string.
"""
return unhexlify(self.key.encode())
def clean_token(self):
token = self.cleaned_data.get('token')
validated = False
t0s = [self.t0]
key = self.bin_key
if 'valid_t0' in self.metadata:
t0s.append(int(time()) - self.metadata['valid_t0'])
for t0 in t0s:
for offset in range(-self.tolerance, self.tolerance):
if totp(key, self.step, t0, self.digits, self.drift + offset) == token:
self.drift = offset
self.metadata['valid_t0'] = int(time()) - t0
validated = True
if not validated:
raise forms.ValidationError(self.error_messages['invalid_token'])
return token
def save(self):
return TOTPDevice.objects.create(user=self.user, key=self.key,
tolerance=self.tolerance, t0=self.t0,
step=self.step, drift=self.drift,
digits=self.digits,
name='default')
class DisableForm(forms.Form):
understand = forms.BooleanField(label=_("Yes, I am sure"))
class AuthenticationTokenForm(OTPAuthenticationFormMixin, Form):
otp_token = forms.IntegerField(label=_("Token"), min_value=1,
max_value=int('9' * totp_digits()))
def __init__(self, user, initial_device, **kwargs):
"""
`initial_device` is either the user's default device, or the backup
device when the user chooses to enter a backup token. The token will
be verified against all devices, it is not limited to the given
device.
"""
super(AuthenticationTokenForm, self).__init__(**kwargs)
self.user = user
# YubiKey generates a OTP of 44 characters (not digits). So if the
# user's primary device is a YubiKey, replace the otp_token
# IntegerField with a CharField.
if RemoteYubikeyDevice and YubikeyDevice and \
isinstance(initial_device, (RemoteYubikeyDevice, YubikeyDevice)):
self.fields['otp_token'] = forms.CharField(label=_('YubiKey'))
def clean(self):
self.clean_otp(self.user)
return self.cleaned_data
class BackupTokenForm(AuthenticationTokenForm):
otp_token = forms.CharField(label=_("Token"))
| mit |
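# A standalone sketch of the token check TOTPDeviceForm.clean_token performs
# above: compute TOTP values for a small window of drift offsets and accept
# the first match. Requires django-otp; the hex key is a throwaway example.
from binascii import unhexlify

from django_otp.oath import totp

key = unhexlify(b'3132333435363738393031323334353637383930')  # example secret
token = totp(key, step=30, digits=6)  # what an authenticator app would show now

def verify(key, token, tolerance=1, step=30, digits=6):
    for offset in range(-tolerance, tolerance + 1):  # inclusive window for clarity
        if totp(key, step=step, digits=digits, drift=offset) == token:
            return True
    return False

print(verify(key, token))  # True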
ponnam/ESAT-131226 | plugins/ti.alloy/plugin.py | 1729 | 5251 | import os, sys, subprocess, hashlib
import subprocess
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
    Backported from Python 2.7, where it is implemented in pure Python in the stdlib.
>>> check_output(['/usr/bin/python', '--version'])
Python 2.6.2
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
def compile(config):
paths = {}
binaries = ["alloy","node"]
dotAlloy = os.path.abspath(os.path.join(config['project_dir'], 'build', '.alloynewcli'))
if os.path.exists(dotAlloy):
print "[DEBUG] build/.alloynewcli file found, skipping plugin..."
os.remove(dotAlloy)
else:
for binary in binaries:
try:
# see if the environment variable is defined
paths[binary] = os.environ["ALLOY_" + ("NODE_" if binary == "node" else "") + "PATH"]
except KeyError as ex:
# next try PATH, and then our guess paths
if sys.platform == "darwin" or sys.platform.startswith('linux'):
userPath = os.environ["HOME"]
guessPaths = [
"/usr/local/bin/"+binary,
"/opt/local/bin/"+binary,
userPath+"/local/bin/"+binary,
"/opt/bin/"+binary,
"/usr/bin/"+binary,
"/usr/local/share/npm/bin/"+binary
]
try:
binaryPath = check_output(["which",binary], stderr=subprocess.STDOUT).strip()
print "[DEBUG] %s installed at '%s'" % (binary,binaryPath)
except:
print "[WARN] Couldn't find %s on your PATH:" % binary
print "[WARN] %s" % os.environ["PATH"]
print "[WARN]"
print "[WARN] Checking for %s in a few default locations:" % binary
for p in guessPaths:
sys.stdout.write("[WARN] %s -> " % p)
if os.path.exists(p):
binaryPath = p
print "FOUND"
break
else:
print "not found"
binaryPath = None
if binaryPath is None:
print "[ERROR] Couldn't find %s" % binary
sys.exit(1)
else:
paths[binary] = binaryPath
# no guesses on windows, just use the PATH
elif sys.platform == "win32":
paths["alloy"] = "alloy.cmd"
f = os.path.abspath(os.path.join(config['project_dir'], 'app'))
if os.path.exists(f):
print "[INFO] alloy app found at %s" % f
rd = os.path.abspath(os.path.join(config['project_dir'], 'Resources'))
devicefamily = 'none'
simtype = 'none'
version = '0'
deploytype = 'development'
if config['platform']==u'ios':
version = config['iphone_version']
devicefamily = config['devicefamily']
deploytype = config['deploytype']
if config['platform']==u'android':
builder = config['android_builder']
version = builder.tool_api_level
deploytype = config['deploy_type']
if config['platform']==u'mobileweb':
builder = config['mobileweb_builder']
deploytype = config['deploytype']
cfg = "platform=%s,version=%s,simtype=%s,devicefamily=%s,deploytype=%s," % (config['platform'],version,simtype,devicefamily,deploytype)
if sys.platform == "win32":
cmd = [paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
else:
cmd = [paths["node"], paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
print "[INFO] Executing Alloy compile:"
print "[INFO] %s" % " ".join(cmd)
try:
print check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if hasattr(ex, 'output'):
print ex.output
print "[ERROR] Alloy compile failed"
retcode = 1
if hasattr(ex, 'returncode'):
retcode = ex.returncode
sys.exit(retcode)
except EnvironmentError as ex:
print "[ERROR] Unexpected error with Alloy compiler plugin: %s" % ex.strerror
sys.exit(2)
| apache-2.0 |
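# A condensed sketch of the binary-location strategy compile() uses above:
# prefer an environment variable, then `which`, then a list of guess paths.
# The binary name, env var and paths are illustrative.
import os
import subprocess

def find_binary(name, env_var, guess_paths):
    if env_var in os.environ:
        return os.environ[env_var]
    try:
        return subprocess.check_output(['which', name]).decode().strip()
    except (subprocess.CalledProcessError, OSError):
        pass
    for path in guess_paths:
        if os.path.exists(path):
            return path
    return None

print(find_binary('node', 'ALLOY_NODE_PATH',
                  ['/usr/local/bin/node', '/opt/local/bin/node']))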