repo_name | path | copies | size | content | license
---|---|---|---|---|---|
molobrakos/home-assistant | homeassistant/components/zwave/fan.py | 9 | 2651 | """Support for Z-Wave fans."""
import logging
import math
from homeassistant.core import callback
from homeassistant.components.fan import (
DOMAIN, FanEntity, SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH,
SUPPORT_SET_SPEED)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import ZWaveDeviceEntity
_LOGGER = logging.getLogger(__name__)
SPEED_LIST = [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
SUPPORTED_FEATURES = SUPPORT_SET_SPEED
# The raw 0-99 value is first scaled down to an integer index before lookup
VALUE_TO_SPEED = {
0: SPEED_OFF,
1: SPEED_LOW,
2: SPEED_MEDIUM,
3: SPEED_HIGH,
}
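# Speed names map back to the raw Z-Wave multilevel (dimmer) levels passed to node.set_dimmer().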
SPEED_TO_VALUE = {
SPEED_OFF: 0,
SPEED_LOW: 1,
SPEED_MEDIUM: 50,
SPEED_HIGH: 99,
}
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Old method of setting up Z-Wave fans."""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Z-Wave Fan from Config Entry."""
@callback
def async_add_fan(fan):
"""Add Z-Wave Fan."""
async_add_entities([fan])
async_dispatcher_connect(hass, 'zwave_new_fan', async_add_fan)
def get_device(values, **kwargs):
"""Create Z-Wave entity device."""
return ZwaveFan(values)
class ZwaveFan(ZWaveDeviceEntity, FanEntity):
"""Representation of a Z-Wave fan."""
def __init__(self, values):
"""Initialize the Z-Wave fan device."""
ZWaveDeviceEntity.__init__(self, values, DOMAIN)
self.update_properties()
def update_properties(self):
"""Handle data changes for node values."""
value = math.ceil(self.values.primary.data * 3 / 100)
self._state = VALUE_TO_SPEED[value]
def set_speed(self, speed):
"""Set the speed of the fan."""
self.node.set_dimmer(
self.values.primary.value_id, SPEED_TO_VALUE[speed])
def turn_on(self, speed=None, **kwargs):
"""Turn the device on."""
if speed is None:
# Value 255 tells device to return to previous value
self.node.set_dimmer(self.values.primary.value_id, 255)
else:
self.set_speed(speed)
def turn_off(self, **kwargs):
"""Turn the device off."""
self.node.set_dimmer(self.values.primary.value_id, 0)
@property
def speed(self):
"""Return the current speed."""
return self._state
@property
def speed_list(self):
"""Get the list of available speeds."""
return SPEED_LIST
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORTED_FEATURES
| apache-2.0 |
jackkiej/SickRage | lib/tornado/test/import_test.py | 15 | 1547 | # flake8: noqa
from __future__ import absolute_import, division, print_function, with_statement
from tornado.test.util import unittest
class ImportTest(unittest.TestCase):
def test_import_everything(self):
# Some of our modules are not otherwise tested. Import them
# all (unless they have external dependencies) here to at
# least ensure that there are no syntax errors.
import tornado.auth
import tornado.autoreload
import tornado.concurrent
# import tornado.curl_httpclient # depends on pycurl
import tornado.escape
import tornado.gen
import tornado.http1connection
import tornado.httpclient
import tornado.httpserver
import tornado.httputil
import tornado.ioloop
import tornado.iostream
import tornado.locale
import tornado.log
import tornado.netutil
import tornado.options
import tornado.process
import tornado.simple_httpclient
import tornado.stack_context
import tornado.tcpserver
import tornado.template
import tornado.testing
import tornado.util
import tornado.web
import tornado.websocket
import tornado.wsgi
# for modules with dependencies, if those dependencies can be loaded,
# load them too.
def test_import_pycurl(self):
try:
import pycurl # type: ignore
except ImportError:
pass
else:
import tornado.curl_httpclient
| gpl-3.0 |
acorg/dark-matter | bin/make-consensus.py | 1 | 10130 | #!/usr/bin/env python
import os
import sys
import argparse
from tempfile import mkdtemp
from os.path import join, basename
from dark.fasta import FastaReads
from dark.process import Executor
IVAR_FREQUENCY_THRESHOLD_DEFAULT = 0.6
IVAR_DOCS = (
'https://andersen-lab.github.io/ivar/html/manualpage.html#autotoc_md19')
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Make a consensus sequence.')
parser.add_argument(
'--reference', required=True,
help='The reference FASTA file.')
parser.add_argument(
'--bam',
help=('The BAM file from which the consensus should be made. '
'Required if --maskLowCoverage is used. If no BAM file is '
'given, a VCF file must be provided. If both a BAM and a VCF '
'file are given, the VCF file will take precedence.'))
parser.add_argument(
'--vcfFile',
help=('The VCF file. If omitted, bcftools will be used to make a VCF '
'file from the BAM file.'))
group = parser.add_mutually_exclusive_group()
group.add_argument(
'--id',
help=('The id to use in the consensus sequence in the output FASTA. '
'If not given, the reference sequence id will be used.'))
group.add_argument(
'--idLambda', metavar='LAMBDA-FUNCTION',
help=('A one-argument function taking and returning a read id. '
'This can be used to set the id of the consensus sequence based '
'on the id of the reference sequence (the function will be '
'called with the id of the reference sequence). E.g., '
'--idLambda "lambda id: id.split(\'_\')[0]" or '
'--idLambda "lambda id: id[:10] + \'-consensus\'".'))
parser.add_argument(
'--sample',
help=('The name of the sample (from the @RG SM tag in the original '
'alignment BAM file) for which a consensus should be made. '
'If not given, the first sample name (from the #CHROM header) '
'in the VCF file will be used.'))
parser.add_argument(
'--dryRun', default=False, action='store_true',
help='Do not run commands, just print what would be done.')
parser.add_argument(
'--maskLowCoverage', default=0, type=int,
help=('Put an N into sites where the coverage is below the specified '
'cutoff. If you specify a negative number, masking will be '
'turned off. Requires --bam.'))
parser.add_argument(
'--log', default=False, action='store_true',
help=('Show a log of commands that were (or would be, if --dryRun is '
'used) executed.'))
parser.add_argument(
'--noClean', default=True, action='store_false', dest='clean',
help=('Do not remove intermediate files or the temporary directory.'))
parser.add_argument(
'--callHaplotypesGATK', default=False, action='store_true',
help=('Use GATK to call haplotypes. See '
'https://gatk.broadinstitute.org for details on GATK.'))
parser.add_argument(
'--picardJar',
help=('The path to the Picard jar file. See '
'https://github.com/broadinstitute/picard for details on '
'Picard.'))
parser.add_argument(
'--ivar', default=False, action='store_true',
help='If given, ivar will be used to call the consensus.')
parser.add_argument(
'--ivarFrequencyThreshold', type=float,
help=(f'The frequency threshold used by ivar when calling the '
f'consensus. If the frequency of the most-common nucleotide at '
f'a site meets this threshold, the nucleotide will be called. '
f'Otherwise, an ambiguous nucleotide code will be produced, '
f'based on the smallest set of most-frequent nucleotides whose '
f'summed frequencies meet the threshold. See {IVAR_DOCS} for '
f'more information. If not given, '
f'{IVAR_FREQUENCY_THRESHOLD_DEFAULT} is used. Can only be used '
f'if --ivar is also specified.'))
parser.add_argument(
'--ivarBedFile',
help=('If ivar should trim primers, a BED file of the primer '
'positions.'))
args = parser.parse_args()
if not (args.bam or args.vcfFile):
print('At least one of --bam or --vcfFile must be given.',
file=sys.stderr)
sys.exit(1)
if args.maskLowCoverage and not args.bam:
print('If --maskLowCoverage is used, --bam must be too.',
file=sys.stderr)
sys.exit(1)
if args.ivar and not args.bam:
print('If --ivar is used, --bam must be too.', file=sys.stderr)
sys.exit(1)
if args.ivarFrequencyThreshold is not None and not args.ivar:
print('If --ivarFrequencyThreshold is used, --ivar must be too.',
file=sys.stderr)
sys.exit(1)
if args.ivar and args.ivarFrequencyThreshold is None:
args.ivarFrequencyThreshold = IVAR_FREQUENCY_THRESHOLD_DEFAULT
e = Executor(args.dryRun)
tempdir = mkdtemp(prefix='consensus-')
if args.vcfFile:
vcfFile = args.vcfFile
else:
# No VCF file provided, so make one.
vcfFile = join(tempdir, 'vcf.gz')
if args.callHaplotypesGATK:
e.execute("samtools index '%s'" % args.bam)
if args.picardJar:
picardJar = args.picardJar
else:
try:
picardJar = os.environ['PICARD_JAR']
except KeyError:
print('If you use --callHaplotypesGATK, you must give a '
'Picard JAR file with --picardJar or else set '
'PICARD_JAR in your environment.', file=sys.stderr)
sys.exit(1)
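# GATK needs a FASTA index and a sequence dictionary for the reference; create any that are missing and remove them again afterwards.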
indexFile = args.reference + '.fai'
if os.path.exists(indexFile):
removeIndex = False
else:
removeIndex = True
e.execute("samtools faidx '%s'" % args.reference)
if args.reference.lower().endswith('.fasta'):
dictFile = args.reference[:-len('.fasta')] + '.dict'
else:
dictFile = args.reference + '.dict'
if os.path.exists(dictFile):
removeDict = False
else:
removeDict = True
e.execute(
"java -jar '%s' CreateSequenceDictionary R='%s' O='%s'"
% (picardJar, args.reference, dictFile))
e.execute(
'gatk --java-options -Xmx4g HaplotypeCaller '
"--reference '%s' "
"--input '%s' "
"--output '%s' "
"--sample-ploidy 1 "
'-ERC GVCF' %
(args.reference, args.bam, vcfFile))
if removeIndex:
e.execute("rm '%s'" % indexFile)
if removeDict:
e.execute("rm '%s'" % dictFile)
else:
e.execute("bcftools mpileup --max-depth 5000 -Ou -f '%s' '%s' | "
"bcftools call --ploidy 1 -mv -Oz -o '%s'" %
(args.reference, args.bam, vcfFile))
e.execute("bcftools index '%s'" % vcfFile)
if args.maskLowCoverage >= 0:
# Make a BED file.
bedFile = join(tempdir, 'mask.bed')
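# Each BED line records a zero-based site whose depth is below the cutoff; bcftools consensus masks these sites via --mask (the ivar branch masks with -m instead).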
# The doubled-% below are so that Python doesn't try to fill in the
# values and instead just generates a single % that awk sees.
e.execute(
"samtools depth -a '%s' | "
"awk '$3 < %d {printf \"%%s\\t%%d\\t%%d\\n\", "
"$1, $2 - 1, $2}' > '%s'" %
(args.bam, args.maskLowCoverage, bedFile))
maskArg = '--mask ' + bedFile
else:
maskArg = ''
if args.sample:
sample = args.sample
else:
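# No sample name supplied: take the first sample column from the VCF's #CHROM header line.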
result = e.execute(
"gunzip -c '%s' | egrep -m 1 '^#CHROM' | cut -f10" % vcfFile)
sample = result.stdout.strip()
consensusFile = join(tempdir, 'consensus.fasta')
if args.ivar:
if args.ivarBedFile:
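# Trim primers with "ivar trim" using the BED file, then sort the trimmed BAM so it can be fed to samtools mpileup below.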
tempBamFile = join(tempdir, basename(args.bam) + '-trimmed')
result = e.execute(
"ivar trim -i %r -b %r -p %r -e" % (
args.bam, args.ivarBedFile, tempBamFile))
ivarTempBamFile = tempBamFile + '.bam'
sortedIvarTempBamFile = tempBamFile + '-trimmed-sorted.bam'
result = e.execute(
"samtools sort %r -o %r" % (
ivarTempBamFile, sortedIvarTempBamFile))
bamFile = sortedIvarTempBamFile
else:
bamFile = args.bam
ivarConsensusFile = join(tempdir, 'temporary-consensus')
result = e.execute(
"samtools mpileup -A -Q 0 %r | "
"ivar consensus -p %r -q 20 -t %r -m %r" % (
bamFile, ivarConsensusFile, args.ivarFrequencyThreshold,
args.maskLowCoverage))
result = e.execute(
"mv %s %s" % (ivarConsensusFile + '.fa', consensusFile))
else:
result = e.execute(
"bcftools consensus --sample '%s' --iupac-codes %s --fasta-ref "
"'%s' '%s' > '%s'" %
(sample, maskArg, args.reference, vcfFile, consensusFile))
consensus = list(FastaReads(consensusFile))[0]
if args.id is not None:
consensus.id = args.id
elif args.idLambda is not None:
idLambda = eval(args.idLambda)
consensus.id = idLambda(consensus.id)
print(consensus.toString('fasta'), end='')
if result.stderr:
print(result.stderr, end='', file=sys.stderr)
if args.dryRun or args.log:
print('\n'.join(e.log), file=sys.stderr)
if tempdir:
if args.clean:
e.execute("rm -r '%s'" % tempdir)
else:
print('Temporary directory %r.' % tempdir, file=sys.stderr)
if __name__ == '__main__':
main()
| mit |
paramite/glance | glance/artifacts/domain/__init__.py | 9 | 2753 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_utils import timeutils
from glance import i18n
_ = i18n._
class Artifact(object):
def __init__(self, id, name, version, type_name, type_version,
visibility, state, owner, created_at=None,
updated_at=None, **kwargs):
self.id = id
self.name = name
self.type_name = type_name
self.version = version
self.type_version = type_version
self.visibility = visibility
self.state = state
self.owner = owner
self.created_at = created_at
self.updated_at = updated_at
self.description = kwargs.pop('description', None)
self.blobs = kwargs.pop('blobs', {})
self.properties = kwargs.pop('properties', {})
self.dependencies = kwargs.pop('dependencies', {})
self.tags = kwargs.pop('tags', [])
if kwargs:
message = _("__init__() got unexpected keyword argument '%s'")
raise TypeError(message % kwargs.keys()[0])
class ArtifactFactory(object):
def __init__(self, context, klass):
self.klass = klass
self.context = context
def new_artifact(self, name, version, **kwargs):
id = kwargs.pop('id', str(uuid.uuid4()))
tags = kwargs.pop('tags', [])
# pop reserved fields from kwargs dict
for param in ['owner', 'created_at', 'updated_at',
'deleted_at', 'visibility', 'state']:
kwargs.pop(param, '')
curr_timestamp = timeutils.utcnow()
base = self.klass(id=id,
name=name,
version=version,
visibility='private',
state='creating',
# XXX FIXME remove after using authentication
# paste-flavor
# (no or '' as owner will always be there)
owner=self.context.owner or '',
created_at=curr_timestamp,
updated_at=curr_timestamp,
tags=tags,
**kwargs)
return base
| apache-2.0 |
sekikn/incubator-airflow | airflow/migrations/versions/b0125267960b_merge_heads.py | 8 | 1232 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Merge the four heads back together
Revision ID: 08364691d074
Revises: a56c9515abdc, 004c1210f153, 74effc47d867, b3b105409875
Create Date: 2019-11-19 22:05:11.752222
"""
# revision identifiers, used by Alembic.
revision = '08364691d074'
down_revision = ('a56c9515abdc', '004c1210f153', '74effc47d867', 'b3b105409875')
branch_labels = None
depends_on = None
def upgrade(): # noqa: D103
pass
def downgrade(): # noqa: D103
pass
| apache-2.0 |
RoboticsURJC/JdeRobot | src/libs/comm_py/comm/ice/ptMotorsIceClient.py | 5 | 3897 | # -*- coding: utf-8 -*-
#
# Copyright (C) 1997-2016 JDE Developers Team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
# Authors :
# Aitor Martinez Fernandez <[email protected]>
#
import time
import traceback
import jderobot
import threading
import Ice
from .threadSensor import ThreadSensor
class PTMotors:
def __init__(self, jdrc, prefix):
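# Resolve the Ice proxy named "<prefix>.Proxy" from the config, checkedCast it to PTMotorsPrx, and cache the pan/tilt limits reported by the device.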
self.lock = threading.Lock()
self.data=jderobot.PTMotorsData()
self.params=jderobot.PTMotorsParams()
ic = jdrc.getIc()
try:
proxyStr = jdrc.getConfig().getProperty(prefix+".Proxy")
base = ic.stringToProxy(proxyStr)
self.proxy = jderobot.PTMotorsPrx.checkedCast(base)
if not self.proxy:
print ('Interface ' + prefix + ' not configured')
else:
self.params = self.proxy.getPTMotorsParams()
print ("+++ MAX/MIN Pan/Tilt Values +++")
print ("+ Min Pan: " + str(self.params.minPan) + " +")
print ("+ Max Pan: " + str(self.params.maxPan) + " +")
print ("+ Max Pan speed: " + str(self.params.maxPanSpeed) + " +")
print ("+ Min Tilt: " + str(self.params.minTilt) + " +")
print ("+ Max Tilt: " + str(self.params.maxTilt) + " +")
print ("+ Max Tilt speed: " + str(self.params.maxTiltSpeed) + " +")
print ("++++++++++++++++++++++++++++++")
except Ice.ConnectionRefusedException:
print(prefix + ': connection refused')
except:
traceback.print_exc()
exit(-1)
def getLimits(self):
self.lock.acquire()
params = self.params
self.lock.release()
return params
def setPTMotorsData(self, pan, tilt, panspeed, tiltspeed):
self.lock.acquire()
self.data.pan = pan
self.data.tilt = tilt
self.data.panSpeed = panspeed
self.data.tiltSpeed = tiltspeed
self.lock.release()
def sendPTMotorsData(self):
if self.hasproxy():
self.lock.acquire()
self.data.timeStamp = time.time()
self.proxy.setPTMotorsData(self.data)
self.lock.release()
def hasproxy (self):
return hasattr(self,"proxy") and self.proxy
def update(self):
self.sendPTMotorsData()
class PTMotorsIceClient:
def __init__(self,ic,prefix, start = False):
self.motors = PTMotors(ic,prefix)
self.kill_event = threading.Event()
self.thread = ThreadSensor(self.motors, self.kill_event)
self.thread.daemon = True
if start:
self.start()
# If the client has been stopped it cannot be started again (threading.Thread raises an error on restart).
def start(self):
self.kill_event.clear()
self.thread.start()
# If the client has been stopped it cannot be started again.
def stop(self):
self.kill_event.set()
def getLimits(self):
return self.motors.getLimits()
def hasproxy(self):
return self.motors.hasproxy()
def setPTMotorsData(self, pan, tilt, panspeed, tiltspeed):
self.motors.setPTMotorsData(pan, tilt, panspeed, tiltspeed)
| gpl-3.0 |
Kilhog/odoo | addons/edi/__openerp__.py | 312 | 1911 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Electronic Data Interchange (EDI)',
'version': '1.0',
'category': 'Tools',
'description': """
Provides a common EDI platform that other Applications can use.
===============================================================
OpenERP specifies a generic EDI format for exchanging business documents between
different systems, and provides generic mechanisms to import and export them.
More details about OpenERP's EDI format may be found in the technical OpenERP
documentation at http://doc.openerp.com.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/api',
'depends': ['base', 'email_template'],
'data' : [
'views/edi.xml',
],
'icon': '/edi/static/src/img/knowledge.png',
'test': ['test/edi_partner_test.yml'],
'qweb': ['static/src/xml/*.xml'],
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
karolhrdina/malamute | bindings/python/test_malamute.py | 11 | 1127 | from malamute import MalamuteClient
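# Exercise a running Malamute broker end to end: stream pub/sub via set_producer/set_consumer, service requests via sendfor to the 'service' worker, and a direct mailbox reply via sendto.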
def test(addr):
service = MalamuteClient()
service.connect(addr, 100, b'service')
service.set_worker(b'service', b'derps')
writer = MalamuteClient()
print("writer.connect")
writer.connect(addr, 100, b'writer')
print("writer.set_producer")
writer.set_producer(b'writer')
reader = MalamuteClient()
print("reader.connect")
reader.connect(addr, 100, b'reader')
print("reader.set_consumer")
reader.set_consumer(b'writer', b'foo')
reader.set_consumer(b'writer', b'bar')
print("writer.send")
writer.send(b'foo', [b'whoaaa', b'whaaaaaa'])
writer.send(b'bar', [b'whoaaa', b'whaaaaaa'])
print(reader.recv())
print(reader.recv())
reader.sendfor(b'service', b'derps', None, 100, [b'foooooo'])
reader.sendfor(b'service', b'derps', None, 100, [b'foooooo'])
reader.sendfor(b'service', b'derps', None, 100, [b'foooooo'])
print(service.recv())
print(service.recv())
print(service.recv())
service.sendto(b'reader', b'response', None, 100, [b'ok'])
print(reader.recv())
if __name__ == '__main__':
# this depends on having a test server running
test(b'tcp://localhost:9999')
| mpl-2.0 |
oasiswork/odoo | addons/base_report_designer/wizard/base_report_designer_modify.py | 314 | 6128 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import time
import urllib
from openerp import osv, tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class base_report_sxw(osv.osv_memory):
"""Base Report sxw """
_name = 'base.report.sxw'
_columns = {
'report_id': fields.many2one('ir.actions.report.xml', "Report", required=True,domain=[('report_sxw_content','<>',False)],),
}
def get_report(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, context=context)[0]
data_obj = self.pool['ir.model.data']
id2 = data_obj._get_id(cr, uid, 'base_report_designer', 'view_base_report_file_sxw')
report = self.pool['ir.actions.report.xml'].browse(cr, uid, data['report_id'], context=context)
if id2:
id2 = data_obj.browse(cr, uid, id2, context=context).res_id
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'base.report.file.sxw',
'views': [(id2, 'form')],
'view_id': False,
'type': 'ir.actions.act_window',
'target': 'new',
}
class base_report_file_sxw(osv.osv_memory):
"""Base Report File sxw """
_name = 'base.report.file.sxw'
def default_get(self, cr, uid, fields, context=None):
"""
To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
@return: A dictionary which of fields with values.
"""
res = super(base_report_file_sxw, self).default_get(cr, uid, fields, context=context)
report_id1 = self.pool['base.report.sxw'].search(cr,uid,[])
data = self.pool['base.report.sxw'].read(cr, uid, report_id1, context=context)[0]
report = self.pool['ir.actions.report.xml'].browse(cr, uid, data['report_id'], context=context)
if context is None:
context={}
if 'report_id' in fields:
res['report_id'] = data['report_id']
res['file_sxw'] = base64.encodestring(report.report_sxw_content)
return res
_columns = {
'report_id': fields.many2one('ir.actions.report.xml', "Report", readonly=True),
'file_sxw':fields.binary('Your .SXW file',readonly=True),
'file_sxw_upload':fields.binary('Your .SXW file',required=True)
}
def upload_report(self, cr, uid, ids, context=None):
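# Convert the uploaded OpenOffice .sxw report to RML using the bundled normalized_oo2rml.xsl stylesheet, then store both the .sxw and the generated RML on the ir.actions.report.xml record.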
from base_report_designer import openerp_sxw2rml
import StringIO
data=self.read(cr,uid,ids)[0]
sxwval = StringIO.StringIO(base64.decodestring(data['file_sxw_upload']))
fp = tools.file_open('normalized_oo2rml.xsl',subdir='addons/base_report_designer/openerp_sxw2rml')
newrmlcontent = str(openerp_sxw2rml.sxw2rml(sxwval, xsl=fp.read()))
report = self.pool['ir.actions.report.xml'].write(cr, uid, [data['report_id']], {
'report_sxw_content': base64.decodestring(data['file_sxw_upload']),
'report_rml_content': newrmlcontent
})
cr.commit()
data_obj = self.pool['ir.model.data']
id2 = data_obj._get_id(cr, uid, 'base_report_designer', 'view_base_report_file_rml')
report = self.pool['ir.actions.report.xml'].browse(cr, uid, data['report_id'], context=context)
if id2:
id2 = data_obj.browse(cr, uid, id2, context=context).res_id
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'base.report.rml.save',
'views': [(id2, 'form')],
'view_id': False,
'type': 'ir.actions.act_window',
'target': 'new',
}
class base_report_rml_save(osv.osv_memory):
"""Base Report file Save"""
_name = 'base.report.rml.save'
def default_get(self, cr, uid, fields, context=None):
"""
To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
@return: A dictionary which of fields with values.
"""
res = super(base_report_rml_save, self).default_get(cr, uid, fields, context=context)
report_ids = self.pool['base.report.sxw'].search(cr,uid,[], context=context)
data = self.pool['base.report.file.sxw'].read(cr, uid, report_ids, context=context)[0]
report = self.pool['ir.actions.report.xml'].browse(cr, uid, data['report_id'], context=context)
if 'file_rml' in fields:
res['file_rml'] = base64.encodestring(report.report_rml_content)
return res
_columns = {
'file_rml':fields.binary('Save As'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
brijeshkesariya/odoo | addons/hr_holidays/wizard/hr_holidays_summary_employees.py | 337 | 2152 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class hr_holidays_summary_employee(osv.osv_memory):
_name = 'hr.holidays.summary.employee'
_description = 'HR Leaves Summary Report By Employee'
_columns = {
'date_from': fields.date('From', required=True),
'emp': fields.many2many('hr.employee', 'summary_emp_rel', 'sum_id', 'emp_id', 'Employee(s)'),
'holiday_type': fields.selection([('Approved','Approved'),('Confirmed','Confirmed'),('both','Both Approved and Confirmed')], 'Select Leave Type', required=True)
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-%m-01'),
'holiday_type': 'Approved',
}
def print_report(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, context=context)[0]
data['emp'] = context['active_ids']
datas = {
'ids': [],
'model': 'hr.employee',
'form': data
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'holidays.summary',
'datas': datas,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nvoron23/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Tools/scripts/linktree.py | 101 | 2425 | #! /usr/bin/env python
# linktree
#
# Make a copy of a directory tree with symbolic links to all files in the
# original tree.
# All symbolic links go to a special symbolic link at the top, so you
# can easily fix things if the original source tree moves.
# See also "mkreal".
#
# usage: mklinks oldtree newtree
import sys, os
LINK = '.LINK' # Name of special symlink at the top.
debug = 0
def main():
if not 3 <= len(sys.argv) <= 4:
print 'usage:', sys.argv[0], 'oldtree newtree [linkto]'
return 2
oldtree, newtree = sys.argv[1], sys.argv[2]
if len(sys.argv) > 3:
link = sys.argv[3]
link_may_fail = 1
else:
link = LINK
link_may_fail = 0
if not os.path.isdir(oldtree):
print oldtree + ': not a directory'
return 1
try:
os.mkdir(newtree, 0777)
except os.error, msg:
print newtree + ': cannot mkdir:', msg
return 1
linkname = os.path.join(newtree, link)
try:
os.symlink(os.path.join(os.pardir, oldtree), linkname)
except os.error, msg:
if not link_may_fail:
print linkname + ': cannot symlink:', msg
return 1
else:
print linkname + ': warning: cannot symlink:', msg
linknames(oldtree, newtree, link)
return 0
def linknames(old, new, link):
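# Mirror the old tree: real directories are recreated in the new tree, everything else becomes a symlink that points back through the special top-level link.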
if debug: print 'linknames', (old, new, link)
try:
names = os.listdir(old)
except os.error, msg:
print old + ': warning: cannot listdir:', msg
return
for name in names:
if name not in (os.curdir, os.pardir):
oldname = os.path.join(old, name)
linkname = os.path.join(link, name)
newname = os.path.join(new, name)
if debug > 1: print oldname, newname, linkname
if os.path.isdir(oldname) and \
not os.path.islink(oldname):
try:
os.mkdir(newname, 0777)
ok = 1
except os.error, msg:
print newname + \
': warning: cannot mkdir:', msg
ok = 0
if ok:
linkname = os.path.join(os.pardir,
linkname)
linknames(oldname, newname, linkname)
else:
os.symlink(linkname, newname)
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 |
amiguez/youtube-dl | youtube_dl/extractor/kanalplay.py | 113 | 3283 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
srt_subtitles_timecode,
)
class KanalPlayIE(InfoExtractor):
IE_DESC = 'Kanal 5/9/11 Play'
_VALID_URL = r'https?://(?:www\.)?kanal(?P<channel_id>5|9|11)play\.se/(?:#!/)?(?:play/)?program/\d+/video/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.kanal5play.se/#!/play/program/3060212363/video/3270012277',
'info_dict': {
'id': '3270012277',
'ext': 'flv',
'title': 'Saknar både dusch och avlopp',
'description': 'md5:6023a95832a06059832ae93bc3c7efb7',
'duration': 2636.36,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.kanal9play.se/#!/play/program/335032/video/246042',
'only_matching': True,
}, {
'url': 'http://www.kanal11play.se/#!/play/program/232835958/video/367135199',
'only_matching': True,
}]
def _fix_subtitles(self, subs):
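# Join the JSON subtitle items into numbered SRT blocks: index, "start --> end" timecodes converted from milliseconds, then the text.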
return '\r\n\r\n'.join(
'%s\r\n%s --> %s\r\n%s'
% (
num,
srt_subtitles_timecode(item['startMillis'] / 1000.0),
srt_subtitles_timecode(item['endMillis'] / 1000.0),
item['text'],
) for num, item in enumerate(subs, 1))
def _get_subtitles(self, channel_id, video_id):
subs = self._download_json(
'http://www.kanal%splay.se/api/subtitles/%s' % (channel_id, video_id),
video_id, 'Downloading subtitles JSON', fatal=False)
return {'se': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]} if subs else {}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
channel_id = mobj.group('channel_id')
video = self._download_json(
'http://www.kanal%splay.se/api/getVideo?format=FLASH&videoId=%s' % (channel_id, video_id),
video_id)
reasons_for_no_streams = video.get('reasonsForNoStreams')
if reasons_for_no_streams:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, '\n'.join(reasons_for_no_streams)),
expected=True)
title = video['title']
description = video.get('description')
duration = float_or_none(video.get('length'), 1000)
thumbnail = video.get('posterUrl')
stream_base_url = video['streamBaseUrl']
formats = [{
'url': stream_base_url,
'play_path': stream['source'],
'ext': 'flv',
'tbr': float_or_none(stream.get('bitrate'), 1000),
'rtmp_real_time': True,
} for stream in video['streams']]
self._sort_formats(formats)
subtitles = {}
if video.get('hasSubtitle'):
subtitles = self.extract_subtitles(channel_id, video_id)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
| unlicense |
shuggiefisher/django-on-google-app-engine-base | django/contrib/sitemaps/views.py | 232 | 2084 | from django.http import HttpResponse, Http404
from django.template import loader
from django.contrib.sites.models import get_current_site
from django.core import urlresolvers
from django.utils.encoding import smart_str
from django.core.paginator import EmptyPage, PageNotAnInteger
def index(request, sitemaps, template_name='sitemap_index.xml'):
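# Build the sitemap index: one URL per section's sitemap, plus extra ?p=N URLs when a section's sitemap paginates over several pages.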
current_site = get_current_site(request)
sites = []
protocol = request.is_secure() and 'https' or 'http'
for section, site in sitemaps.items():
site.request = request
if callable(site):
pages = site().paginator.num_pages
else:
pages = site.paginator.num_pages
sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.sitemap', kwargs={'section': section})
sites.append('%s://%s%s' % (protocol, current_site.domain, sitemap_url))
if pages > 1:
for page in range(2, pages+1):
sites.append('%s://%s%s?p=%s' % (protocol, current_site.domain, sitemap_url, page))
xml = loader.render_to_string(template_name, {'sitemaps': sites})
return HttpResponse(xml, mimetype='application/xml')
def sitemap(request, sitemaps, section=None, template_name='sitemap.xml'):
maps, urls = [], []
if section is not None:
if section not in sitemaps:
raise Http404("No sitemap available for section: %r" % section)
maps.append(sitemaps[section])
else:
maps = sitemaps.values()
page = request.GET.get("p", 1)
current_site = get_current_site(request)
for site in maps:
try:
if callable(site):
urls.extend(site().get_urls(page=page, site=current_site))
else:
urls.extend(site.get_urls(page=page, site=current_site))
except EmptyPage:
raise Http404("Page %s empty" % page)
except PageNotAnInteger:
raise Http404("No page '%s'" % page)
xml = smart_str(loader.render_to_string(template_name, {'urlset': urls}))
return HttpResponse(xml, mimetype='application/xml')
| bsd-3-clause |
jazcollins/models | object_detection/utils/label_map_util.py | 2 | 4740 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
import logging
import tensorflow as tf
from google.protobuf import text_format
from object_detection.protos import string_int_label_map_pb2
def _validate_label_map(label_map):
"""Checks if a label map is valid.
Args:
label_map: StringIntLabelMap to validate.
Raises:
ValueError: if label map is invalid.
"""
for item in label_map.item:
if item.id < 1:
raise ValueError('Label map ids should be >= 1.')
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories, but keyed
by the 'id' field of each category.
"""
category_index = {}
for cat in categories:
category_index[cat['id']] = cat
return category_index
def convert_label_map_to_categories(label_map,
max_num_classes,
use_display_name=True):
"""Loads label map proto and returns categories list compatible with eval.
This function loads a label map and returns a list of dicts, each of which
has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
We only allow class into the list if its id-label_id_offset is
between 0 (inclusive) and max_num_classes (exclusive).
If there are several items mapping to the same id in the label map,
we will only keep the first one in the categories list.
Args:
label_map: a StringIntLabelMapProto or None. If None, a default categories
list is created with max_num_classes categories.
max_num_classes: maximum number of (consecutive) label indices to include.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False of if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
categories = []
list_of_ids_already_added = []
if not label_map:
label_id_offset = 1
for class_id in range(max_num_classes):
categories.append({
'id': class_id + label_id_offset,
'name': 'category_{}'.format(class_id + label_id_offset)
})
return categories
for item in label_map.item:
if not 0 < item.id <= max_num_classes:
logging.info('Ignore item %d since it falls outside of requested '
'label range.', item.id)
continue
if use_display_name and item.HasField('display_name'):
name = item.display_name
else:
name = item.name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
categories.append({'id': item.id, 'name': name})
return categories
def load_labelmap(path):
"""Loads label map proto.
Args:
path: path to StringIntLabelMap proto text file.
Returns:
a StringIntLabelMapProto
"""
with tf.gfile.GFile(path, 'r') as fid:
label_map_string = fid.read()
label_map = string_int_label_map_pb2.StringIntLabelMap()
try:
text_format.Merge(label_map_string, label_map)
except text_format.ParseError:
label_map.ParseFromString(label_map_string)
_validate_label_map(label_map)
return label_map
def get_label_map_dict(label_map_path):
"""Reads a label map and returns a dictionary of label names to id.
Args:
label_map_path: path to label_map.
Returns:
A dictionary mapping label names to id.
"""
label_map = load_labelmap(label_map_path)
label_map_dict = {}
for item in label_map.item:
label_map_dict[item.name] = item.id
return label_map_dict
| apache-2.0 |
mavenlin/tensorflow | tensorflow/contrib/bayesflow/python/ops/monte_carlo.py | 102 | 1304 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monte Carlo integration and helpers.
See the @{$python/contrib.bayesflow.monte_carlo} guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.bayesflow.python.ops.monte_carlo_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'expectation',
'expectation_importance_sampler',
'expectation_importance_sampler_logspace',
]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
kailash-cd/CustomAdCreator | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py | 2779 | 1665 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)
| gpl-3.0 |
laura-dietz/taia-stream-eval | code/utils.py | 1 | 1464 | epochsPerWeek = int(6.048E5)
epochsPerDay = 86400
epochsPerInterval = epochsPerWeek
# evalTR = 1325376000 # this is the old eval time range, Jan 1st
evalTR = 1326334731 # this is the new eval time range, J1
evalTRend = 1338508800
def epochsToDate(d):
return (d - evalTR) / epochsPerDay
weekStarts = range(evalTR, evalTRend, epochsPerWeek)
dayStarts = range(evalTR, evalTRend, epochsPerDay)
allStarts = [evalTR]
def intervalRange(epochsPerInterval):
starts = xrange(evalTR, evalTRend, epochsPerInterval)
intervalList = [(start, start + epochsPerInterval) for start in starts]
return intervalList
def correctWeighting(values, posData, totalposvalues, numberOfIntervals):
return correctedWeightingMultiMix(values, posData, totalposvalues, numberOfIntervals)
def correctedWeightingGeoMean(values, posData, totalposvalues, numberOfIntervals):
correctedWeighting = values ** (posData/totalposvalues*numberOfIntervals)
return correctedWeighting
def correctedWeightingMultiMix(values, posData, totalposvalues, numberOfIntervals):
correctedWeighting = posData/totalposvalues*numberOfIntervals*values
return correctedWeighting
def correctedWeightingUnif(values, posData, totalposvalues, numberOfIntervals):
return values
def renameMetric(metric):
if metric=='Prec@R': return 'R-prec'
elif metric=='correctedAUC': return 'ROC-AUC'
elif metric == 'nDCG@R': return 'NDCG@R'
else: return metric
| apache-2.0 |
rrampage/rethinkdb | test/rql_test/connections/http_support/flask/testsuite/reqctx.py | 557 | 5960 | # -*- coding: utf-8 -*-
"""
flask.testsuite.reqctx
~~~~~~~~~~~~~~~~~~~~~~
Tests the request context.
:copyright: (c) 2012 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
try:
from greenlet import greenlet
except ImportError:
greenlet = None
from flask.testsuite import FlaskTestCase
class RequestContextTestCase(FlaskTestCase):
def test_teardown_on_pop(self):
buffer = []
app = flask.Flask(__name__)
@app.teardown_request
def end_of_request(exception):
buffer.append(exception)
ctx = app.test_request_context()
ctx.push()
self.assert_equal(buffer, [])
ctx.pop()
self.assert_equal(buffer, [None])
def test_proper_test_request_context(self):
app = flask.Flask(__name__)
app.config.update(
SERVER_NAME='localhost.localdomain:5000'
)
@app.route('/')
def index():
return None
@app.route('/', subdomain='foo')
def sub():
return None
with app.test_request_context('/'):
self.assert_equal(flask.url_for('index', _external=True), 'http://localhost.localdomain:5000/')
with app.test_request_context('/'):
self.assert_equal(flask.url_for('sub', _external=True), 'http://foo.localhost.localdomain:5000/')
try:
with app.test_request_context('/', environ_overrides={'HTTP_HOST': 'localhost'}):
pass
except Exception as e:
self.assert_true(isinstance(e, ValueError))
self.assert_equal(str(e), "the server name provided " +
"('localhost.localdomain:5000') does not match the " + \
"server name from the WSGI environment ('localhost')")
try:
app.config.update(SERVER_NAME='localhost')
with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost'}):
pass
except ValueError as e:
raise ValueError(
"No ValueError exception should have been raised \"%s\"" % e
)
try:
app.config.update(SERVER_NAME='localhost:80')
with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost:80'}):
pass
except ValueError as e:
raise ValueError(
"No ValueError exception should have been raised \"%s\"" % e
)
def test_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return 'Hello %s!' % flask.request.args['name']
@app.route('/meh')
def meh():
return flask.request.url
with app.test_request_context('/?name=World'):
self.assert_equal(index(), 'Hello World!')
with app.test_request_context('/meh'):
self.assert_equal(meh(), 'http://localhost/meh')
self.assert_true(flask._request_ctx_stack.top is None)
def test_context_test(self):
app = flask.Flask(__name__)
self.assert_false(flask.request)
self.assert_false(flask.has_request_context())
ctx = app.test_request_context()
ctx.push()
try:
self.assert_true(flask.request)
self.assert_true(flask.has_request_context())
finally:
ctx.pop()
def test_manual_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return 'Hello %s!' % flask.request.args['name']
ctx = app.test_request_context('/?name=World')
ctx.push()
self.assert_equal(index(), 'Hello World!')
ctx.pop()
try:
index()
except RuntimeError:
pass
else:
self.assert_true(0, 'expected runtime error')
def test_greenlet_context_copying(self):
app = flask.Flask(__name__)
greenlets = []
@app.route('/')
def index():
reqctx = flask._request_ctx_stack.top.copy()
def g():
self.assert_false(flask.request)
self.assert_false(flask.current_app)
with reqctx:
self.assert_true(flask.request)
self.assert_equal(flask.current_app, app)
self.assert_equal(flask.request.path, '/')
self.assert_equal(flask.request.args['foo'], 'bar')
self.assert_false(flask.request)
return 42
greenlets.append(greenlet(g))
return 'Hello World!'
rv = app.test_client().get('/?foo=bar')
self.assert_equal(rv.data, b'Hello World!')
result = greenlets[0].run()
self.assert_equal(result, 42)
def test_greenlet_context_copying_api(self):
app = flask.Flask(__name__)
greenlets = []
@app.route('/')
def index():
reqctx = flask._request_ctx_stack.top.copy()
@flask.copy_current_request_context
def g():
self.assert_true(flask.request)
self.assert_equal(flask.current_app, app)
self.assert_equal(flask.request.path, '/')
self.assert_equal(flask.request.args['foo'], 'bar')
return 42
greenlets.append(greenlet(g))
return 'Hello World!'
rv = app.test_client().get('/?foo=bar')
self.assert_equal(rv.data, b'Hello World!')
result = greenlets[0].run()
self.assert_equal(result, 42)
# Disable test if we don't have greenlets available
if greenlet is None:
test_greenlet_context_copying = None
test_greenlet_context_copying_api = None
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(RequestContextTestCase))
return suite
| agpl-3.0 |
mSenyor/sl4a | python/src/Tools/scripts/nm2def.py | 94 | 2444 | #! /usr/bin/env python
"""nm2def.py
Helpers to extract symbols from Unix libs and auto-generate
Windows definition files from them. Depends on nm(1). Tested
on Linux and Solaris only (-p option to nm is for Solaris only).
By Marc-Andre Lemburg, Aug 1998.
Additional notes: the output of nm is supposed to look like this:
acceler.o:
000001fd T PyGrammar_AddAccelerators
U PyGrammar_FindDFA
00000237 T PyGrammar_RemoveAccelerators
U _IO_stderr_
U exit
U fprintf
U free
U malloc
U printf
grammar1.o:
00000000 T PyGrammar_FindDFA
00000034 T PyGrammar_LabelRepr
U _PyParser_TokenNames
U abort
U printf
U sprintf
...
Even if this isn't the default output of your nm, there is generally an
option to produce this format (since it is the original v7 Unix format).
"""
import os, sys
PYTHONLIB = 'libpython'+sys.version[:3]+'.a'
PC_PYTHONLIB = 'Python'+sys.version[0]+sys.version[2]+'.dll'
NM = 'nm -p -g %s' # For Linux, use "nm -g %s"
def symbols(lib=PYTHONLIB,types=('T','C','D')):
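# Run nm on the library and parse its output into {name: (address, type)}, skipping the per-object-file header lines and keeping only the requested symbol types (text/common/data by default).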
lines = os.popen(NM % lib).readlines()
lines = [s.strip() for s in lines]
symbols = {}
for line in lines:
if len(line) == 0 or ':' in line:
continue
items = line.split()
if len(items) != 3:
continue
address, type, name = items
if type not in types:
continue
symbols[name] = address,type
return symbols
def export_list(symbols):
data = []
code = []
for name,(addr,type) in symbols.items():
if type in ('C','D'):
data.append('\t'+name)
else:
code.append('\t'+name)
data.sort()
data.append('')
code.sort()
return ' DATA\n'.join(data)+'\n'+'\n'.join(code)
# Definition file template
DEF_TEMPLATE = """\
EXPORTS
%s
"""
# Special symbols that have to be included even though they don't
# pass the filter
SPECIALS = (
)
def filter_Python(symbols,specials=SPECIALS):
for name in symbols.keys():
if name[:2] == 'Py' or name[:3] == '_Py':
pass
elif name not in specials:
del symbols[name]
def main():
s = symbols(PYTHONLIB)
filter_Python(s)
exports = export_list(s)
f = sys.stdout # open('PC/python_nt.def','w')
f.write(DEF_TEMPLATE % (exports))
f.close()
if __name__ == '__main__':
main()
| apache-2.0 |
lach76/scancode-toolkit | setup.py | 9 | 3246 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, print_function
import io
import os
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
long_description = '%s\n%s' % (
read('README.rst'),
re.sub(':obj:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
)
setup(
name='scancode-toolkit',
version='1.5.0',
license='Apache-2.0 with ScanCode acknowledgment and CC0-1.0 and others',
description='ScanCode is a tool to scan code for license, copyright and other interesting facts.',
long_description=long_description,
author='ScanCode',
author_email='[email protected]',
url='https://github.com/nexB/scancode-toolkit',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'License :: OSI Approved :: CC0',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Utilities',
],
keywords=[
'license', 'filetype', 'urn', 'date', 'codec',
],
install_requires=[
# cluecode
'py2-ipaddress >= 2.0, <3.0',
'url >= 0.1.4',
'publicsuffix2',
# TODO: upgrade to nltk==3.0.1
'nltk >= 2.0b4, <3.0.0',
# extractcode
'patch >= 1.14.2, < 1.15 ',
# to work around bug http://bugs.python.org/issue19839
# on multistream bzip2 files
'bz2file >= 0.98',
# licensedcode
'PyYAML >= 3.0, <4.0',
# textcode
'Beautifulsoup >= 3.2.0, <4.0.0',
'Beautifulsoup4 >= 4.3.0, <5.0.0',
'html5lib',
'six',
# typecode and textcode
'pygments >= 2.0.0, <3.0.0',
'pdfminer >= 20140328',
# typecode
'chardet >= 2.1.1, <3.0.0',
'binaryornot >= 0.4.0',
# scancode and AboutCode
'click >= 4.0.0, < 5.0.0',
'jinja2 >= 2.7.0, < 3.0.0',
'MarkupSafe >= 0.23',
'colorama',
# AboutCode
'about-code-tool >= 0.9.0',
# packagedcode
'requests >= 2.7.0, < 3.0.0',
],
extras_require={
'base': [
'certifi',
'setuptools',
'wheel',
'pip',
'wincertstore',
],
'dev': [
'pytest',
'execnet',
'py',
'pytest-xdist',
'bumpversion',
],
},
entry_points={
'console_scripts': [
'scancode = scancode.cli:scancode',
'extractcode = scancode.extract_cli:extractcode',
],
},
)
| apache-2.0 |
pgmillon/ansible | test/units/module_utils/basic/test_heuristic_log_sanitize.py | 56 | 3727 | # -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
from units.compat import unittest
from ansible.module_utils.basic import heuristic_log_sanitize
class TestHeuristicLogSanitize(unittest.TestCase):
def setUp(self):
self.URL_SECRET = 'http://username:pas:[email protected]/data'
self.SSH_SECRET = 'username:pas:[email protected]/data'
self.clean_data = repr(self._gen_data(3, True, True, 'no_secret_here'))
self.url_data = repr(self._gen_data(3, True, True, self.URL_SECRET))
self.ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET))
def _gen_data(self, records, per_rec, top_level, secret_text):
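# Build a nested hostvars structure of fake facts, optionally planting the secret text in every host record and/or at the top level.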
hostvars = {'hostvars': {}}
for i in range(1, records, 1):
host_facts = {
'host%s' % i: {
'pstack': {
'running': '875.1',
'symlinked': '880.0',
'tars': [],
'versions': ['885.0']
},
}
}
if per_rec:
host_facts['host%s' % i]['secret'] = secret_text
hostvars['hostvars'].update(host_facts)
if top_level:
hostvars['secret'] = secret_text
return hostvars
def test_did_not_hide_too_much(self):
self.assertEquals(heuristic_log_sanitize(self.clean_data), self.clean_data)
def test_hides_url_secrets(self):
url_output = heuristic_log_sanitize(self.url_data)
# Basic functionality: Successfully hid the password
self.assertNotIn('pas:word', url_output)
# Slightly more advanced, we hid all of the password despite the ":"
self.assertNotIn('pas', url_output)
# In this implementation we replace the password with 8 "*" which is
# also the length of our password. The url fields should be able to
# accurately detect where the password ends so the length should be
# the same:
self.assertEqual(len(url_output), len(self.url_data))
def test_hides_ssh_secrets(self):
ssh_output = heuristic_log_sanitize(self.ssh_data)
self.assertNotIn('pas:word', ssh_output)
# Slightly more advanced, we hid all of the password despite the ":"
self.assertNotIn('pas', ssh_output)
# ssh checking is harder as the heuristic is overzealous in many
# cases. Since the input will have at least one ":" present before
# the password we can tell some things about the beginning and end of
# the data, though:
self.assertTrue(ssh_output.startswith("{'"))
self.assertTrue(ssh_output.endswith("}"))
self.assertIn(":********@foo.com/data'", ssh_output)
def test_hides_parameter_secrets(self):
output = heuristic_log_sanitize('token="secret", user="person", token_entry="test=secret"', frozenset(['secret']))
self.assertNotIn('secret', output)
| gpl-3.0 |
robhudson/kuma | vendor/packages/logilab/astng/exceptions.py | 27 | 1650 | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""this module contains exceptions used in the astng library
"""
__doctype__ = "restructuredtext en"
class ASTNGError(Exception):
"""base exception class for all astng related exceptions"""
class ASTNGBuildingException(ASTNGError):
"""exception class when we are unable to build an astng representation"""
class ResolveError(ASTNGError):
"""base class of astng resolution/inference error"""
class NotFoundError(ResolveError):
"""raised when we are unable to resolve a name"""
class InferenceError(ResolveError):
"""raised when we are unable to infer a node"""
class UnresolvableName(InferenceError):
"""raised when we are unable to resolve a name"""
class NoDefault(ASTNGError):
"""raised by function's `default_value` method when an argument has
no default value
"""
| mpl-2.0 |
LiveAsynchronousVisualizedArchitecture/lava | Visualizer/nanogui/docs/exhale.py | 3 | 136915 | # This file is part of exhale: https://github.com/svenevs/exhale
#
# This file was generated on/around (date -Ru):
#
# Fri, 20 Jan 2017 21:14:23 +0000
#
# Copyright (c) 2016, Stephen McDowell
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of exhale nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from breathe.parser.index import parse as breathe_parse
import sys
import re
import os
import itertools
try:
# Python 2 StringIO
from cStringIO import StringIO
except ImportError:
# Python 3 StringIO
from io import StringIO
__all__ = ['generate', 'ExhaleRoot', 'ExhaleNode', 'exclaimError', 'qualifyKind',
'kindAsBreatheDirective', 'specificationsForKind', 'EXHALE_FILE_HEADING',
'EXHALE_SECTION_HEADING', 'EXHALE_SUBSECTION_HEADING']
__name__ = "exhale"
__docformat__ = "reStructuredText"
EXHALE_API_TOCTREE_MAX_DEPTH = 5 # DO NOT EXPOSE
'''
The value used as ``:maxdepth:`` with restructured text ``.. toctree::`` directives.
The default value is 5, as any larger will likely produce errors with a LaTeX build.
Change this value by specifying the proper value to the dictionary passed to the
`generate` function.
'''
EXHALE_API_DOXY_OUTPUT_DIR = "" # DO NOT EXPOSE
'''
The path to the doxygen xml output **directory**, relative to ``conf.py`` (or whichever
file is calling `generate`. This value **must** be set for `generate` to be able to do
anything.
'''
EXHALE_API_DOXYGEN_STRIP_FROM_PATH = None # DO NOT EXPOSE
'''
Accounts for broken STRIP_FROM_PATH handling on RTD.
'''
EXHALE_GENERATE_BREATHE_FILE_DIRECTIVES = False # DO NOT EXPOSE
'''
Currently, Exhale (I...) do not know how to extract the documentation string for a given
file being produced. If True, then the breathe directive (``doxygenfile``) will be
incorporated at the bottom of the file. This will duplicate a lot of information, but
will include the file's description at the beginning.
'''
EXHALE_FILE_HEADING = "=" * 88
''' The restructured text file heading separator (``"=" * 88``). '''
EXHALE_SECTION_HEADING = "-" * 88
''' The restructured text section heading separator (``"-" * 88``). '''
EXHALE_SUBSECTION_HEADING = "*" * 88
''' The restructured text sub-section heading separator (``"*" * 88``).'''
EXHALE_CUSTOM_SPECIFICATIONS_FUNCTION = None # DO NOT EXPOSE
'''
User specified override of `specificationsForKind`. No safety checks are performed for
externally provided functions. Change the functionality of `specificationsForKind` by
specifying a function in the dictionary passed to `generate`.
'''
########################################################################################
#
##
###
####
##### Primary entry point.
####
###
##
#
########################################################################################
def generate(exhaleArgs):
'''
The main entry point to exhale, which parses and generates the full API.
:Parameters:
``exhaleArgs`` (dict)
The dictionary of arguments to configure exhale with. All keys are strings,
and most values should also be strings. See below.
**Required Entries:**
**key**: ``"doxygenIndexXMLPath"`` --- value type: ``str``
The absolute or relative path to where the Doxygen index.xml is. A relative
path must be relative to the file **calling** exhale.
**key**: ``"containmentFolder"`` --- value type: ``str``
The folder the generated API will be created in. If the folder does not exist,
exhale will create the folder. The path can be absolute, or relative to the
file that is **calling** exhale. For example, ``"./generated_api"``.
**key**: ``"rootFileName"`` --- value type: ``str``
The name of the file that **you** will be linking to from your reStructuredText
documents. Do not include the ``containmentFolder`` path in this file name,
exhale will create the file ``"{}/{}".format(containmentFolder, rootFileName)``.
In order for Sphinx to be happy, you should include a ``.rst`` suffix. All of
the generated API uses reStructuredText, and that will not ever change.
For example, if you specify
- ``"containmentFolder" = "./generated_api"``, and
- ``"rootFileName" = "library_root.rst"``
Then exhale will generate the file ``./generated_api/library_root.rst``.
You could include this file in a toctree directive (say in ``index.rst``) with::
        .. toctree::
:maxdepth: 2
generated_api/library_root
Since Sphinx allows for some flexibility (e.g. your primary domain may be using
``.txt`` files), **no error checking will be performed**.
**key**: ``"rootFileTitle"`` --- value type: ``str``
The title to be written at the top of ``rootFileName``, which will appear in
your file including it in the ``toctree`` directive.
**key**: ``"doxygenStripFromPath"`` --- value type: ``str``
When building on Read the Docs, there seem to be issues regarding the Doxygen
variable ``STRIP_FROM_PATH`` when built remotely. That is, it isn't stripped at
all. Provide me with a string path (e.g. ``".."``), and I will strip this for
you for the File nodes being generated. I will use the exact value of
``os.path.abspath("..")`` in the example above, so you can supply either a
relative or absolute path. The File view hierarchy **will** break if you do
not give me a value for this, and therefore I hesitantly require this argument.
The value ``".."`` assumes that ``conf.py`` is in a ``docs/`` or similar folder
exactly one level below the repository's root.
**Additional Options:**
**key**: ``"afterTitleDescription"`` --- value type: ``str``
Properly formatted reStructuredText with **no indentation** to be included
directly after the title. You can use any rst directives or formatting you wish
in this string. I suggest using the ``textwrap`` module, e.g.::
description = textwrap.dedent(\'\'\'
This is a description of the functionality of the library being documented.
.. warning::
Please be advised that this library does not do anything.
\'\'\')
Then you can add ``"afterTitleDescription" = description`` to your dictionary.
**key**: ``"afterBodySummary"`` --- value type: ``str``
Similar to ``afterTitleDescription``, this is a string with reStructuredText
formatting. This will be inserted after the generated API body. The layout
looks something like this::
rootFileTitle
============================================================================
afterTitleDescription (if provided)
[[[ GENERATED API BODY ]]]
afterBodySummary (if provided)
**key**: ``"createTreeView"`` --- value type: ``bool``
For portability, the default value if not specified is ``False``, which will
generate reStructuredText bulleted lists for the Class View and File View
hierarchies. If ``True``, raw html unordered lists will be generated. Please
refer to the *Clickable Hierarchies* subsection of :ref:`usage_advanced_usage`
for more details.
**key**: ``"fullToctreeMaxDepth"`` --- value type: ``int``
Beneath the Class View and File View hierarchies a Full API listing is generated
as there are items that may not appear in the Class View hierarchy, as well as
without this an obscene amount of warnings are generated from Sphinx because
neither view actually uses a ``toctree``, they link directly.
The default value is 5 if not specified, but you may want to give a smaller
value depending on the framework being documented. This value must be greater
than or equal to 1 (this is the value of ``:maxdepth:``).
**key**: ``"appendBreatheFileDirective"`` --- value type: ``bool``
Currently, I do not know how to reliably extract the brief / detailed file
descriptions for a given file node. Therefore, if you have file level
documentation in your project that has meaning, it would otherwise be omitted.
As a temporary patch, if you specify this value as ``True`` then at the bottom
of the file page the full ``doxygenfile`` directive output from Breathe will
        be appended to the file documentation. File level brief and detailed
descriptions will be included, followed by a large amount of duplication. I
hope to remove this value soon, in place of either parsing the xml more
carefully or finding out how to extract this information directly from Breathe.
The default value of this behavior is ``False`` if it is not specified in the
dictionary passed as input for this method. Please refer to the *Customizing
File Pages* subsection of :ref:`usage_customizing_file_pages` for more
information on what the impact of this variable is.
**key**: ``"customSpecificationFunction"`` --- value type: ``function``
The custom specification function to override the default behavior of exhale.
Please refer to the :func:`exhale.specificationsForKind` documentation.
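    **Example Usage:**
    A minimal sketch of a typical invocation from ``conf.py`` (or whichever file is
    calling exhale). Every value below is a placeholder chosen purely for illustration;
    substitute your own paths and title, and add any of the optional entries described
    above as needed::
        import exhale
        exhale.generate({
            "doxygenIndexXMLPath"  : "./doxyoutput/xml/index.xml",
            "containmentFolder"    : "./generated_api",
            "rootFileName"         : "library_root.rst",
            "rootFileTitle"        : "Library API",
            "doxygenStripFromPath" : ".."
        })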
:raises ValueError:
If the required dictionary arguments are not present, or any of the (key, value)
pairs are invalid.
:raises RuntimeError:
If any **fatal** error is caught during the generation of the API.
'''
if type(exhaleArgs) is not dict:
raise ValueError("The type of 'exhaleArgs' must be a dictionary.")
# Gather mandatory input
if "doxygenIndexXMLPath" not in exhaleArgs:
raise ValueError("'doxygenIndexXMLPath' must be present in the arguments to generate.")
try:
global EXHALE_API_DOXY_OUTPUT_DIR
doxygenIndexXMLPath = exhaleArgs["doxygenIndexXMLPath"]
EXHALE_API_DOXY_OUTPUT_DIR = doxygenIndexXMLPath.split("index.xml")[0]
except Exception as e:
raise ValueError("Unable to utilize the provided 'doxygenIndexXMLPath'\n{}".format(e))
if "containmentFolder" not in exhaleArgs:
raise ValueError("'containmentFolder' must be present in the arguments to generate.")
containmentFolder = exhaleArgs["containmentFolder"]
if type(containmentFolder) is not str:
raise ValueError("The type of the value for the key 'containmentFolder' must be a string.")
if "rootFileName" not in exhaleArgs:
raise ValueError("'rootFileName' must be present in the arguments passed to generate.")
rootFileName = exhaleArgs["rootFileName"]
if type(rootFileName) is not str:
raise ValueError("The type of the value for the key 'rootFileName' must be a string.")
if "rootFileTitle" not in exhaleArgs:
raise ValueError("'rootFileTitle' must be present in the arguments passed to generate.")
rootFileTitle = exhaleArgs["rootFileTitle"]
if type(rootFileTitle) is not str:
raise ValueError("The type of the value for the key 'rootFileTitle' must be a string.")
if "doxygenStripFromPath" not in exhaleArgs:
raise ValueError("'doxygenStripFromPath' must be present in the arguments passed to generate.")
doxygenStripFromPath = exhaleArgs["doxygenStripFromPath"]
if type(doxygenStripFromPath) is not str:
raise ValueError("The type of the value for the key 'doxygenStripFromPath' must be a string.")
try:
strip = os.path.abspath(doxygenStripFromPath)
if not os.path.isdir(strip):
raise ValueError("The value for the key 'doxygenStripFromPath' does not appear to be a valid path")
except Exception as e:
raise RuntimeError("Error coordinating the 'doxygenStripFromPath' variable: {}".format(e))
global EXHALE_API_DOXYGEN_STRIP_FROM_PATH
EXHALE_API_DOXYGEN_STRIP_FROM_PATH = strip
# gather the optional configurations
if "afterTitleDescription" in exhaleArgs:
afterTitleDescription = exhaleArgs["afterTitleDescription"]
if type(afterTitleDescription) is not str:
raise ValueError("The type of the value for the key 'afterTitleDescription' must be a string.")
else:
afterTitleDescription = ""
if "afterBodySummary" in exhaleArgs:
afterBodySummary = exhaleArgs["afterBodySummary"]
if type(afterBodySummary) is not str:
raise ValueError("The type of the value for the key 'afterBodySummary' must be a string.")
else:
afterBodySummary = ""
if "createTreeView" in exhaleArgs:
createTreeView = exhaleArgs["createTreeView"]
if type(createTreeView) is not bool:
raise ValueError("The type of the value for the key 'createTreeView' must be a boolean.")
else:
createTreeView = False
if "fullToctreeMaxDepth" in exhaleArgs:
fullToctreeMaxDepth = exhaleArgs["fullToctreeMaxDepth"]
if type(fullToctreeMaxDepth) is not int:
raise ValueError("The type of the value for the key 'fullToctreeMaxDepth' must be an int.")
global EXHALE_API_TOCTREE_MAX_DEPTH
EXHALE_API_TOCTREE_MAX_DEPTH = fullToctreeMaxDepth
if "appendBreatheFileDirective" in exhaleArgs:
appendBreatheFileDirective = exhaleArgs["appendBreatheFileDirective"]
if type(appendBreatheFileDirective) is not bool:
raise ValueError("The type of the value for the key 'appendBreatheFileDirective' must be a boolean.")
global EXHALE_GENERATE_BREATHE_FILE_DIRECTIVES
EXHALE_GENERATE_BREATHE_FILE_DIRECTIVES = appendBreatheFileDirective
if "customSpecificationFunction" in exhaleArgs:
customSpecificationFunction = exhaleArgs["customSpecificationFunction"]
try:
ret = customSpecificationFunction("class")
except:
raise ValueError("Unable to call your custom specification function with 'class' as input...")
if type(ret) is not str:
raise ValueError("Your custom specification function did not return a string...")
global EXHALE_CUSTOM_SPECIFICATIONS_FUNCTION
EXHALE_CUSTOM_SPECIFICATIONS_FUNCTION = customSpecificationFunction
# input gathered, try creating the breathe root compound
try:
breatheRoot = breathe_parse(doxygenIndexXMLPath)
except Exception as e:
raise RuntimeError("Unable to use Breathe to parse the specified doxygen index.xml: {}".format(e))
if breatheRoot is not None:
# split into multiple try-except blocks to make it a little easier to identify
# where the error comes from
try:
textRoot = ExhaleRoot(breatheRoot, containmentFolder, rootFileName,
rootFileTitle, afterTitleDescription,
afterBodySummary, createTreeView)
except Exception as e:
raise RuntimeError("Exception caught creating the ExhaleRoot object: {}".format(e))
try:
textRoot.parse()
except Exception as e:
raise RuntimeError("Exception caught while parsing: {}".format(e))
try:
textRoot.generateFullAPI()
except Exception as e:
raise RuntimeError("Exception caught while generating: {}".format(e))
else:
raise RuntimeError("Critical error: the returned Breathe root is 'None'.")
########################################################################################
#
##
###
####
##### Utility / helper functions.
####
###
##
#
########################################################################################
def qualifyKind(kind):
'''
Qualifies the breathe ``kind`` and returns an qualifier string describing this
to be used for the text output (e.g. in generated file headings and link names).
The output for a given kind is as follows:
    +-------------+------------------+
    | Input Kind  | Output Qualifier |
    +=============+==================+
    | "class"     | "Class"          |
    +-------------+------------------+
    | "define"    | "Define"         |
    +-------------+------------------+
    | "enum"      | "Enum"           |
    +-------------+------------------+
    | "enumvalue" | "Enumvalue"      |
    +-------------+------------------+
    | "file"      | "File"           |
    +-------------+------------------+
    | "function"  | "Function"       |
    +-------------+------------------+
    | "group"     | "Group"          |
    +-------------+------------------+
    | "namespace" | "Namespace"      |
    +-------------+------------------+
    | "struct"    | "Struct"         |
    +-------------+------------------+
    | "typedef"   | "Typedef"        |
    +-------------+------------------+
    | "union"     | "Union"          |
    +-------------+------------------+
    | "variable"  | "Variable"       |
    +-------------+------------------+
The following breathe kinds are ignored:
- "autodoxygenfile"
- "doxygenindex"
- "autodoxygenindex"
Note also that although a return value is generated, neither "enumvalue" nor
"group" are actually used.
:Parameters:
``kind`` (str)
The return value of a Breathe ``compound`` object's ``get_kind()`` method.
:Return (str):
The qualifying string that will be used to build the reStructuredText titles and
other qualifying names. If the empty string is returned then it was not
recognized.
'''
if kind == "class":
qualifier = "Class"
elif kind == "struct":
qualifier = "Struct"
elif kind == "function":
qualifier = "Function"
elif kind == "enum":
qualifier = "Enum"
elif kind == "enumvalue":# unused
qualifier = "Enumvalue"
elif kind == "namespace":
qualifier = "Namespace"
elif kind == "define":
qualifier = "Define"
elif kind == "typedef":
qualifier = "Typedef"
elif kind == "variable":
qualifier = "Variable"
elif kind == "file":
qualifier = "File"
elif kind == "dir":
qualifier = "Directory"
elif kind == "group":
qualifier = "Group"
elif kind == "union":
qualifier = "Union"
else:
qualifier = ""
return qualifier
def kindAsBreatheDirective(kind):
'''
Returns the appropriate breathe restructured text directive for the specified kind.
The output for a given kind is as follows:
    +-------------+--------------------+
    | Input Kind  | Output Directive   |
    +=============+====================+
    | "class"     | "doxygenclass"     |
    +-------------+--------------------+
    | "define"    | "doxygendefine"    |
    +-------------+--------------------+
    | "enum"      | "doxygenenum"      |
    +-------------+--------------------+
    | "enumvalue" | "doxygenenumvalue" |
    +-------------+--------------------+
    | "file"      | "doxygenfile"      |
    +-------------+--------------------+
    | "function"  | "doxygenfunction"  |
    +-------------+--------------------+
    | "group"     | "doxygengroup"     |
    +-------------+--------------------+
    | "namespace" | "doxygennamespace" |
    +-------------+--------------------+
    | "struct"    | "doxygenstruct"    |
    +-------------+--------------------+
    | "typedef"   | "doxygentypedef"   |
    +-------------+--------------------+
    | "union"     | "doxygenunion"     |
    +-------------+--------------------+
    | "variable"  | "doxygenvariable"  |
    +-------------+--------------------+
The following breathe kinds are ignored:
- "autodoxygenfile"
- "doxygenindex"
- "autodoxygenindex"
Note also that although a return value is generated, neither "enumvalue" nor
"group" are actually used.
:Parameters:
``kind`` (str)
The kind of the breathe compound / ExhaleNode object (same values).
:Return (str):
The directive to be used for the given ``kind``. The empty string is returned
for both unrecognized and ignored input values.
'''
if kind == "class":
directive = "doxygenclass"
elif kind == "struct":
directive = "doxygenstruct"
elif kind == "function":
directive = "doxygenfunction"
elif kind == "enum":
directive = "doxygenenum"
elif kind == "enumvalue":# unused
directive = "doxygenenumvalue"
elif kind == "namespace":
directive = "doxygennamespace"
elif kind == "define":
directive = "doxygendefine"
elif kind == "typedef":
directive = "doxygentypedef"
elif kind == "variable":
directive = "doxygenvariable"
elif kind == "file":
directive = "doxygenfile"
elif kind == "union":
directive = "doxygenunion"
elif kind == "group":# unused
directive = "doxygengroup"
else:
directive = ""
return directive
def specificationsForKind(kind):
'''
Returns the relevant modifiers for the restructured text directive associated with
the input kind. The only considered values for the default implementation are
``class`` and ``struct``, for which the return value is exactly::
" :members:\\n :protected-members:\\n :undoc-members:\\n"
Formatting of the return is fundamentally important, it must include both the prior
indentation as well as newlines separating any relevant directive modifiers. The
way the framework uses this function is very specific; if you do not follow the
conventions then sphinx will explode.
Consider a ``struct thing`` being documented. The file generated for this will be::
.. _struct_thing:
Struct thing
================================================================================
.. doxygenstruct:: thing
:members:
:protected-members:
:undoc-members:
Assuming the first two lines will be in a variable called ``link_declaration``, and
the next three lines are stored in ``header``, the following is performed::
directive = ".. {}:: {}\\n".format(kindAsBreatheDirective(node.kind), node.name)
specifications = "{}\\n\\n".format(specificationsForKind(node.kind))
gen_file.write("{}{}{}{}".format(link_declaration, header, directive, specifications))
That is, **no preceding newline** should be returned from your custom function, and
**no trailing newline** is needed. Your indentation for each specifier should be
**exactly three spaces**, and if you want more than one you need a newline in between
every specification you want to include. Whitespace control is handled internally
because many of the directives do not need anything added. For a full listing of
what your specifier options are, refer to the breathe documentation:
http://breathe.readthedocs.io/en/latest/directives.html
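    As a hypothetical sketch only (the function name and specifier choices below are
    illustrative, any valid Breathe specifiers may be used), an override supplied via
    ``customSpecificationFunction`` could look like::
        def customSpecifications(kind):
            # same as the default for classes / structs, but without protected members
            if kind == "class" or kind == "struct":
                return "   :members:\\n   :undoc-members:"
            return ""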
:Parameters:
``kind`` (str)
The kind of the node we are generating the directive specifications for.
:Return (str):
The correctly formatted specifier(s) for the given ``kind``. If no specifier(s)
are necessary or desired, the empty string is returned.
'''
# use the custom directives function
if EXHALE_CUSTOM_SPECIFICATIONS_FUNCTION is not None:
return EXHALE_CUSTOM_SPECIFICATIONS_FUNCTION(kind)
# otherwise, just provide class and struct
if kind == "class" or kind == "struct":
directive = " :members:\n :protected-members:\n :undoc-members:"
else:
directive = ""
return directive
def exclaimError(msg, ansi_fmt="34;1m"):
'''
Prints ``msg`` to the console in color with ``(!)`` prepended in color.
Example (uncolorized) output of ``exclaimError("No leading space needed.")``::
(!) No leading space needed.
All messages are written to ``sys.stderr``, and are closed with ``[0m``. The
default color is blue, but can be changed using ``ansi_fmt``.
    Documentation building has a verbose output process; this just helps distinguish an
error message coming from exhale.
:Parameters:
``msg`` (str)
The message you want printed to standard error.
``ansi_fmt`` (str)
An ansi color format. ``msg`` is printed as
``"\\033[" + ansi_fmt + msg + "\\033[0m\\n``, so you should specify both the
color code and the format code (after the semicolon). The default value is
``34;1m`` --- refer to
http://misc.flogisoft.com/bash/tip_colors_and_formatting for alternatives.
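    For example, using the standard ANSI codes, the following call would print the
    message in bold red rather than the default bold blue::
        exclaimError("Something went wrong", "31;1m")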
'''
sys.stderr.write("\033[{}(!) {}\033[0m\n".format(ansi_fmt, msg))
########################################################################################
#
##
###
####
##### Graph representation.
####
###
##
#
########################################################################################
class ExhaleNode:
'''
A wrapper class to track parental relationships, filenames, etc.
:Parameters:
``breatheCompound`` (breathe.compound)
            The Breathe compound object we will use to gather the name, children, etc.
:Attributes:
``compound`` (breathe.compound)
The compound discovered from breathe that we are going to track.
``kind`` (str)
The string returned by the ``breatheCompound.get_kind()`` method. Used to
qualify this node throughout the framework, as well as for hierarchical
sorting.
``name`` (str)
The string returned by the ``breatheCompound.get_name()`` method. This name
will be fully qualified --- ``class A`` inside of ``namespace n`` will have
a ``name`` of ``n::A``. Files and directories may have ``/`` characters as
well.
``refid`` (str)
The reference ID as created by Doxygen. This will be used to scrape files
and see if a given reference identification number should be associated with
that file or not.
``children`` (list)
A potentially empty list of ``ExhaleNode`` object references that are
considered a child of this Node. Please note that a child reference in any
``children`` list may be stored in **many** other lists. Mutating a given
child will mutate the object, and therefore affect other parents of this
child. Lastly, a node of kind ``enum`` will never have its ``enumvalue``
children as it is impossible to rebuild that relationship without more
Doxygen xml parsing.
``parent`` (ExhaleNode)
If an ExhaleNode is determined to be a child of another ExhaleNode, this
node will be added to its parent's ``children`` list, and a reference to
the parent will be in this field. Initialized to ``None``, make sure you
check that it is an object first.
.. warning::
Do not ever set the ``parent`` of a given node if the would-be parent's
kind is ``"file"``. Doing so will break many important relationships,
such as nested class definitions. Effectively, **every** node will be
added as a child to a file node at some point. The file node will track
this, but the child should not.
The following three member variables are stored internally, but managed
externally by the :class:`exhale.ExhaleRoot` class:
``file_name`` (str)
The name of the file to create. Set to ``None`` on creation, refer to
:func:`exhale.ExhaleRoot.initializeNodeFilenameAndLink`.
``link_name`` (str)
The name of the reStructuredText link that will be at the top of the file.
Set to ``None`` on creation, refer to
:func:`exhale.ExhaleRoot.initializeNodeFilenameAndLink`.
``title`` (str)
The title that will appear at the top of the reStructuredText file
``file_name``. When the reStructuredText document for this node is being
written, the root object will set this field.
The following two fields are used for tracking what has or has not already been
included in the hierarchy views. Things like classes or structs in the global
namespace will not be found by :func:`exhale.ExhaleNode.inClassView`, and the
ExhaleRoot object will need to track which ones were missed.
``in_class_view`` (bool)
Whether or not this node has already been incorporated in the class view.
        ``in_directory_view`` (bool)
Whether or not this node has already been incorporated in the file view.
This class wields duck typing. If ``self.kind == "file"``, then the additional
member variables below exist:
``namespaces_used`` (list)
A list of namespace nodes that are either defined or used in this file.
``includes`` (list)
A list of strings that are parsed from the Doxygen xml for this file as
include directives.
``included_by`` (list)
A list of (refid, name) string tuples that are parsed from the Doxygen xml
for this file presenting all of the other files that include this file.
They are stored this way so that the root class can later link to that file
by its refid.
``location`` (str)
A string parsed from the Doxygen xml for this file stating where this file
is physically in relation to the *Doxygen* root.
``program_listing`` (list)
A list of strings that is the Doxygen xml <programlisting>, without the
opening or closing <programlisting> tags.
``program_file`` (list)
Managed externally by the root similar to ``file_name`` etc, this is the
name of the file that will be created to display the program listing if it
exists. Set to ``None`` on creation, refer to
:func:`exhale.ExhaleRoot.initializeNodeFilenameAndLink`.
``program_link_name`` (str)
Managed externally by the root similar to ``file_name`` etc, this is the
reStructuredText link that will be declared at the top of the
``program_file``. Set to ``None`` on creation, refer to
:func:`exhale.ExhaleRoot.initializeNodeFilenameAndLink`.
'''
def __init__(self, breatheCompound):
self.compound = breatheCompound
self.kind = breatheCompound.get_kind()
self.name = breatheCompound.get_name()
self.refid = breatheCompound.get_refid()
self.children = [] # ExhaleNodes
self.parent = None # if reparented, will be an ExhaleNode
# managed externally
self.file_name = None
self.link_name = None
self.title = None
# representation of hierarchies
self.in_class_view = False
self.in_directory_view = False
# kind-specific additional information
if self.kind == "file":
self.namespaces_used = [] # ExhaleNodes
self.includes = [] # strings
self.included_by = [] # (refid, name) tuples
self.location = ""
self.program_listing = [] # strings
self.program_file = ""
self.program_link_name = ""
def __lt__(self, other):
'''
The ``ExhaleRoot`` class stores a bunch of lists of ``ExhaleNode`` objects.
When these lists are sorted, this method will be called to perform the sorting.
:Parameters:
``other`` (ExhaleNode)
The node we are comparing whether ``self`` is less than or not.
:Return (bool):
True if ``self`` is less than ``other``, False otherwise.
'''
# allows alphabetical sorting within types
if self.kind == other.kind:
return self.name.lower() < other.name.lower()
# treat structs and classes as the same type
elif self.kind == "struct" or self.kind == "class":
if other.kind != "struct" and other.kind != "class":
return True
else:
if self.kind == "struct" and other.kind == "class":
return True
elif self.kind == "class" and other.kind == "struct":
return False
else:
return self.name < other.name
# otherwise, sort based off the kind
else:
return self.kind < other.kind
def findNestedNamespaces(self, lst):
'''
Recursive helper function for finding nested namespaces. If this node is a
namespace node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedNamespaces`` with the same list.
:Parameters:
``lst`` (list)
The list each namespace node is to be appended to.
'''
if self.kind == "namespace":
lst.append(self)
for c in self.children:
c.findNestedNamespaces(lst)
def findNestedDirectories(self, lst):
'''
Recursive helper function for finding nested directories. If this node is a
directory node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedDirectories`` with the same list.
:Parameters:
``lst`` (list)
The list each directory node is to be appended to.
'''
if self.kind == "dir":
lst.append(self)
for c in self.children:
c.findNestedDirectories(lst)
def findNestedClassLike(self, lst):
'''
Recursive helper function for finding nested classes and structs. If this node
is a class or struct, it is appended to ``lst``. Each node also calls each of
its child ``findNestedClassLike`` with the same list.
:Parameters:
``lst`` (list)
The list each class or struct node is to be appended to.
'''
if self.kind == "class" or self.kind == "struct":
lst.append(self)
for c in self.children:
c.findNestedClassLike(lst)
def findNestedEnums(self, lst):
'''
Recursive helper function for finding nested enums. If this node is a class or
struct it may have had an enum added to its child list. When this occurred, the
enum was removed from ``self.enums`` in the :class:`exhale.ExhaleRoot` class and
needs to be rediscovered by calling this method on all of its children. If this
node is an enum, it is because a parent class or struct called this method, in
which case it is added to ``lst``.
**Note**: this is used slightly differently than nested directories, namespaces,
and classes will be. Refer to :func:`exhale.ExhaleRoot.generateNodeDocuments`
function for details.
:Parameters:
``lst`` (list)
The list each enum is to be appended to.
'''
if self.kind == "enum":
lst.append(self)
for c in self.children:
c.findNestedEnums(lst)
def findNestedUnions(self, lst):
'''
Recursive helper function for finding nested unions. If this node is a class or
struct it may have had a union added to its child list. When this occurred, the
union was removed from ``self.unions`` in the :class:`exhale.ExhaleRoot` class
and needs to be rediscovered by calling this method on all of its children. If
this node is a union, it is because a parent class or struct called this method,
in which case it is added to ``lst``.
**Note**: this is used slightly differently than nested directories, namespaces,
and classes will be. Refer to :func:`exhale.ExhaleRoot.generateNodeDocuments`
function for details.
:Parameters:
``lst`` (list)
The list each union is to be appended to.
'''
if self.kind == "union":
lst.append(self)
for c in self.children:
c.findNestedUnions(lst)
def toConsole(self, level, printChildren=True):
'''
Debugging tool for printing hierarchies / ownership to the console. Recursively
calls children ``toConsole`` if this node is not a directory or a file, and
``printChildren == True``.
:Parameters:
``level`` (int)
The indentation level to be used, should be greater than or equal to 0.
``printChildren`` (bool)
Whether or not the ``toConsole`` method for the children found in
``self.children`` should be called with ``level+1``. Default is True,
set to False for directories and files.
'''
indent = " " * level
print("{}- [{}]: {}".format(indent, self.kind, self.name))
# files are children of directories, the file section will print those children
if self.kind == "dir":
for c in self.children:
c.toConsole(level + 1, printChildren=False)
elif printChildren:
if self.kind == "file":
print("{}[[[ location=\"{}\" ]]]".format(" " * (level + 1), self.location))
for i in self.includes:
print("{}- #include <{}>".format(" " * (level + 1), i))
for ref, name in self.included_by:
print("{}- included by: [{}]".format(" " * (level + 1), name))
for n in self.namespaces_used:
n.toConsole(level + 1, printChildren=False)
for c in self.children:
c.toConsole(level + 1)
elif self.kind == "class" or self.kind == "struct":
relevant_children = []
for c in self.children:
if c.kind == "class" or c.kind == "struct" or \
c.kind == "enum" or c.kind == "union":
relevant_children.append(c)
for rc in sorted(relevant_children):
rc.toConsole(level + 1)
elif self.kind != "union":
for c in self.children:
c.toConsole(level + 1)
def typeSort(self):
'''
Sorts ``self.children`` in place, and has each child sort its own children.
Refer to :func:`exhale.ExhaleRoot.deepSortList` for more information on when
this is necessary.
'''
self.children.sort()
for c in self.children:
c.typeSort()
def inClassView(self):
'''
Whether or not this node should be included in the class view hierarchy. Helper
method for :func:`exhale.ExhaleNode.toClassView`. Sets the member variable
``self.in_class_view`` to True if appropriate.
:Return (bool):
True if this node should be included in the class view --- either it is a
node of kind ``struct``, ``class``, ``enum``, ``union``, or it is a
            ``namespace`` that one or more of its descendants was one of the previous
four kinds. Returns False otherwise.
'''
if self.kind == "namespace":
for c in self.children:
if c.inClassView():
return True
return False
else:
# flag that this node is already in the class view so we can find the
# missing top level nodes at the end
self.in_class_view = True
return self.kind == "struct" or self.kind == "class" or \
self.kind == "enum" or self.kind == "union"
def toClassView(self, level, stream, treeView, lastChild=False):
'''
Recursively generates the class view hierarchy using this node and its children,
if it is determined by :func:`exhale.ExhaleNode.inClassView` that this node
should be included.
:Parameters:
``level`` (int)
An integer greater than or equal to 0 representing the indentation level
for this node.
``stream`` (StringIO)
The stream that is being written to by all of the nodes (created and
destroyed by the ExhaleRoot object).
``treeView`` (bool)
If False, standard reStructuredText bulleted lists will be written to
the ``stream``. If True, then raw html unordered lists will be written
to the ``stream``.
``lastChild`` (bool)
When ``treeView == True``, the unordered lists generated need to have
an <li class="lastChild"> tag on the last child for the
``collapsibleList`` to work correctly. The default value of this
parameter is False, and should only ever be set to True internally by
recursive calls to this method.
'''
has_nested_children = False
if self.inClassView():
if not treeView:
stream.write("{}- :ref:`{}`\n".format(' ' * level, self.link_name))
else:
indent = ' ' * (level * 2)
if lastChild:
opening_li = '<li class="lastChild">'
else:
opening_li = '<li>'
# turn double underscores into underscores, then underscores into hyphens
html_link = self.link_name.replace("__", "_").replace("_", "-")
# should always have at least two parts (templates will have more)
title_as_link_parts = self.title.split(" ")
qualifier = title_as_link_parts[0]
link_title = " ".join(title_as_link_parts[1:])
                link_title = link_title.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
html_link = '{} <a href="{}.html#{}">{}</a>'.format(qualifier,
self.file_name.split('.rst')[0],
html_link,
link_title)
# search for nested children to display as sub-items in the tree view
if self.kind == "class" or self.kind == "struct":
nested_enums = []
nested_unions = []
nested_class_like = []
# important: only scan self.children, do not use recursive findNested* methods
for c in self.children:
if c.kind == "enum":
nested_enums.append(c)
elif c.kind == "union":
nested_unions.append(c)
elif c.kind == "struct" or c.kind == "class":
nested_class_like.append(c)
has_nested_children = nested_enums or nested_unions or nested_class_like # <3 Python
# if there are sub children, there needs to be a new html list generated
if self.kind == "namespace" or has_nested_children:
next_indent = ' {}'.format(indent)
stream.write('{}{}\n{}{}\n{}<ul>\n'.format(indent, opening_li,
next_indent, html_link,
next_indent))
else:
stream.write('{}{}{}</li>\n'.format(indent, opening_li, html_link))
# include the relevant children (class like or nested namespaces / classes)
if self.kind == "namespace":
# pre-process and find everything that is relevant
kids = []
nspaces = []
for c in self.children:
if c.inClassView():
if c.kind == "namespace":
nspaces.append(c)
else:
kids.append(c)
# always put nested namespaces last; parent dictates to the child if
# they are the last child being printed
kids.sort()
num_kids = len(kids)
nspaces.sort()
num_nspaces = len(nspaces)
last_child_index = num_kids + num_nspaces - 1
child_idx = 0
# first all of the child class like, then any nested namespaces
for node in itertools.chain(kids, nspaces):
node.toClassView(level + 1, stream, treeView, child_idx == last_child_index)
child_idx += 1
                # now that all of the children have been written, close the tags
if treeView:
stream.write(" {}</ul>\n{}</li>\n".format(indent, indent))
# current node is a class or struct with nested children
elif has_nested_children:
nested_class_like.sort()
num_class_like = len(nested_class_like)
nested_enums.sort()
num_enums = len(nested_enums)
nested_unions.sort()
num_unions = len(nested_unions)
last_child_index = num_class_like + num_enums + num_unions - 1
child_idx = 0
# first all of the classes / structs, then enums, then unions
for node in itertools.chain(nested_class_like, nested_enums, nested_unions):
node.toClassView(level + 1, stream, treeView, child_idx == last_child_index)
child_idx += 1
                # now that all of the children have been written, close the tags
if treeView:
stream.write(" {}</ul>\n{}</li>\n".format(indent, indent))
def inDirectoryView(self):
'''
Whether or not this node should be included in the file view hierarchy. Helper
method for :func:`exhale.ExhaleNode.toDirectoryView`. Sets the member variable
``self.in_directory_view`` to True if appropriate.
:Return (bool):
True if this node should be included in the file view --- either it is a
            node of kind ``file``, or it is a ``dir`` that one or more of its
descendants was a ``file``. Returns False otherwise.
'''
if self.kind == "file":
# flag that this file is already in the directory view so that potential
# missing files can be found later.
self.in_directory_view = True
return True
elif self.kind == "dir":
for c in self.children:
if c.inDirectoryView():
return True
return False
def toDirectoryView(self, level, stream, treeView, lastChild=False):
'''
Recursively generates the file view hierarchy using this node and its children,
if it is determined by :func:`exhale.ExhaleNode.inDirectoryView` that this node
should be included.
:Parameters:
``level`` (int)
An integer greater than or equal to 0 representing the indentation level
for this node.
``stream`` (StringIO)
The stream that is being written to by all of the nodes (created and
destroyed by the ExhaleRoot object).
``treeView`` (bool)
If False, standard reStructuredText bulleted lists will be written to
the ``stream``. If True, then raw html unordered lists will be written
to the ``stream``.
``lastChild`` (bool)
When ``treeView == True``, the unordered lists generated need to have
an <li class="lastChild"> tag on the last child for the
``collapsibleList`` to work correctly. The default value of this
parameter is False, and should only ever be set to True internally by
recursive calls to this method.
'''
if self.inDirectoryView():
if not treeView:
stream.write("{}- :ref:`{}`\n".format(' ' * level, self.link_name))
else:
indent = ' ' * (level * 2)
if lastChild:
opening_li = '<li class="lastChild">'
else:
opening_li = '<li>'
# turn double underscores into underscores, then underscores into hyphens
html_link = self.link_name.replace("__", "_").replace("_", "-")
# should always have at least two parts (templates will have more)
title_as_link_parts = self.title.split(" ")
qualifier = title_as_link_parts[0]
link_title = " ".join(title_as_link_parts[1:])
                link_title = link_title.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
html_link = '{} <a href="{}.html#{}">{}</a>'.format(qualifier,
self.file_name.split('.rst')[0],
html_link,
link_title)
if self.kind == "dir":
next_indent = ' {}'.format(indent)
stream.write('{}{}\n{}{}\n{}<ul>\n'.format(indent, opening_li,
next_indent, html_link,
next_indent))
else:
stream.write('{}{}{}</li>\n'.format(indent, opening_li, html_link))
            # include the relevant children (files or nested directories)
if self.kind == "dir":
# pre-process and find everything that is relevant
kids = []
dirs = []
for c in self.children:
if c.inDirectoryView():
if c.kind == "dir":
dirs.append(c)
elif c.kind == "file":
kids.append(c)
                # always put nested directories last; parent dictates to the child if
# they are the last child being printed
kids.sort()
num_kids = len(kids)
dirs.sort()
num_dirs = len(dirs)
last_child_index = num_kids + num_dirs - 1
child_idx = 0
for k in kids:
k.toDirectoryView(level + 1, stream, treeView, child_idx == last_child_index)
child_idx += 1
for n in dirs:
n.toDirectoryView(level + 1, stream, treeView, child_idx == last_child_index)
child_idx += 1
                # now that all of the children have been written, close the tags
if treeView:
stream.write(" {}</ul>\n{}</li>\n".format(indent, indent))
class ExhaleRoot:
'''
The full representation of the hierarchy graphs. In addition to containing specific
lists of ExhaleNodes of interest, the ExhaleRoot class is responsible for comparing
the parsed breathe hierarchy and rebuilding lost relationships using the Doxygen
xml files. Once the graph parsing has finished, the ExhaleRoot generates all of the
relevant reStructuredText documents and links them together.
The ExhaleRoot class is not designed for reuse at this time. If you want to
generate a new hierarchy with a different directory or something, changing all of
the right fields may be difficult and / or unsuccessful. Refer to the bottom of the
source code for :func:`exhale.generate` for safe usage (just exception handling),
but the design of this class is to be used as follows:
.. code-block:: py
textRoot = ExhaleRoot(... args ...)
textRoot.parse()
textRoot.generateFullAPI()
Zero checks are in place to enforce this usage, and if you are modifying the
execution of this class and things are not working make sure you follow the ordering
of those methods.
:Parameters:
``breatheRoot`` (instance)
Type unknown, this is the return value of ``breathe.breathe_parse``.
``rootDirectory`` (str)
The name of the root directory to put everything in. This should be the
value of the key ``containmentFolder`` in the dictionary passed to
:func:`exhale.generate`.
``rootFileName`` (str)
The name of the file the root library api will be put into. This should not
contain the ``rootDirectory`` path. This should be the value of the key
``rootFileName`` in the dictionary passed to :func:`exhale.generate`.
``rootFileTitle`` (str)
The title to be written to the top of ``rootFileName``. This should be the
value of the key ``rootFileTitle`` in the dictionary passed to
:func:`exhale.generate`.
``rootFileDescription`` (str)
The description of the library api file placed after ``rootFileTitle``.
This should be the value of the key ``afterTitleDescription`` in the
dictionary passed to :func:`exhale.generate`.
``rootFileSummary`` (str)
The summary of the library api placed after the generated hierarchy views.
This should be the value of the key ``afterBodySummary`` in the dictionary
passed to :func:`exhale.generate`.
``createTreeView`` (bool)
Creates the raw html unordered lists for use with ``collapsibleList`` if
True. Otherwise, creates standard reStructuredText bulleted lists. Should
be the value of the key ``createTreeView`` in the dictionary passed to
:func:`exhale.generate`.
:Attributes:
``breathe_root`` (instance)
The value of the parameter ``breatheRoot``.
``root_directory`` (str)
The value of the parameter ``rootDirectory``.
``root_file_name`` (str)
The value of the parameter ``rootFileName``.
``full_root_file_path`` (str)
The full file path of the root file (``"root_directory/root_file_name"``).
``root_file_title`` (str)
The value of the parameter ``rootFileTitle``.
``root_file_description`` (str)
The value of the parameter ``rootFileDescription``.
``root_file_summary`` (str)
The value of the parameter ``rootFileSummary``.
``class_view_file`` (str)
The full file path the class view hierarchy will be written to. This is
incorporated into ``root_file_name`` using an ``.. include:`` directive.
``directory_view_file`` (str)
The full file path the file view hierarchy will be written to. This is
incorporated into ``root_file_name`` using an ``.. include:`` directive.
``unabridged_api_file`` (str)
The full file path the full API will be written to. This is incorporated
into ``root_file_name`` using a ``.. toctree:`` directive with a
``:maxdepth:`` according to the value of the key ``fullToctreeMaxDepth``
in the dictionary passed into :func:`exhale.generate`.
``use_tree_view`` (bool)
The value of the parameter ``createTreeView``.
``all_compounds`` (list)
A list of all the Breathe compound objects discovered along the way.
Populated during :func:`exhale.ExhaleRoot.discoverAllNodes`.
``all_nodes`` (list)
A list of all of the ExhaleNode objects created. Populated during
:func:`exhale.ExhaleRoot.discoverAllNodes`.
``node_by_refid`` (dict)
A dictionary with string ExhaleNode ``refid`` values, and values that are the
ExhaleNode it came from. Storing it this way is convenient for when the
Doxygen xml file is being parsed.
``class_like`` (list)
The full list of ExhaleNodes of kind ``struct`` or ``class``
``defines`` (list)
The full list of ExhaleNodes of kind ``define``.
``enums`` (list)
The full list of ExhaleNodes of kind ``enum``.
``enum_values`` (list)
The full list of ExhaleNodes of kind ``enumvalue``. Populated, not used.
``functions`` (list)
The full list of ExhaleNodes of kind ``function``.
``dirs`` (list)
The full list of ExhaleNodes of kind ``dir``.
``files`` (list)
The full list of ExhaleNodes of kind ``file``.
``groups`` (list)
            The full list of ExhaleNodes of kind ``group``. Populated, not used.
``namespaces`` (list)
The full list of ExhaleNodes of kind ``namespace``.
``typedefs`` (list)
The full list of ExhaleNodes of kind ``typedef``.
``unions`` (list)
The full list of ExhaleNodes of kind ``union``.
``variables`` (list)
The full list of ExhaleNodes of kind ``variable``.
'''
def __init__(self, breatheRoot, rootDirectory, rootFileName, rootFileTitle,
rootFileDescription, rootFileSummary, createTreeView):
# the Breathe root object (main entry point to Breathe graph)
self.breathe_root = breatheRoot
# file generation location and root index data
self.root_directory = rootDirectory
self.root_file_name = rootFileName
self.full_root_file_path = "{}/{}".format(self.root_directory, self.root_file_name)
self.root_file_title = rootFileTitle
self.root_file_description = rootFileDescription
self.root_file_summary = rootFileSummary
self.class_view_file = "{}.rst".format(
self.full_root_file_path.replace(self.root_file_name, "class_view_hierarchy")
)
self.directory_view_file = "{}.rst".format(
self.full_root_file_path.replace(self.root_file_name, "directory_view_hierarchy")
)
self.unabridged_api_file = "{}.rst".format(
self.full_root_file_path.replace(self.root_file_name, "unabridged_api")
)
# whether or not we should generate the raw html tree view
self.use_tree_view = createTreeView
# track all compounds (from Breathe) to build all nodes (ExhaleNodes)
self.all_compounds = [self.breathe_root.get_compound()]
self.all_nodes = []
# convenience lookup: keys are string Doxygen refid's, values are ExhaleNodes
self.node_by_refid = {}
# breathe directive breathe kind
#--------------------+----------------+
# autodoxygenfile <-+-> IGNORE |
# doxygenindex <-+-> IGNORE |
# autodoxygenindex <-+-> IGNORE |
#--------------------+----------------+
# doxygenclass <-+-> "class" |
# doxygenstruct <-+-> "struct" |
self.class_like = [] # |
# doxygendefine <-+-> "define" |
self.defines = [] # |
# doxygenenum <-+-> "enum" |
self.enums = [] # |
# ---> largely ignored by framework, |
# but stored if desired |
# doxygenenumvalue <-+-> "enumvalue" |
self.enum_values = [] # |
# doxygenfunction <-+-> "function" |
self.functions = [] # |
# no directive <-+-> "dir" |
self.dirs = [] # |
# doxygenfile <-+-> "file" |
self.files = [] # |
# not used, but could be supported in |
# the future? |
# doxygengroup <-+-> "group" |
self.groups = [] # |
# doxygennamespace <-+-> "namespace" |
self.namespaces = [] # |
# doxygentypedef <-+-> "typedef" |
self.typedefs = [] # |
# doxygenunion <-+-> "union" |
self.unions = [] # |
# doxygenvariable <-+-> "variable" |
self.variables = [] # |
#-------------------------------------+
####################################################################################
#
##
### Parsing
##
#
####################################################################################
def parse(self):
'''
The first method that should be called after creating an ExhaleRoot object. The
Breathe graph is parsed first, followed by the Doxygen xml documents. By the
end of this method, all of the ``self.<breathe_kind>``, ``self.all_compounds``,
and ``self.all_nodes`` lists as well as the ``self.node_by_refid`` dictionary
will be populated. Lastly, this method sorts all of the internal lists. The
order of execution is exactly
1. :func:`exhale.ExhaleRoot.discoverAllNodes`
2. :func:`exhale.ExhaleRoot.reparentAll`
3. Populate ``self.node_by_refid`` using ``self.all_nodes``.
4. :func:`exhale.ExhaleRoot.fileRefDiscovery`
5. :func:`exhale.ExhaleRoot.filePostProcess`
6. :func:`exhale.ExhaleRoot.sortInternals`
'''
# Find and reparent everything from the Breathe graph.
self.discoverAllNodes()
self.reparentAll()
# now that we have all of the nodes, store them in a convenient manner for refid
# lookup when parsing the Doxygen xml files
for n in self.all_nodes:
self.node_by_refid[n.refid] = n
# find missing relationships using the Doxygen xml files
self.fileRefDiscovery()
self.filePostProcess()
# sort all of the lists we just built
self.sortInternals()
def discoverAllNodes(self):
'''
Stack based traversal of breathe graph, creates some parental relationships
between different ExhaleNode objects. Upon termination, this method will have
populated the lists ``self.all_compounds``, ``self.all_nodes``, and the
``self.<breathe_kind>`` lists for different types of objects.
'''
# When you call the breathe_root.get_compound() method, it returns a list of the
# top level source nodes. These start out on the stack, and we add their
# children if they have not already been visited before.
nodes_remaining = [ExhaleNode(compound) for compound in self.breathe_root.get_compound()]
while len(nodes_remaining) > 0:
curr_node = nodes_remaining.pop()
self.trackNodeIfUnseen(curr_node)
self.discoverNeigbors(nodes_remaining, curr_node)
def trackNodeIfUnseen(self, node):
'''
Helper method for :func:`exhale.ExhaleRoot.discoverAllNodes`. If the node is
not in self.all_nodes yet, add it to both self.all_nodes as well as the
corresponding ``self.<breathe_kind>`` list.
:Parameters:
``node`` (ExhaleNode)
The node to begin tracking if not already present.
'''
if node not in self.all_nodes:
self.all_nodes.append(node)
if node.kind == "class" or node.kind == "struct":
self.class_like.append(node)
elif node.kind == "namespace":
self.namespaces.append(node)
elif node.kind == "enum":
self.enums.append(node)
elif node.kind == "enumvalue":
self.enum_values.append(node)
elif node.kind == "define":
self.defines.append(node)
elif node.kind == "file":
self.files.append(node)
elif node.kind == "dir":
self.dirs.append(node)
elif node.kind == "function":
self.functions.append(node)
elif node.kind == "variable":
self.variables.append(node)
elif node.kind == "group":
self.groups.append(node)
elif node.kind == "typedef":
self.typedefs.append(node)
elif node.kind == "union":
self.unions.append(node)
def discoverNeigbors(self, nodesRemaining, node):
'''
Helper method for :func:`exhale.ExhaleRoot.discoverAllNodes`. Some of the
compound objects received from Breathe have a member function ``get_member()``
that returns all of the children. Some do not. This method checks to see if
the method is present first, and if so performs the following::
For every compound in node.compound.get_member():
If compound not present in self.all_compounds:
- Add compound to self.all_compounds
- Create a child ExhaleNode
- If it is not a class, struct, or union, add to nodesRemaining
- If it is not an enumvalue, make it a child of node parameter
:Parameters:
``nodesRemaining`` (list)
The list of nodes representing the stack traversal being done by
:func:`exhale.ExhaleRoot.discoverAllNodes`. New neighbors found will
be appended to this list.
``node`` (ExhaleNode)
The node we are trying to discover potential new neighbors from.
'''
# discover neighbors of current node; some seem to not have get_member()
if "member" in node.compound.__dict__:
for member in node.compound.get_member():
# keep track of every breathe compound we have seen
if member not in self.all_compounds:
self.all_compounds.append(member)
# if we haven't seen this compound yet, make a node
child_node = ExhaleNode(member)
# if the current node is a class, struct, union, or enum ignore
# its variables, functions, etc
if node.kind == "class" or node.kind == "struct" or node.kind == "union":
if child_node.kind == "enum" or child_node.kind == "union":
nodesRemaining.append(child_node)
else:
nodesRemaining.append(child_node)
                    # the enum itself is presented separately; enumvalues are haphazard and
                    # determining the enumvalue parent would be painful, so skip them here
if child_node.kind != "enumvalue":
node.children.append(child_node)
child_node.parent = node
def reparentAll(self):
'''
Fixes some of the parental relationships lost in parsing the Breathe graph.
File relationships are recovered in :func:`exhale.ExhaleRoot.fileRefDiscovery`.
This method simply calls in this order:
1. :func:`exhale.ExhaleRoot.reparentUnions`
2. :func:`exhale.ExhaleRoot.reparentClassLike`
3. :func:`exhale.ExhaleRoot.reparentDirectories`
4. :func:`exhale.ExhaleRoot.renameToNamespaceScopes`
5. :func:`exhale.ExhaleRoot.reparentNamespaces`
'''
self.reparentUnions()
self.reparentClassLike()
self.reparentDirectories()
self.renameToNamespaceScopes()
self.reparentNamespaces()
def reparentUnions(self):
'''
Helper method for :func:`exhale.ExhaleRoot.reparentAll`. Namespaces and classes
should have the unions defined in them to be in the child list of itself rather
than floating around. Union nodes that are reparented (e.g. a union defined in
a class) will be removed from the list ``self.unions`` since the Breathe
directive for its parent (e.g. the class) will include the documentation for the
union. The consequence of this is that a union defined in a class will **not**
appear in the full api listing of Unions.
'''
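        # Example with a hypothetical union named "ns::Outer::U": parts become
        # ["ns", "Outer", "U"], so "Outer" is first tried as a class_like parent
        # (removing the union from self.unions on success), and otherwise "ns" or
        # "ns::Outer" is tried as a namespace parent.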
# unions declared in a class will not link to the individual union page, so
# we will instead elect to remove these from the list of unions
removals = []
for u in self.unions:
parts = u.name.split("::")
num_parts = len(parts)
if num_parts > 1:
# it can either be a child of a namespace or a class_like
if num_parts > 2:
namespace_name = "::".join(p for p in parts[:-2])
potential_class = parts[-2]
# see if it belongs to a class like object first. if so, remove this
# union from the list of unions
reparented = False
for cl in self.class_like:
if cl.name == potential_class:
cl.children.append(u)
u.parent = cl
reparented = True
break
if reparented:
removals.append(u)
continue
# otherwise, see if it belongs to a namespace
alt_namespace_name = "{}::{}".format(namespace_name, potential_class)
for n in self.namespaces:
if namespace_name == n.name or alt_namespace_name == n.name:
n.children.append(u)
u.parent = n
break
else:
name_or_class_name = "::".join(p for p in parts[:-1])
# see if it belongs to a class like object first. if so, remove this
# union from the list of unions
reparented = False
for cl in self.class_like:
if cl.name == name_or_class_name:
cl.children.append(u)
u.parent = cl
reparented = True
break
if reparented:
removals.append(u)
continue
# next see if it belongs to a namespace
for n in self.namespaces:
if n.name == name_or_class_name:
n.children.append(u)
u.parent = n
break
# remove the unions from self.unions that were declared in class_like objects
for rm in removals:
self.unions.remove(rm)
def reparentClassLike(self):
'''
Helper method for :func:`exhale.ExhaleRoot.reparentAll`. Iterates over the
``self.class_like`` list and adds each object as a child to a namespace if the
        class or struct is a member of that namespace.  Many classes / structs will be
        reparented to a namespace node; these will remain in ``self.class_like``.
        However, if a class or struct is reparented to a different class or struct (it
        is a nested class / struct), it *will* be removed from ``self.class_like`` so
        that the class view hierarchy is generated correctly.
'''
removals = []
for cl in self.class_like:
parts = cl.name.split("::")
if len(parts) > 1:
# first try and reparent to namespaces
namespace_name = "::".join(parts[:-1])
parent_found = False
for n in self.namespaces:
if n.name == namespace_name:
n.children.append(cl)
cl.parent = n
parent_found = True
break
                # if a namespace parent was not found, try and reparent to a class
if not parent_found:
# parent class name would be namespace_name
for p_cls in self.class_like:
if p_cls.name == namespace_name:
p_cls.children.append(cl)
cl.parent = p_cls
removals.append(cl)
break
for rm in removals:
if rm in self.class_like:
self.class_like.remove(rm)
def reparentDirectories(self):
'''
Helper method for :func:`exhale.ExhaleRoot.reparentAll`. Adds subdirectories as
children to the relevant directory ExhaleNode. If a node in ``self.dirs`` is
added as a child to a different directory node, it is removed from the
``self.dirs`` list.
'''
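        # Example with hypothetical directories "src", "src/util", and
        # "src/util/detail": ranks are 1, 2, and 3 respectively, so "src/util/detail"
        # is reparented under "src/util", which in turn is reparented under "src",
        # leaving only "src" in self.dirs.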
dir_parts = []
dir_ranks = []
for d in self.dirs:
parts = d.name.split("/")
for p in parts:
if p not in dir_parts:
dir_parts.append(p)
dir_ranks.append((len(parts), d))
traversal = sorted(dir_ranks)
removals = []
for rank, directory in reversed(traversal):
# rank one means top level directory
if rank < 2:
break
# otherwise, this is nested
for p_rank, p_directory in reversed(traversal):
if p_rank == rank - 1:
if p_directory.name == "/".join(directory.name.split("/")[:-1]):
p_directory.children.append(directory)
directory.parent = p_directory
if directory not in removals:
removals.append(directory)
break
for rm in removals:
self.dirs.remove(rm)
def renameToNamespaceScopes(self):
'''
Helper method for :func:`exhale.ExhaleRoot.reparentAll`. Some compounds in
Breathe such as functions and variables do not have the namespace name they are
declared in before the name of the actual compound. This method prepends the
appropriate (nested) namespace name before the name of any child that does not
already have it.
For example, the variable ``MAX_DEPTH`` declared in namespace ``external`` would
have its ExhaleNode's ``name`` attribute changed from ``MAX_DEPTH`` to
``external::MAX_DEPTH``.
'''
for n in self.namespaces:
namespace_name = "{}::".format(n.name)
for child in n.children:
if namespace_name not in child.name:
child.name = "{}{}".format(namespace_name, child.name)
def reparentNamespaces(self):
'''
Helper method for :func:`exhale.ExhaleRoot.reparentAll`. Adds nested namespaces
as children to the relevant namespace ExhaleNode. If a node in
``self.namespaces`` is added as a child to a different namespace node, it is
removed from the ``self.namespaces`` list. Because these are removed from
``self.namespaces``, it is important that
:func:`exhale.ExhaleRoot.renameToNamespaceScopes` is called before this method.
'''
namespace_parts = []
namespace_ranks = []
for n in self.namespaces:
parts = n.name.split("::")
for p in parts:
if p not in namespace_parts:
namespace_parts.append(p)
namespace_ranks.append((len(parts), n))
traversal = sorted(namespace_ranks)
removals = []
for rank, namespace in reversed(traversal):
# rank one means top level namespace
if rank < 2:
break
# otherwise, this is nested
for p_rank, p_namespace in reversed(traversal):
if p_rank == rank - 1:
if p_namespace.name == "::".join(namespace.name.split("::")[:-1]):
p_namespace.children.append(namespace)
namespace.parent = p_namespace
if namespace not in removals:
removals.append(namespace)
break
for rm in removals:
self.namespaces.remove(rm)
def fileRefDiscovery(self):
'''
Finds the missing components for file nodes by parsing the Doxygen xml (which is
        just ``doxygen_output_dir/<node.refid>.xml``).  Additional items parsed include
        items whose ``refid`` tag is used in this file, the <programlisting> for
        the file, what it includes and what includes it, as well as the location of the
        file (with respect to the *Doxygen* root).
Care must be taken to only include a refid found with specific tags. The
parsing of the xml file was done by just looking at some example outputs. It
seems to be working correctly, but there may be some subtle use cases that break
it.
.. warning::
Some enums, classes, variables, etc declared in the file will not have their
            associated refid in the declaration of the file, but will be present in the
            <programlisting>.  These are added to the file's list of children when they
are found, but this parental relationship cannot be formed if you set
``XML_PROGRAMLISTING = NO`` with Doxygen. An example of such an enum would
be an enum declared inside of a namespace within this file.
'''
if EXHALE_API_DOXY_OUTPUT_DIR == "":
exclaimError("The doxygen xml output directory was not specified!")
return
# parse the doxygen xml file and extract all refid's put in it
# keys: file object, values: list of refid's
doxygen_xml_file_ownerships = {}
# innerclass, innernamespace, etc
ref_regex = re.compile(r'.*<inner.*refid="(\w+)".*')
# what files this file includes
inc_regex = re.compile(r'.*<includes.*>(.+)</includes>')
# what files include this file
inc_by_regex = re.compile(r'.*<includedby refid="(\w+)".*>(.*)</includedby>')
# the actual location of the file
loc_regex = re.compile(r'.*<location file="(.*)"/>')
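        # Hypothetical examples of Doxygen xml lines matched by the regexes above:
        #   <innerclass refid="classexternal_1_1Foo" prot="public">external::Foo</innerclass>
        #   <includes local="no">common.hpp</includes>
        #   <includedby refid="main_8cpp" local="yes">main.cpp</includedby>
        #   <location file="include/common.hpp"/>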
for f in self.files:
doxygen_xml_file_ownerships[f] = []
try:
doxy_xml_path = "{}{}.xml".format(EXHALE_API_DOXY_OUTPUT_DIR, f.refid)
with open(doxy_xml_path, "r") as doxy_file:
processing_code_listing = False # shows up at bottom of xml
for line in doxy_file:
# see if this line represents the location tag
match = loc_regex.match(line)
if match is not None:
f.location = match.groups()[0]
continue
if not processing_code_listing:
# gather included by references
match = inc_by_regex.match(line)
if match is not None:
ref, name = match.groups()
f.included_by.append((ref, name))
continue
# gather includes lines
match = inc_regex.match(line)
if match is not None:
inc = match.groups()[0]
f.includes.append(inc)
continue
# gather any classes, namespaces, etc declared in the file
match = ref_regex.match(line)
if match is not None:
match_refid = match.groups()[0]
if match_refid in self.node_by_refid:
doxygen_xml_file_ownerships[f].append(match_refid)
continue
# lastly, see if we are starting the code listing
if "<programlisting>" in line:
processing_code_listing = True
elif processing_code_listing:
if "</programlisting>" in line:
processing_code_listing = False
else:
f.program_listing.append(line)
except:
exclaimError("Unable to process doxygen xml for file [{}].\n".format(f.name))
#
# IMPORTANT: do not set the parent field of anything being added as a child to the file
#
# hack to make things work right on RTD
if EXHALE_API_DOXYGEN_STRIP_FROM_PATH is not None:
for f in self.files:
f.location = f.location.replace(EXHALE_API_DOXYGEN_STRIP_FROM_PATH, "")
if f.location[0] == "/":
f.location = f.location[1:]
# now that we have parsed all the listed refid's in the doxygen xml, reparent
# the nodes that we care about
for f in self.files:
for match_refid in doxygen_xml_file_ownerships[f]:
child = self.node_by_refid[match_refid]
if child.kind == "struct" or child.kind == "class" or child.kind == "function" or \
child.kind == "typedef" or child.kind == "define" or child.kind == "enum" or \
child.kind == "union":
already_there = False
for fc in f.children:
if child.name == fc.name:
already_there = True
break
if not already_there:
# special treatment for unions: ignore if it is a class union
if child.kind == "union":
for u in self.unions:
if child.name == u.name:
f.children.append(child)
break
else:
f.children.append(child)
elif child.kind == "namespace":
already_there = False
for fc in f.namespaces_used:
if child.name == fc.name:
already_there = True
break
if not already_there:
f.namespaces_used.append(child)
        # last but not least, some kinds declared in the file that are scoped
        # in a namespace will show up in the programlisting, but not at the top level.
for f in self.files:
potential_orphans = []
for n in f.namespaces_used:
for child in n.children:
if child.kind == "enum" or child.kind == "variable" or \
child.kind == "function" or child.kind == "typedef" or \
child.kind == "union":
potential_orphans.append(child)
# now that we have a list of potential orphans, see if this doxygen xml had
# the refid of a given child present.
for orphan in potential_orphans:
unresolved_name = orphan.name.split("::")[-1]
if f.refid in orphan.refid and any(unresolved_name in line for line in f.program_listing):
if orphan not in f.children:
f.children.append(orphan)
def filePostProcess(self):
'''
The real name of this method should be ``reparentFiles``, but to avoid confusion
        about what stage this must happen at, it is called this instead.  After the
:func:`exhale.ExhaleRoot.fileRefDiscovery` method has been called, each file
will have its location parsed. This method reparents files to directories
accordingly, so the file view hierarchy can be complete.
'''
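        # Example (hypothetical file): a file whose parsed location is
        # "include/util/io.hpp" yields dir_path "include/util"; the directory node
        # named "include/util" (if one exists) becomes its parent in the file view.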
for f in self.files:
dir_loc_parts = f.location.split("/")[:-1]
num_parts = len(dir_loc_parts)
# nothing to do, at the top level
if num_parts == 0:
continue
dir_path = "/".join(p for p in dir_loc_parts)
nodes_remaining = [d for d in self.dirs]
while len(nodes_remaining) > 0:
d = nodes_remaining.pop()
if d.name in dir_path:
# we have found the directory we want
if d.name == dir_path:
d.children.append(f)
f.parent = d
break
# otherwise, try and find an owner
else:
nodes_remaining = []
for child in d.children:
if child.kind == "dir":
nodes_remaining.append(child)
def sortInternals(self):
'''
Sort all internal lists (``class_like``, ``namespaces``, ``variables``, etc)
mostly how doxygen would, alphabetical but also hierarchical (e.g. structs
appear before classes in listings). Some internal lists are just sorted, and
some are deep sorted (:func:`exhale.ExhaleRoot.deepSortList`).
'''
# some of the lists only need to be sorted, some of them need to be sorted and
# have each node sort its children
# leaf-like lists: no child sort
self.defines.sort()
self.enums.sort()
self.enum_values.sort()
self.functions.sort()
self.groups.sort()
self.typedefs.sort()
self.variables.sort()
# hierarchical lists: sort children
self.deepSortList(self.class_like)
self.deepSortList(self.namespaces)
self.deepSortList(self.unions)
self.deepSortList(self.files)
self.deepSortList(self.dirs)
def deepSortList(self, lst):
'''
For hierarchical internal lists such as ``namespaces``, we want to sort both the
list as well as have each child sort its children by calling
:func:`exhale.ExhaleNode.typeSort`.
:Parameters:
``lst`` (list)
The list of ExhaleNode objects to be deep sorted.
'''
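        # Example (hypothetical call): deepSortList(self.namespaces) sorts the list of
        # namespace nodes and then has every namespace sort its own children via
        # typeSort().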
lst.sort()
for l in lst:
l.typeSort()
####################################################################################
#
##
### Library generation.
##
#
####################################################################################
def generateFullAPI(self):
'''
Since we are not going to use some of the breathe directives (e.g. namespace or
file), when representing the different views of the generated API we will need:
1. Generate a single file restructured text document for all of the nodes that
have either no children, or children that are leaf nodes.
2. When building the view hierarchies (class view and file view), provide a link
to the appropriate files generated previously.
        If adding onto the framework to, say, add another view (e.g. groups in the
        future), you would link from a reStructuredText document to one of the individually
generated files using the value of ``link_name`` for a given ExhaleNode object.
This method calls in this order:
1. :func:`exhale.ExhaleRoot.generateAPIRootHeader`
2. :func:`exhale.ExhaleRoot.generateNodeDocuments`
3. :func:`exhale.ExhaleRoot.generateAPIRootBody`
4. :func:`exhale.ExhaleRoot.generateAPIRootSummary`
'''
self.generateAPIRootHeader()
self.generateNodeDocuments()
self.generateAPIRootBody()
self.generateAPIRootSummary()
def generateAPIRootHeader(self):
'''
This method creates the root library api file that will include all of the
        different hierarchy views and full api listing.  If ``self.root_directory`` does
        not already exist, it is created first.  Afterward, the root API file is
created and its title is written, as well as the value of
``self.root_file_description``.
'''
try:
if not os.path.isdir(self.root_directory):
os.mkdir(self.root_directory)
except Exception as e:
exclaimError("Cannot create the directory: {}\nError message: {}".format(self.root_directory, e))
raise Exception("Fatal error generating the api root, cannot continue.")
try:
with open(self.full_root_file_path, "w") as generated_index:
generated_index.write("{}\n{}\n\n{}\n\n".format(
self.root_file_title, EXHALE_FILE_HEADING, self.root_file_description)
)
except:
exclaimError("Unable to create the root api file / header: {}".format(self.full_root_file_path))
raise Exception("Fatal error generating the api root, cannot continue.")
def generateNodeDocuments(self):
'''
Creates all of the reStructuredText documents related to types parsed by
Doxygen. This includes all leaf-like documents (``class``, ``struct``,
``enum``, ``typedef``, ``union``, ``variable``, and ``define``), as well as
namespace, file, and directory pages.
During the reparenting phase of the parsing process, nested items were added as
a child to their actual parent. For classes, structs, enums, and unions, if
it was reparented to a ``namespace`` it will *remain* in its respective
``self.<breathe_kind>`` list. However, if it was an internally declared child
of a class or struct (nested classes, structs, enums, and unions), this node
will be removed from its ``self.<breathe_kind>`` list to avoid duplication in
the class hierarchy generation.
When generating the full API, though, we will want to include all of these and
therefore must call :func:`exhale.ExhaleRoot.generateSingleNodeRST` with all of
the nested items. For nested classes and structs, this is done by just calling
``node.findNestedClassLike`` for every node in ``self.class_like``. The
resulting list then has all of ``self.class_like``, as well as any nested
        classes and structs found.  An ``enum`` or ``union`` that was removed from the
        relevant ``self.<breathe_kind>`` list would have been reparented to a **class**
        or **struct**, meaning we must make sure that we generate the single node RST
        documents for everything by finding the nested enums and unions
from ``self.class_like``, as well as everything in ``self.enums`` and
``self.unions``.
'''
# initialize all of the nodes
for node in self.all_nodes:
self.initializeNodeFilenameAndLink(node)
# find the potentially nested items that were reparented
nested_enums = []
nested_unions = []
nested_class_like = []
for cl in self.class_like:
cl.findNestedEnums(nested_enums)
cl.findNestedUnions(nested_unions)
cl.findNestedClassLike(nested_class_like)
# generate all of the leaf-like documents
for node in itertools.chain(nested_class_like, self.enums, nested_enums,
self.unions, nested_unions, self.functions,
self.typedefs, self.variables, self.defines):
self.generateSingleNodeRST(node)
# generate the remaining parent-like documents
self.generateNamespaceNodeDocuments()
self.generateFileNodeDocuments()
self.generateDirectoryNodeDocuments()
def initializeNodeFilenameAndLink(self, node):
'''
Sets the ``file_name`` and ``link_name`` for the specified node. If the kind
of this node is "file", then this method will also set the ``program_file``
as well as the ``program_link_name`` fields.
Since we are operating inside of a ``containmentFolder``, this method **will**
include ``self.root_directory`` in this path so that you can just use::
with open(node.file_name, "w") as gen_file:
... write the file ...
Having the ``containmentFolder`` is important for when we want to generate the
file, but when we want to use it with ``include`` or ``toctree`` this will
need to change. Refer to :func:`exhale.ExhaleRoot.gerrymanderNodeFilenames`.
This method also sets the value of ``node.title``, which will be used in both
the reStructuredText document of the node as well as the links generated in the
class view hierarchy (<a href="..."> for the ``createTreeView = True`` option).
        :Parameters:
            ``node`` (ExhaleNode)
                The node that we are setting the above information for.
'''
# create the file and link names
html_safe_name = node.name.replace(":", "_").replace("/", "_")
node.file_name = "{}/exhale_{}_{}.rst".format(self.root_directory, node.kind, html_safe_name)
node.link_name = "{}_{}".format(qualifyKind(node.kind).lower(), html_safe_name)
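        # Example with a hypothetical struct named "external::Nested" (assuming
        # qualifyKind("struct") returns "Struct"):
        #   file_name -> "<root_directory>/exhale_struct_external__Nested.rst"
        #   link_name -> "struct_external__Nested"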
if node.kind == "file":
# account for same file name in different directory
html_safe_name = node.location.replace("/", "_")
node.file_name = "{}/exhale_{}_{}.rst".format(self.root_directory, node.kind, html_safe_name)
node.link_name = "{}_{}".format(qualifyKind(node.kind).lower(), html_safe_name)
node.program_file = "{}/exhale_program_listing_file_{}.rst".format(
self.root_directory, html_safe_name
)
node.program_link_name = "program_listing_file_{}".format(html_safe_name)
# create the title for this node.
if node.kind == "dir":
title = node.name.split("/")[-1]
# breathe does not prepend the namespace for variables and typedefs, so
# I choose to leave the fully qualified name in the title for added clarity
elif node.kind == "variable" or node.kind == "typedef":
title = node.name
else:
#
# :TODO: This is probably breaking template specializations, need to redo
# the html_safe_name, file_name, and link_name to account for these
# as well as include documentation for how to link to partial
# template specializations.
#
# That is, need to do something like
#
# html_safe_name = node.name.replace(":", "_")
# .replace("/", "_")
# .replace(" ", "_")
# .replace("<", "LT_")
# .replace(">", "_GT")
#
# Or something like that...
#
first_lt = node.name.find("<")
last_gt = node.name.rfind(">")
# dealing with a template, special treatment necessary
if first_lt > -1 and last_gt > -1:
title = "{}{}".format(
node.name[:first_lt].split("::")[-1], # remove namespaces
node.name[first_lt:last_gt + 1] # template params
)
html_safe_name = title.replace(":", "_").replace("/", "_").replace(" ", "_").replace("<", "LT_").replace(">", "_GT").replace(",", "")
node.file_name = "{}/exhale_{}_{}.rst".format(self.root_directory, node.kind, html_safe_name)
node.link_name = "{}_{}".format(qualifyKind(node.kind).lower(), html_safe_name)
if node.kind == "file":
node.program_file = "{}/exhale_program_listing_file_{}.rst".format(
self.root_directory, html_safe_name
)
node.program_link_name = "program_listing_file_{}".format(html_safe_name)
else:
title = node.name.split("::")[-1]
# additionally, I feel that nested classes should have their fully qualified
# name without namespaces for clarity
prepend_parent = False
if node.kind == "class" or node.kind == "struct" or node.kind == "enum" or node.kind == "union":
if node.parent is not None and (node.parent.kind == "class" or node.parent.kind == "struct"):
prepend_parent = True
if prepend_parent:
title = "{}::{}".format(node.parent.name.split("::")[-1], title)
node.title = "{} {}".format(qualifyKind(node.kind), title)
def generateSingleNodeRST(self, node):
'''
Creates the reStructuredText document for the leaf like node object. This
method should only be used with nodes in the following member lists:
- ``self.class_like``
- ``self.enums``
- ``self.functions``
- ``self.typedefs``
- ``self.unions``
- ``self.variables``
- ``self.defines``
File, directory, and namespace nodes are treated separately.
:Parameters:
``node`` (ExhaleNode)
The leaf like node being generated by this method.
'''
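        # Rough sketch of the generated document for a hypothetical struct whose
        # link_name is "struct_external__Nested" and title is "Struct Nested":
        #
        #   .. _struct_external__Nested:
        #
        #   Struct Nested
        #   <EXHALE_FILE_HEADING underline>
        #
        #   - Defined in :ref:`<link_name of the defining file>`
        #
        #   .. <breathe directive for a struct>:: external::Nested
        #      <specificationsForKind("struct")>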
try:
with open(node.file_name, "w") as gen_file:
# generate a link label for every generated file
link_declaration = ".. _{}:\n\n".format(node.link_name)
header = "{}\n{}\n\n".format(node.title, EXHALE_FILE_HEADING)
# link back to the file this was defined in
file_included = False
for f in self.files:
if node in f.children:
if file_included:
raise RuntimeError("Critical error: this node is parented to multiple files.\n\nNode: {}".format(node.name))
header = "{}- Defined in :ref:`{}`\n\n".format(header, f.link_name)
file_included = True
# if this is a nested type, link back to its parent
if node.parent is not None and (node.parent.kind == "struct" or node.parent.kind == "class"):
                    # still a chance to recover if the parent was found in a file. probably doesn't work past one layer
                    # TODO: create deeply (e.g. quadruply) nested classes and find a way to traverse upward. parent links
# should just be class or struct until it is a namespace or file?
if not file_included:
parent_traverser = node.parent
while parent_traverser is not None:
for f in self.files:
if node.parent in f.children:
if file_included:
raise RuntimeError("Critical error: this node is parented to multiple files.\n\nNode: {}".format(node.name))
header = "{}- Defined in :ref:`{}`\n\n".format(header, f.link_name)
file_included = True
if node not in f.children:
f.children.append(node)
if file_included:
parent_traverser = None
else:
parent_traverser = parent_traverser.parent
header = "{}- Nested type of :ref:`{}`\n\n".format(header, node.parent.link_name)
# if this has nested types, link to them
if node.kind == "class" or node.kind == "struct":
nested_children = []
for c in node.children:
c.findNestedEnums(nested_children)
c.findNestedUnions(nested_children)
c.findNestedClassLike(nested_children)
if nested_children:
# build up a list of links, custom sort function will force
# double nested and beyond to appear after their parent by
# sorting on their name
nested_children.sort(key=lambda x: x.name)
nested_child_stream = StringIO()
for nc in nested_children:
nested_child_stream.write("- :ref:`{}`\n".format(nc.link_name))
# extract the list of links and add them as a subsection in the header
nested_child_string = nested_child_stream.getvalue()
nested_child_stream.close()
header = "{}**Nested Types**:\n\n{}\n\n".format(header, nested_child_string)
# inject the appropriate doxygen directive and name of this node
directive = ".. {}:: {}\n".format(kindAsBreatheDirective(node.kind), node.name)
# include any specific directives for this doxygen directive
specifications = "{}\n\n".format(specificationsForKind(node.kind))
gen_file.write("{}{}{}{}".format(link_declaration, header, directive, specifications))
except:
exclaimError("Critical error while generating the file for [{}]".format(node.file_name))
def generateNamespaceNodeDocuments(self):
'''
Generates the reStructuredText document for every namespace, including nested
namespaces that were removed from ``self.namespaces`` (but added as children
to one of the namespaces in ``self.namespaces``).
The documents generated do not use the Breathe namespace directive, but instead
link to the relevant documents associated with this namespace.
'''
# go through all of the top level namespaces
for n in self.namespaces:
# find any nested namespaces
nested_namespaces = []
for child in n.children:
child.findNestedNamespaces(nested_namespaces)
# generate the children first
for nested in reversed(sorted(nested_namespaces)):
self.generateSingleNamespace(nested)
# generate this top level namespace
self.generateSingleNamespace(n)
def generateSingleNamespace(self, nspace):
'''
Helper method for :func:`exhale.ExhaleRoot.generateNamespaceNodeDocuments`.
Writes the reStructuredText file for the given namespace.
:Parameters:
``nspace`` (ExhaleNode)
The namespace node to create the reStructuredText document for.
'''
try:
with open(nspace.file_name, "w") as gen_file:
# generate a link label for every generated file
link_declaration = ".. _{}:\n\n".format(nspace.link_name)
# every generated file must have a header for sphinx to be happy
nspace.title = "{} {}".format(qualifyKind(nspace.kind), nspace.name)
header = "{}\n{}\n\n".format(nspace.title, EXHALE_FILE_HEADING)
# generate the headings and links for the children
children_string = self.generateNamespaceChildrenString(nspace)
# write it all out
gen_file.write("{}{}{}\n\n".format(link_declaration, header, children_string))
except:
exclaimError("Critical error while generating the file for [{}]".format(nspace.file_name))
def generateNamespaceChildrenString(self, nspace):
'''
Helper method for :func:`exhale.ExhaleRoot.generateSingleNamespace`, and
:func:`exhale.ExhaleRoot.generateFileNodeDocuments`. Builds the
body text for the namespace node document that links to all of the child
namespaces, structs, classes, functions, typedefs, unions, and variables
associated with this namespace.
:Parameters:
``nspace`` (ExhaleNode)
The namespace node we are generating the body text for.
:Return (str):
The string to be written to the namespace node's reStructuredText document.
'''
# sort the children
nsp_namespaces = []
nsp_nested_class_like = []
nsp_enums = []
nsp_functions = []
nsp_typedefs = []
nsp_unions = []
nsp_variables = []
for child in nspace.children:
if child.kind == "namespace":
nsp_namespaces.append(child)
elif child.kind == "struct" or child.kind == "class":
child.findNestedClassLike(nsp_nested_class_like)
child.findNestedEnums(nsp_enums)
child.findNestedUnions(nsp_unions)
elif child.kind == "enum":
nsp_enums.append(child)
elif child.kind == "function":
nsp_functions.append(child)
elif child.kind == "typedef":
nsp_typedefs.append(child)
elif child.kind == "union":
nsp_unions.append(child)
elif child.kind == "variable":
nsp_variables.append(child)
        # generate their headings if they exist (no Defines section; #defines are not scoped to a namespace)
children_string = self.generateSortedChildListString("Namespaces", "", nsp_namespaces)
children_string = self.generateSortedChildListString("Classes", children_string, nsp_nested_class_like)
children_string = self.generateSortedChildListString("Enums", children_string, nsp_enums)
children_string = self.generateSortedChildListString("Functions", children_string, nsp_functions)
children_string = self.generateSortedChildListString("Typedefs", children_string, nsp_typedefs)
children_string = self.generateSortedChildListString("Unions", children_string, nsp_unions)
children_string = self.generateSortedChildListString("Variables", children_string, nsp_variables)
return children_string
def generateSortedChildListString(self, sectionTitle, previousString, lst):
'''
Helper method for :func:`exhale.ExhaleRoot.generateNamespaceChildrenString`.
Used to build up a continuous string with all of the children separated out into
titled sections.
This generates a new titled section with ``sectionTitle`` and puts a link to
every node found in ``lst`` in this section. The newly created section is
appended to ``previousString`` and then returned.
:TODO:
Change this to use string streams like the other methods instead.
:Parameters:
``sectionTitle`` (str)
The title of the section for this list of children.
``previousString`` (str)
The string to append the newly created section to.
``lst`` (list)
A list of ExhaleNode objects that are to be linked to from this section.
This method sorts ``lst`` in place.
'''
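        # Example of the section appended for sectionTitle="Enums" with one
        # hypothetical child whose link_name is "enum_external__Color":
        #
        #   Enums
        #   <EXHALE_SECTION_HEADING underline>
        #
        #   - :ref:`enum_external__Color`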
if lst:
lst.sort()
new_string = "{}\n\n{}\n{}\n".format(previousString, sectionTitle, EXHALE_SECTION_HEADING)
for l in lst:
new_string = "{}\n- :ref:`{}`".format(new_string, l.link_name)
return new_string
else:
return previousString
def generateFileNodeDocuments(self):
'''
Generates the reStructuredText documents for files as well as the file's
program listing reStructuredText document if applicable. Refer to
:ref:`usage_customizing_file_pages` for changing the output of this method.
        The remainder of each generated file document lists all nodes that have been
        discovered to be defined (e.g. classes) or referred to (e.g. included files or
        files that include this file).
'''
for f in self.files:
# if the programlisting was included, length will be at least 1 line
if len(f.program_listing) > 0:
include_program_listing = True
full_program_listing = '.. code-block:: cpp\n\n'
# need to reformat each line to remove xml tags / put <>& back in
for pgf_line in f.program_listing:
fixed_whitespace = re.sub(r'<sp/>', ' ', pgf_line)
# for our purposes, this is good enough:
# http://stackoverflow.com/a/4869782/3814202
no_xml_tags = re.sub(r'<[^<]+?>', '', fixed_whitespace)
                    revive_lt = re.sub(r'&lt;', '<', no_xml_tags)
                    revive_gt = re.sub(r'&gt;', '>', revive_lt)
                    revive_quote = re.sub(r'&quot;', '"', revive_gt)
                    revive_apos = re.sub(r'&apos;', "'", revive_quote)
                    revive_amp = re.sub(r'&amp;', '&', revive_apos)
full_program_listing = "{} {}".format(full_program_listing, revive_amp)
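                # Example transformation of a single hypothetical programlisting line:
                #   '<highlight class="normal">if<sp/>(x<sp/>&lt;<sp/>y)</highlight>'
                # becomes
                #   'if (x < y)'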
# create the programlisting file
try:
with open(f.program_file, "w") as gen_file:
# generate a link label for every generated file
link_declaration = ".. _{}:\n\n".format(f.program_link_name)
# every generated file must have a header for sphinx to be happy
prog_title = "Program Listing for {} {}".format(qualifyKind(f.kind), f.name)
header = "{}\n{}\n\n".format(prog_title, EXHALE_FILE_HEADING)
return_link = "- Return to documentation for :ref:`{}`\n\n".format(f.link_name)
# write it all out
gen_file.write("{}{}{}{}\n\n".format(
link_declaration, header, return_link, full_program_listing)
)
except:
exclaimError("Critical error while generating the file for [{}]".format(f.file_name))
else:
include_program_listing = False
for f in self.files:
if len(f.location) > 0:
file_definition = "Definition (``{}``)\n{}\n\n".format(
f.location, EXHALE_SECTION_HEADING
)
else:
file_definition = ""
if include_program_listing and file_definition != "":
file_definition = "{}.. toctree::\n :maxdepth: 1\n\n {}\n\n".format(
file_definition, f.program_file.split("/")[-1] # file path still has directory
)
if len(f.includes) > 0:
file_includes = "Includes\n{}\n\n".format(EXHALE_SECTION_HEADING)
for incl in sorted(f.includes):
local_file = None
for incl_file in self.files:
if incl in incl_file.location:
local_file = incl_file
break
if local_file is not None:
file_includes = "{}- ``{}`` (:ref:`{}`)\n".format(
file_includes, incl, local_file.link_name
)
else:
file_includes = "{}- ``{}``\n".format(file_includes, incl)
else:
file_includes = ""
if len(f.included_by) > 0:
file_included_by = "Included By\n{}\n\n".format(EXHALE_SECTION_HEADING)
for incl_ref, incl_name in f.included_by:
for incl_file in self.files:
if incl_ref == incl_file.refid:
file_included_by = "{}- :ref:`{}`\n".format(file_included_by, incl_file.link_name)
break
else:
file_included_by = ""
# generate their headings if they exist --- DO NOT USE findNested*, these are included recursively
file_structs = []
file_classes = []
file_enums = []
file_functions = []
file_typedefs = []
file_unions = []
file_variables = []
file_defines = []
for child in f.children:
if child.kind == "struct":
file_structs.append(child)
elif child.kind == "class":
file_classes.append(child)
elif child.kind == "enum":
file_enums.append(child)
elif child.kind == "function":
file_functions.append(child)
elif child.kind == "typedef":
file_typedefs.append(child)
elif child.kind == "union":
file_unions.append(child)
elif child.kind == "variable":
file_variables.append(child)
elif child.kind == "define":
file_defines.append(child)
children_string = self.generateSortedChildListString("Namespaces", "", f.namespaces_used)
children_string = self.generateSortedChildListString("Classes", children_string, file_structs + file_classes)
children_string = self.generateSortedChildListString("Enums", children_string, file_enums)
children_string = self.generateSortedChildListString("Functions", children_string, file_functions)
children_string = self.generateSortedChildListString("Defines", children_string, file_defines)
children_string = self.generateSortedChildListString("Typedefs", children_string, file_typedefs)
children_string = self.generateSortedChildListString("Unions", children_string, file_unions)
children_string = self.generateSortedChildListString("Variables", children_string, file_variables)
try:
with open(f.file_name, "w") as gen_file:
# generate a link label for every generated file
link_declaration = ".. _{}:\n\n".format(f.link_name)
# every generated file must have a header for sphinx to be happy
f.title = "{} {}".format(qualifyKind(f.kind), f.name)
header = "{}\n{}\n\n".format(f.title, EXHALE_FILE_HEADING)
# write it all out
gen_file.write("{}{}{}{}\n{}\n{}\n\n".format(
link_declaration, header, file_definition, file_includes, file_included_by, children_string)
)
except:
exclaimError("Critical error while generating the file for [{}]".format(f.file_name))
if EXHALE_GENERATE_BREATHE_FILE_DIRECTIVES:
try:
with open(f.file_name, "a") as gen_file:
                        # append the breathe directive for the full file listing
gen_file.write(
"\nFull File Listing\n{}\n\n"
".. {}:: {}\n"
"{}\n\n".format(EXHALE_SECTION_HEADING, kindAsBreatheDirective(f.kind), f.location, specificationsForKind(f.kind))
)
except:
exclaimError("Critical error while generating the breathe directive for [{}]".format(f.file_name))
def generateDirectoryNodeDocuments(self):
'''
Generates all of the directory reStructuredText documents.
'''
all_dirs = []
for d in self.dirs:
d.findNestedDirectories(all_dirs)
for d in all_dirs:
self.generateDirectoryNodeRST(d)
def generateDirectoryNodeRST(self, node):
'''
Helper method for :func:`exhale.ExhaleRoot.generateDirectoryNodeDocuments`.
Generates the reStructuredText documents for the given directory node.
Directory nodes will only link to files and subdirectories within it.
:Parameters:
``node`` (ExhaleNode)
The directory node to generate the reStructuredText document for.
'''
# find the relevant children: directories and files only
child_dirs = []
child_files = []
for c in node.children:
if c.kind == "dir":
child_dirs.append(c)
elif c.kind == "file":
child_files.append(c)
# generate the subdirectory section
if len(child_dirs) > 0:
child_dirs_string = "Subdirectories\n{}\n\n".format(EXHALE_SECTION_HEADING)
for child_dir in sorted(child_dirs):
child_dirs_string = "{}- :ref:`{}`\n".format(child_dirs_string, child_dir.link_name)
else:
child_dirs_string = ""
# generate the files section
if len(child_files) > 0:
child_files_string = "Files\n{}\n\n".format(EXHALE_SECTION_HEADING)
for child_file in sorted(child_files):
child_files_string = "{}- :ref:`{}`\n".format(child_files_string, child_file.link_name)
else:
child_files_string = ""
# generate the file for this directory
try:
with open(node.file_name, "w") as gen_file:
# generate a link label for every generated file
link_declaration = ".. _{}:\n\n".format(node.link_name)
header = "{}\n{}\n\n".format(node.title, EXHALE_FILE_HEADING)
# generate the headings and links for the children
# write it all out
gen_file.write("{}{}{}\n{}\n\n".format(
link_declaration, header, child_dirs_string, child_files_string)
)
except:
exclaimError("Critical error while generating the file for [{}]".format(node.file_name))
def generateAPIRootBody(self):
'''
Generates the root library api file's body text. The method calls
:func:`exhale.ExhaleRoot.gerrymanderNodeFilenames` first to enable proper
internal linkage between reStructuredText documents. Afterward, it calls
:func:`exhale.ExhaleRoot.generateViewHierarchies` followed by
:func:`exhale.ExhaleRoot.generateUnabridgedAPI` to generate both hierarchies as
well as the full API listing. As a result, three files will now be ready:
1. ``self.class_view_file``
2. ``self.directory_view_file``
3. ``self.unabridged_api_file``
These three files are then *included* into the root library file. The
consequence of using an ``include`` directive is that Sphinx will complain about
these three files never being included in any ``toctree`` directive. These
warnings are expected, and preferred to using a ``toctree`` because otherwise
the user would have to click on the class view link from the ``toctree`` in
order to see it. This behavior has been acceptable for me so far, but if it
is causing you problems please raise an issue on GitHub and I may be able to
conditionally use a ``toctree`` if you really need it.
'''
try:
self.gerrymanderNodeFilenames()
self.generateViewHierarchies()
self.generateUnabridgedAPI()
with open(self.full_root_file_path, "a") as generated_index:
generated_index.write(
".. include:: {}\n\n".format(self.class_view_file.split("/")[-1])
)
generated_index.write(
".. include:: {}\n\n".format(self.directory_view_file.split("/")[-1])
)
generated_index.write(
".. include:: {}\n\n".format(self.unabridged_api_file.split("/")[-1])
)
except Exception as e:
exclaimError("Unable to create the root api body: {}".format(e))
def gerrymanderNodeFilenames(self):
'''
When creating nodes, the filename needs to be relative to ``conf.py``, so it
will include ``self.root_directory``. However, when generating the API, the
file we are writing to is in the same directory as the generated node files so
we need to remove the directory path from a given ExhaleNode's ``file_name``
before we can ``include`` it or use it in a ``toctree``.
'''
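        # Example (hypothetical): "generated_api/exhale_class_external__Foo.rst"
        # becomes "exhale_class_external__Foo.rst", which is the form expected by the
        # ``include`` and ``toctree`` entries written into the root document.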
for node in self.all_nodes:
node.file_name = node.file_name.split("/")[-1]
if node.kind == "file":
node.program_file = node.program_file.split("/")[-1]
def generateViewHierarchies(self):
'''
Wrapper method to create the view hierarchies. Currently it just calls
:func:`exhale.ExhaleRoot.generateClassView` and
:func:`exhale.ExhaleRoot.generateDirectoryView` --- if you want to implement
        additional hierarchies, implement the additional hierarchy method and call it
from here. Then make sure to ``include`` it in
:func:`exhale.ExhaleRoot.generateAPIRootBody`.
'''
self.generateClassView(self.use_tree_view)
self.generateDirectoryView(self.use_tree_view)
def generateClassView(self, treeView):
'''
Generates the class view hierarchy, writing it to ``self.class_view_file``.
:Parameters:
``treeView`` (bool)
Whether or not to use the collapsibleList version. See the
``createTreeView`` description in :func:`exhale.generate`.
'''
class_view_stream = StringIO()
for n in self.namespaces:
n.toClassView(0, class_view_stream, treeView)
# Add everything that was not nested in a namespace.
missing = []
# class-like objects (structs and classes)
for cl in sorted(self.class_like):
if not cl.in_class_view:
missing.append(cl)
# enums
for e in sorted(self.enums):
if not e.in_class_view:
missing.append(e)
# unions
for u in sorted(self.unions):
if not u.in_class_view:
missing.append(u)
if len(missing) > 0:
idx = 0
last_missing_child = len(missing) - 1
for m in missing:
m.toClassView(0, class_view_stream, treeView, idx == last_missing_child)
idx += 1
elif treeView:
# need to restart since there were no missing children found, otherwise the
# last namespace will not correctly have a lastChild
class_view_stream.close()
class_view_stream = StringIO()
last_nspace_index = len(self.namespaces) - 1
for idx in range(last_nspace_index + 1):
nspace = self.namespaces[idx]
nspace.toClassView(0, class_view_stream, treeView, idx == last_nspace_index)
# extract the value from the stream and close it down
class_view_string = class_view_stream.getvalue()
class_view_stream.close()
# inject the raw html for the treeView unordered lists
if treeView:
# we need to indent everything to be under the .. raw:: html directive, add
# indentation so the html is readable while we are at it
indented = re.sub(r'(.+)', r' \1', class_view_string)
class_view_string = \
'.. raw:: html\n\n' \
' <ul class="treeView">\n' \
' <li>\n' \
' <ul class="collapsibleList">\n' \
'{}' \
' </ul><!-- collapsibleList -->\n' \
' </li><!-- only tree view element -->\n' \
' </ul><!-- treeView -->\n'.format(indented)
# write everything to file to be included in the root api later
try:
with open(self.class_view_file, "w") as cvf:
cvf.write("Class Hierarchy\n{}\n\n{}\n\n".format(EXHALE_SECTION_HEADING,
class_view_string))
except Exception as e:
exclaimError("Error writing the class hierarchy: {}".format(e))
def generateDirectoryView(self, treeView):
'''
Generates the file view hierarchy, writing it to ``self.directory_view_file``.
:Parameters:
``treeView`` (bool)
Whether or not to use the collapsibleList version. See the
``createTreeView`` description in :func:`exhale.generate`.
'''
directory_view_stream = StringIO()
for d in self.dirs:
d.toDirectoryView(0, directory_view_stream, treeView)
# add potential missing files (not sure if this is possible though)
missing = []
for f in sorted(self.files):
if not f.in_directory_view:
missing.append(f)
found_missing = len(missing) > 0
if found_missing:
idx = 0
last_missing_child = len(missing) - 1
for m in missing:
m.toDirectoryView(0, directory_view_stream, treeView, idx == last_missing_child)
idx += 1
elif treeView:
# need to restart since there were no missing children found, otherwise the
# last directory will not correctly have a lastChild
directory_view_stream.close()
directory_view_stream = StringIO()
last_dir_index = len(self.dirs) - 1
for idx in range(last_dir_index + 1):
curr_d = self.dirs[idx]
curr_d.toDirectoryView(0, directory_view_stream, treeView, idx == last_dir_index)
# extract the value from the stream and close it down
directory_view_string = directory_view_stream.getvalue()
directory_view_stream.close()
# inject the raw html for the treeView unordered lists
if treeView:
# we need to indent everything to be under the .. raw:: html directive, add
# indentation so the html is readable while we are at it
indented = re.sub(r'(.+)', r' \1', directory_view_string)
directory_view_string = \
'.. raw:: html\n\n' \
' <ul class="treeView">\n' \
' <li>\n' \
' <ul class="collapsibleList">\n' \
'{}' \
' </ul><!-- collapsibleList -->\n' \
' </li><!-- only tree view element -->\n' \
' </ul><!-- treeView -->\n'.format(indented)
# write everything to file to be included in the root api later
try:
with open(self.directory_view_file, "w") as dvf:
dvf.write("File Hierarchy\n{}\n\n{}\n\n".format(EXHALE_SECTION_HEADING,
directory_view_string))
except Exception as e:
exclaimError("Error writing the directory hierarchy: {}".format(e))
def generateUnabridgedAPI(self):
'''
Generates the unabridged (full) API listing into ``self.unabridged_api_file``.
This is necessary as some items may not show up in either hierarchy view,
depending on:
1. The item. For example, if a namespace has only one member which is a
variable, then neither the namespace nor the variable will be declared in the
class view hierarchy. It will be present in the file page it was declared in
but not on the main library page.
2. The configurations of Doxygen. For example, see the warning in
:func:`exhale.ExhaleRoot.fileRefDiscovery`. Items whose parents cannot be
        rediscovered without the programlisting will still be documented, their link
appearing in the unabridged API listing.
Currently, the API is generated in the following (somewhat arbitrary) order:
- Namespaces
- Classes and Structs
- Enums
- Unions
- Functions
- Variables
- Defines
- Typedefs
- Directories
- Files
If you want to change the ordering, just change the order of the calls to
:func:`exhale.ExhaleRoot.enumerateAll` in this method.
'''
try:
with open(self.unabridged_api_file, "w") as full_api_file:
# write the header
full_api_file.write("Full API\n{}\n\n".format(EXHALE_SECTION_HEADING))
# recover all namespaces that were reparented
all_namespaces = []
for n in self.namespaces:
n.findNestedNamespaces(all_namespaces)
# recover all directories that were reparented
all_directories = []
for d in self.dirs:
d.findNestedDirectories(all_directories)
# recover classes and structs that were reparented
all_class_like = []
for cl in self.class_like:
cl.findNestedClassLike(all_class_like)
# write everything to file: reorder these lines for different outcomes
self.enumerateAll("Namespaces", all_namespaces, full_api_file)
self.enumerateAll("Classes and Structs", all_class_like, full_api_file)
self.enumerateAll("Enums", self.enums, full_api_file)
self.enumerateAll("Unions", self.unions, full_api_file)
self.enumerateAll("Functions", self.functions, full_api_file)
self.enumerateAll("Variables", self.variables, full_api_file)
self.enumerateAll("Defines", self.defines, full_api_file)
self.enumerateAll("Typedefs", self.typedefs, full_api_file)
self.enumerateAll("Directories", all_directories, full_api_file)
self.enumerateAll("Files", self.files, full_api_file)
except Exception as e:
exclaimError("Error writing the unabridged API: {}".format(e))
def enumerateAll(self, subsectionTitle, lst, openFile):
'''
Helper function for :func:`exhale.ExhaleRoot.generateUnabridgedAPI`. Simply
writes a subsection to ``openFile`` (a ``toctree`` to the ``file_name``) of each
ExhaleNode in ``sorted(lst)`` if ``len(lst) > 0``. Otherwise, nothing is
written to the file.
:Parameters:
``subsectionTitle`` (str)
The title of this subsection, e.g. ``"Namespaces"`` or ``"Files"``.
``lst`` (list)
The list of ExhaleNodes to be enumerated in this subsection.
``openFile`` (File)
The **already open** file object to write to directly. No safety checks
are performed, make sure this is a real file object that has not been
closed already.
'''
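        # Sketch of the output for subsectionTitle="Namespaces" and one hypothetical
        # node whose file_name is "exhale_namespace_external.rst":
        #
        #   Namespaces
        #   <EXHALE_SUBSECTION_HEADING underline>
        #
        #   .. toctree::
        #      :maxdepth: <EXHALE_API_TOCTREE_MAX_DEPTH>
        #
        #      exhale_namespace_external.rst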
if len(lst) > 0:
openFile.write("{}\n{}\n\n".format(subsectionTitle, EXHALE_SUBSECTION_HEADING))
for l in sorted(lst):
openFile.write(
".. toctree::\n"
" :maxdepth: {}\n\n"
" {}\n\n".format(EXHALE_API_TOCTREE_MAX_DEPTH, l.file_name)
)
def generateAPIRootSummary(self):
'''
Writes the library API root summary to the main library file. See the
documentation for the key ``afterBodySummary`` in :func:`exhale.generate`.
'''
try:
with open(self.full_root_file_path, "a") as generated_index:
generated_index.write("{}\n\n".format(self.root_file_summary))
except Exception as e:
exclaimError("Unable to create the root api summary: {}".format(e))
####################################################################################
#
##
### Miscellaneous utility functions.
##
#
####################################################################################
def toConsole(self):
'''
Convenience function for printing out the entire API being generated to the
console. Unused in the release, but is helpful for debugging ;)
'''
self.consoleFormat("Classes and Structs", self.class_like)
self.consoleFormat("Defines", self.defines)
self.consoleFormat("Enums", self.enums)
self.consoleFormat("Enum Values", self.enum_values)
self.consoleFormat("Functions", self.functions)
self.consoleFormat("Files", self.files)
self.consoleFormat("Directories", self.dirs)
self.consoleFormat("Groups", self.groups)
self.consoleFormat("Namespaces", self.namespaces)
self.consoleFormat("Typedefs", self.typedefs)
self.consoleFormat("Unions", self.unions)
self.consoleFormat("Variables", self.variables)
def consoleFormat(self, sectionTitle, lst):
'''
Helper method for :func:`exhale.ExhaleRoot.toConsole`. Prints the given
``sectionTitle`` and calls :func:`exhale.ExhaleNode.toConsole` with ``0`` as the
level for every ExhaleNode in ``lst``.
:Parameters:
``sectionTitle`` (str)
The title that will be printed with some visual separators around it.
``lst`` (list)
The list of ExhaleNodes to print to the console.
'''
print("###########################################################")
print("## {}".format(sectionTitle))
print("###########################################################")
for l in lst:
l.toConsole(0)
| apache-2.0 |
birryree/servo | tests/wpt/web-platform-tests/old-tests/webdriver/user_input/click_test.py | 141 | 10579 | import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
repo_root = os.path.abspath(os.path.join(__file__, "../../.."))
sys.path.insert(1, os.path.join(repo_root, "tools", "webdriver"))
from webdriver import exceptions, wait
class ClickTest(base_test.WebDriverBaseTest):
def setUp(self):
self.wait = wait.WebDriverWait(self.driver, 5, ignored_exceptions = [exceptions.NoSuchAlertException])
self.driver.get(self.webserver.where_is('modal/res/alerts.html'))
def tearDown(self):
try:
self.driver.switch_to_alert().dismiss()
except exceptions.NoSuchAlertException:
pass
def test_click_div(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("div")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "div")
def test_click_p(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("p")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "p")
def test_click_h1(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("h1")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "h1")
def test_click_pre(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("pre")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "pre")
def test_click_ol(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("ol")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "ol")
def test_click_ul(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("ul")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "ul")
def test_click_a(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("a")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "a")
def test_click_img(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("img")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "img")
def test_click_video(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("video")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "video")
def test_click_canvas(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("canvas")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "canvas")
def test_click_progress(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("progress")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "progress")
def test_click_textarea(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("textarea")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "textarea")
def test_click_button(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("button")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "button")
def test_click_svg(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("svg")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "svg")
def test_click_input_range(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_range")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_range")
def test_click_input_button(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_button")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_button")
def test_click_input_submit(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_submit")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_submit")
def test_click_input_reset(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_reset")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_reset")
def test_click_input_checkbox(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_checkbox")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_checkbox")
def test_click_input_radio(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_radio")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_radio")
def test_click_input_text(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_text")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_text")
def test_click_input_number(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_number")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_number")
def test_click_input_tel(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_tel")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_tel")
def test_click_input_url(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_url")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_url")
def test_click_input_email(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_email")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_email")
def test_click_input_search(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_search")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_search")
def test_click_input_image(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_image")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_image")
if __name__ == "__main__":
unittest.main()
| mpl-2.0 |
tpaszkowski/quantum | quantum/plugins/ryu/ryu_quantum_plugin.py | 2 | 10994 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# <yamahata at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Isaku Yamahata
from oslo.config import cfg
from ryu.app import client
from ryu.app import rest_nw_id
from quantum.agent import securitygroups_rpc as sg_rpc
from quantum.common import constants as q_const
from quantum.common import exceptions as q_exc
from quantum.common import rpc as q_rpc
from quantum.common import topics
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import dhcp_rpc_base
from quantum.db import extraroute_db
from quantum.db import l3_rpc_base
from quantum.db import models_v2
from quantum.db import securitygroups_rpc_base as sg_db_rpc
from quantum.openstack.common import log as logging
from quantum.openstack.common import rpc
from quantum.openstack.common.rpc import proxy
from quantum.plugins.ryu.common import config # noqa
from quantum.plugins.ryu.db import api_v2 as db_api_v2
LOG = logging.getLogger(__name__)
class RyuRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
l3_rpc_base.L3RpcCallbackMixin,
sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
RPC_API_VERSION = '1.1'
def __init__(self, ofp_rest_api_addr):
self.ofp_rest_api_addr = ofp_rest_api_addr
def create_rpc_dispatcher(self):
return q_rpc.PluginRpcDispatcher([self])
def get_ofp_rest_api(self, context, **kwargs):
LOG.debug(_("get_ofp_rest_api: %s"), self.ofp_rest_api_addr)
return self.ofp_rest_api_addr
@classmethod
def get_port_from_device(cls, device):
port = db_api_v2.get_port_from_device(device)
if port:
port['device'] = device
return port
class AgentNotifierApi(proxy.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_port_update = topics.get_topic_name(topic,
topics.PORT,
topics.UPDATE)
def port_update(self, context, port):
self.fanout_cast(context,
self.make_msg('port_update', port=port),
topic=self.topic_port_update)
class RyuQuantumPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
extraroute_db.ExtraRoute_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin):
_supported_extension_aliases = ["router", "extraroute", "security-group"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_if_noop_driver(aliases)
self._aliases = aliases
return self._aliases
def __init__(self, configfile=None):
db.configure_db()
self.tunnel_key = db_api_v2.TunnelKey(
cfg.CONF.OVS.tunnel_key_min, cfg.CONF.OVS.tunnel_key_max)
self.ofp_api_host = cfg.CONF.OVS.openflow_rest_api
if not self.ofp_api_host:
raise q_exc.Invalid(_('Invalid configuration. check ryu.ini'))
self.client = client.OFPClient(self.ofp_api_host)
self.tun_client = client.TunnelClient(self.ofp_api_host)
self.iface_client = client.QuantumIfaceClient(self.ofp_api_host)
for nw_id in rest_nw_id.RESERVED_NETWORK_IDS:
if nw_id != rest_nw_id.NW_ID_UNKNOWN:
self.client.update_network(nw_id)
self._setup_rpc()
# register known all network list on startup
self._create_all_tenant_network()
def _setup_rpc(self):
self.conn = rpc.create_connection(new=True)
self.notifier = AgentNotifierApi(topics.AGENT)
self.callbacks = RyuRpcCallbacks(self.ofp_api_host)
self.dispatcher = self.callbacks.create_rpc_dispatcher()
self.conn.create_consumer(topics.PLUGIN, self.dispatcher, fanout=False)
self.conn.consume_in_thread()
def _create_all_tenant_network(self):
for net in db_api_v2.network_all_tenant_list():
self.client.update_network(net.id)
for tun in self.tunnel_key.all_list():
self.tun_client.update_tunnel_key(tun.network_id, tun.tunnel_key)
session = db.get_session()
for port in session.query(models_v2.Port).all():
self.iface_client.update_network_id(port.id, port.network_id)
def _client_create_network(self, net_id, tunnel_key):
self.client.create_network(net_id)
self.tun_client.create_tunnel_key(net_id, tunnel_key)
def _client_delete_network(self, net_id):
client.ignore_http_not_found(
lambda: self.client.delete_network(net_id))
client.ignore_http_not_found(
lambda: self.tun_client.delete_tunnel_key(net_id))
def create_network(self, context, network):
session = context.session
with session.begin(subtransactions=True):
#set up default security groups
tenant_id = self._get_tenant_id_for_create(
context, network['network'])
self._ensure_default_security_group(context, tenant_id)
net = super(RyuQuantumPluginV2, self).create_network(context,
network)
self._process_l3_create(context, network['network'], net['id'])
self._extend_network_dict_l3(context, net)
tunnel_key = self.tunnel_key.allocate(session, net['id'])
try:
self._client_create_network(net['id'], tunnel_key)
except:
self._client_delete_network(net['id'])
raise
return net
def update_network(self, context, id, network):
session = context.session
with session.begin(subtransactions=True):
net = super(RyuQuantumPluginV2, self).update_network(context, id,
network)
self._process_l3_update(context, network['network'], id)
self._extend_network_dict_l3(context, net)
return net
def delete_network(self, context, id):
self._client_delete_network(id)
session = context.session
with session.begin(subtransactions=True):
self.tunnel_key.delete(session, id)
super(RyuQuantumPluginV2, self).delete_network(context, id)
def get_network(self, context, id, fields=None):
net = super(RyuQuantumPluginV2, self).get_network(context, id, None)
self._extend_network_dict_l3(context, net)
return self._fields(net, fields)
def get_networks(self, context, filters=None, fields=None):
nets = super(RyuQuantumPluginV2, self).get_networks(context, filters,
None)
for net in nets:
self._extend_network_dict_l3(context, net)
return [self._fields(net, fields) for net in nets]
def create_port(self, context, port):
session = context.session
with session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
port = super(RyuQuantumPluginV2, self).create_port(context, port)
self._process_port_create_security_group(
context, port['id'], sgids)
self._extend_port_dict_security_group(context, port)
self.notify_security_groups_member_updated(context, port)
self.iface_client.create_network_id(port['id'], port['network_id'])
return port
def delete_port(self, context, id, l3_port_check=True):
# if needed, check to see if this is a port owned by
# and l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
with context.session.begin(subtransactions=True):
self.disassociate_floatingips(context, id)
port = self.get_port(context, id)
self._delete_port_security_group_bindings(context, id)
super(RyuQuantumPluginV2, self).delete_port(context, id)
self.notify_security_groups_member_updated(context, port)
def update_port(self, context, id, port):
deleted = port['port'].get('deleted', False)
session = context.session
need_port_update_notify = False
with session.begin(subtransactions=True):
original_port = super(RyuQuantumPluginV2, self).get_port(
context, id)
updated_port = super(RyuQuantumPluginV2, self).update_port(
context, id, port)
need_port_update_notify = self.update_security_group_on_port(
context, id, port, original_port, updated_port)
need_port_update_notify |= self.is_security_group_member_updated(
context, original_port, updated_port)
need_port_update_notify |= (original_port['admin_state_up'] !=
updated_port['admin_state_up'])
if need_port_update_notify:
self.notifier.port_update(context, updated_port)
if deleted:
db_api_v2.set_port_status(session, id, q_const.PORT_STATUS_DOWN)
return updated_port
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
port = super(RyuQuantumPluginV2, self).get_port(context, id,
fields)
self._extend_port_dict_security_group(context, port)
return self._fields(port, fields)
def get_ports(self, context, filters=None, fields=None):
with context.session.begin(subtransactions=True):
ports = super(RyuQuantumPluginV2, self).get_ports(
context, filters, fields)
for port in ports:
self._extend_port_dict_security_group(context, port)
return [self._fields(port, fields) for port in ports]
| apache-2.0 |
chepazzo/trigger | trigger/contrib/commando/plugins/config_device.py | 6 | 5188 |
import os.path
import re
from socket import getfqdn, gethostbyname
from twisted.python import log
from trigger.contrib.commando import CommandoApplication
from trigger.conf import settings
from trigger.utils import xmltodict, strip_juniper_namespace
import xml.etree.ElementTree as ET
from xml.etree.cElementTree import ElementTree, Element, SubElement
task_name = 'config_device'
if not hasattr(settings, 'TFTPROOT_DIR'):
settings.TFTPROOT_DIR = ''
if not hasattr(settings, 'TFTP_HOST'):
settings.TFTP_HOST = ''
def xmlrpc_config_device(*args, **kwargs):
c = ConfigDevice(*args, **kwargs)
d = c.run()
return d
class ConfigDevice(CommandoApplication):
tftp_dir = settings.TFTPROOT_DIR
tftp_host = settings.TFTP_HOST
tftp_ip = gethostbyname(tftp_host)
def __init__(self, action='replace', files=None, commands=None, debug=False, **kwargs):
if commands is None:
commands = []
if files is None:
files = []
        self.data = []
self.commands = commands
self.files = files
self.action = action
##
## available actions:
## replace
## overwrite
## merge
## set
##
self.debug = debug
super(ConfigDevice, self).__init__(**kwargs)
##
## to_<vendor> methods
##
## Used to construct the cmds sent to specific devices.
## The dev is passed to allow for creating different
## commands based on model and version!!
def to_cisco(self, dev, commands=None, extra=None):
cmds = []
files = self.files
for fn in files:
copytftpcmd = "copy tftp://%s/%s running-config" % (self.tftp_ip, fn)
cmds.append(copytftpcmd)
cmds.append('copy running-config startup-config')
return cmds
to_arista = to_cisco
def to_brocade(self, dev, commands=None, extra=None):
cmds = []
action = self.action
files = self.files
if re.match(r"^BRMLXE", dev.make):
log.msg('Device Type (%s %s) not supported' % (dev.vendor, dev.make))
return []
for fn in files:
copytftpcmd = "copy tftp running-config %s %s" % (self.tftp_ip, fn)
if action == 'overwrite':
copytftpcmd += ' overwrite'
cmds.append(copytftpcmd)
cmds.append('copy running-config startup-config')
return cmds
def to_dell(self, dev, commands=None, extra=None):
cmds = []
files = self.files
if dev.make != 'POWERCONNECT':
log.msg('Device Type (%s %s) not supported' % (dev.vendor, dev.make))
return cmds
for fn in files:
copytftpcmd = "copy tftp://%s/%s running-config" % (self.tftp_ip, fn)
cmds.append(copytftpcmd)
cmds.append('copy running-config startup-config')
return cmds
def to_a10(self, dev, commands=None, extra=None):
cmds = []
files = self.files
log.msg('Device Type (%s) not supported' % dev.vendor)
return cmds
def to_juniper(self, dev, commands=None, extra=None):
if commands is None:
commands = []
cmds = [Element('lock-configuration')]
files = self.files
action = self.action
if action == 'overwrite':
action = 'override'
for fname in files:
#log.msg("fname: %s" % fname)
filecontents = ''
if not os.path.isfile(fname):
                fname = self.tftp_dir + fname  # tftp_dir is defined as a class attribute above
try:
filecontents = file(fname).read()
except IOError as e:
log.msg("Unable to open file: %s" % fname)
if filecontents == '':
continue
lc = Element('load-configuration', action=action, format='text')
body = SubElement(lc, 'configuration-text')
body.text = filecontents
cmds.append(lc)
if len(commands) > 0:
lc = Element('load-configuration', action=action, format='text')
body = SubElement(lc, 'configuration-text')
body.text = "\n".join(commands)
cmds.append(lc)
cmds.append(Element('commit-configuration'))
return cmds
def from_juniper(self, data, device, commands=None):
"""Do all the magic to parse Junos interfaces"""
#print 'device:', device
#print 'data len:', len(data)
self.raw = data
results = []
for xml in data:
jdata = xmltodict.parse(
ET.tostring(xml),
postprocessor=strip_juniper_namespace,
xml_attribs=False
)
##
## Leaving jdata structure native until I have a chance
## to look at it (and other vendors' results) and restructure
## into something sane.
## At that point, I will want to make sure that all vendors
## return a dict with the same structure.
##
self.data.append({'device':device, 'data':jdata})
results.append(jdata)
self.store_results(device, results)
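# Editor's note: an illustrative usage sketch, not part of the original plugin.
# The 'devices' keyword is assumed to be the CommandoApplication argument naming
# the target devices; the hostname and filename below are placeholders.
#
#     from trigger.contrib.commando.plugins.config_device import ConfigDevice
#
#     job = ConfigDevice(devices=['edge1.example.net'],
#                        files=['edge1.example.net.conf'],
#                        action='merge')
#     deferred = job.run()  # Twisted Deferred; parsed results accumulate in job.data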
| bsd-3-clause |
l0b0/cds-invenio-vengmark | modules/websession/lib/webgroup_tests.py | 3 | 1876 | # -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the group handling library."""
__revision__ = "$Id$"
import unittest
import sys
from invenio.testutils import make_test_suite, run_test_suite
if sys.hexversion < 0x2040000:
# pylint: disable-msg=W0622
from sets import Set as set
# pylint: enable-msg=W0622
class WebGroupTests(unittest.TestCase):
"""Test functions related to the WebGroup usage."""
def test_set(self):
"""webgroup - test fancy usage of set (differences among Python versions)"""
# These should succeed:
self.failUnless(set([1,2,3]))
self.assertEqual(set([1,2,3]) - set([3,4,5]), set([1,2]))
self.assertEqual(set([1,2,3,3]), set([1,2,3]))
self.assertEqual(set([1,2,3]), set([3,2,1]))
self.assertEqual(set([1,2,3]) & set([2,3,4]), set([2,3]))
self.assertEqual(set([1,2,3]) | set([2,3,4]), set([1,2,3,4]))
self.assertEqual(set([1,2,3]), set([3,2,1]))
TEST_SUITE = make_test_suite(WebGroupTests,)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 |
varunkumta/azure-linux-extensions | Common/libpsutil/py2.6-glibc-2.12-pre/psutil/_psbsd.py | 36 | 13237 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""FreeBSD platform implementation."""
import errno
import functools
import os
import sys
from collections import namedtuple
from psutil import _common
from psutil import _psposix
from psutil._common import conn_tmap, usage_percent
import _psutil_bsd as cext
import _psutil_posix
__extra__all__ = []
# --- constants
PROC_STATUSES = {
cext.SSTOP: _common.STATUS_STOPPED,
cext.SSLEEP: _common.STATUS_SLEEPING,
cext.SRUN: _common.STATUS_RUNNING,
cext.SIDL: _common.STATUS_IDLE,
cext.SWAIT: _common.STATUS_WAITING,
cext.SLOCK: _common.STATUS_LOCKED,
cext.SZOMB: _common.STATUS_ZOMBIE,
}
TCP_STATUSES = {
cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,
cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
cext.TCPS_CLOSED: _common.CONN_CLOSE,
cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
cext.TCPS_LISTEN: _common.CONN_LISTEN,
cext.TCPS_CLOSING: _common.CONN_CLOSING,
cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
}
PAGESIZE = os.sysconf("SC_PAGE_SIZE")
# extend base mem ntuple with BSD-specific memory metrics
svmem = namedtuple(
'svmem', ['total', 'available', 'percent', 'used', 'free',
'active', 'inactive', 'buffers', 'cached', 'shared', 'wired'])
scputimes = namedtuple(
'scputimes', ['user', 'nice', 'system', 'idle', 'irq'])
pextmem = namedtuple('pextmem', ['rss', 'vms', 'text', 'data', 'stack'])
pmmap_grouped = namedtuple(
'pmmap_grouped', 'path rss, private, ref_count, shadow_count')
pmmap_ext = namedtuple(
'pmmap_ext', 'addr, perms path rss, private, ref_count, shadow_count')
# set later from __init__.py
NoSuchProcess = None
AccessDenied = None
TimeoutExpired = None
def virtual_memory():
"""System virtual memory as a namedtuple."""
mem = cext.virtual_mem()
total, free, active, inactive, wired, cached, buffers, shared = mem
avail = inactive + cached + free
used = active + wired + cached
percent = usage_percent((total - avail), total, _round=1)
return svmem(total, avail, percent, used, free,
active, inactive, buffers, cached, shared, wired)
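# Editor's note: a minimal usage sketch, not part of the original module. On
# FreeBSD the public psutil.virtual_memory() call resolves to the function above,
# so the BSD-specific fields of the extended svmem namedtuple are available:
#
#     import psutil
#
#     mem = psutil.virtual_memory()
#     print(mem.total, mem.available, mem.percent)
#     print(mem.wired, mem.cached)  # BSD-only fields defined by svmem above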
def swap_memory():
"""System swap memory as (total, used, free, sin, sout) namedtuple."""
total, used, free, sin, sout = [x * PAGESIZE for x in cext.swap_mem()]
percent = usage_percent(used, total, _round=1)
return _common.sswap(total, used, free, percent, sin, sout)
def cpu_times():
"""Return system per-CPU times as a namedtuple"""
user, nice, system, idle, irq = cext.cpu_times()
return scputimes(user, nice, system, idle, irq)
if hasattr(cext, "per_cpu_times"):
def per_cpu_times():
"""Return system CPU times as a namedtuple"""
ret = []
for cpu_t in cext.per_cpu_times():
user, nice, system, idle, irq = cpu_t
item = scputimes(user, nice, system, idle, irq)
ret.append(item)
return ret
else:
# XXX
# Ok, this is very dirty.
# On FreeBSD < 8 we cannot gather per-cpu information, see:
# https://github.com/giampaolo/psutil/issues/226
# If num cpus > 1, on first call we return single cpu times to avoid a
# crash at psutil import time.
# Next calls will fail with NotImplementedError
def per_cpu_times():
if cpu_count_logical() == 1:
return [cpu_times()]
if per_cpu_times.__called__:
raise NotImplementedError("supported only starting from FreeBSD 8")
per_cpu_times.__called__ = True
return [cpu_times()]
per_cpu_times.__called__ = False
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
return cext.cpu_count_logical()
def cpu_count_physical():
"""Return the number of physical CPUs in the system."""
# From the C module we'll get an XML string similar to this:
# http://manpages.ubuntu.com/manpages/precise/man4/smp.4freebsd.html
# We may get None in case "sysctl kern.sched.topology_spec"
# is not supported on this BSD version, in which case we'll mimic
# os.cpu_count() and return None.
s = cext.cpu_count_phys()
if s is not None:
# get rid of padding chars appended at the end of the string
index = s.rfind("</groups>")
if index != -1:
s = s[:index + 9]
if sys.version_info >= (2, 5):
import xml.etree.ElementTree as ET
root = ET.fromstring(s)
return len(root.findall('group/children/group/cpu')) or None
else:
s = s[s.find('<children>'):]
return s.count("<cpu") or None
def boot_time():
"""The system boot time expressed in seconds since the epoch."""
return cext.boot_time()
def disk_partitions(all=False):
retlist = []
partitions = cext.disk_partitions()
for partition in partitions:
device, mountpoint, fstype, opts = partition
if device == 'none':
device = ''
if not all:
if not os.path.isabs(device) or not os.path.exists(device):
continue
ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
retlist.append(ntuple)
return retlist
def users():
retlist = []
rawlist = cext.users()
for item in rawlist:
user, tty, hostname, tstamp = item
if tty == '~':
continue # reboot or shutdown
nt = _common.suser(user, tty or None, hostname, tstamp)
retlist.append(nt)
return retlist
def net_connections(kind):
if kind not in _common.conn_tmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in conn_tmap])))
families, types = conn_tmap[kind]
ret = []
rawlist = cext.net_connections()
for item in rawlist:
fd, fam, type, laddr, raddr, status, pid = item
# TODO: apply filter at C level
if fam in families and type in types:
status = TCP_STATUSES[status]
nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)
ret.append(nt)
return ret
pids = cext.pids
pid_exists = _psposix.pid_exists
disk_usage = _psposix.disk_usage
net_io_counters = cext.net_io_counters
disk_io_counters = cext.disk_io_counters
def wrap_exceptions(fun):
"""Decorator which translates bare OSError exceptions into
NoSuchProcess and AccessDenied.
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None:
raise
if err.errno == errno.ESRCH:
raise NoSuchProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
class Process(object):
"""Wrapper class around underlying C implementation."""
__slots__ = ["pid", "_name"]
def __init__(self, pid):
self.pid = pid
self._name = None
@wrap_exceptions
def name(self):
return cext.proc_name(self.pid)
@wrap_exceptions
def exe(self):
return cext.proc_exe(self.pid)
@wrap_exceptions
def cmdline(self):
return cext.proc_cmdline(self.pid)
@wrap_exceptions
def terminal(self):
tty_nr = cext.proc_tty_nr(self.pid)
tmap = _psposix._get_terminal_map()
try:
return tmap[tty_nr]
except KeyError:
return None
@wrap_exceptions
def ppid(self):
return cext.proc_ppid(self.pid)
@wrap_exceptions
def uids(self):
real, effective, saved = cext.proc_uids(self.pid)
return _common.puids(real, effective, saved)
@wrap_exceptions
def gids(self):
real, effective, saved = cext.proc_gids(self.pid)
return _common.pgids(real, effective, saved)
@wrap_exceptions
def cpu_times(self):
user, system = cext.proc_cpu_times(self.pid)
return _common.pcputimes(user, system)
@wrap_exceptions
def memory_info(self):
rss, vms = cext.proc_memory_info(self.pid)[:2]
return _common.pmem(rss, vms)
@wrap_exceptions
def memory_info_ex(self):
return pextmem(*cext.proc_memory_info(self.pid))
@wrap_exceptions
def create_time(self):
return cext.proc_create_time(self.pid)
@wrap_exceptions
def num_threads(self):
return cext.proc_num_threads(self.pid)
@wrap_exceptions
def num_ctx_switches(self):
return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
@wrap_exceptions
def threads(self):
rawlist = cext.proc_threads(self.pid)
retlist = []
for thread_id, utime, stime in rawlist:
ntuple = _common.pthread(thread_id, utime, stime)
retlist.append(ntuple)
return retlist
@wrap_exceptions
def connections(self, kind='inet'):
if kind not in conn_tmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in conn_tmap])))
families, types = conn_tmap[kind]
rawlist = cext.proc_connections(self.pid, families, types)
ret = []
for item in rawlist:
fd, fam, type, laddr, raddr, status = item
status = TCP_STATUSES[status]
nt = _common.pconn(fd, fam, type, laddr, raddr, status)
ret.append(nt)
return ret
@wrap_exceptions
def wait(self, timeout=None):
try:
return _psposix.wait_pid(self.pid, timeout)
except _psposix.TimeoutExpired:
# support for private module import
if TimeoutExpired is None:
raise
raise TimeoutExpired(timeout, self.pid, self._name)
@wrap_exceptions
def nice_get(self):
return _psutil_posix.getpriority(self.pid)
@wrap_exceptions
def nice_set(self, value):
return _psutil_posix.setpriority(self.pid, value)
@wrap_exceptions
def status(self):
code = cext.proc_status(self.pid)
if code in PROC_STATUSES:
return PROC_STATUSES[code]
# XXX is this legit? will we even ever get here?
return "?"
@wrap_exceptions
def io_counters(self):
rc, wc, rb, wb = cext.proc_io_counters(self.pid)
return _common.pio(rc, wc, rb, wb)
nt_mmap_grouped = namedtuple(
'mmap', 'path rss, private, ref_count, shadow_count')
nt_mmap_ext = namedtuple(
'mmap', 'addr, perms path rss, private, ref_count, shadow_count')
# FreeBSD < 8 does not support functions based on kinfo_getfile()
# and kinfo_getvmmap()
if hasattr(cext, 'proc_open_files'):
@wrap_exceptions
def open_files(self):
"""Return files opened by process as a list of namedtuples."""
rawlist = cext.proc_open_files(self.pid)
return [_common.popenfile(path, fd) for path, fd in rawlist]
@wrap_exceptions
def cwd(self):
"""Return process current working directory."""
# sometimes we get an empty string, in which case we turn
# it into None
return cext.proc_cwd(self.pid) or None
@wrap_exceptions
def memory_maps(self):
return cext.proc_memory_maps(self.pid)
@wrap_exceptions
def num_fds(self):
"""Return the number of file descriptors opened by this process."""
return cext.proc_num_fds(self.pid)
else:
def _not_implemented(self):
raise NotImplementedError("supported only starting from FreeBSD 8")
open_files = _not_implemented
proc_cwd = _not_implemented
memory_maps = _not_implemented
num_fds = _not_implemented
@wrap_exceptions
def cpu_affinity_get(self):
return cext.proc_cpu_affinity_get(self.pid)
@wrap_exceptions
def cpu_affinity_set(self, cpus):
try:
cext.proc_cpu_affinity_set(self.pid, cpus)
except OSError as err:
# 'man cpuset_setaffinity' about EDEADLK:
# <<the call would leave a thread without a valid CPU to run
# on because the set does not overlap with the thread's
# anonymous mask>>
if err.errno in (errno.EINVAL, errno.EDEADLK):
allcpus = tuple(range(len(per_cpu_times())))
for cpu in cpus:
if cpu not in allcpus:
raise ValueError("invalid CPU #%i (choose between %s)"
% (cpu, allcpus))
raise
| apache-2.0 |
truongdq/chainer | chainer/functions/connection/linear.py | 3 | 2243 | import numpy
from chainer import function
from chainer.utils import type_check
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
class LinearFunction(function.Function):
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type, w_type = in_types[:2]
type_check.expect(
x_type.dtype == numpy.float32,
w_type.dtype == numpy.float32,
x_type.ndim >= 2,
w_type.ndim == 2,
type_check.prod(x_type.shape[1:]) == w_type.shape[1],
)
if n_in.eval() == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype == numpy.float32,
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
def forward(self, inputs):
x = _as_mat(inputs[0])
W = inputs[1]
y = x.dot(W.T)
if len(inputs) == 3:
b = inputs[2]
y += b
return y,
def backward(self, inputs, grad_outputs):
x = _as_mat(inputs[0])
W = inputs[1]
gy = grad_outputs[0]
gx = gy.dot(W).reshape(inputs[0].shape)
gW = gy.T.dot(x)
if len(inputs) == 3:
gb = gy.sum(0)
return gx, gW, gb
else:
return gx, gW
def linear(x, W, b=None):
"""Linear function, or affine transformation.
It accepts two or three arguments: an input minibatch ``x``, a weight
matrix ``W``, and optionally a bias vector ``b``. It computes
:math:`Y = xW^\\top + b`.
Args:
x (~chainer.Variable): Input variable. Its first dimension is assumed
to be the *minibatch dimension*. The other dimensions are treated
as concatenated one dimension whose size must be ``N``.
W (~chainer.Variable): Weight variable of shape ``(M, N)``.
b (~chainer.Variable): Bias variable (optional) of shape ``(M,)``..
Returns:
~chainer.Variable: Output variable.
.. seealso:: :class:`~chainer.links.Linear`
"""
if b is None:
return LinearFunction()(x, W)
else:
return LinearFunction()(x, W, b)
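# Editor's note: an illustrative sketch, not part of the original module. Shapes
# are arbitrary, and it assumes the usual re-export of this function as
# chainer.functions.linear:
#
#     import numpy as np
#     import chainer.functions as F
#
#     x = np.random.rand(4, 3).astype(np.float32)  # minibatch of 4, N = 3
#     W = np.random.rand(2, 3).astype(np.float32)  # M = 2 outputs
#     b = np.zeros(2, dtype=np.float32)
#     y = F.linear(x, W, b)                        # y.data.shape == (4, 2)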
| mit |
blackzw/openwrt_sdk_dev1 | staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/json/tests/test_recursion.py | 135 | 3374 | from json.tests import PyTest, CTest
class JSONTestObject:
pass
class TestRecursion(object):
def test_listrecursion(self):
x = []
x.append(x)
try:
self.dumps(x)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on list recursion")
x = []
y = [x]
x.append(y)
try:
self.dumps(x)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on alternating list recursion")
y = []
x = [y, y]
# ensure that the marker is cleared
self.dumps(x)
def test_dictrecursion(self):
x = {}
x["test"] = x
try:
self.dumps(x)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on dict recursion")
x = {}
y = {"a": x, "b": x}
# ensure that the marker is cleared
self.dumps(x)
def test_defaultrecursion(self):
class RecursiveJSONEncoder(self.json.JSONEncoder):
recurse = False
def default(self, o):
if o is JSONTestObject:
if self.recurse:
return [JSONTestObject]
else:
return 'JSONTestObject'
                return super(RecursiveJSONEncoder, self).default(o)
enc = RecursiveJSONEncoder()
self.assertEqual(enc.encode(JSONTestObject), '"JSONTestObject"')
enc.recurse = True
try:
enc.encode(JSONTestObject)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on default recursion")
def test_highly_nested_objects_decoding(self):
# test that loading highly-nested objects doesn't segfault when C
# accelerations are used. See #12017
# str
with self.assertRaises(RuntimeError):
self.loads('{"a":' * 100000 + '1' + '}' * 100000)
with self.assertRaises(RuntimeError):
self.loads('{"a":' * 100000 + '[1]' + '}' * 100000)
with self.assertRaises(RuntimeError):
self.loads('[' * 100000 + '1' + ']' * 100000)
# unicode
with self.assertRaises(RuntimeError):
self.loads(u'{"a":' * 100000 + u'1' + u'}' * 100000)
with self.assertRaises(RuntimeError):
self.loads(u'{"a":' * 100000 + u'[1]' + u'}' * 100000)
with self.assertRaises(RuntimeError):
self.loads(u'[' * 100000 + u'1' + u']' * 100000)
def test_highly_nested_objects_encoding(self):
# See #12051
l, d = [], {}
for x in xrange(100000):
l, d = [l], {'k':d}
with self.assertRaises(RuntimeError):
self.dumps(l)
with self.assertRaises(RuntimeError):
self.dumps(d)
def test_endless_recursion(self):
# See #12051
class EndlessJSONEncoder(self.json.JSONEncoder):
def default(self, o):
"""If check_circular is False, this will keep adding another list."""
return [o]
with self.assertRaises(RuntimeError):
EndlessJSONEncoder(check_circular=False).encode(5j)
class TestPyRecursion(TestRecursion, PyTest): pass
class TestCRecursion(TestRecursion, CTest): pass
| gpl-2.0 |
paaschpa/badcomputering | tastypie/utils/formatting.py | 47 | 1142 | from __future__ import unicode_literals
import email
import datetime
import time
from django.utils import dateformat
from tastypie.utils.timezone import make_aware, make_naive, aware_datetime
# Try to use dateutil for maximum date-parsing niceness. Fall back to
# hard-coded RFC2822 parsing if that's not possible.
try:
from dateutil.parser import parse as mk_datetime
except ImportError:
def mk_datetime(string):
return make_aware(datetime.datetime.fromtimestamp(time.mktime(email.utils.parsedate(string))))
def format_datetime(dt):
"""
RFC 2822 datetime formatter
"""
return dateformat.format(make_naive(dt), 'r')
def format_date(d):
"""
RFC 2822 date formatter
"""
# workaround because Django's dateformat utility requires a datetime
# object (not just date)
dt = aware_datetime(d.year, d.month, d.day, 0, 0, 0)
return dateformat.format(dt, 'j M Y')
def format_time(t):
"""
RFC 2822 time formatter
"""
# again, workaround dateformat input requirement
dt = aware_datetime(2000, 1, 1, t.hour, t.minute, t.second)
return dateformat.format(dt, 'H:i:s O')
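# Editor's note: an illustrative sketch, not part of the original module. The
# exact output depends on the configured Django timezone; values shown assume UTC:
#
#     import datetime
#     from tastypie.utils.formatting import format_date, format_time
#
#     format_date(datetime.date(2014, 7, 1))   # '1 Jul 2014'
#     format_time(datetime.time(13, 45, 30))   # '13:45:30 +0000'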
| bsd-3-clause |
Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/pyasn1/type/char.py | 11 | 13153 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <[email protected]>
# License: http://pyasn1.sf.net/license.html
#
import sys
from pyasn1.type import univ, tag
from pyasn1 import error
__all__ = ['NumericString', 'PrintableString', 'TeletexString', 'T61String', 'VideotexString',
'IA5String', 'GraphicString', 'VisibleString', 'ISO646String',
'GeneralString', 'UniversalString', 'BMPString', 'UTF8String']
NoValue = univ.NoValue
noValue = univ.noValue
class AbstractCharacterString(univ.OctetString):
"""Creates |ASN.1| type or object.
|ASN.1| objects are immutable and duck-type Python 2 :class:`unicode` or Python 3 :class:`str`.
When used in octet-stream context, |ASN.1| type assumes "|encoding|" encoding.
Parameters
----------
value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
unicode object (Python 2) or string (Python 3), alternatively string
(Python 2) or bytes (Python 3) representing octet-stream of serialized
unicode string (note `encoding` parameter) or |ASN.1| class instance.
tagSet: :py:class:`~pyasn1.type.tag.TagSet`
Object representing non-default ASN.1 tag(s)
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 subtype constraint(s)
encoding: :py:class:`str`
Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
:class:`str` (Python 3) the payload when |ASN.1| object is used
in octet-stream context.
Raises
------
: :py:class:`pyasn1.error.PyAsn1Error`
On constraint violation or bad initializer.
"""
if sys.version_info[0] <= 2:
def __str__(self):
try:
return self._value.encode(self.encoding)
except UnicodeEncodeError:
raise error.PyAsn1Error(
"Can't encode string '%s' with codec %s" % (self._value, self.encoding)
)
def __unicode__(self):
return unicode(self._value)
def prettyIn(self, value):
try:
if isinstance(value, unicode):
return value
elif isinstance(value, str):
return value.decode(self.encoding)
elif isinstance(value, (tuple, list)):
return self.prettyIn(''.join([chr(x) for x in value]))
elif isinstance(value, univ.OctetString):
return value.asOctets().decode(self.encoding)
else:
return unicode(value)
except (UnicodeDecodeError, LookupError):
raise error.PyAsn1Error(
"Can't decode string '%s' with codec %s" % (value, self.encoding)
)
def asOctets(self, padding=True):
return str(self)
def asNumbers(self, padding=True):
return tuple([ord(x) for x in str(self)])
else:
def __str__(self):
return str(self._value)
def __bytes__(self):
try:
return self._value.encode(self.encoding)
except UnicodeEncodeError:
raise error.PyAsn1Error(
"Can't encode string '%s' with codec %s" % (self._value, self.encoding)
)
def prettyIn(self, value):
try:
if isinstance(value, str):
return value
elif isinstance(value, bytes):
return value.decode(self.encoding)
elif isinstance(value, (tuple, list)):
return self.prettyIn(bytes(value))
elif isinstance(value, univ.OctetString):
return value.asOctets().decode(self.encoding)
else:
return str(value)
except (UnicodeDecodeError, LookupError):
raise error.PyAsn1Error(
"Can't decode string '%s' with codec %s" % (value, self.encoding)
)
def asOctets(self, padding=True):
return bytes(self)
def asNumbers(self, padding=True):
return tuple(bytes(self))
def prettyOut(self, value):
return value
def __reversed__(self):
return reversed(self._value)
def clone(self, value=noValue, **kwargs):
"""Creates a copy of a |ASN.1| type or object.
Any parameters to the *clone()* method will replace corresponding
properties of the |ASN.1| object.
Parameters
----------
value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
unicode object (Python 2) or string (Python 3), alternatively string
(Python 2) or bytes (Python 3) representing octet-stream of serialized
unicode string (note `encoding` parameter) or |ASN.1| class instance.
tagSet: :py:class:`~pyasn1.type.tag.TagSet`
Object representing non-default ASN.1 tag(s)
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 subtype constraint(s)
encoding: :py:class:`str`
Unicode codec ID to encode/decode :py:class:`unicode` (Python 2) or
:py:class:`str` (Python 3) the payload when |ASN.1| object is used
in octet-stream context.
Returns
-------
:
new instance of |ASN.1| type/value
"""
return univ.OctetString.clone(self, value, **kwargs)
def subtype(self, value=noValue, **kwargs):
"""Creates a copy of a |ASN.1| type or object.
Any parameters to the *subtype()* method will be added to the corresponding
properties of the |ASN.1| object.
Parameters
----------
value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
unicode object (Python 2) or string (Python 3), alternatively string
(Python 2) or bytes (Python 3) representing octet-stream of serialized
unicode string (note `encoding` parameter) or |ASN.1| class instance.
implicitTag: :py:class:`~pyasn1.type.tag.Tag`
Implicitly apply given ASN.1 tag object to caller's
:py:class:`~pyasn1.type.tag.TagSet`, then use the result as
new object's ASN.1 tag(s).
explicitTag: :py:class:`~pyasn1.type.tag.Tag`
Explicitly apply given ASN.1 tag object to caller's
:py:class:`~pyasn1.type.tag.TagSet`, then use the result as
new object's ASN.1 tag(s).
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 subtype constraint(s)
encoding: :py:class:`str`
Unicode codec ID to encode/decode :py:class:`unicode` (Python 2) or
:py:class:`str` (Python 3) the payload when |ASN.1| object is used
in octet-stream context.
Returns
-------
:
new instance of |ASN.1| type/value
"""
return univ.OctetString.subtype(self, value, **kwargs)
class NumericString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 18)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class PrintableString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 19)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class TeletexString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 20)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class T61String(TeletexString):
__doc__ = TeletexString.__doc__
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class VideotexString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 21)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class IA5String(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 22)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class GraphicString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 25)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class VisibleString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 26)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class ISO646String(VisibleString):
__doc__ = VisibleString.__doc__
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class GeneralString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 27)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class UniversalString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 28)
)
encoding = "utf-32-be"
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class BMPString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 30)
)
encoding = "utf-16-be"
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class UTF8String(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
)
encoding = "utf-8"
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
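# Editor's note: an illustrative sketch, not part of the original module.
# Character string objects duck-type as text and serialize through the codec
# named by their `encoding` attribute:
#
#     from pyasn1.type import char
#     from pyasn1.codec.ber import encoder
#
#     s = char.UTF8String(u'hello')
#     str(s)             # 'hello'
#     encoder.encode(s)  # BER-encoded octets produced via the "utf-8" codec above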
| gpl-2.0 |
moijes12/oh-mainline | vendor/packages/ghettoq/ghettoq/backends/beanstalk.py | 17 | 2211 | from itertools import ifilter
from Queue import Empty
from beanstalkc import Connection
from ghettoq.backends.base import BaseBackend
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 11300
class BeanstalkBackend(BaseBackend):
def _parse_job(self, job):
item, dest = None, None
if job:
try:
item = job.body
dest = job.stats()['tube']
except:
job.bury()
else:
job.delete()
else:
raise Empty
return item, dest
def establish_connection(self):
self.host = self.host or DEFAULT_HOST
self.port = self.port or DEFAULT_PORT
return Connection(host=self.host, port=self.port)
def put(self, queue, message, priority=0, **kwargs):
self.client.use(queue)
self.client.put(message, priority=priority)
def get(self, queue):
if not queue:
raise Empty
if queue not in self.client.watching():
self.client.watch(queue)
        ignore = ifilter(lambda q: q != queue, self.client.watching())
map(self.client.ignore, ignore)
job = self.client.reserve(timeout=1)
item, dest = self._parse_job(job)
return item
def get_many(self, queues, timeout=None):
if not queues:
raise Empty
# timeout of None will cause beanstalk to timeout waiting
# for a new request
if timeout is None:
timeout = 1
to_watch = ifilter(lambda q: q not in self.client.watching(), queues)
map(self.client.watch, to_watch)
job = self.client.reserve(timeout=timeout)
return self._parse_job(job)
def purge(self, queue):
if queue not in self.client.watching():
self.client.watch(queue)
ignore = ifilter(lambda q: q!= queue, self.client.watching())
map(self.client.ignore, ignore)
count = 0
while True:
job = self.client.reserve(timeout=1)
if job:
job.delete()
count += 1
else:
break
return count
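# Editor's note: an illustrative sketch, not part of the original module. The
# constructor keywords are assumed to be those accepted by BaseBackend; the host
# and queue name are placeholders:
#
#     from ghettoq.backends.beanstalk import BeanstalkBackend
#
#     backend = BeanstalkBackend(host='localhost', port=11300)
#     backend.put('tasks', '{"job": 1}')
#     body = backend.get('tasks')  # raises Queue.Empty if nothing is reserved in 1s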
| agpl-3.0 |
nhomar/odoo | addons/base_import_module/models/ir_module.py | 238 | 4795 | import logging
import os
import sys
import zipfile
from os.path import join as opj
import openerp
from openerp.osv import osv
from openerp.tools import convert_file
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MAX_FILE_SIZE = 100 * 1024 * 1024  # 100 MB (value expressed in bytes)
class view(osv.osv):
_inherit = "ir.module.module"
def import_module(self, cr, uid, module, path, force=False, context=None):
known_mods = self.browse(cr, uid, self.search(cr, uid, []))
known_mods_names = dict([(m.name, m) for m in known_mods])
installed_mods = [m.name for m in known_mods if m.state == 'installed']
terp = openerp.modules.load_information_from_description_file(module, mod_path=path)
values = self.get_values_from_terp(terp)
unmet_dependencies = set(terp['depends']).difference(installed_mods)
if unmet_dependencies:
msg = _("Unmet module dependencies: %s")
raise osv.except_osv(_('Error !'), msg % ', '.join(unmet_dependencies))
mod = known_mods_names.get(module)
if mod:
self.write(cr, uid, mod.id, dict(state='installed', **values))
mode = 'update' if not force else 'init'
else:
assert terp.get('installable', True), "Module not installable"
self.create(cr, uid, dict(name=module, state='installed', **values))
mode = 'init'
for kind in ['data', 'init_xml', 'update_xml']:
for filename in terp[kind]:
_logger.info("module %s: loading %s", module, filename)
noupdate = False
if filename.endswith('.csv') and kind in ('init', 'init_xml'):
noupdate = True
pathname = opj(path, filename)
idref = {}
convert_file(cr, module, filename, idref, mode=mode, noupdate=noupdate, kind=kind, pathname=pathname)
path_static = opj(path, 'static')
ir_attach = self.pool['ir.attachment']
if os.path.isdir(path_static):
for root, dirs, files in os.walk(path_static):
for static_file in files:
full_path = opj(root, static_file)
with open(full_path, 'r') as fp:
data = fp.read().encode('base64')
url_path = '/%s%s' % (module, full_path.split(path)[1].replace(os.path.sep, '/'))
url_path = url_path.decode(sys.getfilesystemencoding())
filename = os.path.split(url_path)[1]
values = dict(
name=filename,
datas_fname=filename,
url=url_path,
res_model='ir.ui.view',
type='binary',
datas=data,
)
att_id = ir_attach.search(cr, uid, [('url', '=', url_path), ('type', '=', 'binary'), ('res_model', '=', 'ir.ui.view')], context=context)
if att_id:
ir_attach.write(cr, uid, att_id, values, context=context)
else:
ir_attach.create(cr, uid, values, context=context)
return True
def import_zipfile(self, cr, uid, module_file, force=False, context=None):
if not module_file:
raise Exception("No file sent.")
if not zipfile.is_zipfile(module_file):
raise osv.except_osv(_('Error !'), _('File is not a zip file!'))
success = []
errors = dict()
module_names = []
with zipfile.ZipFile(module_file, "r") as z:
for zf in z.filelist:
if zf.file_size > MAX_FILE_SIZE:
msg = _("File '%s' exceed maximum allowed file size")
raise osv.except_osv(_('Error !'), msg % zf.filename)
with openerp.tools.osutil.tempdir() as module_dir:
z.extractall(module_dir)
dirs = [d for d in os.listdir(module_dir) if os.path.isdir(opj(module_dir, d))]
for mod_name in dirs:
module_names.append(mod_name)
try:
# assert mod_name.startswith('theme_')
path = opj(module_dir, mod_name)
self.import_module(cr, uid, mod_name, path, force=force, context=context)
success.append(mod_name)
except Exception, e:
errors[mod_name] = str(e)
r = ["Successfully imported module '%s'" % mod for mod in success]
for mod, error in errors.items():
r.append("Error while importing module '%s': %r" % (mod, error))
return '\n'.join(r), module_names
| agpl-3.0 |
6H057WH1P3/Asit | lib/requests/packages/urllib3/util/ssl_.py | 484 | 10037 | from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from ..exceptions import SSLError, InsecurePlatformWarning
SSLContext = None
HAS_SNI = False
create_default_context = None
import errno
import warnings
try: # Test for SSL features
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
try:
from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
OP_NO_COMPRESSION = 0x20000
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM over any AES-CBC for better performance and security,
# - use 3DES as fallback which is secure but slow,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_CIPHERS = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
'!eNULL:!MD5'
)
try:
from ssl import SSLContext # Modern SSL?
except ImportError:
import sys
class SSLContext(object): # Platform-specific: Python 2 & 3.1
supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
(3, 2) <= sys.version_info)
def __init__(self, protocol_version):
self.protocol = protocol_version
# Use default values from a real SSLContext
self.check_hostname = False
self.verify_mode = ssl.CERT_NONE
self.ca_certs = None
self.options = 0
self.certfile = None
self.keyfile = None
self.ciphers = None
def load_cert_chain(self, certfile, keyfile):
self.certfile = certfile
self.keyfile = keyfile
def load_verify_locations(self, location):
self.ca_certs = location
def set_ciphers(self, cipher_suite):
if not self.supports_set_ciphers:
raise TypeError(
'Your version of Python does not support setting '
'a custom cipher suite. Please upgrade to Python '
'2.7, 3.2, or later if you need this functionality.'
)
self.ciphers = cipher_suite
def wrap_socket(self, socket, server_hostname=None):
warnings.warn(
'A true SSLContext object is not available. This prevents '
'urllib3 from configuring SSL appropriately and may cause '
'certain SSL connections to fail. For more information, see '
'https://urllib3.readthedocs.org/en/latest/security.html'
'#insecureplatformwarning.',
InsecurePlatformWarning
)
kwargs = {
'keyfile': self.keyfile,
'certfile': self.certfile,
'ca_certs': self.ca_certs,
'cert_reqs': self.verify_mode,
'ssl_version': self.protocol,
}
if self.supports_set_ciphers: # Platform-specific: Python 2.7+
return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
else: # Platform-specific: Python 2.6
return wrap_socket(socket, **kwargs)
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
# Maps the length of a digest to a possible hash function producing
# this digest.
hashfunc_map = {
16: md5,
20: sha1,
32: sha256,
}
fingerprint = fingerprint.replace(':', '').lower()
digest_length, odd = divmod(len(fingerprint), 2)
if odd or digest_length not in hashfunc_map:
raise SSLError('Fingerprint is of invalid length.')
    # We need encode() here for py32; works on py2 and py33.
fingerprint_bytes = unhexlify(fingerprint.encode())
hashfunc = hashfunc_map[digest_length]
cert_digest = hashfunc(cert).digest()
if not cert_digest == fingerprint_bytes:
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
.format(hexlify(fingerprint_bytes),
hexlify(cert_digest)))
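# Editor's note: an illustrative sketch, not part of the original module. The
# host is a placeholder and the fingerprint shown is a dummy value; the function
# raises SSLError on any mismatch:
#
#     import socket, ssl
#     from urllib3.util.ssl_ import assert_fingerprint
#
#     sock = ssl.wrap_socket(socket.create_connection(('example.org', 443)))
#     der_cert = sock.getpeercert(binary_form=True)
#     assert_fingerprint(der_cert, '01:23:45:67:89:ab:cd:ef:01:23:45:67:89:ab:cd:ef:01:23:45:67')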
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbrevation.
(So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
like resolve_cert_reqs
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
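# Editor's note: an illustrative sketch, not part of the original module, showing
# how the two resolvers map names and abbreviations onto ssl module constants:
#
#     import ssl
#     from urllib3.util.ssl_ import resolve_cert_reqs, resolve_ssl_version
#
#     resolve_cert_reqs(None)          # ssl.CERT_NONE
#     resolve_cert_reqs('REQUIRED')    # ssl.CERT_REQUIRED
#     resolve_ssl_version('TLSv1')     # ssl.PROTOCOL_TLSv1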
def create_urllib3_context(ssl_version=None, cert_reqs=None,
options=None, ciphers=None):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
Whether to require the certificate verification. This defaults to
``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param ciphers:
Which cipher suites to allow the server to select.
:returns:
Constructed SSLContext object with specified options
:rtype: SSLContext
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
# Setting the default here, as we may have no ssl module on import
cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue #309)
options |= OP_NO_COMPRESSION
context.options |= options
if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
context.set_ciphers(ciphers or DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
return context
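# Usage sketch (illustrative only): build a context and opt back into SSLv3, as
# described in the docstring above; OP_NO_SSLv3 is the module-level constant
# imported alongside OP_NO_SSLv2 and OP_NO_COMPRESSION.
#
#   ctx = create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
#   ctx.options &= ~OP_NO_SSLv3    # generally not recommended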
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None, ciphers=None, ssl_context=None):
"""
All arguments except for server_hostname and ssl_context have the same
meaning as they do when using :func:`ssl.wrap_socket`.
:param server_hostname:
When SNI is supported, the expected hostname of the certificate
:param ssl_context:
A pre-made :class:`SSLContext` object. If none is provided, one will
be created using :func:`create_urllib3_context`.
:param ciphers:
A string of ciphers we wish the client to support. This is not
supported on Python 2.6 as the ssl module does not support it.
"""
context = ssl_context
if context is None:
context = create_urllib3_context(ssl_version, cert_reqs,
ciphers=ciphers)
if ca_certs:
try:
context.load_verify_locations(ca_certs)
except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
raise SSLError(e)
# Py33 raises FileNotFoundError which subclasses OSError
# These are not equivalent unless we check the errno attribute
except OSError as e: # Platform-specific: Python 3.3 and beyond
if e.errno == errno.ENOENT:
raise SSLError(e)
raise
if certfile:
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
return context.wrap_socket(sock)
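# Minimal usage sketch (illustrative, not part of the original file): wrap a plain
# TCP socket with verification against a hypothetical CA bundle path.
#
#   import socket
#   raw = socket.create_connection(('example.com', 443))
#   tls = ssl_wrap_socket(raw, cert_reqs=ssl.CERT_REQUIRED,
#                         ca_certs='/path/to/cacert.pem',      # hypothetical path
#                         server_hostname='example.com')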
| mit |
ghchinoy/tensorflow | tensorflow/contrib/boosted_trees/lib/learner/batch/categorical_split_handler_test.py | 13 | 25928 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking stats accumulator related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.lib.learner.batch import categorical_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def get_empty_tensors(gradient_shape, hessian_shape):
empty_hess_shape = [1] + hessian_shape.as_list()
empty_grad_shape = [1] + gradient_shape.as_list()
empty_gradients = constant_op.constant_v1(
[], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant_v1(
[], dtype=dtypes.float32, shape=empty_hess_shape)
return empty_gradients, empty_hessians
class EqualitySplitHandlerTest(test_util.TensorFlowTestCase):
def testGenerateFeatureSplitCandidates(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Feature ID |
# i0 | (0.2, 0.12) | 0 | 1,2 |
# i1 | (-0.5, 0.07) | 0 | |
# i2 | (1.2, 0.2) | 0 | 2 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_2 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1, update_2]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
expected_left_weight = -0.9848484848484846
# (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
expected_left_gain = 1.2803030303030298
# -(-0.5 + 0.1) / (0.07 + 1)
expected_right_weight = 0.37383177570093457
# (-0.5 + 0.1) ** 2 / (0.07 + 1)
expected_right_gain = 0.14953271028037385
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(2, split_node.feature_id)
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
# Check the split on partition 1.
# (-4 + 0.1) / (0.13 + 1)
expected_left_weight = -3.4513274336283186
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_left_gain = 13.460176991150442
expected_right_weight = 0
expected_right_gain = 0
# (-4 + 0.1) ** 2 / (0.13 + 1)
expected_bias_gain = 13.460176991150442
# Verify candidate for partition 1, there's only one active feature here
# so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
def testObliviousFeatureSplitGeneration(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Feature ID |
# i0 | (0.2, 0.12) | 1 | 1 |
# i1 | (-0.5, 0.07) | 1 | 2 |
# i2 | (1.2, 0.2) | 1 | 1 |
# i3 | (4.0, 0.13) | 2 | 2 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [1, 1, 1, 2]
indices = [[0, 0], [1, 0], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 1, 2], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0,
weak_learner_type=learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_2 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1, update_2]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (
sess.run([are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([1, 2], partitions)
# For partition 1.
# -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
expected_left_weight1 = -0.9848484848484846
# (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
expected_left_gain1 = 1.2803030303030298
# -(-0.5 + 0.1) / (0.07 + 1)
expected_right_weight1 = 0.37383177570093457
# (-0.5 + 0.1) ** 2 / (0.07 + 1)
expected_right_gain1 = 0.14953271028037385
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain1 = 0.46043165467625885
split_info = split_info_pb2.ObliviousSplitInfo()
split_info.ParseFromString(splits[0])
# Children of partition 1.
left_child = split_info.children[0].vector
right_child = split_info.children[1].vector
split_node = split_info.split_node.oblivious_categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
self.assertAllClose([expected_left_weight1], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight1], right_child.value, 0.00001)
# For partition2.
expected_left_weight2 = 0
expected_left_gain2 = 0
# -(4 - 0.1) / (0.13 + 1)
expected_right_weight2 = -3.4513274336283186
# (4 - 0.1) ** 2 / (0.13 + 1)
expected_right_gain2 = 13.460176991150442
# (4 - 0.1) ** 2 / (0.13 + 1)
expected_bias_gain2 = 13.460176991150442
# Children of partition 2.
left_child = split_info.children[2].vector
right_child = split_info.children[3].vector
self.assertAllClose([expected_left_weight2], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight2], right_child.value, 0.00001)
self.assertAllClose(
expected_left_gain1 + expected_right_gain1 - expected_bias_gain1 +
expected_left_gain2 + expected_right_gain2 - expected_bias_gain2,
gains[0], 0.00001)
def testGenerateFeatureSplitCandidatesSumReduction(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Feature ID |
# i0 | (0.2, 0.12) | 0 | 1,2 |
# i1 | (-0.5, 0.07) | 0 | |
# i2 | (1.2, 0.2) | 0 | 2 |
# i3 | (4.0, 0.13) | 1 | 1 |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0,
loss_uses_sum_reduction=True)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
update_2 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1, update_2]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (
sess.run([are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([0, 1], partitions)
# Check the split on partition 0.
# -(0.4 + 2.4 - 0.1) / (0.24 + 0.4 + 1)
expected_left_weight = -1.6463414634146338
# (0.4 + 2.4 - 0.1) ** 2 / (0.24 + 0.4 + 1)
expected_left_gain = 4.445121951219511
# -(-1 + 0.1) / (0.14 + 1)
expected_right_weight = 0.789473684211
# (-1 + 0.1) ** 2 / (0.14 + 1)
expected_right_gain = 0.710526315789
# (0.4 + -1 + 2.4 - 0.1) ** 2 / (0.24 + 0.14 + 0.4 + 1)
expected_bias_gain = 1.6235955056179772
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(2, split_node.feature_id)
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
# Check the split on partition 1.
# (-8 + 0.1) / (0.26 + 1)
expected_left_weight = -6.26984126984
# (-8 + 0.1) ** 2 / (0.26 + 1)
expected_left_gain = 49.5317460317
expected_right_weight = 0
expected_right_gain = 0
# (-8 + 0.1) ** 2 / (0.26 + 1)
expected_bias_gain = 49.5317460317
# Verify candidate for partition 1, there's only one active feature here
# so zero gain is expected.
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertAllClose(0.0, gains[1], 0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
self.assertEqual(0, split_node.feature_column)
self.assertEqual(1, split_node.feature_id)
def testGenerateFeatureSplitCandidatesMulticlass(self):
with self.cached_session() as sess:
# Batch size is 4, 2 gradients per each instance.
gradients = array_ops.constant(
[[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]], shape=[4, 2])
# 2x2 matrix for each instance
hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
hessians = array_ops.constant(
[hessian_0, hessian_1, hessian_2, hessian_3])
partition_ids = array_ops.constant([0, 0, 0, 1], dtype=dtypes.int32)
gradient_shape = tensor_shape.TensorShape([2])
hessian_shape = tensor_shape.TensorShape([2, 2])
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([0, 1], partitions)
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
# Each leaf has 2 element vector.
self.assertEqual(2, len(left_child.value))
self.assertEqual(2, len(right_child.value))
self.assertEqual(1, split_node.feature_id)
split_info.ParseFromString(splits[1])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(2, len(left_child.value))
self.assertEqual(0, len(right_child.value))
self.assertEqual(1, split_node.feature_id)
def testEmpty(self):
with self.cached_session() as sess:
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = constant_op.constant_v1([], dtype=dtypes.int64, shape=[0, 2])
values = constant_op.constant_v1([], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testInactive(self):
with self.cached_session() as sess:
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([False, False]))
with ops.control_dependencies([update_1]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (sess.run(
[are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertEqual(len(partitions), 0)
self.assertEqual(len(gains), 0)
self.assertEqual(len(splits), 0)
def testLastOneEmpty(self):
with self.cached_session() as sess:
# The data looks like the following:
# Example | Gradients | Partition | Feature ID |
# i0 | (0.2, 0.12) | 0 | 1,2 |
# i1 | (-0.5, 0.07) | 0 | |
# i2 | (1.2, 0.2) | 0 | 2 |
# i3 | (4.0, 0.13) | 1 | |
gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
partition_ids = [0, 0, 0, 1]
indices = [[0, 0], [0, 1], [2, 0]]
values = array_ops.constant([1, 2, 2], dtype=dtypes.int64)
gradient_shape = tensor_shape.scalar()
hessian_shape = tensor_shape.scalar()
class_id = -1
split_handler = categorical_split_handler.EqualitySplitHandler(
l1_regularization=0.1,
l2_regularization=1,
tree_complexity_regularization=0,
min_node_weight=0,
sparse_int_column=sparse_tensor.SparseTensor(indices, values, [4, 1]),
feature_column_group_id=0,
gradient_shape=gradient_shape,
hessian_shape=hessian_shape,
multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
init_stamp_token=0)
resources.initialize_resources(resources.shared_resources()).run()
empty_gradients, empty_hessians = get_empty_tensors(
gradient_shape, hessian_shape)
example_weights = array_ops.ones([4, 1], dtypes.float32)
update_1 = split_handler.update_stats_sync(
0,
partition_ids,
gradients,
hessians,
empty_gradients,
empty_hessians,
example_weights,
is_active=array_ops.constant([True, True]))
with ops.control_dependencies([update_1]):
are_splits_ready, partitions, gains, splits = (
split_handler.make_splits(0, 1, class_id))
are_splits_ready, partitions, gains, splits = (
sess.run([are_splits_ready, partitions, gains, splits]))
self.assertTrue(are_splits_ready)
self.assertAllEqual([0], partitions)
# Check the split on partition 0.
# -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
expected_left_weight = -0.9848484848484846
# (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
expected_left_gain = 1.2803030303030298
# -(-0.5 + 0.1) / (0.07 + 1)
expected_right_weight = 0.37383177570093457
# (-0.5 + 0.1) ** 2 / (0.07 + 1)
expected_right_gain = 0.14953271028037385
# (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
expected_bias_gain = 0.46043165467625885
split_info = split_info_pb2.SplitInfo()
split_info.ParseFromString(splits[0])
left_child = split_info.left_child.vector
right_child = split_info.right_child.vector
split_node = split_info.split_node.categorical_id_binary_split
self.assertEqual(0, split_node.feature_column)
self.assertEqual(2, split_node.feature_id)
self.assertAllClose(
expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
0.00001)
self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
tellesnobrega/sahara | sahara/tests/unit/utils/openstack/test_base.py | 3 | 10848 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cinderclient import exceptions as cinder_exc
from heatclient import exc as heat_exc
from keystoneauth1 import exceptions as keystone_exc
import mock
from neutronclient.common import exceptions as neutron_exc
from novaclient import exceptions as nova_exc
from sahara import exceptions as sahara_exc
from sahara.tests.unit import base as testbase
from sahara.utils.openstack import base
class TestBase(testbase.SaharaTestCase):
def test_url_for_regions(self):
service_catalog = (
'[{"endpoints": '
' [{"adminURL": "http://192.168.0.5:8774/v2", '
' "region": "RegionOne", '
' "id": "83d12c9ad2d647ecab7cbe91adb8666b", '
' "internalURL": "http://192.168.0.5:8774/v2", '
' "publicURL": "http://172.18.184.5:8774/v2"}, '
' {"adminURL": "http://192.168.0.6:8774/v2", '
' "region": "RegionTwo", '
' "id": "07c5a555176246c783d8f0497c98537b", '
' "internalURL": "http://192.168.0.6:8774/v2", '
' "publicURL": "http://172.18.184.6:8774/v2"}], '
' "endpoints_links": [], '
' "type": "compute", '
' "name": "nova"}]')
self.override_config("os_region_name", "RegionOne")
self.assertEqual("http://192.168.0.5:8774/v2",
base.url_for(service_catalog, "compute"))
self.override_config("os_region_name", "RegionTwo")
self.assertEqual("http://192.168.0.6:8774/v2",
base.url_for(service_catalog, "compute"))
class AuthUrlTest(testbase.SaharaTestCase):
def test_retrieve_auth_url_api_v3(self):
self.override_config('use_identity_api_v3', True)
correct = "https://127.0.0.1:8080/v3"
def _assert(uri):
self.override_config('auth_uri', uri, 'keystone_authtoken')
self.assertEqual(correct, base.retrieve_auth_url())
_assert("%s/" % correct)
_assert("https://127.0.0.1:8080")
_assert("https://127.0.0.1:8080/")
_assert("https://127.0.0.1:8080/v2.0")
_assert("https://127.0.0.1:8080/v2.0/")
_assert("https://127.0.0.1:8080/v3")
_assert("https://127.0.0.1:8080/v3/")
@mock.patch("sahara.utils.openstack.base.url_for")
def test_retrieve_auth_url_api_v3_without_port(self, mock_url_for):
self.override_config('use_identity_api_v3', True)
self.setup_context(service_catalog=True)
correct = "https://127.0.0.1/v3"
def _assert(uri):
mock_url_for.return_value = uri
self.assertEqual(correct, base.retrieve_auth_url())
_assert("%s/" % correct)
_assert("https://127.0.0.1")
_assert("https://127.0.0.1/")
_assert("https://127.0.0.1/v2.0")
_assert("https://127.0.0.1/v2.0/")
_assert("https://127.0.0.1/v3")
_assert("https://127.0.0.1/v3/")
@mock.patch("sahara.utils.openstack.base.url_for")
def test_retrieve_auth_url_api_v3_path_present(self, mock_url_for):
self.override_config('use_identity_api_v3', True)
self.setup_context(service_catalog=True)
correct = "https://127.0.0.1/identity/v3"
def _assert(uri):
mock_url_for.return_value = uri
self.assertEqual(correct, base.retrieve_auth_url())
_assert("%s" % correct)
_assert("%s/" % correct)
_assert("https://127.0.0.1/identity")
_assert("https://127.0.0.1/identity/")
def test_retrieve_auth_url_api_v20(self):
self.override_config('use_identity_api_v3', False)
correct = "https://127.0.0.1:8080/v2.0"
def _assert(uri):
self.override_config('auth_uri', uri, 'keystone_authtoken')
self.assertEqual(correct, base.retrieve_auth_url())
_assert("%s/" % correct)
_assert("https://127.0.0.1:8080")
_assert("https://127.0.0.1:8080/")
_assert("https://127.0.0.1:8080/v2.0")
_assert("https://127.0.0.1:8080/v2.0/")
_assert("https://127.0.0.1:8080/v3")
_assert("https://127.0.0.1:8080/v3/")
@mock.patch("sahara.utils.openstack.base.url_for")
def test_retrieve_auth_url_api_v20_without_port(self, mock_url_for):
self.override_config('use_identity_api_v3', False)
self.setup_context(service_catalog=True)
correct = "https://127.0.0.1/v2.0"
def _assert(uri):
mock_url_for.return_value = uri
self.assertEqual(correct, base.retrieve_auth_url())
_assert("%s/" % correct)
_assert("https://127.0.0.1")
_assert("https://127.0.0.1/")
_assert("https://127.0.0.1/v2.0")
_assert("https://127.0.0.1/v2.0/")
_assert("https://127.0.0.1/v3")
_assert("https://127.0.0.1/v3/")
class ExecuteWithRetryTest(testbase.SaharaTestCase):
def setUp(self):
super(ExecuteWithRetryTest, self).setUp()
self.fake_client_call = mock.MagicMock()
self.fake_client_call.__name__ = 'fake_client_call'
self.override_config('retries_number', 2, 'retries')
@mock.patch('sahara.context.sleep')
def _check_error_without_retry(self, error, code, m_sleep):
self.fake_client_call.side_effect = error(code)
self.assertRaises(error, base.execute_with_retries,
self.fake_client_call)
self.assertEqual(1, self.fake_client_call.call_count)
self.fake_client_call.reset_mock()
@mock.patch('sahara.context.sleep')
def _check_error_with_retry(self, error, code, m_sleep):
self.fake_client_call.side_effect = error(code)
self.assertRaises(sahara_exc.MaxRetriesExceeded,
base.execute_with_retries, self.fake_client_call)
self.assertEqual(3, self.fake_client_call.call_count)
self.fake_client_call.reset_mock()
def test_novaclient_calls_without_retry(self):
# check that following errors will not be retried
self._check_error_without_retry(nova_exc.BadRequest, 400)
self._check_error_without_retry(nova_exc.Unauthorized, 401)
self._check_error_without_retry(nova_exc.Forbidden, 403)
self._check_error_without_retry(nova_exc.NotFound, 404)
self._check_error_without_retry(nova_exc.MethodNotAllowed, 405)
self._check_error_without_retry(nova_exc.Conflict, 409)
self._check_error_without_retry(nova_exc.HTTPNotImplemented, 501)
def test_novaclient_calls_with_retry(self):
# check that following errors will be retried
self._check_error_with_retry(nova_exc.OverLimit, 413)
self._check_error_with_retry(nova_exc.RateLimit, 429)
def test_cinderclient_calls_without_retry(self):
# check that following errors will not be retried
self._check_error_without_retry(cinder_exc.BadRequest, 400)
self._check_error_without_retry(cinder_exc.Unauthorized, 401)
self._check_error_without_retry(cinder_exc.Forbidden, 403)
self._check_error_without_retry(cinder_exc.NotFound, 404)
self._check_error_without_retry(nova_exc.HTTPNotImplemented, 501)
def test_cinderclient_calls_with_retry(self):
# check that following error will be retried
self._check_error_with_retry(cinder_exc.OverLimit, 413)
def test_neutronclient_calls_without_retry(self):
# check that following errors will not be retried
# neutron exception expects string in constructor
self._check_error_without_retry(neutron_exc.BadRequest, "400")
self._check_error_without_retry(neutron_exc.Forbidden, "403")
self._check_error_without_retry(neutron_exc.NotFound, "404")
self._check_error_without_retry(neutron_exc.Conflict, "409")
def test_neutronclient_calls_with_retry(self):
# check that following errors will be retried
# neutron exception expects string in constructor
self._check_error_with_retry(neutron_exc.InternalServerError, "500")
self._check_error_with_retry(neutron_exc.ServiceUnavailable, "503")
def test_heatclient_calls_without_retry(self):
# check that following errors will not be retried
self._check_error_without_retry(heat_exc.HTTPBadRequest, 400)
self._check_error_without_retry(heat_exc.HTTPUnauthorized, 401)
self._check_error_without_retry(heat_exc.HTTPForbidden, 403)
self._check_error_without_retry(heat_exc.HTTPNotFound, 404)
self._check_error_without_retry(heat_exc.HTTPMethodNotAllowed, 405)
self._check_error_without_retry(heat_exc.HTTPConflict, 409)
self._check_error_without_retry(heat_exc.HTTPUnsupported, 415)
self._check_error_without_retry(heat_exc.HTTPNotImplemented, 501)
def test_heatclient_calls_with_retry(self):
# check that following errors will be retried
self._check_error_with_retry(heat_exc.HTTPInternalServerError, 500)
self._check_error_with_retry(heat_exc.HTTPBadGateway, 502)
self._check_error_with_retry(heat_exc.HTTPServiceUnavailable, 503)
def test_keystoneclient_calls_without_retry(self):
# check that following errors will not be retried
self._check_error_without_retry(keystone_exc.BadRequest, 400)
self._check_error_without_retry(keystone_exc.Unauthorized, 401)
self._check_error_without_retry(keystone_exc.Forbidden, 403)
self._check_error_without_retry(keystone_exc.NotFound, 404)
self._check_error_without_retry(keystone_exc.MethodNotAllowed, 405)
self._check_error_without_retry(keystone_exc.Conflict, 409)
self._check_error_without_retry(keystone_exc.UnsupportedMediaType, 415)
self._check_error_without_retry(keystone_exc.HttpNotImplemented, 501)
def test_keystoneclient_calls_with_retry(self):
# check that following errors will be retried
self._check_error_with_retry(keystone_exc.RequestTimeout, 408)
self._check_error_with_retry(keystone_exc.InternalServerError, 500)
self._check_error_with_retry(keystone_exc.BadGateway, 502)
self._check_error_with_retry(keystone_exc.ServiceUnavailable, 503)
self._check_error_with_retry(keystone_exc.GatewayTimeout, 504)
| apache-2.0 |
googlei18n/language-resources | bn/zero_width.py | 2 | 2661 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015, 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for context-aware removal of ZWNJ and ZWJ in Bangla text.
Tested with:
$ python2 -m unittest zero_width
$ python3 -m unittest zero_width
"""
from __future__ import unicode_literals
import io
import re
import unittest
STANDARDIZE_ZW = re.compile(r'(?<=\u09b0)[\u200c\u200d]+(?=\u09cd\u09af)')
DELETE_ZW = re.compile(r'(?<!\u09b0)[\u200c\u200d](?!\u09cd\u09af)')
def RemoveOptionalZW(text):
"""Removes all optional occurrences of ZWNJ or ZWJ from Bangla text.
The non-printing characters U+200C (ZWNJ) and U+200D (ZWJ) are used in Bangla
to optionally control the appearance of ligatures, except in one special
situation: after RA and before VIRAMA+YA, the presence or absence of ZWJ
(formerly ZWNJ) changes the visual appearance of the involved consonants in a
meaningful way. This occurrences of ZWJ must be preserved, while all other
occurrences are advisory and can be removed for most purposes.
  After RA and before VIRAMA+YA, this function changes ZWNJ to ZWJ and preserves
  ZWJ; everywhere else it removes ZWNJ and ZWJ.
Args:
text: The text from which the zero-width format controls are to be removed.
Returns:
The text with all non-obligatory occurrences of ZWNJ and ZWJ removed.
"""
text = STANDARDIZE_ZW.sub('\u200D', text)
text = DELETE_ZW.sub('', text)
return text
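# Illustrative examples (not in the original file): a ZWNJ between KA and
# VIRAMA+SSA is advisory and is dropped, while a zero-width control after RA and
# before VIRAMA+YA is normalized to ZWJ and preserved.
#
#   RemoveOptionalZW('\u0995\u200c\u09cd\u09b7')        # -> '\u0995\u09cd\u09b7'
#   RemoveOptionalZW('\u09b0\u200c\u09cd\u09af\u09be')  # -> '\u09b0\u200d\u09cd\u09af\u09be'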
class TestBanglaZeroWidth(unittest.TestCase):
def test_RemoveOptionalZW(self):
rally = '\u09b0\u200d\u09cd\u09af\u09be\u09b2\u09c0'
self.assertEqual(rally, 'র্যালী')
self.assertEqual(RemoveOptionalZW(rally), rally)
for i in range(len(rally) + 1):
for zw in ('\u200c', '\u200d', '\u200c\u200d', '\u200d\u200c'):
text = rally[:i] + zw + rally[i:]
self.assertEqual(RemoveOptionalZW(text), rally)
if __name__ == '__main__':
stdin = io.open(0, mode='rt', encoding='utf-8', closefd=False)
stdout = io.open(1, mode='wt', encoding='utf-8', closefd=False)
for line in stdin:
stdout.write(RemoveOptionalZW(line))
| apache-2.0 |
satish-avninetworks/murano | murano_tempest_tests/plugin.py | 3 | 1950 | # Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest import config
from tempest.test_discover import plugins
from murano_tempest_tests import config as config_application_catalog
class MuranoTempestPlugin(plugins.TempestPlugin):
def load_tests(self):
base_path = os.path.split(os.path.dirname(
os.path.abspath(__file__)))[0]
test_dir = "murano_tempest_tests/tests"
full_test_dir = os.path.join(base_path, test_dir)
return full_test_dir, base_path
def register_opts(self, conf):
config.register_opt_group(
conf, config_application_catalog.service_available_group,
config_application_catalog.ServiceAvailableGroup)
config.register_opt_group(
conf, config_application_catalog.application_catalog_group,
config_application_catalog.ApplicationCatalogGroup)
config.register_opt_group(
conf, config_application_catalog.service_broker_group,
config_application_catalog.ServiceBrokerGroup)
def get_opt_lists(self):
return [(config_application_catalog.application_catalog_group.name,
config_application_catalog.ApplicationCatalogGroup),
(config_application_catalog.service_broker_group.name,
config_application_catalog.ServiceBrokerGroup)]
| apache-2.0 |
sid88in/incubator-airflow | airflow/example_dags/docker_copy_data.py | 17 | 3569 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This sample "listens to a directory": it moves the new file and prints it,
using docker containers.
The following operators are being used: DockerOperator,
BashOperator & ShortCircuitOperator.
TODO: Review the workflow, change it according to
your environment & enable the code.
"""
# from __future__ import print_function
#
# from airflow import DAG
# import airflow
# from datetime import datetime, timedelta
# from airflow.operators import BashOperator
# from airflow.operators import ShortCircuitOperator
# from airflow.operators.docker_operator import DockerOperator
#
# default_args = {
# 'owner': 'airflow',
# 'depends_on_past': False,
# 'start_date': datetime.utcnow(),
# 'email': ['[email protected]'],
# 'email_on_failure': False,
# 'email_on_retry': False,
# 'retries': 1,
# 'retry_delay': timedelta(minutes=5),
# }
#
# dag = DAG(
# 'docker_sample_copy_data', default_args=
# default_args, schedule_interval=timedelta(minutes=10))
#
# locate_file_cmd = """
# sleep 10
# find {{params.source_location}} -type f -printf "%f\n" | head -1
# """
#
# t_view = BashOperator(
# task_id='view_file',
# bash_command=locate_file_cmd,
# xcom_push=True,
# params={'source_location': '/your/input_dir/path'},
# dag=dag)
#
#
# def is_data_available(*args, **kwargs):
# ti = kwargs['ti']
# data = ti.xcom_pull(key=None, task_ids='view_file')
# return not data == ''
#
#
# t_is_data_available = ShortCircuitOperator(
# task_id='check_if_data_available',
# provide_context=True,
# python_callable=is_data_available,
# dag=dag)
#
# t_move = DockerOperator(
# api_version='1.19',
# docker_url='tcp://localhost:2375', # replace it with swarm/docker endpoint
# image='centos:latest',
# network_mode='bridge',
# volumes=['/your/host/input_dir/path:/your/input_dir/path',
# '/your/host/output_dir/path:/your/output_dir/path'],
# command='./entrypoint.sh',
# task_id='move_data',
# xcom_push=True,
# params={'source_location': '/your/input_dir/path',
# 'target_location': '/your/output_dir/path'},
# dag=dag)
#
# print_templated_cmd = """
# cat {{ ti.xcom_pull('move_data') }}
# """
#
# t_print = DockerOperator(
# api_version='1.19',
# docker_url='tcp://localhost:2375',
# image='centos:latest',
# volumes=['/your/host/output_dir/path:/your/output_dir/path'],
# command=print_templated_cmd,
# task_id='print',
# dag=dag)
#
# t_view.set_downstream(t_is_data_available)
# t_is_data_available.set_downstream(t_move)
# t_move.set_downstream(t_print)
| apache-2.0 |
LTommy/tommyslab | lib/banner.py | 1 | 1317 | # -*- coding:utf-8 -*-
def print_banner():
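    # The banner string below is Chinese; roughly: "Welcome to tommyslab. Please
    # keep a stable network connection while running the experiments."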
print('\n\n***************欢迎使用tommyslab, 请保持在网络畅通的环境下进行实验**************')
print('''
_ _ _
| |_ ___ _ __ ___ _ __ ___ _ _ ___| | __ _| |__
| __/ _ \| '_ ` _ \| '_ ` _ \| | | / __| | / _` | '_ \
| || (_) | | | | | | | | | | | |_| \__ \ |__| (_| | |_) |
\__\___/|_| |_| |_|_| |_| |_|\__, |___/_____\__,_|_.__/
|___/
_ _ _
_
___ _ __ (_) ___ _ _
/ _ \ '_ \ | |/ _ \| | | |
| __/ | | || | (_) | |_| |
\___|_| |_|/ |\___/ \__, |
|__/ |___/
| |__ __ _ ___| | _(_)_ __ __ _
| '_ \ / _` |/ __| |/ / | '_ \ / _` |
| | | | (_| | (__| <| | | | | (_| |
|_| |_|\__,_|\___|_|\_\_|_| |_|\__, |
|___/
''')
| apache-2.0 |
lhCheung1991/flappybird_cocos2dx_2 | cocos2d/plugin/tools/pluginx-bindings-generator/genbindings-lua.py | 130 | 7752 | #!/usr/bin/python
# This script is used to generate lua binding glue code.
# Android ndk version must be ndk-r9b.
import sys
import os, os.path
import shutil
import ConfigParser
import subprocess
import re
from contextlib import contextmanager
import shutil
import yaml
import tempfile
def _check_ndk_root_env():
''' Checking the environment NDK_ROOT, which will be used for building
'''
try:
NDK_ROOT = os.environ['NDK_ROOT']
except Exception:
print "NDK_ROOT not defined. Please define NDK_ROOT in your environment."
sys.exit(1)
return NDK_ROOT
def _check_python_bin_env():
''' Checking the environment PYTHON_BIN, which will be used for building
'''
try:
PYTHON_BIN = os.environ['PYTHON_BIN']
except Exception:
print "PYTHON_BIN not defined, use current python."
PYTHON_BIN = sys.executable
return PYTHON_BIN
class CmdError(Exception):
pass
@contextmanager
def _pushd(newDir):
previousDir = os.getcwd()
os.chdir(newDir)
yield
os.chdir(previousDir)
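# Usage sketch (illustrative): temporarily switch into a directory and return to
# the previous one automatically, even if the body raises.
#
#   with _pushd('/tmp/build'):    # hypothetical directory
#       _run_cmd('make -j4')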
def _run_cmd(command):
ret = subprocess.call(command, shell=True)
if ret != 0:
message = "Error running command"
raise CmdError(message)
def _edit_yaml(filePath):
f = open(filePath, 'r')
data = yaml.load(f)
f.close()
data['conversions']['ns_map']['cocos2d::plugin::'] = 'plugin.'
data['conversions']['to_native']['TIAPDeveloperInfo'] = 'ok &= pluginx::luaval_to_TIAPDeveloperInfo(tolua_S, ${arg_idx}, &${out_value})'
data['conversions']['to_native']['TAdsDeveloperInfo'] = 'ok &= pluginx::luaval_to_TAdsDeveloperInfo(tolua_S, ${arg_idx}, &${out_value})'
data['conversions']['to_native']['TAdsInfo'] = 'ok &= pluginx::luaval_to_TAdsInfo(tolua_S, ${arg_idx}, &${out_value})'
data['conversions']['to_native']['TShareDeveloperInfo'] = 'ok &= pluginx::luaval_to_TShareDeveloperInfo(tolua_S, ${arg_idx}, &${out_value})'
data['conversions']['to_native']['TSocialDeveloperInfo'] = 'ok &= pluginx::luaval_to_TSocialDeveloperInfo(tolua_S, ${arg_idx}, &${out_value})'
data['conversions']['to_native']['TUserDeveloperInfo'] = 'ok &= pluginx::luaval_to_TUserDeveloperInfo(tolua_S, ${arg_idx}, &${out_value})'
f = open(filePath, 'w')
f.write(yaml.dump(data))
f.close()
def main():
cur_platform= '??'
llvm_path = '??'
ndk_root = _check_ndk_root_env()
# del the " in the path
ndk_root = re.sub(r"\"", "", ndk_root)
python_bin = _check_python_bin_env()
platform = sys.platform
if platform == 'win32':
cur_platform = 'windows'
elif platform == 'darwin':
cur_platform = platform
elif 'linux' in platform:
cur_platform = 'linux'
else:
print 'Your platform is not supported!'
sys.exit(1)
if platform == 'win32':
x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s' % cur_platform))
if not os.path.exists(x86_llvm_path):
x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.4/prebuilt', '%s' % cur_platform))
else:
x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s-%s' % (cur_platform, 'x86')))
if not os.path.exists(x86_llvm_path):
x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.4/prebuilt', '%s-%s' % (cur_platform, 'x86')))
x64_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s-%s' % (cur_platform, 'x86_64')))
if not os.path.exists(x64_llvm_path):
x64_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.4/prebuilt', '%s-%s' % (cur_platform, 'x86_64')))
if os.path.isdir(x86_llvm_path):
llvm_path = x86_llvm_path
elif os.path.isdir(x64_llvm_path):
llvm_path = x64_llvm_path
else:
print 'llvm toolchain not found!'
print 'path: %s or path: %s are not valid! ' % (x86_llvm_path, x64_llvm_path)
sys.exit(1)
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
cocos_root = os.path.abspath(os.path.join(project_root, ''))
cxx_generator_root = os.path.abspath(os.path.join(project_root, 'tools/bindings-generator'))
pluginx_root = os.path.abspath(os.path.join(project_root, 'plugin'))
# save config to file
config = ConfigParser.ConfigParser()
config.set('DEFAULT', 'androidndkdir', ndk_root)
config.set('DEFAULT', 'clangllvmdir', llvm_path)
config.set('DEFAULT', 'cocosdir', cocos_root)
config.set('DEFAULT', 'cxxgeneratordir', cxx_generator_root)
config.set('DEFAULT', 'extra_flags', '')
config.set('DEFAULT', 'pluginxdir', pluginx_root)
# To fix parse error on windows, we must difine __WCHAR_MAX__ and undefine __MINGW32__ .
if platform == 'win32':
config.set('DEFAULT', 'extra_flags', '-D__WCHAR_MAX__=0x7fffffff -U__MINGW32__')
conf_ini_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'userconf.ini'))
print 'generating userconf.ini...'
with open(conf_ini_file, 'w') as configfile:
config.write(configfile)
# set proper environment variables
if 'linux' in platform or platform == 'darwin':
os.putenv('LD_LIBRARY_PATH', '%s/libclang' % cxx_generator_root)
if platform == 'win32':
path_env = os.environ['PATH']
os.putenv('PATH', r'%s;%s\libclang;%s\tools\win32;' % (path_env, cxx_generator_root, cxx_generator_root))
# edit conversions config for pluginx
conversions_yaml = '%s/targets/lua/conversions.yaml' % cxx_generator_root
conversions_backup = '%s.backup' % conversions_yaml
shutil.copy(conversions_yaml, conversions_backup)
_edit_yaml(conversions_yaml)
try:
tolua_root = '%s/plugin/tools/pluginx-bindings-generator/tolua' % project_root
output_dir = '%s/plugin/luabindings/auto' % project_root
cmd_args = {'cocos2dx_pluginx.ini' : ('cocos2dx_pluginx', 'lua_cocos2dx_pluginx_auto')}
target = 'lua'
generator_py = '%s/generator.py' % cxx_generator_root
for key in cmd_args.keys():
args = cmd_args[key]
cfg = '%s/%s' % (tolua_root, key)
print 'Generating bindings for %s...' % (key[:-4])
command = '%s %s %s -s %s -t %s -o %s -n %s' % (python_bin, generator_py, cfg, args[0], target, output_dir, args[1])
_run_cmd(command)
if platform == 'win32':
with _pushd(output_dir):
_run_cmd('dos2unix *')
# replace header file
tmpfd,tmpname = tempfile.mkstemp(dir='.')
input_file_name = '%s/%s.cpp' % (output_dir, args[1])
try:
output_file = os.fdopen(tmpfd, 'w')
input_file = open(input_file_name)
for line in input_file:
output_file.write(line.replace('#include "LuaBasicConversions.h"', '#include "LuaBasicConversions.h"\n#include "lua_pluginx_basic_conversions.h"'))
finally:
output_file.close()
input_file.close()
shutil.move(tmpname, input_file_name)
print '---------------------------------'
print 'Generating lua bindings succeeds.'
print '---------------------------------'
except Exception as e:
if e.__class__.__name__ == 'CmdError':
print '---------------------------------'
print 'Generating lua bindings fails.'
print '---------------------------------'
sys.exit(1)
else:
raise
finally:
shutil.move(conversions_backup, conversions_yaml)
# -------------- main --------------
if __name__ == '__main__':
main()
| mit |
ByteInternet/python-social-auth | social/backends/uber.py | 33 | 1390 | """
Uber OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/uber.html
"""
from social.backends.oauth import BaseOAuth2
class UberOAuth2(BaseOAuth2):
name = 'uber'
    ID_KEY = 'uuid'
SCOPE_SEPARATOR = ' '
AUTHORIZATION_URL = 'https://login.uber.com/oauth/authorize'
ACCESS_TOKEN_URL = 'https://login.uber.com/oauth/token'
ACCESS_TOKEN_METHOD = 'POST'
def auth_complete_credentials(self):
return self.get_key_and_secret()
def get_user_details(self, response):
"""Return user details from Uber account"""
email = response.get('email', '')
fullname, first_name, last_name = self.get_user_names()
return {'username': email,
'email': email,
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
client_id, client_secret = self.get_key_and_secret()
response = kwargs.pop('response')
return self.get_json('https://api.uber.com/v1/me', headers={
'Authorization': '{0} {1}'.format(
response.get('token_type'), access_token
)
}
)
| bsd-3-clause |
atlassian/boto | tests/unit/glacier/test_concurrent.py | 88 | 7261 | #!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import tempfile
from boto.compat import Queue
from tests.compat import mock, unittest
from tests.unit import AWSMockServiceTestCase
from boto.glacier.concurrent import ConcurrentUploader, ConcurrentDownloader
from boto.glacier.concurrent import UploadWorkerThread
from boto.glacier.concurrent import _END_SENTINEL
class FakeThreadedConcurrentUploader(ConcurrentUploader):
def _start_upload_threads(self, results_queue, upload_id,
worker_queue, filename):
self.results_queue = results_queue
self.worker_queue = worker_queue
self.upload_id = upload_id
def _wait_for_upload_threads(self, hash_chunks, result_queue, total_parts):
for i in range(total_parts):
hash_chunks[i] = b'foo'
class FakeThreadedConcurrentDownloader(ConcurrentDownloader):
def _start_download_threads(self, results_queue, worker_queue):
self.results_queue = results_queue
self.worker_queue = worker_queue
def _wait_for_download_threads(self, filename, result_queue, total_parts):
pass
class TestConcurrentUploader(unittest.TestCase):
def setUp(self):
super(TestConcurrentUploader, self).setUp()
self.stat_patch = mock.patch('os.stat')
self.addCleanup(self.stat_patch.stop)
self.stat_mock = self.stat_patch.start()
# Give a default value for tests that don't care
# what the file size is.
self.stat_mock.return_value.st_size = 1024 * 1024 * 8
def test_calculate_required_part_size(self):
self.stat_mock.return_value.st_size = 1024 * 1024 * 8
uploader = ConcurrentUploader(mock.Mock(), 'vault_name')
total_parts, part_size = uploader._calculate_required_part_size(
1024 * 1024 * 8)
self.assertEqual(total_parts, 2)
self.assertEqual(part_size, 4 * 1024 * 1024)
def test_calculate_required_part_size_too_small(self):
too_small = 1 * 1024 * 1024
self.stat_mock.return_value.st_size = 1024 * 1024 * 1024
uploader = ConcurrentUploader(mock.Mock(), 'vault_name',
part_size=too_small)
total_parts, part_size = uploader._calculate_required_part_size(
1024 * 1024 * 1024)
self.assertEqual(total_parts, 256)
        # Part size is 4MB, not the passed-in 1MB.
self.assertEqual(part_size, 4 * 1024 * 1024)
def test_work_queue_is_correctly_populated(self):
uploader = FakeThreadedConcurrentUploader(mock.MagicMock(),
'vault_name')
uploader.upload('foofile')
q = uploader.worker_queue
items = [q.get() for i in range(q.qsize())]
self.assertEqual(items[0], (0, 4 * 1024 * 1024))
self.assertEqual(items[1], (1, 4 * 1024 * 1024))
# 2 for the parts, 10 for the end sentinels (10 threads).
self.assertEqual(len(items), 12)
def test_correct_low_level_api_calls(self):
api_mock = mock.MagicMock()
uploader = FakeThreadedConcurrentUploader(api_mock, 'vault_name')
uploader.upload('foofile')
        # The threads call upload_part, so we're just verifying the
# initiate/complete multipart API calls.
api_mock.initiate_multipart_upload.assert_called_with(
'vault_name', 4 * 1024 * 1024, None)
api_mock.complete_multipart_upload.assert_called_with(
'vault_name', mock.ANY, mock.ANY, 8 * 1024 * 1024)
def test_downloader_work_queue_is_correctly_populated(self):
job = mock.MagicMock()
job.archive_size = 8 * 1024 * 1024
downloader = FakeThreadedConcurrentDownloader(job)
downloader.download('foofile')
q = downloader.worker_queue
items = [q.get() for i in range(q.qsize())]
self.assertEqual(items[0], (0, 4 * 1024 * 1024))
self.assertEqual(items[1], (1, 4 * 1024 * 1024))
# 2 for the parts, 10 for the end sentinels (10 threads).
self.assertEqual(len(items), 12)
class TestUploaderThread(unittest.TestCase):
def setUp(self):
self.fileobj = tempfile.NamedTemporaryFile()
self.filename = self.fileobj.name
def test_fileobj_closed_when_thread_shuts_down(self):
thread = UploadWorkerThread(mock.Mock(), 'vault_name',
self.filename, 'upload_id',
Queue(), Queue())
fileobj = thread._fileobj
self.assertFalse(fileobj.closed)
        # By setting should_continue to False, it should immediately
# exit, and we can still verify cleanup behavior.
thread.should_continue = False
thread.run()
self.assertTrue(fileobj.closed)
def test_upload_errors_have_exception_messages(self):
api = mock.Mock()
job_queue = Queue()
result_queue = Queue()
upload_thread = UploadWorkerThread(
api, 'vault_name', self.filename,
'upload_id', job_queue, result_queue, num_retries=1,
time_between_retries=0)
api.upload_part.side_effect = Exception("exception message")
job_queue.put((0, 1024))
job_queue.put(_END_SENTINEL)
upload_thread.run()
result = result_queue.get(timeout=1)
self.assertIn("exception message", str(result))
def test_num_retries_is_obeyed(self):
# total attempts is 1 + num_retries so if I have num_retries of 2,
# I'll attempt the upload once, and if that fails I'll retry up to
# 2 more times for a total of 3 attempts.
api = mock.Mock()
job_queue = Queue()
result_queue = Queue()
upload_thread = UploadWorkerThread(
api, 'vault_name', self.filename,
'upload_id', job_queue, result_queue, num_retries=2,
time_between_retries=0)
api.upload_part.side_effect = Exception()
job_queue.put((0, 1024))
job_queue.put(_END_SENTINEL)
upload_thread.run()
self.assertEqual(api.upload_part.call_count, 3)
if __name__ == '__main__':
unittest.main()
| mit |
ojengwa/sympy | sympy/core/tests/test_evalf.py | 2 | 16286 | from sympy import (Add, ceiling, cos, E, Eq, exp, factorial, fibonacci, floor,
Function, GoldenRatio, I, log, Mul, oo, pi, Pow, Rational,
sin, sqrt, sstr, Sum, sympify, S, integrate, atan, product)
from sympy.core.evalf import complex_accuracy, PrecisionExhausted, scaled_zero
from sympy.core.compatibility import long
from sympy.mpmath import inf, ninf, nan
from sympy.abc import n, x, y
from sympy.mpmath.libmp.libmpf import from_float
from sympy.utilities.pytest import raises, XFAIL
def NS(e, n=15, **options):
return sstr(sympify(e).evalf(n, **options), full_prec=True)
def test_evalf_helpers():
assert complex_accuracy((from_float(2.0), None, 35, None)) == 35
assert complex_accuracy((from_float(2.0), from_float(10.0), 35, 100)) == 37
assert complex_accuracy(
(from_float(2.0), from_float(1000.0), 35, 100)) == 43
assert complex_accuracy((from_float(2.0), from_float(10.0), 100, 35)) == 35
assert complex_accuracy(
(from_float(2.0), from_float(1000.0), 100, 35)) == 35
def test_evalf_basic():
assert NS('pi', 15) == '3.14159265358979'
assert NS('2/3', 10) == '0.6666666667'
assert NS('355/113-pi', 6) == '2.66764e-7'
assert NS('16*atan(1/5)-4*atan(1/239)', 15) == '3.14159265358979'
def test_cancellation():
assert NS(Add(pi, Rational(1, 10**1000), -pi, evaluate=False), 15,
maxn=1200) == '1.00000000000000e-1000'
def test_evalf_powers():
assert NS('pi**(10**20)', 10) == '1.339148777e+49714987269413385435'
assert NS(pi**(10**100), 10) == ('4.946362032e+4971498726941338543512682882'
'9089887365167832438044244613405349992494711208'
'95526746555473864642912223')
assert NS('2**(1/10**50)', 15) == '1.00000000000000'
assert NS('2**(1/10**50)-1', 15) == '6.93147180559945e-51'
# Evaluation of Rump's ill-conditioned polynomial
def test_evalf_rump():
a = 1335*y**6/4 + x**2*(11*x**2*y**2 - y**6 - 121*y**4 - 2) + 11*y**8/2 + x/(2*y)
assert NS(a, 15, subs={x: 77617, y: 33096}) == '-0.827396059946821'
def test_evalf_complex():
assert NS('2*sqrt(pi)*I', 10) == '3.544907702*I'
assert NS('3+3*I', 15) == '3.00000000000000 + 3.00000000000000*I'
assert NS('E+pi*I', 15) == '2.71828182845905 + 3.14159265358979*I'
assert NS('pi * (3+4*I)', 15) == '9.42477796076938 + 12.5663706143592*I'
assert NS('I*(2+I)', 15) == '-1.00000000000000 + 2.00000000000000*I'
@XFAIL
def test_evalf_complex_bug():
assert NS('(pi+E*I)*(E+pi*I)', 15) in ('0.e-15 + 17.25866050002*I',
'0.e-17 + 17.25866050002*I', '-0.e-17 + 17.25866050002*I')
def test_evalf_complex_powers():
assert NS('(E+pi*I)**100000000000000000') == \
'-3.58896782867793e+61850354284995199 + 4.58581754997159e+61850354284995199*I'
# XXX: rewrite if a+a*I simplification introduced in sympy
#assert NS('(pi + pi*I)**2') in ('0.e-15 + 19.7392088021787*I', '0.e-16 + 19.7392088021787*I')
assert NS('(pi + pi*I)**2', chop=True) == '19.7392088021787*I'
assert NS(
'(pi + 1/10**8 + pi*I)**2') == '6.2831853e-8 + 19.7392088650106*I'
assert NS('(pi + 1/10**12 + pi*I)**2') == '6.283e-12 + 19.7392088021850*I'
assert NS('(pi + pi*I)**4', chop=True) == '-389.636364136010'
assert NS(
'(pi + 1/10**8 + pi*I)**4') == '-389.636366616512 + 2.4805021e-6*I'
assert NS('(pi + 1/10**12 + pi*I)**4') == '-389.636364136258 + 2.481e-10*I'
assert NS(
'(10000*pi + 10000*pi*I)**4', chop=True) == '-3.89636364136010e+18'
@XFAIL
def test_evalf_complex_powers_bug():
assert NS('(pi + pi*I)**4') == '-389.63636413601 + 0.e-14*I'
def test_evalf_exponentiation():
assert NS(sqrt(-pi)) == '1.77245385090552*I'
assert NS(Pow(pi*I, Rational(
1, 2), evaluate=False)) == '1.25331413731550 + 1.25331413731550*I'
assert NS(pi**I) == '0.413292116101594 + 0.910598499212615*I'
assert NS(pi**(E + I/3)) == '20.8438653991931 + 8.36343473930031*I'
assert NS((pi + I/3)**(E + I/3)) == '17.2442906093590 + 13.6839376767037*I'
assert NS(exp(pi)) == '23.1406926327793'
assert NS(exp(pi + E*I)) == '-21.0981542849657 + 9.50576358282422*I'
assert NS(pi**pi) == '36.4621596072079'
assert NS((-pi)**pi) == '-32.9138577418939 - 15.6897116534332*I'
assert NS((-pi)**(-pi)) == '-0.0247567717232697 + 0.0118013091280262*I'
# An example from Smith, "Multiple Precision Complex Arithmetic and Functions"
def test_evalf_complex_cancellation():
A = Rational('63287/100000')
B = Rational('52498/100000')
C = Rational('69301/100000')
D = Rational('83542/100000')
F = Rational('2231321613/2500000000')
# XXX: the number of returned mantissa digits in the real part could
# change with the implementation. What matters is that the returned digits are
# correct; those that are showing now are correct.
# >>> ((A+B*I)*(C+D*I)).expand()
# 64471/10000000000 + 2231321613*I/2500000000
# >>> 2231321613*4
# 8925286452L
assert NS((A + B*I)*(C + D*I), 6) == '6.44710e-6 + 0.892529*I'
assert NS((A + B*I)*(C + D*I), 10) == '6.447100000e-6 + 0.8925286452*I'
assert NS((A + B*I)*(
C + D*I) - F*I, 5) in ('6.4471e-6 + 0.e-14*I', '6.4471e-6 - 0.e-14*I')
def test_evalf_logs():
assert NS("log(3+pi*I)", 15) == '1.46877619736226 + 0.808448792630022*I'
assert NS("log(pi*I)", 15) == '1.14472988584940 + 1.57079632679490*I'
def test_evalf_trig():
assert NS('sin(1)', 15) == '0.841470984807897'
assert NS('cos(1)', 15) == '0.540302305868140'
assert NS('sin(10**-6)', 15) == '9.99999999999833e-7'
assert NS('cos(10**-6)', 15) == '0.999999999999500'
assert NS('sin(E*10**100)', 15) == '0.409160531722613'
# Some input near roots
assert NS(sin(exp(pi*sqrt(163))*pi), 15) == '-2.35596641936785e-12'
assert NS(sin(pi*10**100 + Rational(7, 10**5), evaluate=False), 15, maxn=120) == \
'6.99999999428333e-5'
assert NS(sin(Rational(7, 10**5), evaluate=False), 15) == \
'6.99999999428333e-5'
# Check detection of various false identities
def test_evalf_near_integers():
# Binet's formula
f = lambda n: ((1 + sqrt(5))**n)/(2**n * sqrt(5))
assert NS(f(5000) - fibonacci(5000), 10, maxn=1500) == '5.156009964e-1046'
# Some near-integer identities from
# http://mathworld.wolfram.com/AlmostInteger.html
assert NS('sin(2017*2**(1/5))', 15) == '-1.00000000000000'
assert NS('sin(2017*2**(1/5))', 20) == '-0.99999999999999997857'
assert NS('1+sin(2017*2**(1/5))', 15) == '2.14322287389390e-17'
assert NS('45 - 613*E/37 + 35/991', 15) == '6.03764498766326e-11'
def test_evalf_ramanujan():
assert NS(exp(pi*sqrt(163)) - 640320**3 - 744, 10) == '-7.499274028e-13'
# A related identity
A = 262537412640768744*exp(-pi*sqrt(163))
B = 196884*exp(-2*pi*sqrt(163))
C = 103378831900730205293632*exp(-3*pi*sqrt(163))
assert NS(1 - A - B + C, 10) == '1.613679005e-59'
# Inputs that for various reasons have failed at some point
def test_evalf_bugs():
assert NS(sin(1) + exp(-10**10), 10) == NS(sin(1), 10)
assert NS(exp(10**10) + sin(1), 10) == NS(exp(10**10), 10)
assert NS('log(1+1/10**50)', 20) == '1.0000000000000000000e-50'
assert NS('log(10**100,10)', 10) == '100.0000000'
assert NS('log(2)', 10) == '0.6931471806'
assert NS(
'(sin(x)-x)/x**3', 15, subs={x: '1/10**50'}) == '-0.166666666666667'
assert NS(sin(1) + Rational(
1, 10**100)*I, 15) == '0.841470984807897 + 1.00000000000000e-100*I'
assert x.evalf() == x
assert NS((1 + I)**2*I, 6) == '-2.00000'
d = {n: (
-1)**Rational(6, 7), y: (-1)**Rational(4, 7), x: (-1)**Rational(2, 7)}
assert NS((x*(1 + y*(1 + n))).subs(d).evalf(), 6) == '0.346011 + 0.433884*I'
assert NS(((-I - sqrt(2)*I)**2).evalf()) == '-5.82842712474619'
assert NS((1 + I)**2*I, 15) == '-2.00000000000000'
# issue 4758 (1/2):
assert NS(pi.evalf(69) - pi) == '-4.43863937855894e-71'
# issue 4758 (2/2): With the bug present, this still only fails if the
# terms are in the order given here. This is not generally the case,
# because the order depends on the hashes of the terms.
assert NS(20 - 5008329267844*n**25 - 477638700*n**37 - 19*n,
subs={n: .01}) == '19.8100000000000'
assert NS(((x - 1)*((1 - x))**1000).n()
) == '(-x + 1.00000000000000)**1000*(x - 1.00000000000000)'
assert NS((-x).n()) == '-x'
assert NS((-2*x).n()) == '-2.00000000000000*x'
assert NS((-2*x*y).n()) == '-2.00000000000000*x*y'
assert cos(x).n(subs={x: 1+I}) == cos(x).subs(x, 1+I).n()
# issue 6660. Also NaN != mpmath.nan
# In this order:
# 0*nan, 0/nan, 0*inf, 0/inf
# 0+nan, 0-nan, 0+inf, 0-inf
# >>> n = Some Number
# n*nan, n/nan, n*inf, n/inf
# n+nan, n-nan, n+inf, n-inf
assert (0*sin(oo)).n() == S.Zero
assert (0/sin(oo)).n() == S.Zero
assert (0*E**(oo)).n() == S.NaN
assert (0/E**(oo)).n() == S.Zero
assert (0+sin(oo)).n() == S.NaN
assert (0-sin(oo)).n() == S.NaN
assert (0+E**(oo)).n() == S.Infinity
assert (0-E**(oo)).n() == S.NegativeInfinity
assert (5*sin(oo)).n() == S.NaN
assert (5/sin(oo)).n() == S.NaN
assert (5*E**(oo)).n() == S.Infinity
assert (5/E**(oo)).n() == S.Zero
assert (5+sin(oo)).n() == S.NaN
assert (5-sin(oo)).n() == S.NaN
assert (5+E**(oo)).n() == S.Infinity
assert (5-E**(oo)).n() == S.NegativeInfinity
def test_evalf_integer_parts():
a = floor(log(8)/log(2) - exp(-1000), evaluate=False)
b = floor(log(8)/log(2), evaluate=False)
raises(PrecisionExhausted, lambda: a.evalf())
assert a.evalf(chop=True) == 3
assert a.evalf(maxn=500) == 2
assert b.evalf() == 3
# equals, as a fallback, can still fail but it might succeed as here
assert ceiling(10*(sin(1)**2 + cos(1)**2)) == 10
assert int(floor(factorial(50)/E, evaluate=False).evalf(70)) == \
long(11188719610782480504630258070757734324011354208865721592720336800)
assert int(ceiling(factorial(50)/E, evaluate=False).evalf(70)) == \
long(11188719610782480504630258070757734324011354208865721592720336801)
assert int(floor((GoldenRatio**999 / sqrt(5) + Rational(1, 2)))
.evalf(1000)) == fibonacci(999)
assert int(floor((GoldenRatio**1000 / sqrt(5) + Rational(1, 2)))
.evalf(1000)) == fibonacci(1000)
def test_evalf_trig_zero_detection():
a = sin(160*pi, evaluate=False)
t = a.evalf(maxn=100)
assert abs(t) < 1e-100
assert t._prec < 2
assert a.evalf(chop=True) == 0
raises(PrecisionExhausted, lambda: a.evalf(strict=True))
def test_evalf_sum():
assert Sum(n,(n,1,2)).evalf() == 3.
assert Sum(n,(n,1,2)).doit().evalf() == 3.
# the next test should return instantly
assert Sum(1/n,(n,1,2)).evalf() == 1.5
def test_evalf_divergent_series():
raises(ValueError, lambda: Sum(1/n, (n, 1, oo)).evalf())
raises(ValueError, lambda: Sum(n/(n**2 + 1), (n, 1, oo)).evalf())
raises(ValueError, lambda: Sum((-1)**n, (n, 1, oo)).evalf())
raises(ValueError, lambda: Sum((-1)**n, (n, 1, oo)).evalf())
raises(ValueError, lambda: Sum(n**2, (n, 1, oo)).evalf())
raises(ValueError, lambda: Sum(2**n, (n, 1, oo)).evalf())
raises(ValueError, lambda: Sum((-2)**n, (n, 1, oo)).evalf())
raises(ValueError, lambda: Sum((2*n + 3)/(3*n**2 + 4), (n, 0, oo)).evalf())
raises(ValueError, lambda: Sum((0.5*n**3)/(n**4 + 1), (n, 0, oo)).evalf())
def test_evalf_py_methods():
assert abs(float(pi + 1) - 4.1415926535897932) < 1e-10
assert abs(complex(pi + 1) - 4.1415926535897932) < 1e-10
assert abs(
complex(pi + E*I) - (3.1415926535897931 + 2.7182818284590451j)) < 1e-10
raises(TypeError, lambda: float(pi + x))
def test_evalf_power_subs_bugs():
assert (x**2).evalf(subs={x: 0}) == 0
assert sqrt(x).evalf(subs={x: 0}) == 0
assert (x**Rational(2, 3)).evalf(subs={x: 0}) == 0
assert (x**x).evalf(subs={x: 0}) == 1
assert (3**x).evalf(subs={x: 0}) == 1
assert exp(x).evalf(subs={x: 0}) == 1
assert ((2 + I)**x).evalf(subs={x: 0}) == 1
assert (0**x).evalf(subs={x: 0}) == 1
def test_evalf_arguments():
raises(TypeError, lambda: pi.evalf(method="garbage"))
def test_implemented_function_evalf():
from sympy.utilities.lambdify import implemented_function
f = Function('f')
f = implemented_function(f, lambda x: x + 1)
assert str(f(x)) == "f(x)"
assert str(f(2)) == "f(2)"
assert f(2).evalf() == 3
assert f(x).evalf() == f(x)
del f._imp_ # XXX: due to caching _imp_ would influence all other tests
def test_evaluate_false():
for no in [0, False, None]:
assert Add(3, 2, evaluate=no).is_Add
assert Mul(3, 2, evaluate=no).is_Mul
assert Pow(3, 2, evaluate=no).is_Pow
assert Pow(y, 2, evaluate=True) - Pow(y, 2, evaluate=True) == 0
def test_evalf_relational():
assert Eq(x/5, y/10).evalf() == Eq(0.2*x, 0.1*y)
def test_issue_5486():
assert not cos(sqrt(0.5 + I)).n().is_Function
def test_issue_5486_bug():
from sympy import I, Expr
assert abs(Expr._from_mpmath(I._to_mpmath(15), 15) - I) < 1.0e-15
def test_bugs():
from sympy import polar_lift, re
assert abs(re((1 + I)**2)) < 1e-15
# anything that evalf's to 0 will do in place of polar_lift
assert abs(polar_lift(0)).n() == 0
def test_subs_bugs():
from sympy import besseli
assert NS('besseli(-x, y) - besseli(x, y)', subs={x: 3.5, y: 20.0}) == \
'-4.92535585957223e-10'
assert NS('Piecewise((x, x>0)) + Piecewise((1-x, x>0))', subs={x: 0.1}) == \
'1.00000000000000'
def test_issue_4956_5204():
# issue 4956
v = S('''(-27*12**(1/3)*sqrt(31)*I +
27*2**(2/3)*3**(1/3)*sqrt(31)*I)/(-2511*2**(2/3)*3**(1/3) +
(29*18**(1/3) + 9*2**(1/3)*3**(2/3)*sqrt(31)*I +
87*2**(1/3)*3**(1/6)*I)**2)''')
assert NS(v, 1) == '0.e-118 - 0.e-118*I'
# issue 5204
v = S('''-(357587765856 + 18873261792*249**(1/2) + 56619785376*I*83**(1/2) +
108755765856*I*3**(1/2) + 41281887168*6**(1/3)*(1422 +
54*249**(1/2))**(1/3) - 1239810624*6**(1/3)*249**(1/2)*(1422 +
54*249**(1/2))**(1/3) - 3110400000*I*6**(1/3)*83**(1/2)*(1422 +
54*249**(1/2))**(1/3) + 13478400000*I*3**(1/2)*6**(1/3)*(1422 +
54*249**(1/2))**(1/3) + 1274950152*6**(2/3)*(1422 +
54*249**(1/2))**(2/3) + 32347944*6**(2/3)*249**(1/2)*(1422 +
54*249**(1/2))**(2/3) - 1758790152*I*3**(1/2)*6**(2/3)*(1422 +
54*249**(1/2))**(2/3) - 304403832*I*6**(2/3)*83**(1/2)*(1422 +
    54*249**(1/2))**(2/3))/(175732658352 + (1106028 + 25596*249**(1/2) +
76788*I*83**(1/2))**2)''')
assert NS(v, 5) == '0.077284 + 1.1104*I'
assert NS(v, 1) == '0.08 + 1.*I'
def test_old_docstring():
a = (E + pi*I)*(E - pi*I)
assert NS(a) == '17.2586605000200'
assert a.n() == 17.25866050002001
def test_issue_4806():
assert integrate(atan(x)**2, (x, -1, 1)).evalf().round(1) == 0.5
assert atan(0, evaluate=False).n() == 0
def test_evalf_mul():
# sympy should not try to expand this; it should be handled term-wise
# in evalf through mpmath
assert NS(product(1 + sqrt(n)*I, (n, 1, 500)), 1) == '5.e+567 + 2.e+568*I'
def test_scaled_zero():
a, b = (([0], 1, 100, 1), -1)
assert scaled_zero(100) == (a, b)
assert scaled_zero(a) == (0, 1, 100, 1)
a, b = (([1], 1, 100, 1), -1)
assert scaled_zero(100, -1) == (a, b)
assert scaled_zero(a) == (1, 1, 100, 1)
raises(ValueError, lambda: scaled_zero(scaled_zero(100)))
raises(ValueError, lambda: scaled_zero(100, 2))
raises(ValueError, lambda: scaled_zero(100, 0))
raises(ValueError, lambda: scaled_zero((1, 5, 1, 3)))
def test_chop_value():
for i in range(-27, 28):
assert (Pow(10, i)*2).n(chop=10**i) and not (Pow(10, i)).n(chop=10**i)
def test_infinities():
assert oo.evalf(chop=True) == inf
assert (-oo).evalf(chop=True) == ninf
def test_to_mpmath():
assert sqrt(3)._to_mpmath(20)._mpf_ == (0, long(908093), -19, 20)
assert S(3.2)._to_mpmath(20)._mpf_ == (0, long(838861), -18, 20)
def test_issue_6632_evalf():
add = (-100000*sqrt(2500000001) + 5000000001)
assert add.n() == 9.999999998e-11
assert (add*add).n() == 9.999999996e-21
| bsd-3-clause |
wnt-zhp/hufce | django/contrib/localflavor/cn/forms.py | 89 | 6503 | # -*- coding: utf-8 -*-
"""
Chinese-specific form helpers
"""
from __future__ import absolute_import
import re
from django.contrib.localflavor.cn.cn_provinces import CN_PROVINCE_CHOICES
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField, Select
from django.utils.translation import ugettext_lazy as _
__all__ = (
'CNProvinceSelect',
'CNPostCodeField',
'CNIDCardField',
'CNPhoneNumberField',
'CNCellNumberField',
)
ID_CARD_RE = r'^\d{15}(\d{2}[0-9xX])?$'
POST_CODE_RE = r'^\d{6}$'
PHONE_RE = r'^\d{3,4}-\d{7,8}(-\d+)?$'
CELL_RE = r'^1[358]\d{9}$'
# Valid location code used in id card checking algorithm
CN_LOCATION_CODES = (
11, # Beijing
12, # Tianjin
13, # Hebei
14, # Shanxi
15, # Nei Mongol
21, # Liaoning
22, # Jilin
23, # Heilongjiang
31, # Shanghai
32, # Jiangsu
33, # Zhejiang
34, # Anhui
35, # Fujian
36, # Jiangxi
37, # Shandong
41, # Henan
42, # Hubei
43, # Hunan
44, # Guangdong
45, # Guangxi
46, # Hainan
50, # Chongqing
51, # Sichuan
52, # Guizhou
53, # Yunnan
54, # Xizang
61, # Shaanxi
62, # Gansu
63, # Qinghai
64, # Ningxia
65, # Xinjiang
71, # Taiwan
81, # Hong Kong
91, # Macao
)
class CNProvinceSelect(Select):
"""
A select widget with list of Chinese provinces as choices.
"""
def __init__(self, attrs=None):
super(CNProvinceSelect, self).__init__(
attrs, choices=CN_PROVINCE_CHOICES,
)
class CNPostCodeField(RegexField):
"""
A form field that validates as Chinese post code.
Valid code is XXXXXX where X is digit.
"""
default_error_messages = {
'invalid': _(u'Enter a post code in the format XXXXXX.'),
}
def __init__(self, *args, **kwargs):
super(CNPostCodeField, self).__init__(POST_CODE_RE, *args, **kwargs)
class CNIDCardField(CharField):
"""
A form field that validates as Chinese Identification Card Number.
This field would check the following restrictions:
    * the length can only be 15 or 18.
    * if the length is 18, the last digit can be x or X.
    * has a valid checksum (length 18 only).
    * has a valid birthdate.
    * has a valid location.
The checksum algorithm is described in GB11643-1999.
"""
default_error_messages = {
'invalid': _(u'ID Card Number consists of 15 or 18 digits.'),
'checksum': _(u'Invalid ID Card Number: Wrong checksum'),
'birthday': _(u'Invalid ID Card Number: Wrong birthdate'),
'location': _(u'Invalid ID Card Number: Wrong location code'),
}
def __init__(self, max_length=18, min_length=15, *args, **kwargs):
super(CNIDCardField, self).__init__(max_length, min_length, *args,
**kwargs)
def clean(self, value):
"""
Check whether the input is a valid ID Card Number.
"""
# Check the length of the ID card number.
super(CNIDCardField, self).clean(value)
if not value:
return u""
# Check whether this ID card number has valid format
if not re.match(ID_CARD_RE, value):
raise ValidationError(self.error_messages['invalid'])
# Check the birthday of the ID card number.
if not self.has_valid_birthday(value):
raise ValidationError(self.error_messages['birthday'])
# Check the location of the ID card number.
if not self.has_valid_location(value):
raise ValidationError(self.error_messages['location'])
# Check the checksum of the ID card number.
value = value.upper()
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_birthday(self, value):
"""
This function would grab the birthdate from the ID card number and test
whether it is a valid date.
"""
from datetime import datetime
if len(value) == 15:
# 1st generation ID card
time_string = value[6:12]
format_string = "%y%m%d"
else:
# 2nd generation ID card
time_string = value[6:14]
format_string = "%Y%m%d"
try:
datetime.strptime(time_string, format_string)
return True
except ValueError:
# invalid date
return False
def has_valid_location(self, value):
"""
This method checks if the first two digits in the ID Card are valid.
"""
return int(value[:2]) in CN_LOCATION_CODES
def has_valid_checksum(self, value):
"""
This method checks if the last letter/digit in value is valid
according to the algorithm the ID Card follows.
"""
# If the length of the number is not 18, then the number is a 1st
# generation ID card number, and there is no checksum to be checked.
if len(value) != 18:
return True
checksum_index = sum(
map(
lambda a,b:a*(ord(b)-ord('0')),
(7,9,10,5,8,4,2,1,6,3,7,9,10,5,8,4,2),
value[:17],
),
) % 11
return '10X98765432'[checksum_index] == value[-1]
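# --- Illustrative sketch (not part of the original module) ---
# A minimal standalone rendering of the GB11643-1999 checksum used by
# CNIDCardField.has_valid_checksum above, assuming an 18-character ID
# number; the weight table and check-digit alphabet mirror that method.
def _example_cn_id_checksum(value):
    weights = (7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2)
    # Weighted sum of the first 17 digits, modulo 11, indexes the check digit.
    index = sum(w * int(d) for w, d in zip(weights, value[:17])) % 11
    return '10X98765432'[index] == value[-1].upper()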
class CNPhoneNumberField(RegexField):
"""
A form field that validates as Chinese phone number
A valid phone number could be like:
010-55555555
    Since there might be extension phone numbers, this could also be:
010-55555555-35
"""
default_error_messages = {
'invalid': _(u'Enter a valid phone number.'),
}
def __init__(self, *args, **kwargs):
super(CNPhoneNumberField, self).__init__(PHONE_RE, *args, **kwargs)
class CNCellNumberField(RegexField):
"""
A form field that validates as Chinese cell number
A valid cell number could be like:
13012345678
    We use a rough rule here: the first digit must be 1, the second can be
    3, 5 or 8, and the remaining digits can be anything.
    The length of the cell number must be 11.
"""
default_error_messages = {
'invalid': _(u'Enter a valid cell number.'),
}
def __init__(self, *args, **kwargs):
super(CNCellNumberField, self).__init__(CELL_RE, *args, **kwargs)
| gpl-3.0 |
AndroidOpenDevelopment/android_external_chromium_org | net/tools/tld_cleanup/PRESUBMIT.py | 75 | 1117 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/net/tools/tld_cleanup."""
def _RunMakeDafsaTests(input_api, output_api):
"""Runs unittest for make_dafsa if any related file has been modified."""
files = ('net/tools/tld_cleanup/make_dafsa.py',
'net/tools/tld_cleanup/make_dafsa_unittest.py')
if not any(f in input_api.LocalPaths() for f in files):
return []
test_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
'make_dafsa_unittest.py')
cmd_name = 'make_dafsa_unittest'
cmd = [input_api.python_executable, test_path]
test_cmd = input_api.Command(
name=cmd_name,
cmd=cmd,
kwargs={},
message=output_api.PresubmitPromptWarning)
return input_api.RunTests([test_cmd])
def CheckChangeOnUpload(input_api, output_api):
return _RunMakeDafsaTests(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _RunMakeDafsaTests(input_api, output_api)
| bsd-3-clause |
maxalbert/bokeh | bokeh/server/views/autoload_js_handler.py | 2 | 1557 | ''' Provide a request handler that returns a page displaying a document.
'''
from __future__ import absolute_import, print_function
import logging
log = logging.getLogger(__name__)
from tornado.web import RequestHandler
from bokeh.templates import AUTOLOAD_JS
from bokeh.util.string import encode_utf8
class AutoloadJsHandler(RequestHandler):
''' Implements a custom Tornado handler for the autoload JS chunk
'''
def __init__(self, tornado_app, *args, **kw):
self.application_context = kw['application_context']
self.bokeh_websocket_path = kw['bokeh_websocket_path']
# Note: tornado_app is stored as self.application
super(AutoloadJsHandler, self).__init__(tornado_app, *args, **kw)
def initialize(self, *args, **kw):
pass
def get(self, *args, **kwargs):
element_id = self.get_argument("bokeh-autoload-element", default=None)
if not element_id:
self.send_error(status_code=400, reason='No bokeh-autoload-element query parameter')
return
resources = self.application.resources(self.request)
websocket_url = self.application.websocket_url_for_request(self.request, self.bokeh_websocket_path)
js = AUTOLOAD_JS.render(
docs_json = None,
js_urls = resources.js_files,
css_files = resources.css_files,
elementid = element_id,
websocket_url = websocket_url
)
self.set_header("Content-Type", 'application/javascript')
self.write(encode_utf8(js))
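# --- Hedged illustration (not part of the original handler) ---
# The kind of tag a host page would embed to hit this handler; the server
# address, application path and element id below are illustrative assumptions.
#
#   <script
#       src="http://localhost:5006/myapp/autoload.js?bokeh-autoload-element=plot-area"
#       id="plot-area"></script>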
| bsd-3-clause |
andyxhadji/incubator-airflow | airflow/contrib/operators/gcs_to_s3.py | 3 | 5260 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.contrib.operators.gcs_list_operator import GoogleCloudStorageListOperator
from airflow.utils.decorators import apply_defaults
from airflow.hooks.S3_hook import S3Hook
class GoogleCloudStorageToS3Operator(GoogleCloudStorageListOperator):
"""
Synchronizes a Google Cloud Storage bucket with an S3 bucket.
:param bucket: The Google Cloud Storage bucket to find the objects. (templated)
:type bucket: str
:param prefix: Prefix string which filters objects whose name begin with
this prefix. (templated)
:type prefix: str
:param delimiter: The delimiter by which you want to filter the objects. (templated)
        For example, to list the CSV files in a directory in GCS you would use
delimiter='.csv'.
:type delimiter: str
:param google_cloud_storage_conn_id: The connection ID to use when
connecting to Google Cloud Storage.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param dest_aws_conn_id: The destination S3 connection
:type dest_aws_conn_id: str
:param dest_s3_key: The base S3 key to be used to store the files. (templated)
:type dest_s3_key: str
    :param dest_verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- False: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
- path/to/cert/bundle.pem: A filename of the CA cert bundle to uses.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:type dest_verify: bool or str
"""
template_fields = ('bucket', 'prefix', 'delimiter', 'dest_s3_key')
ui_color = '#f0eee4'
@apply_defaults
def __init__(self,
bucket,
prefix=None,
delimiter=None,
google_cloud_storage_conn_id='google_cloud_storage_default',
delegate_to=None,
dest_aws_conn_id=None,
dest_s3_key=None,
dest_verify=None,
replace=False,
*args,
**kwargs):
super(GoogleCloudStorageToS3Operator, self).__init__(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
google_cloud_storage_conn_id=google_cloud_storage_conn_id,
delegate_to=delegate_to,
*args,
**kwargs
)
self.dest_aws_conn_id = dest_aws_conn_id
self.dest_s3_key = dest_s3_key
self.dest_verify = dest_verify
self.replace = replace
def execute(self, context):
        # use the super to list all files in the Google Cloud Storage bucket
files = super(GoogleCloudStorageToS3Operator, self).execute(context)
s3_hook = S3Hook(aws_conn_id=self.dest_aws_conn_id, verify=self.dest_verify)
if not self.replace:
# if we are not replacing -> list all files in the S3 bucket
# and only keep those files which are present in
# Google Cloud Storage and not in S3
bucket_name, _ = S3Hook.parse_s3_url(self.dest_s3_key)
existing_files = s3_hook.list_keys(bucket_name)
files = set(files) - set(existing_files)
if files:
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to
)
for file in files:
file_bytes = hook.download(self.bucket, file)
dest_key = self.dest_s3_key + file
self.log.info("Saving file to %s", dest_key)
s3_hook.load_bytes(file_bytes,
key=dest_key,
replace=self.replace)
self.log.info("All done, uploaded %d files to S3", len(files))
else:
self.log.info("In sync, no files needed to be uploaded to S3")
return files
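# --- Hedged usage sketch (not part of the original module) ---
# How this operator is typically wired into a DAG; the connection ids,
# bucket, prefix and destination key below are illustrative assumptions.
#
# sync_gcs_to_s3 = GoogleCloudStorageToS3Operator(
#     task_id='sync_gcs_to_s3',
#     bucket='example-gcs-bucket',
#     prefix='exports/',
#     delimiter='.csv',
#     google_cloud_storage_conn_id='google_cloud_storage_default',
#     dest_aws_conn_id='aws_default',
#     dest_s3_key='s3://example-s3-bucket/exports/',
#     replace=False,
#     dag=dag,
# )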
| apache-2.0 |
baberthal/CouchPotatoServer | couchpotato/core/notifications/growl.py | 65 | 3163 | import traceback
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from couchpotato.environment import Env
from gntp import notifier
log = CPLog(__name__)
autoload = 'Growl'
class Growl(Notification):
registered = False
def __init__(self):
super(Growl, self).__init__()
self.growl = None
if self.isEnabled():
addEvent('app.load', self.register)
def register(self):
if self.registered: return
try:
hostname = self.conf('hostname')
password = self.conf('password')
port = self.conf('port')
self.growl = notifier.GrowlNotifier(
applicationName = Env.get('appname'),
notifications = ['Updates'],
defaultNotifications = ['Updates'],
applicationIcon = self.getNotificationImage('medium'),
hostname = hostname if hostname else 'localhost',
password = password if password else None,
port = port if port else 23053
)
self.growl.register()
self.registered = True
except Exception as e:
if 'timed out' in str(e):
self.registered = True
else:
log.error('Failed register of growl: %s', traceback.format_exc())
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
self.register()
try:
self.growl.notify(
noteType = 'Updates',
title = self.default_title,
description = message,
sticky = False,
priority = 1,
)
log.info('Growl notifications sent.')
return True
except:
log.error('Failed growl notification.')
return False
config = [{
'name': 'growl',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'growl',
'description': 'Version 1.4+',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'on_snatch',
'default': False,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
{
'name': 'hostname',
'description': 'Notify growl over network. Needs restart.',
'advanced': True,
},
{
'name': 'port',
'type': 'int',
'advanced': True,
},
{
'name': 'password',
'type': 'password',
'advanced': True,
},
],
}
],
}]
| gpl-3.0 |
zero-rp/miniblink49 | third_party/WebKit/Source/build/scripts/make_style_shorthands.py | 65 | 3163 | #!/usr/bin/env python
# Copyright (C) 2013 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import defaultdict
import sys
import css_properties
import in_generator
from name_utilities import lower_first
import template_expander
class StylePropertyShorthandWriter(css_properties.CSSProperties):
class_name = 'StylePropertyShorthand'
def __init__(self, in_file_path):
super(StylePropertyShorthandWriter, self).__init__(in_file_path)
self._outputs = {
('StylePropertyShorthand.cpp'): self.generate_style_property_shorthand_cpp,
('StylePropertyShorthand.h'): self.generate_style_property_shorthand_h}
self._longhand_dictionary = defaultdict(list)
self._properties = {property_id: property for property_id, property in self._properties.items() if property['longhands']}
for property in self._properties.values():
property['longhand_property_ids'] = map(css_properties.css_name_to_enum, property['longhands'].split(';'))
for longhand in property['longhand_property_ids']:
self._longhand_dictionary[longhand].append(property)
@template_expander.use_jinja('StylePropertyShorthand.cpp.tmpl')
def generate_style_property_shorthand_cpp(self):
return {
'properties': self._properties,
'longhands_dictionary': self._longhand_dictionary,
}
@template_expander.use_jinja('StylePropertyShorthand.h.tmpl')
def generate_style_property_shorthand_h(self):
return {
'properties': self._properties,
}
if __name__ == '__main__':
in_generator.Maker(StylePropertyShorthandWriter).main(sys.argv)
| apache-2.0 |
axilleas/ansible | lib/ansible/plugins/inventory/ini.py | 90 | 2220 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from . import InventoryParser
class InventoryIniParser(InventoryParser):
def __init__(self, inven_directory):
directory = inven_directory
names = os.listdir(inven_directory)
filtered_names = []
# Clean up the list of filenames
for filename in names:
# Skip files that end with certain extensions or characters
if any(filename.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
continue
# Skip hidden files
if filename.startswith('.') and not filename.startswith('.{0}'.format(os.path.sep)):
continue
# These are things inside of an inventory basedir
if filename in ("host_vars", "group_vars", "vars_plugins"):
continue
fullpath = os.path.join(directory, filename)
            filtered_names.append(fullpath)
        super(InventoryIniParser, self).__init__(filtered_names)
def parse(self):
        return super(InventoryIniParser, self).parse()
def _before_comment(self, msg):
''' what's the part of a string before a comment? '''
msg = msg.replace("\#","**NOT_A_COMMENT**")
msg = msg.split("#")[0]
msg = msg.replace("**NOT_A_COMMENT**","#")
return msg
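# --- Illustrative note (not part of the original plugin) ---
# Behaviour of the escaped-comment handling above, with assumed inputs
# (the first result keeps its trailing space):
#   _before_comment("host ansible_ssh_host=1.2.3.4 # staging") -> "host ansible_ssh_host=1.2.3.4 "
#   _before_comment(r"password=\#still-part-of-the-value")     -> "password=#still-part-of-the-value"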
| gpl-3.0 |
enkiv2/popcorn_maker | vendor-local/lib/python/html5lib/tests/tokenizertotree.py | 72 | 1909 | import sys
import os
import json
import re
import html5lib
import support
import test_parser
import test_tokenizer
p = html5lib.HTMLParser()
unnamespaceExpected = re.compile(r"^(\|\s*)<html ([^>]+)>", re.M).sub
def main(out_path):
if not os.path.exists(out_path):
sys.stderr.write("Path %s does not exist"%out_path)
sys.exit(1)
for filename in support.html5lib_test_files('tokenizer', '*.test'):
run_file(filename, out_path)
def run_file(filename, out_path):
try:
tests_data = json.load(file(filename))
except ValueError:
sys.stderr.write("Failed to load %s\n"%filename)
return
name = os.path.splitext(os.path.split(filename)[1])[0]
output_file = open(os.path.join(out_path, "tokenizer_%s.dat"%name), "w")
if 'tests' in tests_data:
for test_data in tests_data['tests']:
if 'initialStates' not in test_data:
test_data["initialStates"] = ["Data state"]
for initial_state in test_data["initialStates"]:
if initial_state != "Data state":
#don't support this yet
continue
test = make_test(test_data)
output_file.write(test)
output_file.close()
def make_test(test_data):
if 'doubleEscaped' in test_data:
test_data = test_tokenizer.unescape_test(test_data)
rv = []
rv.append("#data")
rv.append(test_data["input"].encode("utf8"))
rv.append("#errors")
tree = p.parse(test_data["input"])
output = p.tree.testSerializer(tree)
output = "\n".join(("| "+ line[3:]) if line.startswith("| ") else line
for line in output.split("\n"))
output = unnamespaceExpected(r"\1<\2>", output)
rv.append(output.encode("utf8"))
rv.append("")
return "\n".join(rv)
if __name__ == "__main__":
main(sys.argv[1])
| bsd-3-clause |
hubert667/AIR | build/celery/celery/tests/contrib/test_rdb.py | 2 | 2943 | from __future__ import absolute_import
import errno
import socket
from celery.contrib.rdb import (
Rdb,
debugger,
set_trace,
)
from celery.tests.case import Case, Mock, WhateverIO, patch, skip_if_pypy
class SockErr(socket.error):
errno = None
class test_Rdb(Case):
@patch('celery.contrib.rdb.Rdb')
def test_debugger(self, Rdb):
x = debugger()
self.assertTrue(x)
self.assertIs(x, debugger())
@patch('celery.contrib.rdb.debugger')
@patch('celery.contrib.rdb._frame')
def test_set_trace(self, _frame, debugger):
self.assertTrue(set_trace(Mock()))
self.assertTrue(set_trace())
self.assertTrue(debugger.return_value.set_trace.called)
@patch('celery.contrib.rdb.Rdb.get_avail_port')
@skip_if_pypy
def test_rdb(self, get_avail_port):
sock = Mock()
get_avail_port.return_value = (sock, 8000)
sock.accept.return_value = (Mock(), ['helu'])
out = WhateverIO()
rdb = Rdb(out=out)
self.assertTrue(get_avail_port.called)
self.assertIn('helu', out.getvalue())
# set_quit
with patch('sys.settrace') as settrace:
rdb.set_quit()
settrace.assert_called_with(None)
# set_trace
with patch('celery.contrib.rdb.Pdb.set_trace') as pset:
with patch('celery.contrib.rdb._frame'):
rdb.set_trace()
rdb.set_trace(Mock())
pset.side_effect = SockErr
pset.side_effect.errno = errno.ECONNRESET
rdb.set_trace()
pset.side_effect.errno = errno.ENOENT
with self.assertRaises(SockErr):
rdb.set_trace()
# _close_session
rdb._close_session()
# do_continue
rdb.set_continue = Mock()
rdb.do_continue(Mock())
rdb.set_continue.assert_called_with()
# do_quit
rdb.set_quit = Mock()
rdb.do_quit(Mock())
rdb.set_quit.assert_called_with()
@patch('socket.socket')
@skip_if_pypy
def test_get_avail_port(self, sock):
out = WhateverIO()
sock.return_value.accept.return_value = (Mock(), ['helu'])
Rdb(out=out)
with patch('celery.contrib.rdb.current_process') as curproc:
curproc.return_value.name = 'PoolWorker-10'
Rdb(out=out)
err = sock.return_value.bind.side_effect = SockErr()
err.errno = errno.ENOENT
with self.assertRaises(SockErr):
Rdb(out=out)
err.errno = errno.EADDRINUSE
with self.assertRaises(Exception):
Rdb(out=out)
called = [0]
def effect(*a, **kw):
try:
if called[0] > 50:
return True
raise err
finally:
called[0] += 1
sock.return_value.bind.side_effect = effect
Rdb(out=out)
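# --- Hedged usage sketch (not part of the test module) ---
# How the debugger exercised above is typically invoked from application
# code; the task body and the existence of an `app` instance are assumptions.
#
# from celery.contrib import rdb
#
# @app.task
# def add(x, y):
#     rdb.set_trace()  # opens a remote (telnet) debugging session
#     return x + y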
| gpl-3.0 |
hassoon3/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/ConvertBracesToField.py | 384 | 12556 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import unohelper
import string
import re
import base64
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from LoginTest import *
from lib.logreport import *
from lib.rpc import *
database="test"
uid = 1
class ConvertBracesToField( unohelper.Base, XJobExecutor ):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
LoginTest()
self.logobj=Logger()
if not loginstatus and __name__=="package":
exit(1)
global passwd
self.password = passwd
global url
self.sock=RPCSession(url)
self.aReportSyntex=[]
self.getBraces(self.aReportSyntex)
self.setValue()
def setValue(self):
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo= doc.getDocumentInfo()
count = 0
regexes = [
['[a-zA-Z0-9_]+\.[a-zA-Z0-9_.]+',"Field"],
['\\[\\[ *repeatIn\\( *([a-zA-Z0-9_\.]+), *\'([a-zA-Z0-9_]+)\' *\\) *\\]\\]', "RepeatIn"],
['\\[\\[ *([a-zA-Z0-9_\.]+) *\\]\\]', "Field"]
# ['\\[\\[ ([a-zA-Z0-9_]+\.[a-zA-Z1-9]) \\]\\]',"Field"],
# ['\\[\\[ [a-zA-Z0-9_\.]+ and ([a-zA-Z0-9_\.]+) or .+? \\]\\]',"Field"],
# ['\\[\\[ ([a-zA-Z0-9_\.]+) or .+? \\]\\]',"Field"],
# ['\\[\\[ ([a-zA-Z0-9_\.]+) and .+? \\]\\]',"Field"],
# ['\\[\\[ .+? or ([a-zA-Z0-9_\.]+) \\]\\]',"Field"],
# ['\\[\\[ (.+?) and ([a-zA-Z0-9_\.]+) \\]\\]',"Field"],
# ['\\[\\[ .+? % ([a-zA-Z0-9_\.]+) \\]\\]',"Field"]
]
oFieldObject = []
oRepeatInObjects = []
saRepeatInList = []
sHost = docinfo.getUserFieldValue(0)
nCount = 0
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
nCount += 1
getList(oRepeatInObjects,sHost,nCount)
for ro in oRepeatInObjects:
if ro.find("(")<>-1:
saRepeatInList.append( [ ro[:ro.find("(")], ro[ro.find("(")+1:ro.find(")")] ])
try:
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
for reg in regexes:
res=re.findall(reg[0],oPar.Items[1])
if len(res) <> 0:
if res[0][0] == "objects":
sTemp = docinfo.getUserFieldValue(3)
sTemp = "|-." + sTemp[sTemp.rfind(".")+1:] + ".-|"
oPar.Items=(sTemp.encode("utf-8"),oPar.Items[1].replace(' ',""))
oPar.update()
elif type(res[0]) <> type(u''):
sObject = self.getRes(self.sock, docinfo.getUserFieldValue(3), res[0][0][res[0][0].find(".")+1:].replace(".","/"))
r = self.sock.execute(database, uid, self.password, docinfo.getUserFieldValue(3) , 'fields_get')
sExpr="|-." + r[res[0][0][res[0][0].rfind(".")+1:]]["string"] + ".-|"
oPar.Items=(sExpr.encode("utf-8"),oPar.Items[1].replace(' ',""))
oPar.update()
else:
obj = None
for rl in saRepeatInList:
if rl[0] == res[0][:res[0].find(".")]:
obj=rl[1]
try:
sObject = self.getRes(self.sock, obj, res[0][res[0].find(".")+1:].replace(".","/"))
r = self.sock.execute(database, uid, self.password, sObject , 'read',[1])
except Exception,e:
r = "TTT"
self.logobj.log_write('ConvertBracesToField', LOG_ERROR, str(e))
if len(r) <> 0:
if r <> "TTT":
if len(res)>1:
sExpr=""
print res
if reg[1] == 'Field':
for ires in res:
try:
sExpr=r[0][ires[ires.rfind(".")+1:]]
break
except Exception,e:
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('ConvertBracesToField', LOG_ERROR,info)
try:
oPar.Items=(sExpr.encode("utf-8") ,oPar.Items[1])
oPar.update()
except:
oPar.Items=(str(sExpr) ,oPar.Items[1])
oPar.update()
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('ConvertBracesToField', LOG_ERROR, info)
else:
sExpr=r[0][res[0][res[0].rfind(".")+1:]]
try:
if sExpr:
oPar.Items=(sExpr.encode("utf-8") ,oPar.Items[1])
oPar.update()
else:
oPar.Items=(u"/",oPar.Items[1])
oPar.update()
except:
oPar.Items=(str(sExpr) ,oPar.Items[1])
oPar.update()
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('ConvertBracesToField', LOG_ERROR,info)
else:
oPar.Items=(u""+r,oPar.Items[1])
oPar.update()
else:
oPar.Items=(u"TTT",oPar.Items[1])
oPar.update()
except:
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('ConvertBraceToField', LOG_ERROR, info)
def getRes(self, sock, sObject, sVar):
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
res = sock.execute(database, uid, self.password, sObject , 'fields_get')
key = res.keys()
key.sort()
myval=None
if not sVar.find("/")==-1:
myval=sVar[:sVar.find("/")]
else:
myval=sVar
for k in key:
if (res[k]['type'] in ['many2one']) and k==myval:
sObject = self.getRes(sock,res[myval]['relation'], sVar[sVar.find("/")+1:])
return sObject
def getBraces(self, aReportSyntex=None):
if aReportSyntex is None:
aReportSyntex = []
desktop=getDesktop()
doc = desktop.getCurrentComponent()
aSearchString=[]
aReplaceString=[]
aRes=[]
try:
regexes = [
['\\[\\[ *repeatIn\\( *([a-zA-Z0-9_\.]+), *\'([a-zA-Z0-9_]+)\' *\\) *\\]\\]', "RepeatIn"],
['\\[\\[ *([a-zA-Z0-9_\.]+) *\\]\\]', "Field"],
['\\[\\[ *.+? *\\]\\]', "Expression"]
]
search = doc.createSearchDescriptor()
search.SearchRegularExpression = True
for reg in regexes:
search.SearchString = reg[0]
found = doc.findFirst( search )
while found:
res=re.findall(reg[0],found.String)
print len(res)
if found.String not in [r[0] for r in aReportSyntex] and len(res) == 1 :
text=found.getText()
oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
if reg[1]<>"Expression":
oInputList.Items=(u""+found.String,u""+found.String)
else:
oInputList.Items=(u"?",u""+found.String)
aReportSyntex.append([oInputList,reg[1]])
text.insertTextContent(found,oInputList,False)
found.String =""
else:
aRes.append([res,reg[1]])
found = doc.findNext(found.End, search)
search = doc.createSearchDescriptor()
search.SearchRegularExpression = False
for res in aRes:
for r in res[0]:
search.SearchString=r
found=doc.findFirst(search)
while found:
text=found.getText()
oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
if res[1]<>"Expression":
oInputList.Items=(u""+found.String,u""+found.String)
else:
oInputList.Items=(u"?",u""+found.String)
aReportSyntex.append([oInputList,res[1]])
text.insertTextContent(found,oInputList,False)
found.String =""
found = doc.findNext(found.End, search)
except:
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('ConvertBraceToField', LOG_ERROR, info)
if __name__<>"package":
ConvertBracesToField(None)
else:
g_ImplementationHelper.addImplementation( ConvertBracesToField, "org.openoffice.openerp.report.convertBF", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
eeshangarg/oh-mainline | vendor/packages/python-social-auth/social/backends/github.py | 17 | 3762 | """
Github OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/github.html
"""
from requests import HTTPError
from social.exceptions import AuthFailed
from social.backends.oauth import BaseOAuth2
class GithubOAuth2(BaseOAuth2):
"""Github OAuth authentication backend"""
name = 'github'
AUTHORIZATION_URL = 'https://github.com/login/oauth/authorize'
ACCESS_TOKEN_URL = 'https://github.com/login/oauth/access_token'
ACCESS_TOKEN_METHOD = 'POST'
SCOPE_SEPARATOR = ','
EXTRA_DATA = [
('id', 'id'),
('expires', 'expires'),
('login', 'login')
]
def get_user_details(self, response):
"""Return user details from Github account"""
fullname, first_name, last_name = self.get_user_names(
response.get('name')
)
return {'username': response.get('login'),
'email': response.get('email') or '',
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
data = self._user_data(access_token)
if not data.get('email'):
try:
emails = self._user_data(access_token, '/emails')
except (HTTPError, ValueError, TypeError):
emails = []
if emails:
email = emails[0]
primary_emails = [e for e in emails
if not isinstance(e, dict) or
e.get('primary')]
if primary_emails:
email = primary_emails[0]
if isinstance(email, dict):
email = email.get('email', '')
data['email'] = email
return data
def _user_data(self, access_token, path=None):
url = 'https://api.github.com/user{0}'.format(path or '')
return self.get_json(url, params={'access_token': access_token})
class GithubMemberOAuth2(GithubOAuth2):
no_member_string = ''
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
user_data = super(GithubMemberOAuth2, self).user_data(
access_token, *args, **kwargs
)
try:
self.request(self.member_url(user_data), params={
'access_token': access_token
})
except HTTPError as err:
# if the user is a member of the organization, response code
# will be 204, see http://bit.ly/ZS6vFl
if err.response.status_code != 204:
raise AuthFailed(self,
'User doesn\'t belong to the organization')
return user_data
def member_url(self, user_data):
raise NotImplementedError('Implement in subclass')
class GithubOrganizationOAuth2(GithubMemberOAuth2):
"""Github OAuth2 authentication backend for organizations"""
name = 'github-org'
no_member_string = 'User doesn\'t belong to the organization'
def member_url(self, user_data):
return 'https://api.github.com/orgs/{org}/members/{username}'\
.format(org=self.setting('NAME'),
username=user_data.get('login'))
class GithubTeamOAuth2(GithubMemberOAuth2):
"""Github OAuth2 authentication backend for teams"""
name = 'github-team'
no_member_string = 'User doesn\'t belong to the team'
def member_url(self, user_data):
return 'https://api.github.com/teams/{team_id}/members/{username}'\
.format(team_id=self.setting('ID'),
username=user_data.get('login'))
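# --- Hedged configuration sketch (not part of the original module) ---
# Typical Django-style settings for the plain Github backend; the key and
# secret values are placeholders and the scope list is an assumption.
#
# AUTHENTICATION_BACKENDS = (
#     'social.backends.github.GithubOAuth2',
# )
# SOCIAL_AUTH_GITHUB_KEY = '<client-id>'
# SOCIAL_AUTH_GITHUB_SECRET = '<client-secret>'
# SOCIAL_AUTH_GITHUB_SCOPE = ['user:email']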
| agpl-3.0 |
KyleJamesWalker/ansible | lib/ansible/modules/utilities/logic/include_vars.py | 50 | 3943 | # -*- mode: python -*-
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
author: "Allen Sanabria (@linuxdynasty)"
module: include_vars
short_description: Load variables from files, dynamically within a task.
description:
- Loads variables from a YAML/JSON files dynamically from within a file or from a directory recursively during task runtime. If loading a directory,
the files are sorted alphabetically before being loaded.
version_added: "1.4"
options:
file:
version_added: "2.2"
description:
- The file name from which variables should be loaded.
- If the path is relative, it will look for the file in vars/ subdirectory of a role or relative to playbook.
dir:
version_added: "2.2"
description:
- The directory name from which the variables should be loaded.
- If the path is relative, it will look for the file in vars/ subdirectory of a role or relative to playbook.
default: null
name:
version_added: "2.2"
description:
- The name of a variable into which assign the included vars, if omitted (null) they will be made top level vars.
default: null
depth:
version_added: "2.2"
description:
- When using C(dir), this module will, by default, recursively go through each sub directory and load up the variables.
        By explicitly setting the depth, this module will only descend that many levels of sub directories.
default: 0
files_matching:
version_added: "2.2"
description:
- Limit the files that are loaded within any directory to this regular expression.
default: null
ignore_files:
version_added: "2.2"
description:
- List of file names to ignore.
default: null
extensions:
version_added: "2.3"
description:
- List of file extensions to read when using C(dir).
default: ['yaml', 'yml', 'json']
required: False
free-form:
description:
- This module allows you to specify the 'file' option directly w/o any other options.
There is no 'free-form' option, this is just an indicator, see example below.
'''
EXAMPLES = """
- name: Include vars of stuff.yml into the 'stuff' variable (2.2).
include_vars:
file: stuff.yml
name: stuff
- name: Conditionally decide to load in variables into 'plans' when x is 0, otherwise do not. (2.2)
include_vars:
file: contingency_plan.yml
name: plans
when: x == 0
- name: Load a variable file based on the OS type, or a default if not found. Using free-form to specify the file.
include_vars: "{{ item }}"
with_first_found:
- "{{ ansible_distribution }}.yml"
- "{{ ansible_os_family }}.yml"
- "default.yml"
- name: bare include (free-form)
include_vars: myvars.yml
- name: Include all .json and .jsn files in vars/all and all nested directories (2.3)
include_vars:
dir: 'vars/all'
extensions:
- json
- jsn
- name: Include all default extension files in vars/all and all nested directories and save the output in test. (2.2)
include_vars:
dir: 'vars/all'
name: test
- name: Include default extension files in vars/services (2.2)
include_vars:
dir: 'vars/services'
depth: 1
- name: Include only files matching bastion.yml (2.2)
include_vars:
dir: 'vars'
files_matching: 'bastion.yml'
- name: Include all .yml files except bastion.yml (2.3)
include_vars:
dir: 'vars'
ignore_files: 'bastion.yml'
extensions: ['yml']
"""
| gpl-3.0 |
openstack/nova | nova/tests/unit/test_block_device.py | 3 | 38315 | # Copyright 2011 Isaku Yamahata
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Block Device utility functions.
"""
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
from nova import block_device
from nova.compute import api as compute_api
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import matchers
from nova.volume import cinder
class BlockDeviceTestCase(test.NoDBTestCase):
def setUp(self):
super(BlockDeviceTestCase, self).setUp()
BDM = block_device.BlockDeviceDict
self.new_mapping = [
BDM({'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'volume_size': 1,
'guest_format': 'swap',
'boot_index': -1}),
BDM({'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'volume_size': 10,
'delete_on_termination': True,
'boot_index': -1}),
BDM({'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'connection_info': "{'fake': 'connection_info'}",
'boot_index': 0}),
BDM({'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1}),
BDM({'id': 5, 'instance_uuid': uuids.instance,
'no_device': True,
'device_name': '/dev/vdc'}),
]
def test_properties(self):
root_device0 = '/dev/sda'
root_device1 = '/dev/sdb'
mappings = [{'virtual': 'root',
'device': root_device0}]
properties0 = {'mappings': mappings}
properties1 = {'mappings': mappings,
'root_device_name': root_device1}
self.assertIsNone(block_device.properties_root_device_name({}))
self.assertEqual(root_device0,
block_device.properties_root_device_name(properties0))
self.assertEqual(root_device1,
block_device.properties_root_device_name(properties1))
def test_ephemeral(self):
self.assertFalse(block_device.is_ephemeral('ephemeral'))
self.assertTrue(block_device.is_ephemeral('ephemeral0'))
self.assertTrue(block_device.is_ephemeral('ephemeral1'))
self.assertTrue(block_device.is_ephemeral('ephemeral11'))
self.assertFalse(block_device.is_ephemeral('root'))
self.assertFalse(block_device.is_ephemeral('swap'))
self.assertFalse(block_device.is_ephemeral('/dev/sda1'))
self.assertEqual(0, block_device.ephemeral_num('ephemeral0'))
self.assertEqual(1, block_device.ephemeral_num('ephemeral1'))
self.assertEqual(11, block_device.ephemeral_num('ephemeral11'))
self.assertFalse(block_device.is_swap_or_ephemeral('ephemeral'))
self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral0'))
self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral1'))
self.assertTrue(block_device.is_swap_or_ephemeral('swap'))
self.assertFalse(block_device.is_swap_or_ephemeral('root'))
self.assertFalse(block_device.is_swap_or_ephemeral('/dev/sda1'))
def test_mappings_prepend_dev(self):
mapping = [
{'virtual': 'ami', 'device': '/dev/sda'},
{'virtual': 'root', 'device': 'sda'},
{'virtual': 'ephemeral0', 'device': 'sdb'},
{'virtual': 'swap', 'device': 'sdc'},
{'virtual': 'ephemeral1', 'device': 'sdd'},
{'virtual': 'ephemeral2', 'device': 'sde'}]
expected = [
{'virtual': 'ami', 'device': '/dev/sda'},
{'virtual': 'root', 'device': 'sda'},
{'virtual': 'ephemeral0', 'device': '/dev/sdb'},
{'virtual': 'swap', 'device': '/dev/sdc'},
{'virtual': 'ephemeral1', 'device': '/dev/sdd'},
{'virtual': 'ephemeral2', 'device': '/dev/sde'}]
prepended = block_device.mappings_prepend_dev(mapping)
self.assertEqual(sorted(expected, key=lambda v: v['virtual']),
sorted(prepended, key=lambda v: v['virtual']))
def test_strip_dev(self):
self.assertEqual('sda', block_device.strip_dev('/dev/sda'))
self.assertEqual('sda', block_device.strip_dev('sda'))
self.assertIsNone(block_device.strip_dev(None))
def test_strip_prefix(self):
self.assertEqual('a', block_device.strip_prefix('/dev/sda'))
self.assertEqual('a', block_device.strip_prefix('a'))
self.assertEqual('a', block_device.strip_prefix('xvda'))
self.assertEqual('a', block_device.strip_prefix('vda'))
self.assertEqual('a', block_device.strip_prefix('hda'))
self.assertIsNone(block_device.strip_prefix(None))
def test_get_device_letter(self):
self.assertEqual('', block_device.get_device_letter(''))
self.assertEqual('a', block_device.get_device_letter('/dev/sda1'))
self.assertEqual('b', block_device.get_device_letter('/dev/xvdb'))
self.assertEqual('d', block_device.get_device_letter('/dev/d'))
self.assertEqual('a', block_device.get_device_letter('a'))
self.assertEqual('b', block_device.get_device_letter('sdb2'))
self.assertEqual('c', block_device.get_device_letter('vdc'))
self.assertEqual('c', block_device.get_device_letter('hdc'))
self.assertIsNone(block_device.get_device_letter(None))
def test_generate_device_name(self):
expected = (
('vda', ("vd", 0)),
('vdaa', ("vd", 26)),
('vdabc', ("vd", 730)),
('vdidpok', ("vd", 4194304)),
('sdc', ("sd", 2)),
('sdaa', ("sd", 26)),
('sdiw', ("sd", 256)),
('hdzz', ("hd", 701))
)
for res, args in expected:
self.assertEqual(res, block_device.generate_device_name(*args))
def test_volume_in_mapping(self):
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
ephemerals = [{'num': 0,
'virtual_name': 'ephemeral0',
'device_name': '/dev/sdc1',
'size': 1},
{'num': 2,
'virtual_name': 'ephemeral2',
'device_name': '/dev/sdd',
'size': 1}]
block_device_mapping = [{'mount_device': '/dev/sde',
'device_path': 'fake_device'},
{'mount_device': '/dev/sdf',
'device_path': 'fake_device'}]
block_device_info = {
'root_device_name': '/dev/sda',
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
def _assert_volume_in_mapping(device_name, true_or_false):
in_mapping = block_device.volume_in_mapping(
device_name, block_device_info)
self.assertEqual(true_or_false, in_mapping)
_assert_volume_in_mapping('sda', False)
_assert_volume_in_mapping('sdb', True)
_assert_volume_in_mapping('sdc1', True)
_assert_volume_in_mapping('sdd', True)
_assert_volume_in_mapping('sde', True)
_assert_volume_in_mapping('sdf', True)
_assert_volume_in_mapping('sdg', False)
_assert_volume_in_mapping('sdh1', False)
def test_get_root_bdm(self):
root_bdm = {'device_name': 'vda', 'boot_index': 0}
bdms = [root_bdm,
{'device_name': 'vdb', 'boot_index': 1},
{'device_name': 'vdc', 'boot_index': -1},
{'device_name': 'vdd'}]
self.assertEqual(root_bdm, block_device.get_root_bdm(bdms))
self.assertEqual(root_bdm, block_device.get_root_bdm([bdms[0]]))
self.assertIsNone(block_device.get_root_bdm(bdms[1:]))
self.assertIsNone(block_device.get_root_bdm(bdms[2:]))
self.assertIsNone(block_device.get_root_bdm(bdms[3:]))
self.assertIsNone(block_device.get_root_bdm([]))
def test_get_bdm_ephemeral_disk_size(self):
size = block_device.get_bdm_ephemeral_disk_size(self.new_mapping)
self.assertEqual(10, size)
def test_get_bdm_swap_list(self):
swap_list = block_device.get_bdm_swap_list(self.new_mapping)
self.assertEqual(1, len(swap_list))
self.assertEqual(1, swap_list[0].get('id'))
def test_get_bdm_local_disk_num(self):
size = block_device.get_bdm_local_disk_num(self.new_mapping)
self.assertEqual(2, size)
def test_new_format_is_swap(self):
expected_results = [True, False, False, False, False]
for expected, bdm in zip(expected_results, self.new_mapping):
res = block_device.new_format_is_swap(bdm)
self.assertEqual(expected, res)
def test_new_format_is_ephemeral(self):
expected_results = [False, True, False, False, False]
for expected, bdm in zip(expected_results, self.new_mapping):
res = block_device.new_format_is_ephemeral(bdm)
self.assertEqual(expected, res)
def test_validate_device_name(self):
for value in [' ', 10, None, 'a' * 260]:
self.assertRaises(exception.InvalidBDMFormat,
block_device.validate_device_name,
value)
def test_validate_and_default_volume_size(self):
bdm = {}
for value in [-1, 'a', 2.5]:
bdm['volume_size'] = value
self.assertRaises(exception.InvalidBDMFormat,
block_device.validate_and_default_volume_size,
bdm)
def test_get_bdms_to_connect(self):
root_bdm = {'device_name': 'vda', 'boot_index': 0}
bdms = [root_bdm,
{'device_name': 'vdb', 'boot_index': 1},
{'device_name': 'vdc', 'boot_index': -1},
{'device_name': 'vde', 'boot_index': None},
{'device_name': 'vdd'}]
self.assertNotIn(root_bdm, block_device.get_bdms_to_connect(bdms,
exclude_root_mapping=True))
self.assertIn(root_bdm, block_device.get_bdms_to_connect(bdms))
class TestBlockDeviceDict(test.NoDBTestCase):
def setUp(self):
super(TestBlockDeviceDict, self).setUp()
BDM = block_device.BlockDeviceDict
self.api_mapping = [
{'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'boot_index': -1},
{'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'boot_index': -1},
{'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'uuid': 'fake-volume-id-1',
'boot_index': 0},
{'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'uuid': 'fake-snapshot-id-1',
'boot_index': -1},
{'id': 5, 'instance_uuid': uuids.instance,
'no_device': True,
'device_name': '/dev/vdc'},
]
self.new_mapping = [
BDM({'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'boot_index': -1}),
BDM({'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'boot_index': -1}),
BDM({'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'connection_info': "{'fake': 'connection_info'}",
'boot_index': 0}),
BDM({'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1}),
BDM({'id': 5, 'instance_uuid': uuids.instance,
'no_device': True,
'device_name': '/dev/vdc'}),
]
self.legacy_mapping = [
{'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'delete_on_termination': True,
'virtual_name': 'swap'},
{'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'delete_on_termination': True,
'virtual_name': 'ephemeral0'},
{'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'volume_id': 'fake-volume-id-1',
'connection_info': "{'fake': 'connection_info'}"},
{'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2'},
{'id': 5, 'instance_uuid': uuids.instance,
'no_device': True,
'device_name': '/dev/vdc'},
]
self.new_mapping_source_image = [
BDM({'id': 6, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda3',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'volume_id': 'fake-volume-id-3',
'boot_index': -1}),
BDM({'id': 7, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda4',
'source_type': 'image',
'destination_type': 'local',
'connection_info': "{'fake': 'connection_info'}",
'image_id': 'fake-image-id-2',
'boot_index': -1}),
]
self.legacy_mapping_source_image = [
{'id': 6, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda3',
'connection_info': "{'fake': 'connection_info'}",
'volume_id': 'fake-volume-id-3'},
]
def test_init(self):
def fake_validate(obj, dct):
pass
self.stub_out('nova.block_device.BlockDeviceDict._fields',
set(['field1', 'field2']))
self.stub_out('nova.block_device.BlockDeviceDict._db_only_fields',
set(['db_field1', 'db_field2']))
self.stub_out('nova.block_device.BlockDeviceDict._validate',
fake_validate)
# Make sure db fields are not picked up if they are not
# in the original dict
dev_dict = block_device.BlockDeviceDict({'field1': 'foo',
'field2': 'bar',
'db_field1': 'baz'})
self.assertIn('field1', dev_dict)
self.assertIn('field2', dev_dict)
self.assertIn('db_field1', dev_dict)
self.assertNotIn('db_field2', dev_dict)
# Make sure all expected fields are defaulted
dev_dict = block_device.BlockDeviceDict({'field1': 'foo'})
self.assertIn('field1', dev_dict)
self.assertIn('field2', dev_dict)
self.assertIsNone(dev_dict['field2'])
self.assertNotIn('db_field1', dev_dict)
self.assertNotIn('db_field2', dev_dict)
# Unless they are not meant to be
dev_dict = block_device.BlockDeviceDict({'field1': 'foo'},
do_not_default=set(['field2']))
self.assertIn('field1', dev_dict)
self.assertNotIn('field2', dev_dict)
self.assertNotIn('db_field1', dev_dict)
self.assertNotIn('db_field2', dev_dict)
# Passing kwargs to constructor works
dev_dict = block_device.BlockDeviceDict(field1='foo')
self.assertIn('field1', dev_dict)
self.assertIn('field2', dev_dict)
self.assertIsNone(dev_dict['field2'])
dev_dict = block_device.BlockDeviceDict(
{'field1': 'foo'}, field2='bar')
self.assertEqual('foo', dev_dict['field1'])
self.assertEqual('bar', dev_dict['field2'])
def test_init_prepend_dev_to_device_name(self):
bdm = {'id': 3, 'instance_uuid': uuids.instance,
'device_name': 'vda',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'boot_index': 0}
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertEqual('/dev/vda', bdm_dict['device_name'])
bdm['device_name'] = '/dev/vdb'
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertEqual('/dev/vdb', bdm_dict['device_name'])
bdm['device_name'] = None
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertIsNone(bdm_dict['device_name'])
def test_init_boolify_delete_on_termination(self):
# Make sure that when delete_on_termination is not passed it's
# still set to False and not None
bdm = {'id': 3, 'instance_uuid': uuids.instance,
'device_name': 'vda',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'boot_index': 0}
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertFalse(bdm_dict['delete_on_termination'])
def test_validate(self):
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
{'bogus_field': 'lame_val'})
lame_bdm = dict(self.new_mapping[2])
del lame_bdm['source_type']
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_bdm)
lame_bdm['no_device'] = True
block_device.BlockDeviceDict(lame_bdm)
lame_dev_bdm = dict(self.new_mapping[2])
lame_dev_bdm['device_name'] = "not a valid name"
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_dev_bdm)
lame_dev_bdm['device_name'] = ""
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_dev_bdm)
cool_volume_size_bdm = dict(self.new_mapping[2])
cool_volume_size_bdm['volume_size'] = '42'
cool_volume_size_bdm = block_device.BlockDeviceDict(
cool_volume_size_bdm)
self.assertEqual(42, cool_volume_size_bdm['volume_size'])
lame_volume_size_bdm = dict(self.new_mapping[2])
lame_volume_size_bdm['volume_size'] = 'some_non_int_string'
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_volume_size_bdm)
truthy_bdm = dict(self.new_mapping[2])
truthy_bdm['delete_on_termination'] = '1'
truthy_bdm = block_device.BlockDeviceDict(truthy_bdm)
self.assertTrue(truthy_bdm['delete_on_termination'])
verbose_bdm = dict(self.new_mapping[2])
verbose_bdm['boot_index'] = 'first'
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
verbose_bdm)
def test_from_legacy(self):
for legacy, new in zip(self.legacy_mapping, self.new_mapping):
self.assertThat(
block_device.BlockDeviceDict.from_legacy(legacy),
matchers.IsSubDictOf(new))
def test_from_legacy_mapping(self):
def _get_image_bdms(bdms):
return [bdm for bdm in bdms if bdm['source_type'] == 'image']
def _get_bootable_bdms(bdms):
return [bdm for bdm in bdms
if (bdm['boot_index'] is not None and
bdm['boot_index'] >= 0)]
new_no_img = block_device.from_legacy_mapping(self.legacy_mapping)
self.assertEqual(0, len(_get_image_bdms(new_no_img)))
for new, expected in zip(new_no_img, self.new_mapping):
self.assertThat(new, matchers.IsSubDictOf(expected))
new_with_img = block_device.from_legacy_mapping(
self.legacy_mapping, 'fake_image_ref')
image_bdms = _get_image_bdms(new_with_img)
boot_bdms = _get_bootable_bdms(new_with_img)
self.assertEqual(1, len(image_bdms))
self.assertEqual(1, len(boot_bdms))
self.assertEqual(0, image_bdms[0]['boot_index'])
self.assertEqual('image', boot_bdms[0]['source_type'])
new_with_img_and_root = block_device.from_legacy_mapping(
self.legacy_mapping, 'fake_image_ref', 'sda1')
image_bdms = _get_image_bdms(new_with_img_and_root)
boot_bdms = _get_bootable_bdms(new_with_img_and_root)
self.assertEqual(0, len(image_bdms))
self.assertEqual(1, len(boot_bdms))
self.assertEqual(0, boot_bdms[0]['boot_index'])
self.assertEqual('volume', boot_bdms[0]['source_type'])
new_no_root = block_device.from_legacy_mapping(
self.legacy_mapping, 'fake_image_ref', 'sda1', no_root=True)
self.assertEqual(0, len(_get_image_bdms(new_no_root)))
self.assertEqual(0, len(_get_bootable_bdms(new_no_root)))
def test_from_api(self):
for api, new in zip(self.api_mapping, self.new_mapping):
new['connection_info'] = None
if new['snapshot_id']:
new['volume_id'] = None
self.assertThat(
block_device.BlockDeviceDict.from_api(api, False),
matchers.IsSubDictOf(new))
def test_from_api_invalid_blank_id(self):
api_dict = {'id': 1,
'source_type': 'blank',
'destination_type': 'volume',
'uuid': 'fake-volume-id-1',
'delete_on_termination': True,
'boot_index': -1}
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict.from_api, api_dict,
False)
def test_from_api_invalid_source_to_local_mapping(self):
api_dict = {'id': 1,
'source_type': 'image',
'destination_type': 'local',
'uuid': 'fake-volume-id-1'}
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict.from_api, api_dict,
False)
def test_from_api_valid_source_to_local_mapping(self):
api_dict = {'id': 1,
'source_type': 'image',
'destination_type': 'local',
'volume_id': 'fake-volume-id-1',
'uuid': 1,
'boot_index': 0}
retexp = block_device.BlockDeviceDict(
{'id': 1,
'source_type': 'image',
'image_id': 1,
'destination_type': 'local',
'volume_id': 'fake-volume-id-1',
'boot_index': 0})
self.assertEqual(retexp,
block_device.BlockDeviceDict.from_api(api_dict, True))
def test_from_api_valid_source_to_local_mapping_with_string_bi(self):
api_dict = {'id': 1,
'source_type': 'image',
'destination_type': 'local',
'volume_id': 'fake-volume-id-1',
'uuid': 1,
'boot_index': '0'}
retexp = block_device.BlockDeviceDict(
{'id': 1,
'source_type': 'image',
'image_id': 1,
'destination_type': 'local',
'volume_id': 'fake-volume-id-1',
'boot_index': 0})
self.assertEqual(retexp,
block_device.BlockDeviceDict.from_api(api_dict, True))
def test_from_api_invalid_image_to_destination_local_mapping(self):
api_dict = {'id': 1,
'source_type': 'image',
'destination_type': 'local',
'uuid': 'fake-volume-id-1',
'volume_type': 'fake-lvm-1',
'boot_index': 1}
ex = self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict.from_api,
api_dict, False)
self.assertIn('Mapping image to local is not supported', str(ex))
def test_from_api_invalid_volume_type_to_destination_local_mapping(self):
api_dict = {'id': 1,
'source_type': 'volume',
'destination_type': 'local',
'uuid': 'fake-volume-id-1',
'volume_type': 'fake-lvm-1'}
ex = self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict.from_api,
api_dict, False)
self.assertIn('Specifying a volume_type with destination_type=local '
'is not supported', str(ex))
def test_from_api_invalid_specify_volume_type_with_source_volume_mapping(
self):
api_dict = {'id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'uuid': 'fake-volume-id-1',
'volume_type': 'fake-lvm-1'}
ex = self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict.from_api,
api_dict, False)
self.assertIn('Specifying volume type to existing volume is '
'not supported', str(ex))
def test_legacy(self):
for legacy, new in zip(self.legacy_mapping, self.new_mapping):
self.assertThat(
legacy,
matchers.IsSubDictOf(new.legacy()))
def test_legacy_mapping(self):
got_legacy = block_device.legacy_mapping(self.new_mapping)
for legacy, expected in zip(got_legacy, self.legacy_mapping):
self.assertThat(expected, matchers.IsSubDictOf(legacy))
def test_legacy_source_image(self):
for legacy, new in zip(self.legacy_mapping_source_image,
self.new_mapping_source_image):
if new['destination_type'] == 'volume':
self.assertThat(legacy, matchers.IsSubDictOf(new.legacy()))
else:
self.assertRaises(exception.InvalidBDMForLegacy, new.legacy)
def test_legacy_mapping_source_image(self):
got_legacy = block_device.legacy_mapping(self.new_mapping)
for legacy, expected in zip(got_legacy, self.legacy_mapping):
self.assertThat(expected, matchers.IsSubDictOf(legacy))
def test_legacy_mapping_from_object_list(self):
bdm1 = objects.BlockDeviceMapping()
bdm1 = objects.BlockDeviceMapping._from_db_object(
None, bdm1, fake_block_device.FakeDbBlockDeviceDict(
self.new_mapping[0]))
bdm2 = objects.BlockDeviceMapping()
bdm2 = objects.BlockDeviceMapping._from_db_object(
None, bdm2, fake_block_device.FakeDbBlockDeviceDict(
self.new_mapping[1]))
bdmlist = objects.BlockDeviceMappingList()
bdmlist.objects = [bdm1, bdm2]
block_device.legacy_mapping(bdmlist)
def test_image_mapping(self):
removed_fields = ['id', 'instance_uuid', 'connection_info',
'created_at', 'updated_at', 'deleted_at', 'deleted']
for bdm in self.new_mapping:
mapping_bdm = fake_block_device.FakeDbBlockDeviceDict(
bdm).get_image_mapping()
for fld in removed_fields:
self.assertNotIn(fld, mapping_bdm)
def _test_snapshot_from_bdm(self, template):
snapshot = block_device.snapshot_from_bdm('new-snapshot-id', template)
self.assertEqual('new-snapshot-id', snapshot['snapshot_id'])
self.assertEqual('snapshot', snapshot['source_type'])
self.assertEqual('volume', snapshot['destination_type'])
self.assertEqual(template.volume_size, snapshot['volume_size'])
self.assertEqual(template.delete_on_termination,
snapshot['delete_on_termination'])
self.assertEqual(template.device_name, snapshot['device_name'])
for key in ['disk_bus', 'device_type', 'boot_index']:
self.assertEqual(template[key], snapshot[key])
def test_snapshot_from_bdm(self):
for bdm in self.new_mapping:
self._test_snapshot_from_bdm(objects.BlockDeviceMapping(**bdm))
def test_snapshot_from_object(self):
for bdm in self.new_mapping[:-1]:
obj = objects.BlockDeviceMapping()
obj = objects.BlockDeviceMapping._from_db_object(
None, obj, fake_block_device.FakeDbBlockDeviceDict(
bdm))
self._test_snapshot_from_bdm(obj)
class GetBDMImageMetadataTestCase(test.NoDBTestCase):
def setUp(self):
super().setUp()
self.compute_api = compute_api.API()
self.context = context.RequestContext('fake', 'fake')
def _test_get_bdm_image_metadata__bootable(self, is_bootable=False):
block_device_mapping = [{
'id': 1,
'device_name': 'vda',
'no_device': None,
'virtual_name': None,
'snapshot_id': None,
'volume_id': '1',
'delete_on_termination': False,
}]
expected_meta = {
'min_disk': 0, 'min_ram': 0, 'properties': {}, 'size': 0,
'status': 'active',
}
def get_vol_data(*args, **kwargs):
return {'bootable': is_bootable}
with mock.patch.object(
self.compute_api.volume_api, 'get', side_effect=get_vol_data,
):
if not is_bootable:
self.assertRaises(
exception.InvalidBDMVolumeNotBootable,
block_device.get_bdm_image_metadata,
self.context,
self.compute_api.image_api,
self.compute_api.volume_api,
block_device_mapping)
else:
meta = block_device.get_bdm_image_metadata(
self.context, self.compute_api.image_api,
self.compute_api.volume_api, block_device_mapping)
self.assertEqual(expected_meta, meta)
def test_get_bdm_image_metadata__non_bootable(self):
self._test_get_bdm_image_metadata__bootable(False)
def test_get_bdm_image_metadata__bootable(self):
self._test_get_bdm_image_metadata__bootable(True)
def test_get_bdm_image_metadata__basic_property(self):
block_device_mapping = [{
'id': 1,
'device_name': 'vda',
'no_device': None,
'virtual_name': None,
'snapshot_id': None,
'volume_id': '1',
'delete_on_termination': False,
}]
fake_volume = {
'volume_image_metadata': {
'min_ram': 256, 'min_disk': 128, 'foo': 'bar',
},
}
with mock.patch.object(
self.compute_api.volume_api, 'get', return_value=fake_volume,
):
meta = block_device.get_bdm_image_metadata(
self.context, self.compute_api.image_api,
self.compute_api.volume_api, block_device_mapping)
self.assertEqual(256, meta['min_ram'])
self.assertEqual(128, meta['min_disk'])
self.assertEqual('active', meta['status'])
self.assertEqual('bar', meta['properties']['foo'])
def test_get_bdm_image_metadata__snapshot_basic_property(self):
block_device_mapping = [{
'id': 1,
'device_name': 'vda',
'no_device': None,
'virtual_name': None,
'snapshot_id': '2',
'volume_id': None,
'delete_on_termination': False,
}]
fake_volume = {
'volume_image_metadata': {
'min_ram': 256, 'min_disk': 128, 'foo': 'bar',
},
}
fake_snapshot = {'volume_id': '1'}
with test.nested(
mock.patch.object(
self.compute_api.volume_api, 'get',
return_value=fake_volume),
mock.patch.object(
self.compute_api.volume_api, 'get_snapshot',
return_value=fake_snapshot),
) as (volume_get, volume_get_snapshot):
meta = block_device.get_bdm_image_metadata(
self.context, self.compute_api.image_api,
self.compute_api.volume_api, block_device_mapping)
self.assertEqual(256, meta['min_ram'])
self.assertEqual(128, meta['min_disk'])
self.assertEqual('active', meta['status'])
self.assertEqual('bar', meta['properties']['foo'])
volume_get_snapshot.assert_called_once_with(
self.context, block_device_mapping[0]['snapshot_id'])
volume_get.assert_called_once_with(
self.context, fake_snapshot['volume_id'])
@mock.patch.object(
cinder.API, 'get',
side_effect=exception.CinderConnectionFailed(reason='error'))
def test_get_bdm_image_metadata__cinder_down(self, mock_get):
bdms = [
objects.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict({
'id': 1,
'volume_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
})
)
]
self.assertRaises(
exception.CinderConnectionFailed,
block_device.get_bdm_image_metadata,
self.context,
self.compute_api.image_api,
self.compute_api.volume_api,
bdms, legacy_bdm=True)
class GetImageMetadataFromVolumeTestCase(test.NoDBTestCase):
def test_inherit_image_properties(self):
properties = {'fake_prop': 'fake_value'}
volume = {'volume_image_metadata': properties}
image_meta = block_device.get_image_metadata_from_volume(volume)
self.assertEqual(properties, image_meta['properties'])
def test_image_size(self):
volume = {'size': 10}
image_meta = block_device.get_image_metadata_from_volume(volume)
self.assertEqual(10 * units.Gi, image_meta['size'])
def test_image_status(self):
volume = {}
image_meta = block_device.get_image_metadata_from_volume(volume)
self.assertEqual('active', image_meta['status'])
def test_values_conversion(self):
properties = {'min_ram': '5', 'min_disk': '7'}
volume = {'volume_image_metadata': properties}
image_meta = block_device.get_image_metadata_from_volume(volume)
self.assertEqual(5, image_meta['min_ram'])
self.assertEqual(7, image_meta['min_disk'])
def test_suppress_not_image_properties(self):
properties = {
'min_ram': '256', 'min_disk': '128', 'image_id': 'fake_id',
'image_name': 'fake_name', 'container_format': 'ami',
'disk_format': 'ami', 'size': '1234', 'checksum': 'fake_checksum',
}
volume = {'volume_image_metadata': properties}
image_meta = block_device.get_image_metadata_from_volume(volume)
self.assertEqual({}, image_meta['properties'])
self.assertEqual(0, image_meta['size'])
# volume's properties should not be touched
self.assertNotEqual({}, properties)
| apache-2.0 |
cdegroc/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 52 | 8004 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString
from docscrape import FunctionDoc
from docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
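        # Signature rendering is effectively disabled by the early return above;
        # the branch below is unreachable.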
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
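# Usage sketch (illustrative only): get_doc_object() picks the appropriate wrapper for
# a class, function or plain object, and str() on the result yields the reST rendering
# of a numpydoc-style docstring. For some function ``add`` documented with a numpydoc
# "Parameters" section:
#
#   doc = get_doc_object(add, config={'use_plots': False})
#   rst = str(doc)   # reStructuredText ready for Sphinx to render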
| bsd-3-clause |
bixbydev/Bixby | google/gdata-2.0.18/build/lib.linux-x86_64-2.7/gdata/tlslite/utils/codec.py | 361 | 2771 | """Classes for reading/writing binary data (such as TLS records)."""
from compat import *
class Writer:
def __init__(self, length=0):
#If length is zero, then this is just a "trial run" to determine length
self.index = 0
self.bytes = createByteArrayZeros(length)
def add(self, x, length):
if self.bytes:
newIndex = self.index+length-1
while newIndex >= self.index:
self.bytes[newIndex] = x & 0xFF
x >>= 8
newIndex -= 1
self.index += length
def addFixSeq(self, seq, length):
if self.bytes:
for e in seq:
self.add(e, length)
else:
self.index += len(seq)*length
def addVarSeq(self, seq, length, lengthLength):
if self.bytes:
self.add(len(seq)*length, lengthLength)
for e in seq:
self.add(e, length)
else:
self.index += lengthLength + (len(seq)*length)
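# Usage sketch (illustrative only): Writer is typically driven in two passes -- a
# zero-length "trial run" to measure the encoding, then a real pass into a buffer of
# exactly that size.
#
#   trial = Writer()
#   trial.add(22, 1)          # e.g. a one-byte content-type field
#   trial.add(0x0301, 2)      # e.g. a two-byte version field
#   w = Writer(trial.index)   # allocate the measured length
#   w.add(22, 1)
#   w.add(0x0301, 2)
#   encoded = w.bytes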
class Parser:
def __init__(self, bytes):
self.bytes = bytes
self.index = 0
def get(self, length):
if self.index + length > len(self.bytes):
raise SyntaxError()
x = 0
for count in range(length):
x <<= 8
x |= self.bytes[self.index]
self.index += 1
return x
def getFixBytes(self, lengthBytes):
bytes = self.bytes[self.index : self.index+lengthBytes]
self.index += lengthBytes
return bytes
def getVarBytes(self, lengthLength):
lengthBytes = self.get(lengthLength)
return self.getFixBytes(lengthBytes)
def getFixList(self, length, lengthList):
l = [0] * lengthList
for x in range(lengthList):
l[x] = self.get(length)
return l
def getVarList(self, length, lengthLength):
lengthList = self.get(lengthLength)
if lengthList % length != 0:
raise SyntaxError()
lengthList = int(lengthList/length)
l = [0] * lengthList
for x in range(lengthList):
l[x] = self.get(length)
return l
def startLengthCheck(self, lengthLength):
self.lengthCheck = self.get(lengthLength)
self.indexCheck = self.index
def setLengthCheck(self, length):
self.lengthCheck = length
self.indexCheck = self.index
def stopLengthCheck(self):
if (self.index - self.indexCheck) != self.lengthCheck:
raise SyntaxError()
def atLengthCheck(self):
if (self.index - self.indexCheck) < self.lengthCheck:
return False
elif (self.index - self.indexCheck) == self.lengthCheck:
return True
else:
            raise SyntaxError()
| gpl-3.0 |
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/structured/labs/serving/application/lib/googleapiclient/http.py | 16 | 57329 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to encapsulate a single HTTP request.
The classes implement a command pattern, with every
object supporting an execute() method that does the
actual HTTP request.
"""
from __future__ import absolute_import
import six
from six.moves import http_client
from six.moves import range
__author__ = '[email protected] (Joe Gregorio)'
from six import BytesIO, StringIO
from six.moves.urllib.parse import urlparse, urlunparse, quote, unquote
import base64
import copy
import gzip
import httplib2
import json
import logging
import mimetypes
import os
import random
import socket
import sys
import time
import uuid
# TODO(issue 221): Remove this conditional import.
try:
import ssl
except ImportError:
_ssl_SSLError = object()
else:
_ssl_SSLError = ssl.SSLError
from email.generator import Generator
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.parser import FeedParser
# Oauth2client < 3 has the positional helper in 'util', >= 3 has it
# in '_helpers'.
try:
from oauth2client import util
except ImportError:
from oauth2client import _helpers as util
from googleapiclient import mimeparse
from googleapiclient.errors import BatchError
from googleapiclient.errors import HttpError
from googleapiclient.errors import InvalidChunkSizeError
from googleapiclient.errors import ResumableUploadError
from googleapiclient.errors import UnexpectedBodyError
from googleapiclient.errors import UnexpectedMethodError
from googleapiclient.model import JsonModel
LOGGER = logging.getLogger(__name__)
DEFAULT_CHUNK_SIZE = 512*1024
MAX_URI_LENGTH = 2048
_TOO_MANY_REQUESTS = 429
DEFAULT_HTTP_TIMEOUT_SEC = 60
def _should_retry_response(resp_status, content):
"""Determines whether a response should be retried.
Args:
resp_status: The response status received.
content: The response content body.
Returns:
True if the response should be retried, otherwise False.
"""
# Retry on 5xx errors.
if resp_status >= 500:
return True
# Retry on 429 errors.
if resp_status == _TOO_MANY_REQUESTS:
return True
# For 403 errors, we have to check for the `reason` in the response to
# determine if we should retry.
if resp_status == six.moves.http_client.FORBIDDEN:
# If there's no details about the 403 type, don't retry.
if not content:
return False
# Content is in JSON format.
try:
data = json.loads(content.decode('utf-8'))
reason = data['error']['errors'][0]['reason']
except (UnicodeDecodeError, ValueError, KeyError):
LOGGER.warning('Invalid JSON content from response: %s', content)
return False
LOGGER.warning('Encountered 403 Forbidden with reason "%s"', reason)
# Only retry on rate limit related failures.
if reason in ('userRateLimitExceeded', 'rateLimitExceeded', ):
return True
# Everything else is a success or non-retriable so break.
return False
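# Illustrative behaviour (sketch, not exhaustive):
#
#   _should_retry_response(503, b'')   -> True   (any 5xx is retried)
#   _should_retry_response(429, b'')   -> True   (rate limiting)
#   _should_retry_response(404, b'{}') -> False  (other client errors are not retried)
#   _should_retry_response(
#       403, b'{"error": {"errors": [{"reason": "rateLimitExceeded"}]}}') -> True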
def _retry_request(http, num_retries, req_type, sleep, rand, uri, method, *args,
**kwargs):
"""Retries an HTTP request multiple times while handling errors.
If after all retries the request still fails, last error is either returned as
return value (for HTTP 5xx errors) or thrown (for ssl.SSLError).
Args:
http: Http object to be used to execute request.
num_retries: Maximum number of retries.
req_type: Type of the request (used for logging retries).
sleep, rand: Functions to sleep for random time between retries.
uri: URI to be requested.
method: HTTP method to be used.
args, kwargs: Additional arguments passed to http.request.
Returns:
resp, content - Response from the http request (may be HTTP 5xx).
"""
resp = None
content = None
for retry_num in range(num_retries + 1):
if retry_num > 0:
# Sleep before retrying.
sleep_time = rand() * 2 ** retry_num
LOGGER.warning(
'Sleeping %.2f seconds before retry %d of %d for %s: %s %s, after %s',
sleep_time, retry_num, num_retries, req_type, method, uri,
resp.status if resp else exception)
sleep(sleep_time)
try:
exception = None
resp, content = http.request(uri, method, *args, **kwargs)
# Retry on SSL errors and socket timeout errors.
except _ssl_SSLError as ssl_error:
exception = ssl_error
except socket.error as socket_error:
# errno's contents differ by platform, so we have to match by name.
if socket.errno.errorcode.get(socket_error.errno) not in (
'WSAETIMEDOUT', 'ETIMEDOUT', 'EPIPE', 'ECONNABORTED', ):
raise
exception = socket_error
if exception:
if retry_num == num_retries:
raise exception
else:
continue
if not _should_retry_response(resp.status, content):
break
return resp, content
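# Illustrative call (sketch): this mirrors how execute() and next_chunk() use the
# helper -- up to num_retries attempts, sleeping rand() * 2**retry_num seconds
# between attempts.
#
#   resp, content = _retry_request(
#       httplib2.Http(), 3, 'request', time.sleep, random.random,
#       'https://www.googleapis.com/discovery/v1/apis', 'GET')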
class MediaUploadProgress(object):
"""Status of a resumable upload."""
def __init__(self, resumable_progress, total_size):
"""Constructor.
Args:
resumable_progress: int, bytes sent so far.
total_size: int, total bytes in complete upload, or None if the total
upload size isn't known ahead of time.
"""
self.resumable_progress = resumable_progress
self.total_size = total_size
def progress(self):
"""Percent of upload completed, as a float.
Returns:
the percentage complete as a float, returning 0.0 if the total size of
the upload is unknown.
"""
if self.total_size is not None:
return float(self.resumable_progress) / float(self.total_size)
else:
return 0.0
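# Worked example (sketch): after the first 512 KiB of a 2 MiB upload,
#
#   MediaUploadProgress(512 * 1024, 2 * 1024 * 1024).progress()  -> 0.25
#
# and progress() reports 0.0 while the total size is still unknown (None).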
class MediaDownloadProgress(object):
"""Status of a resumable download."""
def __init__(self, resumable_progress, total_size):
"""Constructor.
Args:
resumable_progress: int, bytes received so far.
total_size: int, total bytes in complete download.
"""
self.resumable_progress = resumable_progress
self.total_size = total_size
def progress(self):
"""Percent of download completed, as a float.
Returns:
the percentage complete as a float, returning 0.0 if the total size of
the download is unknown.
"""
if self.total_size is not None:
return float(self.resumable_progress) / float(self.total_size)
else:
return 0.0
class MediaUpload(object):
"""Describes a media object to upload.
Base class that defines the interface of MediaUpload subclasses.
Note that subclasses of MediaUpload may allow you to control the chunksize
when uploading a media object. It is important to keep the size of the chunk
as large as possible to keep the upload efficient. Other factors may influence
the size of the chunk you use, particularly if you are working in an
environment where individual HTTP requests may have a hardcoded time limit,
such as under certain classes of requests under Google App Engine.
Streams are io.Base compatible objects that support seek(). Some MediaUpload
subclasses support using streams directly to upload data. Support for
streaming may be indicated by a MediaUpload sub-class and if appropriate for a
platform that stream will be used for uploading the media object. The support
for streaming is indicated by has_stream() returning True. The stream() method
should return an io.Base object that supports seek(). On platforms where the
underlying httplib module supports streaming, for example Python 2.6 and
later, the stream will be passed into the http library which will result in
less memory being used and possibly faster uploads.
If you need to upload media that can't be uploaded using any of the existing
MediaUpload sub-class then you can sub-class MediaUpload for your particular
needs.
"""
def chunksize(self):
"""Chunk size for resumable uploads.
Returns:
Chunk size in bytes.
"""
raise NotImplementedError()
def mimetype(self):
"""Mime type of the body.
Returns:
Mime type.
"""
return 'application/octet-stream'
def size(self):
"""Size of upload.
Returns:
Size of the body, or None of the size is unknown.
"""
return None
def resumable(self):
"""Whether this upload is resumable.
Returns:
True if resumable upload or False.
"""
return False
  def getbytes(self, begin, length):
"""Get bytes from the media.
Args:
begin: int, offset from beginning of file.
length: int, number of bytes to read, starting at begin.
Returns:
A string of bytes read. May be shorter than length if EOF was reached
first.
"""
raise NotImplementedError()
def has_stream(self):
"""Does the underlying upload support a streaming interface.
Streaming means it is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
Returns:
True if the call to stream() will return an instance of a seekable io.Base
subclass.
"""
return False
def stream(self):
"""A stream interface to the data being uploaded.
Returns:
The returned value is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
"""
raise NotImplementedError()
@util.positional(1)
def _to_json(self, strip=None):
"""Utility function for creating a JSON representation of a MediaUpload.
Args:
strip: array, An array of names of members to not include in the JSON.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
t = type(self)
d = copy.copy(self.__dict__)
if strip is not None:
for member in strip:
del d[member]
d['_class'] = t.__name__
d['_module'] = t.__module__
return json.dumps(d)
def to_json(self):
"""Create a JSON representation of an instance of MediaUpload.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
return self._to_json()
@classmethod
def new_from_json(cls, s):
"""Utility class method to instantiate a MediaUpload subclass from a JSON
representation produced by to_json().
Args:
s: string, JSON from to_json().
Returns:
An instance of the subclass of MediaUpload that was serialized with
to_json().
"""
data = json.loads(s)
# Find and call the right classmethod from_json() to restore the object.
module = data['_module']
m = __import__(module, fromlist=module.split('.')[:-1])
kls = getattr(m, data['_class'])
from_json = getattr(kls, 'from_json')
return from_json(s)
class MediaIoBaseUpload(MediaUpload):
"""A MediaUpload for a io.Base objects.
Note that the Python file object is compatible with io.Base and can be used
with this class also.
fh = BytesIO('...Some data to upload...')
media = MediaIoBaseUpload(fh, mimetype='image/png',
chunksize=1024*1024, resumable=True)
farm.animals().insert(
id='cow',
name='cow.png',
media_body=media).execute()
Depending on the platform you are working on, you may pass -1 as the
chunksize, which indicates that the entire file should be uploaded in a single
request. If the underlying platform supports streams, such as Python 2.6 or
later, then this can be very efficient as it avoids multiple connections, and
also avoids loading the entire file into memory before sending it. Note that
Google App Engine has a 5MB limit on request size, so you should never set
your chunksize larger than 5MB, or to -1.
"""
@util.positional(3)
def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
resumable=False):
"""Constructor.
Args:
fd: io.Base or file object, The source of the bytes to upload. MUST be
opened in blocking mode, do not use streams opened in non-blocking mode.
The given stream must be seekable, that is, it must be able to call
seek() on fd.
mimetype: string, Mime-type of the file.
chunksize: int, File will be uploaded in chunks of this many bytes. Only
used if resumable=True. Pass in a value of -1 if the file is to be
uploaded as a single chunk. Note that Google App Engine has a 5MB limit
on request size, so you should never set your chunksize larger than 5MB,
or to -1.
resumable: bool, True if this is a resumable upload. False means upload
in a single request.
"""
super(MediaIoBaseUpload, self).__init__()
self._fd = fd
self._mimetype = mimetype
if not (chunksize == -1 or chunksize > 0):
raise InvalidChunkSizeError()
self._chunksize = chunksize
self._resumable = resumable
self._fd.seek(0, os.SEEK_END)
self._size = self._fd.tell()
def chunksize(self):
"""Chunk size for resumable uploads.
Returns:
Chunk size in bytes.
"""
return self._chunksize
def mimetype(self):
"""Mime type of the body.
Returns:
Mime type.
"""
return self._mimetype
def size(self):
"""Size of upload.
Returns:
Size of the body, or None of the size is unknown.
"""
return self._size
def resumable(self):
"""Whether this upload is resumable.
Returns:
True if resumable upload or False.
"""
return self._resumable
def getbytes(self, begin, length):
"""Get bytes from the media.
Args:
begin: int, offset from beginning of file.
length: int, number of bytes to read, starting at begin.
Returns:
      A string of bytes read. May be shorter than length if EOF was reached
first.
"""
self._fd.seek(begin)
return self._fd.read(length)
def has_stream(self):
"""Does the underlying upload support a streaming interface.
Streaming means it is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
Returns:
True if the call to stream() will return an instance of a seekable io.Base
subclass.
"""
return True
def stream(self):
"""A stream interface to the data being uploaded.
Returns:
The returned value is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
"""
return self._fd
def to_json(self):
"""This upload type is not serializable."""
raise NotImplementedError('MediaIoBaseUpload is not serializable.')
class MediaFileUpload(MediaIoBaseUpload):
"""A MediaUpload for a file.
Construct a MediaFileUpload and pass as the media_body parameter of the
method. For example, if we had a service that allowed uploading images:
media = MediaFileUpload('cow.png', mimetype='image/png',
chunksize=1024*1024, resumable=True)
farm.animals().insert(
id='cow',
name='cow.png',
media_body=media).execute()
Depending on the platform you are working on, you may pass -1 as the
chunksize, which indicates that the entire file should be uploaded in a single
request. If the underlying platform supports streams, such as Python 2.6 or
later, then this can be very efficient as it avoids multiple connections, and
also avoids loading the entire file into memory before sending it. Note that
Google App Engine has a 5MB limit on request size, so you should never set
your chunksize larger than 5MB, or to -1.
"""
@util.positional(2)
def __init__(self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE,
resumable=False):
"""Constructor.
Args:
filename: string, Name of the file.
mimetype: string, Mime-type of the file. If None then a mime-type will be
guessed from the file extension.
chunksize: int, File will be uploaded in chunks of this many bytes. Only
used if resumable=True. Pass in a value of -1 if the file is to be
uploaded in a single chunk. Note that Google App Engine has a 5MB limit
on request size, so you should never set your chunksize larger than 5MB,
or to -1.
resumable: bool, True if this is a resumable upload. False means upload
in a single request.
"""
self._filename = filename
fd = open(self._filename, 'rb')
if mimetype is None:
# No mimetype provided, make a guess.
mimetype, _ = mimetypes.guess_type(filename)
if mimetype is None:
# Guess failed, use octet-stream.
mimetype = 'application/octet-stream'
super(MediaFileUpload, self).__init__(fd, mimetype, chunksize=chunksize,
resumable=resumable)
def to_json(self):
"""Creating a JSON representation of an instance of MediaFileUpload.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
return self._to_json(strip=['_fd'])
@staticmethod
def from_json(s):
d = json.loads(s)
return MediaFileUpload(d['_filename'], mimetype=d['_mimetype'],
chunksize=d['_chunksize'], resumable=d['_resumable'])
class MediaInMemoryUpload(MediaIoBaseUpload):
"""MediaUpload for a chunk of bytes.
DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
the stream.
"""
@util.positional(2)
def __init__(self, body, mimetype='application/octet-stream',
chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
"""Create a new MediaInMemoryUpload.
DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
the stream.
Args:
body: string, Bytes of body content.
mimetype: string, Mime-type of the file or default of
'application/octet-stream'.
chunksize: int, File will be uploaded in chunks of this many bytes. Only
used if resumable=True.
resumable: bool, True if this is a resumable upload. False means upload
in a single request.
"""
fd = BytesIO(body)
super(MediaInMemoryUpload, self).__init__(fd, mimetype, chunksize=chunksize,
resumable=resumable)
class MediaIoBaseDownload(object):
""""Download media resources.
Note that the Python file object is compatible with io.Base and can be used
with this class also.
Example:
request = farms.animals().get_media(id='cow')
fh = io.FileIO('cow.png', mode='wb')
downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)
done = False
while done is False:
status, done = downloader.next_chunk()
if status:
print "Download %d%%." % int(status.progress() * 100)
print "Download Complete!"
"""
@util.positional(3)
def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
"""Constructor.
Args:
fd: io.Base or file object, The stream in which to write the downloaded
bytes.
request: googleapiclient.http.HttpRequest, the media request to perform in
chunks.
chunksize: int, File will be downloaded in chunks of this many bytes.
"""
self._fd = fd
self._request = request
self._uri = request.uri
self._chunksize = chunksize
self._progress = 0
self._total_size = None
self._done = False
# Stubs for testing.
self._sleep = time.sleep
self._rand = random.random
@util.positional(1)
def next_chunk(self, num_retries=0):
"""Get the next chunk of the download.
Args:
num_retries: Integer, number of times to retry with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
(status, done): (MediaDownloadStatus, boolean)
The value of 'done' will be True when the media has been fully
downloaded.
Raises:
googleapiclient.errors.HttpError if the response was not a 2xx.
      httplib2.HttpLib2Error if a transport error has occurred.
"""
headers = {
'range': 'bytes=%d-%d' % (
self._progress, self._progress + self._chunksize)
}
http = self._request.http
resp, content = _retry_request(
http, num_retries, 'media download', self._sleep, self._rand, self._uri,
'GET', headers=headers)
if resp.status in [200, 206]:
if 'content-location' in resp and resp['content-location'] != self._uri:
self._uri = resp['content-location']
self._progress += len(content)
self._fd.write(content)
if 'content-range' in resp:
content_range = resp['content-range']
length = content_range.rsplit('/', 1)[1]
self._total_size = int(length)
elif 'content-length' in resp:
self._total_size = int(resp['content-length'])
if self._progress == self._total_size:
self._done = True
return MediaDownloadProgress(self._progress, self._total_size), self._done
else:
raise HttpError(resp, content, uri=self._uri)
class _StreamSlice(object):
"""Truncated stream.
Takes a stream and presents a stream that is a slice of the original stream.
This is used when uploading media in chunks. In later versions of Python a
stream can be passed to httplib in place of the string of data to send. The
problem is that httplib just blindly reads to the end of the stream. This
wrapper presents a virtual stream that only reads to the end of the chunk.
"""
def __init__(self, stream, begin, chunksize):
"""Constructor.
Args:
stream: (io.Base, file object), the stream to wrap.
begin: int, the seek position the chunk begins at.
chunksize: int, the size of the chunk.
"""
self._stream = stream
self._begin = begin
self._chunksize = chunksize
self._stream.seek(begin)
def read(self, n=-1):
"""Read n bytes.
Args:
n, int, the number of bytes to read.
Returns:
A string of length 'n', or less if EOF is reached.
"""
# The data left available to read sits in [cur, end)
cur = self._stream.tell()
end = self._begin + self._chunksize
if n == -1 or cur + n > end:
n = end - cur
return self._stream.read(n)
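# Usage sketch (illustrative only): _StreamSlice caps reads at the chunk boundary, so
# only one chunk of the underlying stream is sent per request.
#
#   fh = BytesIO(b'0123456789')
#   chunk = _StreamSlice(fh, 4, 3)
#   chunk.read()   # -> b'456'
#   chunk.read()   # -> b''  (slice exhausted, although the stream holds more data)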
class HttpRequest(object):
"""Encapsulates a single HTTP request."""
@util.positional(4)
def __init__(self, http, postproc, uri,
method='GET',
body=None,
headers=None,
methodId=None,
resumable=None):
"""Constructor for an HttpRequest.
Args:
http: httplib2.Http, the transport object to use to make a request
postproc: callable, called on the HTTP response and content to transform
it into a data object before returning, or raising an exception
on an error.
uri: string, the absolute URI to send the request to
method: string, the HTTP method to use
body: string, the request body of the HTTP request,
headers: dict, the HTTP request headers
methodId: string, a unique identifier for the API method being called.
      resumable: MediaUpload, None if this is not a resumable request.
"""
self.uri = uri
self.method = method
self.body = body
self.headers = headers or {}
self.methodId = methodId
self.http = http
self.postproc = postproc
self.resumable = resumable
self.response_callbacks = []
self._in_error_state = False
# Pull the multipart boundary out of the content-type header.
major, minor, params = mimeparse.parse_mime_type(
self.headers.get('content-type', 'application/json'))
# The size of the non-media part of the request.
self.body_size = len(self.body or '')
# The resumable URI to send chunks to.
self.resumable_uri = None
# The bytes that have been uploaded.
self.resumable_progress = 0
# Stubs for testing.
self._rand = random.random
self._sleep = time.sleep
@util.positional(1)
def execute(self, http=None, num_retries=0):
"""Execute the request.
Args:
http: httplib2.Http, an http object to be used in place of the
one the HttpRequest request object was constructed with.
num_retries: Integer, number of times to retry with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
A deserialized object model of the response body as determined
by the postproc.
Raises:
googleapiclient.errors.HttpError if the response was not a 2xx.
      httplib2.HttpLib2Error if a transport error has occurred.
"""
if http is None:
http = self.http
if self.resumable:
body = None
while body is None:
_, body = self.next_chunk(http=http, num_retries=num_retries)
return body
# Non-resumable case.
if 'content-length' not in self.headers:
self.headers['content-length'] = str(self.body_size)
# If the request URI is too long then turn it into a POST request.
if len(self.uri) > MAX_URI_LENGTH and self.method == 'GET':
self.method = 'POST'
self.headers['x-http-method-override'] = 'GET'
self.headers['content-type'] = 'application/x-www-form-urlencoded'
parsed = urlparse(self.uri)
self.uri = urlunparse(
(parsed.scheme, parsed.netloc, parsed.path, parsed.params, None,
None)
)
self.body = parsed.query
self.headers['content-length'] = str(len(self.body))
# Handle retries for server-side errors.
resp, content = _retry_request(
http, num_retries, 'request', self._sleep, self._rand, str(self.uri),
method=str(self.method), body=self.body, headers=self.headers)
for callback in self.response_callbacks:
callback(resp)
if resp.status >= 300:
raise HttpError(resp, content, uri=self.uri)
return self.postproc(resp, content)
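  # Usage sketch (illustrative only; requests are normally built for you by the
  # discovery-based service objects):
  #
  #   request = HttpRequest(httplib2.Http(), JsonModel().response,
  #                         'https://www.googleapis.com/discovery/v1/apis',
  #                         method='GET', headers={})
  #   apis = request.execute(num_retries=3)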
@util.positional(2)
def add_response_callback(self, cb):
"""add_response_headers_callback
Args:
cb: Callback to be called on receiving the response headers, of signature:
def cb(resp):
# Where resp is an instance of httplib2.Response
"""
self.response_callbacks.append(cb)
@util.positional(1)
def next_chunk(self, http=None, num_retries=0):
"""Execute the next step of a resumable upload.
Can only be used if the method being executed supports media uploads and
the MediaUpload object passed in was flagged as using resumable upload.
Example:
media = MediaFileUpload('cow.png', mimetype='image/png',
chunksize=1000, resumable=True)
request = farm.animals().insert(
id='cow',
name='cow.png',
media_body=media)
response = None
while response is None:
status, response = request.next_chunk()
if status:
print "Upload %d%% complete." % int(status.progress() * 100)
Args:
http: httplib2.Http, an http object to be used in place of the
one the HttpRequest request object was constructed with.
num_retries: Integer, number of times to retry with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
(status, body): (ResumableMediaStatus, object)
The body will be None until the resumable media is fully uploaded.
Raises:
googleapiclient.errors.HttpError if the response was not a 2xx.
httplib2.HttpLib2Error if a transport error has occurred.
"""
if http is None:
http = self.http
if self.resumable.size() is None:
size = '*'
else:
size = str(self.resumable.size())
if self.resumable_uri is None:
start_headers = copy.copy(self.headers)
start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
if size != '*':
start_headers['X-Upload-Content-Length'] = size
start_headers['content-length'] = str(self.body_size)
resp, content = _retry_request(
http, num_retries, 'resumable URI request', self._sleep, self._rand,
self.uri, method=self.method, body=self.body, headers=start_headers)
if resp.status == 200 and 'location' in resp:
self.resumable_uri = resp['location']
else:
raise ResumableUploadError(resp, content)
elif self._in_error_state:
# If we are in an error state then query the server for current state of
# the upload by sending an empty PUT and reading the 'range' header in
# the response.
headers = {
'Content-Range': 'bytes */%s' % size,
'content-length': '0'
}
resp, content = http.request(self.resumable_uri, 'PUT',
headers=headers)
status, body = self._process_response(resp, content)
if body:
# The upload was complete.
return (status, body)
if self.resumable.has_stream():
data = self.resumable.stream()
if self.resumable.chunksize() == -1:
data.seek(self.resumable_progress)
chunk_end = self.resumable.size() - self.resumable_progress - 1
else:
# Doing chunking with a stream, so wrap a slice of the stream.
data = _StreamSlice(data, self.resumable_progress,
self.resumable.chunksize())
chunk_end = min(
self.resumable_progress + self.resumable.chunksize() - 1,
self.resumable.size() - 1)
else:
data = self.resumable.getbytes(
self.resumable_progress, self.resumable.chunksize())
# A short read implies that we are at EOF, so finish the upload.
if len(data) < self.resumable.chunksize():
size = str(self.resumable_progress + len(data))
chunk_end = self.resumable_progress + len(data) - 1
headers = {
'Content-Range': 'bytes %d-%d/%s' % (
self.resumable_progress, chunk_end, size),
# Must set the content-length header here because httplib can't
# calculate the size when working with _StreamSlice.
'Content-Length': str(chunk_end - self.resumable_progress + 1)
}
for retry_num in range(num_retries + 1):
if retry_num > 0:
self._sleep(self._rand() * 2**retry_num)
LOGGER.warning(
'Retry #%d for media upload: %s %s, following status: %d'
% (retry_num, self.method, self.uri, resp.status))
try:
resp, content = http.request(self.resumable_uri, method='PUT',
body=data,
headers=headers)
except:
self._in_error_state = True
raise
if not _should_retry_response(resp.status, content):
break
return self._process_response(resp, content)
def _process_response(self, resp, content):
"""Process the response from a single chunk upload.
Args:
resp: httplib2.Response, the response object.
content: string, the content of the response.
Returns:
(status, body): (ResumableMediaStatus, object)
The body will be None until the resumable media is fully uploaded.
Raises:
googleapiclient.errors.HttpError if the response was not a 2xx or a 308.
"""
if resp.status in [200, 201]:
self._in_error_state = False
return None, self.postproc(resp, content)
elif resp.status == 308:
self._in_error_state = False
# A "308 Resume Incomplete" indicates we are not done.
try:
self.resumable_progress = int(resp['range'].split('-')[1]) + 1
except KeyError:
# If resp doesn't contain range header, resumable progress is 0
self.resumable_progress = 0
if 'location' in resp:
self.resumable_uri = resp['location']
else:
self._in_error_state = True
raise HttpError(resp, content, uri=self.uri)
return (MediaUploadProgress(self.resumable_progress, self.resumable.size()),
None)
def to_json(self):
"""Returns a JSON representation of the HttpRequest."""
d = copy.copy(self.__dict__)
if d['resumable'] is not None:
d['resumable'] = self.resumable.to_json()
del d['http']
del d['postproc']
del d['_sleep']
del d['_rand']
return json.dumps(d)
@staticmethod
def from_json(s, http, postproc):
"""Returns an HttpRequest populated with info from a JSON object."""
d = json.loads(s)
if d['resumable'] is not None:
d['resumable'] = MediaUpload.new_from_json(d['resumable'])
return HttpRequest(
http,
postproc,
uri=d['uri'],
method=d['method'],
body=d['body'],
headers=d['headers'],
methodId=d['methodId'],
resumable=d['resumable'])
class BatchHttpRequest(object):
"""Batches multiple HttpRequest objects into a single HTTP request.
Example:
from googleapiclient.http import BatchHttpRequest
def list_animals(request_id, response, exception):
\"\"\"Do something with the animals list response.\"\"\"
if exception is not None:
# Do something with the exception.
pass
else:
# Do something with the response.
pass
def list_farmers(request_id, response, exception):
\"\"\"Do something with the farmers list response.\"\"\"
if exception is not None:
# Do something with the exception.
pass
else:
# Do something with the response.
pass
service = build('farm', 'v2')
batch = BatchHttpRequest()
batch.add(service.animals().list(), list_animals)
batch.add(service.farmers().list(), list_farmers)
batch.execute(http=http)
"""
@util.positional(1)
def __init__(self, callback=None, batch_uri=None):
"""Constructor for a BatchHttpRequest.
Args:
callback: callable, A callback to be called for each response, of the
form callback(id, response, exception). The first parameter is the
request id, and the second is the deserialized response object. The
third is an googleapiclient.errors.HttpError exception object if an HTTP error
occurred while processing the request, or None if no error occurred.
batch_uri: string, URI to send batch requests to.
"""
if batch_uri is None:
batch_uri = 'https://www.googleapis.com/batch'
self._batch_uri = batch_uri
# Global callback to be called for each individual response in the batch.
self._callback = callback
# A map from id to request.
self._requests = {}
# A map from id to callback.
self._callbacks = {}
# List of request ids, in the order in which they were added.
self._order = []
# The last auto generated id.
self._last_auto_id = 0
# Unique ID on which to base the Content-ID headers.
self._base_id = None
# A map from request id to (httplib2.Response, content) response pairs
self._responses = {}
# A map of id(Credentials) that have been refreshed.
self._refreshed_credentials = {}
def _refresh_and_apply_credentials(self, request, http):
"""Refresh the credentials and apply to the request.
Args:
request: HttpRequest, the request.
http: httplib2.Http, the global http object for the batch.
"""
# For the credentials to refresh, but only once per refresh_token
# If there is no http per the request then refresh the http passed in
# via execute()
creds = None
if request.http is not None and hasattr(request.http.request,
'credentials'):
creds = request.http.request.credentials
elif http is not None and hasattr(http.request, 'credentials'):
creds = http.request.credentials
if creds is not None:
if id(creds) not in self._refreshed_credentials:
creds.refresh(http)
self._refreshed_credentials[id(creds)] = 1
# Only apply the credentials if we are using the http object passed in,
# otherwise apply() will get called during _serialize_request().
if request.http is None or not hasattr(request.http.request,
'credentials'):
creds.apply(request.headers)
def _id_to_header(self, id_):
"""Convert an id to a Content-ID header value.
Args:
id_: string, identifier of individual request.
Returns:
A Content-ID header with the id_ encoded into it. A UUID is prepended to
the value because Content-ID headers are supposed to be universally
unique.
"""
if self._base_id is None:
self._base_id = uuid.uuid4()
return '<%s+%s>' % (self._base_id, quote(id_))
def _header_to_id(self, header):
"""Convert a Content-ID header value to an id.
Presumes the Content-ID header conforms to the format that _id_to_header()
returns.
Args:
header: string, Content-ID header value.
Returns:
The extracted id value.
Raises:
BatchError if the header is not in the expected format.
"""
if header[0] != '<' or header[-1] != '>':
raise BatchError("Invalid value for Content-ID: %s" % header)
if '+' not in header:
raise BatchError("Invalid value for Content-ID: %s" % header)
base, id_ = header[1:-1].rsplit('+', 1)
return unquote(id_)
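# Illustrative round trip (assumed values, not from the original source): with
# self._base_id set to a UUID such as '3f61...', the two helpers invert each
# other:
#   self._id_to_header('42')            -> '<3f61...+42>'
#   self._header_to_id('<3f61...+42>')  -> '42'
# The UUID prefix keeps Content-ID values globally unique across batches.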
def _serialize_request(self, request):
"""Convert an HttpRequest object into a string.
Args:
request: HttpRequest, the request to serialize.
Returns:
The request as a string in application/http format.
"""
# Construct status line
parsed = urlparse(request.uri)
request_line = urlunparse(
('', '', parsed.path, parsed.params, parsed.query, '')
)
status_line = request.method + ' ' + request_line + ' HTTP/1.1\n'
major, minor = request.headers.get('content-type', 'application/json').split('/')
msg = MIMENonMultipart(major, minor)
headers = request.headers.copy()
if request.http is not None and hasattr(request.http.request,
'credentials'):
request.http.request.credentials.apply(headers)
# MIMENonMultipart adds its own Content-Type header.
if 'content-type' in headers:
del headers['content-type']
for key, value in six.iteritems(headers):
msg[key] = value
msg['Host'] = parsed.netloc
msg.set_unixfrom(None)
if request.body is not None:
msg.set_payload(request.body)
msg['content-length'] = str(len(request.body))
# Serialize the mime message.
fp = StringIO()
# maxheaderlen=0 means don't line wrap headers.
g = Generator(fp, maxheaderlen=0)
g.flatten(msg, unixfrom=False)
body = fp.getvalue()
return status_line + body
def _deserialize_response(self, payload):
"""Convert string into httplib2 response and content.
Args:
payload: string, headers and body as a string.
Returns:
A pair (resp, content), such as would be returned from httplib2.request.
"""
# Strip off the status line
status_line, payload = payload.split('\n', 1)
protocol, status, reason = status_line.split(' ', 2)
# Parse the rest of the response
parser = FeedParser()
parser.feed(payload)
msg = parser.close()
msg['status'] = status
# Create httplib2.Response from the parsed headers.
resp = httplib2.Response(msg)
resp.reason = reason
resp.version = int(protocol.split('/', 1)[1].replace('.', ''))
content = payload.split('\r\n\r\n', 1)[1]
return resp, content
def _new_id(self):
"""Create a new id.
Auto incrementing number that avoids conflicts with ids already used.
Returns:
string, a new unique id.
"""
self._last_auto_id += 1
while str(self._last_auto_id) in self._requests:
self._last_auto_id += 1
return str(self._last_auto_id)
@util.positional(2)
def add(self, request, callback=None, request_id=None):
"""Add a new request.
Every callback added will be paired with a unique id, the request_id. That
unique id will be passed back to the callback when the response comes back
from the server. The default behavior is to have the library generate its
own unique id. If the caller passes in a request_id then they must ensure
uniqueness for each request_id; if a request_id is not unique an exception is
raised. Callers should either supply all request_ids or never supply a
request_id, to avoid such an error.
Args:
request: HttpRequest, Request to add to the batch.
callback: callable, A callback to be called for this response, of the
form callback(id, response, exception). The first parameter is the
request id, and the second is the deserialized response object. The
third is an googleapiclient.errors.HttpError exception object if an HTTP error
occurred while processing the request, or None if no errors occurred.
request_id: string, A unique id for the request. The id will be passed to
the callback with the response.
Returns:
None
Raises:
BatchError if a media request is added to a batch.
KeyError if the request_id is not unique.
"""
if request_id is None:
request_id = self._new_id()
if request.resumable is not None:
raise BatchError("Media requests cannot be used in a batch request.")
if request_id in self._requests:
raise KeyError("A request with this ID already exists: %s" % request_id)
self._requests[request_id] = request
self._callbacks[request_id] = callback
self._order.append(request_id)
def _execute(self, http, order, requests):
"""Serialize batch request, send to server, process response.
Args:
http: httplib2.Http, an http object to be used to make the request with.
order: list, list of request ids in the order they were added to the
batch.
request: list, list of request objects to send.
Raises:
httplib2.HttpLib2Error if a transport error has occurred.
googleapiclient.errors.BatchError if the response is the wrong format.
"""
message = MIMEMultipart('mixed')
# Message should not write out its own headers.
setattr(message, '_write_headers', lambda self: None)
# Add all the individual requests.
for request_id in order:
request = requests[request_id]
msg = MIMENonMultipart('application', 'http')
msg['Content-Transfer-Encoding'] = 'binary'
msg['Content-ID'] = self._id_to_header(request_id)
body = self._serialize_request(request)
msg.set_payload(body)
message.attach(msg)
# encode the body: note that we can't use `as_string`, because
# it plays games with `From ` lines.
fp = StringIO()
g = Generator(fp, mangle_from_=False)
g.flatten(message, unixfrom=False)
body = fp.getvalue()
headers = {}
headers['content-type'] = ('multipart/mixed; '
'boundary="%s"') % message.get_boundary()
resp, content = http.request(self._batch_uri, method='POST', body=body,
headers=headers)
if resp.status >= 300:
raise HttpError(resp, content, uri=self._batch_uri)
# Prepend with a content-type header so FeedParser can handle it.
header = 'content-type: %s\r\n\r\n' % resp['content-type']
# PY3's FeedParser only accepts unicode. So we should decode content
# here, and encode each payload again.
if six.PY3:
content = content.decode('utf-8')
for_parser = header + content
parser = FeedParser()
parser.feed(for_parser)
mime_response = parser.close()
if not mime_response.is_multipart():
raise BatchError("Response not in multipart/mixed format.", resp=resp,
content=content)
for part in mime_response.get_payload():
request_id = self._header_to_id(part['Content-ID'])
response, content = self._deserialize_response(part.get_payload())
# We encode content here to emulate normal http response.
if isinstance(content, six.text_type):
content = content.encode('utf-8')
self._responses[request_id] = (response, content)
@util.positional(1)
def execute(self, http=None):
"""Execute all the requests as a single batched HTTP request.
Args:
http: httplib2.Http, an http object to be used in place of the one the
HttpRequest request object was constructed with. If one isn't supplied
then use a http object from the requests in this batch.
Returns:
None
Raises:
httplib2.HttpLib2Error if a transport error has occurred.
googleapiclient.errors.BatchError if the response is the wrong format.
"""
# If we have no requests return
if len(self._order) == 0:
return None
# If http is not supplied use the first valid one given in the requests.
if http is None:
for request_id in self._order:
request = self._requests[request_id]
if request is not None:
http = request.http
break
if http is None:
raise ValueError("Missing a valid http object.")
# Special case for OAuth2Credentials-style objects which have not yet been
# refreshed with an initial access_token.
if getattr(http.request, 'credentials', None) is not None:
creds = http.request.credentials
if not getattr(creds, 'access_token', None):
LOGGER.info('Attempting refresh to obtain initial access_token')
creds.refresh(http)
self._execute(http, self._order, self._requests)
# Loop over all the requests and check for 401s. For each 401 request the
# credentials should be refreshed and then sent again in a separate batch.
redo_requests = {}
redo_order = []
for request_id in self._order:
resp, content = self._responses[request_id]
if resp['status'] == '401':
redo_order.append(request_id)
request = self._requests[request_id]
self._refresh_and_apply_credentials(request, http)
redo_requests[request_id] = request
if redo_requests:
self._execute(http, redo_order, redo_requests)
# Now process all callbacks that are erroring, and raise an exception for
# ones that return a non-2xx response? Or add extra parameter to callback
# that contains an HttpError?
for request_id in self._order:
resp, content = self._responses[request_id]
request = self._requests[request_id]
callback = self._callbacks[request_id]
response = None
exception = None
try:
if resp.status >= 300:
raise HttpError(resp, content, uri=request.uri)
response = request.postproc(resp, content)
except HttpError as e:
exception = e
if callback is not None:
callback(request_id, response, exception)
if self._callback is not None:
self._callback(request_id, response, exception)
class HttpRequestMock(object):
"""Mock of HttpRequest.
Do not construct directly, instead use RequestMockBuilder.
"""
def __init__(self, resp, content, postproc):
"""Constructor for HttpRequestMock
Args:
resp: httplib2.Response, the response to emulate coming from the request
content: string, the response body
postproc: callable, the post processing function usually supplied by
the model class. See model.JsonModel.response() as an example.
"""
self.resp = resp
self.content = content
self.postproc = postproc
if resp is None:
self.resp = httplib2.Response({'status': 200, 'reason': 'OK'})
if 'reason' in self.resp:
self.resp.reason = self.resp['reason']
def execute(self, http=None):
"""Execute the request.
Same behavior as HttpRequest.execute(), but the response is
mocked and not really from an HTTP request/response.
"""
return self.postproc(self.resp, self.content)
class RequestMockBuilder(object):
"""A simple mock of HttpRequest
Pass in a dictionary to the constructor that maps request methodIds to
tuples of (httplib2.Response, content, opt_expected_body) that should be
returned when that method is called. None may also be passed in for the
httplib2.Response, in which case a 200 OK response will be generated.
If an opt_expected_body (str or dict) is provided, it will be compared to
the body and UnexpectedBodyError will be raised on inequality.
Example:
response = '{"data": {"id": "tag:google.c...'
requestBuilder = RequestMockBuilder(
{
'plus.activities.get': (None, response),
}
)
googleapiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)
Methods that you do not supply a response for will return a
200 OK with an empty string as the response content or raise an exception
if check_unexpected is set to True. The methodId is taken from the rpcName
in the discovery document.
For more details see the project wiki.
"""
def __init__(self, responses, check_unexpected=False):
"""Constructor for RequestMockBuilder
The constructed object should be a callable object
that can replace the class HttpResponse.
responses - A dictionary that maps methodIds into tuples
of (httplib2.Response, content). The methodId
comes from the 'rpcName' field in the discovery
document.
check_unexpected - A boolean setting whether or not UnexpectedMethodError
should be raised on unsupplied method.
"""
self.responses = responses
self.check_unexpected = check_unexpected
def __call__(self, http, postproc, uri, method='GET', body=None,
headers=None, methodId=None, resumable=None):
"""Implements the callable interface that discovery.build() expects
of requestBuilder, which is to build an object compatible with
HttpRequest.execute(). See that method for the description of the
parameters and the expected response.
"""
if methodId in self.responses:
response = self.responses[methodId]
resp, content = response[:2]
if len(response) > 2:
# Test the body against the supplied expected_body.
expected_body = response[2]
if bool(expected_body) != bool(body):
# Not expecting a body and provided one
# or expecting a body and not provided one.
raise UnexpectedBodyError(expected_body, body)
if isinstance(expected_body, str):
expected_body = json.loads(expected_body)
body = json.loads(body)
if body != expected_body:
raise UnexpectedBodyError(expected_body, body)
return HttpRequestMock(resp, content, postproc)
elif self.check_unexpected:
raise UnexpectedMethodError(methodId=methodId)
else:
model = JsonModel(False)
return HttpRequestMock(None, '{}', model.response)
class HttpMock(object):
"""Mock of httplib2.Http"""
def __init__(self, filename=None, headers=None):
"""
Args:
filename: string, absolute filename to read response from
headers: dict, header to return with response
"""
if headers is None:
headers = {'status': '200'}
if filename:
f = open(filename, 'rb')
self.data = f.read()
f.close()
else:
self.data = None
self.response_headers = headers
self.headers = None
self.uri = None
self.method = None
self.body = None
self.headers = None
def request(self, uri,
method='GET',
body=None,
headers=None,
redirections=1,
connection_type=None):
self.uri = uri
self.method = method
self.body = body
self.headers = headers
return httplib2.Response(self.response_headers), self.data
class HttpMockSequence(object):
"""Mock of httplib2.Http
Mocks a sequence of calls to request returning different responses for each
call. Create an instance initialized with the desired response headers
and content and then use as if an httplib2.Http instance.
http = HttpMockSequence([
({'status': '401'}, ''),
({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
({'status': '200'}, 'echo_request_headers'),
])
resp, content = http.request("http://examples.com")
There are special values you can pass in for content to trigger
behaviours that are helpful in testing.
'echo_request_headers' means return the request headers in the response body
'echo_request_headers_as_json' means return the request headers in
the response body encoded as JSON
'echo_request_body' means return the request body in the response body
'echo_request_uri' means return the request uri in the response body
"""
def __init__(self, iterable):
"""
Args:
iterable: iterable, a sequence of pairs of (headers, body)
"""
self._iterable = iterable
self.follow_redirects = True
def request(self, uri,
method='GET',
body=None,
headers=None,
redirections=1,
connection_type=None):
resp, content = self._iterable.pop(0)
if content == 'echo_request_headers':
content = headers
elif content == 'echo_request_headers_as_json':
content = json.dumps(headers)
elif content == 'echo_request_body':
if hasattr(body, 'read'):
content = body.read()
else:
content = body
elif content == 'echo_request_uri':
content = uri
if isinstance(content, six.text_type):
content = content.encode('utf-8')
return httplib2.Response(resp), content
def set_user_agent(http, user_agent):
"""Set the user-agent on every request.
Args:
http - An instance of httplib2.Http
or something that acts like it.
user_agent: string, the value for the user-agent header.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = set_user_agent(h, "my-app-name/6.0")
Most of the time the user-agent will be set doing auth, this is for the rare
cases where you are accessing an unauthenticated endpoint.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Modify the request headers to add the user-agent."""
if headers is None:
headers = {}
if 'user-agent' in headers:
headers['user-agent'] = user_agent + ' ' + headers['user-agent']
else:
headers['user-agent'] = user_agent
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
return resp, content
http.request = new_request
return http
def tunnel_patch(http):
"""Tunnel PATCH requests over POST.
Args:
http - An instance of httplib2.Http
or something that acts like it.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = tunnel_patch(h)
Useful if you are running on a platform that doesn't support PATCH.
Apply this last if you are using OAuth 1.0, as changing the method
will result in a different signature.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Tunnel a PATCH request over POST by rewriting the method and headers."""
if headers is None:
headers = {}
if method == 'PATCH':
if 'oauth_token' in headers.get('authorization', ''):
LOGGER.warning(
'OAuth 1.0 request made with Credentials after tunnel_patch.')
headers['x-http-method-override'] = "PATCH"
method = 'POST'
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
return resp, content
http.request = new_request
return http
def build_http():
"""Builds httplib2.Http object
Returns:
A httplib2.Http object, which is used to make http requests, and which has timeout set by default.
To override default timeout call
socket.setdefaulttimeout(timeout_in_sec)
before interacting with this method.
"""
if socket.getdefaulttimeout() is not None:
http_timeout = socket.getdefaulttimeout()
else:
http_timeout = DEFAULT_HTTP_TIMEOUT_SEC
return httplib2.Http(timeout=http_timeout)
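# Illustrative usage (an assumption, not part of the original module): the
# returned object is typically handed to googleapiclient.discovery.build, e.g.
#   socket.setdefaulttimeout(120)      # optional: override the default timeout
#   http = build_http()
#   service = discovery.build('drive', 'v3', http=http)  # hypothetical API name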
| apache-2.0 |
cvegaj/ElectriCERT | venv3/lib/python3.6/site-packages/bitcoin/core/serialize.py | 2 | 10477 | # Copyright (C) 2012-2014 The python-bitcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
"""Serialization routines
You probably don't need to use these directly.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import struct
# Py3 compatibility
import sys
if sys.version > '3':
_bchr = lambda x: bytes([x])
_bord = lambda x: x[0]
from io import BytesIO as _BytesIO
else:
_bchr = chr
_bord = ord
from cStringIO import StringIO as _BytesIO
MAX_SIZE = 0x02000000
def Hash(msg):
"""SHA256(SHA256(msg)) -> bytes"""
return hashlib.sha256(hashlib.sha256(msg).digest()).digest()
def Hash160(msg):
"""RIPEMD160(SHA256(msg)) -> bytes"""
h = hashlib.new('ripemd160')
h.update(hashlib.sha256(msg).digest())
return h.digest()
class SerializationError(Exception):
"""Base class for serialization errors"""
class SerializationTruncationError(SerializationError):
"""Serialized data was truncated
Thrown by deserialize() and stream_deserialize()
"""
class DeserializationExtraDataError(SerializationError):
"""Deserialized data had extra data at the end
Thrown by deserialize() when not all data is consumed during
deserialization. The deserialized object and extra padding not consumed are
saved.
"""
def __init__(self, msg, obj, padding):
super(DeserializationExtraDataError, self).__init__(msg)
self.obj = obj
self.padding = padding
def ser_read(f, n):
"""Read from a stream safely
Raises SerializationError and SerializationTruncationError appropriately.
Use this instead of f.read() in your classes stream_(de)serialization()
functions.
"""
if n > MAX_SIZE:
raise SerializationError('Asked to read 0x%x bytes; MAX_SIZE exceeded' % n)
r = f.read(n)
if len(r) < n:
raise SerializationTruncationError('Asked to read %i bytes, but only got %i' % (n, len(r)))
return r
class Serializable(object):
"""Base class for serializable objects"""
__slots__ = []
def stream_serialize(self, f):
"""Serialize to a stream"""
raise NotImplementedError
@classmethod
def stream_deserialize(cls, f):
"""Deserialize from a stream"""
raise NotImplementedError
def serialize(self):
"""Serialize, returning bytes"""
f = _BytesIO()
self.stream_serialize(f)
return f.getvalue()
@classmethod
def deserialize(cls, buf, allow_padding=False):
"""Deserialize bytes, returning an instance
allow_padding - Allow buf to include extra padding. (default False)
If allow_padding is False and not all bytes are consumed during
deserialization DeserializationExtraDataError will be raised.
"""
fd = _BytesIO(buf)
r = cls.stream_deserialize(fd)
if not allow_padding:
padding = fd.read()
if len(padding) != 0:
raise DeserializationExtraDataError('Not all bytes consumed during deserialization',
r, padding)
return r
def GetHash(self):
"""Return the hash of the serialized object"""
return Hash(self.serialize())
def __eq__(self, other):
if (not isinstance(other, self.__class__) and
not isinstance(self, other.__class__)):
return NotImplemented
return self.serialize() == other.serialize()
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.serialize())
class ImmutableSerializable(Serializable):
"""Immutable serializable object"""
__slots__ = ['_cached_GetHash', '_cached__hash__']
def __setattr__(self, name, value):
raise AttributeError('Object is immutable')
def __delattr__(self, name):
raise AttributeError('Object is immutable')
def GetHash(self):
"""Return the hash of the serialized object"""
try:
return self._cached_GetHash
except AttributeError:
_cached_GetHash = super(ImmutableSerializable, self).GetHash()
object.__setattr__(self, '_cached_GetHash', _cached_GetHash)
return _cached_GetHash
def __hash__(self):
try:
return self._cached__hash__
except AttributeError:
_cached__hash__ = hash(self.serialize())
object.__setattr__(self, '_cached__hash__', _cached__hash__)
return _cached__hash__
class Serializer(object):
"""Base class for object serializers"""
def __new__(cls):
raise NotImplementedError
@classmethod
def stream_serialize(cls, obj, f):
raise NotImplementedError
@classmethod
def stream_deserialize(cls, f):
raise NotImplementedError
@classmethod
def serialize(cls, obj):
f = _BytesIO()
cls.stream_serialize(obj, f)
return f.getvalue()
@classmethod
def deserialize(cls, buf):
if isinstance(buf, str) or isinstance(buf, bytes):
buf = _BytesIO(buf)
return cls.stream_deserialize(buf)
class VarIntSerializer(Serializer):
"""Serialization of variable length ints"""
@classmethod
def stream_serialize(cls, i, f):
if i < 0:
raise ValueError('varint must be non-negative integer')
elif i < 0xfd:
f.write(_bchr(i))
elif i <= 0xffff:
f.write(_bchr(0xfd))
f.write(struct.pack(b'<H', i))
elif i <= 0xffffffff:
f.write(_bchr(0xfe))
f.write(struct.pack(b'<I', i))
else:
f.write(_bchr(0xff))
f.write(struct.pack(b'<Q', i))
@classmethod
def stream_deserialize(cls, f):
r = _bord(ser_read(f, 1))
if r < 0xfd:
return r
elif r == 0xfd:
return struct.unpack(b'<H', ser_read(f, 2))[0]
elif r == 0xfe:
return struct.unpack(b'<I', ser_read(f, 4))[0]
else:
return struct.unpack(b'<Q', ser_read(f, 8))[0]
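# Worked examples of the variable-length integer encoding above (illustrative):
#   VarIntSerializer.serialize(100)   == b'\x64'                  (single byte, < 0xfd)
#   VarIntSerializer.serialize(300)   == b'\xfd\x2c\x01'          (0xfd prefix + '<H')
#   VarIntSerializer.serialize(70000) == b'\xfe\x70\x11\x01\x00'  (0xfe prefix + '<I')
# Values above 0xffffffff use the 0xff prefix followed by a little-endian '<Q'.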
class BytesSerializer(Serializer):
"""Serialization of bytes instances"""
@classmethod
def stream_serialize(cls, b, f):
VarIntSerializer.stream_serialize(len(b), f)
f.write(b)
@classmethod
def stream_deserialize(cls, f):
l = VarIntSerializer.stream_deserialize(f)
return ser_read(f, l)
class VectorSerializer(Serializer):
"""Base class for serializers of object vectors"""
@classmethod
def stream_serialize(cls, inner_cls, objs, f):
VarIntSerializer.stream_serialize(len(objs), f)
for obj in objs:
inner_cls.stream_serialize(obj, f)
@classmethod
def stream_deserialize(cls, inner_cls, f):
n = VarIntSerializer.stream_deserialize(f)
r = []
for i in range(n):
r.append(inner_cls.stream_deserialize(f))
return r
class uint256VectorSerializer(Serializer):
"""Serialize vectors of uint256"""
@classmethod
def stream_serialize(cls, uints, f):
VarIntSerializer.stream_serialize(len(uints), f)
for uint in uints:
assert len(uint) == 32
f.write(uint)
@classmethod
def stream_deserialize(cls, f):
n = VarIntSerializer.stream_deserialize(f)
r = []
for i in range(n):
r.append(ser_read(f, 32))
return r
class intVectorSerializer(Serializer):
@classmethod
def stream_serialize(cls, ints, f):
l = len(ints)
VarIntSerializer.stream_serialize(l, f)
for i in ints:
f.write(struct.pack(b"<i", i))
@classmethod
def stream_deserialize(cls, f):
l = VarIntSerializer.stream_deserialize(f)
ints = []
for i in range(l):
ints.append(struct.unpack(b"<i", ser_read(f, 4))[0])
return ints
class VarStringSerializer(Serializer):
"""Serialize variable length strings"""
@classmethod
def stream_serialize(cls, s, f):
l = len(s)
VarIntSerializer.stream_serialize(l, f)
f.write(s)
@classmethod
def stream_deserialize(cls, f):
l = VarIntSerializer.stream_deserialize(f)
return ser_read(f, l)
def uint256_from_str(s):
"""Convert bytes to uint256"""
r = 0
t = struct.unpack(b"<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
"""Convert compact encoding to uint256
Used for the nBits compact encoding of the target in the block header.
"""
nbytes = (c >> 24) & 0xFF
if nbytes <= 3:
v = (c & 0xFFFFFF) >> 8 * (3 - nbytes)
else:
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
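# Worked example (illustrative): the well-known target nBits value 0x1d00ffff
# has nbytes = 0x1d = 29, so
#   uint256_from_compact(0x1d00ffff) == 0x00ffff << (8 * (29 - 3))
#                                    == 0xffff * 2**208
# compact_from_uint256() below performs the inverse mapping.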
def compact_from_uint256(v):
"""Convert uint256 to compact encoding
"""
nbytes = (v.bit_length() + 7) >> 3
compact = 0
if nbytes <= 3:
compact = (v & 0xFFFFFF) << 8 * (3 - nbytes)
else:
compact = v >> 8 * (nbytes - 3)
compact = compact & 0xFFFFFF
# If the sign bit (0x00800000) is set, divide the mantissa by 256 and
# increase the exponent to get an encoding without it set.
if compact & 0x00800000:
compact >>= 8
nbytes += 1
return compact | nbytes << 24
def uint256_to_str(u):
r = b""
for i in range(8):
r += struct.pack('<I', u >> (i * 32) & 0xffffffff)
return r
def uint256_to_shortstr(u):
s = "%064x" % (u,)
return s[:16]
__all__ = (
'MAX_SIZE',
'Hash',
'Hash160',
'SerializationError',
'SerializationTruncationError',
'DeserializationExtraDataError',
'ser_read',
'Serializable',
'ImmutableSerializable',
'Serializer',
'VarIntSerializer',
'BytesSerializer',
'VectorSerializer',
'uint256VectorSerializer',
'intVectorSerializer',
'VarStringSerializer',
'uint256_from_str',
'uint256_from_compact',
'compact_from_uint256',
'uint256_to_str',
'uint256_to_shortstr',
)
| gpl-3.0 |
CforED/Machine-Learning | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 87 | 3903 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import Memory
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1))
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
xiaoyaozi5566/DiamondCache | tests/configs/realview-o3-checker.py | 14 | 3718 | # Copyright (c) 2011 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Geoffrey Blake
import m5
from m5.objects import *
m5.util.addToPath('../configs/common')
import FSConfig
# --------------------
# Base L1 Cache
# ====================
class L1(BaseCache):
latency = '1ns'
block_size = 64
mshrs = 4
tgts_per_mshr = 20
is_top_level = True
# ----------------------
# Base L2 Cache
# ----------------------
class L2(BaseCache):
block_size = 64
latency = '10ns'
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8
# ---------------------
# I/O Cache
# ---------------------
class IOCache(BaseCache):
assoc = 8
block_size = 64
latency = '50ns'
mshrs = 20
size = '1kB'
tgts_per_mshr = 12
addr_ranges = [AddrRange(0, size='256MB')]
forward_snoops = False
#cpu
cpu = DerivO3CPU(cpu_id=0)
#the system
system = FSConfig.makeArmSystem('timing', "RealView_PBX", None, False)
system.cpu = cpu
#create the l1/l2 bus
system.toL2Bus = CoherentBus()
system.iocache = IOCache()
system.iocache.cpu_side = system.iobus.master
system.iocache.mem_side = system.membus.slave
#connect up the l2 cache
system.l2c = L2(size='4MB', assoc=8)
system.l2c.cpu_side = system.toL2Bus.master
system.l2c.mem_side = system.membus.slave
#connect up the checker
cpu.addCheckerCpu()
#connect up the cpu and l1s
cpu.createInterruptController()
cpu.addPrivateSplitL1Caches(L1(size = '32kB', assoc = 1),
L1(size = '32kB', assoc = 4))
# connect cpu level-1 caches to shared level-2 cache
cpu.connectAllPorts(system.toL2Bus, system.membus)
cpu.clock = '2GHz'
root = Root(full_system=True, system=system)
m5.ticks.setGlobalFrequency('1THz')
| bsd-3-clause |
bblacey/FreeCAD-MacOS-CI | src/Mod/Inspection/Init.py | 58 | 1873 | # FreeCAD init script of the Inspection module
# (c) 2001 Juergen Riegel
#***************************************************************************
#* (c) Juergen Riegel ([email protected]) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
| lgpl-2.1 |
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/opus_gui/results_manager/run/indicator_framework/representations/computed_indicator.py | 2 | 3583 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable_name import VariableName
import os
from time import strftime, localtime, time
from opus_gui.results_manager.run.indicator_framework.utilities.indicator_data_manager import IndicatorDataManager
from copy import copy
class ComputedIndicator:
def __init__(self,
indicator,
source_data,
dataset_name,
primary_keys):
self.indicator = indicator
self.source_data = source_data
cache_directory = self.source_data.cache_directory
self.storage_location = os.path.join(cache_directory, 'indicators')
self.date_computed = strftime("%Y-%m-%d %H:%M:%S", localtime(time()))
self.computed_dataset_column_name = self.get_attribute_alias()
self.dataset_name = dataset_name
self.primary_keys = primary_keys
def get_attribute_alias(self, year = None):
return self.indicator.get_attribute_alias(year)
def get_file_extension(self):
return 'csv'
def get_file_name(self, years = None,
extension = 'csv',
suppress_extension_addition = False):
short_name = self.indicator.name
file_name = '%s__%s'%(self.indicator.dataset_name,
short_name)
if years is not None:
file_name += '__%s'%('-'.join([str(year) for year in years]))
if not suppress_extension_addition:
if extension == None:
extension = self.get_file_extension()
file_name += '.%s'%extension
return file_name
def get_file_path(self, years = None):
file_name = self.get_file_name(years)
return os.path.join(self.storage_location, file_name)
def get_computed_dataset_column_name(self, year = None):
name = self.computed_dataset_column_name
if year is not None:
if name.find('DDDD') == -1:
name = '%s_%i'%(name, year)
else:
name = name.replace('DDDD', repr(year))
return name
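# Illustrative example (not part of the original source): for an indicator named
# 'population' computed on dataset 'test', get_file_name(years=[2000, 2010])
# yields 'test__population__2000-2010.csv', and
# get_computed_dataset_column_name(2000) appends '_2000' when the alias contains
# no 'DDDD' placeholder.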
# def export(self):
# data_manager = IndicatorDataManager()
# data_manager.export_indicator(
# indicator = self,
# source_data = self.source_data)
from opus_core.tests import opus_unittest
from opus_gui.results_manager.run.indicator_framework.representations.indicator import Indicator
from opus_gui.results_manager.run.indicator_framework.test_classes.abstract_indicator_test import AbstractIndicatorTest
class ComputedIndicatorTests(AbstractIndicatorTest):
def test__get_indicator_path(self):
indicator = Indicator(
attribute = 'opus_core.test.population',
dataset_name = 'test')
computed_indicator = ComputedIndicator(
source_data = self.source_data,
indicator = indicator,
dataset_name = 'test',
primary_keys = ['id']
)
returned_path = computed_indicator.get_file_name()
expected_path = 'test__population.csv'
self.assertEqual(returned_path, expected_path)
if __name__ == '__main__':
opus_unittest.main()
| gpl-2.0 |
pixelgremlins/ztruck | dj/lib/python2.7/site-packages/django/utils/synch.py | 586 | 2558 | """
Synchronization primitives:
- reader-writer lock (preference to writers)
(Contributed to Django by [email protected])
"""
import contextlib
import threading
class RWLock(object):
"""
Classic implementation of reader-writer lock with preference to writers.
Readers can access a resource simultaneously.
Writers get an exclusive access.
API is self-descriptive:
reader_enters()
reader_leaves()
writer_enters()
writer_leaves()
"""
def __init__(self):
self.mutex = threading.RLock()
self.can_read = threading.Semaphore(0)
self.can_write = threading.Semaphore(0)
self.active_readers = 0
self.active_writers = 0
self.waiting_readers = 0
self.waiting_writers = 0
def reader_enters(self):
with self.mutex:
if self.active_writers == 0 and self.waiting_writers == 0:
self.active_readers += 1
self.can_read.release()
else:
self.waiting_readers += 1
self.can_read.acquire()
def reader_leaves(self):
with self.mutex:
self.active_readers -= 1
if self.active_readers == 0 and self.waiting_writers != 0:
self.active_writers += 1
self.waiting_writers -= 1
self.can_write.release()
@contextlib.contextmanager
def reader(self):
self.reader_enters()
try:
yield
finally:
self.reader_leaves()
def writer_enters(self):
with self.mutex:
if self.active_writers == 0 and self.waiting_writers == 0 and self.active_readers == 0:
self.active_writers += 1
self.can_write.release()
else:
self.waiting_writers += 1
self.can_write.acquire()
def writer_leaves(self):
with self.mutex:
self.active_writers -= 1
if self.waiting_writers != 0:
self.active_writers += 1
self.waiting_writers -= 1
self.can_write.release()
elif self.waiting_readers != 0:
t = self.waiting_readers
self.waiting_readers = 0
self.active_readers += t
while t > 0:
self.can_read.release()
t -= 1
@contextlib.contextmanager
def writer(self):
self.writer_enters()
try:
yield
finally:
self.writer_leaves()
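# Illustrative usage sketch (not part of Django's original module):
#   lock = RWLock()
#   with lock.reader():
#       pass  # many readers may hold the lock concurrently
#   with lock.writer():
#       pass  # a writer gets exclusive access and is preferred over readers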
| apache-2.0 |
rovere/utilities | stringAtom.py | 1 | 3458 | # Simple patricia tree implementation for ultra-fast strings.
# Maintains a patricia tree of strings; an individual string is
# just an index into this table.
class Node:
def __init__(self):
self.s = " "
self.maxbit = 0
self.bit = 0
self.kids = []
self.kids.append(0)
self.kids.append(0)
class StringAtomTree:
BITS = 1
NODES = 1 << BITS
CHAR_BIT = 8
def __init__(self):
self.tree_ = []
self.tree_.append(Node())
def bits(self, p, maxbit, start, n):
if start > maxbit:
return 0
return (ord(p[start/StringAtomTree.CHAR_BIT]) \
>> (StringAtomTree.CHAR_BIT-start%StringAtomTree.CHAR_BIT-n)) \
& ~(~0 << n)
def clear(self):
while(len(self.tree_)):
self.tree_.pop()
self.tree_.append(Node())
def insert(self, s):
maxbit = len(s) * StringAtomTree.CHAR_BIT - 1
t = self.tree_[0].kids[0]
p = 0
i = 0
while (self.tree_[p].bit < self.tree_[t].bit):
kid = self.bits(s, maxbit, self.tree_[t].bit, StringAtomTree.BITS)
p = t
t = self.tree_[t].kids[kid]
if s == self.tree_[t].s:
return t
while (self.bits(self.tree_[t].s, self.tree_[t].maxbit, i, 1) == self.bits(s, maxbit, i, 1)):
i += 1
p = 0
x = self.tree_[0].kids[0]
while self.tree_[p].bit < self.tree_[x].bit and (self.tree_[x].bit < i):
kid = self.bits(s, maxbit, self.tree_[x].bit, StringAtomTree.BITS)
p = x
x = self.tree_[x].kids[kid]
ibit = self.bits(s, maxbit, i, 1)
self.tree_.append(Node())
t = len(self.tree_)-1
n = self.tree_[-1]
n.s = s
n.bit = i
n.maxbit = maxbit
n.kids[0] = x if ibit else t
n.kids[1] = t if ibit else x
self.tree_[p].kids[self.bits(s, maxbit, self.tree_[p].bit, 1)] = t
return t
def search(self, s):
maxbit = len(s) * StringAtomTree.CHAR_BIT - 1;
p = 0
x = self.tree_[0].kids[0]
while self.tree_[p].bit < self.tree_[x].bit:
kid = self.bits(s, maxbit, self.tree_[x].bit, StringAtomTree.BITS)
p = x
x = self.tree_[x].kids[kid]
if s == self.tree_[x].s:
return x
else:
return ~0
def key(self, index):
return self.tree_[index].s
def size(self):
return len(self.tree_)
def dump(self):
for n in range(0, len(self.tree_)):
t = self.tree_[n]
print "Node %d:\n string: %s\n bit: %d\n kid[0]: %d\n kid[1]: %d\n" %\
(n, t.s, t.bit, t.kids[0], t.kids[1])
class StringAtom:
TestOnlyTag = 0
def __init__(self):
self.tree_ = StringAtomTree()
self.index_ = 0
def __init__(self, tree, s, test):
self.tree_ = tree
if test == StringAtom.TestOnlyTag:
self.index_ = self.tree_.search(s)
else:
self.index_ = self.tree_.insert(s)
if self.index_ == ~0:
self.index_ = 0
def index(self):
return self.index_
def string(self):
assert self.tree_ is not None
assert self.index_ != ~0
return self.tree_.key(self.index_)
def size(self):
return len(self.string())
def test():
sat = StringAtomTree()
sat.insert("marco")
sat.insert("matilde")
sat.insert("cate")
sat.insert("stefano")
sat.insert("elena")
sat.dump()
if __name__ == "__main__":
test()
| mit |
jimlawton/pyagc-googlecode | tools/pagecounter.py | 10 | 3378 | #!/usr/bin/env python
# Copyright 2010 Jim lawton <jim dot lawton at gmail dot com>
#
# This file is part of yaAGC.
#
# yaAGC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# yaAGC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with yaAGC; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Python script to check the page meta-comments in AGC source modules.
# Looks for all .agc files in the current directory, and searches them for '## Page'
# directives. It checks the directives to verify that there are no incorrect page numbers
# (missing, extra, duplicated, out of sequence).
#
# While the page numbers do not form part of the original AGC source, they are very important
# in the code conversion process, and in the debugging of errors in the rope binary files.
import sys
import glob
def main():
sfiles = glob.glob('*.agc')
if len(sfiles) == 0:
print >>sys.stderr, "Error, no AGC source files found!"
sys.exit(1)
errors = 0
for sfile in sfiles:
if sfile == "Template.agc":
continue
page = 0
linenum = 0
start = True
for line in open(sfile):
linenum += 1
sline = line.strip()
if not sline.startswith('#'):
continue
if not "Page" in sline or ("Page" in sline and ("scans" in sline or "Pages" in sline)):
continue
fields = sline
if sline.startswith('#Page'):
print >>sys.stderr, "%s, line %d: invalid page number \"%s\"" % (sfile, linenum, sline)
errors += 1
fields = sline[1:]
elif sline.startswith('# Page'):
fields = sline[2:]
else:
continue
try:
if fields[4] == ' ':
pagenum = fields.split()[1]
else:
pagenum = fields[4:]
print >>sys.stderr, "%s, line %d: invalid page number \"%s\"" % (sfile, linenum, sline)
except:
print "Error processing line: \"%s\"" % (sline)
raise
if pagenum.isdigit():
pagenum = int(pagenum)
if start:
page = pagenum
start = False
else:
page += 1
if page != pagenum:
print >>sys.stderr, "%s, line %d: page number mismatch, expected %d, got %d" % (sfile, linenum, page, pagenum)
errors += 1
else:
print >>sys.stderr, "%s, line %d: invalid page number \"%s\"" % (sfile, linenum, pagenum)
errors += 1
if errors != 0:
print >>sys.stderr, "%d errors found" % (errors)
else:
print "No errors found"
if __name__=="__main__":
sys.exit(main())
| gpl-2.0 |
Gabriel439/pip | pip/_vendor/cachecontrol/controller.py | 317 | 10124 | """
The httplib2 algorithms ported for use with requests.
"""
import re
import calendar
import time
from email.utils import parsedate_tz
from pip._vendor.requests.structures import CaseInsensitiveDict
from .cache import DictCache
from .serialize import Serializer
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
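# Illustrative example (not part of the original module):
#   parse_uri('http://example.com/a?b=1#frag')
#   -> ('http', 'example.com', '/a', 'b=1', 'frag')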
class CacheController(object):
"""An interface to see if request should cached or not.
"""
def __init__(self, cache=None, cache_etags=True, serializer=None):
self.cache = cache or DictCache()
self.cache_etags = cache_etags
self.serializer = serializer or Serializer()
@classmethod
def _urlnorm(cls, uri):
"""Normalize the URL to create a safe key for the cache"""
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
scheme = scheme.lower()
authority = authority.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
defrag_uri = scheme + "://" + authority + request_uri
return defrag_uri
@classmethod
def cache_url(cls, uri):
return cls._urlnorm(uri)
def parse_cache_control(self, headers):
"""
Parse the cache control headers returning a dictionary with values
for the different directives.
"""
retval = {}
cc_header = 'cache-control'
if 'Cache-Control' in headers:
cc_header = 'Cache-Control'
if cc_header in headers:
parts = headers[cc_header].split(',')
parts_with_args = [
tuple([x.strip().lower() for x in part.split("=", 1)])
for part in parts if -1 != part.find("=")
]
parts_wo_args = [
(name.strip().lower(), 1)
for name in parts if -1 == name.find("=")
]
retval = dict(parts_with_args + parts_wo_args)
return retval
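# Illustrative example (not part of the original module): a header value of
# 'max-age=3600, no-cache' parses to {'max-age': '3600', 'no-cache': 1};
# directives given without an argument are mapped to the integer 1.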
def cached_request(self, request):
"""
Return a cached response if it exists in the cache, otherwise
return False.
"""
cache_url = self.cache_url(request.url)
cc = self.parse_cache_control(request.headers)
# non-caching states
no_cache = True if 'no-cache' in cc else False
if 'max-age' in cc and cc['max-age'] == 0:
no_cache = True
# Bail out if no-cache was set
if no_cache:
return False
# It is in the cache, so lets see if it is going to be
# fresh enough
resp = self.serializer.loads(request, self.cache.get(cache_url))
# Check to see if we have a cached object
if not resp:
return False
# If we have a cached 301, return it immediately. We don't
# need to test our response for other headers b/c it is
# intrinsically "cacheable" as it is Permanent.
# See:
# https://tools.ietf.org/html/rfc7231#section-6.4.2
#
# Client can try to refresh the value by repeating the request
# with cache busting headers as usual (ie no-cache).
if resp.status == 301:
return resp
headers = CaseInsensitiveDict(resp.headers)
if not headers or 'date' not in headers:
            # Without a date or etag, the cached response can never be used
# and should be deleted.
if 'etag' not in headers:
self.cache.delete(cache_url)
return False
now = time.time()
date = calendar.timegm(
parsedate_tz(headers['date'])
)
current_age = max(0, now - date)
# TODO: There is an assumption that the result will be a
# urllib3 response object. This may not be best since we
# could probably avoid instantiating or constructing the
# response until we know we need it.
resp_cc = self.parse_cache_control(headers)
# determine freshness
freshness_lifetime = 0
# Check the max-age pragma in the cache control header
if 'max-age' in resp_cc and resp_cc['max-age'].isdigit():
freshness_lifetime = int(resp_cc['max-age'])
# If there isn't a max-age, check for an expires header
elif 'expires' in headers:
expires = parsedate_tz(headers['expires'])
if expires is not None:
expire_time = calendar.timegm(expires) - date
freshness_lifetime = max(0, expire_time)
# determine if we are setting freshness limit in the req
if 'max-age' in cc:
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if 'min-fresh' in cc:
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
# adjust our current age by our min fresh
current_age += min_fresh
# see how fresh we actually are
fresh = (freshness_lifetime > current_age)
if fresh:
return resp
# we're not fresh. If we don't have an Etag, clear it out
if 'etag' not in headers:
self.cache.delete(cache_url)
# return the original handler
return False
def conditional_headers(self, request):
cache_url = self.cache_url(request.url)
resp = self.serializer.loads(request, self.cache.get(cache_url))
new_headers = {}
if resp:
headers = CaseInsensitiveDict(resp.headers)
if 'etag' in headers:
new_headers['If-None-Match'] = headers['ETag']
if 'last-modified' in headers:
new_headers['If-Modified-Since'] = headers['Last-Modified']
return new_headers
def cache_response(self, request, response, body=None):
"""
Algorithm for caching requests.
This assumes a requests Response object.
"""
# From httplib2: Don't cache 206's since we aren't going to
# handle byte range requests
if response.status not in [200, 203, 300, 301]:
return
response_headers = CaseInsensitiveDict(response.headers)
cc_req = self.parse_cache_control(request.headers)
cc = self.parse_cache_control(response_headers)
cache_url = self.cache_url(request.url)
# Delete it from the cache if we happen to have it stored there
no_store = cc.get('no-store') or cc_req.get('no-store')
if no_store and self.cache.get(cache_url):
self.cache.delete(cache_url)
# If we've been given an etag, then keep the response
if self.cache_etags and 'etag' in response_headers:
self.cache.set(
cache_url,
self.serializer.dumps(request, response, body=body),
)
        # Add to the cache any 301s. We do this before looking at
# the Date headers.
elif response.status == 301:
self.cache.set(
cache_url,
self.serializer.dumps(request, response)
)
# Add to the cache if the response headers demand it. If there
# is no date header then we can't do anything about expiring
# the cache.
elif 'date' in response_headers:
# cache when there is a max-age > 0
if cc and cc.get('max-age'):
if int(cc['max-age']) > 0:
self.cache.set(
cache_url,
self.serializer.dumps(request, response, body=body),
)
# If the request can expire, it means we should cache it
# in the meantime.
elif 'expires' in response_headers:
if response_headers['expires']:
self.cache.set(
cache_url,
self.serializer.dumps(request, response, body=body),
)
def update_cached_response(self, request, response):
"""On a 304 we will get a new set of headers that we want to
update our cached value with, assuming we have one.
This should only ever be called when we've sent an ETag and
gotten a 304 as the response.
"""
cache_url = self.cache_url(request.url)
cached_response = self.serializer.loads(
request,
self.cache.get(cache_url)
)
if not cached_response:
# we didn't have a cached response
return response
# Lets update our headers with the headers from the new request:
# http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
#
# The server isn't supposed to send headers that would make
# the cached body invalid. But... just in case, we'll be sure
        # to strip out ones we know that might be problematic due to
# typical assumptions.
excluded_headers = [
"content-length",
]
cached_response.headers.update(
dict((k, v) for k, v in response.headers.items()
if k.lower() not in excluded_headers)
)
# we want a 200 b/c we have content via the cache
cached_response.status = 200
# update our cache
self.cache.set(
cache_url,
self.serializer.dumps(request, cached_response),
)
return cached_response
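# A minimal usage sketch, assuming a requests-style request object with ``url`` and
# ``headers`` attributes; the helper name and call order below are illustrative only
# (they mirror how a caching adapter would drive the controller, not a public API).
def _controller_usage_sketch(request, response, body=b""):
    controller = CacheController(cache=DictCache())
    cached = controller.cached_request(request)  # cached response if fresh, else False
    if cached is False:
        conditional = controller.conditional_headers(request)  # If-None-Match / If-Modified-Since
        # ... perform the real request here, merging ``conditional`` into its headers ...
        controller.cache_response(request, response, body=body)
        return response
    return cached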
| mit |
miminus/youtube-dl | youtube_dl/extractor/everyonesmixtape.py | 128 | 2872 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
)
from ..utils import (
ExtractorError,
)
class EveryonesMixtapeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?everyonesmixtape\.com/#/mix/(?P<id>[0-9a-zA-Z]+)(?:/(?P<songnr>[0-9]))?$'
_TESTS = [{
'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
"info_dict": {
'id': '5bfseWNmlds',
'ext': 'mp4',
"title": "Passion Pit - \"Sleepyhead\" (Official Music Video)",
"uploader": "FKR.TV",
"uploader_id": "frenchkissrecords",
"description": "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
"upload_date": "20081015"
},
'params': {
'skip_download': True, # This is simply YouTube
}
}, {
'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi',
'info_dict': {
'id': 'm7m0jJAbMQi',
'title': 'Driving',
},
'playlist_count': 24
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
pllist_url = 'http://everyonesmixtape.com/mixtape.php?a=getMixes&u=-1&linked=%s&explore=' % playlist_id
pllist_req = compat_urllib_request.Request(pllist_url)
pllist_req.add_header('X-Requested-With', 'XMLHttpRequest')
playlist_list = self._download_json(
pllist_req, playlist_id, note='Downloading playlist metadata')
try:
playlist_no = next(playlist['id']
for playlist in playlist_list
if playlist['code'] == playlist_id)
except StopIteration:
raise ExtractorError('Playlist id not found')
pl_url = 'http://everyonesmixtape.com/mixtape.php?a=getMix&id=%s&userId=null&code=' % playlist_no
pl_req = compat_urllib_request.Request(pl_url)
pl_req.add_header('X-Requested-With', 'XMLHttpRequest')
playlist = self._download_json(
pl_req, playlist_id, note='Downloading playlist info')
entries = [{
'_type': 'url',
'url': t['url'],
'title': t['title'],
} for t in playlist['tracks']]
if mobj.group('songnr'):
songnr = int(mobj.group('songnr')) - 1
return entries[songnr]
playlist_title = playlist['mixData']['name']
return {
'_type': 'playlist',
'id': playlist_id,
'title': playlist_title,
'entries': entries,
}
| unlicense |
x111ong/odoo | openerp/addons/base/tests/test_ir_filters.py | 285 | 11000 | # -*- coding: utf-8 -*-
import functools
from openerp import exceptions
from openerp.tests import common
def noid(d):
""" Removes values that are not relevant for the test comparisons """
d.pop('id', None)
d.pop('action_id', None)
return d
class FiltersCase(common.TransactionCase):
def build(self, model, *args):
Model = self.registry(model)
for vars in args:
Model.create(self.cr, common.ADMIN_USER_ID, vars, {})
class TestGetFilters(FiltersCase):
def setUp(self):
super(TestGetFilters, self).setUp()
self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0]
self.USER_ID = self.USER[0]
def test_own_filters(self):
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='c', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='d', user_id=self.USER_ID, model_id='ir.filters'))
filters = self.registry('ir.filters').get_filters(
self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', is_default=False, user_id=self.USER, domain='[]', context='{}'),
dict(name='b', is_default=False, user_id=self.USER, domain='[]', context='{}'),
dict(name='c', is_default=False, user_id=self.USER, domain='[]', context='{}'),
dict(name='d', is_default=False, user_id=self.USER, domain='[]', context='{}'),
])
def test_global_filters(self):
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', user_id=False, model_id='ir.filters'),
dict(name='c', user_id=False, model_id='ir.filters'),
dict(name='d', user_id=False, model_id='ir.filters'),
)
filters = self.registry('ir.filters').get_filters(
self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', is_default=False, user_id=False, domain='[]', context='{}'),
dict(name='b', is_default=False, user_id=False, domain='[]', context='{}'),
dict(name='c', is_default=False, user_id=False, domain='[]', context='{}'),
dict(name='d', is_default=False, user_id=False, domain='[]', context='{}'),
])
def test_no_third_party_filters(self):
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', user_id=common.ADMIN_USER_ID, model_id='ir.filters'),
dict(name='c', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='d', user_id=common.ADMIN_USER_ID, model_id='ir.filters') )
filters = self.registry('ir.filters').get_filters(
self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', is_default=False, user_id=False, domain='[]', context='{}'),
dict(name='c', is_default=False, user_id=self.USER, domain='[]', context='{}'),
])
class TestOwnDefaults(FiltersCase):
def setUp(self):
super(TestOwnDefaults, self).setUp()
self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0]
self.USER_ID = self.USER[0]
def test_new_no_filter(self):
"""
When creating a @is_default filter with no existing filter, that new
filter gets the default flag
"""
Filters = self.registry('ir.filters')
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'a',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=self.USER, is_default=True,
domain='[]', context='{}')
])
def test_new_filter_not_default(self):
"""
When creating a @is_default filter with existing non-default filters,
the new filter gets the flag
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'c',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=self.USER, is_default=False, domain='[]', context='{}'),
dict(name='b', user_id=self.USER, is_default=False, domain='[]', context='{}'),
dict(name='c', user_id=self.USER, is_default=True, domain='[]', context='{}'),
])
def test_new_filter_existing_default(self):
"""
When creating a @is_default filter where an existing filter is already
@is_default, the flag should be *moved* from the old to the new filter
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'c',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=self.USER, is_default=False, domain='[]', context='{}'),
dict(name='b', user_id=self.USER, is_default=False, domain='[]', context='{}'),
dict(name='c', user_id=self.USER, is_default=True, domain='[]', context='{}'),
])
def test_update_filter_set_default(self):
"""
        When updating an existing filter to @is_default, if another filter
        already has the flag, the flag should be moved
"""
self.build(
'ir.filters',
dict(name='a', user_id=self.USER_ID, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=self.USER_ID, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'a',
'model_id': 'ir.filters',
'user_id': self.USER_ID,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=self.USER, is_default=True, domain='[]', context='{}'),
dict(name='b', user_id=self.USER, is_default=False, domain='[]', context='{}'),
])
class TestGlobalDefaults(FiltersCase):
def setUp(self):
super(TestGlobalDefaults, self).setUp()
self.USER = self.registry('res.users').name_search(self.cr, self.uid, 'demo')[0]
self.USER_ID = self.USER[0]
def test_new_filter_not_default(self):
"""
When creating a @is_default filter with existing non-default filters,
the new filter gets the flag
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', user_id=False, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'c',
'model_id': 'ir.filters',
'user_id': False,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=False, is_default=False, domain='[]', context='{}'),
dict(name='b', user_id=False, is_default=False, domain='[]', context='{}'),
dict(name='c', user_id=False, is_default=True, domain='[]', context='{}'),
])
def test_new_filter_existing_default(self):
"""
When creating a @is_default filter where an existing filter is already
@is_default, an error should be generated
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
with self.assertRaises(exceptions.Warning):
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'c',
'model_id': 'ir.filters',
'user_id': False,
'is_default': True,
})
def test_update_filter_set_default(self):
"""
        When updating an existing filter to @is_default, if another filter
        already has the flag, an error should be generated
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
with self.assertRaises(exceptions.Warning):
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'a',
'model_id': 'ir.filters',
'user_id': False,
'is_default': True,
})
def test_update_default_filter(self):
"""
Replacing the current default global filter should not generate any error
"""
self.build(
'ir.filters',
dict(name='a', user_id=False, model_id='ir.filters'),
dict(name='b', is_default=True, user_id=False, model_id='ir.filters'),
)
Filters = self.registry('ir.filters')
context_value = "{'some_key': True}"
Filters.create_or_replace(self.cr, self.USER_ID, {
'name': 'b',
'model_id': 'ir.filters',
'user_id': False,
'context': context_value,
'is_default': True,
})
filters = Filters.get_filters(self.cr, self.USER_ID, 'ir.filters')
self.assertItemsEqual(map(noid, filters), [
dict(name='a', user_id=False, is_default=False, domain='[]', context='{}'),
dict(name='b', user_id=False, is_default=True, domain='[]', context=context_value),
])
| agpl-3.0 |
hitszxp/scikit-learn | sklearn/kernel_approximation.py | 12 | 16958 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
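# Illustrative sketch (hypothetical helper, not part of the public API): the transformed
# features satisfy z(x).dot(z(y)) ~= exp(-gamma * ||x - y||^2), which is the Monte Carlo
# estimate described in the class docstring.
def _rbf_sampler_sketch(gamma=0.5, n_components=2000, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.normal(size=(2, 10))
    Z = RBFSampler(gamma=gamma, n_components=n_components,
                   random_state=seed).fit_transform(X)
    approx = Z[0].dot(Z[1])
    exact = np.exp(-gamma * np.sum((X[0] - X[1]) ** 2))
    return approx, exact  # the two values should be close for large n_components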
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Uses sampling of the Fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://eprints.pascal-network.org/archive/00006964/01/vedaldi10.pdf>`_
Vedaldi, A. and Zisserman, A., Computer Vision and Pattern Recognition 2010
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
shape = (n_samples, n_features * (2*sample_steps + 1))
            Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
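# Illustrative sketch (hypothetical helper): each input feature is expanded into
# 2 * sample_steps - 1 output features, so 4 features with sample_steps=2 yield
# 4 * 3 = 12 approximate-kernel features.
def _additive_chi2_shape_sketch():
    X = np.random.RandomState(0).rand(3, 4)  # non-negative input, as required
    return AdditiveChi2Sampler(sample_steps=2).fit_transform(X).shape  # (3, 12)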
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
rnd = check_random_state(self.random_state)
if not sp.issparse(X):
X = np.asarray(X)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
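# A minimal end-to-end sketch (hypothetical helper): embed the data with a Nystroem
# approximation of the RBF kernel, then train a linear classifier on the embedded
# features, which is the use case described in the references above.
def _nystroem_pipeline_sketch(X, y):
    from .linear_model import SGDClassifier
    feature_map = Nystroem(kernel='rbf', gamma=0.2, n_components=300,
                           random_state=0).fit(X)
    clf = SGDClassifier(random_state=0).fit(feature_map.transform(X), y)
    return clf, feature_map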
| bsd-3-clause |
freedomtan/workload-automation | wlauto/instrumentation/trace_cmd/__init__.py | 2 | 16185 | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=W0613,E1101
from __future__ import division
import os
import time
import subprocess
from collections import defaultdict
from wlauto import Instrument, Parameter, Executable
from wlauto.exceptions import InstrumentError, ConfigError
from wlauto.core import signal
from wlauto.utils.types import boolean
OUTPUT_TRACE_FILE = 'trace.dat'
OUTPUT_TEXT_FILE = '{}.txt'.format(os.path.splitext(OUTPUT_TRACE_FILE)[0])
TIMEOUT = 180
class TraceCmdInstrument(Instrument):
name = 'trace-cmd'
description = """
trace-cmd is an instrument which interacts with Ftrace Linux kernel internal
tracer
From trace-cmd man page:
trace-cmd command interacts with the Ftrace tracer that is built inside the
Linux kernel. It interfaces with the Ftrace specific files found in the
debugfs file system under the tracing directory.
trace-cmd reads a list of events it will trace, which can be specified in
the config file as follows ::
trace_events = ['irq*', 'power*']
If no event is specified in the config file, trace-cmd traces the following events:
- sched*
- irq*
- power*
- cpufreq_interactive*
The list of available events can be obtained by rooting and running the following
command line on the device ::
trace-cmd list
You may also specify ``trace_buffer_size`` setting which must be an integer that will
be used to set the ftrace buffer size. It will be interpreted as KB::
trace_cmd_buffer_size = 8000
The maximum buffer size varies from device to device, but there is a maximum and trying
    to set buffer size beyond that will fail. If you plan on collecting a lot of trace over
long periods of time, the buffer size will not be enough and you will only get trace for
the last portion of your run. To deal with this you can set the ``trace_mode`` setting to
``'record'`` (the default is ``'start'``)::
trace_cmd_mode = 'record'
This will cause trace-cmd to trace into file(s) on disk, rather than the buffer, and so the
limit for the max size of the trace is set by the storage available on device. Bear in mind
    that ``'record'`` mode *is* more intrusive than the default, so if you do not plan on
generating a lot of trace, it is best to use the default ``'start'`` mode.
    .. note:: Mode names correspond to the underlying trace-cmd executable's command used to
implement them. You can find out more about what is happening in each case from
trace-cmd documentation: https://lwn.net/Articles/341902/.
This instrument comes with an Android trace-cmd binary that will be copied and used on the
    device; however, post-processing will be done on-host and you must have trace-cmd installed and
in your path. On Ubuntu systems, this may be done with::
sudo apt-get install trace-cmd
"""
parameters = [
Parameter('events', kind=list, default=['sched*', 'irq*', 'power*', 'cpufreq_interactive*'],
global_alias='trace_events',
description="""
Specifies the list of events to be traced. Each event in the list will be passed to
trace-cmd with -e parameter and must be in the format accepted by trace-cmd.
"""),
Parameter('mode', default='start', allowed_values=['start', 'record'],
global_alias='trace_mode',
description="""
Trace can be collected using either 'start' or 'record' trace-cmd
commands. In 'start' mode, trace will be collected into the ftrace buffer;
in 'record' mode, trace will be written into a file on the device's file
system. 'start' mode is (in theory) less intrusive than 'record' mode, however
it is limited by the size of the ftrace buffer (which is configurable --
see ``buffer_size`` -- but only up to a point) and that may overflow
for long-running workloads, which will result in dropped events.
"""),
Parameter('buffer_size', kind=int, default=None,
global_alias='trace_buffer_size',
description="""
Attempt to set ftrace buffer size to the specified value (in KB). Default buffer size
may need to be increased for long-running workloads, or if a large number
of events have been enabled. Note: there is a maximum size that the buffer can
be set, and that varies from device to device. Attempting to set buffer size higher
than this will fail. In that case, this instrument will set the size to the highest
possible value by going down from the specified size in ``buffer_size_step`` intervals.
"""),
Parameter('buffer_size_step', kind=int, default=1000,
global_alias='trace_buffer_size_step',
description="""
Defines the decremental step used if the specified ``buffer_size`` could not be set.
                  This will be subtracted from the buffer size until setting succeeds or size is reduced to
1MB.
"""),
Parameter('buffer_size_file', default='/sys/kernel/debug/tracing/buffer_size_kb',
description="""
                  Path to the debugfs file that may be used to set the ftrace buffer size. This should not need
                  to be modified for the vast majority of devices.
"""),
Parameter('report', kind=boolean, default=True,
description="""
Specifies whether host-side reporting should be performed once the binary trace has been
                  pulled from the device.
.. note:: This requires the latest version of trace-cmd to be installed on the host (the
one in your distribution's repos may be too old).
"""),
Parameter('no_install', kind=boolean, default=False,
description="""
Do not install the bundled trace-cmd and use the one on the device instead. If there is
not already a trace-cmd on the device, an error is raised.
"""),
]
def __init__(self, device, **kwargs):
super(TraceCmdInstrument, self).__init__(device, **kwargs)
self.trace_cmd = None
self.event_string = _build_trace_events(self.events)
self.output_file = os.path.join(self.device.working_directory, OUTPUT_TRACE_FILE)
self.temp_trace_file = self.device.path.join(self.device.working_directory, OUTPUT_TRACE_FILE)
def on_run_init(self, context):
if not self.device.is_rooted:
raise InstrumentError('trace-cmd instrument cannot be used on an unrooted device.')
if not self.no_install:
host_file = context.resolver.get(Executable(self, self.device.abi, 'trace-cmd'))
self.trace_cmd = self.device.install_executable(host_file)
else:
if not self.device.is_installed('trace-cmd'):
raise ConfigError('No trace-cmd found on device and no_install=True is specified.')
self.trace_cmd = 'trace-cmd'
# Register ourselves as absolute last event before and
# first after so we can mark the trace at the right time
signal.connect(self.insert_start_mark, signal.BEFORE_WORKLOAD_EXECUTION, priority=11)
signal.connect(self.insert_end_mark, signal.AFTER_WORKLOAD_EXECUTION, priority=11)
def setup(self, context):
if self.mode == 'start':
if self.buffer_size:
self._set_buffer_size()
self.device.execute('{} reset'.format(self.trace_cmd), as_root=True, timeout=180)
elif self.mode == 'record':
pass
else:
raise ValueError('Bad mode: {}'.format(self.mode)) # should never get here
def start(self, context):
self.start_time = time.time() # pylint: disable=attribute-defined-outside-init
if self.mode == 'start':
self.device.execute('{} start {}'.format(self.trace_cmd, self.event_string), as_root=True)
elif self.mode == 'record':
self.device.kick_off('{} record -o {} {}'.format(self.trace_cmd, self.output_file, self.event_string))
else:
raise ValueError('Bad mode: {}'.format(self.mode)) # should never get here
def stop(self, context):
self.stop_time = time.time() # pylint: disable=attribute-defined-outside-init
if self.mode == 'start':
self.device.execute('{} stop'.format(self.trace_cmd), timeout=60, as_root=True)
elif self.mode == 'record':
# There will be a trace-cmd worker process per CPU core plus a main
# control trace-cmd process. Interrupting the control process will
# trigger the generation of the single binary trace file.
trace_cmds = self.device.ps(name=self.trace_cmd)
if not trace_cmds:
raise InstrumentError('Could not find running trace-cmd on device.')
# The workers will have their PPID set to the PID of control.
parent_map = defaultdict(list)
for entry in trace_cmds:
parent_map[entry.ppid].append(entry.pid)
controls = [v[0] for _, v in parent_map.iteritems()
if len(v) == 1 and v[0] in parent_map]
if len(controls) > 1:
self.logger.warning('More than one trace-cmd instance found; stopping all of them.')
for c in controls:
self.device.kill(c, signal='INT', as_root=True)
else:
raise ValueError('Bad mode: {}'.format(self.mode)) # should never get here
def update_result(self, context): # NOQA pylint: disable=R0912
if self.mode == 'start':
self.device.execute('{} extract -o {}'.format(self.trace_cmd, self.output_file),
timeout=TIMEOUT, as_root=True)
elif self.mode == 'record':
self.logger.debug('Waiting for trace.dat to be generated.')
while self.device.ps(name=self.trace_cmd):
time.sleep(2)
else:
raise ValueError('Bad mode: {}'.format(self.mode)) # should never get here
# The size of trace.dat will depend on how long trace-cmd was running.
        # Therefore timeout for the pull command must also be adjusted
# accordingly.
pull_timeout = (self.stop_time - self.start_time)
self.device.pull_file(self.output_file, context.output_directory, timeout=pull_timeout)
context.add_iteration_artifact('bintrace', OUTPUT_TRACE_FILE, kind='data',
description='trace-cmd generated ftrace dump.')
local_trace_file = os.path.join(context.output_directory, OUTPUT_TRACE_FILE)
local_txt_trace_file = os.path.join(context.output_directory, OUTPUT_TEXT_FILE)
if self.report:
# To get the output of trace.dat, trace-cmd must be installed
# This is done host-side because the generated file is very large
if not os.path.isfile(local_trace_file):
                self.logger.warning('Not generating trace.txt, as trace.dat does not exist.')
try:
command = 'trace-cmd report {} > {}'.format(local_trace_file, local_txt_trace_file)
self.logger.debug(command)
process = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True)
_, error = process.communicate()
if process.returncode:
raise InstrumentError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
if error:
# logged at debug level, as trace-cmd always outputs some
# errors that seem benign.
self.logger.debug(error)
if os.path.isfile(local_txt_trace_file):
context.add_iteration_artifact('txttrace', OUTPUT_TEXT_FILE, kind='export',
description='trace-cmd generated ftrace dump.')
self.logger.debug('Verifying traces.')
with open(local_txt_trace_file) as fh:
for line in fh:
if 'EVENTS DROPPED' in line:
self.logger.warning('Dropped events detected.')
break
else:
self.logger.debug('Trace verified.')
else:
self.logger.warning('Could not generate trace.txt.')
except OSError:
raise InstrumentError('Could not find trace-cmd. Please make sure it is installed and is in PATH.')
def teardown(self, context):
self.device.delete_file(os.path.join(self.device.working_directory, OUTPUT_TRACE_FILE))
def on_run_end(self, context):
pass
def validate(self):
if self.report and os.system('which trace-cmd > /dev/null'):
raise InstrumentError('trace-cmd is not in PATH; is it installed?')
if self.buffer_size:
if self.mode == 'record':
self.logger.debug('trace_buffer_size specified with record mode; it will be ignored.')
else:
try:
int(self.buffer_size)
except ValueError:
raise ConfigError('trace_buffer_size must be an int.')
def insert_start_mark(self, context):
# trace marker appears in ftrace as an ftrace/print event with TRACE_MARKER_START in info field
self.device.set_sysfile_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_START", verify=False)
def insert_end_mark(self, context):
# trace marker appears in ftrace as an ftrace/print event with TRACE_MARKER_STOP in info field
self.device.set_sysfile_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_STOP", verify=False)
def _set_buffer_size(self):
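        # Try the requested size first; if the kernel rejects it, step down in
        # buffer_size_step increments until a size is accepted, then step back up
        # towards the requested size, keeping the largest value the device takes.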
target_buffer_size = self.buffer_size
attempt_buffer_size = target_buffer_size
buffer_size = 0
floor = 1000 if target_buffer_size > 1000 else target_buffer_size
while attempt_buffer_size >= floor:
self.device.set_sysfile_value(self.buffer_size_file, attempt_buffer_size, verify=False)
buffer_size = self.device.get_sysfile_value(self.buffer_size_file, kind=int)
if buffer_size == attempt_buffer_size:
break
else:
attempt_buffer_size -= self.buffer_size_step
if buffer_size == target_buffer_size:
return
while attempt_buffer_size < target_buffer_size:
attempt_buffer_size += self.buffer_size_step
self.device.set_sysfile_value(self.buffer_size_file, attempt_buffer_size, verify=False)
buffer_size = self.device.get_sysfile_value(self.buffer_size_file, kind=int)
if attempt_buffer_size != buffer_size:
self.logger.warning('Failed to set trace buffer size to {}, value set was {}'.format(target_buffer_size, buffer_size))
break
def _build_trace_events(events):
event_string = ' '.join(['-e {}'.format(e) for e in events])
return event_string
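# Illustrative sketch (hypothetical helper, example values only): the configured event
# list, e.g. ``trace_events = ['sched*', 'irq*', 'power*']``, is turned into repeated
# ``-e`` arguments on the trace-cmd command line.
def _example_event_string():
    return _build_trace_events(['sched*', 'irq*', 'power*'])  # "-e sched* -e irq* -e power*"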
| apache-2.0 |
jgmanzanas/CMNT_004_15 | project-addons/sale_customer_discount/__openerp__.py | 1 | 1743 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2014 Pexego Sistemas Informáticos All Rights Reserved
# $Marta Vázquez Rodríguez$ <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Sale customer discount",
"version": "1.0",
"author": "Pexego",
'website': 'www.pexego.es',
"category": "Sales",
"description": """
Sales customer discount
========================================
        * Adds the fields 'sale price 2' and 'commercial cost' to products.
        * Also adds the 'cost margin' and 'commercial margin' of the sale price
          and the 'cost margin' and 'commercial margin' of the sale price 2.
""",
"depends": ["base", "product", "stock_account", "pmp_landed_costs",
"web_readonly_bypass", "hide_product_variants", "purchase"],
"data": [
"product_view.xml",
],
"demo": [],
'auto_install': False,
"installable": True,
'images': [],
}
| agpl-3.0 |
n0max/servo | tests/wpt/css-tests/tools/html5lib/html5lib/ihatexml.py | 1727 | 16581 | from __future__ import absolute_import, division, unicode_literals
import re
import warnings
from .constants import DataLossWarning
baseChar = """
[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
combiningCharacter = """
[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
#x3099 | #x309A"""
digit = """
[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
extender = """
#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
#[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
letter = " | ".join([baseChar, ideographic])
# Without the ":"
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
extender])
nameFirst = " | ".join([letter, "_"])
reChar = re.compile(r"#x([\d|A-F]{4,4})")
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
def charStringToList(chars):
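    # Convert a W3C-style character class production (e.g. "[#x0041-#x005A] | #x00D7")
    # into a sorted, merged list of [low, high] codepoint ranges.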
charRanges = [item.strip() for item in chars.split(" | ")]
rv = []
for item in charRanges:
foundMatch = False
for regexp in (reChar, reCharRange):
match = regexp.match(item)
if match is not None:
rv.append([hexToInt(item) for item in match.groups()])
if len(rv[-1]) == 1:
rv[-1] = rv[-1] * 2
foundMatch = True
break
if not foundMatch:
assert len(item) == 1
rv.append([ord(item)] * 2)
rv = normaliseCharList(rv)
return rv
def normaliseCharList(charList):
charList = sorted(charList)
for item in charList:
assert item[1] >= item[0]
rv = []
i = 0
while i < len(charList):
j = 1
rv.append(charList[i])
while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1:
rv[-1][1] = charList[i + j][1]
j += 1
i += j
return rv
# We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)
def missingRanges(charList):
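    # Return the complement of charList within the BMP as [low, high] ranges.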
rv = []
if charList[0] != 0:
rv.append([0, charList[0][0] - 1])
for i, item in enumerate(charList[:-1]):
rv.append([item[1] + 1, charList[i + 1][0] - 1])
if charList[-1][1] != max_unicode:
rv.append([charList[-1][1] + 1, max_unicode])
return rv
def listToRegexpStr(charList):
rv = []
for item in charList:
if item[0] == item[1]:
rv.append(escapeRegexp(chr(item[0])))
else:
rv.append(escapeRegexp(chr(item[0])) + "-" +
escapeRegexp(chr(item[1])))
return "[%s]" % "".join(rv)
def hexToInt(hex_str):
return int(hex_str, 16)
def escapeRegexp(string):
specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
"[", "]", "|", "(", ")", "-")
for char in specialCharacters:
string = string.replace(char, "\\" + char)
return string
# output from the above
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
# Simpler things
nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\-\'()+,./:=?;!*#@$_%]")
class InfosetFilter(object):
replacementRegexp = re.compile(r"U[\dA-F]{5,5}")
def __init__(self, replaceChars=None,
dropXmlnsLocalName=False,
dropXmlnsAttrNs=False,
preventDoubleDashComments=False,
preventDashAtCommentEnd=False,
replaceFormFeedCharacters=True,
preventSingleQuotePubid=False):
self.dropXmlnsLocalName = dropXmlnsLocalName
self.dropXmlnsAttrNs = dropXmlnsAttrNs
self.preventDoubleDashComments = preventDoubleDashComments
self.preventDashAtCommentEnd = preventDashAtCommentEnd
self.replaceFormFeedCharacters = replaceFormFeedCharacters
self.preventSingleQuotePubid = preventSingleQuotePubid
self.replaceCache = {}
def coerceAttribute(self, name, namespace=None):
if self.dropXmlnsLocalName and name.startswith("xmlns:"):
warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
return None
elif (self.dropXmlnsAttrNs and
namespace == "http://www.w3.org/2000/xmlns/"):
warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
return None
else:
return self.toXmlName(name)
def coerceElement(self, name, namespace=None):
return self.toXmlName(name)
def coerceComment(self, data):
if self.preventDoubleDashComments:
while "--" in data:
warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
data = data.replace("--", "- -")
return data
def coerceCharacters(self, data):
if self.replaceFormFeedCharacters:
for i in range(data.count("\x0C")):
warnings.warn("Text cannot contain U+000C", DataLossWarning)
data = data.replace("\x0C", " ")
# Other non-xml characters
return data
def coercePubid(self, data):
dataOutput = data
for char in nonPubidCharRegexp.findall(data):
warnings.warn("Coercing non-XML pubid", DataLossWarning)
replacement = self.getReplacementCharacter(char)
dataOutput = dataOutput.replace(char, replacement)
if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
warnings.warn("Pubid cannot contain single quote", DataLossWarning)
dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
return dataOutput
def toXmlName(self, name):
nameFirst = name[0]
nameRest = name[1:]
m = nonXmlNameFirstBMPRegexp.match(nameFirst)
if m:
warnings.warn("Coercing non-XML name", DataLossWarning)
nameFirstOutput = self.getReplacementCharacter(nameFirst)
else:
nameFirstOutput = nameFirst
nameRestOutput = nameRest
replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
for char in replaceChars:
warnings.warn("Coercing non-XML name", DataLossWarning)
replacement = self.getReplacementCharacter(char)
nameRestOutput = nameRestOutput.replace(char, replacement)
return nameFirstOutput + nameRestOutput
def getReplacementCharacter(self, char):
if char in self.replaceCache:
replacement = self.replaceCache[char]
else:
replacement = self.escapeChar(char)
return replacement
def fromXmlName(self, name):
for item in set(self.replacementRegexp.findall(name)):
name = name.replace(item, self.unescapeChar(item))
return name
def escapeChar(self, char):
replacement = "U%05X" % ord(char)
self.replaceCache[char] = replacement
return replacement
def unescapeChar(self, charcode):
return chr(int(charcode[1:], 16))
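
# Illustrative round trip (a sketch of the behavior of the two methods above,
# not part of the original module): a name that is illegal in XML because it
# starts with a digit is escaped by toXmlName() and recovered by fromXmlName().
#
#   >>> f = InfosetFilter()
#   >>> f.toXmlName("1foo")        # '1' (U+0031) cannot start an XML name
#   'U00031foo'
#   >>> f.fromXmlName("U00031foo")
#   '1foo'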
| mpl-2.0 |
sffjunkie/home-assistant | homeassistant/components/switch/mysensors.py | 2 | 3027 | """
Support for MySensors switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.mysensors/
"""
import logging
from homeassistant.components import mysensors
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import STATE_OFF, STATE_ON
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = []
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the mysensors platform for switches."""
# Only act if loaded via mysensors by discovery event.
# Otherwise gateway is not setup.
if discovery_info is None:
return
for gateway in mysensors.GATEWAYS.values():
# Define the S_TYPES and V_TYPES that the platform should handle as
# states. Map them in a dict of lists.
pres = gateway.const.Presentation
set_req = gateway.const.SetReq
map_sv_types = {
pres.S_DOOR: [set_req.V_ARMED],
pres.S_MOTION: [set_req.V_ARMED],
pres.S_SMOKE: [set_req.V_ARMED],
pres.S_LIGHT: [set_req.V_LIGHT],
pres.S_LOCK: [set_req.V_LOCK_STATUS],
}
if float(gateway.version) >= 1.5:
map_sv_types.update({
pres.S_BINARY: [set_req.V_STATUS, set_req.V_LIGHT],
pres.S_SPRINKLER: [set_req.V_STATUS],
pres.S_WATER_LEAK: [set_req.V_ARMED],
pres.S_SOUND: [set_req.V_ARMED],
pres.S_VIBRATION: [set_req.V_ARMED],
pres.S_MOISTURE: [set_req.V_ARMED],
})
map_sv_types[pres.S_LIGHT].append(set_req.V_STATUS)
devices = {}
gateway.platform_callbacks.append(mysensors.pf_callback_factory(
map_sv_types, devices, add_devices, MySensorsSwitch))
class MySensorsSwitch(mysensors.MySensorsDeviceEntity, SwitchDevice):
"""Representation of the value of a MySensors Switch child node."""
@property
def is_on(self):
"""Return True if switch is on."""
if self.value_type in self._values:
return self._values[self.value_type] == STATE_ON
return False
def turn_on(self):
"""Turn the switch on."""
self.gateway.set_child_value(
self.node_id, self.child_id, self.value_type, 1)
if self.gateway.optimistic:
# optimistically assume that switch has changed state
self._values[self.value_type] = STATE_ON
self.update_ha_state()
def turn_off(self):
"""Turn the switch off."""
self.gateway.set_child_value(
self.node_id, self.child_id, self.value_type, 0)
if self.gateway.optimistic:
# optimistically assume that switch has changed state
self._values[self.value_type] = STATE_OFF
self.update_ha_state()
@property
def assumed_state(self):
"""Return True if unable to access real state of entity."""
return self.gateway.optimistic
| mit |
olasitarska/django | tests/generic_views/urls.py | 7 | 13621 | from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from . import models
from . import views
urlpatterns = [
# TemplateView
url(r'^template/no_template/$',
TemplateView.as_view()),
url(r'^template/simple/(?P<foo>\w+)/$',
TemplateView.as_view(template_name='generic_views/about.html')),
url(r'^template/custom/(?P<foo>\w+)/$',
views.CustomTemplateView.as_view(template_name='generic_views/about.html')),
url(r'^template/content_type/$',
TemplateView.as_view(template_name='generic_views/robots.txt', content_type='text/plain')),
url(r'^template/cached/(?P<foo>\w+)/$',
cache_page(2.0)(TemplateView.as_view(template_name='generic_views/about.html'))),
# DetailView
url(r'^detail/obj/$',
views.ObjectDetail.as_view()),
url(r'^detail/artist/(?P<pk>[0-9]+)/$',
views.ArtistDetail.as_view(),
name="artist_detail"),
url(r'^detail/author/(?P<pk>[0-9]+)/$',
views.AuthorDetail.as_view(),
name="author_detail"),
url(r'^detail/author/bycustompk/(?P<foo>[0-9]+)/$',
views.AuthorDetail.as_view(pk_url_kwarg='foo')),
url(r'^detail/author/byslug/(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/bycustomslug/(?P<foo>[\w-]+)/$',
views.AuthorDetail.as_view(slug_url_kwarg='foo')),
url(r'^detail/author/(?P<pk>[0-9]+)/template_name_suffix/$',
views.AuthorDetail.as_view(template_name_suffix='_view')),
url(r'^detail/author/(?P<pk>[0-9]+)/template_name/$',
views.AuthorDetail.as_view(template_name='generic_views/about.html')),
url(r'^detail/author/(?P<pk>[0-9]+)/context_object_name/$',
views.AuthorDetail.as_view(context_object_name='thingy')),
url(r'^detail/author/(?P<pk>[0-9]+)/dupe_context_object_name/$',
views.AuthorDetail.as_view(context_object_name='object')),
url(r'^detail/page/(?P<pk>[0-9]+)/field/$',
views.PageDetail.as_view()),
url(r'^detail/author/invalid/url/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/invalid/qs/$',
views.AuthorDetail.as_view(queryset=None)),
url(r'^detail/nonmodel/1/$',
views.NonModelDetail.as_view()),
url(r'^detail/doesnotexist/(?P<pk>[0-9]+)/$',
views.ObjectDoesNotExistDetail.as_view()),
# FormView
url(r'^contact/$',
views.ContactView.as_view()),
# Create/UpdateView
url(r'^edit/artists/create/$',
views.ArtistCreate.as_view()),
url(r'^edit/artists/(?P<pk>[0-9]+)/update/$',
views.ArtistUpdate.as_view()),
url(r'^edit/authors/create/naive/$',
views.NaiveAuthorCreate.as_view()),
url(r'^edit/authors/create/redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/authors/create/')),
url(r'^edit/authors/create/interpolate_redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/author/%(id)d/update/')),
url(r'^edit/authors/create/restricted/$',
views.AuthorCreateRestricted.as_view()),
url(r'^edit/authors/create/$',
views.AuthorCreate.as_view()),
url(r'^edit/authors/create/special/$',
views.SpecializedAuthorCreate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/naive/$',
views.NaiveAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/authors/create/')),
url(r'^edit/author/(?P<pk>[0-9]+)/update/interpolate_redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/author/%(id)d/update/')),
url(r'^edit/author/(?P<pk>[0-9]+)/update/$',
views.AuthorUpdate.as_view()),
url(r'^edit/author/update/$',
views.OneAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/update/special/$',
views.SpecializedAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/naive/$',
views.NaiveAuthorDelete.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/')),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/interpolate_redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/?deleted=%(id)s')),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/$',
views.AuthorDelete.as_view()),
url(r'^edit/author/(?P<pk>[0-9]+)/delete/special/$',
views.SpecializedAuthorDelete.as_view()),
# ArchiveIndexView
url(r'^dates/books/$',
views.BookArchive.as_view()),
url(r'^dates/books/context_object_name/$',
views.BookArchive.as_view(context_object_name='thingies')),
url(r'^dates/books/allow_empty/$',
views.BookArchive.as_view(allow_empty=True)),
url(r'^dates/books/template_name/$',
views.BookArchive.as_view(template_name='generic_views/list.html')),
url(r'^dates/books/template_name_suffix/$',
views.BookArchive.as_view(template_name_suffix='_detail')),
url(r'^dates/books/invalid/$',
views.BookArchive.as_view(queryset=None)),
url(r'^dates/books/paginated/$',
views.BookArchive.as_view(paginate_by=10)),
url(r'^dates/books/reverse/$',
views.BookArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
url(r'^dates/books/by_month/$',
views.BookArchive.as_view(date_list_period='month')),
url(r'^dates/booksignings/$',
views.BookSigningArchive.as_view()),
url(r'^dates/books/sortedbyname/$',
views.BookArchive.as_view(ordering='name')),
url(r'^dates/books/sortedbynamedec/$',
views.BookArchive.as_view(ordering='-name')),
# ListView
url(r'^list/dict/$',
views.DictList.as_view()),
url(r'^list/dict/paginated/$',
views.DictList.as_view(paginate_by=1)),
url(r'^list/artists/$',
views.ArtistList.as_view(),
name="artists_list"),
url(r'^list/authors/$',
views.AuthorList.as_view(),
name="authors_list"),
url(r'^list/authors/paginated/$',
views.AuthorList.as_view(paginate_by=30)),
url(r'^list/authors/paginated/(?P<page>[0-9]+)/$',
views.AuthorList.as_view(paginate_by=30)),
url(r'^list/authors/paginated-orphaned/$',
views.AuthorList.as_view(paginate_by=30, paginate_orphans=2)),
url(r'^list/authors/notempty/$',
views.AuthorList.as_view(allow_empty=False)),
url(r'^list/authors/notempty/paginated/$',
views.AuthorList.as_view(allow_empty=False, paginate_by=2)),
url(r'^list/authors/template_name/$',
views.AuthorList.as_view(template_name='generic_views/list.html')),
url(r'^list/authors/template_name_suffix/$',
views.AuthorList.as_view(template_name_suffix='_objects')),
url(r'^list/authors/context_object_name/$',
views.AuthorList.as_view(context_object_name='author_list')),
url(r'^list/authors/dupe_context_object_name/$',
views.AuthorList.as_view(context_object_name='object_list')),
url(r'^list/authors/invalid/$',
views.AuthorList.as_view(queryset=None)),
url(r'^list/authors/paginated/custom_class/$',
views.AuthorList.as_view(paginate_by=5, paginator_class=views.CustomPaginator)),
url(r'^list/authors/paginated/custom_page_kwarg/$',
views.AuthorList.as_view(paginate_by=30, page_kwarg='pagina')),
url(r'^list/authors/paginated/custom_constructor/$',
views.AuthorListCustomPaginator.as_view()),
url(r'^list/books/sorted/$',
views.BookList.as_view(ordering='name')),
url(r'^list/books/sortedbypagesandnamedec/$',
views.BookList.as_view(ordering=('pages', '-name'))),
# YearArchiveView
# Mixing keyword and positional captures below is intentional; the views
# ought to be able to accept either.
url(r'^dates/books/(?P<year>[0-9]{4})/$',
views.BookYearArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/make_object_list/$',
views.BookYearArchive.as_view(make_object_list=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/allow_empty/$',
views.BookYearArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/allow_future/$',
views.BookYearArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/paginated/$',
views.BookYearArchive.as_view(make_object_list=True, paginate_by=30)),
url(r'^dates/books/(?P<year>\d{4})/sortedbyname/$',
views.BookYearArchive.as_view(make_object_list=True, ordering='name')),
url(r'^dates/books/(?P<year>\d{4})/sortedbypageandnamedec/$',
views.BookYearArchive.as_view(make_object_list=True, ordering=('pages', '-name'))),
url(r'^dates/books/no_year/$',
views.BookYearArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/reverse/$',
views.BookYearArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/$',
views.BookSigningYearArchive.as_view()),
# MonthArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/$',
views.BookMonthArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/$',
views.BookMonthArchive.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/allow_empty/$',
views.BookMonthArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/allow_future/$',
views.BookMonthArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/paginated/$',
views.BookMonthArchive.as_view(paginate_by=30)),
url(r'^dates/books/(?P<year>[0-9]{4})/no_month/$',
views.BookMonthArchive.as_view()),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/$',
views.BookSigningMonthArchive.as_view()),
# WeekArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/$',
views.BookWeekArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/allow_empty/$',
views.BookWeekArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/allow_future/$',
views.BookWeekArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/paginated/$',
views.BookWeekArchive.as_view(paginate_by=30)),
url(r'^dates/books/(?P<year>[0-9]{4})/week/no_week/$',
views.BookWeekArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/monday/$',
views.BookWeekArchive.as_view(week_format='%W')),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/week/(?P<week>[0-9]{1,2})/$',
views.BookSigningWeekArchive.as_view()),
# DayArchiveView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/$',
views.BookDayArchive.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/$',
views.BookDayArchive.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_empty/$',
views.BookDayArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_future/$',
views.BookDayArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/allow_empty_and_future/$',
views.BookDayArchive.as_view(allow_empty=True, allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/paginated/$',
views.BookDayArchive.as_view(paginate_by=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/no_day/$',
views.BookDayArchive.as_view()),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/$',
views.BookSigningDayArchive.as_view()),
# TodayArchiveView
url(r'^dates/books/today/$',
views.BookTodayArchive.as_view()),
url(r'^dates/books/today/allow_empty/$',
views.BookTodayArchive.as_view(allow_empty=True)),
url(r'^dates/booksignings/today/$',
views.BookSigningTodayArchive.as_view()),
# DateDetailView
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookDetail.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookDetail.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/allow_future/$',
views.BookDetail.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/nopk/$',
views.BookDetail.as_view()),
url(r'^dates/books/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/byslug/(?P<slug>[\w-]+)/$',
views.BookDetail.as_view()),
url(r'^dates/books/get_object_custom_queryset/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookDetailGetObjectCustomQueryset.as_view()),
url(r'^dates/booksignings/(?P<year>[0-9]{4})/(?P<month>[a-z]{3})/(?P<day>[0-9]{1,2})/(?P<pk>[0-9]+)/$',
views.BookSigningDetail.as_view()),
# Useful for testing redirects
url(r'^accounts/login/$', auth_views.login)
]
| bsd-3-clause |
HunterUSF/Quandles | cohomology_calculation/torsioncounter.py | 1 | 4826 | ##############################################################################
### This program checks the row reduced
### cocycle matrix to see if there are any torsion elements.
###
### Version 2 uses the updated rowReduce function, where z does not need
### to be specified.
###
###############################################################################
from numpy import *
from sympy import *
from math import sqrt
from quandlelist import *
def computeCoeff(triple, matrix):
    ### Coefficient row obtained by evaluating the triple in the 2-cocycle equation
n = len(matrix)
current_eq = []
p, q, r = triple[0], triple[1], triple[2]
a, b, c, d = (p, r), (matrix[p, r], matrix[q, r]), (p, q), (matrix[p, q], r)
constants = [a,b,c,d]
for x in range(0, n):
for y in range(0, n):
if (x,y) in constants:
if ((x,y) == a or (x,y) == b) and ((x,y) == c or (x,y) == d):
current_eq.append(0)
elif ((x,y) == a or (x,y) == b):
current_eq.append(1)
elif ((x,y) == c or (x,y) == d):
current_eq.append(-1)
else:
current_eq.append(0)
#print triple
#equation = []
#for i in range(0, len(current_eq)):
# if current_eq[i] != 0:
# equation.append("%d(%d,%d)" % (current_eq[i] , (i / 4), (i % 4)))
#print equation
return current_eq
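
# A sketch of the identity encoded by the signs above (assuming matrix[i, j]
# holds the quandle product i*j, and writing phi for a 2-cochain): each triple
# (p, q, r) contributes the 2-cocycle condition
#
#     phi(p, q) + phi(p*q, r) = phi(p, r) + phi(p*r, q*r)
#
# so the returned row carries +1 in the columns for (p, r) and (p*r, q*r) and
# -1 in the columns for (p, q) and (p*q, r).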
def listEquations(M):
    ### Lists the equations obtained by evaluating all triples with x != y and y != z in the 2-cocycle equation
n = len(M)
list_of_equations = []
for x in range(0, n):
for y in range(0, n):
for z in range(0, n):
triple = (x,y,z)
if (x == y) or (y == z):
pass
else:
list_of_equations.append(computeCoeff(triple, M))
return list_of_equations
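
# Size sketch (assuming M is the n x n Cayley table of an n-element quandle):
# triples with x == y or y == z are skipped, so listEquations(M) returns
# n*(n-1)**2 rows, each with n**2 entries, one column per ordered pair (x, y).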
def computeCoboundary(M, n):
    ### Given a Cayley table, computes delta(chi_n)
length = len(M)
coboundary_row = []
for i in range(0, length):
for j in range(0, length):
cob = chi(i,n) - chi( M[i,j],n )
coboundary_row.append(cob)
return coboundary_row
def matrixCoboundaries(M):
### Lists coboundary computations in a matrix
length = len(M)
coboundary_matrix = []
for i in range(0, length):
coboundary_matrix.append(computeCoboundary(M,i))
return coboundary_matrix
def chi(m,n):
### Characteristic functions mapping the quandle to an abelian group
if m == n:
return 1
else:
return 0
def zeroDoubles(M):
    ### Sets all entries in columns corresponding to an ordered pair of the form (x, x) to zero
for i in range(0, M.shape[0]):
for j in range(0, M.shape[1], int(sqrt(M.shape[1])) + 1):
M[i, j] = 0
M[i, j] = 0
M[i, j] = 0
def display(M):
    ### Use to display the matrix if Python shortens the output
for i in range(0, len(M)):
print M[i]
def computeBetti(M):
    ### Computes the Betti number of the cohomology group as dim Z^2 - dim B^2
reduced_list = array(listEquations(M))
zeroDoubles(reduced_list)
ker_matrix = Matrix(reduced_list).rref()
ker_matrix_free_variables = array(ker_matrix[0]).shape[1] - sqrt(array(ker_matrix[0]).shape[1]) - len(ker_matrix[1])
im_matrix = Matrix(matrixCoboundaries(M)).rref()
im_matrix_pivots = len(im_matrix[1])
print "%d" % (ker_matrix_free_variables - im_matrix_pivots)
def rowReduce(array):
    ### Row reduces over the integers; z steps through the coefficient values (1, 2, ...) that it eliminates
M = array.copy()
rows = M.shape[0]
cols = M.shape[1]
done_list = []
z = 1
repeat = True
while repeat == True:
max_index = argmax(M)
max_entry = M[max_index / cols][max_index % cols]
min_index = argmin(M)
min_entry = M[min_index / cols][min_index % cols]
if z > max_entry or (abs(min_entry)) > z:
repeat = False
for j in range(0, cols):
for i in range(0, rows):
if i not in done_list:
if M[i][j] == -z:
store = M[i]
for k in range(i+1, rows):
if M[k][j] == -z:
M[k] = M[k] - store
if M[k][j] == z:
M[k] = M[k] + store
done_list.append(i)
if M[i][j] == z:
store = M[i]
for k in range(i+1, rows):
if M[k][j] == z:
M[k] = M[k] - store
if M[k][j] == -z:
M[k] = M[k] + store
done_list.append(i)
z += 1
return M
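
# Behavior sketch: for z = 1, 2, 3, ... each column is scanned for a pivot row
# containing +z or -z, and that row is added to or subtracted from the rows
# below it so the matching +z/-z entries cancel. For example (hypothetical
# 2x2 input),
#
#   rowReduce(array([[1, -1], [-1, 1]]))
#
# cancels the second row against the first and returns [[1, -1], [0, 0]].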
counter = 1
for M in quandle_list:
print "Checking %d" % counter
equations = array(listEquations(M))
zeroDoubles(equations)
mylist = rowReduce(equations)
### Removes zero rows
newlist =[]
for i in range(0, mylist.shape[0]):
for entry in list(mylist[i]):
if entry != 0:
newlist.append(list(mylist[i]))
break
eqlist = array(newlist)
### Determined the group coefficents are in. 0 implies Z
mod_num = 0
if mod_num == 0:
uselist = eqlist
else:
modlist = (array(newlist) % mod_num)
print "Modded by %d:" % mod_num
display(modlist)
print "\n"
uselist = modlist
for row in uselist:
n = 0
for entry in row:
if entry != 0:
n += 1
if n == 1:
print "Please check the follow quandle %d" % counter
break
counter += 1
| gpl-3.0 |
rtindru/django | django/db/backends/postgresql_psycopg2/introspection.py | 326 | 10060 | from __future__ import unicode_literals
from collections import namedtuple
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.encoding import force_text
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('default',))
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type codes to Django Field types.
data_types_reverse = {
16: 'BooleanField',
17: 'BinaryField',
20: 'BigIntegerField',
21: 'SmallIntegerField',
23: 'IntegerField',
25: 'TextField',
700: 'FloatField',
701: 'FloatField',
869: 'GenericIPAddressField',
1042: 'CharField', # blank-padded
1043: 'CharField',
1082: 'DateField',
1083: 'TimeField',
1114: 'DateTimeField',
1184: 'DateTimeField',
1266: 'TimeField',
1700: 'DecimalField',
}
ignored_tables = []
_get_indexes_query = """
SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND attr.attrelid = c.oid
AND attr.attnum = idx.indkey[0]
AND c.relname = %s"""
def get_field_type(self, data_type, description):
field_type = super(DatabaseIntrospection, self).get_field_type(data_type, description)
if field_type == 'IntegerField' and description.default and 'nextval' in description.default:
return 'AutoField'
return field_type
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("""
SELECT c.relname, c.relkind
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1]))
for row in cursor.fetchall()
if row[0] not in self.ignored_tables]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
        # As cursor.description does not reliably return the nullable property,
        # we have to query the information_schema (#7783)
cursor.execute("""
SELECT column_name, is_nullable, column_default
FROM information_schema.columns
WHERE table_name = %s""", [table_name])
field_map = {line[0]: line[1:] for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [FieldInfo(*((force_text(line[0]),) + line[1:6]
+ (field_map[force_text(line[0])][0] == 'YES', field_map[force_text(line[0])][1])))
for line in cursor.description]
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
cursor.execute("""
SELECT c2.relname, a1.attname, a2.attname
FROM pg_constraint con
LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
WHERE c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
relations[row[1]] = (row[2], row[0])
return relations
def get_key_columns(self, cursor, table_name):
key_columns = []
cursor.execute("""
SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
FROM information_schema.constraint_column_usage ccu
LEFT JOIN information_schema.key_column_usage kcu
ON ccu.constraint_catalog = kcu.constraint_catalog
AND ccu.constraint_schema = kcu.constraint_schema
AND ccu.constraint_name = kcu.constraint_name
LEFT JOIN information_schema.table_constraints tc
ON ccu.constraint_catalog = tc.constraint_catalog
AND ccu.constraint_schema = tc.constraint_schema
AND ccu.constraint_name = tc.constraint_name
WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
# This query retrieves each index on the given table, including the
# first associated field name
cursor.execute(self._get_indexes_query, [table_name])
indexes = {}
for row in cursor.fetchall():
# row[1] (idx.indkey) is stored in the DB as an array. It comes out as
# a string of space-separated integers. This designates the field
# indexes (1-based) of the fields that have indexes on the table.
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
if row[0] not in indexes:
indexes[row[0]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[3]:
indexes[row[0]]['primary_key'] = True
if row[2]:
indexes[row[0]]['unique'] = True
return indexes
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Loop over the key table, collecting things as constraints
# This will get PKs, FKs, and uniques, but not CHECK
cursor.execute("""
SELECT
kc.constraint_name,
kc.column_name,
c.constraint_type,
array(SELECT table_name::text || '.' || column_name::text
FROM information_schema.constraint_column_usage
WHERE constraint_name = kc.constraint_name)
FROM information_schema.key_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
kc.table_schema = %s AND
kc.table_name = %s
ORDER BY kc.ordinal_position ASC
""", ["public", table_name])
for constraint, column, kind, used_cols in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": kind.lower() == "primary key",
"unique": kind.lower() in ["primary key", "unique"],
"foreign_key": tuple(used_cols[0].split(".", 1)) if kind.lower() == "foreign key" else None,
"check": False,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get CHECK constraint columns
cursor.execute("""
SELECT kc.constraint_name, kc.column_name
FROM information_schema.constraint_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
c.constraint_type = 'CHECK' AND
kc.table_schema = %s AND
kc.table_name = %s
""", ["public", table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": True,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get indexes
cursor.execute("""
SELECT
c2.relname,
ARRAY(
SELECT (SELECT attname FROM pg_catalog.pg_attribute WHERE attnum = i AND attrelid = c.oid)
FROM unnest(idx.indkey) i
),
idx.indisunique,
idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND c.relname = %s
""", [table_name])
for index, columns, unique, primary in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": list(columns),
"primary_key": primary,
"unique": unique,
"foreign_key": None,
"check": False,
"index": True,
}
return constraints
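
# Shape sketch of the dict returned by get_constraints() (illustrative names,
# not taken from a real schema):
#
#   {
#       'myapp_book_pkey': {
#           'columns': ['id'], 'primary_key': True, 'unique': True,
#           'foreign_key': None, 'check': False, 'index': False,
#       },
#       'myapp_book_author_id_fkey': {
#           'columns': ['author_id'], 'primary_key': False, 'unique': False,
#           'foreign_key': ('myapp_author', 'id'), 'check': False, 'index': False,
#       },
#   }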
| bsd-3-clause |
auready/django | tests/flatpages_tests/test_forms.py | 4 | 4487 | from django.conf import settings
from django.contrib.flatpages.forms import FlatpageForm
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from django.utils import translation
@modify_settings(INSTALLED_APPS={'append': ['django.contrib.flatpages', ]})
@override_settings(SITE_ID=1)
class FlatpageAdminFormTests(TestCase):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
def setUp(self):
# Site fields cache needs to be cleared after flatpages is added to
# INSTALLED_APPS
Site._meta._expire_cache()
self.form_data = {
'title': "A test page",
'content': "This is a test",
'sites': [settings.SITE_ID],
}
def test_flatpage_admin_form_url_validation(self):
"The flatpage admin form correctly validates urls"
self.assertTrue(FlatpageForm(data=dict(url='/new_flatpage/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.special~chars/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.very_special~chars-here/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a space/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a % char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ! char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a & char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ? char/', **self.form_data)).is_valid())
def test_flatpage_requires_leading_slash(self):
form = FlatpageForm(data=dict(url='no_leading_slash/', **self.form_data))
with translation.override('en'):
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['url'], ["URL is missing a leading slash."])
@override_settings(APPEND_SLASH=True, MIDDLEWARE=['django.middleware.common.CommonMiddleware'])
def test_flatpage_requires_trailing_slash_with_append_slash(self):
form = FlatpageForm(data=dict(url='/no_trailing_slash', **self.form_data))
with translation.override('en'):
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['url'], ["URL is missing a trailing slash."])
@override_settings(APPEND_SLASH=False, MIDDLEWARE=['django.middleware.common.CommonMiddleware'])
def test_flatpage_doesnt_requires_trailing_slash_without_append_slash(self):
form = FlatpageForm(data=dict(url='/no_trailing_slash', **self.form_data))
self.assertTrue(form.is_valid())
def test_flatpage_admin_form_url_uniqueness_validation(self):
"The flatpage admin form correctly enforces url uniqueness among flatpages of the same site"
data = dict(url='/myflatpage1/', **self.form_data)
FlatpageForm(data=data).save()
f = FlatpageForm(data=data)
with translation.override('en'):
self.assertFalse(f.is_valid())
self.assertEqual(
f.errors,
{'__all__': ['Flatpage with url /myflatpage1/ already exists for site example.com']})
def test_flatpage_admin_form_edit(self):
"""
Existing flatpages can be edited in the admin form without triggering
the url-uniqueness validation.
"""
existing = FlatPage.objects.create(
url="/myflatpage1/", title="Some page", content="The content")
existing.sites.add(settings.SITE_ID)
data = dict(url='/myflatpage1/', **self.form_data)
f = FlatpageForm(data=data, instance=existing)
self.assertTrue(f.is_valid(), f.errors)
updated = f.save()
self.assertEqual(updated.title, "A test page")
def test_flatpage_nosites(self):
data = dict(url='/myflatpage1/', **self.form_data)
data.update({'sites': ''})
f = FlatpageForm(data=data)
self.assertFalse(f.is_valid())
self.assertEqual(
f.errors,
{'sites': [translation.ugettext('This field is required.')]})
| bsd-3-clause |
joshuajan/odoo | openerp/tools/parse_version.py | 380 | 4462 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
## this functions are taken from the setuptools package (version 0.6c8)
## http://peak.telecommunity.com/DevCenter/PkgResources#parsing-utilities
import re
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','_':'final-','rc':'c','dev':'@','saas':'','~':''}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part,part)
if not part or part=='.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*'+part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
"""Convert a version string to a chronologically-sortable key
This is a rough cross between distutils' StrictVersion and LooseVersion;
if you give it versions that would work with StrictVersion, then it behaves
the same; otherwise it acts like a slightly-smarter LooseVersion. It is
*possible* to create pathological version coding schemes that will fool
this parser, but they should be very rare in practice.
The returned value will be a tuple of strings. Numeric portions of the
version are padded to 8 digits so they will compare numerically, but
without relying on how numbers compare relative to strings. Dots are
dropped, but dashes are retained. Trailing zeros between alpha segments
or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
"2.4". Alphanumeric parts are lower-cased.
The algorithm assumes that strings like "-" and any alpha string that
alphabetically follows "final" represents a "patch level". So, "2.4-1"
is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
    considered newer than "2.4-1", which in turn is newer than "2.4".
Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
come before "final" alphabetically) are assumed to be pre-release versions,
so that the version "2.4" is considered newer than "2.4a1".
Finally, to handle miscellaneous cases, the strings "pre", "preview", and
"rc" are treated as if they were "c", i.e. as though they were release
candidates, and therefore are not as new as a version string that does not
contain them.
"""
parts = []
for part in _parse_version_parts((s or '0.1').lower()):
if part.startswith('*'):
if part<'*final': # remove '-' before a prerelease tag
while parts and parts[-1]=='*final-': parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1]=='00000000':
parts.pop()
parts.append(part)
return tuple(parts)
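
# Illustrative outputs (a sketch derived from the rules above, not an
# authoritative reference):
#
#   parse_version('2.4')    -> ('00000002', '00000004', '*final')
#   parse_version('2.4a1')  -> ('00000002', '00000004', '*a', '00000001', '*final')
#   parse_version('2.4-1')  -> ('00000002', '00000004', '*final-', '00000001', '*final')
#
# so parse_version('2.4a1') < parse_version('2.4') < parse_version('2.4-1'),
# matching the ordering described in the docstring.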
if __name__ == '__main__':
def cmp(a, b):
msg = '%s < %s == %s' % (a, b, a < b)
assert a < b, msg
return b
def chk(lst, verbose=False):
pvs = []
for v in lst:
pv = parse_version(v)
pvs.append(pv)
if verbose:
print v, pv
reduce(cmp, pvs)
chk(('0', '4.2', '4.2.3.4', '5.0.0-alpha', '5.0.0-rc1', '5.0.0-rc1.1', '5.0.0_rc2', '5.0.0_rc3', '5.0.0'), False)
chk(('5.0.0-0_rc3', '5.0.0-1dev', '5.0.0-1'), False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
albertomurillo/ansible | lib/ansible/modules/network/nxos/nxos_vtp_domain.py | 15 | 6126 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vtp_domain
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VTP domain configuration.
description:
- Manages VTP domain configuration.
author:
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- VTP feature must be active on the device to use this module.
- This module is used to manage only VTP domain names.
    - VTP domain names are case-sensitive.
- If it's never been configured before, VTP version is set to 1 by default.
      Otherwise, it leaves the previously configured version untouched.
Use M(nxos_vtp_version) to change it.
- Use this in combination with M(nxos_vtp_password) and M(nxos_vtp_version)
to fully manage VTP operations.
options:
domain:
description:
- VTP domain name.
required: true
'''
EXAMPLES = '''
# ENSURE VTP DOMAIN IS CONFIGURED
- nxos_vtp_domain:
domain: ntc
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"domain": "ntc"}
existing:
description:
- k/v pairs of existing vtp domain
returned: always
type: dict
sample: {"domain": "testing", "version": "2", "vtp_password": "\"}
end_state:
description: k/v pairs of vtp domain after module execution
returned: always
type: dict
sample: {"domain": "ntc", "version": "2", "vtp_password": "\"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["vtp domain ntc"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.network.nxos.nxos import get_capabilities
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, output='json'):
cmds = [{
'command': command,
'output': output,
}]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_vtp_config(module):
command = 'show vtp status'
body = execute_show_command(
command, module, 'text')[0]
vtp_parsed = {}
if body:
version_regex = r'.*VTP version running\s+:\s+(?P<version>\d).*'
domain_regex = r'.*VTP Domain Name\s+:\s+(?P<domain>\S+).*'
try:
match_version = re.match(version_regex, body, re.DOTALL)
version = match_version.groupdict()['version']
except AttributeError:
version = ''
try:
match_domain = re.match(domain_regex, body, re.DOTALL)
domain = match_domain.groupdict()['domain']
except AttributeError:
domain = ''
if domain and version:
vtp_parsed['domain'] = domain
vtp_parsed['version'] = version
vtp_parsed['vtp_password'] = get_vtp_password(module)
return vtp_parsed
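
# Sketch of the 'show vtp status' text the regexes above expect (illustrative
# excerpt, not captured from a real device):
#
#   VTP version running             : 2
#   VTP Domain Name                 : ntc
#
# If either line is missing, the corresponding value is an empty string and the
# function returns an empty dict.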
def get_vtp_password(module):
command = 'show vtp password'
output = 'json'
cap = get_capabilities(module)['device_info']['network_os_model']
if re.search(r'Nexus 6', cap):
output = 'text'
body = execute_show_command(command, module, output)[0]
if output == 'json':
password = body.get('passwd', '')
else:
password = ''
rp = r'VTP Password: (\S+)'
mo = re.search(rp, body)
if mo:
password = mo.group(1)
return str(password)
def main():
argument_spec = dict(
domain=dict(type='str', required=True),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
domain = module.params['domain']
existing = get_vtp_config(module)
end_state = existing
args = dict(domain=domain)
changed = False
proposed = dict((k, v) for k, v in args.items() if v is not None)
delta = dict(set(proposed.items()).difference(existing.items()))
commands = []
if delta:
commands.append(['vtp domain {0}'.format(domain)])
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_vtp_config(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
albertomurillo/ansible | lib/ansible/modules/network/avi/avi_httppolicyset.py | 28 | 5426 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_httppolicyset
author: Gaurav Rastogi (@grastogi23) <[email protected]>
short_description: Module for setup of HTTPPolicySet Avi RESTful Object
description:
- This module is used to configure HTTPPolicySet object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
cloud_config_cksum:
description:
- Checksum of cloud configuration for pool.
- Internally set by cloud connector.
created_by:
description:
- Creator name.
description:
description:
- User defined description for the object.
http_request_policy:
description:
- Http request policy for the virtual service.
http_response_policy:
description:
- Http response policy for the virtual service.
http_security_policy:
description:
- Http security policy for the virtual service.
is_internal_policy:
description:
- Boolean flag to set is_internal_policy.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
name:
description:
- Name of the http policy set.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the http policy set.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create an HTTP Policy set to switch between testpool1 and testpool2
avi_httppolicyset:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
name: test-HTTP-Policy-Set
tenant_ref: admin
http_request_policy:
rules:
- index: 1
enable: true
name: test-test1
match:
path:
match_case: INSENSITIVE
match_str:
- /test1
match_criteria: EQUALS
switching_action:
action: HTTP_SWITCHING_SELECT_POOL
status_code: HTTP_LOCAL_RESPONSE_STATUS_CODE_200
pool_ref: "/api/pool?name=testpool1"
- index: 2
enable: true
name: test-test2
match:
path:
match_case: INSENSITIVE
match_str:
- /test2
match_criteria: CONTAINS
switching_action:
action: HTTP_SWITCHING_SELECT_POOL
status_code: HTTP_LOCAL_RESPONSE_STATUS_CODE_200
pool_ref: "/api/pool?name=testpool2"
is_internal_policy: false
"""
RETURN = '''
obj:
description: HTTPPolicySet (api/httppolicyset) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
cloud_config_cksum=dict(type='str',),
created_by=dict(type='str',),
description=dict(type='str',),
http_request_policy=dict(type='dict',),
http_response_policy=dict(type='dict',),
http_security_policy=dict(type='dict',),
is_internal_policy=dict(type='bool',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'httppolicyset',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
ChrisAntaki/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/user_unittest.py | 124 | 7300 | # Copyright (C) 2010 Research in Motion Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.user import User
class UserTest(unittest.TestCase):
example_user_response = "example user response"
def test_prompt_repeat(self):
self.repeatsRemaining = 2
def mock_raw_input(message):
self.repeatsRemaining -= 1
if not self.repeatsRemaining:
return UserTest.example_user_response
return None
self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), UserTest.example_user_response)
def test_prompt_when_exceeded_repeats(self):
self.repeatsRemaining = 2
def mock_raw_input(message):
self.repeatsRemaining -= 1
return None
self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), None)
def test_prompt_with_multiple_lists(self):
def run_prompt_test(inputs, expected_result, can_choose_multiple=False):
def mock_raw_input(message):
return inputs.pop(0)
output_capture = OutputCapture()
actual_result = output_capture.assert_outputs(
self,
User.prompt_with_multiple_lists,
args=["title", ["subtitle1", "subtitle2"], [["foo", "bar"], ["foobar", "barbaz", "foobaz"]]],
kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input},
expected_stdout="title\n\nsubtitle1\n 1. foo\n 2. bar\n\nsubtitle2\n 3. foobar\n 4. barbaz\n 5. foobaz\n")
self.assertEqual(actual_result, expected_result)
self.assertEqual(len(inputs), 0)
run_prompt_test(["1"], "foo")
run_prompt_test(["badinput", "2"], "bar")
run_prompt_test(["3"], "foobar")
run_prompt_test(["4"], "barbaz")
run_prompt_test(["5"], "foobaz")
run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test(["1-3"], ["foo", "bar", "foobar"], can_choose_multiple=True)
run_prompt_test(["1-2,3"], ["foo", "bar", "foobar"], can_choose_multiple=True)
run_prompt_test(["2-1,3"], ["foobar"], can_choose_multiple=True)
run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test(["all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
run_prompt_test([""], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
run_prompt_test([" "], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
run_prompt_test(["badinput", "all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
def test_prompt_with_list(self):
def run_prompt_test(inputs, expected_result, can_choose_multiple=False):
def mock_raw_input(message):
return inputs.pop(0)
output_capture = OutputCapture()
actual_result = output_capture.assert_outputs(
self,
User.prompt_with_list,
args=["title", ["foo", "bar"]],
kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input},
expected_stdout="title\n 1. foo\n 2. bar\n")
self.assertEqual(actual_result, expected_result)
self.assertEqual(len(inputs), 0)
run_prompt_test(["1"], "foo")
run_prompt_test(["badinput", "2"], "bar")
run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test(["all"], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test([""], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test([" "], ["foo", "bar"], can_choose_multiple=True)
run_prompt_test(["badinput", "all"], ["foo", "bar"], can_choose_multiple=True)
def test_confirm(self):
test_cases = (
(("Continue? [Y/n]: ", True), (User.DEFAULT_YES, 'y')),
(("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'n')),
(("Continue? [Y/n]: ", True), (User.DEFAULT_YES, '')),
(("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'q')),
(("Continue? [y/N]: ", True), (User.DEFAULT_NO, 'y')),
(("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'n')),
(("Continue? [y/N]: ", False), (User.DEFAULT_NO, '')),
(("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'q')),
)
for test_case in test_cases:
expected, inputs = test_case
def mock_raw_input(message):
self.assertEqual(expected[0], message)
return inputs[1]
result = User().confirm(default=inputs[0],
raw_input=mock_raw_input)
self.assertEqual(expected[1], result)
def test_warn_if_application_is_xcode(self):
output = OutputCapture()
user = User()
output.assert_outputs(self, user._warn_if_application_is_xcode, ["TextMate"])
output.assert_outputs(self, user._warn_if_application_is_xcode, ["/Applications/TextMate.app"])
output.assert_outputs(self, user._warn_if_application_is_xcode, ["XCode"]) # case sensitive matching
xcode_warning = "Instead of using Xcode.app, consider using EDITOR=\"xed --wait\".\n"
output.assert_outputs(self, user._warn_if_application_is_xcode, ["Xcode"], expected_stdout=xcode_warning)
output.assert_outputs(self, user._warn_if_application_is_xcode, ["/Developer/Applications/Xcode.app"], expected_stdout=xcode_warning)
| bsd-3-clause |
camilonova/mopidy-grooveshark | setup.py | 1 | 1288 | from __future__ import unicode_literals
import re
from setuptools import setup, find_packages
def get_version(filename):
content = open(filename).read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", content))
return metadata['version']
setup(
name='Mopidy-Grooveshark',
version=get_version('mopidy_grooveshark/__init__.py'),
url='https://github.com/camilonova/mopidy-grooveshark',
license='MIT',
author='Camilo Nova',
author_email='[email protected]',
description='Mopidy extension that plays sound from Grooveshark',
long_description=open('README.md').read(),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
install_requires=[
'setuptools',
'Mopidy >= 1.0',
'pygrooveshark >= 3.1',
],
entry_points={
'mopidy.ext': [
'grooveshark = mopidy_grooveshark:Extension',
],
},
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Multimedia :: Sound/Audio :: Players',
],
)
| mit |
eflglobal/class-registry | test/entry_points_test.py | 1 | 3820 | # coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
from os.path import dirname
from unittest import TestCase
from pkg_resources import iter_entry_points, working_set
from class_registry import EntryPointClassRegistry, RegistryKeyError
from test import Bulbasaur, Charmander, Mew, PokemonFactory, Squirtle
def setUpModule():
#
# Install a fake distribution that we can use to inject entry
# points at runtime.
#
# The side effects from this are pretty severe, but they (very
# probably) only impact this test, and they are undone as soon as
# the process terminates.
#
working_set.add_entry(dirname(__file__))
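
# The dummy distribution added above is assumed to expose entry points roughly
# like the following (illustrative contents of
# dummy_package.egg-info/entry_points.txt, not the literal file):
#
#   [pokemon]
#   fire = test:Charmander
#   grass = test:Bulbasaur
#   water = test:Squirtle
#   psychic = test:PokemonFactory.create_psychic_pokemon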
class EntryPointClassRegistryTestCase(TestCase):
def test_happy_path(self):
"""
Loading classes automatically via entry points.
See ``dummy_package.egg-info/entry_points.txt`` for more info.
"""
registry = EntryPointClassRegistry('pokemon')
fire = registry['fire']
self.assertIsInstance(fire, Charmander)
self.assertIsNone(fire.name)
grass = registry.get('grass')
self.assertIsInstance(grass, Bulbasaur)
self.assertIsNone(grass.name)
water = registry.get('water', name='archibald')
self.assertIsInstance(water, Squirtle)
self.assertEqual(water.name, 'archibald')
# The 'psychic' entry point actually references a function, but
# it works exactly the same as a class.
psychic = registry.get('psychic', 'snuggles')
self.assertIsInstance(psychic, Mew)
self.assertEqual(psychic.name, 'snuggles')
def test_branding(self):
"""
Configuring the registry to "brand" each class/instance with its
corresponding key.
"""
registry = EntryPointClassRegistry('pokemon', attr_name='poke_type')
try:
# Branding is applied immediately to each registered class.
self.assertEqual(getattr(Charmander, 'poke_type'), 'fire')
self.assertEqual(getattr(Squirtle, 'poke_type'), 'water')
# Instances, too!
self.assertEqual(registry['fire'].poke_type, 'fire')
self.assertEqual(registry.get('water', 'phil').poke_type, 'water')
# Registered functions and methods can't be branded this
# way, though...
self.assertFalse(
hasattr(PokemonFactory.create_psychic_pokemon, 'poke_type'),
)
# ... but we can brand the resulting instances.
self.assertEqual(registry['psychic'].poke_type, 'psychic')
self.assertEqual(registry.get('psychic').poke_type, 'psychic')
finally:
# Clean up after ourselves.
for cls in registry.values():
if isinstance(cls, type):
try:
delattr(cls, 'poke_type')
except AttributeError:
pass
def test_len(self):
"""
Getting the length of an entry point class registry.
"""
# Just in case some other package defines pokémon entry
# points (:
expected = len(list(iter_entry_points('pokemon')))
# Quick sanity check, to make sure our test pokémon are
# registered correctly.
self.assertGreaterEqual(expected, 4)
registry = EntryPointClassRegistry('pokemon')
self.assertEqual(len(registry), expected)
def test_error_wrong_group(self):
"""
        The registry can't find entry points associated with the wrong group.
"""
registry = EntryPointClassRegistry('random')
with self.assertRaises(RegistryKeyError):
            # The dummy project registers 'fire' under the 'pokemon' group,
            # so the 'random' registry cannot resolve it.
registry.get('fire')
| mit |
onedata/helpers | test/integration/webdav_helper_test/webdav_helper_test.py | 1 | 7184 | """This module tests WebDAV helper."""
__author__ = "Bartek Kryza"
__copyright__ = """(C) 2018 ACK CYFRONET AGH,
This software is released under the MIT license cited in 'LICENSE.txt'."""
import os
import sys
import time
import subprocess
from os.path import expanduser
from urlparse import urlparse
import pytest
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.dirname(script_dir))
# noinspection PyUnresolvedReferences
from test_common import *
# noinspection PyUnresolvedReferences
from environment import common, docker, webdav
from webdav_helper import WebDAVHelperProxy
#
# Import test cases selectively; the commented-out imports indicate
# test cases which will not work with the WebDAV helper
#
from common_test_base import \
file_id, \
test_write_should_write_empty_data, \
test_write_should_write_data, \
test_write_should_append_data, \
test_write_should_prepend_data, \
test_write_should_merge_data, \
test_write_should_overwrite_data_left, \
test_write_should_overwrite_data_right, \
test_write_should_overwrite_data_middle, \
test_read_shoud_not_read_data, \
test_read_should_read_data, \
test_read_should_read_all_possible_ranges, \
test_read_should_pad_prefix_with_zeros, \
test_read_should_read_data_with_holes, \
test_read_should_read_empty_segment, \
test_unlink_should_delete_empty_data, \
test_truncate_should_increase_file_size
# test_truncate_should_decrease_file_size
from io_perf_test_base import \
test_write, \
test_write_read, \
test_read_write_truncate_unlink
# test_truncate
from posix_test_base import \
test_read_should_read_written_data, \
test_read_should_error_file_not_found, \
test_mkdir_should_create_directory, \
test_rename_directory_should_rename, \
test_readdir_should_list_files_in_directory, \
test_unlink_should_pass_errors, \
test_unlink_should_delete_file, \
test_mknod_should_create_regular_file_by_default, \
test_chown_should_change_user_and_group, \
test_read_should_not_read_after_end_of_file, \
test_read_write_large_file_should_maintain_consistency
# test_symlink_should_create_link
# test_link_should_create_hard_link
# test_mknod_should_set_premissions
# test_truncate_should_not_create_file
from xattr_test_base import \
test_setxattr_should_set_extended_attribute, \
test_setxattr_should_set_large_extended_attribute, \
test_setxattr_should_set_extended_attribute_with_empty_value, \
test_getxattr_should_return_extended_attribute, \
test_listxattr_should_list_extended_attribute
# test_removexattr_should_remove_extended_attribute, \
# test_setxattr_should_handle_create_replace_flags
@pytest.fixture(scope='module')
def server(request):
class Server(object):
def __init__(self, endpoint, credentials):
self.endpoint = endpoint
self.credentials = credentials
result = webdav.up('onedata/sabredav:v2', 'storage',
common.generate_uid())
[container] = result['docker_ids']
credentials = result['credentials'].encode('ascii')
endpoint = result['endpoint'].encode('ascii')
def fin():
docker.remove([container], force=True, volumes=True)
request.addfinalizer(fin)
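    # Give the WebDAV (SabreDAV) container time to start before tests connect.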
time.sleep(15)
return Server(endpoint, credentials)
@pytest.fixture
def helper(server):
return WebDAVHelperProxy(server.endpoint, server.credentials)
@pytest.fixture
def helper_redirect(server):
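    # Proxy pointed at an alternate port of the test server; used by the
    # redirect-following tests below.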
redirect_port = "8080"
endpoint = urlparse(server.endpoint)
redirect_url = endpoint._replace(
netloc=endpoint.netloc.replace(
str(endpoint.port), redirect_port)).geturl()
return WebDAVHelperProxy(redirect_url, server.credentials)
def test_read_should_follow_temporary_redirect(helper, helper_redirect, file_id):
data = random_str()
helper.write(file_id, data, 0)
data2 = helper_redirect.read(file_id, 0, len(data))
assert data == data2
def test_write_should_follow_temporary_redirect(helper_redirect, file_id):
data = random_str()
helper_redirect.write(file_id, data, 0)
data2 = helper_redirect.read(file_id, 0, len(data))
assert data == data2
@pytest.mark.directory_operations_tests
def test_mknod_should_return_enoent_on_missing_parent(helper, file_id):
dir1_id = random_str()
dir2_id = random_str()
dir3_id = random_str()
data = random_str()
offset = random_int()
with pytest.raises(RuntimeError) as excinfo:
helper.write(dir1_id+"/"+dir2_id+"/"+dir3_id+"/"+file_id,
data, offset)
assert 'No such file or directory' in str(excinfo.value)
@pytest.mark.directory_operations_tests
def test_rmdir_should_remove_directory(helper, file_id):
dir_id = file_id
file1_id = random_str()
file2_id = random_str()
data = random_str()
offset = random_int()
try:
helper.mkdir(dir_id, 0777)
helper.write(dir_id+"/"+file1_id, data, offset)
helper.write(dir_id+"/"+file2_id, data, offset)
except:
pytest.fail("Couldn't create directory: %s"%(dir_id))
helper.unlink(dir_id+"/"+file1_id, 0)
helper.unlink(dir_id+"/"+file2_id, 0)
with pytest.raises(RuntimeError) as excinfo:
helper.read(dir_id+"/"+file1_id, offset, len(data))
assert 'No such file or directory' in str(excinfo.value)
helper.rmdir(dir_id)
with pytest.raises(RuntimeError) as excinfo:
helper.readdir(dir_id, 0, 1024)
assert 'No such file or directory' in str(excinfo.value)
def test_getattr_should_return_default_permissions(helper, file_id):
dir_id = file_id
data = random_str()
offset = random_int()
default_dir_mode = 0775
default_file_mode = 0664
try:
helper.mkdir(dir_id, 0777)
helper.write(dir_id+"/"+file_id, data, offset)
except:
pytest.fail("Couldn't create directory: %s"%(dir_id))
# WebDAV doesn't store permissions, so the dir_id directory will
    # return the permissions defined in the helper, not the ones used
    # in the mkdir call
assert helper.getattr(dir_id).st_mode&0777 == default_dir_mode
assert helper.getattr(dir_id+"/"+file_id).st_mode&0777 == default_file_mode
def test_readdir_should_handle_offset_properly(helper):
def to_python_list(readdir_result):
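        # Materialize the readdir result as a sorted list for stable comparison.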
r = [e for e in readdir_result]
r.sort()
return r
test_dir = 'offset_test'
helper.mkdir(test_dir, 0777)
files = ['file{}.txt'.format(i,) for i in (1, 2, 3, 4, 5)]
for file in files:
helper.write(test_dir+'/'+file, random_str(), 0)
dirs = to_python_list(helper.readdir(test_dir, 0, 100))
assert dirs == files
dirs = to_python_list(helper.readdir(test_dir, 0, 1))
assert dirs == files[0:1]
dirs = to_python_list(helper.readdir(test_dir, 0, 2))
assert dirs == files[0:2]
dirs = to_python_list(helper.readdir(test_dir, 3, 100))
assert dirs == files[3:5]
dirs = to_python_list(helper.readdir(test_dir, 100, 100))
assert dirs == []
dirs = to_python_list(helper.readdir(test_dir, 0, 0))
assert dirs == []
| mit |
dushu1203/chromium.src | chrome/test/ispy/ispy_api_unittest.py | 100 | 2835 | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from PIL import Image
import ispy_api
from common import cloud_bucket
from common import mock_cloud_bucket
class ISpyApiTest(unittest.TestCase):
"""Unittest for the ISpy API."""
def setUp(self):
self.cloud_bucket = mock_cloud_bucket.MockCloudBucket()
self.ispy = ispy_api.ISpyApi(self.cloud_bucket)
self.white_img = Image.new('RGBA', (10, 10), (255, 255, 255, 255))
self.black_img = Image.new('RGBA', (10, 10), (0, 0, 0, 255))
def testGenerateExpectationsRunComparison(self):
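    # Generate an expectation from two white images, then check that a white
    # run produces no failure while a black run is recorded as one.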
self.ispy.GenerateExpectation(
'device', 'test', '1.1.1.1', 'versions.json',
[self.white_img, self.white_img])
self.ispy.UpdateExpectationVersion('1.1.1.1', 'versions.json')
self.ispy.PerformComparison(
'test1', 'device', 'test', '1.1.1.1', 'versions.json', self.white_img)
expect_name = self.ispy._CreateExpectationName(
'device', 'test', '1.1.1.1')
self.assertFalse(self.ispy._ispy.FailureExists('test1', expect_name))
self.ispy.PerformComparison(
'test2', 'device', 'test', '1.1.1.1','versions.json', self.black_img)
self.assertTrue(self.ispy._ispy.FailureExists('test2', expect_name))
def testUpdateExpectationVersion(self):
self.ispy.UpdateExpectationVersion('1.0.0.0', 'versions.json')
self.ispy.UpdateExpectationVersion('1.0.4.0', 'versions.json')
self.ispy.UpdateExpectationVersion('2.1.5.0', 'versions.json')
self.ispy.UpdateExpectationVersion('1.1.5.0', 'versions.json')
self.ispy.UpdateExpectationVersion('0.0.0.0', 'versions.json')
self.ispy.UpdateExpectationVersion('1.1.5.0', 'versions.json')
self.ispy.UpdateExpectationVersion('0.0.0.1', 'versions.json')
versions = json.loads(self.cloud_bucket.DownloadFile('versions.json'))
self.assertEqual(versions,
['2.1.5.0', '1.1.5.0', '1.0.4.0', '1.0.0.0', '0.0.0.1', '0.0.0.0'])
def testPerformComparisonAndPrepareExpectation(self):
self.assertFalse(self.ispy.CanRebaselineToTestRun('test'))
self.assertRaises(
cloud_bucket.FileNotFoundError,
self.ispy.PerformComparisonAndPrepareExpectation,
'test', 'device', 'expect', '1.0', 'versions.json',
[self.white_img, self.white_img])
self.assertTrue(self.ispy.CanRebaselineToTestRun('test'))
self.ispy.RebaselineToTestRun('test')
versions = json.loads(self.cloud_bucket.DownloadFile('versions.json'))
self.assertEqual(versions, ['1.0'])
self.ispy.PerformComparisonAndPrepareExpectation(
'test1', 'device', 'expect', '1.1', 'versions.json',
[self.white_img, self.white_img])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
ewindisch/nova | nova/tests/network/test_neutronv2.py | 1 | 98798 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import uuid
import mox
from neutronclient.common import exceptions
from neutronclient.v2_0 import client
from oslo.config import cfg
import six
from nova.compute import flavors
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova import context
from nova import exception
from nova.network import model
from nova.network import neutronv2
from nova.network.neutronv2 import api as neutronapi
from nova.network.neutronv2 import constants
from nova.openstack.common import jsonutils
from nova.openstack.common import local
from nova import test
from nova import utils
CONF = cfg.CONF
#NOTE: Neutron client raises Exception which is discouraged by HACKING.
# We set this variable here and use it for assertions below to avoid
# the hacking checks until we can make neutron client throw a custom
# exception class instead.
NEUTRON_CLIENT_EXCEPTION = Exception
class MyComparator(mox.Comparator):
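    # mox comparator that deep-compares nested dicts, lists and tuples against
    # the expected value, ignoring list/tuple ordering.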
def __init__(self, lhs):
self.lhs = lhs
def _com_dict(self, lhs, rhs):
if len(lhs) != len(rhs):
return False
for key, value in lhs.iteritems():
if key not in rhs:
return False
rhs_value = rhs[key]
if not self._com(value, rhs_value):
return False
return True
def _com_list(self, lhs, rhs):
if len(lhs) != len(rhs):
return False
for lhs_value in lhs:
if lhs_value not in rhs:
return False
return True
def _com(self, lhs, rhs):
if lhs is None:
return rhs is None
if isinstance(lhs, dict):
if not isinstance(rhs, dict):
return False
return self._com_dict(lhs, rhs)
if isinstance(lhs, list):
if not isinstance(rhs, list):
return False
return self._com_list(lhs, rhs)
if isinstance(lhs, tuple):
if not isinstance(rhs, tuple):
return False
return self._com_list(lhs, rhs)
return lhs == rhs
def equals(self, rhs):
return self._com(self.lhs, rhs)
def __repr__(self):
return str(self.lhs)
class TestNeutronClient(test.TestCase):
def test_withtoken(self):
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
my_context = context.RequestContext('userid',
'my_tenantid',
auth_token='token')
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
auth_strategy=None,
endpoint_url=CONF.neutron_url,
token=my_context.auth_token,
timeout=CONF.neutron_url_timeout,
insecure=False,
ca_cert=None).AndReturn(None)
self.mox.ReplayAll()
neutronv2.get_client(my_context)
def test_withouttoken(self):
my_context = context.RequestContext('userid', 'my_tenantid')
self.assertRaises(exceptions.Unauthorized,
neutronv2.get_client,
my_context)
def test_withtoken_context_is_admin(self):
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
my_context = context.RequestContext('userid',
'my_tenantid',
auth_token='token',
is_admin=True)
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
auth_strategy=None,
endpoint_url=CONF.neutron_url,
token=my_context.auth_token,
timeout=CONF.neutron_url_timeout,
insecure=False,
ca_cert=None).AndReturn(None)
self.mox.ReplayAll()
# Note that although we have admin set in the context we
# are not asking for an admin client, and so we auth with
# our own token
neutronv2.get_client(my_context)
def test_withouttoken_keystone_connection_error(self):
self.flags(neutron_auth_strategy='keystone')
self.flags(neutron_url='http://anyhost/')
my_context = context.RequestContext('userid', 'my_tenantid')
self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
neutronv2.get_client,
my_context)
class TestNeutronv2Base(test.TestCase):
def setUp(self):
super(TestNeutronv2Base, self).setUp()
self.context = context.RequestContext('userid', 'my_tenantid')
setattr(self.context,
'auth_token',
'bff4a5a6b9eb4ea2a6efec6eefb77936')
self.instance = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
'uuid': str(uuid.uuid4()),
'display_name': 'test_instance',
'availability_zone': 'nova',
'host': 'some_host',
'security_groups': [],
'vm_state': vm_states.ACTIVE}
self.instance2 = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
'uuid': str(uuid.uuid4()),
'display_name': 'test_instance2',
'availability_zone': 'nova',
'security_groups': [],
'vm_state': vm_states.ACTIVE}
self.nets1 = [{'id': 'my_netid1',
'name': 'my_netname1',
'tenant_id': 'my_tenantid'}]
self.nets2 = []
self.nets2.append(self.nets1[0])
self.nets2.append({'id': 'my_netid2',
'name': 'my_netname2',
'tenant_id': 'my_tenantid'})
self.nets3 = self.nets2 + [{'id': 'my_netid3',
'name': 'my_netname3',
'tenant_id': 'my_tenantid'}]
self.nets4 = [{'id': 'his_netid4',
'name': 'his_netname4',
'tenant_id': 'his_tenantid'}]
self.nets = [self.nets1, self.nets2, self.nets3, self.nets4]
self.port_address = '10.0.1.2'
self.port_data1 = [{'network_id': 'my_netid1',
'device_id': self.instance2['uuid'],
'device_owner': 'compute:nova',
'id': 'my_portid1',
'status': 'DOWN',
'admin_state_up': True,
'fixed_ips': [{'ip_address': self.port_address,
'subnet_id': 'my_subid1'}],
'mac_address': 'my_mac1', }]
self.float_data1 = [{'port_id': 'my_portid1',
'fixed_ip_address': self.port_address,
'floating_ip_address': '172.0.1.2'}]
self.dhcp_port_data1 = [{'fixed_ips': [{'ip_address': '10.0.1.9',
'subnet_id': 'my_subid1'}],
'status': 'ACTIVE',
'admin_state_up': True}]
self.port_address2 = '10.0.2.2'
self.port_data2 = []
self.port_data2.append(self.port_data1[0])
self.port_data2.append({'network_id': 'my_netid2',
'device_id': self.instance['uuid'],
'admin_state_up': True,
'status': 'ACTIVE',
'device_owner': 'compute:nova',
'id': 'my_portid2',
'fixed_ips':
[{'ip_address': self.port_address2,
'subnet_id': 'my_subid2'}],
'mac_address': 'my_mac2', })
self.float_data2 = []
self.float_data2.append(self.float_data1[0])
self.float_data2.append({'port_id': 'my_portid2',
'fixed_ip_address': '10.0.2.2',
'floating_ip_address': '172.0.2.2'})
self.port_data3 = [{'network_id': 'my_netid1',
'device_id': 'device_id3',
'status': 'DOWN',
'admin_state_up': True,
'device_owner': 'compute:nova',
'id': 'my_portid3',
'fixed_ips': [], # no fixed ip
'mac_address': 'my_mac3', }]
self.subnet_data1 = [{'id': 'my_subid1',
'cidr': '10.0.1.0/24',
'network_id': 'my_netid1',
'gateway_ip': '10.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
self.subnet_data2 = []
self.subnet_data_n = [{'id': 'my_subid1',
'cidr': '10.0.1.0/24',
'network_id': 'my_netid1',
'gateway_ip': '10.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']},
{'id': 'my_subid2',
'cidr': '20.0.1.0/24',
'network_id': 'my_netid2',
'gateway_ip': '20.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
self.subnet_data2.append({'id': 'my_subid2',
'cidr': '10.0.2.0/24',
'network_id': 'my_netid2',
'gateway_ip': '10.0.2.1',
'dns_nameservers': ['8.8.2.1', '8.8.2.2']})
self.fip_pool = {'id': '4fdbfd74-eaf8-4884-90d9-00bd6f10c2d3',
'name': 'ext_net',
'router:external': True,
'tenant_id': 'admin_tenantid'}
self.fip_pool_nova = {'id': '435e20c3-d9f1-4f1b-bee5-4611a1dd07db',
'name': 'nova',
'router:external': True,
'tenant_id': 'admin_tenantid'}
self.fip_unassociated = {'tenant_id': 'my_tenantid',
'id': 'fip_id1',
'floating_ip_address': '172.24.4.227',
'floating_network_id': self.fip_pool['id'],
'port_id': None,
'fixed_ip_address': None,
'router_id': None}
fixed_ip_address = self.port_data2[1]['fixed_ips'][0]['ip_address']
self.fip_associated = {'tenant_id': 'my_tenantid',
'id': 'fip_id2',
'floating_ip_address': '172.24.4.228',
'floating_network_id': self.fip_pool['id'],
'port_id': self.port_data2[1]['id'],
'fixed_ip_address': fixed_ip_address,
'router_id': 'router_id1'}
self._returned_nw_info = []
self.mox.StubOutWithMock(neutronv2, 'get_client')
self.moxed_client = self.mox.CreateMock(client.Client)
self.addCleanup(CONF.reset)
self.addCleanup(self.mox.VerifyAll)
self.addCleanup(self.mox.UnsetStubs)
self.addCleanup(self.stubs.UnsetAll)
def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
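        # Record the neutron client calls expected from allocate_for_instance()
        # for the selected network fixture and keyword options, put mox into
        # replay mode, and return the stubbed API object.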
api = neutronapi.API()
self.mox.StubOutWithMock(api, 'get_instance_nw_info')
has_portbinding = False
has_extra_dhcp_opts = False
dhcp_options = kwargs.get('dhcp_options')
if dhcp_options is not None:
has_extra_dhcp_opts = True
if kwargs.get('portbinding'):
has_portbinding = True
api.extensions[constants.PORTBINDING_EXT] = 1
self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
neutronv2.get_client(
mox.IgnoreArg(), admin=True).MultipleTimes().AndReturn(
self.moxed_client)
api._refresh_neutron_extensions_cache(mox.IgnoreArg())
else:
self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
# Net idx is 1-based for compatibility with existing unit tests
nets = self.nets[net_idx - 1]
ports = {}
fixed_ips = {}
macs = kwargs.get('macs')
if macs:
macs = set(macs)
req_net_ids = []
if 'requested_networks' in kwargs:
for id, fixed_ip, port_id in kwargs['requested_networks']:
if port_id:
self.moxed_client.show_port(port_id).AndReturn(
{'port': {'id': 'my_portid1',
'network_id': 'my_netid1',
'mac_address': 'my_mac1',
'device_id': kwargs.get('_device') and
self.instance2['uuid'] or ''}})
ports['my_netid1'] = self.port_data1[0]
id = 'my_netid1'
if macs is not None:
macs.discard('my_mac1')
else:
fixed_ips[id] = fixed_ip
req_net_ids.append(id)
expected_network_order = req_net_ids
else:
expected_network_order = [n['id'] for n in nets]
if kwargs.get('_break') == 'pre_list_networks':
self.mox.ReplayAll()
return api
search_ids = [net['id'] for net in nets if net['id'] in req_net_ids]
if search_ids:
mox_list_params = {'id': mox.SameElementsAs(search_ids)}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
else:
mox_list_params = {'tenant_id': self.instance['project_id'],
'shared': False}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
mox_list_params = {'shared': True}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': []})
ports_in_requested_net_order = []
for net_id in expected_network_order:
port_req_body = {
'port': {
'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
},
}
if has_portbinding:
port_req_body['port']['binding:host_id'] = (
self.instance.get('host'))
port = ports.get(net_id, None)
if not has_portbinding:
api._populate_neutron_extension_values(mox.IgnoreArg(),
self.instance, mox.IgnoreArg()).AndReturn(None)
else:
# since _populate_neutron_extension_values() will call
# _has_port_binding_extension()
api._has_port_binding_extension(mox.IgnoreArg()).\
AndReturn(has_portbinding)
api._has_port_binding_extension(mox.IgnoreArg()).\
AndReturn(has_portbinding)
if port:
port_id = port['id']
self.moxed_client.update_port(port_id,
MyComparator(port_req_body)
).AndReturn(
{'port': port})
ports_in_requested_net_order.append(port_id)
else:
fixed_ip = fixed_ips.get(net_id)
if fixed_ip:
port_req_body['port']['fixed_ips'] = [{'ip_address':
fixed_ip}]
port_req_body['port']['network_id'] = net_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = \
self.instance['project_id']
if macs:
port_req_body['port']['mac_address'] = macs.pop()
if has_portbinding:
port_req_body['port']['binding:host_id'] = (
self.instance.get('host'))
res_port = {'port': {'id': 'fake'}}
if has_extra_dhcp_opts:
port_req_body['port']['extra_dhcp_opts'] = dhcp_options
if kwargs.get('_break') == 'mac' + net_id:
self.mox.ReplayAll()
return api
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn(res_port)
ports_in_requested_net_order.append(res_port['port']['id'])
api.get_instance_nw_info(mox.IgnoreArg(),
self.instance,
networks=nets,
port_ids=ports_in_requested_net_order
).AndReturn(self._returned_nw_info)
self.mox.ReplayAll()
return api
def _verify_nw_info(self, nw_inf, index=0):
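        # Check the network info entry at `index` against the fixture data:
        # fixed/floating addresses, network label, port id, MAC, CIDR and DNS.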
id_suffix = index + 1
self.assertEqual('10.0.%s.2' % id_suffix,
nw_inf.fixed_ips()[index]['address'])
self.assertEqual('172.0.%s.2' % id_suffix,
nw_inf.fixed_ips()[index].floating_ip_addresses()[0])
self.assertEqual('my_netname%s' % id_suffix,
nw_inf[index]['network']['label'])
self.assertEqual('my_portid%s' % id_suffix, nw_inf[index]['id'])
self.assertEqual('my_mac%s' % id_suffix, nw_inf[index]['address'])
self.assertEqual('10.0.%s.0/24' % id_suffix,
nw_inf[index]['network']['subnets'][0]['cidr'])
self.assertTrue(model.IP(address='8.8.%s.1' % id_suffix,
version=4, type='dns') in
nw_inf[index]['network']['subnets'][0]['dns'])
def _get_instance_nw_info(self, number):
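        # Stub the port, network, floating IP and subnet lookups needed to
        # build network info for `number` ports, then verify each entry.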
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(mox.IgnoreArg(),
self.instance['uuid'],
mox.IgnoreArg())
port_data = number == 1 and self.port_data1 or self.port_data2
nets = number == 1 and self.nets1 or self.nets2
net_info_cache = []
for port in port_data:
net_info_cache.append({"network": {"id": port['network_id']},
"id": port['id']})
instance = copy.copy(self.instance)
# This line here does not wrap net_info_cache in jsonutils.dumps()
# intentionally to test the other code path when it's not unicode.
instance['info_cache'] = {'network_info': net_info_cache}
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data})
net_ids = [port['network_id'] for port in port_data]
nets = number == 1 and self.nets1 or self.nets2
self.moxed_client.list_networks(
id=net_ids).AndReturn({'networks': nets})
for i in xrange(1, number + 1):
float_data = number == 1 and self.float_data1 or self.float_data2
for ip in port_data[i - 1]['fixed_ips']:
float_data = [x for x in float_data
if x['fixed_ip_address'] == ip['ip_address']]
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=port_data[i - 1]['id']).AndReturn(
{'floatingips': float_data})
subnet_data = i == 1 and self.subnet_data1 or self.subnet_data2
self.moxed_client.list_subnets(
id=mox.SameElementsAs(['my_subid%s' % i])).AndReturn(
{'subnets': subnet_data})
self.moxed_client.list_ports(
network_id=subnet_data[0]['network_id'],
device_owner='network:dhcp').AndReturn(
{'ports': []})
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context, instance)
for i in xrange(0, number):
self._verify_nw_info(nw_inf, i)
def _allocate_for_instance(self, net_idx=1, **kwargs):
api = self._stub_allocate_for_instance(net_idx, **kwargs)
return api.allocate_for_instance(self.context, self.instance, **kwargs)
class TestNeutronv2(TestNeutronv2Base):
def setUp(self):
super(TestNeutronv2, self).setUp()
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
def test_get_instance_nw_info_1(self):
# Test to get one port in one network and subnet.
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(1)
def test_get_instance_nw_info_2(self):
# Test to get one port in each of two networks and subnets.
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(2)
def test_get_instance_nw_info_with_nets_add_interface(self):
# This tests that adding an interface to an instance does not
        # remove the first interface from the instance.
network_model = model.Network(id='network_id',
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': self.port_data2[0]['id'],
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
self.port_data2,
self.nets2,
[self.port_data2[1]['id']])
def test_get_instance_nw_info_remove_ports_from_neutron(self):
# This tests that when a port is removed in neutron it
        # is also removed from nova's network info cache.
network_model = model.Network(id=self.port_data2[0]['network_id'],
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': 'network_id',
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
self.port_data2,
None,
None)
    def test_get_instance_nw_info_ignores_neutron_ports(self):
# Tests that only ports in the network_cache are updated
# and ports returned from neutron that match the same
# instance_id/device_id are ignored.
port_data2 = copy.copy(self.port_data2)
# set device_id on the ports to be the same.
port_data2[1]['device_id'] = port_data2[0]['device_id']
network_model = model.Network(id='network_id',
bridge='br-int',
injected='injected',
label='fake_network',
tenant_id='fake_tenant')
network_cache = {'info_cache': {
'network_info': [{'id': 'network_id',
'address': 'mac_address',
'network': network_model,
'type': 'ovs',
'ovs_interfaceid': 'ovs_interfaceid',
'devname': 'devname'}]}}
self._fake_get_instance_nw_info_helper(network_cache,
port_data2,
None,
None)
def _fake_get_instance_nw_info_helper(self, network_cache,
current_neutron_ports,
networks=None, port_ids=None):
"""Helper function to test get_instance_nw_info.
        :param network_cache: data already in the nova network cache.
        :param current_neutron_ports: updated list of ports from neutron.
        :param networks: networks of ports being added to instance.
        :param port_ids: new ports being added to instance.
"""
# keep a copy of the original ports/networks to pass to
# get_instance_nw_info() as the code below changes them.
original_port_ids = copy.copy(port_ids)
original_networks = copy.copy(networks)
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
mox.IgnoreArg(),
self.instance['uuid'], mox.IgnoreArg())
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': current_neutron_ports})
ifaces = network_cache['info_cache']['network_info']
if port_ids is None:
port_ids = [iface['id'] for iface in ifaces]
net_ids = [iface['network']['id'] for iface in ifaces]
nets = [{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces]
if networks is None:
self.moxed_client.list_networks(
id=net_ids).AndReturn({'networks': nets})
else:
networks = networks + [
dict(id=iface['network']['id'],
name=iface['network']['label'],
tenant_id=iface['network']['meta']['tenant_id'])
for iface in ifaces]
port_ids = [iface['id'] for iface in ifaces] + port_ids
index = 0
for current_neutron_port in current_neutron_ports:
if current_neutron_port['id'] in port_ids:
for ip in current_neutron_port['fixed_ips']:
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=current_neutron_port['id']).AndReturn(
{'floatingips': [self.float_data2[index]]})
self.moxed_client.list_subnets(
id=mox.SameElementsAs([ip['subnet_id']])
).AndReturn(
{'subnets': [self.subnet_data_n[index]]})
self.moxed_client.list_ports(
network_id=current_neutron_port['network_id'],
device_owner='network:dhcp').AndReturn(
{'ports': self.dhcp_port_data1})
index += 1
self.mox.ReplayAll()
self.instance['info_cache'] = network_cache
instance = copy.copy(self.instance)
instance['info_cache'] = network_cache['info_cache']
nw_inf = api.get_instance_nw_info(self.context,
instance,
networks=original_networks,
port_ids=original_port_ids)
self.assertEqual(index, len(nw_inf))
def test_get_instance_nw_info_without_subnet(self):
# Test get instance_nw_info for a port without subnet.
api = neutronapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
mox.IgnoreArg(),
self.instance['uuid'], mox.IgnoreArg())
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': self.port_data3})
self.moxed_client.list_networks(
id=[self.port_data1[0]['network_id']]).AndReturn(
{'networks': self.nets1})
neutronv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self.mox.StubOutWithMock(conductor_api.API,
'instance_get_by_uuid')
net_info_cache = []
for port in self.port_data3:
net_info_cache.append({"network": {"id": port['network_id']},
"id": port['id']})
instance = copy.copy(self.instance)
instance['info_cache'] = {'network_info':
six.text_type(
jsonutils.dumps(net_info_cache))}
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context,
instance)
id_suffix = 3
self.assertEqual(0, len(nw_inf.fixed_ips()))
self.assertEqual('my_netname1', nw_inf[0]['network']['label'])
self.assertEqual('my_portid%s' % id_suffix, nw_inf[0]['id'])
self.assertEqual('my_mac%s' % id_suffix, nw_inf[0]['address'])
self.assertEqual(0, len(nw_inf[0]['network']['subnets']))
def test_refresh_neutron_extensions_cache(self):
api = neutronapi.API()
# Note: Don't want the default get_client from setUp()
self.mox.ResetAll()
neutronv2.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': 'nvp-qos'}]})
self.mox.ReplayAll()
api._refresh_neutron_extensions_cache(mox.IgnoreArg())
self.assertEqual({'nvp-qos': {'name': 'nvp-qos'}}, api.extensions)
def test_populate_neutron_extension_values_rxtx_factor(self):
api = neutronapi.API()
# Note: Don't want the default get_client from setUp()
self.mox.ResetAll()
neutronv2.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': 'nvp-qos'}]})
self.mox.ReplayAll()
flavor = flavors.get_default_flavor()
flavor['rxtx_factor'] = 1
sys_meta = utils.dict_to_metadata(
flavors.save_flavor_info({}, flavor))
instance = {'system_metadata': sys_meta}
port_req_body = {'port': {}}
api._populate_neutron_extension_values(self.context, instance,
port_req_body)
self.assertEqual(port_req_body['port']['rxtx_factor'], 1)
def test_allocate_for_instance_1(self):
# Allocate one port in one network env.
self._allocate_for_instance(1)
def test_allocate_for_instance_2(self):
# Allocate one port in two networks env.
self._allocate_for_instance(2)
def test_allocate_for_instance_accepts_macs_kwargs_None(self):
# The macs kwarg should be accepted as None.
self._allocate_for_instance(1, macs=None)
def test_allocate_for_instance_accepts_macs_kwargs_set(self):
        # The macs kwarg should be accepted as a set; the
        # _allocate_for_instance helper checks that the mac is used to create
        # a port.
self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))
def test_allocate_for_instance_accepts_only_portid(self):
# Make sure allocate_for_instance works when only a portid is provided
self._returned_nw_info = self.port_data1
result = self._allocate_for_instance(
requested_networks=[(None, None, 'my_portid1')])
self.assertEqual(self.port_data1, result)
def test_allocate_for_instance_not_enough_macs_via_ports(self):
        # Using a hypervisor MAC via a pre-created port will stop it from being
# used to dynamically create a port on a network. We put the network
# first in requested_networks so that if the code were to not pre-check
# requested ports, it would incorrectly assign the mac and not fail.
requested_networks = [
(self.nets2[1]['id'], None, None),
(None, None, 'my_portid1')]
api = self._stub_allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac1']),
_break='mac' + self.nets2[1]['id'])
self.assertRaises(exception.PortNotFree,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks,
macs=set(['my_mac1']))
def test_allocate_for_instance_not_enough_macs(self):
# If not enough MAC addresses are available to allocate to networks, an
# error should be raised.
# We could pass in macs=set(), but that wouldn't tell us that
# allocate_for_instance tracks used macs properly, so we pass in one
# mac, and ask for two networks.
requested_networks = [
(self.nets2[1]['id'], None, None),
(self.nets2[0]['id'], None, None)]
api = self._stub_allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac2']),
_break='mac' + self.nets2[0]['id'])
self.assertRaises(exception.PortNotFree,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks,
macs=set(['my_mac2']))
def test_allocate_for_instance_two_macs_two_networks(self):
# If two MACs are available and two networks requested, two new ports
# get made and no exceptions raised.
requested_networks = [
(self.nets2[1]['id'], None, None),
(self.nets2[0]['id'], None, None)]
self._allocate_for_instance(
net_idx=2, requested_networks=requested_networks,
macs=set(['my_mac2', 'my_mac1']))
def test_allocate_for_instance_mac_conflicting_requested_port(self):
# specify only first and last network
requested_networks = [(None, None, 'my_portid1')]
api = self._stub_allocate_for_instance(
net_idx=1, requested_networks=requested_networks,
macs=set(['unknown:mac']),
_break='pre_list_networks')
self.assertRaises(exception.PortNotUsable,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks,
macs=set(['unknown:mac']))
def test_allocate_for_instance_with_requested_networks(self):
# specify only first and last network
requested_networks = [
(net['id'], None, None)
for net in (self.nets3[1], self.nets3[0], self.nets3[2])]
self._allocate_for_instance(net_idx=3,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_fixedip(self):
# specify only first and last network
requested_networks = [(self.nets1[0]['id'], '10.0.1.0/24', None)]
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_port(self):
requested_networks = [(None, None, 'myportid1')]
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_no_networks(self):
"""verify the exception thrown when there are no networks defined."""
api = neutronapi.API()
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn(
{'networks': model.NetworkInfo([])})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': model.NetworkInfo([])})
self.mox.ReplayAll()
nwinfo = api.allocate_for_instance(self.context, self.instance)
self.assertEqual(len(nwinfo), 0)
def test_allocate_for_instance_ex1(self):
"""verify we will delete created ports
if we fail to allocate all net resources.
Mox to raise exception when creating a second port.
In this case, the code should delete the first created port.
"""
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg()).MultipleTimes().\
AndReturn(False)
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': []})
index = 0
for network in self.nets2:
binding_port_req_body = {
'port': {
'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
},
}
port_req_body = {
'port': {
'network_id': network['id'],
'admin_state_up': True,
'tenant_id': self.instance['project_id'],
},
}
port_req_body['port'].update(binding_port_req_body['port'])
port = {'id': 'portid_' + network['id']}
api._populate_neutron_extension_values(self.context,
self.instance, binding_port_req_body).AndReturn(None)
if index == 0:
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn({'port': port})
else:
NeutronOverQuota = exceptions.NeutronClientException(
message="Quota exceeded for resources: ['port']",
status_code=409)
self.moxed_client.create_port(
MyComparator(port_req_body)).AndRaise(NeutronOverQuota)
index += 1
self.moxed_client.delete_port('portid_' + self.nets2[0]['id'])
self.mox.ReplayAll()
self.assertRaises(exception.PortLimitExceeded,
api.allocate_for_instance,
self.context, self.instance)
def test_allocate_for_instance_ex2(self):
"""verify we have no port to delete
if we fail to allocate the first net resource.
Mox to raise exception when creating the first port.
In this case, the code should not delete any ports.
"""
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg()).MultipleTimes().\
AndReturn(False)
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': []})
binding_port_req_body = {
'port': {
'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
},
}
port_req_body = {
'port': {
'network_id': self.nets2[0]['id'],
'admin_state_up': True,
'device_id': self.instance['uuid'],
'tenant_id': self.instance['project_id'],
},
}
api._populate_neutron_extension_values(self.context,
self.instance, binding_port_req_body).AndReturn(None)
self.moxed_client.create_port(
MyComparator(port_req_body)).AndRaise(
Exception("fail to create port"))
self.mox.ReplayAll()
self.assertRaises(NEUTRON_CLIENT_EXCEPTION, api.allocate_for_instance,
self.context, self.instance)
def test_allocate_for_instance_no_port_or_network(self):
class BailOutEarly(Exception):
pass
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_available_networks')
# Make sure we get an empty list and then bail out of the rest
# of the function
api._get_available_networks(self.context, self.instance['project_id'],
[]).AndRaise(BailOutEarly)
self.mox.ReplayAll()
self.assertRaises(BailOutEarly,
api.allocate_for_instance,
self.context, self.instance,
requested_networks=[(None, None, None)])
def test_allocate_for_instance_second_time(self):
# Make sure that allocate_for_instance only returns ports that it
# allocated during _that_ run.
new_port = {'id': 'fake'}
self._returned_nw_info = self.port_data1 + [new_port]
nw_info = self._allocate_for_instance()
self.assertEqual(nw_info, [new_port])
def test_allocate_for_instance_port_in_use(self):
# If a port is already in use, an exception should be raised.
requested_networks = [(None, None, 'my_portid1')]
api = self._stub_allocate_for_instance(
requested_networks=requested_networks,
_break='pre_list_networks',
_device=True)
self.assertRaises(exception.PortInUse,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks)
def _deallocate_for_instance(self, number):
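        # Expect one list_ports() call followed by a delete_port() per port
        # (in reverse order), then run deallocate_for_instance().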
port_data = number == 1 and self.port_data1 or self.port_data2
self.moxed_client.list_ports(
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data})
for port in reversed(port_data):
self.moxed_client.delete_port(port['id'])
self.mox.ReplayAll()
api = neutronapi.API()
api.deallocate_for_instance(self.context, self.instance)
def test_deallocate_for_instance_1(self):
# Test to deallocate in one port env.
self._deallocate_for_instance(1)
def test_deallocate_for_instance_2(self):
# Test to deallocate in two ports env.
self._deallocate_for_instance(2)
def test_deallocate_for_instance_port_not_found(self):
port_data = self.port_data1
self.moxed_client.list_ports(
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data})
NeutronNotFound = neutronv2.exceptions.NeutronClientException(
status_code=404)
for port in reversed(port_data):
self.moxed_client.delete_port(port['id']).AndRaise(
NeutronNotFound)
self.mox.ReplayAll()
api = neutronapi.API()
api.deallocate_for_instance(self.context, self.instance)
def _test_deallocate_port_for_instance(self, number):
port_data = number == 1 and self.port_data1 or self.port_data2
nets = number == 1 and self.nets1 or self.nets2
self.moxed_client.delete_port(port_data[0]['id'])
net_info_cache = []
for port in port_data:
net_info_cache.append({"network": {"id": port['network_id']},
"id": port['id']})
instance = copy.copy(self.instance)
instance['info_cache'] = {'network_info':
six.text_type(
jsonutils.dumps(net_info_cache))}
api = neutronapi.API()
neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data[1:]})
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
net_ids = [port['network_id'] for port in port_data]
self.moxed_client.list_networks(id=net_ids).AndReturn(
{'networks': nets})
float_data = number == 1 and self.float_data1 or self.float_data2
for data in port_data[1:]:
for ip in data['fixed_ips']:
self.moxed_client.list_floatingips(
fixed_ip_address=ip['ip_address'],
port_id=data['id']).AndReturn(
{'floatingips': float_data[1:]})
for port in port_data[1:]:
self.moxed_client.list_subnets(id=['my_subid2']).AndReturn({})
self.mox.ReplayAll()
nwinfo = api.deallocate_port_for_instance(self.context, instance,
port_data[0]['id'])
self.assertEqual(len(nwinfo), len(port_data[1:]))
if len(port_data) > 1:
self.assertEqual(nwinfo[0]['network']['id'], 'my_netid2')
def test_deallocate_port_for_instance_1(self):
# Test to deallocate the first and only port
self._test_deallocate_port_for_instance(1)
def test_deallocate_port_for_instance_2(self):
# Test to deallocate the first port of two
self._test_deallocate_port_for_instance(2)
def test_list_ports(self):
search_opts = {'parm': 'value'}
self.moxed_client.list_ports(**search_opts)
self.mox.ReplayAll()
neutronapi.API().list_ports(self.context, **search_opts)
def test_show_port(self):
self.moxed_client.show_port('foo')
self.mox.ReplayAll()
neutronapi.API().show_port(self.context, 'foo')
def test_validate_networks(self):
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None)]
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': []})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 50}})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_validate_networks_ex_1(self):
requested_networks = [('my_netid1', 'test', None)]
self.moxed_client.list_networks(
id=mox.SameElementsAs(['my_netid1'])).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': []})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 50}})
self.mox.ReplayAll()
api = neutronapi.API()
try:
api.validate_networks(self.context, requested_networks, 1)
except exception.NetworkNotFound as ex:
self.assertIn("my_netid2", str(ex))
def test_validate_networks_ex_2(self):
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None),
('my_netid3', 'test3', None)]
ids = ['my_netid1', 'my_netid2', 'my_netid3']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets1})
self.mox.ReplayAll()
api = neutronapi.API()
try:
api.validate_networks(self.context, requested_networks, 1)
except exception.NetworkNotFound as ex:
self.assertIn("my_netid2, my_netid3", str(ex))
def test_validate_networks_duplicate(self):
"""Verify that the correct exception is thrown when duplicate
network ids are passed to validate_networks.
"""
requested_networks = [('my_netid1', None, None),
('my_netid1', None, None)]
self.mox.ReplayAll()
# Expected call from setUp.
neutronv2.get_client(None)
api = neutronapi.API()
self.assertRaises(exception.NetworkDuplicated,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_not_specified(self):
requested_networks = []
self.moxed_client.list_networks(
tenant_id=self.context.project_id,
shared=False).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_networks(
shared=True).AndReturn(
{'networks': self.nets2})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.NetworkAmbiguous,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_port_not_found(self):
        # Verify that the correct exception is thrown when a non-existent
# port is passed to validate_networks.
requested_networks = [('my_netid1', None, '3123-ad34-bc43-32332ca33e')]
NeutronNotFound = neutronv2.exceptions.NeutronClientException(
status_code=404)
self.moxed_client.show_port(requested_networks[0][2]).AndRaise(
NeutronNotFound)
self.mox.ReplayAll()
# Expected call from setUp.
neutronv2.get_client(None)
api = neutronapi.API()
self.assertRaises(exception.PortNotFound,
api.validate_networks,
self.context, requested_networks, 1)
    def test_validate_networks_port_show_raises_non404(self):
        # Verify that a non-404 NeutronClientException raised by show_port
        # is re-raised by validate_networks.
requested_networks = [('my_netid1', None, '3123-ad34-bc43-32332ca33e')]
NeutronNotFound = neutronv2.exceptions.NeutronClientException(
status_code=0)
self.moxed_client.show_port(requested_networks[0][2]).AndRaise(
NeutronNotFound)
self.mox.ReplayAll()
# Expected call from setUp.
neutronv2.get_client(None)
api = neutronapi.API()
self.assertRaises(neutronv2.exceptions.NeutronClientException,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_port_in_use(self):
requested_networks = [(None, None, self.port_data3[0]['id'])]
self.moxed_client.show_port(self.port_data3[0]['id']).\
AndReturn({'port': self.port_data3[0]})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.PortInUse,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_ports_in_same_network(self):
port_a = self.port_data3[0]
port_b = self.port_data1[0]
self.assertEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = [(None, None, port_a['id']),
(None, None, port_b['id'])]
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
self.mox.ReplayAll()
api = neutronapi.API()
self.assertRaises(exception.NetworkDuplicated,
api.validate_networks,
self.context, requested_networks, 1)
def test_validate_networks_ports_not_in_same_network(self):
port_a = self.port_data3[0]
port_b = self.port_data2[1]
self.assertNotEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = [(None, None, port_a['id']),
(None, None, port_b['id'])]
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
search_opts = {'id': [port_a['network_id'], port_b['network_id']]}
self.moxed_client.list_networks(
**search_opts).AndReturn({'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': []})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 50}})
self.mox.ReplayAll()
api = neutronapi.API()
api.validate_networks(self.context, requested_networks, 1)
def test_validate_networks_no_quota(self):
# Test validation for a request for one instance needing
# two ports, where the quota is 2 and 2 ports are in use
# => instances which can be created = 0
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None)]
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': self.port_data2})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 2}})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 1)
self.assertEqual(max_count, 0)
def test_validate_networks_some_quota(self):
# Test validation for a request for two instance needing
# two ports each, where the quota is 5 and 2 ports are in use
# => instances which can be created = 1
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None)]
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': self.port_data2})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 5}})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 2)
self.assertEqual(max_count, 1)
def test_validate_networks_unlimited_quota(self):
# Test validation for a request for two instance needing
# two ports each, where the quota is -1 (unlimited)
# => instances which can be created = 1
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None)]
ids = ['my_netid1', 'my_netid2']
self.moxed_client.list_networks(
id=mox.SameElementsAs(ids)).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': self.port_data2})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': -1}})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 2)
self.assertEqual(max_count, 2)
def test_validate_networks_no_quota_but_ports_supplied(self):
# Test validation for a request for one instance needing
# two ports, where the quota is 2 and 2 ports are in use
# but the request includes a port to be used
# => instances which can be created = 1
port_a = self.port_data3[0]
port_b = self.port_data2[1]
self.assertNotEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
port['device_id'] = None
port['device_owner'] = None
requested_networks = [(None, None, port_a['id']),
(None, None, port_b['id'])]
self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
search_opts = {'id': [port_a['network_id'], port_b['network_id']]}
self.moxed_client.list_networks(
**search_opts).AndReturn({'networks': self.nets2})
self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
{'ports': self.port_data2})
self.moxed_client.show_quota(
tenant_id='my_tenantid').AndReturn(
{'quota': {'port': 2}})
self.mox.ReplayAll()
api = neutronapi.API()
max_count = api.validate_networks(self.context,
requested_networks, 1)
self.assertEqual(max_count, 1)
def _mock_list_ports(self, port_data=None):
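        # Stub list_ports() filtered by fixed IP address to return the given
        # port fixture and hand back the address used in the filter.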
if port_data is None:
port_data = self.port_data2
address = self.port_address
self.moxed_client.list_ports(
fixed_ips=MyComparator('ip_address=%s' % address)).AndReturn(
{'ports': port_data})
self.mox.ReplayAll()
return address
def test_get_instance_uuids_by_ip_filter(self):
self._mock_list_ports()
filters = {'ip': '^10\\.0\\.1\\.2$'}
api = neutronapi.API()
result = api.get_instance_uuids_by_ip_filter(self.context, filters)
self.assertEqual(self.instance2['uuid'], result[0]['instance_uuid'])
self.assertEqual(self.instance['uuid'], result[1]['instance_uuid'])
def test_get_fixed_ip_by_address_fails_for_no_ports(self):
address = self._mock_list_ports(port_data=[])
api = neutronapi.API()
self.assertRaises(exception.FixedIpNotFoundForAddress,
api.get_fixed_ip_by_address,
self.context, address)
def test_get_fixed_ip_by_address_succeeds_for_1_port(self):
address = self._mock_list_ports(port_data=self.port_data1)
api = neutronapi.API()
result = api.get_fixed_ip_by_address(self.context, address)
self.assertEqual(self.instance2['uuid'], result['instance_uuid'])
def test_get_fixed_ip_by_address_fails_for_more_than_1_port(self):
address = self._mock_list_ports()
api = neutronapi.API()
self.assertRaises(exception.FixedIpAssociatedWithMultipleInstances,
api.get_fixed_ip_by_address,
self.context, address)
def _get_available_networks(self, prv_nets, pub_nets, req_ids=None):
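        # Stub list_networks() (by id, or by tenant plus shared) and check that
        # _get_available_networks() returns the combined private/public list.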
api = neutronapi.API()
nets = prv_nets + pub_nets
if req_ids:
mox_list_params = {'id': req_ids}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
else:
mox_list_params = {'tenant_id': self.instance['project_id'],
'shared': False}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': prv_nets})
mox_list_params = {'shared': True}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': pub_nets})
self.mox.ReplayAll()
rets = api._get_available_networks(self.context,
self.instance['project_id'],
req_ids)
self.assertEqual(rets, nets)
def test_get_available_networks_all_private(self):
self._get_available_networks(prv_nets=self.nets2, pub_nets=[])
def test_get_available_networks_all_public(self):
self._get_available_networks(prv_nets=[], pub_nets=self.nets2)
def test_get_available_networks_private_and_public(self):
self._get_available_networks(prv_nets=self.nets1, pub_nets=self.nets4)
def test_get_available_networks_with_network_ids(self):
prv_nets = [self.nets3[0]]
pub_nets = [self.nets3[-1]]
# specify only first and last network
req_ids = [net['id'] for net in (self.nets3[0], self.nets3[-1])]
self._get_available_networks(prv_nets, pub_nets, req_ids)
def test_get_floating_ip_pools(self):
api = neutronapi.API()
search_opts = {'router:external': True}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
self.mox.ReplayAll()
pools = api.get_floating_ip_pools(self.context)
expected = [{'name': self.fip_pool['name']},
{'name': self.fip_pool_nova['name']}]
self.assertEqual(expected, pools)
def _get_expected_fip_model(self, fip_data, idx=0):
expected = {'id': fip_data['id'],
'address': fip_data['floating_ip_address'],
'pool': self.fip_pool['name'],
'project_id': fip_data['tenant_id'],
'fixed_ip_id': fip_data['port_id'],
'fixed_ip':
{'address': fip_data['fixed_ip_address']},
'instance': ({'uuid': self.port_data2[idx]['device_id']}
if fip_data['port_id']
else None)}
return expected
def _test_get_floating_ip(self, fip_data, idx=0, by_address=False):
api = neutronapi.API()
fip_id = fip_data['id']
net_id = fip_data['floating_network_id']
address = fip_data['floating_ip_address']
if by_address:
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [fip_data]})
else:
self.moxed_client.show_floatingip(fip_id).\
AndReturn({'floatingip': fip_data})
self.moxed_client.show_network(net_id).\
AndReturn({'network': self.fip_pool})
if fip_data['port_id']:
self.moxed_client.show_port(fip_data['port_id']).\
AndReturn({'port': self.port_data2[idx]})
self.mox.ReplayAll()
expected = self._get_expected_fip_model(fip_data, idx)
if by_address:
fip = api.get_floating_ip_by_address(self.context, address)
else:
fip = api.get_floating_ip(self.context, fip_id)
self.assertEqual(expected, fip)
def test_get_floating_ip_unassociated(self):
self._test_get_floating_ip(self.fip_unassociated, idx=0)
def test_get_floating_ip_associated(self):
self._test_get_floating_ip(self.fip_associated, idx=1)
def test_get_floating_ip_by_address(self):
self._test_get_floating_ip(self.fip_unassociated, idx=0,
by_address=True)
def test_get_floating_ip_by_address_associated(self):
self._test_get_floating_ip(self.fip_associated, idx=1,
by_address=True)
def test_get_floating_ip_by_address_not_found(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': []})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpNotFoundForAddress,
api.get_floating_ip_by_address,
self.context, address)
def test_get_floating_ip_by_id_not_found(self):
api = neutronapi.API()
NeutronNotFound = neutronv2.exceptions.NeutronClientException(
status_code=404)
floating_ip_id = self.fip_unassociated['id']
self.moxed_client.show_floatingip(floating_ip_id).\
AndRaise(NeutronNotFound)
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpNotFound,
api.get_floating_ip,
self.context, floating_ip_id)
def test_get_floating_ip_raises_non404(self):
api = neutronapi.API()
NeutronNotFound = neutronv2.exceptions.NeutronClientException(
status_code=0)
floating_ip_id = self.fip_unassociated['id']
self.moxed_client.show_floatingip(floating_ip_id).\
AndRaise(NeutronNotFound)
self.mox.ReplayAll()
self.assertRaises(neutronv2.exceptions.NeutronClientException,
api.get_floating_ip,
self.context, floating_ip_id)
def test_get_floating_ip_by_address_multiple_found(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated] * 2})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpMultipleFoundForAddress,
api.get_floating_ip_by_address,
self.context, address)
def test_get_floating_ips_by_project(self):
api = neutronapi.API()
project_id = self.context.project_id
self.moxed_client.list_floatingips(tenant_id=project_id).\
AndReturn({'floatingips': [self.fip_unassociated,
self.fip_associated]})
search_opts = {'router:external': True}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
self.moxed_client.list_ports(tenant_id=project_id).\
AndReturn({'ports': self.port_data2})
self.mox.ReplayAll()
expected = [self._get_expected_fip_model(self.fip_unassociated),
self._get_expected_fip_model(self.fip_associated, idx=1)]
fips = api.get_floating_ips_by_project(self.context)
self.assertEqual(expected, fips)
def _test_get_instance_id_by_floating_address(self, fip_data,
associated=False):
api = neutronapi.API()
address = fip_data['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [fip_data]})
if associated:
self.moxed_client.show_port(fip_data['port_id']).\
AndReturn({'port': self.port_data2[1]})
self.mox.ReplayAll()
if associated:
expected = self.port_data2[1]['device_id']
else:
expected = None
fip = api.get_instance_id_by_floating_address(self.context, address)
self.assertEqual(expected, fip)
def test_get_instance_id_by_floating_address(self):
self._test_get_instance_id_by_floating_address(self.fip_unassociated)
def test_get_instance_id_by_floating_address_associated(self):
self._test_get_instance_id_by_floating_address(self.fip_associated,
associated=True)
def test_allocate_floating_ip(self):
api = neutronapi.API()
pool_name = self.fip_pool['name']
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context, 'ext_net')
self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
def test_allocate_floating_ip_with_pool_id(self):
api = neutronapi.API()
pool_id = self.fip_pool['id']
search_opts = {'router:external': True,
'fields': 'id',
'id': pool_id}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context, pool_id)
self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
def test_allocate_floating_ip_with_default_pool(self):
api = neutronapi.API()
pool_name = self.fip_pool_nova['name']
pool_id = self.fip_pool_nova['id']
search_opts = {'router:external': True,
'fields': 'id',
'name': pool_name}
self.moxed_client.list_networks(**search_opts).\
AndReturn({'networks': [self.fip_pool_nova]})
self.moxed_client.create_floatingip(
{'floatingip': {'floating_network_id': pool_id}}).\
AndReturn({'floatingip': self.fip_unassociated})
self.mox.ReplayAll()
fip = api.allocate_floating_ip(self.context)
self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
def test_release_floating_ip(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
fip_id = self.fip_unassociated['id']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated]})
self.moxed_client.delete_floatingip(fip_id)
self.mox.ReplayAll()
api.release_floating_ip(self.context, address)
def test_release_floating_ip_associated(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.mox.ReplayAll()
self.assertRaises(exception.FloatingIpAssociated,
api.release_floating_ip, self.context, address)
def _setup_mock_for_refresh_cache(self, api, instances):
nw_info = self.mox.CreateMock(model.NetworkInfo)
self.mox.StubOutWithMock(api, '_get_instance_nw_info')
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
for instance in instances:
nw_info.json()
api._get_instance_nw_info(mox.IgnoreArg(), instance).\
AndReturn(nw_info)
api.db.instance_info_cache_update(mox.IgnoreArg(),
instance['uuid'],
mox.IgnoreArg())
def test_associate_floating_ip(self):
api = neutronapi.API()
address = self.fip_unassociated['floating_ip_address']
fixed_address = self.port_address2
fip_id = self.fip_unassociated['id']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance['uuid']}
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[1]]})
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_unassociated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': self.fip_associated['port_id'],
'fixed_ip_address': fixed_address}})
self._setup_mock_for_refresh_cache(api, [self.instance])
self.mox.ReplayAll()
api.associate_floating_ip(self.context, self.instance,
address, fixed_address)
def test_reassociate_floating_ip(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
new_fixed_address = self.port_address
fip_id = self.fip_associated['id']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance2['uuid']}
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[0]]})
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': 'my_portid1',
'fixed_ip_address': new_fixed_address}})
self.moxed_client.show_port(self.fip_associated['port_id']).\
AndReturn({'port': self.port_data2[1]})
self.mox.StubOutWithMock(api.db, 'instance_get_by_uuid')
api.db.instance_get_by_uuid(mox.IgnoreArg(),
self.instance['uuid']).\
AndReturn(self.instance)
self._setup_mock_for_refresh_cache(api, [self.instance,
self.instance2])
self.mox.ReplayAll()
api.associate_floating_ip(self.context, self.instance2,
address, new_fixed_address)
def test_associate_floating_ip_not_found_fixed_ip(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
fixed_address = self.fip_associated['fixed_ip_address']
search_opts = {'device_owner': 'compute:nova',
'device_id': self.instance['uuid']}
self.moxed_client.list_ports(**search_opts).\
AndReturn({'ports': [self.port_data2[0]]})
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpNotFoundForAddress,
api.associate_floating_ip, self.context,
self.instance, address, fixed_address)
def test_disassociate_floating_ip(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
fip_id = self.fip_associated['id']
self.moxed_client.list_floatingips(floating_ip_address=address).\
AndReturn({'floatingips': [self.fip_associated]})
self.moxed_client.update_floatingip(
fip_id, {'floatingip': {'port_id': None}})
self._setup_mock_for_refresh_cache(api, [self.instance])
self.mox.ReplayAll()
api.disassociate_floating_ip(self.context, self.instance, address)
def test_add_fixed_ip_to_instance(self):
api = neutronapi.API()
self._setup_mock_for_refresh_cache(api, [self.instance])
network_id = 'my_netid1'
search_opts = {'network_id': network_id}
self.moxed_client.list_subnets(
**search_opts).AndReturn({'subnets': self.subnet_data_n})
search_opts = {'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
'network_id': network_id}
self.moxed_client.list_ports(
**search_opts).AndReturn({'ports': self.port_data1})
port_req_body = {
'port': {
'fixed_ips': [{'subnet_id': 'my_subid1'},
{'subnet_id': 'my_subid1'}],
},
}
port = self.port_data1[0]
port['fixed_ips'] = [{'subnet_id': 'my_subid1'}]
self.moxed_client.update_port('my_portid1',
MyComparator(port_req_body)).AndReturn({'port': port})
self.mox.ReplayAll()
api.add_fixed_ip_to_instance(self.context, self.instance, network_id)
def test_remove_fixed_ip_from_instance(self):
api = neutronapi.API()
self._setup_mock_for_refresh_cache(api, [self.instance])
address = '10.0.0.3'
zone = 'compute:%s' % self.instance['availability_zone']
search_opts = {'device_id': self.instance['uuid'],
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
self.moxed_client.list_ports(
**search_opts).AndReturn({'ports': self.port_data1})
port_req_body = {
'port': {
'fixed_ips': [],
},
}
port = self.port_data1[0]
port['fixed_ips'] = []
self.moxed_client.update_port('my_portid1',
MyComparator(port_req_body)).AndReturn({'port': port})
self.mox.ReplayAll()
api.remove_fixed_ip_from_instance(self.context, self.instance, address)
def test_list_floating_ips_without_l3_support(self):
api = neutronapi.API()
NeutronNotFound = exceptions.NeutronClientException(
status_code=404)
self.moxed_client.list_floatingips(
fixed_ip_address='1.1.1.1', port_id=1).AndRaise(NeutronNotFound)
self.mox.ReplayAll()
neutronv2.get_client('fake')
floatingips = api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', 1)
self.assertEqual(floatingips, [])
def test_nw_info_get_ips(self):
fake_port = {
'fixed_ips': [
{'ip_address': '1.1.1.1'}],
'id': 'port-id',
}
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', 'port-id').AndReturn(
[{'floating_ip_address': '10.0.0.1'}])
self.mox.ReplayAll()
neutronv2.get_client('fake')
result = api._nw_info_get_ips(self.moxed_client, fake_port)
self.assertEqual(len(result), 1)
self.assertEqual(result[0]['address'], '1.1.1.1')
self.assertEqual(result[0]['floating_ips'][0]['address'], '10.0.0.1')
def test_nw_info_get_subnets(self):
fake_port = {
'fixed_ips': [
{'ip_address': '1.1.1.1'},
{'ip_address': '2.2.2.2'}],
'id': 'port-id',
}
fake_subnet = model.Subnet(cidr='1.0.0.0/8')
fake_ips = [model.IP(x['ip_address']) for x in fake_port['fixed_ips']]
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_subnets_from_port')
api._get_subnets_from_port(self.context, fake_port).AndReturn(
[fake_subnet])
self.mox.ReplayAll()
neutronv2.get_client('fake')
subnets = api._nw_info_get_subnets(self.context, fake_port, fake_ips)
self.assertEqual(len(subnets), 1)
self.assertEqual(len(subnets[0]['ips']), 1)
self.assertEqual(subnets[0]['ips'][0]['address'], '1.1.1.1')
def _test_nw_info_build_network(self, vif_type):
fake_port = {
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'id': 'port-id',
'network_id': 'net-id',
'binding:vif_type': vif_type,
}
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
api = neutronapi.API()
self.mox.ReplayAll()
neutronv2.get_client('fake')
net, iid = api._nw_info_build_network(fake_port, fake_nets,
fake_subnets)
self.assertEqual(net['subnets'], fake_subnets)
self.assertEqual(net['id'], 'net-id')
self.assertEqual(net['label'], 'foo')
self.assertEqual(net.get_meta('tenant_id'), 'tenant')
self.assertEqual(net.get_meta('injected'), CONF.flat_injected)
return net, iid
def test_nw_info_build_network_ovs(self):
net, iid = self._test_nw_info_build_network(model.VIF_TYPE_OVS)
self.assertEqual(net['bridge'], CONF.neutron_ovs_bridge)
self.assertNotIn('should_create_bridge', net)
self.assertEqual(iid, 'port-id')
def test_nw_info_build_network_bridge(self):
net, iid = self._test_nw_info_build_network(model.VIF_TYPE_BRIDGE)
self.assertEqual(net['bridge'], 'brqnet-id')
self.assertTrue(net['should_create_bridge'])
self.assertIsNone(iid)
def test_nw_info_build_network_other(self):
net, iid = self._test_nw_info_build_network(None)
self.assertIsNone(net['bridge'])
self.assertNotIn('should_create_bridge', net)
self.assertIsNone(iid)
def test_nw_info_build_no_match(self):
fake_port = {
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'id': 'port-id',
'network_id': 'net-id1',
'tenant_id': 'tenant',
'binding:vif_type': model.VIF_TYPE_OVS,
}
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [{'id': 'net-id2', 'name': 'foo', 'tenant_id': 'tenant'}]
api = neutronapi.API()
self.mox.ReplayAll()
neutronv2.get_client('fake')
net, iid = api._nw_info_build_network(fake_port, fake_nets,
fake_subnets)
self.assertEqual(fake_subnets, net['subnets'])
self.assertEqual('net-id1', net['id'])
self.assertEqual('tenant', net['meta']['tenant_id'])
def test_build_network_info_model(self):
api = neutronapi.API()
fake_inst = {'project_id': 'fake', 'uuid': 'uuid',
'info_cache': {'network_info': []}}
fake_ports = [
# admin_state_up=True and status='ACTIVE' thus vif.active=True
{'id': 'port0',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'ACTIVE',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:01',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
},
# admin_state_up=False and status='DOWN' thus vif.active=True
{'id': 'port1',
'network_id': 'net-id',
'admin_state_up': False,
'status': 'DOWN',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:02',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
},
# admin_state_up=True and status='DOWN' thus vif.active=False
{'id': 'port2',
'network_id': 'net-id',
'admin_state_up': True,
'status': 'DOWN',
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': 'de:ad:be:ef:00:03',
'binding:vif_type': model.VIF_TYPE_BRIDGE,
},
# This does not match the networks we provide below,
# so it should be ignored (and is here to verify that)
{'id': 'port3',
'network_id': 'other-net-id',
'admin_state_up': True,
'status': 'DOWN',
},
]
fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
fake_nets = [
{'id': 'net-id',
'name': 'foo',
'tenant_id': 'fake',
}
]
neutronv2.get_client(mox.IgnoreArg(), admin=True).MultipleTimes(
).AndReturn(self.moxed_client)
self.moxed_client.list_ports(
tenant_id='fake', device_id='uuid').AndReturn(
{'ports': fake_ports})
self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
self.mox.StubOutWithMock(api, '_get_subnets_from_port')
requested_port_ids = ['port0', 'port1', 'port2']
for requested_port_id in requested_port_ids:
api._get_floating_ips_by_fixed_and_port(
self.moxed_client, '1.1.1.1', requested_port_id).AndReturn(
[{'floating_ip_address': '10.0.0.1'}])
for index in range(len(requested_port_ids)):
api._get_subnets_from_port(self.context, fake_ports[index]
).AndReturn(fake_subnets)
self.mox.ReplayAll()
neutronv2.get_client('fake')
nw_infos = api._build_network_info_model(self.context, fake_inst,
fake_nets,
[fake_ports[0]['id'],
fake_ports[1]['id'],
fake_ports[2]['id']])
self.assertEqual(len(nw_infos), 3)
index = 0
for nw_info in nw_infos:
self.assertEqual(nw_info['id'], fake_ports[index]['id'])
self.assertEqual(nw_info['address'],
fake_ports[index]['mac_address'])
self.assertEqual(nw_info['devname'], 'tapport' + str(index))
self.assertIsNone(nw_info['ovs_interfaceid'])
self.assertEqual(nw_info['type'], model.VIF_TYPE_BRIDGE)
self.assertEqual(nw_info['network']['bridge'], 'brqnet-id')
index += 1
self.assertEqual(nw_infos[0]['active'], True)
self.assertEqual(nw_infos[1]['active'], True)
self.assertEqual(nw_infos[2]['active'], False)
def test_get_all_empty_list_networks(self):
api = neutronapi.API()
self.moxed_client.list_networks().AndReturn({'networks': []})
self.mox.ReplayAll()
networks = api.get_all(self.context)
self.assertEqual(networks, [])
class TestNeutronv2ModuleMethods(test.TestCase):
def test_gather_port_ids_and_networks_wrong_params(self):
api = neutronapi.API()
# Test with networks not None and port_ids is None
self.assertRaises(exception.NovaException,
api._gather_port_ids_and_networks,
'fake_context', 'fake_instance',
[{'network': {'name': 'foo'}}], None)
# Test with networks is None and port_ids not None
self.assertRaises(exception.NovaException,
api._gather_port_ids_and_networks,
'fake_context', 'fake_instance',
None, ['list', 'of', 'port_ids'])
def test_ensure_requested_network_ordering_no_preference_ids(self):
l = [1, 2, 3]
neutronapi._ensure_requested_network_ordering(
lambda x: x,
l,
None)
def test_ensure_requested_network_ordering_no_preference_hashes(self):
l = [{'id': 3}, {'id': 1}, {'id': 2}]
neutronapi._ensure_requested_network_ordering(
lambda x: x['id'],
l,
None)
self.assertEqual(l, [{'id': 3}, {'id': 1}, {'id': 2}])
def test_ensure_requested_network_ordering_with_preference(self):
l = [{'id': 3}, {'id': 1}, {'id': 2}]
neutronapi._ensure_requested_network_ordering(
lambda x: x['id'],
l,
[1, 2, 3])
self.assertEqual(l, [{'id': 1}, {'id': 2}, {'id': 3}])
class TestNeutronv2Portbinding(TestNeutronv2Base):
def test_allocate_for_instance_portbinding(self):
self._allocate_for_instance(1, portbinding=True)
def test_populate_neutron_extension_values_binding(self):
api = neutronapi.API()
neutronv2.get_client(mox.IgnoreArg()).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': constants.PORTBINDING_EXT}]})
self.mox.ReplayAll()
host_id = 'my_host_id'
instance = {'host': host_id}
port_req_body = {'port': {}}
api._populate_neutron_extension_values(self.context, instance,
port_req_body)
self.assertEqual(port_req_body['port']['binding:host_id'], host_id)
def test_migrate_instance_finish_binding_false(self):
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg(),
refresh_cache=True).AndReturn(False)
self.mox.ReplayAll()
api.migrate_instance_finish(self.context, None, None)
def test_migrate_instance_finish_binding_true(self):
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg(),
refresh_cache=True).AndReturn(True)
neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
search_opts = {'device_id': self.instance['uuid'],
'tenant_id': self.instance['project_id']}
ports = {'ports': [{'id': 'test1'}]}
self.moxed_client.list_ports(**search_opts).AndReturn(ports)
migration = {'source_compute': self.instance.get('host'),
'dest_compute': 'dest_host', }
port_req_body = {'port':
{'binding:host_id': migration['dest_compute']}}
self.moxed_client.update_port('test1',
port_req_body).AndReturn(None)
self.mox.ReplayAll()
api.migrate_instance_finish(self.context, self.instance, migration)
def test_migrate_instance_finish_binding_true_exception(self):
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg(),
refresh_cache=True).AndReturn(True)
neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
search_opts = {'device_id': self.instance['uuid'],
'tenant_id': self.instance['project_id']}
ports = {'ports': [{'id': 'test1'}]}
self.moxed_client.list_ports(**search_opts).AndReturn(ports)
migration = {'source_compute': self.instance.get('host'),
'dest_compute': 'dest_host', }
port_req_body = {'port':
{'binding:host_id': migration['dest_compute']}}
self.moxed_client.update_port('test1',
port_req_body).AndRaise(
Exception("fail to update port"))
self.mox.ReplayAll()
self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
api.migrate_instance_finish,
self.context, self.instance, migration)
class TestNeutronv2ExtraDhcpOpts(TestNeutronv2Base):
def setUp(self):
super(TestNeutronv2ExtraDhcpOpts, self).setUp()
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
def test_allocate_for_instance_1_with_extra_dhcp_opts_turned_off(self):
self._allocate_for_instance(1, extra_dhcp_opts=False)
def test_allocate_for_instance_extradhcpopts(self):
dhcp_opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'}]
self._allocate_for_instance(1, dhcp_options=dhcp_opts)
class TestNeutronClientForAdminScenarios(test.TestCase):
def test_get_cached_neutron_client_for_admin(self):
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
my_context = context.RequestContext('userid',
'my_tenantid',
auth_token='token')
# Make multiple calls and ensure we get the same
# client back again and again
client = neutronv2.get_client(my_context, True)
client2 = neutronv2.get_client(my_context, True)
client3 = neutronv2.get_client(my_context, True)
self.assertEqual(client, client2)
self.assertEqual(client, client3)
# clear the cache
local.strong_store.neutron_client = None
# A new client should be created now
client4 = neutronv2.get_client(my_context, True)
self.assertNotEqual(client, client4)
def test_get_neutron_client_for_non_admin(self):
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
my_context = context.RequestContext('userid',
'my_tenantid',
auth_token='token')
# Multiple calls should return different clients
client = neutronv2.get_client(my_context)
client2 = neutronv2.get_client(my_context)
self.assertNotEqual(client, client2)
def test_get_neutron_client_for_non_admin_and_no_token(self):
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
my_context = context.RequestContext('userid',
'my_tenantid')
self.assertRaises(exceptions.Unauthorized,
neutronv2.get_client,
my_context)
def _test_get_client_for_admin(self, use_id=False, admin_context=False):
self.flags(neutron_auth_strategy=None)
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
if use_id:
self.flags(neutron_admin_tenant_id='admin_tenant_id')
if admin_context:
my_context = context.get_admin_context()
else:
my_context = context.RequestContext('userid', 'my_tenantid',
auth_token='token')
self.mox.StubOutWithMock(client.Client, "__init__")
kwargs = {
'auth_url': CONF.neutron_admin_auth_url,
'password': CONF.neutron_admin_password,
'username': CONF.neutron_admin_username,
'endpoint_url': CONF.neutron_url,
'auth_strategy': None,
'timeout': CONF.neutron_url_timeout,
'insecure': False,
'ca_cert': None}
if use_id:
kwargs['tenant_id'] = CONF.neutron_admin_tenant_id
else:
kwargs['tenant_name'] = CONF.neutron_admin_tenant_name
client.Client.__init__(**kwargs).AndReturn(None)
self.mox.ReplayAll()
# clear the cache
if hasattr(local.strong_store, 'neutron_client'):
delattr(local.strong_store, 'neutron_client')
if admin_context:
# Note that the context does not contain a token but is
# an admin context which will force an elevation to admin
# credentials.
neutronv2.get_client(my_context)
else:
# Note that the context is not elevated, but the True is passed in
# which will force an elevation to admin credentials even though
# the context has an auth_token.
neutronv2.get_client(my_context, True)
def test_get_client_for_admin(self):
self._test_get_client_for_admin()
def test_get_client_for_admin_with_id(self):
self._test_get_client_for_admin(use_id=True)
def test_get_client_for_admin_context(self):
self._test_get_client_for_admin(admin_context=True)
def test_get_client_for_admin_context_with_id(self):
self._test_get_client_for_admin(use_id=True, admin_context=True)
| apache-2.0 |
Vixionar/django | tests/generic_relations/models.py | 188 | 4327 | """
Generic relations
Generic relations let an object have a foreign key to any object through a
content-type/object-id field. A ``GenericForeignKey`` field can point to any
object, be it animal, vegetable, or mineral.
The canonical example is tags (although this example implementation is *far*
from complete).
"""
from __future__ import unicode_literals
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class TaggedItem(models.Model):
"""A tag on an item."""
tag = models.SlugField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
ordering = ["tag", "content_type__model"]
def __str__(self):
return self.tag
class ValuableTaggedItem(TaggedItem):
value = models.PositiveIntegerField()
class AbstractComparison(models.Model):
comparative = models.CharField(max_length=50)
content_type1 = models.ForeignKey(ContentType, models.CASCADE, related_name="comparative1_set")
object_id1 = models.PositiveIntegerField()
first_obj = GenericForeignKey(ct_field="content_type1", fk_field="object_id1")
@python_2_unicode_compatible
class Comparison(AbstractComparison):
"""
A model that tests having multiple GenericForeignKeys. One is defined
through an inherited abstract model and one defined directly on this class.
"""
content_type2 = models.ForeignKey(ContentType, models.CASCADE, related_name="comparative2_set")
object_id2 = models.PositiveIntegerField()
other_obj = GenericForeignKey(ct_field="content_type2", fk_field="object_id2")
def __str__(self):
return "%s is %s than %s" % (self.first_obj, self.comparative, self.other_obj)
@python_2_unicode_compatible
class Animal(models.Model):
common_name = models.CharField(max_length=150)
latin_name = models.CharField(max_length=150)
tags = GenericRelation(TaggedItem, related_query_name='animal')
comparisons = GenericRelation(Comparison,
object_id_field="object_id1",
content_type_field="content_type1")
def __str__(self):
return self.common_name
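# A minimal usage sketch (illustrative only, not part of the test models):
# the GenericForeignKey on TaggedItem can point at any of the models above,
# and the GenericRelation on Animal gives the reverse lookup, e.g.
#
#   lion = Animal.objects.create(common_name="Lion", latin_name="Panthera leo")
#   TaggedItem.objects.create(content_object=lion, tag="yellow")
#   lion.tags.all()                                        # reverse via GenericRelation
#   TaggedItem.objects.get(tag="yellow").content_object    # -> lion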
@python_2_unicode_compatible
class Vegetable(models.Model):
name = models.CharField(max_length=150)
is_yucky = models.BooleanField(default=True)
tags = GenericRelation(TaggedItem)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Mineral(models.Model):
name = models.CharField(max_length=150)
hardness = models.PositiveSmallIntegerField()
# note the lack of an explicit GenericRelation here...
def __str__(self):
return self.name
class GeckoManager(models.Manager):
def get_queryset(self):
return super(GeckoManager, self).get_queryset().filter(has_tail=True)
class Gecko(models.Model):
has_tail = models.BooleanField(default=False)
objects = GeckoManager()
# To test fix for #11263
class Rock(Mineral):
tags = GenericRelation(TaggedItem)
class ManualPK(models.Model):
id = models.IntegerField(primary_key=True)
tags = GenericRelation(TaggedItem, related_query_name='manualpk')
class ForProxyModelModel(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
obj = GenericForeignKey(for_concrete_model=False)
title = models.CharField(max_length=255, null=True)
class ForConcreteModelModel(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
obj = GenericForeignKey()
class ConcreteRelatedModel(models.Model):
bases = GenericRelation(ForProxyModelModel, for_concrete_model=False)
class ProxyRelatedModel(ConcreteRelatedModel):
class Meta:
proxy = True
# To test fix for #7551
class AllowsNullGFK(models.Model):
content_type = models.ForeignKey(ContentType, models.SET_NULL, null=True)
object_id = models.PositiveIntegerField(null=True)
content_object = GenericForeignKey()
| bsd-3-clause |
tylert/chirp.hg | chirp/drivers/kguv9dplus.py | 1 | 65862 | # Copyright 2018 Jim Lieb <[email protected]>
#
# Driver for Wouxon KG-UV9D Plus
#
# Borrowed from other chirp drivers, especially the KG-UV8D Plus
# by Krystian Struzik <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Wouxun KG-UV9D Plus radio management module"""
import time
import os
import logging
import struct
import string
from chirp import util, chirp_common, bitwise, memmap, errors, directory
from chirp.settings import RadioSetting, RadioSettingValue, \
RadioSettingGroup, \
RadioSettingValueBoolean, RadioSettingValueList, \
RadioSettingValueInteger, RadioSettingValueString, \
RadioSettings, InvalidValueError
LOG = logging.getLogger(__name__)
CMD_IDENT = 0x80
CMD_HANGUP = 0x81
CMD_RCONF = 0x82
CMD_WCONF = 0x83
CMD_RCHAN = 0x84
CMD_WCHAN = 0x85
cmd_name = {
CMD_IDENT: "ident",
CMD_HANGUP: "hangup",
CMD_RCONF: "read config",
CMD_WCONF: "write config",
CMD_RCHAN: "read channel memory", # Unused
CMD_WCHAN: "write channel memory" # Unused because it is a hack.
}
# This is used to write the configuration of the radio based on info
# gleaned from the downloaded app. There are empty spaces and we honor
# them because we don't know what they are (yet) although we read the
# whole of memory.
#
# Channel memory is separate. There are 1000 (1-999) channels.
# These are read/written to the radio in 4 channel (96 byte)
# records starting at address 0xa00 and ending at
# 0x4800 (presuming the end of channel 1000 is 0x4860-1)
config_map = ( # map address, write size, write count
(0x40, 16, 1), # Passwords
(0x740, 40, 1), # FM chan 1-20
(0x780, 16, 1), # vfo-b-150
(0x790, 16, 1), # vfo-b-450
(0x800, 16, 1), # vfo-a-150
(0x810, 16, 1), # vfo-a-450
(0x820, 16, 1), # vfo-a-300
(0x830, 16, 1), # vfo-a-700
(0x840, 16, 1), # vfo-a-200
(0x860, 16, 1), # area-a-conf
(0x870, 16, 1), # area-b-conf
(0x880, 16, 1), # radio conf 0
(0x890, 16, 1), # radio conf 1
(0x8a0, 16, 1), # radio conf 2
(0x8b0, 16, 1), # radio conf 3
(0x8c0, 16, 1), # PTT-ANI
(0x8d0, 16, 1), # SCC
(0x8e0, 16, 1), # power save
(0x8f0, 16, 1), # Display banner
(0x940, 64, 2), # Scan groups and names
(0xa00, 64, 249), # Memory Channels 1-996
(0x4840, 48, 1), # Memory Channels 997-999
(0x4900, 32, 249), # Memory Names 1-996
(0x6820, 24, 1), # Memory Names 997-999
(0x7400, 64, 5), # CALL-ID 1-20, names 1-20
)
MEM_VALID = 0xfc
MEM_INVALID = 0xff
# Radio memory map. This matches the reads/writes above.
# structure elements whose name starts with x are currently unidentified
_MEM_FORMAT02 = """
#seekto 0x40;
struct {
char reset[6];
char x46[2];
char mode_sw[6];
char x4e;
} passwords;
#seekto 0x740;
struct {
u16 fm_freq;
} fm_chans[20];
// each band has its own configuration, essentially its default params
struct vfo {
u32 freq;
u32 offset;
u16 encqt;
u16 decqt;
u8 bit7_4:3,
qt:3,
bit1_0:2;
u8 bit7:1,
scan:1,
bit5:1,
pwr:2,
mod:1,
fm_dev:2;
u8 pad2:6,
shift:2;
u8 zeros;
};
#seekto 0x780;
struct {
struct vfo band_150;
struct vfo band_450;
} vfo_b;
#seekto 0x800;
struct {
struct vfo band_150;
struct vfo band_450;
struct vfo band_300;
struct vfo band_700;
struct vfo band_200;
} vfo_a;
// There are two independent radios, aka areas (as described
// in the manual as the upper and lower portions of the display...
struct area_conf {
u8 w_mode;
u8 x861;
u8 w_chan;
u8 scan_grp;
u8 bcl;
u8 sql;
u8 cset;
u8 step;
u8 scan_mode;
u8 x869;
u8 scan_range;
u8 x86b;
u8 x86c;
u8 x86d;
u8 x86e;
u8 x86f;
};
#seekto 0x860;
struct area_conf a_conf;
#seekto 0x870;
struct area_conf b_conf;
#seekto 0x880;
struct {
u8 menu_avail;
u8 reset_avail;
u8 x882;
u8 x883;
u8 lang;
u8 x885;
u8 beep;
u8 auto_am;
u8 qt_sw;
u8 lock;
u8 x88a;
u8 pf1;
u8 pf2;
u8 pf3;
u8 s_mute;
u8 type_set;
u8 tot;
u8 toa;
u8 ptt_id;
u8 x893;
u8 id_dly;
u8 x895;
u8 voice_sw;
u8 s_tone;
u8 abr_lvl;
u8 ring_time;
u8 roger;
u8 x89b;
u8 abr;
u8 save_m;
u8 lock_m;
u8 auto_lk;
u8 rpt_ptt;
u8 rpt_spk;
u8 rpt_rct;
u8 prich_sw;
u16 pri_ch;
u8 x8a6;
u8 x8a7;
u8 dtmf_st;
u8 dtmf_tx;
u8 x8aa;
u8 sc_qt;
u8 apo_tmr;
u8 vox_grd;
u8 vox_dly;
u8 rpt_kpt;
struct {
u16 scan_st;
u16 scan_end;
} a;
struct {
u16 scan_st;
u16 scan_end;
} b;
u8 x8b8;
u8 x8b9;
u8 x8ba;
u8 ponmsg;
u8 blcdsw;
u8 bledsw;
u8 x8be;
u8 x8bf;
} settings;
#seekto 0x8c0;
struct {
u8 code[6];
char x8c6[10];
} my_callid;
#seekto 0x8d0;
struct {
u8 scc[6];
char x8d6[10];
} stun;
#seekto 0x8e0;
struct {
u16 wake;
u16 sleep;
} save[4];
#seekto 0x8f0;
struct {
char banner[16];
} display;
#seekto 0x940;
struct {
struct {
i16 scan_st;
i16 scan_end;
} addrs[10];
u8 x0968[8];
struct {
char name[8];
} names[10];
} scn_grps;
// this array of structs is marshalled via the R/WCHAN commands
#seekto 0xa00;
struct {
u32 rxfreq;
u32 txfreq;
u16 encQT;
u16 decQT;
u8 bit7_5:3, // all ones
qt:3,
bit1_0:2;
u8 bit7:1,
scan:1,
bit5:1,
pwr:2,
mod:1,
fm_dev:2;
u8 state;
u8 c3;
} chan_blk[999];
// nobody really sees this. It is marshalled with chan_blk
// in 4 entry chunks
#seekto 0x4900;
// Tracks with the index of chan_blk[]
struct {
char name[8];
} chan_name[999];
#seekto 0x7400;
struct {
u8 cid[6];
u8 pad[2];
}call_ids[20];
// This array tracks with the index of call_ids[]
struct {
char name[6];
char pad[2];
} cid_names[20];
"""
# Support for the Wouxun KG-UV9D Plus radio
# Serial coms are at 19200 baud
# The data is passed in variable length records
# Record structure:
# Offset Usage
# 0 start of record (\x7d)
# 1 Command (6 commands, see above)
# 2 direction (\xff PC-> Radio, \x00 Radio -> PC)
# 3 length of payload (excluding header/checksum) (n)
# 4 payload (n bytes)
# 4+n+1 checksum - byte sum (% 256) of bytes 1 -> 4+n
#
# Memory Read Records:
# the payload is 3 bytes, first 2 are offset (big endian),
# 3rd is number of bytes to read
# Memory Write Records:
# the maximum payload size (from the Wouxun software)
# seems to be 66 bytes (2 bytes location + 64 bytes data).
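# Worked example (illustrative, derived from _pkt_encode() below): a request
# to read 64 bytes of config at address 0x0a00 is CMD_RCONF with the payload
# bytes 0a 00 40. Before obfuscation the record is
#   7d 82 ff 03 0a 00 40 0e        (0x0e is the 4-bit checksum)
# and after the running-xor obfuscation (seeded with 0x52) the bytes that
# actually go over the wire are
#   7d 82 ff 03 58 58 18 16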
def _pkt_encode(op, payload):
"""Assemble a packet for the radio and encode it for transmission.
Yes indeed, the checksum we store is only 4 bits. Why?
    I suspect it's a bug the radio firmware guys didn't want to fix,
i.e. a typo 0xff -> 0xf..."""
data = bytearray()
data.append(0x7d) # tag that marks the beginning of the packet
data.append(op)
data.append(0xff) # 0xff is from app to radio
# calc checksum from op to end
cksum = op + 0xff
if (payload):
data.append(len(payload))
cksum += len(payload)
for byte in payload:
cksum += byte
data.append(byte)
else:
data.append(0x00)
# Yea, this is a 4 bit cksum (also known as a bug)
data.append(cksum & 0xf)
# now obfuscate by an xor starting with first payload byte ^ 0x52
# including the trailing cksum.
xorbits = 0x52
for i, byte in enumerate(data[4:]):
xord = xorbits ^ byte
data[i + 4] = xord
xorbits = xord
return(data)
def _pkt_decode(data):
"""Take a packet hot off the wire and decode it into clear text
and return the fields. We say <<cleartext>> here because all it
turns out to be is annoying obfuscation.
    This is the inverse of _pkt_encode."""
# we don't care about data[0].
# It is always 0x7d and not included in checksum
op = data[1]
direction = data[2]
bytecount = data[3]
# First un-obfuscate the payload and cksum
payload = bytearray()
xorbits = 0x52
for i, byte in enumerate(data[4:]):
payload.append(xorbits ^ byte)
xorbits = byte
# Calculate the checksum starting with the 3 bytes of the header
cksum = op + direction + bytecount
for byte in payload[:-1]:
cksum += byte
# yes, a 4 bit cksum to match the encode
cksum_match = (cksum & 0xf) == payload[-1]
if (not cksum_match):
LOG.debug(
"Checksum missmatch: %x != %x; " % (cksum, payload[-1]))
return (cksum_match, op, payload[:-1])
# UI callbacks to process input for mapping UI fields to memory cells
def freq2int(val, min, max):
"""Convert a frequency as a string to a u32. Units is Hz
"""
_freq = chirp_common.parse_freq(str(val))
if _freq > max or _freq < min:
raise InvalidValueError("Frequency %s is not with in %s-%s" %
(chirp_common.format_freq(_freq),
chirp_common.format_freq(min),
chirp_common.format_freq(max)))
return _freq
def int2freq(freq):
"""
Convert a u32 frequency to a string for UI data entry/display
This is stored in the radio as units of 10Hz which we compensate to Hz.
    A value of -1 indicates <no frequency>, i.e. unused channel.
"""
if (int(freq) > 0):
f = chirp_common.format_freq(freq)
return f
else:
return ""
def freq2short(val, min, max):
"""Convert a frequency as a string to a u16 which is units of 10KHz
"""
_freq = chirp_common.parse_freq(str(val))
if _freq > max or _freq < min:
raise InvalidValueError("Frequency %s is not with in %s-%s" %
(chirp_common.format_freq(_freq),
chirp_common.format_freq(min),
chirp_common.format_freq(max)))
return _freq/100000 & 0xFFFF
def short2freq(freq):
"""
Convert a short frequency to a string for UI data entry/display
This is stored in the radio as units of 10KHz which we
compensate to Hz.
A value of -1 indicates <no frequency>, i.e. unused channel.
"""
if (int(freq) > 0):
f = chirp_common.format_freq(freq * 100000)
return f
else:
return ""
def tone2short(t):
"""Convert a string tone or DCS to an encoded u16
"""
tone = str(t)
if tone == "----":
u16tone = 0x0000
elif tone[0] == 'D': # This is a DCS code
c = tone[1: -1]
code = int(c, 8)
if tone[-1] == 'I':
code |= 0x4000
u16tone = code | 0x8000
else: # This is an analog CTCSS
u16tone = int(tone[0:-2]+tone[-1]) & 0xffff # strip the '.'
return u16tone
def short2tone(tone):
""" Map a binary CTCSS/DCS to a string name for the tone
"""
if tone == 0 or tone == 0xffff:
ret = "----"
else:
code = tone & 0x3fff
if tone & 0x8000: # This is a DCS
if tone & 0x4000: # This is an inverse code
ret = "D%0.3oI" % code
else:
ret = "D%0.3oN" % code
else: # Just plain old analog CTCSS
ret = "%4.1f" % (code / 10.0)
return ret
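# Worked examples (illustrative, per tone2short()/short2tone() above):
#   "88.5"  <-> 0x0375  (CTCSS stored as tenths of Hz, i.e. 885)
#   "D023N" <-> 0x8013  (bit 15 marks DCS; the code is kept in octal, 023)
#   "D023I" <-> 0xc013  (bit 14 additionally marks an inverse DCS code)
#   0x0000 and 0xffff both decode to "----" (no tone)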
def callid2str(cid):
"""Caller ID per MDC-1200 spec? Must be 3-6 digits (100 - 999999).
One digit (binary) per byte, terminated with '0xc'
"""
bin2ascii = " 1234567890"
cidstr = ""
for i in range(0, 6):
b = cid[i].get_value()
if b == 0xc: # the cid EOL
break
if b == 0 or b > 0xa:
raise InvalidValueError(
"Caller ID code has illegal byte 0x%x" % b)
cidstr += bin2ascii[b]
return cidstr
def str2callid(val):
""" Convert caller id strings from callid2str.
"""
ascii2bin = "0123456789"
s = str(val).strip()
if len(s) < 3 or len(s) > 6:
raise InvalidValueError(
"Caller ID must be at least 3 and no more than 6 digits")
if s[0] == '0':
raise InvalidValueError(
"First digit of a Caller ID cannot be a zero '0'")
blk = bytearray()
for c in s:
if c not in ascii2bin:
raise InvalidValueError(
"Caller ID must be all digits 0x%x" % c)
b = (0xa, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9)[int(c)]
blk.append(b)
if len(blk) < 6:
blk.append(0xc) # EOL a short ID
if len(blk) < 6:
for i in range(0, (6 - len(blk))):
blk.append(0xf0)
return blk
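# Worked example (illustrative): str2callid("911") produces the bytes
# [0x9, 0x1, 0x1, 0xc, 0xf0, 0xf0] (0xc terminates a short ID, 0xf0 pads),
# and callid2str() maps that block back to "911". Note that the digit '0'
# is stored as 0xa, not 0x0.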
def digits2str(digits, padding=' ', width=6):
"""Convert a password or SCC digit string to a string
Passwords are expanded to and must be 6 chars. Fill them with '0'
"""
bin2ascii = "0123456789"
digitsstr = ""
for i in range(0, 6):
b = digits[i].get_value()
if b == 0xc: # the digits EOL
break
if b >= 0xa:
raise InvalidValueError(
"Value has illegal byte 0x%x" % ord(b))
digitsstr += bin2ascii[b]
digitsstr = digitsstr.ljust(width, padding)
return digitsstr
def str2digits(val):
""" Callback for edited strings from digits2str.
"""
ascii2bin = " 0123456789"
s = str(val).strip()
if len(s) < 3 or len(s) > 6:
raise InvalidValueError(
"Value must be at least 3 and no more than 6 digits")
blk = bytearray()
for c in s:
if c not in ascii2bin:
raise InvalidValueError("Value must be all digits 0x%x" % c)
blk.append(int(c))
for i in range(len(blk), 6):
blk.append(0xc) # EOL a short ID
return blk
def name2str(name):
""" Convert a callid or scan group name to a string
Deal with fixed field padding (\0 or \0xff)
"""
namestr = ""
for i in range(0, len(name)):
b = ord(name[i].get_value())
if b != 0 and b != 0xff:
namestr += chr(b)
return namestr
def str2name(val, size=6, fillchar='\0', emptyfill='\0'):
""" Convert a string to a name. A name is a 6 element bytearray
with ascii chars.
"""
val = str(val).rstrip(' \t\r\n\0\0xff')
if len(val) == 0:
name = "".ljust(size, emptyfill)
else:
name = val.ljust(size, fillchar)
return name
def pw2str(pw):
"""Convert a password string (6 digits) to a string
Passwords must be 6 digits. If it is shorter, pad right with '0'
"""
pwstr = ""
ascii2bin = "0123456789"
for i in range(0, len(pw)):
b = pw[i].get_value()
if b not in ascii2bin:
raise InvalidValueError("Value must be digits 0-9")
pwstr += b
pwstr = pwstr.ljust(6, '0')
return pwstr
def str2pw(val):
"""Store a password from UI to memory obj
If we clear the password (make it go away), change the
empty string to '000000' since the radio must have *something*
Also, fill a < 6 digit pw with 0's
"""
ascii2bin = "0123456789"
val = str(val).rstrip(' \t\r\n\0\0xff')
if len(val) == 0: # a null password
val = "000000"
for i in range(0, len(val)):
b = val[i]
if b not in ascii2bin:
raise InvalidValueError("Value must be digits 0-9")
if len(val) == 0:
pw = "".ljust(6, '\0')
else:
pw = val.ljust(6, '0')
return pw
# Helpers to replace python2 things like confused str/byte
def _hex_print(data, addrfmt=None):
"""Return a hexdump-like encoding of @data
We expect data to be a bytearray, not a string.
Expanded from borrowed code to use the first 2 bytes as the address
per comm packet format.
"""
if addrfmt is None:
addrfmt = '%(addr)03i'
addr = 0
else: # assume first 2 bytes are address
a = struct.unpack(">H", data[0:2])
addr = a[0]
data = data[2:]
block_size = 16
lines = (len(data) / block_size)
if (len(data) % block_size > 0):
lines += 1
out = ""
left = len(data)
for block in range(0, lines):
addr += block * block_size
try:
out += addrfmt % locals()
except (OverflowError, ValueError, TypeError, KeyError):
out += "%03i" % addr
out += ': '
if left < block_size:
limit = left
else:
limit = block_size
for j in range(0, block_size):
if (j < limit):
out += "%02x " % data[(block * block_size) + j]
else:
out += " "
out += " "
for j in range(0, block_size):
if (j < limit):
_byte = data[(block * block_size) + j]
if _byte >= 0x20 and _byte < 0x7F:
out += "%s" % chr(_byte)
else:
out += "."
else:
out += " "
out += "\n"
if (left > block_size):
left -= block_size
return out
# Useful UI lists
STEPS = [2.5, 5.0, 6.25, 10.0, 12.5, 25.0, 50.0, 100.0]
S_TONES = [str(x) for x in [1000, 1450, 1750, 2100]]
STEP_LIST = [str(x)+"kHz" for x in STEPS]
ROGER_LIST = ["Off", "Begin", "End", "Both"]
TIMEOUT_LIST = [str(x) + "s" for x in range(15, 601, 15)]
TOA_LIST = ["Off"] + ["%ds" % t for t in range(1, 10)]
BANDWIDTH_LIST = ["Wide", "Narrow"]
LANGUAGE_LIST = ["English", "Chinese"]
PF1KEY_LIST = ["OFF", "call id", "r-alarm", "SOS", "SF-TX"]
PF2KEY_LIST = ["OFF", "Scan", "Second", "lamp", "SDF-DIR", "K-lamp"]
PF3KEY_LIST = ["OFF", "Call ID", "R-ALARM", "SOS", "SF-TX"]
WORKMODE_LIST = ["VFO freq", "Channel No.", "Ch. No.+Freq.",
"Ch. No.+Name"]
BACKLIGHT_LIST = ["Off"] + ["%sS" % t for t in range(1, 31)] + \
["Always On"]
SAVE_MODES = ["Off", "1", "2", "3", "4"]
LOCK_MODES = ["key-lk", "key+pg", "key+ptt", "all"]
APO_TIMES = ["Off"] + ["%dm" % t for t in range(15, 151, 15)]
OFFSET_LIST = ["none", "+", "-"]
PONMSG_LIST = ["Battery Volts", "Bitmap"]
SPMUTE_LIST = ["QT", "QT*T", "QT&T"]
DTMFST_LIST = ["Off", "DT-ST", "ANI-ST", "DT-ANI"]
DTMF_TIMES = ["%d" % x for x in range(80, 501, 20)]
PTTID_LIST = ["Off", "Begin", "End", "Both"]
ID_DLY_LIST = ["%dms" % t for t in range(100, 3001, 100)]
VOX_GRDS = ["Off"] + ["%dlevel" % l for l in range(1, 11)]
VOX_DLYS = ["Off"] + ["%ds" % t for t in range(1, 5)]
RPT_KPTS = ["Off"] + ["%dms" % t for t in range(100, 5001, 100)]
LIST_1_5 = ["%s" % x for x in range(1, 6)]
LIST_0_9 = ["%s" % x for x in range(0, 10)]
LIST_1_20 = ["%s" % x for x in range(1, 21)]
LIST_OFF_10 = ["Off"] + ["%s" % x for x in range(1, 11)]
SCANGRP_LIST = ["All"] + ["%s" % x for x in range(1, 11)]
SCANMODE_LIST = ["TO", "CO", "SE"]
SCANRANGE_LIST = ["Current band", "freq range", "ALL"]
SCQT_LIST = ["Decoder", "Encoder", "Both"]
S_MUTE_LIST = ["off", "rx mute", "tx mute", "r/t mute"]
POWER_LIST = ["Low", "Med", "High"]
RPTMODE_LIST = ["Radio", "One direction Repeater",
"Two direction repeater"]
TONE_LIST = ["----"] + ["%s" % str(t) for t in chirp_common.TONES] + \
["D%0.3dN" % dts for dts in chirp_common.DTCS_CODES] + \
["D%0.3dI" % dts for dts in chirp_common.DTCS_CODES]
@directory.register
class KGUV9DPlusRadio(chirp_common.CloneModeRadio,
chirp_common.ExperimentalRadio):
"""Wouxun KG-UV9D Plus"""
VENDOR = "Wouxun"
MODEL = "KG-UV9D Plus"
_model = "KG-UV9D"
_rev = "00" # default rev for the radio I know about...
_file_ident = "kg-uv9d"
BAUD_RATE = 19200
POWER_LEVELS = [chirp_common.PowerLevel("L", watts=1),
chirp_common.PowerLevel("M", watts=2),
chirp_common.PowerLevel("H", watts=5)]
_mmap = ""
def _read_record(self):
""" Read and validate the header of a radio reply.
A record is a formatted byte stream as follows:
0x7D All records start with this
opcode This is in the set of legal commands.
The radio reply matches the request
dir This is the direction, 0xFF to the radio,
0x00 from the radio.
cnt Count of bytes in payload
(not including the trailing checksum byte)
<cnt bytes>
<checksum byte>
"""
# first get the header and validate it
data = bytearray(self.pipe.read(4))
if (len(data) < 4):
raise errors.RadioError('Radio did not respond')
if (data[0] != 0x7D):
raise errors.RadioError(
'Radio reply garbled (%02x)' % data[0])
if (data[1] not in cmd_name):
raise errors.RadioError(
"Unrecognized opcode (%02x)" % data[1])
if (data[2] != 0x00):
raise errors.RadioError(
"Direction incorrect. Got (%02x)" % data[2])
payload_len = data[3]
# don't forget to read the checksum byte
data.extend(self.pipe.read(payload_len + 1))
if (len(data) != (payload_len + 5)): # we got a short read
raise errors.RadioError(
"Radio reply wrong size. Wanted %d, got %d" %
((payload_len + 1), (len(data) - 4)))
return _pkt_decode(data)
def _write_record(self, cmd, payload=None):
""" Write a request packet to the radio.
"""
packet = _pkt_encode(cmd, payload)
self.pipe.write(packet)
@classmethod
def match_model(cls, filedata, filename):
"""Look for bits in the file image and see if it looks
like ours...
TODO: there is a bunch of rubbish between 0x50 and 0x160
that is still a known unknown
"""
return cls._file_ident in filedata[0x51:0x59].lower()
def _identify(self):
""" Identify the radio
The ident block identifies the radio and its capabilities.
This block is always 78 bytes. The rev == '01' is the base
radio and '02' seems to be the '-Plus' version.
I don't really trust the content after the model and revision.
One would assume this is pretty much constant data but I have
seen differences between my radio and the dump named
KG-UV9D-Plus-OutOfBox-Read.txt from bug #3509. The first
five bands match the OEM windows
app except the 350-400 band. The OOB trace has the 700MHz
band different. This is speculation at this point.
TODO: This could be smarter and reject a radio not actually
a UV9D...
"""
for _i in range(0, 10): # retry 10 times if we get junk
self._write_record(CMD_IDENT)
chksum_match, op, _resp = self._read_record()
if len(_resp) == 0:
raise Exception("Radio not responding")
if len(_resp) != 74:
LOG.error(
"Expected and IDENT reply of 78 bytes. Got (%d)" %
len(_resp))
continue
if not chksum_match:
LOG.error("Checksum error: retrying ident...")
time.sleep(0.100)
continue
if op != CMD_IDENT:
LOG.error("Expected IDENT reply. Got (%02x)" % op)
continue
LOG.debug("Got:\n%s" % _hex_print(_resp))
(mod, rev) = struct.unpack(">7s2s", _resp[0:9])
LOG.debug("Model %s, rev %s" % (mod, rev))
if mod == self._model:
self._rev = rev
return
else:
raise Exception("Unable to identify radio")
raise Exception("All retries to identify failed")
def process_mmap(self):
if self._rev == "02" or self._rev == "00":
self._memobj = bitwise.parse(_MEM_FORMAT02, self._mmap)
else: # this is where you elif the other variants and non-Plus radios
raise errors.RadioError(
"Unrecognized model variation (%s). No memory map for it" %
self._rev)
def sync_in(self):
""" Public sync_in
Download contents of the radio. Throw errors back
to the core if the radio does not respond.
"""
try:
self._identify()
self._mmap = self._do_download()
self._write_record(CMD_HANGUP)
except errors.RadioError:
raise
except Exception, e:
LOG.exception('Unknown error during download process')
raise errors.RadioError(
"Failed to communicate with radio: %s" % e)
self.process_mmap()
def sync_out(self):
""" Public sync_out
Upload the modified memory image into the radio.
"""
try:
self._identify()
self._do_upload()
self._write_record(CMD_HANGUP)
except errors.RadioError:
raise
except Exception, e:
raise errors.RadioError(
"Failed to communicate with radio: %s" % e)
return
def _do_download(self):
""" Read the whole of radio memory in 64 byte chunks.
We load the config space followed by loading memory channels.
The radio seems to be a "clone" type and the memory channels
are actually within the config space. There are separate
commands (CMD_RCHAN, CMD_WCHAN) for reading channel memory but
these seem to be a hack that can only do 4 channels at a time.
Since the radio only supports 999, (can only support 3 chars
in the display UI?) although the vendors app reads 1000
channels, it hacks back to config writes (CMD_WCONF) for the
last 3 channels and names. We keep it simple and just read
the whole thing even though the vendor app doesn't. Channels
are separate in their app simply because the radio protocol
has read/write commands to access it. What they do is simply
marshal the frequency+mode bits in 4 channel chunks followed
by a separate chunk of for names. In config space, they are two
separate arrays 1..999. Given that this space is not a
multiple of 4, there is hackery on upload to do the writes to
config space. See upload for this.
"""
mem = bytearray(0x8000) # The radio's memory map is 32k
for addr in range(0, 0x8000, 64):
req = bytearray(struct.pack(">HB", addr, 64))
self._write_record(CMD_RCONF, req)
chksum_match, op, resp = self._read_record()
if not chksum_match:
LOG.debug(_hex_print(resp))
raise Exception(
"Checksum error while reading configuration (0x%x)" %
addr)
pa = struct.unpack(">H", resp[0:2])
pkt_addr = pa[0]
payload = resp[2:]
if op != CMD_RCONF or addr != pkt_addr:
raise Exception(
"Expected CMD_RCONF (%x) reply. Got (%02x: %x)" %
(addr, op, pkt_addr))
LOG.debug("Config read (0x%x):\n%s" %
(addr, _hex_print(resp, '0x%(addr)04x')))
for i in range(0, len(payload) - 1):
mem[addr + i] = payload[i]
if self.status_fn:
status = chirp_common.Status()
status.cur = addr
status.max = 0x8000
status.msg = "Cloning from radio"
self.status_fn(status)
strmem = "".join([chr(x) for x in mem])
return memmap.MemoryMap(strmem)
def _do_upload(self):
"""Walk through the config map and write updated records to
the radio. The config map contains only the regions we know
about. We don't use the channel memory commands to avoid the
hackery of using config write commands to fill in the last
3 channel memory and names slots. As we discover other useful
goodies in the map, we can add more slots...
"""
for ar, size, count in config_map:
for addr in range(ar, ar + (size*count), size):
req = bytearray(struct.pack(">H", addr))
req.extend(self.get_mmap()[addr:addr + size])
self._write_record(CMD_WCONF, req)
LOG.debug("Config write (0x%x):\n%s" %
(addr, _hex_print(req)))
chksum_match, op, ack = self._read_record()
LOG.debug("Config write ack [%x]\n%s" %
(addr, _hex_print(ack)))
a = struct.unpack(">H", ack) # big endian short...
ack = a[0]
if not chksum_match or op != CMD_WCONF or addr != ack:
msg = ""
if not chksum_match:
msg += "Checksum err, "
if op != CMD_WCONF:
msg += "cmd mismatch %x != %x, " % \
(op, CMD_WCONF)
if addr != ack:
msg += "ack error %x != %x, " % (addr, ack)
raise Exception("Radio did not ack block: %s error" % msg)
if self.status_fn:
status = chirp_common.Status()
status.cur = addr
status.max = 0x8000
status.msg = "Update radio"
self.status_fn(status)
def get_features(self):
""" Public get_features
Return the features of this radio once we have identified
it and gotten its bits
"""
rf = chirp_common.RadioFeatures()
rf.has_settings = True
rf.has_ctone = True
rf.has_rx_dtcs = True
rf.has_cross = True
rf.has_tuning_step = False
rf.has_bank = False
rf.can_odd_split = True
rf.valid_skips = ["", "S"]
rf.valid_tmodes = ["", "Tone", "TSQL", "DTCS", "Cross"]
rf.valid_cross_modes = [
"Tone->Tone",
"Tone->DTCS",
"DTCS->Tone",
"DTCS->",
"->Tone",
"->DTCS",
"DTCS->DTCS",
]
rf.valid_modes = ["FM", "NFM", "AM"]
rf.valid_power_levels = self.POWER_LEVELS
rf.valid_name_length = 8
rf.valid_duplexes = ["", "-", "+", "split", "off"]
rf.valid_bands = [(108000000, 136000000), # Aircraft AM
(136000000, 180000000), # supports 2m
(230000000, 250000000),
(350000000, 400000000),
(400000000, 520000000), # supports 70cm
(700000000, 985000000)]
rf.valid_characters = chirp_common.CHARSET_ASCII
rf.valid_tuning_steps = STEPS
rf.memory_bounds = (1, 999) # 999 memories
return rf
@classmethod
def get_prompts(cls):
rp = chirp_common.RadioPrompts()
rp.experimental = ("This radio driver is currently under development. "
"There are no known issues with it, but you should "
"proceed with caution.")
return rp
def get_raw_memory(self, number):
return repr(self._memobj.chan_blk[number])
def _get_tone(self, _mem, mem):
"""Decode both the encode and decode CTSS/DCS codes from
the memory channel and stuff them into the UI
memory channel row.
"""
txtone = short2tone(_mem.encQT)
rxtone = short2tone(_mem.decQT)
pt = "N"
pr = "N"
if txtone == "----":
txmode = ""
elif txtone[0] == "D":
mem.dtcs = int(txtone[1:4])
if txtone[4] == "I":
pt = "R"
txmode = "DTCS"
else:
mem.rtone = float(txtone)
txmode = "Tone"
if rxtone == "----":
rxmode = ""
elif rxtone[0] == "D":
mem.rx_dtcs = int(rxtone[1:4])
if rxtone[4] == "I":
pr = "R"
rxmode = "DTCS"
else:
mem.ctone = float(rxtone)
rxmode = "Tone"
if txmode == "Tone" and len(rxmode) == 0:
mem.tmode = "Tone"
elif (txmode == rxmode and txmode == "Tone" and
mem.rtone == mem.ctone):
mem.tmode = "TSQL"
elif (txmode == rxmode and txmode == "DTCS" and
mem.dtcs == mem.rx_dtcs):
mem.tmode = "DTCS"
elif (len(rxmode) + len(txmode)) > 0:
mem.tmode = "Cross"
mem.cross_mode = "%s->%s" % (txmode, rxmode)
mem.dtcs_polarity = pt + pr
LOG.debug("_get_tone: Got TX %s (%i) RX %s (%i)" %
(txmode, _mem.encQT, rxmode, _mem.decQT))
def get_memory(self, number):
""" Public get_memory
Return the channel memory referenced by number to the UI.
"""
_mem = self._memobj.chan_blk[number - 1]
_nam = self._memobj.chan_name[number - 1]
mem = chirp_common.Memory()
mem.number = number
_valid = _mem.state
if _valid != MEM_VALID and _valid != 0 and _valid != 2:
# In Issue #6995 we can find _valid values of 0 and 2 in the IMG
# so these values should be treated like MEM_VALID.
mem.empty = True
return mem
else:
mem.empty = False
mem.freq = int(_mem.rxfreq) * 10
if _mem.txfreq == 0xFFFFFFFF:
# TX freq not set
mem.duplex = "off"
mem.offset = 0
elif int(_mem.rxfreq) == int(_mem.txfreq):
mem.duplex = ""
mem.offset = 0
elif abs(int(_mem.rxfreq) * 10 - int(_mem.txfreq) * 10) > 70000000:
mem.duplex = "split"
mem.offset = int(_mem.txfreq) * 10
else:
mem.duplex = int(_mem.rxfreq) > int(_mem.txfreq) and "-" or "+"
mem.offset = abs(int(_mem.rxfreq) - int(_mem.txfreq)) * 10
mem.name = name2str(_nam.name)
self._get_tone(_mem, mem)
mem.skip = "" if bool(_mem.scan) else "S"
mem.power = self.POWER_LEVELS[_mem.pwr]
if _mem.mod == 1:
mem.mode = "AM"
elif _mem.fm_dev == 0:
mem.mode = "FM"
else:
mem.mode = "NFM"
# qt has no home in the UI
return mem
def _set_tone(self, mem, _mem):
"""Update the memory channel block CTCC/DCS tones
from the UI fields
"""
def _set_dcs(code, pol):
val = int("%i" % code, 8) | 0x8000
if pol == "R":
val |= 0x4000
return val
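        # Worked example of the encoding above: DCS code 023 (stored as 23)
        # gives int("23", 8) = 0x13, so 0x13 | 0x8000 = 0x8013 for normal
        # polarity and 0x8013 | 0x4000 = 0xC013 when the polarity is "R".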
rx_mode = tx_mode = None
rxtone = txtone = 0x0000
if mem.tmode == "Tone":
tx_mode = "Tone"
txtone = int(mem.rtone * 10)
elif mem.tmode == "TSQL":
rx_mode = tx_mode = "Tone"
rxtone = txtone = int(mem.ctone * 10)
elif mem.tmode == "DTCS":
tx_mode = rx_mode = "DTCS"
txtone = _set_dcs(mem.dtcs, mem.dtcs_polarity[0])
rxtone = _set_dcs(mem.dtcs, mem.dtcs_polarity[1])
elif mem.tmode == "Cross":
tx_mode, rx_mode = mem.cross_mode.split("->")
if tx_mode == "DTCS":
txtone = _set_dcs(mem.dtcs, mem.dtcs_polarity[0])
elif tx_mode == "Tone":
txtone = int(mem.rtone * 10)
if rx_mode == "DTCS":
rxtone = _set_dcs(mem.rx_dtcs, mem.dtcs_polarity[1])
elif rx_mode == "Tone":
rxtone = int(mem.ctone * 10)
_mem.decQT = rxtone
_mem.encQT = txtone
LOG.debug("Set TX %s (%i) RX %s (%i)" %
(tx_mode, _mem.encQT, rx_mode, _mem.decQT))
def set_memory(self, mem):
""" Public set_memory
Inverse of get_memory. Update the radio memory image
from the mem object
"""
number = mem.number
_mem = self._memobj.chan_blk[number - 1]
_nam = self._memobj.chan_name[number - 1]
if mem.empty:
_mem.set_raw("\xFF" * (_mem.size() / 8))
_nam.name = str2name("", 8, '\0', '\0')
_mem.state = MEM_INVALID
return
_mem.rxfreq = int(mem.freq / 10)
if mem.duplex == "off":
_mem.txfreq = 0xFFFFFFFF
elif mem.duplex == "split":
_mem.txfreq = int(mem.offset / 10)
elif mem.duplex == "+":
_mem.txfreq = int(mem.freq / 10) + int(mem.offset / 10)
elif mem.duplex == "-":
_mem.txfreq = int(mem.freq / 10) - int(mem.offset / 10)
else:
_mem.txfreq = int(mem.freq / 10)
_mem.scan = int(mem.skip != "S")
if mem.mode == "FM":
_mem.mod = 0 # make sure forced AM is off
_mem.fm_dev = 0
elif mem.mode == "NFM":
_mem.mod = 0
_mem.fm_dev = 1
elif mem.mode == "AM":
_mem.mod = 1 # AM on
_mem.fm_dev = 1 # set NFM bandwidth
else:
_mem.mod = 0
_mem.fm_dev = 0 # Catchall default is FM
# set the tone
self._set_tone(mem, _mem)
# set the power
if mem.power:
_mem.pwr = self.POWER_LEVELS.index(mem.power)
else:
_mem.pwr = True
# Set fields we can't access via the UI table to safe defaults
_mem.qt = 0 # mute mode to QT
_nam.name = str2name(mem.name, 8, '\0', '\0')
_mem.state = MEM_VALID
# Build the UI configuration tabs
# the channel memory tab is built by the core.
# We have no control over it
def _core_tab(self):
""" Build Core Configuration tab
Radio settings common to all modes and areas go here.
"""
s = self._memobj.settings
cf = RadioSettingGroup("cfg_grp", "Configuration")
cf.append(RadioSetting("auto_am",
"Auto detect AM(53)",
RadioSettingValueBoolean(s.auto_am)))
cf.append(RadioSetting("qt_sw",
"Scan tone detect(59)",
RadioSettingValueBoolean(s.qt_sw)))
cf.append(
RadioSetting("s_mute",
"SubFreq Mute(60)",
RadioSettingValueList(S_MUTE_LIST,
S_MUTE_LIST[s.s_mute])))
cf.append(
RadioSetting("tot",
"Transmit timeout Timer(10)",
RadioSettingValueList(TIMEOUT_LIST,
TIMEOUT_LIST[s.tot])))
cf.append(
RadioSetting("toa",
"Transmit Timeout Alarm(11)",
RadioSettingValueList(TOA_LIST,
TOA_LIST[s.toa])))
cf.append(
RadioSetting("ptt_id",
"PTT Caller ID mode(23)",
RadioSettingValueList(PTTID_LIST,
PTTID_LIST[s.ptt_id])))
cf.append(
RadioSetting("id_dly",
"Caller ID Delay time(25)",
RadioSettingValueList(ID_DLY_LIST,
ID_DLY_LIST[s.id_dly])))
cf.append(RadioSetting("voice_sw",
"Voice Guide(12)",
RadioSettingValueBoolean(s.voice_sw)))
cf.append(RadioSetting("beep",
"Keypad Beep(13)",
RadioSettingValueBoolean(s.beep)))
cf.append(
RadioSetting("s_tone",
"Side Tone(36)",
RadioSettingValueList(S_TONES,
S_TONES[s.s_tone])))
cf.append(
RadioSetting("ring_time",
"Ring Time(26)",
RadioSettingValueList(
LIST_OFF_10,
LIST_OFF_10[s.ring_time])))
cf.append(
RadioSetting("roger",
"Roger Beep(9)",
RadioSettingValueList(ROGER_LIST,
ROGER_LIST[s.roger])))
cf.append(RadioSetting("blcdsw",
"Backlight(41)",
RadioSettingValueBoolean(s.blcdsw)))
cf.append(
RadioSetting("abr",
"Auto Backlight Time(1)",
RadioSettingValueList(BACKLIGHT_LIST,
BACKLIGHT_LIST[s.abr])))
cf.append(
RadioSetting("abr_lvl",
"Backlight Brightness(27)",
RadioSettingValueList(LIST_1_5,
LIST_1_5[s.abr_lvl])))
cf.append(RadioSetting("lock",
"Keypad Lock",
RadioSettingValueBoolean(s.lock)))
cf.append(
RadioSetting("lock_m",
"Keypad Lock Mode(35)",
RadioSettingValueList(LOCK_MODES,
LOCK_MODES[s.lock_m])))
cf.append(RadioSetting("auto_lk",
"Keypad Autolock(34)",
RadioSettingValueBoolean(s.auto_lk)))
cf.append(RadioSetting("prich_sw",
"Priority Channel Scan(33)",
RadioSettingValueBoolean(s.prich_sw)))
cf.append(RadioSetting("pri_ch",
"Priority Channel(32)",
RadioSettingValueInteger(1, 999,
s.pri_ch)))
cf.append(
RadioSetting("dtmf_st",
"DTMF Sidetone(22)",
RadioSettingValueList(DTMFST_LIST,
DTMFST_LIST[s.dtmf_st])))
cf.append(RadioSetting("sc_qt",
"Scan QT Save Mode(38)",
RadioSettingValueList(
SCQT_LIST,
SCQT_LIST[s.sc_qt])))
cf.append(
RadioSetting("apo_tmr",
"Automatic Power-off(39)",
RadioSettingValueList(APO_TIMES,
APO_TIMES[s.apo_tmr])))
cf.append( # VOX "guard" is really VOX trigger audio level
RadioSetting("vox_grd",
"VOX level(7)",
RadioSettingValueList(VOX_GRDS,
VOX_GRDS[s.vox_grd])))
cf.append(
RadioSetting("vox_dly",
"VOX Delay(37)",
RadioSettingValueList(VOX_DLYS,
VOX_DLYS[s.vox_dly])))
cf.append(
RadioSetting("lang",
"Menu Language(14)",
RadioSettingValueList(LANGUAGE_LIST,
LANGUAGE_LIST[s.lang])))
cf.append(RadioSetting("ponmsg",
"Poweron message(40)",
RadioSettingValueList(
PONMSG_LIST, PONMSG_LIST[s.ponmsg])))
cf.append(RadioSetting("bledsw",
"Receive LED(42)",
RadioSettingValueBoolean(s.bledsw)))
return cf
def _repeater_tab(self):
"""Repeater mode functions
"""
s = self._memobj.settings
cf = RadioSettingGroup("repeater", "Repeater Functions")
cf.append(
RadioSetting("type_set",
"Radio Mode(43)",
RadioSettingValueList(
RPTMODE_LIST,
RPTMODE_LIST[s.type_set])))
cf.append(RadioSetting("rpt_ptt",
"Repeater PTT(45)",
RadioSettingValueBoolean(s.rpt_ptt)))
cf.append(RadioSetting("rpt_spk",
"Repeater Mode Speaker(44)",
RadioSettingValueBoolean(s.rpt_spk)))
cf.append(
RadioSetting("rpt_kpt",
"Repeater Hold Time(46)",
RadioSettingValueList(RPT_KPTS,
RPT_KPTS[s.rpt_kpt])))
cf.append(RadioSetting("rpt_rct",
"Repeater Receipt Tone(47)",
RadioSettingValueBoolean(s.rpt_rct)))
return cf
def _admin_tab(self):
"""Admin functions not present in radio menu...
        These are admin functions, not radio operation configuration
"""
def apply_cid(setting, obj):
c = str2callid(setting.value)
obj.code = c
def apply_scc(setting, obj):
c = str2digits(setting.value)
obj.scc = c
def apply_mode_sw(setting, obj):
pw = str2pw(setting.value)
obj.mode_sw = pw
setting.value = pw2str(obj.mode_sw)
def apply_reset(setting, obj):
pw = str2pw(setting.value)
obj.reset = pw
setting.value = pw2str(obj.reset)
def apply_wake(setting, obj):
obj.wake = int(setting.value)/10
def apply_sleep(setting, obj):
obj.sleep = int(setting.value)/10
pw = self._memobj.passwords # admin passwords
s = self._memobj.settings
cf = RadioSettingGroup("admin", "Admin Functions")
cf.append(RadioSetting("menu_avail",
"Menu available in channel mode",
RadioSettingValueBoolean(s.menu_avail)))
mode_sw = RadioSettingValueString(0, 6,
pw2str(pw.mode_sw), False)
rs = RadioSetting("passwords.mode_sw",
"Mode Switch Password", mode_sw)
rs.set_apply_callback(apply_mode_sw, pw)
cf.append(rs)
cf.append(RadioSetting("reset_avail",
"Radio Reset Available",
RadioSettingValueBoolean(s.reset_avail)))
reset = RadioSettingValueString(0, 6, pw2str(pw.reset), False)
rs = RadioSetting("passwords.reset",
"Radio Reset Password", reset)
rs.set_apply_callback(apply_reset, pw)
cf.append(rs)
cf.append(
RadioSetting("dtmf_tx",
"DTMF Tx Duration",
RadioSettingValueList(DTMF_TIMES,
DTMF_TIMES[s.dtmf_tx])))
cid = self._memobj.my_callid
my_callid = RadioSettingValueString(3, 6,
callid2str(cid.code), False)
rs = RadioSetting("my_callid.code",
"PTT Caller ID code(24)", my_callid)
rs.set_apply_callback(apply_cid, cid)
cf.append(rs)
stun = self._memobj.stun
st = RadioSettingValueString(0, 6, digits2str(stun.scc), False)
rs = RadioSetting("stun.scc", "Security code", st)
rs.set_apply_callback(apply_scc, stun)
cf.append(rs)
cf.append(
RadioSetting("settings.save_m",
"Save Mode (2)",
RadioSettingValueList(SAVE_MODES,
SAVE_MODES[s.save_m])))
for i in range(0, 4):
sm = self._memobj.save[i]
wake = RadioSettingValueInteger(0, 18000, sm.wake * 10, 1)
wf = RadioSetting("save[%i].wake" % i,
"Save Mode %d Wake Time" % (i+1), wake)
wf.set_apply_callback(apply_wake, sm)
cf.append(wf)
slp = RadioSettingValueInteger(0, 18000, sm.sleep * 10, 1)
wf = RadioSetting("save[%i].sleep" % i,
"Save Mode %d Sleep Time" % (i+1), slp)
wf.set_apply_callback(apply_sleep, sm)
cf.append(wf)
_msg = str(self._memobj.display.banner).split("\0")[0]
val = RadioSettingValueString(0, 16, _msg)
val.set_mutable(True)
cf.append(RadioSetting("display.banner",
"Display Message", val))
return cf
def _fm_tab(self):
"""FM Broadcast channels
"""
def apply_fm(setting, obj):
f = freq2short(setting.value, 76000000, 108000000)
obj.fm_freq = f
fm = RadioSettingGroup("fm_chans", "FM Broadcast")
for ch in range(0, 20):
chan = self._memobj.fm_chans[ch]
freq = RadioSettingValueString(0, 20,
short2freq(chan.fm_freq))
rs = RadioSetting("fm_%d" % (ch + 1),
"FM Channel %d" % (ch + 1), freq)
rs.set_apply_callback(apply_fm, chan)
fm.append(rs)
return fm
def _scan_grp(self):
"""Scan groups
"""
def apply_name(setting, obj):
name = str2name(setting.value, 8, '\0', '\0')
obj.name = name
def apply_start(setting, obj):
"""Do a callback to deal with RadioSettingInteger limitation
on memory address resolution
"""
obj.scan_st = int(setting.value)
def apply_end(setting, obj):
"""Do a callback to deal with RadioSettingInteger limitation
on memory address resolution
"""
obj.scan_end = int(setting.value)
sgrp = self._memobj.scn_grps
scan = RadioSettingGroup("scn_grps", "Channel Scanner Groups")
for i in range(0, 10):
s_grp = sgrp.addrs[i]
s_name = sgrp.names[i]
rs_name = RadioSettingValueString(0, 8,
name2str(s_name.name))
rs = RadioSetting("scn_grps.names[%i].name" % i,
"Group %i Name" % (i + 1), rs_name)
rs.set_apply_callback(apply_name, s_name)
scan.append(rs)
rs_st = RadioSettingValueInteger(1, 999, s_grp.scan_st)
rs = RadioSetting("scn_grps.addrs[%i].scan_st" % i,
"Starting Channel", rs_st)
rs.set_apply_callback(apply_start, s_grp)
scan.append(rs)
rs_end = RadioSettingValueInteger(1, 999, s_grp.scan_end)
rs = RadioSetting("scn_grps.addrs[%i].scan_end" % i,
"Last Channel", rs_end)
rs.set_apply_callback(apply_end, s_grp)
scan.append(rs)
return scan
def _callid_grp(self):
"""Caller IDs to be recognized by radio
This really should be a table in the UI
"""
def apply_callid(setting, obj):
c = str2callid(setting.value)
obj.cid = c
def apply_name(setting, obj):
name = str2name(setting.value, 6, '\0', '\xff')
obj.name = name
cid = RadioSettingGroup("callids", "Caller IDs")
for i in range(0, 20):
callid = self._memobj.call_ids[i]
name = self._memobj.cid_names[i]
c_name = RadioSettingValueString(0, 6, name2str(name.name))
rs = RadioSetting("cid_names[%i].name" % i,
"Caller ID %i Name" % (i + 1), c_name)
rs.set_apply_callback(apply_name, name)
cid.append(rs)
c_id = RadioSettingValueString(0, 6,
callid2str(callid.cid),
False)
rs = RadioSetting("call_ids[%i].cid" % i,
"Caller ID Code", c_id)
rs.set_apply_callback(apply_callid, callid)
cid.append(rs)
return cid
def _band_tab(self, area, band):
""" Build a band tab inside a VFO/Area
"""
def apply_freq(setting, lo, hi, obj):
f = freq2int(setting.value, lo, hi)
obj.freq = f/10
def apply_offset(setting, obj):
f = freq2int(setting.value, 0, 5000000)
obj.offset = f/10
def apply_enc(setting, obj):
t = tone2short(setting.value)
obj.encqt = t
def apply_dec(setting, obj):
t = tone2short(setting.value)
obj.decqt = t
if area == "a":
if band == 150:
c = self._memobj.vfo_a.band_150
lo = 108000000
hi = 180000000
elif band == 200:
c = self._memobj.vfo_a.band_200
lo = 230000000
hi = 250000000
elif band == 300:
c = self._memobj.vfo_a.band_300
lo = 350000000
hi = 400000000
elif band == 450:
c = self._memobj.vfo_a.band_450
lo = 400000000
hi = 512000000
else: # 700
c = self._memobj.vfo_a.band_700
lo = 700000000
hi = 985000000
else: # area 'b'
if band == 150:
c = self._memobj.vfo_b.band_150
lo = 136000000
hi = 180000000
else: # 450
c = self._memobj.vfo_b.band_450
lo = 400000000
hi = 512000000
prefix = "vfo_%s.band_%d" % (area, band)
bf = RadioSettingGroup(prefix, "%dMHz Band" % band)
freq = RadioSettingValueString(0, 15, int2freq(c.freq * 10))
rs = RadioSetting(prefix + ".freq", "Rx Frequency", freq)
rs.set_apply_callback(apply_freq, lo, hi, c)
bf.append(rs)
off = RadioSettingValueString(0, 15, int2freq(c.offset * 10))
rs = RadioSetting(prefix + ".offset", "Tx Offset(28)", off)
rs.set_apply_callback(apply_offset, c)
bf.append(rs)
rs = RadioSetting(prefix + ".encqt",
"Encode QT(17,19)",
RadioSettingValueList(TONE_LIST,
short2tone(c.encqt)))
rs.set_apply_callback(apply_enc, c)
bf.append(rs)
rs = RadioSetting(prefix + ".decqt",
"Decode QT(16,18)",
RadioSettingValueList(TONE_LIST,
short2tone(c.decqt)))
rs.set_apply_callback(apply_dec, c)
bf.append(rs)
bf.append(RadioSetting(prefix + ".qt",
"Mute Mode(21)",
RadioSettingValueList(SPMUTE_LIST,
SPMUTE_LIST[c.qt])))
bf.append(RadioSetting(prefix + ".scan",
"Scan this(48)",
RadioSettingValueBoolean(c.scan)))
bf.append(RadioSetting(prefix + ".pwr",
"Power(5)",
RadioSettingValueList(
POWER_LIST, POWER_LIST[c.pwr])))
bf.append(RadioSetting(prefix + ".mod",
"AM Modulation(54)",
RadioSettingValueBoolean(c.mod)))
bf.append(RadioSetting(prefix + ".fm_dev",
"FM Deviation(4)",
RadioSettingValueList(
BANDWIDTH_LIST,
BANDWIDTH_LIST[c.fm_dev])))
bf.append(
RadioSetting(prefix + ".shift",
"Frequency Shift(6)",
RadioSettingValueList(OFFSET_LIST,
OFFSET_LIST[c.shift])))
return bf
def _area_tab(self, area):
"""Build a VFO tab
"""
def apply_scan_st(setting, scan_lo, scan_hi, obj):
f = freq2short(setting.value, scan_lo, scan_hi)
obj.scan_st = f
def apply_scan_end(setting, scan_lo, scan_hi, obj):
f = freq2short(setting.value, scan_lo, scan_hi)
obj.scan_end = f
if area == "a":
desc = "Area A Settings"
c = self._memobj.a_conf
scan_lo = 108000000
scan_hi = 985000000
scan_rng = self._memobj.settings.a
band_list = (150, 200, 300, 450, 700)
else:
desc = "Area B Settings"
c = self._memobj.b_conf
scan_lo = 136000000
scan_hi = 512000000
scan_rng = self._memobj.settings.b
band_list = (150, 450)
prefix = "%s_conf" % area
af = RadioSettingGroup(prefix, desc)
af.append(
RadioSetting(prefix + ".w_mode",
"Workmode",
RadioSettingValueList(
WORKMODE_LIST,
WORKMODE_LIST[c.w_mode])))
af.append(RadioSetting(prefix + ".w_chan",
"Channel",
RadioSettingValueInteger(1, 999,
c.w_chan)))
af.append(
RadioSetting(prefix + ".scan_grp",
"Scan Group(49)",
RadioSettingValueList(
SCANGRP_LIST,
SCANGRP_LIST[c.scan_grp])))
af.append(RadioSetting(prefix + ".bcl",
"Busy Channel Lock-out(15)",
RadioSettingValueBoolean(c.bcl)))
af.append(
RadioSetting(prefix + ".sql",
"Squelch Level(8)",
RadioSettingValueList(LIST_0_9,
LIST_0_9[c.sql])))
af.append(
RadioSetting(prefix + ".cset",
"Call ID Group(52)",
RadioSettingValueList(LIST_1_20,
LIST_1_20[c.cset])))
af.append(
RadioSetting(prefix + ".step",
"Frequency Step(3)",
RadioSettingValueList(
STEP_LIST, STEP_LIST[c.step])))
af.append(
RadioSetting(prefix + ".scan_mode",
"Scan Mode(20)",
RadioSettingValueList(
SCANMODE_LIST,
SCANMODE_LIST[c.scan_mode])))
af.append(
RadioSetting(prefix + ".scan_range",
"Scan Range(50)",
RadioSettingValueList(
SCANRANGE_LIST,
SCANRANGE_LIST[c.scan_range])))
st = RadioSettingValueString(0, 15,
short2freq(scan_rng.scan_st))
rs = RadioSetting("settings.%s.scan_st" % area,
"Frequency Scan Start", st)
rs.set_apply_callback(apply_scan_st, scan_lo, scan_hi, scan_rng)
af.append(rs)
end = RadioSettingValueString(0, 15,
short2freq(scan_rng.scan_end))
rs = RadioSetting("settings.%s.scan_end" % area,
"Frequency Scan End", end)
rs.set_apply_callback(apply_scan_end, scan_lo, scan_hi,
scan_rng)
af.append(rs)
# Each area has its own set of bands
for band in (band_list):
af.append(self._band_tab(area, band))
return af
def _key_tab(self):
"""Build radio key/button menu
"""
s = self._memobj.settings
kf = RadioSettingGroup("key_grp", "Key Settings")
kf.append(RadioSetting("settings.pf1",
"PF1 Key function(55)",
RadioSettingValueList(
PF1KEY_LIST,
PF1KEY_LIST[s.pf1])))
kf.append(RadioSetting("settings.pf2",
"PF2 Key function(56)",
RadioSettingValueList(
PF2KEY_LIST,
PF2KEY_LIST[s.pf2])))
kf.append(RadioSetting("settings.pf3",
"PF3 Key function(57)",
RadioSettingValueList(
PF3KEY_LIST,
PF3KEY_LIST[s.pf3])))
return kf
def _get_settings(self):
"""Build the radio configuration settings menus
"""
core_grp = self._core_tab()
fm_grp = self._fm_tab()
area_a_grp = self._area_tab("a")
area_b_grp = self._area_tab("b")
key_grp = self._key_tab()
scan_grp = self._scan_grp()
callid_grp = self._callid_grp()
admin_grp = self._admin_tab()
rpt_grp = self._repeater_tab()
core_grp.append(key_grp)
core_grp.append(admin_grp)
core_grp.append(rpt_grp)
group = RadioSettings(core_grp,
area_a_grp,
area_b_grp,
fm_grp,
scan_grp,
callid_grp
)
return group
def get_settings(self):
""" Public build out linkage between radio settings and UI
"""
try:
return self._get_settings()
except Exception:
import traceback
LOG.error("Failed to parse settings: %s",
traceback.format_exc())
return None
def _is_freq(self, element):
"""This is a hack to smoke out whether we need to do
frequency translations for otherwise innocent u16s and u32s
"""
return "rxfreq" in element.get_name() or \
"txfreq" in element.get_name() or \
"scan_st" in element.get_name() or \
"scan_end" in element.get_name() or \
"offset" in element.get_name() or \
"fm_stop" in element.get_name()
def set_settings(self, settings):
""" Public update radio settings via UI callback
A lot of this should be in common code....
"""
for element in settings:
if not isinstance(element, RadioSetting):
LOG.debug("set_settings: not instance %s" %
element.get_name())
self.set_settings(element)
continue
else:
try:
if "." in element.get_name():
bits = element.get_name().split(".")
obj = self._memobj
for bit in bits[:-1]:
# decode an array index
if "[" in bit and "]" in bit:
bit, index = bit.split("[", 1)
index, junk = index.split("]", 1)
index = int(index)
obj = getattr(obj, bit)[index]
else:
obj = getattr(obj, bit)
setting = bits[-1]
else:
obj = self._memobj.settings
setting = element.get_name()
if element.has_apply_callback():
LOG.debug("Using apply callback")
element.run_apply_callback()
else:
LOG.debug("Setting %s = %s" %
(setting, element.value))
if self._is_freq(element):
setattr(obj, setting, int(element.value)/10)
else:
setattr(obj, setting, element.value)
except Exception, e:
LOG.debug("set_settings: Exception with %s" %
element.get_name())
raise
| gpl-3.0 |
stxnext-kindergarten/presence-analyzer-przemekmazurek | bootstrap.py | 299 | 5686 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
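# For example (the interpreter path and version below are illustrative only):
#     /usr/bin/python2.7 bootstrap.py -v 2.2.1 -c buildout.cfg
# downloads the requested zc.buildout release into a temporary egg directory
# and then runs its "bootstrap" command against the given configuration file.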
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
options, args = parser.parse_args()
######################################################################
# load/install setuptools
to_reload = False
try:
import pkg_resources
import setuptools
except ImportError:
ez = {}
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
# XXX use a more permanent ez_setup.py URL when available.
exec(urlopen('https://bitbucket.org/pypa/setuptools/raw/0.7.2/ez_setup.py'
).read(), ez)
setup_args = dict(to_dir=tmpeggs, download_delay=0)
ez['use_setuptools'](**setup_args)
if to_reload:
reload(pkg_resources)
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
cmd = [sys.executable, '-c',
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
raise Exception(
"Failed to execute command:\n%s",
repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
| mit |
feincms/feincms-links | feincms_links/models.py | 1 | 2131 | from django.db import models
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from feincms.module.page.models import Page
from feincms.admin.item_editor import FeinCMSInline
class Category(models.Model):
name = models.CharField(_('name'), max_length=100)
description = models.CharField(_('description'), max_length=200,
blank=True)
ordering = models.PositiveIntegerField(_('ordering'), default=0)
def __unicode__(self):
return self.name
class Meta:
ordering = ('ordering', 'name')
verbose_name = _('category')
verbose_name_plural = _('categories')
class Link(models.Model):
name = models.CharField(_('name'), max_length=100)
description = models.CharField(_('description'), max_length=200,
blank=True)
url = models.URLField(_('URL'))
category = models.ForeignKey(Category, verbose_name=_('category'))
ordering = models.PositiveIntegerField(_('ordering'), default=0)
class Meta:
ordering = ('ordering', 'name')
verbose_name = _('link')
verbose_name_plural = _('links')
def __unicode__(self):
return self.name
def get_absolute_url(self):
return self.url
class LinkContent(models.Model):
""" Content type which renders all links from a selected category"""
category = models.ForeignKey(Category, blank=True, null=True,
verbose_name=_('category'),
help_text=_('Leave blank to list all categories.'))
class Meta:
abstract = True
verbose_name = _('link list')
verbose_name_plural = _('link lists')
def render(self, **kwargs):
ctx = {'content': self}
if self.category:
ctx['links'] = self.category.link_set.all()
else:
ctx['links'] = Link.objects.order_by(
'category__ordering',
'category__name',
'ordering',
'name',
)
return render_to_string('content/links/default.html', ctx,
context_instance=kwargs.get('context'))
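# A minimal sketch of how a project typically registers this content type with
# FeinCMS (shown only for illustration; it is not part of this module):
#     Page.create_content_type(LinkContent)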
| bsd-3-clause |
gchp/django | tests/delete/tests.py | 10 | 19299 | from __future__ import unicode_literals
from math import ceil
from django.db import IntegrityError, connection, models
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.utils.six.moves import range
from .models import (
A, M, MR, R, S, T, Avatar, Base, Child, HiddenUser, HiddenUserProfile,
M2MFrom, M2MTo, MRNull, Parent, RChild, User, create_a, get_default_r,
)
class OnDeleteTests(TestCase):
def setUp(self):
self.DEFAULT = get_default_r()
def test_auto(self):
a = create_a('auto')
a.auto.delete()
self.assertFalse(A.objects.filter(name='auto').exists())
def test_auto_nullable(self):
a = create_a('auto_nullable')
a.auto_nullable.delete()
self.assertFalse(A.objects.filter(name='auto_nullable').exists())
def test_setvalue(self):
a = create_a('setvalue')
a.setvalue.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(self.DEFAULT, a.setvalue.pk)
def test_setnull(self):
a = create_a('setnull')
a.setnull.delete()
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.setnull)
def test_setdefault(self):
a = create_a('setdefault')
a.setdefault.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(self.DEFAULT, a.setdefault.pk)
def test_setdefault_none(self):
a = create_a('setdefault_none')
a.setdefault_none.delete()
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.setdefault_none)
def test_cascade(self):
a = create_a('cascade')
a.cascade.delete()
self.assertFalse(A.objects.filter(name='cascade').exists())
def test_cascade_nullable(self):
a = create_a('cascade_nullable')
a.cascade_nullable.delete()
self.assertFalse(A.objects.filter(name='cascade_nullable').exists())
def test_protect(self):
a = create_a('protect')
self.assertRaises(IntegrityError, a.protect.delete)
def test_do_nothing(self):
# Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model,
# so we connect to pre_delete and set the fk to a known value.
replacement_r = R.objects.create()
def check_do_nothing(sender, **kwargs):
obj = kwargs['instance']
obj.donothing_set.update(donothing=replacement_r)
models.signals.pre_delete.connect(check_do_nothing)
a = create_a('do_nothing')
a.donothing.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(replacement_r, a.donothing)
models.signals.pre_delete.disconnect(check_do_nothing)
def test_do_nothing_qscount(self):
"""
Test that a models.DO_NOTHING relation doesn't trigger a query.
"""
b = Base.objects.create()
with self.assertNumQueries(1):
# RelToBase should not be queried.
b.delete()
self.assertEqual(Base.objects.count(), 0)
def test_inheritance_cascade_up(self):
child = RChild.objects.create()
child.delete()
self.assertFalse(R.objects.filter(pk=child.pk).exists())
def test_inheritance_cascade_down(self):
child = RChild.objects.create()
parent = child.r_ptr
parent.delete()
self.assertFalse(RChild.objects.filter(pk=child.pk).exists())
def test_cascade_from_child(self):
a = create_a('child')
a.child.delete()
self.assertFalse(A.objects.filter(name='child').exists())
self.assertFalse(R.objects.filter(pk=a.child_id).exists())
def test_cascade_from_parent(self):
a = create_a('child')
R.objects.get(pk=a.child_id).delete()
self.assertFalse(A.objects.filter(name='child').exists())
self.assertFalse(RChild.objects.filter(pk=a.child_id).exists())
def test_setnull_from_child(self):
a = create_a('child_setnull')
a.child_setnull.delete()
self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists())
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.child_setnull)
def test_setnull_from_parent(self):
a = create_a('child_setnull')
R.objects.get(pk=a.child_setnull_id).delete()
self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists())
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.child_setnull)
def test_o2o_setnull(self):
a = create_a('o2o_setnull')
a.o2o_setnull.delete()
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.o2o_setnull)
class DeletionTests(TestCase):
def test_m2m(self):
m = M.objects.create()
r = R.objects.create()
MR.objects.create(m=m, r=r)
r.delete()
self.assertFalse(MR.objects.exists())
r = R.objects.create()
MR.objects.create(m=m, r=r)
m.delete()
self.assertFalse(MR.objects.exists())
m = M.objects.create()
r = R.objects.create()
m.m2m.add(r)
r.delete()
through = M._meta.get_field('m2m').remote_field.through
self.assertFalse(through.objects.exists())
r = R.objects.create()
m.m2m.add(r)
m.delete()
self.assertFalse(through.objects.exists())
m = M.objects.create()
r = R.objects.create()
MRNull.objects.create(m=m, r=r)
r.delete()
        self.assertTrue(MRNull.objects.exists())
self.assertFalse(m.m2m_through_null.exists())
def test_bulk(self):
s = S.objects.create(r=R.objects.create())
for i in range(2 * GET_ITERATOR_CHUNK_SIZE):
T.objects.create(s=s)
# 1 (select related `T` instances)
# + 1 (select related `U` instances)
# + 2 (delete `T` instances in batches)
# + 1 (delete `s`)
self.assertNumQueries(5, s.delete)
self.assertFalse(S.objects.exists())
def test_instance_update(self):
deleted = []
related_setnull_sets = []
def pre_delete(sender, **kwargs):
obj = kwargs['instance']
deleted.append(obj)
if isinstance(obj, R):
related_setnull_sets.append(list(a.pk for a in obj.setnull_set.all()))
models.signals.pre_delete.connect(pre_delete)
a = create_a('update_setnull')
a.setnull.delete()
a = create_a('update_cascade')
a.cascade.delete()
for obj in deleted:
self.assertIsNone(obj.pk)
for pk_list in related_setnull_sets:
for a in A.objects.filter(id__in=pk_list):
self.assertIsNone(a.setnull)
models.signals.pre_delete.disconnect(pre_delete)
def test_deletion_order(self):
pre_delete_order = []
post_delete_order = []
def log_post_delete(sender, **kwargs):
pre_delete_order.append((sender, kwargs['instance'].pk))
def log_pre_delete(sender, **kwargs):
post_delete_order.append((sender, kwargs['instance'].pk))
models.signals.post_delete.connect(log_post_delete)
models.signals.pre_delete.connect(log_pre_delete)
r = R.objects.create(pk=1)
s1 = S.objects.create(pk=1, r=r)
s2 = S.objects.create(pk=2, r=r)
T.objects.create(pk=1, s=s1)
T.objects.create(pk=2, s=s2)
RChild.objects.create(r_ptr=r)
r.delete()
self.assertEqual(
pre_delete_order, [(T, 2), (T, 1), (RChild, 1), (S, 2), (S, 1), (R, 1)]
)
self.assertEqual(
post_delete_order, [(T, 1), (T, 2), (RChild, 1), (S, 1), (S, 2), (R, 1)]
)
models.signals.post_delete.disconnect(log_post_delete)
models.signals.pre_delete.disconnect(log_pre_delete)
def test_relational_post_delete_signals_happen_before_parent_object(self):
deletions = []
def log_post_delete(instance, **kwargs):
self.assertTrue(R.objects.filter(pk=instance.r_id))
self.assertIs(type(instance), S)
deletions.append(instance.id)
r = R.objects.create(pk=1)
S.objects.create(pk=1, r=r)
models.signals.post_delete.connect(log_post_delete, sender=S)
try:
r.delete()
finally:
models.signals.post_delete.disconnect(log_post_delete)
self.assertEqual(len(deletions), 1)
self.assertEqual(deletions[0], 1)
@skipUnlessDBFeature("can_defer_constraint_checks")
def test_can_defer_constraint_checks(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
a = Avatar.objects.get(pk=u.avatar_id)
# 1 query to find the users for the avatar.
# 1 query to delete the user
# 1 query to delete the avatar
# The important thing is that when we can defer constraint checks there
# is no need to do an UPDATE on User.avatar to null it out.
# Attach a signal to make sure we will not do fast_deletes.
calls = []
def noop(*args, **kwargs):
calls.append('')
models.signals.post_delete.connect(noop, sender=User)
self.assertNumQueries(3, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
self.assertEqual(len(calls), 1)
models.signals.post_delete.disconnect(noop, sender=User)
@skipIfDBFeature("can_defer_constraint_checks")
def test_cannot_defer_constraint_checks(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
# Attach a signal to make sure we will not do fast_deletes.
calls = []
def noop(*args, **kwargs):
calls.append('')
models.signals.post_delete.connect(noop, sender=User)
a = Avatar.objects.get(pk=u.avatar_id)
# The below doesn't make sense... Why do we need to null out
# user.avatar if we are going to delete the user immediately after it,
# and there are no more cascades.
# 1 query to find the users for the avatar.
# 1 query to delete the user
# 1 query to null out user.avatar, because we can't defer the constraint
# 1 query to delete the avatar
self.assertNumQueries(4, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
self.assertEqual(len(calls), 1)
models.signals.post_delete.disconnect(noop, sender=User)
def test_hidden_related(self):
r = R.objects.create()
h = HiddenUser.objects.create(r=r)
HiddenUserProfile.objects.create(user=h)
r.delete()
self.assertEqual(HiddenUserProfile.objects.count(), 0)
def test_large_delete(self):
TEST_SIZE = 2000
objs = [Avatar() for i in range(0, TEST_SIZE)]
Avatar.objects.bulk_create(objs)
# Calculate the number of queries needed.
batch_size = connection.ops.bulk_batch_size(['pk'], objs)
# The related fetches are done in batches.
batches = int(ceil(float(len(objs)) / batch_size))
# One query for Avatar.objects.all() and then one related fast delete for
# each batch.
fetches_to_mem = 1 + batches
# The Avatar objects are going to be deleted in batches of GET_ITERATOR_CHUNK_SIZE
queries = fetches_to_mem + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE
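        # With GET_ITERATOR_CHUNK_SIZE at its usual value of 100, the 2000
        # avatars are removed in 20 DELETE batches; the number of fetch
        # queries depends on the backend's bulk_batch_size().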
self.assertNumQueries(queries, Avatar.objects.all().delete)
self.assertFalse(Avatar.objects.exists())
def test_large_delete_related(self):
TEST_SIZE = 2000
s = S.objects.create(r=R.objects.create())
for i in range(TEST_SIZE):
T.objects.create(s=s)
batch_size = max(connection.ops.bulk_batch_size(['pk'], range(TEST_SIZE)), 1)
# TEST_SIZE // batch_size (select related `T` instances)
# + 1 (select related `U` instances)
# + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE (delete `T` instances in batches)
# + 1 (delete `s`)
expected_num_queries = (ceil(TEST_SIZE // batch_size) +
ceil(TEST_SIZE // GET_ITERATOR_CHUNK_SIZE) + 2)
self.assertNumQueries(expected_num_queries, s.delete)
self.assertFalse(S.objects.exists())
self.assertFalse(T.objects.exists())
def test_delete_with_keeping_parents(self):
child = RChild.objects.create()
parent_id = child.r_ptr_id
child.delete(keep_parents=True)
self.assertFalse(RChild.objects.filter(id=child.id).exists())
self.assertTrue(R.objects.filter(id=parent_id).exists())
def test_queryset_delete_returns_num_rows(self):
"""
QuerySet.delete() should return the number of deleted rows and a
dictionary with the number of deletions for each object type.
"""
Avatar.objects.bulk_create([Avatar(desc='a'), Avatar(desc='b'), Avatar(desc='c')])
avatars_count = Avatar.objects.count()
deleted, rows_count = Avatar.objects.all().delete()
self.assertEqual(deleted, avatars_count)
# more complex example with multiple object types
r = R.objects.create()
h1 = HiddenUser.objects.create(r=r)
HiddenUser.objects.create(r=r)
HiddenUserProfile.objects.create(user=h1)
existed_objs = {
R._meta.label: R.objects.count(),
HiddenUser._meta.label: HiddenUser.objects.count(),
A._meta.label: A.objects.count(),
MR._meta.label: MR.objects.count(),
HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
}
deleted, deleted_objs = R.objects.all().delete()
for k, v in existed_objs.items():
self.assertEqual(deleted_objs[k], v)
def test_model_delete_returns_num_rows(self):
"""
Model.delete() should return the number of deleted rows and a
dictionary with the number of deletions for each object type.
"""
r = R.objects.create()
h1 = HiddenUser.objects.create(r=r)
h2 = HiddenUser.objects.create(r=r)
HiddenUser.objects.create(r=r)
HiddenUserProfile.objects.create(user=h1)
HiddenUserProfile.objects.create(user=h2)
m1 = M.objects.create()
m2 = M.objects.create()
MR.objects.create(r=r, m=m1)
r.m_set.add(m1)
r.m_set.add(m2)
r.save()
existed_objs = {
R._meta.label: R.objects.count(),
HiddenUser._meta.label: HiddenUser.objects.count(),
A._meta.label: A.objects.count(),
MR._meta.label: MR.objects.count(),
HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
M.m2m.through._meta.label: M.m2m.through.objects.count(),
}
deleted, deleted_objs = r.delete()
self.assertEqual(deleted, sum(existed_objs.values()))
for k, v in existed_objs.items():
self.assertEqual(deleted_objs[k], v)
def test_proxied_model_duplicate_queries(self):
"""
#25685 - Deleting instances of a model with existing proxy
classes should not issue multiple queries during cascade
deletion of referring models.
"""
avatar = Avatar.objects.create()
# One query for the Avatar table and a second for the User one.
with self.assertNumQueries(2):
avatar.delete()
class FastDeleteTests(TestCase):
def test_fast_delete_fk(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
a = Avatar.objects.get(pk=u.avatar_id)
# 1 query to fast-delete the user
# 1 query to delete the avatar
self.assertNumQueries(2, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
def test_fast_delete_m2m(self):
t = M2MTo.objects.create()
f = M2MFrom.objects.create()
f.m2m.add(t)
# 1 to delete f, 1 to fast-delete m2m for f
self.assertNumQueries(2, f.delete)
def test_fast_delete_revm2m(self):
t = M2MTo.objects.create()
f = M2MFrom.objects.create()
f.m2m.add(t)
# 1 to delete t, 1 to fast-delete t's m_set
self.assertNumQueries(2, f.delete)
def test_fast_delete_qs(self):
u1 = User.objects.create()
u2 = User.objects.create()
self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete)
self.assertEqual(User.objects.count(), 1)
self.assertTrue(User.objects.filter(pk=u2.pk).exists())
def test_fast_delete_joined_qs(self):
a = Avatar.objects.create(desc='a')
User.objects.create(avatar=a)
u2 = User.objects.create()
expected_queries = 1 if connection.features.update_can_self_select else 2
self.assertNumQueries(expected_queries,
User.objects.filter(avatar__desc='a').delete)
self.assertEqual(User.objects.count(), 1)
self.assertTrue(User.objects.filter(pk=u2.pk).exists())
def test_fast_delete_inheritance(self):
c = Child.objects.create()
p = Parent.objects.create()
# 1 for self, 1 for parent
# However, this doesn't work as child.parent access creates a query,
# and this means we will be generating extra queries (a lot for large
# querysets). This is not a fast-delete problem.
# self.assertNumQueries(2, c.delete)
c.delete()
self.assertFalse(Child.objects.exists())
self.assertEqual(Parent.objects.count(), 1)
self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1)
# 1 for self delete, 1 for fast delete of empty "child" qs.
self.assertNumQueries(2, p.delete)
self.assertFalse(Parent.objects.exists())
# 1 for self delete, 1 for fast delete of empty "child" qs.
c = Child.objects.create()
p = c.parent_ptr
self.assertNumQueries(2, p.delete)
self.assertFalse(Parent.objects.exists())
self.assertFalse(Child.objects.exists())
def test_fast_delete_large_batch(self):
User.objects.bulk_create(User() for i in range(0, 2000))
# No problems here - we aren't going to cascade, so we will fast
# delete the objects in a single query.
self.assertNumQueries(1, User.objects.all().delete)
a = Avatar.objects.create(desc='a')
User.objects.bulk_create(User(avatar=a) for i in range(0, 2000))
# We don't hit parameter amount limits for a, so just one query for
# that + fast delete of the related objs.
self.assertNumQueries(2, a.delete)
self.assertEqual(User.objects.count(), 0)
def test_fast_delete_empty_no_update_can_self_select(self):
"""
#25932 - Fast deleting on backends that don't have the
`no_update_can_self_select` feature should work even if the specified
filter doesn't match any row.
"""
with self.assertNumQueries(1):
self.assertEqual(
User.objects.filter(avatar__desc='missing').delete(),
(0, {'delete.User': 0})
)
| bsd-3-clause |
ArthurGarnier/SickRage | lib/feedparser/sgml.py | 21 | 2683 | from __future__ import absolute_import
import re
__all__ = [
'_SGML_AVAILABLE',
'sgmllib',
'charref',
'tagfind',
'attrfind',
'entityref',
'incomplete',
'interesting',
'shorttag',
'shorttagopen',
'starttagopen',
'endbracket',
]
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing and content sanitizing
try:
import sgmllib
except ImportError:
# This is probably Python 3, which doesn't include sgmllib anymore
_SGML_AVAILABLE = 0
# Mock sgmllib enough to allow subclassing later on
class sgmllib(object):
class SGMLParser(object):
def goahead(self, i):
pass
def parse_starttag(self, i):
pass
else:
_SGML_AVAILABLE = 1
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
)
# Unfortunately, these must be copied over to prevent NameError exceptions
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
class _EndBracketRegEx:
def __init__(self):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
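            # e.g. in '<a title="x > y" href="z">' the '>' inside the quoted
            # attribute value is skipped, so the match stops at the real end
            # of the tag.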
def search(self, target, index=0):
match = self.endbracket.match(target, index)
if match is not None:
# Returning a new object in the calling thread's context
# resolves a thread-safety.
return EndBracketMatch(match)
return None
class EndBracketMatch:
def __init__(self, match):
self.match = match
def start(self, n):
return self.match.end(n)
endbracket = _EndBracketRegEx()
| gpl-3.0 |
mszewczy/odoo | addons/account/project/project.py | 273 | 2423 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_analytic_journal(osv.osv):
_name = 'account.analytic.journal'
_description = 'Analytic Journal'
_columns = {
'name': fields.char('Journal Name', required=True),
'code': fields.char('Journal Code', size=8),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the analytic journal without removing it."),
        'type': fields.selection([('sale','Sale'), ('purchase','Purchase'), ('cash','Cash'), ('general','General'), ('situation','Situation')], 'Type', required=True, help="Gives the type of the analytic journal. When a document (e.g. an invoice) needs to create analytic entries, Odoo will look for a matching journal of the same type."),
'line_ids': fields.one2many('account.analytic.line', 'journal_id', 'Lines', copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'active': True,
'type': 'general',
'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
class account_journal(osv.osv):
_inherit="account.journal"
_columns = {
'analytic_journal_id':fields.many2one('account.analytic.journal','Analytic Journal', help="Journal for analytic entries"),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
WhySoGeeky/DroidPot | venv/lib/python2.7/site-packages/django/contrib/auth/urls.py | 568 | 1036 | # The views used below are normally mapped in django.contrib.admin.urls.py
# This URLs file is used to provide a reliable view deployment for test purposes.
# It is also provided as a convenience to those who want to deploy these URLs
# elsewhere.
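# A typical way to mount these patterns in a project (the "accounts/" prefix is
# only an illustration, not something this module requires):
#     from django.conf.urls import include, url
#     urlpatterns = [url(r'^accounts/', include('django.contrib.auth.urls'))]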
from django.conf.urls import url
from django.contrib.auth import views
urlpatterns = [
url(r'^login/$', views.login, name='login'),
url(r'^logout/$', views.logout, name='logout'),
url(r'^password_change/$', views.password_change, name='password_change'),
url(r'^password_change/done/$', views.password_change_done, name='password_change_done'),
url(r'^password_reset/$', views.password_reset, name='password_reset'),
url(r'^password_reset/done/$', views.password_reset_done, name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.password_reset_confirm, name='password_reset_confirm'),
url(r'^reset/done/$', views.password_reset_complete, name='password_reset_complete'),
]
| mit |
ESOedX/edx-platform | common/djangoapps/util/tests/test_organizations_helpers.py | 2 | 3116 | """
Tests for the organizations helpers library, which is the integration point for the edx-organizations API
"""
from __future__ import absolute_import
import six
from mock import patch
from util import organizations_helpers
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@patch.dict('django.conf.settings.FEATURES', {'ORGANIZATIONS_APP': False})
class OrganizationsHelpersTestCase(ModuleStoreTestCase):
"""
Main test suite for Organizations API client library
"""
CREATE_USER = False
def setUp(self):
"""
Test case scaffolding
"""
super(OrganizationsHelpersTestCase, self).setUp()
self.course = CourseFactory.create()
self.organization = {
'name': 'Test Organization',
'short_name': 'Orgx',
'description': 'Testing Organization Helpers Library',
}
def test_get_organization_returns_none_when_app_disabled(self):
response = organizations_helpers.get_organization(1)
self.assertEqual(len(response), 0)
def test_get_organizations_returns_none_when_app_disabled(self):
response = organizations_helpers.get_organizations()
self.assertEqual(len(response), 0)
def test_get_organization_courses_returns_none_when_app_disabled(self):
response = organizations_helpers.get_organization_courses(1)
self.assertEqual(len(response), 0)
def test_get_course_organizations_returns_none_when_app_disabled(self):
response = organizations_helpers.get_course_organizations(six.text_type(self.course.id))
self.assertEqual(len(response), 0)
def test_add_organization_returns_none_when_app_disabled(self):
response = organizations_helpers.add_organization(organization_data=self.organization)
self.assertIsNone(response)
def test_add_organization_course_returns_none_when_app_disabled(self):
response = organizations_helpers.add_organization_course(self.organization, self.course.id)
self.assertIsNone(response)
def test_get_organization_by_short_name_when_app_disabled(self):
"""
Tests get_organization_by_short_name api when app is disabled.
"""
response = organizations_helpers.get_organization_by_short_name(self.organization['short_name'])
self.assertIsNone(response)
@patch.dict('django.conf.settings.FEATURES', {'ORGANIZATIONS_APP': True})
def test_get_organization_by_short_name_when_app_enabled(self):
"""
Tests get_organization_by_short_name api when app is enabled.
"""
response = organizations_helpers.add_organization(organization_data=self.organization)
self.assertIsNotNone(response['id'])
response = organizations_helpers.get_organization_by_short_name(self.organization['short_name'])
self.assertIsNotNone(response['id'])
# fetch non existing org
response = organizations_helpers.get_organization_by_short_name('non_existing')
self.assertIsNone(response)
| agpl-3.0 |
chiefspace/udemy-rest-api | udemy_rest_api_section4/env/lib/python3.4/site-packages/setuptools/dist.py | 148 | 32599 | __all__ = ['Distribution']
import re
import os
import sys
import warnings
import distutils.log
import distutils.core
import distutils.cmd
from distutils.core import Distribution as _Distribution
from distutils.errors import (DistutilsOptionError, DistutilsPlatformError,
DistutilsSetupError)
from setuptools.depends import Require
from setuptools.compat import numeric_types, basestring
import pkg_resources
def _get_unpatched(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
while cls.__module__.startswith('setuptools'):
cls, = cls.__bases__
if not cls.__module__.startswith('distutils'):
raise AssertionError(
"distutils has already been patched by %r" % cls
)
return cls
_Distribution = _get_unpatched(_Distribution)
sequence = tuple, list
def check_importable(dist, attr, value):
try:
ep = pkg_resources.EntryPoint.parse('x='+value)
assert not ep.extras
except (TypeError,ValueError,AttributeError,AssertionError):
raise DistutilsSetupError(
"%r must be importable 'module:attrs' string (got %r)"
% (attr,value)
)
def assert_string_list(dist, attr, value):
"""Verify that value is a string list or None"""
try:
assert ''.join(value)!=value
except (TypeError,ValueError,AttributeError,AssertionError):
raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr,value)
)
def check_nsp(dist, attr, value):
"""Verify that namespace packages are valid"""
assert_string_list(dist,attr,value)
for nsp in value:
if not dist.has_contents_for(nsp):
raise DistutilsSetupError(
"Distribution contains no modules or packages for " +
"namespace package %r" % nsp
)
if '.' in nsp:
parent = '.'.join(nsp.split('.')[:-1])
if parent not in value:
distutils.log.warn(
"WARNING: %r is declared as a package namespace, but %r"
" is not: please correct this in setup.py", nsp, parent
)
def check_extras(dist, attr, value):
"""Verify that extras_require mapping is valid"""
try:
for k,v in value.items():
if ':' in k:
k,m = k.split(':',1)
if pkg_resources.invalid_marker(m):
raise DistutilsSetupError("Invalid environment marker: "+m)
list(pkg_resources.parse_requirements(v))
except (TypeError,ValueError,AttributeError):
raise DistutilsSetupError(
"'extras_require' must be a dictionary whose values are "
"strings or lists of strings containing valid project/version "
"requirement specifiers."
)
def assert_bool(dist, attr, value):
"""Verify that value is True, False, 0, or 1"""
if bool(value) != value:
raise DistutilsSetupError(
"%r must be a boolean value (got %r)" % (attr,value)
)
def check_requirements(dist, attr, value):
"""Verify that install_requires is a valid requirements list"""
try:
list(pkg_resources.parse_requirements(value))
except (TypeError,ValueError):
raise DistutilsSetupError(
"%r must be a string or list of strings "
"containing valid project/version requirement specifiers" % (attr,)
)
def check_entry_points(dist, attr, value):
"""Verify that entry_points map is parseable"""
try:
pkg_resources.EntryPoint.parse_map(value)
except ValueError:
e = sys.exc_info()[1]
raise DistutilsSetupError(e)
def check_test_suite(dist, attr, value):
if not isinstance(value,basestring):
raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
"""Verify that value is a dictionary of package names to glob lists"""
if isinstance(value,dict):
for k,v in value.items():
if not isinstance(k,str): break
try: iter(v)
except TypeError:
break
else:
return
raise DistutilsSetupError(
attr+" must be a dictionary mapping package names to lists of "
"wildcard patterns"
)
def check_packages(dist, attr, value):
for pkgname in value:
if not re.match(r'\w+(\.\w+)*', pkgname):
distutils.log.warn(
"WARNING: %r not a valid package name; please use only"
".-separated package names in setup.py", pkgname
)
class Distribution(_Distribution):
"""Distribution with support for features, tests, and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'features' **deprecated** -- a dictionary mapping option names to
'setuptools.Feature'
objects. Features are a portion of the distribution that can be
included or excluded based on user options, inter-feature dependencies,
and availability on the current system. Excluded features are omitted
from all setup commands, including source and binary distributions, so
you can create multiple distributions from the same source tree.
Feature names should be valid Python identifiers, except that they may
contain the '-' (minus) sign. Features can be included or excluded
via the command line options '--with-X' and '--without-X', where 'X' is
the name of the feature. Whether a feature is included by default, and
whether you are allowed to control this from the command line, is
determined by the Feature object. See the 'Feature' class for more
information.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution. They are used by the feature subsystem to configure the
distribution for the included and excluded features.
"""
_patched_dist = None
def patch_missing_pkg_info(self, attrs):
# Fake up a replacement for the data that would normally come from
# PKG-INFO, but which might not yet be built if this is a fresh
# checkout.
#
if not attrs or 'name' not in attrs or 'version' not in attrs:
return
key = pkg_resources.safe_name(str(attrs['name'])).lower()
dist = pkg_resources.working_set.by_key.get(key)
if dist is not None and not dist.has_metadata('PKG-INFO'):
dist._version = pkg_resources.safe_version(str(attrs['version']))
self._patched_dist = dist
def __init__(self, attrs=None):
have_package_data = hasattr(self, "package_data")
if not have_package_data:
self.package_data = {}
_attrs_dict = attrs or {}
if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
Feature.warn_deprecated()
self.require_features = []
self.features = {}
self.dist_files = []
self.src_root = attrs and attrs.pop("src_root", None)
self.patch_missing_pkg_info(attrs)
# Make sure we have any eggs needed to interpret 'attrs'
if attrs is not None:
self.dependency_links = attrs.pop('dependency_links', [])
assert_string_list(self,'dependency_links',self.dependency_links)
if attrs and 'setup_requires' in attrs:
self.fetch_build_eggs(attrs.pop('setup_requires'))
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
if not hasattr(self,ep.name):
setattr(self,ep.name,None)
_Distribution.__init__(self,attrs)
if isinstance(self.metadata.version, numeric_types):
# Some people apparently take "version number" too literally :)
self.metadata.version = str(self.metadata.version)
def parse_command_line(self):
"""Process features after parsing command line options"""
result = _Distribution.parse_command_line(self)
if self.features:
self._finalize_features()
return result
def _feature_attrname(self,name):
"""Convert feature name to corresponding option attribute name"""
return 'with_'+name.replace('-','_')
def fetch_build_eggs(self, requires):
"""Resolve pre-setup requirements"""
from pkg_resources import working_set, parse_requirements
for dist in working_set.resolve(
parse_requirements(requires), installer=self.fetch_build_egg,
replace_conflicting=True
):
working_set.add(dist, replace=True)
def finalize_options(self):
_Distribution.finalize_options(self)
if self.features:
self._set_global_opts_from_features()
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
value = getattr(self,ep.name,None)
if value is not None:
ep.require(installer=self.fetch_build_egg)
ep.load()(self, ep.name, value)
if getattr(self, 'convert_2to3_doctests', None):
# XXX may convert to set here when we can rely on set being builtin
self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
else:
self.convert_2to3_doctests = []
def fetch_build_egg(self, req):
"""Fetch an egg needed for building"""
try:
cmd = self._egg_fetcher
cmd.package_index.to_scan = []
except AttributeError:
from setuptools.command.easy_install import easy_install
dist = self.__class__({'script_args':['easy_install']})
dist.parse_config_files()
opts = dist.get_option_dict('easy_install')
keep = (
                'find_links', 'site_dirs', 'index_url', 'optimize',
                'allow_hosts'
)
for key in list(opts):
if key not in keep:
del opts[key] # don't use any other settings
if self.dependency_links:
links = self.dependency_links[:]
if 'find_links' in opts:
links = opts['find_links'][1].split() + links
opts['find_links'] = ('setup', links)
cmd = easy_install(
dist, args=["x"], install_dir=os.curdir, exclude_scripts=True,
always_copy=False, build_directory=None, editable=False,
upgrade=False, multi_version=True, no_report=True, user=False
)
cmd.ensure_finalized()
self._egg_fetcher = cmd
return cmd.easy_install(req)
def _set_global_opts_from_features(self):
"""Add --with-X/--without-X options based on optional features"""
go = []
no = self.negative_opt.copy()
for name,feature in self.features.items():
self._set_feature(name,None)
feature.validate(self)
if feature.optional:
descr = feature.description
incdef = ' (default)'
excdef=''
if not feature.include_by_default():
excdef, incdef = incdef, excdef
go.append(('with-'+name, None, 'include '+descr+incdef))
go.append(('without-'+name, None, 'exclude '+descr+excdef))
no['without-'+name] = 'with-'+name
self.global_options = self.feature_options = go + self.global_options
self.negative_opt = self.feature_negopt = no
def _finalize_features(self):
"""Add/remove features and resolve dependencies between them"""
# First, flag all the enabled items (and thus their dependencies)
for name,feature in self.features.items():
enabled = self.feature_is_included(name)
if enabled or (enabled is None and feature.include_by_default()):
feature.include_in(self)
self._set_feature(name,1)
# Then disable the rest, so that off-by-default features don't
# get flagged as errors when they're required by an enabled feature
for name,feature in self.features.items():
if not self.feature_is_included(name):
feature.exclude_from(self)
self._set_feature(name,0)
def get_command_class(self, command):
"""Pluggable version of get_command_class()"""
if command in self.cmdclass:
return self.cmdclass[command]
for ep in pkg_resources.iter_entry_points('distutils.commands',command):
ep.require(installer=self.fetch_build_egg)
self.cmdclass[command] = cmdclass = ep.load()
return cmdclass
else:
return _Distribution.get_command_class(self, command)
def print_commands(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
cmdclass = ep.load(False) # don't require extras, we're not running
self.cmdclass[ep.name] = cmdclass
return _Distribution.print_commands(self)
def _set_feature(self,name,status):
"""Set feature's inclusion status"""
setattr(self,self._feature_attrname(name),status)
def feature_is_included(self,name):
"""Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
return getattr(self,self._feature_attrname(name))
def include_feature(self,name):
"""Request inclusion of feature named 'name'"""
if self.feature_is_included(name)==0:
descr = self.features[name].description
raise DistutilsOptionError(
descr + " is required, but was excluded or is not available"
)
self.features[name].include_in(self)
self._set_feature(name,1)
def include(self,**attrs):
"""Add items to distribution that are named in keyword arguments
        For example, 'dist.include(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k,v in attrs.items():
include = getattr(self, '_include_'+k, None)
if include:
include(v)
else:
self._include_misc(k,v)
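    # Illustrative sketch (not part of the original source): a subclass can
    # add per-attribute inclusion logic by defining an '_include_<name>'
    # method, which include() will prefer over _include_misc(). The attribute
    # name 'foo' here is hypothetical.
    #
    #     class MyDistribution(Distribution):
    #         def _include_foo(self, value):
    #             # merge a dict-valued setting instead of list concatenation
    #             current = getattr(self, 'foo', None) or {}
    #             current.update(value)
    #             self.foo = current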
def exclude_package(self,package):
"""Remove packages, modules, and extensions in named package"""
pfx = package+'.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self,package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package+'.'
for p in self.iter_distribution_names():
if p==package or p.startswith(pfx):
return True
def _exclude_misc(self,name,value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is not None and not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self,name,[item for item in old if item not in value])
def _include_misc(self,name,value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value,sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self,name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is None:
setattr(self,name,value)
elif not isinstance(old,sequence):
raise DistutilsSetupError(
name+": this setting cannot be changed via include/exclude"
)
else:
setattr(self,name,old+[item for item in value if item not in old])
def exclude(self,**attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k,v in attrs.items():
exclude = getattr(self, '_exclude_'+k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k,v)
def _exclude_packages(self,packages):
if not isinstance(packages,sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
list(map(self.exclude_package, packages))
def _parse_command_opts(self, parser, args):
# Remove --with-X/--without-X options when processing command args
self.global_options = self.__class__.global_options
self.negative_opt = self.__class__.negative_opt
# First, expand any aliases
command = args[0]
aliases = self.get_option_dict('aliases')
while command in aliases:
src,alias = aliases[command]
del aliases[command] # ensure each alias can expand only once!
import shlex
args[:1] = shlex.split(alias,True)
command = args[0]
nargs = _Distribution._parse_command_opts(self, parser, args)
# Handle commands that want to consume all remaining arguments
cmd_class = self.get_command_class(command)
if getattr(cmd_class,'command_consumes_arguments',None):
self.get_option_dict(command)['args'] = ("command line", nargs)
if nargs is not None:
return []
return nargs
def get_cmdline_options(self):
"""Return a '{cmd: {opt:val}}' map of all command-line options
Option names are all long, but do not include the leading '--', and
contain dashes rather than underscores. If the option doesn't take
an argument (e.g. '--quiet'), the 'val' is 'None'.
Note that options provided by config files are intentionally excluded.
"""
d = {}
for cmd,opts in self.command_options.items():
for opt,(src,val) in opts.items():
if src != "command line":
continue
opt = opt.replace('_','-')
if val==0:
cmdobj = self.get_command_obj(cmd)
neg_opt = self.negative_opt.copy()
neg_opt.update(getattr(cmdobj,'negative_opt',{}))
for neg,pos in neg_opt.items():
if pos==opt:
opt=neg
val=None
break
else:
raise AssertionError("Shouldn't be able to get here")
elif val==1:
val = None
d.setdefault(cmd,{})[opt] = val
return d
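    # Illustrative note (not part of the original source): for a command line
    # such as "setup.py build_ext --inplace --include-dirs=/usr/local/include",
    # get_cmdline_options() would return roughly:
    #
    #     {'build_ext': {'inplace': None, 'include-dirs': '/usr/local/include'}}
    #
    # Boolean flags map to None; options read from config files are omitted.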
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext,tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
def handle_display_options(self, option_order):
"""If there were any non-global "display-only" options
(--help-commands or the metadata display options) on the command
line, display the requested info and return true; else return
false.
"""
import sys
if sys.version_info < (3,) or self.help_commands:
return _Distribution.handle_display_options(self, option_order)
# Stdout may be StringIO (e.g. in tests)
import io
if not isinstance(sys.stdout, io.TextIOWrapper):
return _Distribution.handle_display_options(self, option_order)
# Don't wrap stdout if utf-8 is already the encoding. Provides
# workaround for #334.
if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
return _Distribution.handle_display_options(self, option_order)
# Print metadata in UTF-8 no matter the platform
encoding = sys.stdout.encoding
errors = sys.stdout.errors
newline = sys.platform != 'win32' and '\n' or None
line_buffering = sys.stdout.line_buffering
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
try:
return _Distribution.handle_display_options(self, option_order)
finally:
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), encoding, errors, newline, line_buffering)
# Install it throughout the distutils
for module in distutils.dist, distutils.core, distutils.cmd:
module.Distribution = Distribution
class Feature:
"""
**deprecated** -- The `Feature` facility was never completely implemented
or supported, `has reported issues
<https://bitbucket.org/pypa/setuptools/issue/58>`_ and will be removed in
a future version.
A subset of the distribution that can be excluded if unneeded/wanted
Features are created using these keyword arguments:
'description' -- a short, human readable description of the feature, to
be used in error messages, and option help messages.
'standard' -- if true, the feature is included by default if it is
available on the current system. Otherwise, the feature is only
included if requested via a command line '--with-X' option, or if
another included feature requires it. The default setting is 'False'.
'available' -- if true, the feature is available for installation on the
current system. The default setting is 'True'.
'optional' -- if true, the feature's inclusion can be controlled from the
command line, using the '--with-X' or '--without-X' options. If
false, the feature's inclusion status is determined automatically,
        based on 'available', 'standard', and whether any other feature
requires it. The default setting is 'True'.
'require_features' -- a string or sequence of strings naming features
that should also be included if this feature is included. Defaults to
empty list. May also contain 'Require' objects that should be
added/removed from the distribution.
'remove' -- a string or list of strings naming packages to be removed
from the distribution if this feature is *not* included. If the
feature *is* included, this argument is ignored. This argument exists
to support removing features that "crosscut" a distribution, such as
defining a 'tests' feature that removes all the 'tests' subpackages
provided by other features. The default for this argument is an empty
list. (Note: the named package(s) or modules must exist in the base
distribution when the 'setup()' function is initially called.)
other keywords -- any other keyword arguments are saved, and passed to
the distribution's 'include()' and 'exclude()' methods when the
feature is included or excluded, respectively. So, for example, you
could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
added or removed from the distribution as appropriate.
A feature must include at least one 'requires', 'remove', or other
keyword argument. Otherwise, it can't affect the distribution in any way.
Note also that you can subclass 'Feature' to create your own specialized
feature types that modify the distribution in other ways when included or
excluded. See the docstrings for the various methods here for more detail.
Aside from the methods, the only feature attributes that distributions look
at are 'description' and 'optional'.
"""
@staticmethod
def warn_deprecated():
warnings.warn(
"Features are deprecated and will be removed in a future "
"version. See http://bitbucket.org/pypa/setuptools/65.",
DeprecationWarning,
stacklevel=3,
)
def __init__(self, description, standard=False, available=True,
optional=True, require_features=(), remove=(), **extras):
self.warn_deprecated()
self.description = description
self.standard = standard
self.available = available
self.optional = optional
if isinstance(require_features,(str,Require)):
require_features = require_features,
self.require_features = [
r for r in require_features if isinstance(r,str)
]
er = [r for r in require_features if not isinstance(r,str)]
if er: extras['require_features'] = er
if isinstance(remove,str):
remove = remove,
self.remove = remove
self.extras = extras
if not remove and not require_features and not extras:
raise DistutilsSetupError(
"Feature %s: must define 'require_features', 'remove', or at least one"
" of 'packages', 'py_modules', etc."
)
def include_by_default(self):
"""Should this feature be included by default?"""
return self.available and self.standard
def include_in(self,dist):
"""Ensure feature and its requirements are included in distribution
You may override this in a subclass to perform additional operations on
the distribution. Note that this method may be called more than once
per feature, and so should be idempotent.
"""
if not self.available:
raise DistutilsPlatformError(
self.description+" is required,"
"but is not available on this platform"
)
dist.include(**self.extras)
for f in self.require_features:
dist.include_feature(f)
def exclude_from(self,dist):
"""Ensure feature is excluded from distribution
You may override this in a subclass to perform additional operations on
the distribution. This method will be called at most once per
feature, and only after all included features have been asked to
include themselves.
"""
dist.exclude(**self.extras)
if self.remove:
for item in self.remove:
dist.exclude_package(item)
def validate(self,dist):
"""Verify that feature makes sense in context of distribution
This method is called by the distribution just before it parses its
command line. It checks to ensure that the 'remove' attribute, if any,
contains only valid package/module names that are present in the base
distribution when 'setup()' is called. You may override it in a
subclass to perform any other required validation of the feature
against a target distribution.
"""
for item in self.remove:
if not dist.has_contents_for(item):
raise DistutilsSetupError(
"%s wants to be able to remove %s, but the distribution"
" doesn't contain any packages or modules under %s"
% (self.description, item, item)
)
| gpl-2.0 |